# -*- coding: utf-8 -*-
import re
import unittest.mock as mock
from django import test
from django.utils import timezone
import testutils.factories as factories
import app.definitions.models as models
import app.revisioner.tasks.v1.core as coretasks
import app.inspector.engines.mysql_inspector as engine
class MySQLInspectorInterfaceTests(test.TestCase):
"""Tests to ensure that the engine follows the subscribed interface.
"""
def setUp(self):
"""Get ready for some tests...
"""
self.connection = {
'host': 'localhost',
'username': 'admin',
'password': '1234567890',
'port': 3306,
'database': 'acme',
}
self.engine = engine.MySQLInspector(**self.connection)
def test_has_indexes_sql(self):
"""It should have `indexes_sql` attribute defined.
"""
assert isinstance(engine.MySQLInspector.indexes_sql, str)
def test_has_definitions_sql(self):
"""It should have `definitions_sql` attribute defined.
"""
assert isinstance(engine.MySQLInspector.definitions_sql, str)
@mock.patch.object(engine.MySQLInspector, 'get_first', return_value={'version': '5.7.0'})
def test_get_tables_and_views_sql(self, get_first):
"""It should create the proper `tables_and_views_sql` where clause.
"""
sch = ['one', 'two', 'three']
sql = self.engine.get_tables_and_views_sql(sch)
exc = '''
WHERE t.table_schema NOT IN (%s, %s, %s)
'''
self.assertIn(
''.join(exc.split()).strip(),
''.join(sql.split()).strip(),
)
@mock.patch.object(engine.MySQLInspector, 'get_first', return_value={'version': '5.7.0'})
def test_get_tables_and_views_sql_ordered(self, get_first):
"""The Revisioner depends on the data coming in a specific order.
"""
sch = ['one', 'two', 'three']
sql = self.engine.get_tables_and_views_sql(sch).replace('\n', '')
exc = 'ORDER BY c.table_schema, c.table_name, c.ordinal_position'
self.assertEqual(exc, sql[-len(exc):])
@mock.patch.object(engine.MySQLInspector, 'get_first', return_value={'version': '5.7.0'})
def test_get_indexes_sql(self, get_first):
"""It should create the proper `indexes_sql` where clause.
"""
sch = ['one', 'two']
sql = self.engine.get_indexes_sql(sch)
exc = '''
WHERE t.table_schema NOT IN (%s, %s)
'''
self.assertIn(
''.join(exc.split()).strip(),
''.join(sql.split()).strip(),
)
def test_cursor_kwargs(self):
"""Snapshot test for cursor kwargs.
"""
assert self.engine.cursor_kwargs == {
'cursor_class': self.engine.dictcursor
}
def test_sys_schemas(self):
"""It should have the expected system table schemas.
"""
assert set(self.engine.sys_schemas) == {
'information_schema',
'mysql',
'performance_schema',
'sys',
}
def test_has_indexes(self):
"""It should have indexes.
"""
assert engine.MySQLInspector.has_indexes()
@mock.patch.object(engine.MySQLInspector, 'get_first', return_value={'version': '9.6.0'})
def test_get_db_version(self, get_first):
"""It should implement MySQL.get_db_version()
"""
self.assertEqual(self.engine.get_db_version(), '9.6.0')
@mock.patch.object(engine.MySQLInspector, 'get_db_version', return_value='10.1.2')
def test_version(self, get_db_version):
"""It should implement MySQL.version
"""
self.assertEqual(self.engine.version, '10.1.2')
def test_connect_kwargs(self):
"""It should have the connect timeout parameter.
"""
assert self.engine.connect_kwargs == {
'host': 'localhost',
'user': 'admin',
'password': '1234567890',
'port': 3306,
'database': 'acme',
'connect_timeout': 5,
}
def test_get_last_commit_time_for_table(self):
"""It should implement MySQL.get_last_commit_time_for_table
"""
self.assertEqual(self.engine.get_last_commit_time_for_table('public', 'accounts'), None)
class MySQLInspectorIntegrationTestMixin(object):
"""Test cases that hit a live database spun up via Docker.
"""
hostname = None
schema_count = 3
def get_connection(self):
return {
'host': self.hostname,
'username': 'metamapper_ro',
'password': '340Uuxwp7Mcxo7Khy',
'port': 3306,
'database': 'employees',
}
def get_inspector(self):
return engine.MySQLInspector(**self.get_connection())
def test_verify_connection(self):
"""It can connect to MySQL using the provided credentials.
"""
self.assertTrue(
self.get_inspector().verify_connection(),
'Host: %s' % self.hostname,
)
def test_tables_and_views(self):
"""It should return the correct table and view response.
"""
records = self.get_inspector().get_tables_and_views()
schemas = set()
table_items = []
table_types = set()
column_items = []
for record in records:
schemas.add((record['schema_object_id'], record['table_schema']))
# It should have unique table identities.
self.assertTrue(record['table_object_id'])
self.assertTrue(record['table_object_id'] not in table_items)
table_items.append(record['table_object_id'])
table_types.add(record['table_type'])
# It should have unique column identities.
for column in record['columns']:
self.assertTrue(column['column_object_id'])
self.assertTrue(column['column_object_id'] not in column_items)
column_items.append(column['column_object_id'])
# Each schema should have a unique identity.
self.assertEqual(len(schemas), self.schema_count)
self.assertEqual(table_types, {'base table', 'view'})
def test_indexes(self):
"""It should return the correct index response.
"""
records = self.get_inspector().get_indexes()
schemas = set()
index_items = []
for record in records:
schemas.add((record['schema_object_id'], record['schema_name']))
# It should have unique index identities.
self.assertTrue(record['index_object_id'])
self.assertTrue(record['index_object_id'] not in index_items)
index_items.append(record['index_object_id'])
# Each schema should have a unique identity.
self.assertEqual(len(schemas), self.schema_count)
def test_get_db_version(self):
"""It should return the correct version.
"""
self.assertTrue(
re.match(self.version_regexp, self.get_inspector().get_db_version()),
'Host: %s' % self.hostname,
)
def test_initial_revisioner_run(self):
"""It should be able to commit the initial run to the metastore.
"""
datastore = factories.DatastoreFactory(engine='mysql', **self.get_connection())
run = datastore.run_history.create(
workspace_id=datastore.workspace_id,
started_at=timezone.now(),
)
coretasks.start_run(run.id)
run.refresh_from_db()
self.assertTrue(run.finished_at is not None)
self.assertEqual(datastore.schemas.count(), self.schema_count)
column = models.Column.objects.get(name='emp_no', table__name='employees')
self.assertEqual(column.db_comment, 'The employee identification number')
@test.tag('mysql', 'inspector')
class MySQLFivePointSixIntegrationTests(MySQLInspectorIntegrationTestMixin, test.TestCase):
"""Integration tests for MySQL v5.6
"""
hostname = 'mysql-5.6'
version_regexp = r'5\.6\.[0-9]{1,3}'
@test.tag('mysql', 'inspector')
class MySQLFivePointSevenIntegrationTests(MySQLInspectorIntegrationTestMixin, test.TestCase):
"""Integration tests for MySQL v5.7
"""
hostname = 'mysql-5.7'
version_regexp = r'5\.7\.[0-9]{1,3}'
@test.tag('mysql', 'inspector')
class MySQLEightPointZeroIntegrationTests(MySQLInspectorIntegrationTestMixin, test.TestCase):
"""Integration tests for MySQL v8.0
"""
hostname = 'mysql-8.0'
version_regexp = r'8\.0\.[0-9]{1,3}'
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 HopeBayTech.
#
# This file is part of Tera.
# See https://github.com/HopeBayMobile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See CFFI docs at https://cffi.readthedocs.org/en/latest/
from _pyhcfs import ffi, lib
import os
import errno
import inspect
import sys
error_msg = {
-2: "Unsupported meta version",
}
def str_error_msg(name, ret_val):
msg = ""
if ret_val == -1:
msg = name + ': ' + os.strerror(ffi.errno)
else:
msg = name + ": " + error_msg[ret_val]
return msg
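# Illustrative example (not in the original source): return codes other than -1
# are looked up in error_msg above, so
#   str_error_msg('parse_meta', -2)  ->  'parse_meta: Unsupported meta version'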
def __convert_struct_field(s, fields):
for field, fieldtype in fields:
if fieldtype.type.kind == 'primitive':
yield (field, getattr(s, field))
else:
yield (field, convert_to_python(getattr(s, field)))
def convert_to_python(s):
type = ffi.typeof(s)
if type.kind == 'struct':
return dict(__convert_struct_field(s, type.fields))
elif type.kind == 'array':
if type.item.kind == 'primitive':
if type.item.cname == 'char':
return ffi.string(s)
else:
return [s[i] for i in range(type.length)]
else:
return [convert_to_python(s[i]) for i in range(type.length)]
elif type.kind == 'primitive':
return int(s)
else:
return s
def list_volume(fsmgr_path):
"""
Return list of hcfs external volumes
"""
ptr_ret_entry = ffi.new("PORTABLE_DIR_ENTRY **")
ret_num = ffi.new("uint64_t *")
ret = lib.list_volume(fsmgr_path, ptr_ret_entry, ret_num)
if ret < 0:
print(
'Error:',
str_error_msg(
inspect.stack()[0][3],
ret),
file=sys.stderr)
return ret
response = []
for x in range(ret_num[0]):
entry = ptr_ret_entry[0][x]
response += [(entry.inode, entry.d_type, ffi.string(entry.d_name))]
return response
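# Usage sketch (hypothetical fsmgr path and values): each returned item is an
# (inode, d_type, name) tuple built from a PORTABLE_DIR_ENTRY, e.g.
#   list_volume(b'/tmp/fsmgr')  ->  [(130, 0, b'hcfs_external'), ...]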
def parse_meta(meta_path):
"""
Get data from hcfs metafile
cdef: #define D_ISDIR 0
#define D_ISREG 1
#define D_ISLNK 2
#define D_ISFIFO 3
#define D_ISSOCK 4
int32_t parse_meta(char *meta_path, RET_META *meta);
@Input meta_path: A string containing the local path of the metafile
@Output result: 0 on success, negative integer on error.
@Output file_type: defined in fuseop.h
@Output child_number: number of children if it is a folder, not used if
file_type is not D_ISDIR.
"""
meta = ffi.new("RET_META *")
lib.parse_meta(meta_path, meta)
ret = convert_to_python(meta[0])
if ret['result'] < 0:
ret['error_msg'] = str_error_msg(inspect.stack()[0][3], ret['result'])
print('Error:', ret['error_msg'], file=sys.stderr)
return ret
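# Usage sketch (hypothetical metafile path): parse_meta() returns the RET_META
# struct converted to a dict, so a caller might check it like this:
#   meta = parse_meta(b'/tmp/meta_1234')
#   if meta['result'] == 0 and meta['file_type'] == 0:  # 0 == D_ISDIR
#       print('directory with %d children' % meta['child_number'])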
def list_dir_inorder(meta_path="", offset=(0, 0), limit=1000):
"""
int32_t list_dir_inorder(const char *meta_path, const int64_t page_pos,
const int32_t start_el, const int32_t limit,
int64_t *end_page_pos, int32_t *end_el_no,
int32_t *num_children, PORTABLE_DIR_ENTRY *file_list);
"""
ret = {
'result': -1,
'offset': (0, 0),
'child_list': [],
'num_child_walked': 0
}
if limit <= 0 or offset[0] < 0 or offset[1] < 0:
return ret
end_page_pos = ffi.new("int64_t *")
end_el_no = ffi.new("int32_t *")
num_child_walked = ffi.new("int32_t *")
file_list = ffi.new("PORTABLE_DIR_ENTRY []", limit)
ret_code = lib.list_dir_inorder(
meta_path,
offset[0],
offset[1],
limit,
end_page_pos,
end_el_no,
num_child_walked,
file_list)
ret['result'] = ret_code
ret['offset'] = (end_page_pos[0], end_el_no[0])
if ret['result'] >= 0:
ret['num_child_walked'] = num_child_walked[0]
ret['child_list'] = [
convert_to_python(file_list[i])
for i in range(ret['num_child_walked'])]
else:
ret['error_msg'] = str_error_msg(inspect.stack()[0][3], ret['result'])
print('Error:', ret['error_msg'], file=sys.stderr)
return ret
def get_external_vol(tmp_meta):
# Delegate to list_volume() defined above; the original referenced an
# undefined list_external_vol() helper.
return list_volume(tmp_meta)
def get_vol_usage(meta_path=""):
ret = {
'result': -1,
'usage': 0
}
vol_usage = ffi.new("int64_t *", 0)
ret_code = lib.get_vol_usage(meta_path, vol_usage)
ret['result'] = ret_code
ret['usage'] = vol_usage[0]
if ret['result'] < 0:
ret['error_msg'] = str_error_msg(inspect.stack()[0][3], ret['result'])
print('Error:', ret['error_msg'], file=sys.stderr)
return ret
def list_file_blocks(meta_path=""):
ret = {
'result': -1,
'ret_num': 0,
'block_list': [],
}
block_list = ffi.new("PORTABLE_BLOCK_NAME **")
ret_num = ffi.new("int64_t *")
inode = ffi.new("int64_t *")
ret_code = lib.list_file_blocks(meta_path, block_list, ret_num, inode)
ret['result'] = ret_code
if ret['result'] < 0:
ret['error_msg'] = str_error_msg(inspect.stack()[0][3], ret['result'])
print('Error:', ret['error_msg'], file=sys.stderr)
ret['ret_num'] = ret_num[0]
for x in range(ret_num[0]):
ret['block_list'] += [
'data_{0}_{1}_{2}'.format(
inode[0],
block_list[0][x].block_num,
block_list[0][x].block_seq)]
return ret
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from botocore.vendored import requests
import logging
from six.moves import configparser
from botocore.compat import json
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_SECURITY_CREDENTIALS_URL = (
'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
class Credentials(object):
"""
Holds the credentials needed to authenticate requests. In addition
the Credential object knows how to search for credentials and how
to choose the right credentials when multiple credentials are found.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found. Valid values are: iam_role|env|config|boto.
"""
def __init__(self, access_key=None, secret_key=None, token=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
self.method = None
self.profiles = []
def retrieve_iam_role_credentials(url=METADATA_SECURITY_CREDENTIALS_URL,
timeout=None, num_attempts=1):
if timeout is None:
timeout = DEFAULT_METADATA_SERVICE_TIMEOUT
d = {}
try:
r = _get_request(url, timeout, num_attempts)
if r.content:
fields = r.content.decode('utf-8').split('\n')
for field in fields:
if field.endswith('/'):
d[field[0:-1]] = retrieve_iam_role_credentials(
url + field, timeout, num_attempts)
else:
val = _get_request(
url + field,
timeout=timeout,
num_attempts=num_attempts).content.decode('utf-8')
if val[0] == '{':
val = json.loads(val)
d[field] = val
else:
logger.debug("Metadata service returned non 200 status code "
"of %s for url: %s, content body: %s",
r.status_code, url, r.content)
except _RetriesExceededError:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
num_attempts)
return d
def _get_request(url, timeout, num_attempts):
for i in range(num_attempts):
try:
response = requests.get(url, timeout=timeout)
except (requests.Timeout, requests.ConnectionError) as e:
logger.debug("Caught exception wil trying to retrieve credentials "
"from metadata service: %s", e, exc_info=True)
else:
if response.status_code == 200:
return response
raise _RetriesExceededError()
def search_iam_role(session, **kwargs):
credentials = None
timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
retrieve_kwargs = {}
if timeout is not None:
retrieve_kwargs['timeout'] = float(timeout)
if num_attempts is not None:
retrieve_kwargs['num_attempts'] = int(num_attempts)
metadata = retrieve_iam_role_credentials(**retrieve_kwargs)
if metadata:
for role_name in metadata:
credentials = Credentials(metadata[role_name]['AccessKeyId'],
metadata[role_name]['SecretAccessKey'],
metadata[role_name]['Token'])
credentials.method = 'iam-role'
logger.info('Found IAM Role: %s', role_name)
return credentials
def search_environment(**kwargs):
"""
Search for credentials in explicit environment variables.
"""
session = kwargs.get('session')
credentials = None
access_key = session.get_config_variable('access_key', ('env',))
secret_key = session.get_config_variable('secret_key', ('env',))
token = session.get_config_variable('token', ('env',))
if access_key and secret_key:
credentials = Credentials(access_key, secret_key, token)
credentials.method = 'env'
logger.info('Found credentials in Environment variables.')
return credentials
def search_credentials_file(**kwargs):
"""
Search for a credential file used by original EC2 CLI tools.
"""
credentials = None
if 'AWS_CREDENTIAL_FILE' in os.environ:
full_path = os.path.expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
lines = map(str.strip, open(full_path).readlines())
except IOError:
logger.warn('Unable to load AWS_CREDENTIAL_FILE (%s).', full_path)
else:
config = dict(line.split('=', 1) for line in lines if '=' in line)
access_key = config.get('AWSAccessKeyId')
secret_key = config.get('AWSSecretKey')
if access_key and secret_key:
credentials = Credentials(access_key, secret_key)
credentials.method = 'credentials-file'
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
return credentials
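# The legacy EC2 credential file parsed above is a plain KEY=VALUE file, e.g.:
#   AWSAccessKeyId=AKIAEXAMPLEKEY
#   AWSSecretKey=examplesecretvalue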
def search_file(**kwargs):
"""
If there are credentials in the configuration associated with
the session, use those.
"""
credentials = None
session = kwargs.get('session')
access_key = session.get_config_variable('access_key', methods=('config',))
secret_key = session.get_config_variable('secret_key', methods=('config',))
token = session.get_config_variable('token', ('config',))
if access_key and secret_key:
credentials = Credentials(access_key, secret_key, token)
credentials.method = 'config'
logger.info('Found credentials in config file.')
return credentials
def search_boto_config(**kwargs):
"""
Look for credentials in boto config file.
"""
credentials = access_key = secret_key = None
if 'BOTO_CONFIG' in os.environ:
paths = [os.environ['BOTO_CONFIG']]
else:
paths = ['/etc/boto.cfg', '~/.boto']
paths = [os.path.expandvars(p) for p in paths]
paths = [os.path.expanduser(p) for p in paths]
cp = configparser.RawConfigParser()
cp.read(paths)
if cp.has_section('Credentials'):
if cp.has_option('Credentials', 'aws_access_key_id'):
access_key = cp.get('Credentials', 'aws_access_key_id')
if cp.has_option('Credentials', 'aws_secret_access_key'):
secret_key = cp.get('Credentials', 'aws_secret_access_key')
if access_key and secret_key:
credentials = Credentials(access_key, secret_key)
credentials.method = 'boto'
logger.info('Found credentials in boto config file.')
return credentials
AllCredentialFunctions = [search_environment,
search_credentials_file,
search_file,
search_boto_config,
search_iam_role]
_credential_methods = (('env', search_environment),
('config', search_file),
('credentials-file', search_credentials_file),
('boto', search_boto_config),
('iam-role', search_iam_role))
def get_credentials(session):
credentials = None
for cred_method, cred_fn in _credential_methods:
credentials = cred_fn(session=session)
if credentials:
break
return credentials
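# Usage sketch (assuming an already-configured botocore session object):
#   credentials = get_credentials(session)
#   if credentials is not None:
#       print(credentials.method, credentials.access_key)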
|
"""Bastionification utility.
A bastion (for another object -- the 'original') is an object that has
the same methods as the original but does not give access to its
instance variables. Bastions have a number of uses, but the most
obvious one is to provide code executing in restricted mode with a
safe interface to an object implemented in unrestricted mode.
The bastionification routine has an optional second argument which is
a filter function. Only those methods for which the filter method
(called with the method name as argument) returns true are accessible.
The default filter method returns true unless the method name begins
with an underscore.
There are a number of possible implementations of bastions. We use a
'lazy' approach where the bastion's __getattr__() discipline does all
the work for a particular method the first time it is used. This is
usually fastest, especially if the user doesn't call all available
methods. The retrieved methods are stored as instance variables of
the bastion, so the overhead is only incurred on the first use of each
method.
Detail: the bastion class has a __repr__() discipline which includes
the repr() of the original object. This is precomputed when the
bastion is created.
"""
__all__ = ["BastionClass", "Bastion"]
from types import MethodType
class BastionClass:
"""Helper class used by the Bastion() function.
You could subclass this and pass the subclass as the bastionclass
argument to the Bastion() function, as long as the constructor has
the same signature (a get() function and a name for the object).
"""
def __init__(self, get, name):
"""Constructor.
Arguments:
get - a function that gets the attribute value (by name)
name - a human-readable name for the original object
(suggestion: use repr(object))
"""
self._get_ = get
self._name_ = name
def __repr__(self):
"""Return a representation string.
This includes the name passed in to the constructor, so that
if you print the bastion during debugging, at least you have
some idea of what it is.
"""
return "<Bastion for %s>" % self._name_
def __getattr__(self, name):
"""Get an as-yet undefined attribute value.
This calls the get() function that was passed to the
constructor. The result is stored as an instance variable so
that the next time the same attribute is requested,
__getattr__() won't be invoked.
If the get() function raises an exception, this is simply
passed on -- exceptions are not cached.
"""
attribute = self._get_(name)
self.__dict__[name] = attribute
return attribute
def Bastion(object, filter = lambda name: name[:1] != '_',
name=None, bastionclass=BastionClass):
"""Create a bastion for an object, using an optional filter.
See the Bastion module's documentation for background.
Arguments:
object - the original object
filter - a predicate that decides whether a function name is OK;
by default all names are OK that don't start with '_'
name - the name of the object; default repr(object)
bastionclass - class used to create the bastion; default BastionClass
"""
raise RuntimeError, "This code is not secure in Python 2.2 and later"
# Note: we define *two* ad-hoc functions here, get1 and get2.
# Both are intended to be called in the same way: get(name).
# It is clear that the real work (getting the attribute
# from the object and calling the filter) is done in get1.
# Why can't we pass get1 to the bastion? Because the user
# would be able to override the filter argument! With get2,
# overriding the default argument is no security loophole:
# all it does is call it.
# Also notice that we can't place the object and filter as
# instance variables on the bastion object itself, since
# the user has full access to all instance variables!
def get1(name, object=object, filter=filter):
"""Internal function for Bastion(). See source comments."""
if filter(name):
attribute = getattr(object, name)
if type(attribute) == MethodType:
return attribute
raise AttributeError, name
def get2(name, get1=get1):
"""Internal function for Bastion(). See source comments."""
return get1(name)
if name is None:
name = repr(object)
return bastionclass(get2, name)
def _test():
"""Test the Bastion() function."""
class Original:
def __init__(self):
self.sum = 0
def add(self, n):
self._add(n)
def _add(self, n):
self.sum = self.sum + n
def total(self):
return self.sum
o = Original()
b = Bastion(o)
testcode = """if 1:
b.add(81)
b.add(18)
print "b.total() =", b.total()
try:
print "b.sum =", b.sum,
except:
print "inaccessible"
else:
print "accessible"
try:
print "b._add =", b._add,
except:
print "inaccessible"
else:
print "accessible"
try:
print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
except:
print "inaccessible"
else:
print "accessible"
\n"""
exec testcode
print '='*20, "Using rexec:", '='*20
import rexec
r = rexec.RExec()
m = r.add_module('__main__')
m.b = b
r.r_exec(testcode)
if __name__ == '__main__':
_test()
|
"""
tests for ocd functions.
"""
# pylint: disable=E1101
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import random
import torch
import pytest
import numpy as np
from ocd import OCD
@pytest.fixture(scope='module')
def fix_seed():
seed = len('ocd testing')
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@pytest.fixture(scope='module')
def expected_edit_dist_mask():
mask = [[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]],
[[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1]],
[[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1]]]
return mask
@pytest.fixture(scope='module')
def expected_q_values():
q_values = [[[0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0],
[-1.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0],
[-2.0, -1.0, -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0],
[-3.0, -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0],
[-3.0, -3.0, -2.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0],
[-4.0, -4.0, -3.0, -3.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0],
[-4.0, -4.0, -4.0, -4.0, -3.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0],
[-4.0, -4.0, -4.0, -4.0, -4.0, -3.0, -4.0, -4.0, -4.0, -4.0, -4.0],
[-4.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0, -3.0, -4.0]],
[[0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0],
[-1.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0],
[-2.0, -1.0, -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0],
[-3.0, -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0],
[-4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, -4.0, -4.0, -4.0],
[-4.0, -4.0, -4.0, -4.0, -4.0, -3.0, -4.0, -4.0, -4.0, -4.0, -4.0],
[-5.0, -5.0, -5.0, -5.0, -5.0, -4.0, -5.0, -5.0, -5.0, -4.0, -5.0],
[-5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -4.0, -5.0],
[-6.0, -6.0, -6.0, -6.0, -6.0, -6.0, -6.0, -6.0, -6.0, -5.0, -6.0]]]
return q_values
@pytest.fixture(scope='module')
def expected_policy():
policy = [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.50, 0.50, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.3333, 0.3333, 0.3333, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.250, 0.250, 0.250, 0.250, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.50, 0.0, 0.0, 0.0, 0.50, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]]
return policy
def test__mask(fix_seed, expected_edit_dist_mask):
seq = [[1, 2, 3, 4],
[5, 4, 6, 4],
[3, 2, 4, 0]]
end_symbol_id = 4
seq = torch.LongTensor(seq)
mask = OCD.sequence_mask(seq, end_symbol_id)
expected_mask = [[0, 0, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 1]]
assert mask.tolist() == expected_mask
edit_dist_mask = OCD.edit_distance_mask(seq, end_symbol_id)
assert edit_dist_mask.tolist() == expected_edit_dist_mask
def test_edit_q_values(fix_seed, expected_q_values):
"""
vocabulary = {'S':0, 'U':1, 'N':2, 'D':3, 'A':4 , 'Y':5,
'T':6, 'R':7, 'P':8, '</s>': 9, '<pad>': 10}
"""
vocab_size = 11
end_symbol_id = 9
# batch_y = {'SUNDAY</s><pad><pad>', 'SUNDAY</s><pad><pad>'}
gold_y = [[0, 1, 2, 3, 4, 5, 9, 10, 10],
[0, 1, 2, 3, 4, 5, 9, 10, 10]]
sampled = {'SATURDAY</s>', 'SATRAPY</s>U'}
sampled_y = [[0, 4, 6, 1, 7, 3, 4, 5, 9],
[0, 4, 6, 7, 4, 8, 5, 9, 1]]
q_values = OCD.edit_distance_q_values(torch.LongTensor(sampled_y),
torch.LongTensor(gold_y),
end_symbol_id, vocab_size)
assert q_values.tolist() == expected_q_values
def test_policy(fix_seed, expected_q_values, expected_policy):
optimal_pi = OCD.compute_optimal_pi(torch.FloatTensor(expected_q_values))
assert (np.round(optimal_pi.tolist()[1], 4) == expected_policy).all()
def test_loss(fix_seed, expected_q_values):
optimal_pi = OCD.compute_optimal_pi(torch.FloatTensor(expected_q_values))
model_log_probs = torch.nn.functional.log_softmax(torch.randn_like(optimal_pi), dim=2)
b_sz, seq_len, vocab_size = model_log_probs.size()
sampled_y_mask = torch.zeros(b_sz, seq_len).byte()
loss = OCD.loss(optimal_pi, model_log_probs, sampled_y_mask)
assert round(loss.item(), 4) == 20.7344
def test_forward(fix_seed, expected_q_values):
optimal_pi = OCD.compute_optimal_pi(torch.FloatTensor(expected_q_values))
model_scores = torch.randn_like(optimal_pi)
gold_y = [[0, 1, 2, 3, 4, 5, 9, 10, 10],
[0, 1, 2, 3, 4, 5, 9, 10, 10]]
ocd = OCD(vocab_size=11, end_symbol_id=9)
loss = ocd(model_scores, torch.LongTensor(gold_y))
assert round(loss.item(), 4) == 8.0762
|
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.models as models
import torchvision.utils as utils
import matplotlib.pyplot as plt
import sys
import os
import json
from google.cloud import storage
import random
experiment=" Vaporwave Dark BKG"
simpName="vaporwave-dark-bkg"
bucketUrlPrefix='https://storage.googleapis.com/alpha-flake-output/'
f = open('paradata.json')
jblob = json.load(f)
class VGG_19(nn.Module):
def __init__(self):
super(VGG_19, self).__init__()
# model used: VGG19 (like in the paper)
# everything after the 28th layer is technically not needed
self.model = models.vgg19(pretrained=True).features[:30]
# better results when changing the MaxPool layers to AvgPool (-> paper)
for i, _ in enumerate(self.model):
# Indices of the MaxPool layers -> replaced by AvgPool with same parameters
if i in [4, 9, 18, 27]:
self.model[i] = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
def forward(self, x):
features = []
for i, layer in enumerate(self.model):
x = layer(x)
# indices of the conv layers after the now-AvgPool layers
if i in [0, 5, 10, 19, 28]:
features.append(x)
return features
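# Shape sketch (assuming a 1x3x2048x2048 input, as produced by load_img below):
# the returned list holds the raw conv outputs at layer indices 0, 5, 10, 19, 28,
# e.g. features[0] has shape (1, 64, 2048, 2048) and features[4] (1, 512, 128, 128).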
def load_img(path_to_image, img_size):
transform = transforms.Compose([
transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
])
img = Image.open(path_to_image)
img = transform(img).unsqueeze(0)
#print("\n\n\n",dir(img),"\n\n\n")
return img
def transfer_style(iterations, optimizer, alpha, beta, generated_image, content_image, style_image, show_images=False):
for iter in range(iterations+1):
generated_features = model(generated_image)
content_features = model(content_image)
style_features = model(style_image)
content_loss = 0
style_loss = 0
for generated_feature, content_feature, style_feature in zip(generated_features, content_features, style_features):
batch_size, n_feature_maps, height, width = generated_feature.size()
# in paper it is 1/2*((g - c)**2) ... but it is easier this way because I don't have to worry about dimensions ... and it works as well
content_loss += (torch.mean((generated_feature - content_feature) ** 2))
# batch_size is one ... so it isn't needed. I still included it for better understanding.
G = torch.mm((generated_feature.view(batch_size*n_feature_maps, height*width)), (generated_feature.view(batch_size*n_feature_maps, height*width)).t())
A = torch.mm((style_feature.view(batch_size*n_feature_maps, height*width)), (style_feature.view(batch_size*n_feature_maps, height*width)).t())
# different in paper!!
E_l = ((G - A)**2)
# w_l ... one divided by the number of active layers with a non-zero loss-weight -> directly from the paper (technically isn't needed)
w_l = 1/5
style_loss += torch.mean(w_l*E_l)
# I found little difference when changing the alpha and beta values ... still kept it in for better understanding of paper
total_loss = alpha * content_loss + beta * style_loss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if iter % 100 == 0:
print('-'*15)
print(f'\n{iter} \nTotal Loss: {total_loss.item()} \n Content Loss: {content_loss} \t Style Loss: {style_loss}')
print('-'*15)
# show image
if show_images == True:
plt.figure(figsize=(10, 10))
plt.imshow(generated_image.permute(0, 2, 3, 1)[0].cpu().detach().numpy())
plt.show()
return generated_image
#if iter % 500 == 0:
#utils.save_image(generated, f'./gen_{iter}.png')
if __name__ == '__main__':
print(sys.argv)
content_path=sys.argv[1]
style_path=sys.argv[2]
job_uuid=sys.argv[3]
out_path=job_uuid + ".png"
basedContent=os.path.basename(content_path)
basedStyle=os.path.basename(style_path)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#2560 - 2048
content_img = load_img(content_path, 2048).to(device)
style_img = load_img(style_path, 2048).to(device)
model = VGG_19().to(device)
# freeze parameters
for param in model.parameters():
param.requires_grad = False
# generated image (init) is the content image ... could also be noise
# requires_grad because the network itself is frozen ... the thing we are changing is this
generated_init = content_img.clone().requires_grad_(True)
iterations = random.randint(200,1000)
# the real difference is visible when changing the learning rate ... 1e-2 is rather high -> heavy changes to content image
lr = 1e-2
# I found no real difference when changing these values...this is why I keep them at 1
alpha = 1
beta = 1
optimizer = optim.Adam([generated_init], lr=lr)
generated_image = transfer_style(iterations=iterations,
optimizer=optimizer,
alpha=alpha,
beta=beta,
generated_image=generated_init,
content_image=content_img,
style_image=style_img,
show_images=False # only true in jupyter notebook
)
utils.save_image(generated_image, out_path)
# Success - Save Everything => :
client = storage.Client()
bucket = client.get_bucket('alpha-flake-output')
bucketRoot = 'experiment-'+simpName+'/'+job_uuid+'/'
fOut = bucketRoot + job_uuid + ".png"
fContent = "shared/flake-bbg/" + basedContent
fStyle = "shared/vaporwave/" + basedStyle
blob = bucket.blob(fOut)
blob.upload_from_filename(filename=out_path)
blob = bucket.blob(fContent)
if not blob.exists():
blob.upload_from_filename(filename=content_path)
blob = bucket.blob(fStyle)
if not blob.exists():
blob.upload_from_filename(filename=style_path)
newDat={
'experiment':experiment,
'nst':{
'iterations':str(iterations),
'alpha':str(alpha),
'beta':str(beta),
'learn rate':str(lr),
'image prompt':bucketUrlPrefix + fContent,
'style prompt':bucketUrlPrefix + fStyle,
'model':'vgg19'
},
'url': bucketUrlPrefix + fOut,
'uuid':job_uuid
}
jblob['data'].append(newDat)
with open('paradata.json', 'w', encoding='utf-8') as ff:
json.dump(jblob, ff, ensure_ascii=False, indent=4)
blob = bucket.blob('experiment-viewer/paradata.json')
blob.cache_control="no-store"
blob.upload_from_filename('./paradata.json')
|
"""Define utilities."""
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/graspinglab/Autonomous_racing/Paresh-Soni-F110-2020/Ros-Lab/soni_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/graspinglab/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/graspinglab/catkin_ws/devel;/opt/ros/kinetic" != "" else []
|
import glob
import inspect
import logging
import os
import unittest
from pathlib import Path
from securify.solidity import compile_cfg
from securify.staticanalysis import static_analysis
def make_test_case(path_src):
def test_case(self: unittest.TestCase):
cfg, ast, *_ = compile_cfg(path_src)
result = static_analysis.analyze_cfg(cfg, logger=logging).facts_out
compliant_output = result["patternCompliance"]
violation_output = result["patternViolation"]
conflict_output = result["patternConflict"]
def exists(actual, e):
actual = {a[2] for a in actual if a[0].strip() == "PASS"}
self.assertTrue(len(actual) > 0, e)
def not_exists(actual, e):
actual = {a[2] for a in actual if a[0].strip() == "PASS"}
self.assertTrue(len(actual) == 0, e)
if "unsafe" in path_src:
exists(violation_output, "Violations")
not_exists(conflict_output, "Conflict")
else:
exists(compliant_output, "Compliance")
not_exists(violation_output, "Compliance")
not_exists(conflict_output, "Conflict")
return test_case
class TestPatternsPass(unittest.TestCase):
base_path = os.path.dirname(os.path.abspath(__file__)) + "**/"
frame = inspect.currentframe()
for filename in glob.iglob(f'{base_path}**/*.sol', recursive=True):
path = Path(filename)
test_name = str(path.relative_to(Path(os.path.abspath(__file__)).parent)) \
.replace(".sol", "") \
.replace("\\", ".") \
.replace("/", ".")
frame.f_locals[f'test_{test_name}'] = make_test_case(str(path))
|
__author__ = 'Sergey Osipov <Serega.Osipov@gmail.com>'
import numpy as np
def get_photon_energy(wavelengths):
"""
computes the energy of the photon of a given wavelength
:param wavelengths: [m]
:return: J = W*s
"""
plank_constant = 6.62606957 * 10**-34 # J*s
speed_of_light = 299792458 # m*s^-1
nu = speed_of_light / wavelengths # s^-1
E = plank_constant * nu # J = W*s
return E
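# Worked example (rounded): for a 500 nm photon,
#   E = h * c / wavelength = 6.626e-34 * 2.998e8 / 500e-9 ≈ 3.97e-19 J
# i.e. get_photon_energy(500e-9) ≈ 3.97e-19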
def integrate_spectral_flux(wavelengths, spectral_flux, wl_min=0, wl_max=np.inf):
"""
Integrate spectral flux in wavelength
:param wavelengths: um
:param spectral_flux: as returned from disort (my impl) [W * m^-2 * um^-1]
:return: W m^-2
"""
ind = np.logical_and(wavelengths >= wl_min, wavelengths <= wl_max)
broadband_flux = np.trapz(spectral_flux[ind], wavelengths[ind], axis=0)
return broadband_flux
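# Usage sketch (made-up flat spectrum): integrate between 0.3 and 0.7 um
#   wls = np.linspace(0.2, 1.0, 81)    # [um]
#   flux = np.ones_like(wls)           # [W m^-2 um^-1]
#   integrate_spectral_flux(wls, flux, wl_min=0.3, wl_max=0.7)  # ~ 0.4 W m^-2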
|
from django.db import models
from server_delta_app import models as server_delta_app_models, managers
class SourceIncomeModel(server_delta_app_models.BaseModel):
"""
Model that represent the customer debt registered in app
"""
class Meta:
db_table = "source_income"
value = models.DecimalField(max_digits=10, decimal_places=2)
description = models.CharField(max_length=1000)
customer_dossier = models.ForeignKey(server_delta_app_models.CustomerDossierModel, on_delete=models.CASCADE, related_name='sources_income')
objects = managers.SourceIncomeManager()
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/7/27 22:15
# @author :Mo
# @function :test func_recursive
from tookit_sihui.ml_common.func_recursive.func_recursive import gen_syn_sentences
if __name__=="__main__":
org_data = ["[你][喜欢|喜爱|爱][虾米|啥子|什么]", "[1|11][2|22][3|33][44|444]", "大漠帝国"]
syn_sentences = gen_syn_sentences(org_data)
# syn_sentences = sorted(syn_sentences)
print(syn_sentences)
gg = 0
# Test results
# ['你喜欢虾米', '你喜爱虾米', '你爱虾米', '你喜欢啥子', '你喜爱啥子', '你爱啥子', '你喜欢什么', '你喜爱什么', '你爱什么', '12344', '112344', '122344', '1122344', '123344', '1123344', '1223344', '11223344', '123444', '1123444', '1223444', '11223444', '1233444', '11233444', '12233444', '112233444', '大漠帝国']
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import browser_credentials
from telemetry.core import extension_dict
from telemetry.core import tab_list
from telemetry.core import temporary_http_server
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import browser_backend
class Browser(object):
"""A running browser instance that can be controlled in a limited way.
To create a browser instance, use browser_finder.FindBrowser.
Be sure to clean up after yourself by calling Close() when you are done with
the browser. Or better yet:
browser_to_create = FindBrowser(options)
with browser_to_create.Create() as browser:
... do all your operations on browser here
"""
def __init__(self, backend, platform):
self._browser_backend = backend
self._http_server = None
self._wpr_server = None
self._platform = platform
self._tabs = tab_list.TabList(backend.tab_list_backend)
self._extensions = None
if backend.supports_extensions:
self._extensions = extension_dict.ExtensionDict(
backend.extension_dict_backend)
self.credentials = browser_credentials.BrowserCredentials()
self._platform.SetFullPerformanceModeEnabled(True)
def __enter__(self):
return self
def __exit__(self, *args):
self.Close()
@property
def platform(self):
return self._platform
@property
def browser_type(self):
return self._browser_backend.browser_type
@property
def is_content_shell(self):
"""Returns whether this browser is a content shell, only."""
return self._browser_backend.is_content_shell
@property
def supports_extensions(self):
return self._browser_backend.supports_extensions
@property
def supports_tab_control(self):
return self._browser_backend.supports_tab_control
@property
def tabs(self):
return self._tabs
@property
def extensions(self):
"""Returns the extension dictionary if it exists."""
if not self.supports_extensions:
raise browser_backend.ExtensionsNotSupportedException(
'Extensions not supported')
return self._extensions
@property
def supports_tracing(self):
return self._browser_backend.supports_tracing
def StartTracing(self):
return self._browser_backend.StartTracing()
def StopTracing(self):
return self._browser_backend.StopTracing()
def GetTraceResultAndReset(self):
"""Returns the result of the trace, as TraceResult object."""
return self._browser_backend.GetTraceResultAndReset()
def Close(self):
"""Closes this browser."""
self._platform.SetFullPerformanceModeEnabled(False)
if self._wpr_server:
self._wpr_server.Close()
self._wpr_server = None
if self._http_server:
self._http_server.Close()
self._http_server = None
self._browser_backend.Close()
self.credentials = None
@property
def http_server(self):
return self._http_server
def SetHTTPServerDirectory(self, path):
if path:
abs_path = os.path.abspath(path)
if self._http_server and self._http_server.path == path:
return
else:
abs_path = None
if self._http_server:
self._http_server.Close()
self._http_server = None
if not abs_path:
return
self._http_server = temporary_http_server.TemporaryHTTPServer(
self._browser_backend, abs_path)
def SetReplayArchivePath(self, archive_path):
if self._wpr_server:
self._wpr_server.Close()
self._wpr_server = None
if not archive_path:
return None
if self._browser_backend.wpr_mode == wpr_modes.WPR_OFF:
return
use_record_mode = self._browser_backend.wpr_mode == wpr_modes.WPR_RECORD
if not use_record_mode:
assert os.path.isfile(archive_path)
self._wpr_server = wpr_server.ReplayServer(
self._browser_backend,
archive_path,
use_record_mode,
self._browser_backend.WEBPAGEREPLAY_HOST,
self._browser_backend.webpagereplay_local_http_port,
self._browser_backend.webpagereplay_local_https_port,
self._browser_backend.webpagereplay_remote_http_port,
self._browser_backend.webpagereplay_remote_https_port)
def GetStandardOutput(self):
return self._browser_backend.GetStandardOutput()
|
#!/usr/bin/env python
################# FROM FRANK ###################################
import os,sys
from dcnn_base import DCNN_CONFIG
from dcnn_main import DCNN
from dcnn_logger import setup_script_logging #FB
from dcnn_utils import file_looper,get_args,dict2str,expandvars #FB
__version__= "2020-07-06-001"
####################################################
#==============================================
# USER SETTINGS 4 Magic
# ==============================================
basedir = None
data_meg = 'meg_rawdata/Philly'
fnconfig = 'config_CTF_Philly.yaml'
pattern = '-raw.fif'
verbose = True
do_label_ica = False
do_label_check = True
do_performance_check = False
# --
do_log2file = True
def run(fnconfig=None,basedir=None,data_meg=None,data_train=None,pattern='-raw.fif',
verbose=False,do_label_ica=False,do_label_check=False,log2file=False):
# ToDo run for list of files or search in subdirs
# -- init config CLS
cfg = DCNN_CONFIG(verbose=verbose)
cfg.load(fname=fnconfig)
#-- init dcnn CLS
dcnn = DCNN(**cfg.config) # init object with config details
#---
if basedir: # FB test
dcnn.path.basedir = basedir
if data_meg:
dcnn.path.data_meg = data_meg # input directory
if data_train:
dcnn.path.data_train = data_train # input directory
dcnn.verbose = True
dcnn.get_info()
# ==========================================================
# run ICA auto labelling
# ==========================================================
if do_label_ica:
#path_in = os.path.join(cfg.config['path']['basedir'],cfg.config['path']['data_meg'])
path_in = dcnn.path.data_meg
# -- looper catch error via try/exception setup log2file
for fnraw in file_looper(rootdir=path_in,pattern=pattern,version=__version__,verbose=verbose,logoverwrite=True,log2file=log2file):
#logger.info(fnraw)
# -- read raw data and apply noise reduction
dcnn.meg.update(fname=fnraw)
# -- apply ICA on chops, label ICs save results to disk
# ToDo store chop-times, ECG,EOG in raw.annotations
# - chop: make use of annotations in get_chop_times_indices()
fgdcnn = dcnn.label_ica(save=True)
if verbose:
dcnn.get_info()
# ==========================================================
# check ICA auto labelling
# ==========================================================
npz_pattern = pattern.split(".",-1)[0] +"-gdcnn.npz"
if do_label_check:
path_in = dcnn.path.data_train
from mne.report import Report
report = Report(title='Check IC labels')
for fname in file_looper(rootdir=path_in, pattern=npz_pattern,version=__version__,verbose=verbose,log2file=log2file,logoverwrite=False):
dcnn.load_gdcnn(fname)
# check IC labels (and apply corrections)
#dcnn.check_labels(save=True)
fnreport ="test_report"
# check IC labels (and apply corrections)
name = os.path.basename(fname[:-4])
print ('>>> working on %s' % name)
figs, captions = dcnn.plot_ica_traces(fname)
report.add_figs_to_section(figs, captions=captions, section=name, replace=True)
report.save(fnreport + '.h5', overwrite=True)
report.save(fnreport + '.html', overwrite=True, open_browser=True)
#dcnn.check_labels(save=True, path_out=cfg.config['path']['data_train'])
#if verbose: # ToDo set verbose level True,2,3
# logger.debug("dcnn ica chop dump.\n{}\n{}\n\n".format( dcnn.ica.chop, dict2str(dcnn.ica.chop.dump()) ))
# logger.debug("dcnn ica n_chop.\n{}\n\n".format(dcnn.ica.chop.n_chop))
# logger.debug("dcnn ica topo data.\n{}\n\n".format(dcnn.ica.topo.data))
# logger.debug("dcnn ica topo img.\n{}\n\n".format(dcnn.ica.topo.images))
# ==========================================================
# ICA performance plot
# ==========================================================
if do_performance_check:
path_in = cfg.config['path']['data_train']
for fname in file_looper(rootdir=path_in, pattern=npz_pattern,version=__version__,verbose=verbose,log2file=log2file,logoverwrite=False):
dcnn.load_gdcnn(fname)
# ToDo plot performance, save to report
if __name__ == "__main__":
# -- get parameter / flags from cmd line
argv = sys.argv
opt, parser = get_args(argv,version=__version__)
if len(argv) < 2:
parser.print_help()
sys.exit(-1)
#flog= "dcnn_"+os.getenv("USER","test")+".log"
logger = setup_script_logging(name="DCNN",opt=opt,logfile=False,version=__version__,level="DEBUG")
try:
if opt.jd: # JD settings
opt.config = fnconfig
opt.pattern = pattern
opt.verbose = verbose
opt.do_label_ica = do_label_ica
opt.do_label_check = do_label_check
opt.log2file = do_log2file
elif opt.fb: # call from shell
#opt.basedir = "$JUMEG_PATH_LOCAL_DATA"+"/gDCNN"
#opt.data_meg = "data_examples"
#opt.data_train = "$JUMEG_PATH_LOCAL_DATA"+"/exp/dcnn/ica_labeled/Juelich"
#-- 4D
opt.pattern = "*int-raw.fif" #"*.c,rfDC_bcc,nr-raw.fif"
opt.config = "config_4D_Juelich.yaml"
# ToDo trouble with
# 205399_MEG94T_121220_1322_2_c,rfDC_EC_bcc-raw-gdcnn.npz
# eog / ICs found ???
opt.ica = do_label_ica
opt.check = do_label_check
opt.log2file = do_log2file
if opt.verbose:
# show cmd line parameter
# https://stackoverflow.com/questions/39978186/python-print-all-argparse-arguments-including-defaults/39978305
msg=["DCNN input parameter:"]
for k,v in sorted(vars(opt).items()): msg.append(" -> {0:12}: {1}".format(k,v))
logger.info("\n".join(msg) )
run(fnconfig=opt.config,basedir=opt.basedir,data_meg=opt.data_meg,data_train=opt.data_train,pattern=opt.pattern,
verbose=opt.verbose,do_label_ica=opt.ica,do_label_check=opt.check,log2file=opt.log2file)
except:
logger.exception("ERROR in DCNNN")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import xml.etree.ElementTree
from config import ConfigSectionMap
def dust_from_xml(root):
"""
get fine dust (PM10 / PM2.5) values from xml returned for the specified station
:param root: root node of xml response
:return: (pm10, pm25) tuple
"""
try:
# find matching route
pm25 = None
pm10 = None
body = root[1] # <body>
if int(body.find('totalCount').text) > 0:
item = body.find('items')[0]
pm10 = item.find('pm10Value').text
pm25 = item.find('pm25Value').text
except xml.etree.ElementTree.ParseError as e:
print("XML error", str(e))
print(xml.etree.ElementTree.tostring(root))
return (pm10, pm25)
def dust_view(dusts):
"""
create dust view from dust values
:param dusts: (pm10, pm25, ...)
:return: string "value/value/..."
"""
view = ""
for index, element in enumerate(dusts):
if index != 0:
view += '/'
view += str(element)
return view
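# Example: dust_view(('45', '12')) returns "45/12"; the (pm10, pm25) tuple from
# dust_from_xml() can be passed straight to dust_view().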
|
"""
NSL-KDD
https://www.unb.ca/cic/datasets/nsl.html
http://205.174.165.80/CICDataset/NSL-KDD/Dataset/
"""
import pandas as pd
import os
from pyzhmh import __dirname__, download_one_file, unpack_one_file
__dirname__ = __dirname__()
NSL_KDD_URL = "http://205.174.165.80/CICDataset/NSL-KDD/Dataset/NSL-KDD.zip"
NSL_KDD_FILES = [
'KDDTest+.txt',
'KDDTest-21.txt',
'KDDTrain+.txt',
'KDDTrain+_20Percent.txt'
]
NSL_KDD_CACHE = os.path.join(__dirname__, 'data')
if not os.path.exists(NSL_KDD_CACHE):
os.makedirs(NSL_KDD_CACHE)
def load_naslkdd_data(index: int = 3):
if index < 0 or index > len(NSL_KDD_FILES) - 1:
return None
fullname = os.path.join(NSL_KDD_CACHE, 'NSL-KDD.zip')
if download_one_file(fullname, {'url': NSL_KDD_URL}):
try:
for item in NSL_KDD_FILES:
os.stat(os.path.join(NSL_KDD_CACHE, item))
except OSError:
# one of the expected files is missing
unpack_one_file(fullname, NSL_KDD_CACHE)
# read the selected file
return pd.read_csv(os.path.join(NSL_KDD_CACHE, NSL_KDD_FILES[index]), header=None)
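# Usage sketch: load the full training split (index 2 -> 'KDDTrain+.txt') as a
# pandas DataFrame; the archive is downloaded and unpacked on first use.
#   df = load_naslkdd_data(2)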
|
"""Necessary constants for MQTT."""
NUMBER_OF_PARTITION = 10
|
#!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.ioloop
from tornadows import soaphandler
from tornadows import webservices
from tornadows import xmltypes
from tornadows import complextypes
from tornadows.soaphandler import webservice
import datetime
# This dictionary emulates a document repository
repo = {}
class Document(complextypes.ComplexType):
number = int
theme = str
author = str
text = str
created = datetime.date
class Message(complextypes.ComplexType):
doc = Document
msg = str
class Repository(soaphandler.SoapHandler):
""" Service of repository, store documents (Document) """
@webservice(_params=Message,_returns=str)
def save(self, msg):
global repo
repo[msg.doc.number] = msg.doc
return 'Save document number : %d'%msg.doc.number
@webservice(_params=int,_returns=Message)
def find(self, num):
global repo
response = Message()
try:
doc = Document()
d = repo[num]
doc.number = d.number
doc.theme = d.theme
doc.author = d.author
doc.text = d.text
doc.created = d.created
response.doc = doc
response.msg = 'OK'
except:
response.doc = Document()
response.msg = 'Document number %d does not exist' % num
return response
if __name__ == '__main__':
service = [('RepositoryService',Repository)]
app = webservices.WebService(service).listen(8080)
tornado.ioloop.IOLoop.instance().start()
|
import numpy as np
from manimlib.animation.animation import Animation
from manimlib.animation.transform import Transform
from manimlib.constants import *
from manimlib.mobject.mobject import Mobject
from manimlib.mobject.point_cloud_mobject import Point
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.scene.scene import Scene
from manimlib.utils.config_ops import digest_config
from manimlib.utils.paths import path_along_arc
class RearrangeEquation(Scene):
def construct(
self,
start_terms,
end_terms,
index_map,
path_arc=np.pi,
start_transform=None,
end_transform=None,
leave_start_terms=False,
transform_kwargs={},
):
transform_kwargs["path_func"] = path
start_mobs, end_mobs = self.get_mobs_from_terms(start_terms, end_terms)
if start_transform:
start_mobs = start_transform(Mobject(*start_mobs)).split()
if end_transform:
end_mobs = end_transform(Mobject(*end_mobs)).split()
unmatched_start_indices = set(range(len(start_mobs)))
unmatched_end_indices = set(range(len(end_mobs)))
unmatched_start_indices.difference_update(
[n % len(start_mobs) for n in index_map])
unmatched_end_indices.difference_update(
[n % len(end_mobs) for n in list(index_map.values())])
mobject_pairs = [
(start_mobs[a], end_mobs[b]) for a, b in index_map.items()
] + [(Point(end_mobs[b].get_center()), end_mobs[b])
for b in unmatched_end_indices]
if not leave_start_terms:
mobject_pairs += [(start_mobs[a],
Point(start_mobs[a].get_center()))
for a in unmatched_start_indices]
self.add(*start_mobs)
if leave_start_terms:
self.add(Mobject(*start_mobs))
self.wait()
self.play(
*[Transform(*pair, **transform_kwargs) for pair in mobject_pairs])
self.wait()
def get_mobs_from_terms(self, start_terms, end_terms):
"""
Need to ensure that all image mobjects for a tex expression
stemming from the same string are point-for-point copies of one
another. This makes transitions much smoother, so they do not look
like point-clouds.
"""
num_start_terms = len(start_terms)
all_mobs = np.array(
TexMobject(start_terms).split() + TexMobject(end_terms).split())
all_terms = np.array(start_terms + end_terms)
for term in set(all_terms):
matches = all_terms == term
if sum(matches) > 1:
base_mob = all_mobs[list(all_terms).index(term)]
all_mobs[matches] = [
base_mob.copy().replace(target_mob)
for target_mob in all_mobs[matches]
]
return all_mobs[:num_start_terms], all_mobs[num_start_terms:]
class FlipThroughSymbols(Animation):
CONFIG = {
"start_center": ORIGIN,
"end_center": ORIGIN,
}
def __init__(self, tex_list, **kwargs):
# digest_config stores tex_list and the CONFIG centers on self;
# the flip starts from the first expression in the list.
digest_config(self, kwargs, locals())
self.curr_tex = self.tex_list[0]
mobject = TexMobject(self.curr_tex).shift(self.start_center)
Animation.__init__(self, mobject, **kwargs)
def interpolate_mobject(self, alpha):
new_tex = self.tex_list[max(int(np.ceil(alpha * len(self.tex_list))) - 1, 0)]
if new_tex != self.curr_tex:
self.curr_tex = new_tex
self.mobject = TexMobject(new_tex).shift(self.start_center)
if not all(self.start_center == self.end_center):
self.mobject.center().shift((1 - alpha) * self.start_center +
alpha * self.end_center)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Gareth J. Greenaway <gareth@saltstack.com>`
:codeauthor: :email:`David Murphy <dmurphy@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import datetime
import time
# Import Salt Testing libs
from tests.support.helpers import destructiveTest
from tests.support.unit import TestCase, skipIf
from tests.support.paths import TMP
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt libs
import salt.utils.platform
import salt.utils.files
import salt.modules.gpg as gpg
GPG_TEST_KEY_PASSPHRASE = 'testkeypassphrase'
GPG_TEST_KEY_ID = '7416F045'
GPG_TEST_PUB_KEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
mQGNBFz1dx4BDACph7J5nuWE+zb9rZqTaL8akAnPAli2j6Qtk7BTDzTM9Kq80U2P
O3QRAFBQDODsgmrBTWgeZeNhN6Snz2WrZ8pC0RMK+mCmEt5S49ydWtvWl/xtzPfg
sy8h8OrIjSH1G0NQv9vdBpg2Y9vXLNDCZAqH0/Sh/wCkHT80c4i8TU09UZfx96S6
fFVmB7nHu/ztgiIkC6Fr04WGHlkd50g8r8CFPvwKOTD+rfoMsGppmAC1+y8ajfik
B+tEL88Rv2s4hLU78nQ3iGtdVRSs5Ip0x4O/PYZIDEd8KuBMo+flSzOZj2HCbUzN
MHiTXIbL8hBlwku9LGO7Itks4v2vfDh57JRHykwzGnvOlgXtSSvbayWLohNXtzWq
WtsMKrsrsIUprg1jhIVHnMSZZXMMizlni6MT5vbil2Bgn1g7diNscDnfCD6vlWUH
FRS1XkFFZ5ozu0+lC/5UaTcrjhH8/hQBNJwjsDSPsyKYQm+t7PXqq4SlI3Er7JJW
esK0diZ6reeebl0AEQEAAbQhdGVzdGtleSA8cGFja2FnaW5nQHNhbHRzdGFjay5j
b20+iQHOBBMBCgA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEEjS1DixNC
naoZrRFuvMeUg3QW8EUFAlz1ekoACgkQvMeUg3QW8EVm1gv/Z5CCqFoF8IXpa3g0
G+9C4gRS0h+tEYtjebAgxBn/dqk6cSNIb1BGDM/sSWxK5/jweN8CF/ojVgP1CFrX
igULnL3g5351L+8feU2ImP2OML438/GcOOJ+iwTKUBRuqxqK8NwcRuuN6YmbjtUw
JSD2+60DlWfGsInyenwGkgBAM44Y6dSAXKsm6dJ/IGVTQIsHE5mKRykrRZ1jyXxw
i1CF8fEyG6fNNb8I8Oiyj52xdYMxBvGmgPpFlF1jRoU+OViGFeUiZ49XFlC8GNCf
boWfXg+PCwZeWUeJ/s6a6iC5HG7iC0XYRiqhzymP8ctrwwm5dPT4ebYNfUSqq8i0
/YG6JGdsQZrq0i0FuceUSfOKVeFvSm+AkCG3kLxxgM925fqBetWiAJRCDYdmy8uc
jm618584hqa/yFMOfwVoLqq8mVg84wyGU6gXo6hXCa/008PITFZD47ILuN/Z8iLm
8Or/aJD5Tq+Wg3r2Di9+Ku8/gcn3bVxwFRjiVPrFwJkyjfMFuQGNBFz1dx4BDADU
Ynz0efLmaKqZIjfxwDf7iVqwcD9b8cMXIv2aa3Y56QDVJBXU5952LiU8XuzBBPq+
4FYQ78UmxSv3Rk6LKb9P2ih2L1PaJuQ1ZkNrQLqab3olpAu/Xe3raGLgCOU0RKJw
EPF3RcKu8ALuRcovfwzXWg8w19QRUPewZdVC4VgslKp8mNLECvdUxxVIDQWf06RZ
uCAfbqdiYReE62QT7NR4lAa1RpfU7Nt149OcQEP8VKTAZgTYyuwhRXFbrDD3Zp58
k5H0nKHNX+w1Ayih/YUk2b3etaBhlcTVAy/73TPfrd3Gl8dtzJZNtUD/eLWdGfP9
mCghmyAqbiQngH2eAMeifIYornynZFVBPBlvnwy7Iouq0V6tIVyNPGp0jcy1j2XT
NRBJyFbvam3hmrRW8A/VOJQ1W7LOKaM/5lh/BarrSLKn0xlL97GTmuSqlS+WrmyM
kU182TUYyUD7Rs3mydnMVS/N4aRxu4ITaTm9vieZLmAPR9vPgo+GwdHEkwm797kA
EQEAAYkBtgQYAQoAIAIbDBYhBI0tQ4sTQp2qGa0RbrzHlIN0FvBFBQJc9XqkAAoJ
ELzHlIN0FvBFlyEL/jVhm2PFj2mCLuKE5/nV4JvxY4Qu4+NCFiEdYK+zUoD36gEJ
3VjHL5dydHuZWcvm+XLW1PseNx2awVs47mjv2iZOLwY6BtfAFWhWEFmBEe6fTFXz
KkDWRst4gm0b0B7S3byoABwcyYNS6RkTfUApK4zdYErbfOLoT+Xa08YQKLVK7fmE
KBnBnnHUvktYTEvhwv9BID+qLnTVSQcjRcXbDQAYm14c7Nyb/SyxcUaUkCk41MVY
+vzNQlFrVc4h2np41X8JbmrsQb37E7lE+h32sJFBU03SGf0vT7SXXQj+UD/DEGay
Gt/8aRa5FGrcJyM5mTdbSgvCp0EjTrdokK5GHwM23cbSTo+nN9BNhIBRc4929SaJ
DVRqOIoJ+eHZdf3gIkMPOA3fBbMYzW65LIxt/p49tHD0c/nioZETycEgGuuYbnrn
IfXHFqiCAxkobIHqUg/BSu1cs8GNgE7BVUXy8JThuzmVdh4Pvd3YN1ouoPyVuDrk
ylirh0aqUQdSeIuJTg==
=yF8M
-----END PGP PUBLIC KEY BLOCK-----
"""
GPG_TEST_PRIV_KEY = """-----BEGIN PGP PRIVATE KEY BLOCK-----
lQWFBFz1dx4BDACph7J5nuWE+zb9rZqTaL8akAnPAli2j6Qtk7BTDzTM9Kq80U2P
O3QRAFBQDODsgmrBTWgeZeNhN6Snz2WrZ8pC0RMK+mCmEt5S49ydWtvWl/xtzPfg
sy8h8OrIjSH1G0NQv9vdBpg2Y9vXLNDCZAqH0/Sh/wCkHT80c4i8TU09UZfx96S6
fFVmB7nHu/ztgiIkC6Fr04WGHlkd50g8r8CFPvwKOTD+rfoMsGppmAC1+y8ajfik
B+tEL88Rv2s4hLU78nQ3iGtdVRSs5Ip0x4O/PYZIDEd8KuBMo+flSzOZj2HCbUzN
MHiTXIbL8hBlwku9LGO7Itks4v2vfDh57JRHykwzGnvOlgXtSSvbayWLohNXtzWq
WtsMKrsrsIUprg1jhIVHnMSZZXMMizlni6MT5vbil2Bgn1g7diNscDnfCD6vlWUH
FRS1XkFFZ5ozu0+lC/5UaTcrjhH8/hQBNJwjsDSPsyKYQm+t7PXqq4SlI3Er7JJW
esK0diZ6reeebl0AEQEAAf4HAwIqiZQqEMAZQ/+u0gE6tBcp52lUhE9fjORqgsY6
C5klAfrnrQyHXYkfjjQMWErSDR5FHeOxOPdZNnhVTBRaUIypLd+Os/YWl5lVO223
znbfK8GJIwHbDFQBSxtkC3WtD8cCqtKXvzApZzqeOhgNcaFJE956ZNlZfsCf0qsm
6xpEq07YiRVga6jJvjIFiPv7wMtdQQ67pEP4/tavLl+yuf6oJko2FjuG3RxrTf/C
CB4tyHCsRwgV7ouEdoSVhjFiDSS5xeWWLPRaXu4ceL0AjClHmdlMJtcpbyXKoh3U
uG5Cnwv9gXh24Qc6uuTWX61u7EhFLUWmKMFOI8dA+INYS8cXU8t6TU6XeV/01c7N
Q1O6QUCOx5VRbWRQuuvQN4f1gZm5QqN2jpNWjoUp2GSoxcHycEVSweEr+TmaouDA
ZOo12gx6dppkiwqS7Feq28qdpiZZPfdl/CvuWHxveNU9OVlexJ6A5PLep053qY+3
OlkvvkOxwmkJk2A3ITb1XngQkZCQDxAqCG6xMYjGIblKqqLqV1/q3pQ1nNbq5/iM
OtoxB7O7kZcyk7fQodk8EUz/ehTAZ5K8EWUETmiH9YlKTBbw8YMYEnuKfUFW6xqT
ROqurJfBlYmZEOxQ3oDVLZSfJQ3g/SXAOTKprB9GKyahM026Y+gfqR7yfwA8ifrH
E+HV4u7n/UjaUgu45LRGLGZQ7slmm79uYcVhBodQ0/eWcow7roHpOCFWTgyY3uhS
xdfuqgkEF8BGjQFaI4VNVeY+3+SM989BagAFuDlJw33fDoFSTPt9C+sqP1t1IvLv
9Eajn55MhO6gUptO3ViFPQE/EkiOrmaAJglu1LHEF/ssqWb/1+RGqF6N0OkKC+gx
iuuTgWl4wfxUsyh2JqIcj+xHRSf3G9OVJYkXaYsSNQ2eoSRlEzzu7Cxi83/qt6Dm
S+ST4wHl2VypfkhbNMq0W1aR8Kubi2Ixvk31ZDyk0uecRf3kNjVwD84WPjDedBsh
5rtCZO5kCAyWooCG41il09HfV9NCTjACCeO+dl4FO5aaLS0JSCBLVtORtwDCCZz+
QhS9CeXC+ook7sIaaiT0xWSnPmhEYE6roqwj4Lq3vvXIgHZjxeJizlGO0OSdTPBw
9wQ5ij/8G6MEGap4thvTohsFGUxHK2xx8Z089kGdmKd4piY/kjtX7AFtLEc0YiDa
w7PTlrqJA9FRta7g/aYVCKBk8G+8dxiErErFgm6RBxnQiJ/lLUAVsJv1cAQ8oyCK
GyDzGXEFk9eQtKGczF4CK8NhOMc9HabnQnzxcMGiVXEn/E3bDqocfeOAWEYft8zJ
sy96EJAk581uZ4CiKOcQW+Zv3N8O7ogdtCF0ZXN0a2V5IDxwYWNrYWdpbmdAc2Fs
dHN0YWNrLmNvbT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AW
IQSNLUOLE0KdqhmtEW68x5SDdBbwRQUCXPV6SgAKCRC8x5SDdBbwRWbWC/9nkIKo
WgXwhelreDQb70LiBFLSH60Ri2N5sCDEGf92qTpxI0hvUEYMz+xJbErn+PB43wIX
+iNWA/UIWteKBQucveDnfnUv7x95TYiY/Y4wvjfz8Zw44n6LBMpQFG6rGorw3BxG
643piZuO1TAlIPb7rQOVZ8awifJ6fAaSAEAzjhjp1IBcqybp0n8gZVNAiwcTmYpH
KStFnWPJfHCLUIXx8TIbp801vwjw6LKPnbF1gzEG8aaA+kWUXWNGhT45WIYV5SJn
j1cWULwY0J9uhZ9eD48LBl5ZR4n+zprqILkcbuILRdhGKqHPKY/xy2vDCbl09Ph5
tg19RKqryLT9gbokZ2xBmurSLQW5x5RJ84pV4W9Kb4CQIbeQvHGAz3bl+oF61aIA
lEINh2bLy5yObrXznziGpr/IUw5/BWguqryZWDzjDIZTqBejqFcJr/TTw8hMVkPj
sgu439nyIubw6v9okPlOr5aDevYOL34q7z+ByfdtXHAVGOJU+sXAmTKN8wWdBYYE
XPV3HgEMANRifPR58uZoqpkiN/HAN/uJWrBwP1vxwxci/ZprdjnpANUkFdTn3nYu
JTxe7MEE+r7gVhDvxSbFK/dGTospv0/aKHYvU9om5DVmQ2tAuppveiWkC79d7eto
YuAI5TREonAQ8XdFwq7wAu5Fyi9/DNdaDzDX1BFQ97Bl1ULhWCyUqnyY0sQK91TH
FUgNBZ/TpFm4IB9up2JhF4TrZBPs1HiUBrVGl9Ts23Xj05xAQ/xUpMBmBNjK7CFF
cVusMPdmnnyTkfScoc1f7DUDKKH9hSTZvd61oGGVxNUDL/vdM9+t3caXx23Mlk21
QP94tZ0Z8/2YKCGbICpuJCeAfZ4Ax6J8hiiufKdkVUE8GW+fDLsii6rRXq0hXI08
anSNzLWPZdM1EEnIVu9qbeGatFbwD9U4lDVbss4poz/mWH8FqutIsqfTGUv3sZOa
5KqVL5aubIyRTXzZNRjJQPtGzebJ2cxVL83hpHG7ghNpOb2+J5kuYA9H28+Cj4bB
0cSTCbv3uQARAQAB/gcDAgUPU1tmC3CS/x0qZYicVcMiU5wop6fnbnNkEfUQip8V
qpL64/GpP6X7sJiY2BCo0/5AMPDKlTwPxogMQ6NduZ2AbvJybGC7AQULMkd4Y4H1
nwrDk5HWO5dLVoXRSVw9Dm6oaV4bi6wno9yapkq7AVRnvtNEza47gxmV2iwRoU5H
5ciQTU6nd1TkFNhD4ZwZ25CMqffvbrE2Ie6RsBUr9HlxYIint91rVLkkBGhw8W4t
KushxAZpBOQB0Rqtuak/q+/xyDnvNvU/A9GeKpRrxzwAbIdtW0VjPulDk1tThGDA
kmuxSJ1yxUb+CzA/5YoMXto1OqjUI2hO108xgTVl5hpmckBnwsPtbjrtDYFAqwfq
qF9YAVQ3RfMn3ThZ2oXg+FJMcwd9uVJn2/LZd81Nc6g4UogD1sD2ye2vqDGTEztK
BAdthEcufnUP5UrEixE8CPzAJOjuC5ROU57FXCaSSUfIwXO3UoxvNWcuXDC7RVDz
nsv/Hg2j0pSeFht2NO6Pom+4XHY+LHImPTfXamN6IDsTRJGQW8R7Y131fjPQMn7I
0WjyIiqD4eLo5KQYjL+0bE0JiLRaJnlfbu1uoV3sgi8bwG6WlnLh7eKDErA2P0Zs
r0KX5yGR5Ih2CAMfMmqqYrkEYmNxNbLvL5ie9F35MnvRbqyY/9pl0p1Eah7uGnuK
or13bg801HoZJLBTr4sJTqkwuUztloVyBdM6T5hal+rxX37Wnj1PgD0e0Ydqo6hJ
7WJ/Zjd+0qk90VoiGNRre7tMBYDQ3w3wS+tSta3kxTKj5I4hLZncN+pt9F6o+tgd
YAhWO93DzWjMMUV/jkKTJqwAFAuRlnTwzbBS70N2Z8jrGczV05RV9OH7DRr34noF
O7/Bn0iDpKZdbArtkJZyu4B+MUp/RRiuxn7iWOM2tEjDhUuyHXYYFppFB8fG7r52
VcxH/Sc3VcXB0l2KywrAG2oZfiE8M4NPHuiIHFpcjeK2VLrP2iGLvdlL4IsvtFIU
uLiFi7r0egEi/Ge8ebRF7TtjmhL5Jzi9bbUGuvxSIGZU1HCECq+hHVy45XwKrRTo
AzDIwNjBFmZzL7FI7X16W/6Y11VVmXEmDt9dmmu78bT0z2Bq0Q7K9C7Eq2qzW65z
+4fntFF8BWDs3l5yPKLjg+wlgPPXieHgrUQpZOYCsFJqig3xFZSu1ZMzYdlvyNSF
KAgMPZfi37kAUo8ZiH27SZAA/aTK6b69oEQ6I7CsMJZLRp/gzYvn4NN/DIK3fuYc
jsKB6OR3gWmU7EDf/1EZkO0YK2YiwkSrDALJdUo7ArYR2KIZTUEG9rxDBUD8IyIz
PGdh7sBG4PhOxpQ+SiZyzLzZAJjviQG2BBgBCgAgAhsMFiEEjS1DixNCnaoZrRFu
vMeUg3QW8EUFAlz1eqQACgkQvMeUg3QW8EWXIQv+NWGbY8WPaYIu4oTn+dXgm/Fj
hC7j40IWIR1gr7NSgPfqAQndWMcvl3J0e5lZy+b5ctbU+x43HZrBWzjuaO/aJk4v
BjoG18AVaFYQWYER7p9MVfMqQNZGy3iCbRvQHtLdvKgAHBzJg1LpGRN9QCkrjN1g
Stt84uhP5drTxhAotUrt+YQoGcGecdS+S1hMS+HC/0EgP6oudNVJByNFxdsNABib
Xhzs3Jv9LLFxRpSQKTjUxVj6/M1CUWtVziHaenjVfwluauxBvfsTuUT6HfawkUFT
TdIZ/S9PtJddCP5QP8MQZrIa3/xpFrkUatwnIzmZN1tKC8KnQSNOt2iQrkYfAzbd
xtJOj6c30E2EgFFzj3b1JokNVGo4ign54dl1/eAiQw84Dd8FsxjNbrksjG3+nj20
cPRz+eKhkRPJwSAa65hueuch9ccWqIIDGShsgepSD8FK7VyzwY2ATsFVRfLwlOG7
OZV2Hg+93dg3Wi6g/JW4OuTKWKuHRqpRB1J4i4lO
=WRTN
-----END PGP PRIVATE KEY BLOCK-----
"""
try:
import gnupg # pylint: disable=import-error,unused-import
HAS_GPG = True
except ImportError:
HAS_GPG = False
@destructiveTest
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not salt.utils.platform.is_linux(), 'These tests can only be run on linux')
class GpgTestCase(TestCase, LoaderModuleMockMixin):
'''
Validate the gpg module
'''
def setup_loader_modules(self):
return {gpg: {'__salt__': {}}}
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def setUp(self):
super(GpgTestCase, self).setUp()
self.gpghome = os.path.join(TMP, 'gpghome')
if not os.path.isdir(self.gpghome):
# left behind... Don't fail because of this!
os.makedirs(self.gpghome)
self.gpgfile_pub = os.path.join(TMP, 'gpgfile.pub')
with salt.utils.files.fopen(self.gpgfile_pub, 'wb') as fp:
fp.write(salt.utils.stringutils.to_bytes(GPG_TEST_PUB_KEY))
self.gpgfile_priv = os.path.join(TMP, 'gpgfile.priv')
with salt.utils.files.fopen(self.gpgfile_priv, 'wb') as fp:
fp.write(salt.utils.stringutils.to_bytes(GPG_TEST_PRIV_KEY))
self.user = 'salt'
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def tearDown(self):
if os.path.isfile(self.gpgfile_pub):
os.remove(self.gpgfile_pub)
shutil.rmtree(self.gpghome, ignore_errors=True)
super(GpgTestCase, self).tearDown()
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_list_keys(self):
'''
Test gpg.list_keys
'''
_user_mock = {u'shell': u'/bin/bash',
u'workphone': u'',
u'uid': 0,
u'passwd': u'x',
u'roomnumber': u'',
u'gid': 0,
u'groups': [
u'root'
],
u'home': u'/root',
u'fullname': u'root',
u'homephone': u'',
u'name': u'root'}
_list_result = [{u'dummy': u'',
u'keyid': u'xxxxxxxxxxxxxxxx',
u'expires': u'2011188692',
u'sigs': [],
u'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
u'length': u'4096',
u'ownertrust': u'-',
u'sig': u'',
u'algo': u'1',
u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
u'date': u'1506612692',
u'trust': u'-',
u'type': u'pub',
u'uids': [u'GPG Person <person@example.com>']}]
_expected_result = [{u'keyid': u'xxxxxxxxxxxxxxxx',
u'uids': [u'GPG Person <person@example.com>'],
u'created': '2017-09-28',
u'expires': '2033-09-24',
u'keyLength': u'4096',
u'ownerTrust': u'Unknown',
u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
u'trust': u'Unknown'}]
mock_opt = MagicMock(return_value='root')
with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
with patch.object(gpg, '_list_keys', return_value=_list_result):
self.assertEqual(gpg.list_keys(), _expected_result)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_get_key(self):
'''
Test gpg.get_key
'''
_user_mock = {u'shell': u'/bin/bash',
u'workphone': u'',
u'uid': 0,
u'passwd': u'x',
u'roomnumber': u'',
u'gid': 0,
u'groups': [
u'root'
],
u'home': u'/root',
u'fullname': u'root',
u'homephone': u'',
u'name': u'root'}
_list_result = [{u'dummy': u'',
u'keyid': u'xxxxxxxxxxxxxxxx',
u'expires': u'2011188692',
u'sigs': [],
u'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
u'length': u'4096',
u'ownertrust': u'-',
u'sig': u'',
u'algo': u'1',
u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
u'date': u'1506612692',
u'trust': u'-',
u'type': u'pub',
u'uids': [u'GPG Person <person@example.com>']}]
_expected_result = {u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
u'keyid': u'xxxxxxxxxxxxxxxx',
u'uids': [u'GPG Person <person@example.com>'],
u'created': u'2017-09-28',
u'trust': u'Unknown',
u'ownerTrust': u'Unknown',
u'expires': u'2033-09-24',
u'keyLength': u'4096'}
mock_opt = MagicMock(return_value='root')
with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
with patch.object(gpg, '_list_keys', return_value=_list_result):
ret = gpg.get_key('xxxxxxxxxxxxxxxx')
self.assertEqual(ret, _expected_result)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_delete_key(self):
'''
Test gpg.delete_key
'''
_user_mock = {u'shell': u'/bin/bash',
u'workphone': u'',
u'uid': 0,
u'passwd': u'x',
u'roomnumber': u'',
u'gid': 0,
u'groups': [
u'root'
],
u'home': self.gpghome,
u'fullname': u'root',
u'homephone': u'',
u'name': u'root'}
_list_result = [{'dummy': u'',
'keyid': u'xxxxxxxxxxxxxxxx',
'expires': u'2011188692',
'sigs': [],
'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
'length': u'4096',
'ownertrust': u'-',
'sig': u'',
'algo': u'1',
'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
'date': u'1506612692',
'trust': u'-',
'type': u'pub',
'uids': [u'GPG Person <person@example.com>']}]
_expected_result = {u'res': True,
u'message': u'Secret key for xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx deleted\nPublic key for xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx deleted'}
mock_opt = MagicMock(return_value='root')
with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
with patch.object(gpg, '_list_keys', return_value=_list_result):
with patch('salt.modules.gpg.gnupg.GPG.delete_keys', MagicMock(return_value='ok')):
ret = gpg.delete_key('xxxxxxxxxxxxxxxx', delete_secret=True)
self.assertEqual(ret, _expected_result)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_search_keys(self):
'''
Test gpg.search_keys
'''
_user_mock = {'shell': '/bin/bash',
'workphone': '',
'uid': 0,
'passwd': 'x',
'roomnumber': '',
'gid': 0,
'groups': [
'root'
],
'home': self.gpghome,
'fullname': 'root',
'homephone': '',
'name': 'root'}
_search_result = [{u'keyid': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
u'uids': [u'GPG Person <person@example.com>'],
u'expires': u'',
u'sigs': [],
u'length': u'1024',
u'algo': u'17',
u'date': int(time.mktime(datetime.datetime(2004, 11, 13).timetuple())),
u'type': u'pub'}]
_expected_result = [{u'uids': [u'GPG Person <person@example.com>'],
'created': '2004-11-13',
u'keyLength': u'1024',
u'keyid': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'}]
mock_opt = MagicMock(return_value='root')
with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
with patch.object(gpg, '_search_keys', return_value=_search_result):
ret = gpg.search_keys('person@example.com')
self.assertEqual(ret, _expected_result)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_gpg_import_pub_key(self):
config_user = MagicMock(return_value='salt')
user_info = MagicMock(return_value={'name': 'salt', 'home': self.gpghome, 'uid': 1000})
with patch.dict(gpg.__salt__, {'config.option': config_user}):
with patch.dict(gpg.__salt__, {'user.info': user_info}):
ret = gpg.import_key(None, self.gpgfile_pub, 'salt', self.gpghome)
self.assertEqual(ret['res'], True)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_gpg_import_priv_key(self):
config_user = MagicMock(return_value='salt')
user_info = MagicMock(return_value={'name': 'salt', 'home': self.gpghome, 'uid': 1000})
with patch.dict(gpg.__salt__, {'config.option': config_user}):
with patch.dict(gpg.__salt__, {'user.info': user_info}):
ret = gpg.import_key(None, self.gpgfile_priv, 'salt', self.gpghome)
self.assertEqual(ret['res'], True)
@skipIf(not HAS_GPG, 'GPG Module Unavailable')
def test_gpg_sign(self):
config_user = MagicMock(return_value='salt')
user_info = MagicMock(return_value={'name': 'salt', 'home': self.gpghome, 'uid': 1000})
pillar_mock = MagicMock(return_value={'gpg_passphrase': GPG_TEST_KEY_PASSPHRASE})
with patch.dict(gpg.__salt__, {'config.option': config_user}):
with patch.dict(gpg.__salt__, {'user.info': user_info}):
with patch.dict(gpg.__salt__, {'pillar.get': pillar_mock}):
ret = gpg.import_key(None, self.gpgfile_priv, 'salt', self.gpghome)
self.assertEqual(ret['res'], True)
gpg_text_input = 'The quick brown fox jumped over the lazy dog'
gpg_sign_output = gpg.sign(config_user, GPG_TEST_KEY_ID, gpg_text_input, None, None, True, self.gpghome)
self.assertIsNotNone(gpg_sign_output)
|
import requests
from requests.compat import urlencode
from errors import TwitchAPIError
from utils import join_urls
class TwitchSubscribeClient:
"""Streamer event subscriptions manager.
Allows to subscribe to streamer's updates.
Subscribable events:
* Streamer starts following some channe;;
* Streamer got followed by someone;
* Streamer's stream changed;
* TODO: Streamer profile changed (seems, subscribing
that requires TSL certs configure. To investigate).
"""
BASE_URL = 'https://api.twitch.tv/helix/'
WEBHOOKS_HUB_ENDPOINT = join_urls('webhooks', 'hub')
LEASE_SECONDS = 1000
REQUEST_TIMEOUT_SECONDS = 90
class RequestMode:
SUBSCRIBE = 'subscribe'
UNSUBSCRIBE = 'unsubscribe'
def __init__(self, streamer_name: str, client_id: str,
access_token: str, session_id: str, callback_url: str):
"""SubscriptionClient constructor.
:param streamer_name: Favorite streamer's name.
:param client_id: Twitch App client id.
:param access_token: Access token.
        :param session_id: Unique socket session id.
        :param callback_url: Webhook callback URL that Twitch will notify.
        """
self._client_id = client_id
self._access_token = access_token
self._session_id = session_id
self._callback_url = callback_url
self.streamer_id = self._get_user_id(streamer_name)
def subscribe_to_all_events(self) -> None:
"""Subscribe to all available events."""
        # TODO: make asynchronous via async/await.
self.subscribe_following()
self.subscribe_followed_by()
self.subscribe_stream_changed()
        # Subscribing to user changes requires TLS/SSL certs installed (https).
# self.subscribe_user_changed()
def unsubscribe_from_all_events(self) -> None:
"""Revoke subscription from all events."""
# TODO: Implement events unsubscription for production.
...
    def subscribe_following(self) -> requests.Response:
        """Subscribe to the "Streamer starts following someone" event."""
        topic_url = join_urls(self.BASE_URL, 'users/follows')
        # from_id filters follow events originated by the streamer.
        params = dict(from_id=self.streamer_id)
        return self._subscribe(topic_url, params)
    def subscribe_followed_by(self) -> requests.Response:
        """Subscribe to the "Streamer is followed by someone" event."""
        topic_url = join_urls(self.BASE_URL, 'users/follows')
        # to_id filters follow events targeting the streamer.
        params = dict(to_id=self.streamer_id)
        return self._subscribe(topic_url, params)
def subscribe_stream_changed(self) -> requests.Response:
"""Subscribe stream changes events."""
topic_url = join_urls(self.BASE_URL, 'streams')
params = dict(user_id=self.streamer_id)
return self._subscribe(topic_url, params)
def subscribe_user_changed(self) -> requests.Response:
"""Subscribe "user changed" event.
TODO: This will not work, when callback server uses unsecure connection."""
topic_url = join_urls(self.BASE_URL, 'users')
params = dict(id=self.streamer_id)
return self._subscribe(topic_url, params)
def _subscribe(self, topic_url: str, params: dict) -> requests.Response:
"""Subscribe certain topic with the given params.
:param: topic_url. Twitch topic url.
:param: params. Subscription params.
:return: Obtained response:
"""
return self._webhooks_hub_request(
topic_url,
self.RequestMode.SUBSCRIBE,
params=params
)
def _unsubscribe(self, topic_url: str, params: dict) -> requests.Response:
"""Unsubscribe topic.
:param: topic_url. Subscribing topic url.
:param: params. Subscription params.
:return: Received response.
"""
return self._webhooks_hub_request(
topic_url,
self.RequestMode.UNSUBSCRIBE,
params=params
)
def _webhooks_hub_request(self, topic_url: str, mode: str,
params: dict=None, method: str='POST') -> requests.Response:
"""Send request to Twitch Webhooks Hub.
        :param topic_url: Subscribing topic url.
        :param mode: Subscription mode.
:param params: Subscription params.
:param method: Request method.
:return: Received response.
"""
url = join_urls(self.BASE_URL, self.WEBHOOKS_HUB_ENDPOINT)
        urlencoded_params = urlencode(params or {})
cb_url = join_urls(
self._callback_url,
self._session_id
)
return requests.request(method, url, data={
'hub.mode': mode,
'hub.topic': f'{topic_url}?{urlencoded_params}',
'hub.callback': cb_url,
'hub.lease_seconds': self.LEASE_SECONDS
# TODO: support hub.secret for production
# "hub.secret":"s3cRe7",
}, headers=self._headers)
@property
def _bearer_token(self) -> str:
return f'Bearer {self._access_token}'
@property
def _headers(self) -> dict:
return {
'Authorization': self._bearer_token,
'Client-ID': self._client_id
}
    def _get_user_id(self, username: str) -> str:
        """Get streamer's ID by username.
        :param username: Streamer's username.
        :return: Obtained streamer's ID.
        """
response = self._base_request(f'users/?login={username}')
# Raise corresponding error, if error code returned.
response.raise_for_status()
try:
user_id = response.json()['data'][0]['id']
except IndexError:
raise TwitchAPIError('Failed to obtain user id.')
return user_id
def _base_request(self, endpoint: str, method: str='GET', params: dict=None):
if params is None:
params = ''
else:
params = '?' + urlencode(params)
url = join_urls(self.BASE_URL, endpoint, params)
response = requests.request(method, url, headers=self._headers,
timeout=self.REQUEST_TIMEOUT_SECONDS)
# Raise corresponding error, if error code returned.
response.raise_for_status()
return response
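# Hedged usage sketch (all values below are placeholders, not real credentials):
#     client = TwitchSubscribeClient(
#         streamer_name='some_streamer',
#         client_id='<twitch-app-client-id>',
#         access_token='<oauth-access-token>',
#         session_id='<socket-session-id>',
#         callback_url='https://example.com/webhooks',
#     )
#     client.subscribe_to_all_events()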
|
# -*- coding: utf-8 -*-
import pickle
import traceback
from copy import deepcopy
from inspect import signature
from pathlib import Path
import joblib
from joblib._store_backends import concurrency_safe_rename, concurrency_safe_write
from .hashing import CodeObj
from .proxy_backend import Factory, HashProxy
def _concurrency_safe_write(to_write, filename, write_func):
    """Write an object into a file in a concurrency-safe way."""
    try:
        temporary_filename = concurrency_safe_write(to_write, filename, write_func)
    except Exception:
        print("Something went wrong before moving the file!")
        traceback.print_exc()
        # Re-raise: `temporary_filename` is undefined here, so renaming would fail anyway.
        raise
    concurrency_safe_rename(temporary_filename, filename)
def get_map_filename(memory):
return Path(memory.location) / "_process_proxy_map"
def load_hash_map(memory):
map_filename = get_map_filename(memory)
if not map_filename.is_file():
return {}
with map_filename.open("rb") as f:
return pickle.load(f)
def get_processed_hash(key, func, var, memory):
if not hasattr(memory.store_backend, "_process_proxy_hash_map"):
memory.store_backend._process_proxy_hash_map = load_hash_map(memory)
else:
if key in memory.store_backend._process_proxy_hash_map:
return memory.store_backend._process_proxy_hash_map[key]
# Update the mapping before giving up and calculating the result.
memory.store_backend._process_proxy_hash_map = load_hash_map(memory)
if key in memory.store_backend._process_proxy_hash_map:
return memory.store_backend._process_proxy_hash_map[key]
# Key was not found, so we have to evaluate the function in order to determine the
# resulting hash.
processed_hash = memory.get_hash(func(var))
# Reload the map in case it changed in the meantime.
memory.store_backend._process_proxy_hash_map = load_hash_map(memory)
# Add the new hash to the map.
    # NOTE - this leaves a small window of time where concurrent access may result
    # in new hashes not being recorded.
memory.store_backend._process_proxy_hash_map[key] = processed_hash
# Save the updated mapping.
def write_func(to_write, dest_filename):
with open(dest_filename, "wb") as f:
pickle.dump(to_write, f, protocol=-1)
_concurrency_safe_write(
memory.store_backend._process_proxy_hash_map,
get_map_filename(memory),
write_func,
)
def get_function_hash(func):
    # Ensure that the hash can be calculated, i.e. that there are no mutable
    # objects present in the default arguments. Copy the object since some
    # objects (e.g. immutabledict) cache the hash resulting from calling
    # `hash()` (e.g. in a '_hash' attribute), and since the output of Python's
    # `hash()` function is not constant across sessions, this would cause
    # Joblib's hash to change as well (which we do not want).
hash(deepcopy(signature(func)))
    # Finally, calculate the hash using Joblib because the built-in hash()
    # function changes its output between runs.
return joblib.hashing.hash(signature(func)) + joblib.hashing.hash(
CodeObj(func.__code__).hashable()
)
def process_proxy(output, functions, memory):
"""Lazily apply deterministic `functions` to `output`.
Note that the functions should not contain closure variables, since these will not
    (currently) influence the functions' hash values.
Args:
output (N-tuple): Tuple of return values. May consist of `HashProxy` instances.
functions (N-tuple of callable): Deterministic functions to apply to `output`.
The i-th function will be applied to the i-th output.
Returns:
tuple of HashProxy: Lazily applied functions onto output.
Raises:
ValueError: If `len(output) != len(functions)`.
"""
if len(output) != len(functions):
raise ValueError(
"Expected the same number of outputs and functions. "
f"Got {len(output)} and {len(functions)}."
)
processed = []
for var, func in zip(output, functions):
lazy_hash_key = memory.get_hash(var) + get_function_hash(func)
processed_hash = get_processed_hash(lazy_hash_key, func, var, memory)
        processed.append(
            HashProxy(
                # Bind the current `func` and `var` as defaults so the lazily
                # evaluated factory does not pick up the loop's final values.
                Factory(lambda func=func, var=var: func(var)),
                hash_func=memory.get_hash,
                hash_value=processed_hash,
            )
        )
return tuple(processed)
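# Hedged usage sketch (assumes a `memory` object exposing `get_hash`, as used above;
# `raw_output` and the choice of `np.squeeze` are illustrative only):
#     (proxied,) = process_proxy((raw_output,), (np.squeeze,), memory=memory)
#     # `np.squeeze` is only evaluated, and its result hashed, when `proxied` is used.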
def get_attribute(attribute):
"""Reimplementation of operator.attrgetter that can be used with `signature`."""
def _get_attr(obj):
return getattr(obj, attribute)
return _get_attr
class EstimatorHashProxy(HashProxy):
"""Lazy Estimator proxy containing a pre-calculated hash value."""
__slots__ = "predict"
def __init__(self, *args, predict_proxy, **kwargs):
super().__init__(*args, **kwargs)
self.predict = predict_proxy
def get_proxied_estimator(estimator, memory):
"""Enable lazy retrieval of `estimator.predict`."""
# Turn into lazy object with a cached hash value.
def get_estimator():
return estimator
return EstimatorHashProxy(
Factory(get_estimator),
memory.get_hash,
hash_value=memory.get_hash(estimator),
predict_proxy=process_proxy(
(estimator,),
(get_attribute("predict"),),
memory=memory,
)[0],
)
|
from torch import nn
from torchvision.models.resnet import Bottleneck, BasicBlock
import torch
from neuralpredictors.utils import eval_state
def get_module_output(model, input_shape, neural_set):
"""
    Get the output dimensions of the convolutional core by passing a dummy
    input image through all convolutional layers.
    :param model: model containing the convolutional core whose final
        dimensions need to be passed on to the readout layer
    :param input_shape: the dimensions of the input
    :param neural_set: neural set identifier forwarded to the model's forward pass
    :return: output dimensions of the core
"""
initial_device = "cuda" if next(iter(model.parameters())).is_cuda else "cpu"
device = "cuda" if torch.cuda.is_available() else "cpu"
with eval_state(model):
with torch.no_grad():
            dummy_input = torch.zeros(1, *input_shape[1:]).to(device)
            output = model.to(device)(dummy_input, neural_set=neural_set)
model.to(initial_device)
return output[0].shape
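# Hedged usage sketch (the model, input shape and neural set are illustrative):
#     core_output_shape = get_module_output(model, (1, 3, 64, 64), neural_set="v1")
#     # e.g. pass core_output_shape[1:] on to the readout layer constructor.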
def freeze_params(model, to_freeze=None, not_to_freeze=None):
for name, param in model.named_parameters():
if to_freeze:
freeze = False
for freeze_key in to_freeze:
if freeze_key in name:
freeze = True
elif not_to_freeze:
freeze = True
for un_freeze_key in not_to_freeze:
if un_freeze_key in name:
freeze = False
else:
raise Exception(
"Please provide either to_freeze or not_to_freeze arguments!"
)
if freeze and param.requires_grad:
param.requires_grad = False
def freeze_mtl_shared_block(model, multi, tasks):
if multi:
if "v1" in tasks:
for param in model.module.mtl_vgg_core.v1_block.parameters():
param.requires_grad = False
if "v4" in tasks:
for param in model.module.mtl_vgg_core.v4_block.parameters():
param.requires_grad = False
else:
if "v1" in tasks:
for param in model.mtl_vgg_core.v1_block.parameters():
param.requires_grad = False
if "v4" in tasks:
for param in model.mtl_vgg_core.v4_block.parameters():
param.requires_grad = False
def weight_reset(m, advanced_init=False, zero_init_residual=False):
if (
isinstance(m, nn.Conv1d)
or isinstance(m, nn.Conv2d)
or isinstance(m, nn.Linear)
or isinstance(m, nn.Conv3d)
or isinstance(m, nn.ConvTranspose1d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.ConvTranspose3d)
or isinstance(m, nn.BatchNorm1d)
or isinstance(m, nn.BatchNorm2d)
or isinstance(m, nn.BatchNorm3d)
or isinstance(m, nn.GroupNorm)
):
m.reset_parameters()
if advanced_init and isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif advanced_init and isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual and isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif zero_init_residual and isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def get_model_parameters(model):
total_parameters = 0
for layer in list(model.parameters()):
layer_parameter = 1
for l in list(layer.size()):
layer_parameter *= l
total_parameters += layer_parameter
return total_parameters
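# Equivalent one-liner for reference, yielding the same count as get_model_parameters:
#     total_parameters = sum(p.numel() for p in model.parameters())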
def reset_params(model, reset=None):
model = model.module if isinstance(model, nn.DataParallel) else model
if reset == "all":
print(f"Resetting all parameters")
model.apply(weight_reset)
elif reset:
print(f"Resetting {reset}")
for name in reset:
block, layer = name.split(".")[0], int(name.split(".")[1])
getattr(model, block)[layer].apply(weight_reset)
|
import os
SECRET_KEY = '1234'
MIDDLEWARE_CLASSES = tuple()
WITH_WQDB = os.environ.get('WITH_WQDB', False)
if WITH_WQDB:
WQ_APPS = (
'wq.db.rest',
'wq.db.rest.auth',
)
else:
WQ_APPS = tuple()
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'data_wizard',
) + WQ_APPS + (
'tests.file_app',
'tests.data_app',
'tests.naturalkey_app',
'tests.eav_app',
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'data_wizard_test',
'USER': 'postgres',
}
}
ROOT_URLCONF = "tests.urls"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
CELERY_RESULT_BACKEND = BROKER_URL = 'redis://localhost/0'
if WITH_WQDB:
from wq.db.default_settings import * # noqa
|
import os
import argparse
import pandas as pd
from datetime import datetime
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from detection.engine import train_one_epoch, evaluate
from src.model import FasterRCNN
from src.dataset import SVHN_Dataset
from src.transforms import train_transform
from src.utils import *
cur_time = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
parser = argparse.ArgumentParser()
parser.add_argument(
'--save_root',
type=str,
default=f'./checkpoint/{cur_time}',
help='save root'
)
parser.add_argument(
'--folder',
type=str,
default='./train',
help='train image folder'
)
parser.add_argument(
'--optimizer',
type=str,
default='AdamW',
help='optimizer')
parser.add_argument(
'--lr',
type=float,
default=1e-4,
help='initial_learning_rate'
)
parser.add_argument(
'--weight_decay',
type=float,
default=5e-4,
help='weight decay'
)
parser.add_argument(
'--step_size',
type=int,
default=1,
help='learning decay period'
)
parser.add_argument(
'--gamma',
type=float,
default=0.9,
help='learning rate decay factor'
)
parser.add_argument(
'--max_epochs',
type=int,
default=20,
help='maximum epochs'
)
parser.add_argument(
'--batch_size',
type=int,
default=4,
help='batch size'
)
parser.add_argument(
'--num_workers',
type=int,
default=4,
help='number of workers'
)
parser.add_argument(
'--num_classes',
type=int,
default=11,
help='number of classes'
)
parser.add_argument(
'--cuda',
type=int,
default=0,
help='cuda'
)
args = parser.parse_args()
if __name__ == '__main__':
train_data = pd.read_csv('train_data.csv')
valid_data = pd.read_csv('valid_data.csv')
train_set = SVHN_Dataset(args.folder, train_data, train_transform)
valid_set = SVHN_Dataset(args.folder, valid_data, train_transform)
print(f'train: {len(train_set)}, valid: {len(valid_set)}')
train_dataloader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=args.num_workers
)
valid_dataloader = DataLoader(
valid_set,
batch_size=args.batch_size,
shuffle=False,
collate_fn=collate_fn,
num_workers=args.num_workers
)
model = FasterRCNN(args.num_classes)
device = get_device(args.cuda)
model.to(device)
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay
)
elif args.optimizer == 'AdamW':
optimizer = torch.optim.AdamW(
model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay
)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    else:
        raise ValueError(f'Unsupported optimizer: {args.optimizer}')
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=args.step_size,
gamma=args.gamma)
model.save(args.save_root, 0)
save_json_file(vars(args), os.path.join(args.save_root, 'config.json'))
for ep in range(1, args.max_epochs+1):
train_one_epoch(
model,
optimizer,
train_dataloader,
device,
ep,
print_freq=10
)
lr_scheduler.step()
with torch.no_grad():
evaluate(model, valid_dataloader, device=device)
model.save(args.save_root, ep)
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from __future__ import print_function
import CloudCanvas
import datetime
import leaderboard_utils
import boto3
import random
import json
from boto3.dynamodb.conditions import Key
class ReservoirSampler:
def __init__(self, stat):
self.sample = []
self.stat = stat
    def is_expired(self):
        '''Return True if the sample is out of sync enough to warrant a reload.'''
        return False
    def get_sample(self):
        '''Get the current sample, reloading it first if it has expired.'''
        if self.is_expired():
            print("reloading sample set")
            self.reload_sample()
        return self.sample
    def reload_sample(self):
        '''Recreate the sample (load from AWS, or regenerate on the fly).'''
        pass
def get_full_dataset_size(self):
return 0
class OnTheFlyReservoirSampler(ReservoirSampler):
def __init__(self, stat, size = 200):
ReservoirSampler.__init__(self, stat)
self.expiration_time = datetime.datetime.now() + datetime.timedelta(minutes = 5)
self.full_dataset_size = 0
self.size = size
try:
self.record_table = boto3.resource('dynamodb').Table(CloudCanvas.get_setting("MainTable"))
except Exception as e:
raise Exception("Error getting table reference")
self.reload_sample()
def is_expired(self):
return datetime.datetime.now() > self.expiration_time
def reload_sample(self):
self.expiration_time = datetime.datetime.now() + datetime.timedelta(minutes = 5)
self.sample = []
try:
response = self.record_table.scan()
for item in response.get("Items", []):
score = leaderboard_utils.generate_score_dict(item, self.stat)
if score:
self.add_to_sample(score)
while "LastEvaluatedKey" in response:
response = self.record_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
for item in response.get("Items", []):
score = leaderboard_utils.generate_score_dict(item, self.stat)
if score:
self.add_to_sample(score)
except Exception as e:
raise Exception("Error fetching scores from table")
self.sample = leaderboard_utils.sort_leaderboard(self.sample, self.stat)
def add_to_sample(self, score):
self.full_dataset_size = self.full_dataset_size + 1
if len(self.sample) < self.size:
self.sample.append(score)
        elif random.random() < (float(self.size) / self.full_dataset_size):
            # Standard reservoir sampling: replace a random existing entry so
            # every score has an equal chance of remaining in the sample.
            self.sample[random.randint(0, len(self.sample) - 1)] = score
def get_full_dataset_size(self):
return self.full_dataset_size
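# Illustrative reference only (not used by the samplers above): classic "Algorithm R"
# reservoir sampling over an arbitrary iterable, matching the replacement rule in
# OnTheFlyReservoirSampler.add_to_sample.
def _reservoir_sample_reference(items, size):
    sample = []
    for count, item in enumerate(items, start=1):
        if len(sample) < size:
            # Fill the reservoir until it holds `size` items.
            sample.append(item)
        elif random.random() < float(size) / count:
            # Keep the i-th item with probability size/i, replacing a random entry.
            sample[random.randint(0, size - 1)] = item
    return sample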
class DynamoDBBackedReservoirSampler(ReservoirSampler):
def __init__(self, stat):
ReservoirSampler.__init__(self, stat)
self.leaderboard_table = boto3.resource('dynamodb').Table(CloudCanvas.get_setting("LeaderboardTable"))
self.info_table = boto3.resource('dynamodb').Table(CloudCanvas.get_setting("LeaderboardInfo"))
self.reload_sample()
def reload_sample(self):
try:
new_sample = []
response = self.leaderboard_table.query(
KeyConditionExpression=Key('type').eq('{}_sample'.format(self.stat))
)
for entry in response.get("Items", []):
new_sample.append(self.generate_score_dict(entry))
while "LastEvaluatedKey" in response:
response = self.leaderboard_table.query(
KeyConditionExpression=Key('type').eq('{}_sample'.format(self.stat))
)
for entry in response.get("Items", []):
new_sample.append(self.generate_score_dict(entry))
high_score = json.loads(self.get_leaderboard_info("high_score"))
if high_score:
high_score = self.generate_score_dict(high_score)
if not high_score["user"] in [entry["user"] for entry in new_sample]:
new_sample.append(high_score)
except Exception as e:
raise Exception("Error loading {} leaderboard".format(self.stat))
self.sample = leaderboard_utils.sort_leaderboard(new_sample, self.stat)
self.expiration_time = datetime.datetime.now() + datetime.timedelta(seconds = 30)
def generate_score_dict(self, entry):
return {
"user": entry["user"],
"stat": self.stat,
"value": entry["value"]
}
def is_expired(self):
return datetime.datetime.now() > self.expiration_time
def get_full_dataset_size(self):
return self.get_leaderboard_info("population")
def get_leaderboard_info(self, info):
response = self.info_table.get_item(Key = {"stat": self.stat, "type": info })
if not response.get("Item", {}):
raise Exception("Error retrieving leaderboard info for {}".format(self.stat))
return response["Item"]["value"]
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import messaging
import flask
def firebase_send_notify(title: str, body: str, data: dict = None,
topic: str = 'all', target_token: str = None):
cred = credentials.Certificate(flask.current_app.config.get('FIREBASE_CERTIFICATE'))
default_app = firebase_admin.initialize_app(cred) # noqa
# This registration token comes from the client FCM SDKs.
# registration_token = flask.current_app.config.get('FIREBASE_REGISTERATION_TOKEN')
data = data if data else {}
# See documentation on defining a message payload.
message = messaging.Message(
# android=messaging.AndroidConfig(
# ttl=datetime.timedelta(seconds=3600),
# priority='normal',
# ),
android=None,
apns=None,
webpush=None,
        data={
            # Merge the caller-supplied data with the default click action.
            **data,
            'click_action': 'FLUTTER_NOTIFICATION_CLICK',
        },
notification=messaging.Notification(title=title, body=body),
# notification=None,
fcm_options=None,
topic=topic,
token=target_token,
# condition=None,
)
# Send a message to the device corresponding to the provided
# registration token.
response = messaging.send(message)
# Response is a message ID string.
print('Successfully sent message:', response)
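# Hedged usage sketch (requires an active Flask app context whose config provides
# 'FIREBASE_CERTIFICATE'; the title/body below are placeholders):
#     with app.app_context():
#         firebase_send_notify(title='Build finished', body='All tests passed', topic='all')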
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name='Great Circle v2',
ext_modules=cythonize("great_circle_v2.pyx"),
)
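# Typical build invocation for this extension, run from the directory that
# contains great_circle_v2.pyx:
#     python setup.py build_ext --inplace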
|
# Generated by Django 3.0.8 on 2020-09-27 20:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MainComponent', '0010_auto_20200927_2327'),
]
operations = [
migrations.AlterField(
model_name='avaibleinternship',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
|
from .data import ITestData
from .dataset import ITestDataset
__all__ = ['ITestData', 'ITestDataset']
|
from protocols.forms import forms
from core.utils import TIME_UNITS
class ElectrophoreseForm(forms.VerbForm):
name = "Electrophorese"
slug = "electrophorese"
has_machine = True
    model = forms.CharField(required=False, label='machine_model')
    min_voltage = forms.IntegerField(required=False)
    max_voltage = forms.IntegerField(required=False)
    voltage_units = forms.CharField(initial='volts')
|
import pytest
from hades_logs.parsing import parse_vlan as parse, ParsingError
def test_correct_untagged():
assert parse('"2hades-unauth"') == "hades-unauth (untagged)"
def test_correct_untagged_unstriped():
assert parse("2Wu5") == "Wu5 (untagged)"
def test_correct_tagged():
assert parse("1toothstone") == "toothstone (tagged)"
def test_bad_taggedness_raises_parsingerror():
with pytest.raises(ParsingError):
parse('"3some-vlan"')
def test_empty_name_raises_parsingerror():
with pytest.raises(ParsingError):
parse('"2"')
|
import bisect
import random
class Solution(object):
def __init__(self, w):
"""
:type w: List[int]
"""
self.prefix = w[:]
for i in range(1, len(w)):
self.prefix[i] += self.prefix[i-1]
self.MAX = self.prefix[-1]
def pickIndex(self):
"""
:rtype: int
"""
return bisect.bisect(self.prefix, random.randrange(self.MAX))
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
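# Worked example: for w = [1, 3], prefix = [1, 4] and MAX = 4, so
# random.randrange(4) yields 0..3; bisect maps 0 -> index 0 and 1, 2, 3 -> index 1,
# giving the intended 25% / 75% pick weights.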
|
""" Handles webhooks from payment backend """
import time
from django.http import HttpResponse
from .models import Order
from .reports import send_ticket_pdf_email
from .tickets import create_tickets_for_order
# pylint: disable=W0703
# Broad exception: Any error should cause orders to be deleted
# Largely based on the boutique ado project
class StripeWHHandler:
""" Handles stripe webhooks """
def __init__(self, request):
self.request = request
@staticmethod
def handle_event(event):
""" Handle a generic/unknown/unexpected webhook event """
return HttpResponse(
content=f'Unhandled webhook received: {event["type"]}',
status=200
)
def handle_payment_intent_succeeded(self, event):
""" Handle the payment_intent.succeeded webhook from Stripe """
intent = event.data.object
pid = intent.id
basket = intent.metadata.basket
#save_to_profile = intent.metadata.save_to_profile
billing_details = intent.charges.data[0].billing_details
order_total = round(intent.charges.data[0].amount / 100, 2)
# Update profile information if save_info was checked
profile=None
order_exists = False
attempt = 1
while attempt <= 5:
try:
order = Order.objects.get(
full_name__iexact=billing_details.name,
email__iexact=billing_details.email,
phone_number__iexact=billing_details.phone,
order_total=order_total,
original_basket=basket,
stripe_pid=pid,
)
order_exists = True
break
except Order.DoesNotExist:
attempt += 1
time.sleep(1)
if order_exists:
send_ticket_pdf_email(self.request, order)
return HttpResponse(
content=f'Webhook received: {event["type"]} | SUCCESS: Verified \
order already in database', status=200)
order = None
try:
order = Order.objects.create(
full_name=billing_details.name,
user_profile=profile,
email=billing_details.email,
phone_number=billing_details.phone,
original_basket=basket,
stripe_pid=pid,
)
create_tickets_for_order(order)
except Exception as error:
if order:
order.delete()
return HttpResponse(
content=f'Webhook received: {event["type"]} | ERROR: {error}',
status=500)
send_ticket_pdf_email(self.request, order)
return HttpResponse(
content=f'Webhook received: {event["type"]} | SUCCESS: Created order in webhook',
status=200)
@staticmethod
def handle_payment_intent_payment_failed(event):
""" Handle the payment_intent.payment_failed webhook from Stripe """
return HttpResponse(
content=f'Webhook received: {event["type"]}',
status=200
)
|
import gym
import numpy as np
import math
import cv2
from gym.spaces.box import Box
def resize(src, new_size):
src = src.astype("float32")
H, W, C = src.shape
new_H, new_W = new_size
dst = np.zeros((new_H, new_W, C))
ratio_h = H / new_H
ratio_w = W / new_W
for i in range(new_H):
src_y = ratio_h * i
y = math.floor(src_y)
v = src_y - y
if y < 0:
y = 0
v = 0
elif y >= H - 1:
y = H - 2
v = 1
for j in range(new_W):
src_x = ratio_w * j
x = math.floor(src_x)
u = src_x - x
if x < 0:
x = 0
u = 0
elif x >= W - 1:
x = W - 2
u = 1
dst[i][j] = (1-v) * (1-u) * src[y][x] + v * (1-u) * src[y+1][x] \
+ (1-v) * u * src[y][x+1] + v * u * src[y+1][x+1]
return dst.astype("uint8")
# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
env = gym.make(env_id)
env = AtariRescale42x42(env)
env = NormalizedEnv(env)
return env
def _process_frame42(frame):
frame = frame[34:34 + 160, :160]
# Resize by half, then down to 42x42 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (80, 80))
frame = cv2.resize(frame, (42, 42))
frame = frame.mean(2, keepdims=True)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.moveaxis(frame, -1, 0)
return frame
class AtariRescale42x42(gym.ObservationWrapper):
def __init__(self, env=None):
super(AtariRescale42x42, self).__init__(env)
self.observation_space = Box(0.0, 1.0, [1, 42, 42])
def observation(self, observation):
return _process_frame42(observation)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def observation(self, observation):
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + \
observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + \
observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
return (observation - unbiased_mean) / (unbiased_std + 1e-8)
|
import unittest
from year2021.python.day6.day6_func import *
class TestDay6(unittest.TestCase):
def test_correct_spawn_80(self):
# Arrange
fishes = [0, 1, 1, 2, 1, 0, 0, 0, 0]
spawn = FishSpawn()
# Act
numberOfFishes80Days = spawn.spawn(fishes, 80)
# Assert
self.assertEqual(5934, numberOfFishes80Days)
def test_correct_spawn_18(self):
# Arrange
fishes = [0, 1, 1, 2, 1, 0, 0, 0, 0]
spawn = FishSpawn()
# Act
numberOfFishes18Days = spawn.spawn(fishes, 18)
# Assert
self.assertEqual(26, numberOfFishes18Days)
def test_correct_init(self):
# Arrange
inputString = "3,4,3,1,2"
# Act
fishes = FishCreator.initFishes(inputString)
        # Assert
self.assertEqual([0, 1, 1, 2, 1, 0, 0, 0, 0], fishes)
def test_correct_lanternfish_count_same(self):
# Arrange
fishes = [0, 1, 1, 2, 1, 0, 0, 0, 0]
spawn = FishSpawn()
# Act
numberOfFishes18Days = spawn.spawn(fishes, 18)
numberOfFishes80Days = spawn.spawn(fishes, 80)
# Assert
self.assertEqual(26, numberOfFishes18Days)
self.assertEqual(5934, numberOfFishes80Days)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from color import generateColorFunction
def on(msg):
return generateColorFunction('green')(msg)
def off(msg):
return generateColorFunction('red')(msg)
def notice(msg):
return generateColorFunction('purple')(msg)
def hint(msg):
return generateColorFunction('yellow')(msg)
def success(msg):
return generateColorFunction('green')(msg)
def warn(msg):
return generateColorFunction('yellow')(msg)
def error(msg):
return generateColorFunction('red')(msg)
def system(msg):
return generateColorFunction('light-red')(msg)
def exit(msg):
return generateColorFunction('red')(msg)
def breakpoint(msg):
return generateColorFunction('yellow')(msg)
def signal(msg):
return generateColorFunction('bold,red')(msg)
def prompt(msg):
return generateColorFunction('bold,red')(msg)
|
"""packer_builder/specs/builders/vmware.py"""
def vmware_builder(**kwargs):
"""VMware specific builder specs."""
# Setup vars from kwargs
builder_spec = kwargs['data']['builder_spec']
distro = kwargs['data']['distro']
vagrant_box = kwargs['data']['vagrant_box']
builder_spec.update({
'type': 'vmware-iso',
'disk_adapter_type': '{{ user `disk_adapter_type` }}',
'disk_type_id': 0,
'version': '10',
'vmx_data': {
'ethernet0.pciSlotNumber': '32'
},
'vmx_remove_ethernet_interfaces': True
})
# Define OS type map for distro to guest OS type
os_type_map = {'alpine': 'other3xlinux-64', 'centos': 'centos-64',
'debian': 'debian8-64', 'fedora': 'fedora-64',
'freenas': 'FreeBSD-64', 'ubuntu': 'ubuntu-64'}
# Lookup distro OS type
guest_os_type = os_type_map[distro]
# If FreeNAS, add storage devices if Vagrant to ensure we can provision
if distro == 'freenas' and vagrant_box:
builder_spec.update(
{'disk_additional_size': ['{{ user `disk_size` }}']})
builder_spec.update({'guest_os_type': guest_os_type})
return builder_spec
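# Hedged usage sketch showing the expected `kwargs` shape (values are illustrative):
#     spec = vmware_builder(data={
#         'builder_spec': {},
#         'distro': 'ubuntu',
#         'vagrant_box': False,
#     })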
|
import pyparsing as pp
import base64
def parse_receipt( text ):
    ''' Given a plist-based receipt from Apple's StoreKit,
        return a native Python data structure.
'''
# Set up our grammar
identifier = pp.Literal('"').suppress() + pp.Word(pp.alphanums+"-_") + pp.Literal('"').suppress()
value = pp.Literal('"').suppress() + pp.Word(pp.alphanums+"-_/=+ '}{:.") + pp.Literal('"').suppress()
cmplx = pp.Forward()
expr = pp.Group( identifier + pp.Literal("=").suppress() + pp.Combine( value|cmplx ) + pp.Literal(";").suppress() )
gram = "{" + pp.OneOrMore( expr ) + "}"
cmplx << gram
scope = {}
for tok in gram.parseString(text):
if len(tok) == 2:
k = tok[0]
if k == 'purchase-info':
# recurse
                v = parse_receipt( base64.b64decode( tok[1] ).decode('ascii') )
else:
v = tok[1]
scope[k] = v
else:
if tok not in ( '{', '}' ):
print "shenanigans", tok
return scope
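# Hedged illustration of the plist-style text this grammar accepts (field names and
# values are placeholders; 'purchase-info' holds a base64-encoded block of the same form):
#     {
#         "signature" = "AbCd0123+/=";
#         "purchase-info" = "eyBmYWtlIH0=";
#         "pod" = "100";
#     }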
if __name__ == '__main__':
import sys
inp = open(sys.argv[1]).read()
data = parse_receipt( inp )
from pprint import pprint
pprint( data )
|
"""Generate a prediction for a given salient2's submodel and a given deadline date
Example:
$ python src/models/salient2/predict_keras.py -d 20200107 -sn d2wk_cop_sst_20170201
$ python src/models/salient2/predict_keras.py -d 20200121 -sn d2wk_cop_sst_mei_20190731 -r us
Named args:
--deadline_date (-d): official deadline for submission.
    --submodel_name (-sn): string consisting of the ground truth variable ids used for training and the date of the last training example.
        A submodel_name is a concatenation of 3 strings, one from each of the categories below:
ground truth variables : "d2wk_cop_sst"
suffixes: "", "mei", "mjo", "mei_mjo"
end_date: "20060726", "20070725", "20080730", "20090729", "20100727", "20110726", "20120731", "20130730", "20140729", "20150728", "20160727", "20170201", "20180725" or "20190731"
examples of submodel_name are: "d2wk_cop_sst_20060726", "d2wk_cop_sst_mei_20060726"
--region (-r): string consisting of the spatial region on which to train the model;
        either 'us' to use the U.S. continental bounding box for the output data
        or 'contest' to use the contest region bounding box (default).
"""
import re
import os
import glob
import keras
import pickle
import argparse
import itertools
import subprocess
import numpy as np
import pandas as pd
from os.path import isfile
from netCDF4 import Dataset
from datetime import datetime, timedelta
from pkg_resources import resource_filename
from subseasonal_toolkit.utils.experiments_util import pandas2hdf
from subseasonal_toolkit.utils.general_util import make_directories, printf
from subseasonal_toolkit.utils.experiments_util import get_target_date as get_target_date_eval
from subseasonal_toolkit.models.salient2.salient2_util import ONE_DAY, ONE_WEEK, WEEKS, dir_train_data, dir_predict_data, dir_train_results, dir_submodel_forecasts, year_fraction, get_target_date
def read_with_lock(filename):
"""Open an hdf file with a lock and read.
Args:
filename (str): path of hdf file to be read with a lock.
Returns:
        the loaded pandas dataframe.
"""
#with FileLock(filename+'lock'):
# df = pd.read_hdf(filename)
df = pd.read_hdf(filename)
subprocess.call(f"rm {filename}lock", shell=True)
return df
def generate_windows(x):
"""generates a sliding window over the data of length window_size.
Args:
x: array of input features used the NN
Returns:
reshaped array of input features so that each example is a concatenation of the past 10 weeks of data.
"""
window_size = 10
result = []
for i in range(x.shape[0]+1-window_size):
result.append(x[i:i+window_size, ...])
return np.stack(result)
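# Worked example: with window_size = 10, an input x of shape (52, n_features) yields
# 52 + 1 - 10 = 43 overlapping windows, i.e. an output of shape (43, 10, n_features).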
def compile_input(submodel_name, datestr):
"""Compile input data to be used by NN.
Args:
submodel_name (str): string consisting of the ground truth variable
ids used for training and the date of the last training example.
datestr (str): YYMMDD string of submission date.
Returns:
        input_data: array of input features used to generate predictions by the NNs.
"""
#mei and mjo indices are not included by default
i_mei = "mei" in submodel_name
i_mjo = "mjo" in submodel_name
#get gt_var:
gt_var = submodel_name[:-9]
if "mei" in gt_var:
gt_var = gt_var[:gt_var.index("_mei")]
if "mjo" in gt_var:
gt_var = gt_var[:gt_var.index("_mjo")]
#target_date refers to submission date as in original salient scripts
target_date = datetime.strptime(datestr, '%Y%m%d')
# load date data
date_data_file = os.path.join(dir_train_data, "date.pickle")
date_vectors = pickle.load(open(date_data_file, 'rb'))
date_vectors_all = sorted([d[0] for d in date_vectors])
end_date_sn = datetime.strptime(submodel_name[-8:], '%Y%m%d')
date_vectors = [d for d in date_vectors_all if (d.astype(object).year<=end_date_sn.year) and (d.astype(object).month<=end_date_sn.month) and (d.astype(object).day<=end_date_sn.day)]
last_i = len(date_vectors)
# load training data
# load sst data
if "sst" in submodel_name:
training_sst_data_file = os.path.join(dir_train_data, "sst.pickle")
training_sst_vectors_all = pickle.load(open(training_sst_data_file, 'rb'))
# load time data
if i_mei:
mei_data_file = os.path.join(dir_train_data, "mei.pickle")
training_mei_vectors_all = pickle.load(open(mei_data_file, 'rb'))
# load time data
if i_mjo:
mjo_data_file = os.path.join(dir_train_data, "mjo.pickle")
training_mjo_vectors_all = pickle.load(open(mjo_data_file, 'rb'))
# load time data
training_time_data_file = os.path.join(dir_train_data, "time.pickle")
training_time_vectors = pickle.load(open(training_time_data_file, 'rb'))
training_time_vectors_all = np.reshape(training_time_vectors,(training_time_vectors.shape[0],1))
# account for early train stop submodels
if "sst" in submodel_name:
training_sst_vectors = training_sst_vectors_all[:last_i,:]
training_time_vectors = training_time_vectors_all[:last_i,:]
## Load input data to generate a prediction
#input predict data is organized in models/salient2/predict-data directories named
#after the center values (Wednesdays) of the submission week
if target_date.weekday() == 1:
target_date_predict_data = target_date + ONE_DAY
datestr_predict_data = datetime.strftime(target_date_predict_data, "%Y%m%d")
elif target_date.weekday() == 2:
target_date_predict_data = target_date
datestr_predict_data = datetime.strftime(target_date_predict_data, "%Y%m%d")
    else:
        printf(f"{target_date} is an invalid submission date. \
            Submission date should be a Tuesday for contest gt_ids or a Wednesday for U.S. and east gt_ids.")
        raise ValueError(f"Invalid submission date: {target_date}")
#******************************************************************************
# set input dates vector corresponding to the submission date
#******************************************************************************
date_vectors_all = [d.astype(datetime) for d in date_vectors_all]
date_vectors_all = [datetime(d.year, d.month, d.day) for d in date_vectors_all]
# need input data up through prior Saturday (weekday #5)
input_end_date = target_date_predict_data - timedelta(days=((target_date_predict_data.weekday() - 5) % 7))
input_end_date = datetime(input_end_date.year, input_end_date.month, input_end_date.day)
input_start_date = input_end_date - ONE_WEEK * WEEKS + ONE_DAY
# Create input dates vector consisting of center values (Wednesdays) of the relevant weeks
input_start_date = input_start_date + 3*ONE_DAY
input_end_date = input_end_date - 3*ONE_DAY
# Get input start and end indices
if input_end_date in date_vectors_all:
input_start_date_index = date_vectors_all.index(input_start_date)
input_end_date_index = date_vectors_all.index(input_end_date)
# load sst data
if "sst" in submodel_name:
sst_data_file = os.path.join(dir_predict_data, "d2wk_cop_sst", datestr_predict_data, "sst.pickle")
if isfile(sst_data_file):
sst_vectors = pickle.load(open(sst_data_file, 'rb'))
# input dates vector always contain wednesdays regardless of output starting on a Wednesday or Tuesday
else:
sst_vectors = training_sst_vectors_all[input_start_date_index:input_end_date_index+1,:]
sst_vectors = (sst_vectors - np.amin(training_sst_vectors)) * 1./(np.amax(training_sst_vectors) - np.amin(training_sst_vectors))
data_min, data_max = np.amin(training_sst_vectors), np.amax(training_sst_vectors)
#Load mei data
if i_mei:
mei_data_file = os.path.join(dir_predict_data, "d2wk_cop_sst", datestr_predict_data, "mei.pickle")
if isfile(mei_data_file):
mei_vectors = pickle.load(open(mei_data_file, 'rb'))
# input dates vector always contain wednesdays regardless of output starting on a Wednesday or Tuesday
else:
mei_vectors = training_mei_vectors_all[input_start_date_index:input_end_date_index+1,:]
mei_vectors = (mei_vectors - data_min) * 1./(data_max - data_min)
#Load mjo data
if i_mjo:
mjo_data_file = os.path.join(dir_predict_data, "d2wk_cop_sst", datestr_predict_data, "mjo.pickle")
if isfile(mjo_data_file):
mjo_vectors = pickle.load(open(mjo_data_file, 'rb'))
# input dates vector always contain wednesdays regardless of output starting on a Wednesday or Tuesday
else:
mjo_vectors = training_mjo_vectors_all[input_start_date_index:input_end_date_index+1,:]
mjo_vectors = (mjo_vectors - data_min) * 1./(data_max - data_min)
# compile input data
if "sst" in submodel_name:
input_data = sst_vectors
if i_mei:
input_data = np.concatenate((input_data, mei_vectors), axis=1)
if i_mjo:
input_data = np.concatenate((input_data, mjo_vectors), axis=1)
return input_data
def add_i_time(input_data, submodel_name, datestr, i_time=False):
"""add i_time feature to input data to be used by NN.
Args:
        input_data (float): array of input features used to generate predictions by the NNs.
submodel_name (str): string consisting of the ground truth variable
ids used for training and the date of the last training example.
datestr (str): YYMMDD string of submission date.
i_time (bool): if True, include time vector as an input feature (default: False).
Returns:
        input_data: array of input features used to generate predictions by the NNs.
"""
#get input start and end date
target_date = datetime.strptime(datestr, '%Y%m%d')
#input predict data is organized in models/salient2/predict-data directories named
#after the center values (Wednesdays) of the submission week
if target_date.weekday() == 1:
target_date_predict_data = target_date + ONE_DAY
datestr_predict_data = datetime.strftime(target_date_predict_data, "%Y%m%d")
elif target_date.weekday() == 2:
target_date_predict_data = target_date
datestr_predict_data = datetime.strftime(target_date_predict_data, "%Y%m%d")
    else:
        printf(f"{target_date} is an invalid submission date. \
            Submission date should be a Tuesday for contest gt_ids or a Wednesday for U.S. and east gt_ids.")
        raise ValueError(f"Invalid submission date: {target_date}")
# need input data up through prior Saturday (weekday #5)
input_end_date = target_date_predict_data - timedelta(days=((target_date_predict_data.weekday() - 5) % 7))
input_end_date = datetime(input_end_date.year, input_end_date.month, input_end_date.day)
input_start_date = input_end_date - ONE_WEEK * WEEKS + ONE_DAY
# Create input dates vector consisting of center values (Wednesdays) of the relevant weeks
input_start_date = input_start_date + 3*ONE_DAY
input_end_date = input_end_date - 3*ONE_DAY
#load time data
if i_time:
time_vectors = np.zeros((WEEKS, 1))
day = input_start_date
for i in range(time_vectors.shape[0]):
time_vectors[i, 0] = year_fraction(day)
day += ONE_WEEK
# compile input data
if i_time:
input_data = np.concatenate((input_data, time_vectors), axis=1)
return input_data
def main():
#"""
#example usage to generate original salient2 forecasts
# python src/models/salient2/predict_keras.py -d 20200811 -sn d2wk_cop_sst_20170201
# input arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--date', help='Submission date')
parser.add_argument('-sn', '--submodel_name', default='d2wk_cop_sst_mei_mjo_20190731')
parser.add_argument('--region', '-r', default='contest')
args = parser.parse_args()
# set args
submodel_name = args.submodel_name
target_date, datestr = get_target_date('Generate predictions', args.date)
region = args.region
# define usa flag
usa = region == 'us'
# Setup directory where the submodel's weights are saved.
dir_weights = os.path.join(dir_train_results, submodel_name, f"{region}_{submodel_name}")
# map locations with points of interest
mask_f = os.path.join("data", "masks", "us_mask.nc")
mask_ds = Dataset(mask_f) if usa else Dataset(mask_f.replace("us_", "fcstrodeo_"))
mask_lat = mask_ds.variables['lat'][:]
mask_lon = mask_ds.variables['lon'][:]
points_idx = np.where(mask_ds.variables['mask'][:])
points = np.array((points_idx[0], points_idx[1])).T
# Import and run models
model_names = glob.glob(os.path.join(dir_weights, "k_model_*.h5"))
N_models = len(model_names)
# set grid cells
num_of_gc = 862 if usa else 514
# Create empty template array for predictions to be generated
precip_wk34_predictions = np.zeros((N_models,num_of_gc))
precip_wk56_predictions = np.zeros((N_models,num_of_gc))
temp_wk34_predictions = np.zeros((N_models,num_of_gc))
temp_wk56_predictions = np.zeros((N_models,num_of_gc))
# Compile input data
input_data_all = compile_input(submodel_name, datestr)
# Generate predictions for each of the top 10 selected NN members in the ensemble
for i in range(N_models):
# Load NN ensemble member
model_name = model_names[i]
model = keras.models.load_model(model_name)
        # If the NN was trained on time as an input feature, add time to the compiled input data
result = re.search('time(.*).h5', model_name)
input_set = int(result.group(1))
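        # e.g. a weights file whose name ends in "...time1.h5" yields input_set == 1, so the time
        # feature is appended below, while "...time0.h5" leaves the inputs unchanged (the exact
        # filename convention is inferred from the regex above)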
input_data = add_i_time(input_data_all, submodel_name, datestr, i_time=bool(input_set))
input_data = generate_windows(input_data)
# Generate predictions
prediction_i = model.predict(input_data)
        # predictions for a 2-week target period are the accumulation of precip
        # and the mean temperature over the target period
prediction_i = np.reshape(prediction_i,(8,num_of_gc))
precip_wk34_predictions[i,:] = np.sum(prediction_i[0:2,:], axis=0)
precip_wk56_predictions[i,:] = np.sum(prediction_i[2:4,:], axis=0)
temp_wk34_predictions[i,:] = np.mean(prediction_i[4:6,:], axis=0)
temp_wk56_predictions[i,:] = np.mean(prediction_i[6:8,:], axis=0)
    # average the per-task predictions over the NN ensemble members
    # clip precip predictions to zero since precipitation cannot be negative
precip_wk34_prediction = np.mean(precip_wk34_predictions, axis=0)
precip_wk34_prediction = precip_wk34_prediction.clip(0)
precip_wk56_prediction = np.mean(precip_wk56_predictions, axis=0)
precip_wk56_prediction = precip_wk56_prediction.clip(0)
temp_wk34_prediction = np.mean(temp_wk34_predictions, axis=0)
temp_wk56_prediction = np.mean(temp_wk56_predictions, axis=0)
# Get target date objects
deadline_date = datetime.strptime(datestr, '%Y%m%d')
target_date_34w = get_target_date_eval(datestr, "34w")
target_date_56w = get_target_date_eval(datestr, "56w")
# Get target date strings
target_date_str_34w = datetime.strftime(target_date_34w, '%Y%m%d')
target_date_str_56w = datetime.strftime(target_date_56w, '%Y%m%d')
# Get lat, lon and pred template arrays
template_f = resource_filename("subseasonal_toolkit", os.path.join("models", "salient2", "data", "apcp_week34_template.nc"))
template_ds = Dataset(template_f)
template_lat = template_ds.variables["lat"][:]
template_lon = template_ds.variables["lon"][:]
template_var = template_ds.variables["apcp_week34"][:]
# Determine variables, horizons and corresponding predictions to be saved
gt_vars = ["precip", "tmp2m"]
horizons = ["34w", "56w"]
predictions = [precip_wk34_prediction, precip_wk56_prediction, temp_wk34_prediction, temp_wk56_prediction]
gt_prefix = f"{region}_"
tasks = [f"{gt_prefix}{g}_{t}" for g, t in itertools.product(gt_vars, horizons)]
# Format predictions to standard pandas contest prediction format.
for task, prediction in zip(tasks, predictions):
out_dir = os.path.join(dir_submodel_forecasts, submodel_name, task)
make_directories(out_dir)
pred_file = os.path.join(dir_train_data, f"{region}_latlons.h5")
pred = read_with_lock(pred_file)
#pred = pd.read_hdf(pred_file)
if "34w" in task:
pred["start_date"] = target_date_34w
out_file = os.path.join(out_dir, f"{task}-{target_date_str_34w}.h5")
elif "56" in task:
pred["start_date"] = target_date_56w
out_file = os.path.join(out_dir, f"{task}-{target_date_str_56w}.h5")
if usa:
pred["pred"] = prediction
else:
pred["pred"] = np.nan
a = template_var
# save predictions into array
for loc in range(len(prediction)):
index = points[loc]
a[tuple(index)] = prediction[loc]
for i in range(len(pred)):
lat_i = np.argwhere(template_lat == pred["lat"].iloc[i])[0][0]
lon_i = np.argwhere(template_lon == pred["lon"].iloc[i])[0][0]
pred["pred"].iloc[i] = a[lat_i, lon_i]
# Save prediction files
if pred.isnull().values.sum()==0:
pandas2hdf(pred, out_file, format='table')
if __name__ == '__main__':
main()
|
import contextlib
import logging
import re
import traceback
from datetime import datetime as dt
from functools import wraps
from typing import Optional
import pandas as pd
from influxdb.exceptions import InfluxDBClientError
from app.models import Series
from app.vendors.influx import InfluxDB
from config import resolve_config
@contextlib.contextmanager
def create_influx():
"""
Contextmanager that will create and teardown a session.
"""
config = resolve_config()
influx = InfluxDB(host=config.INFLUXDB_HOST, database=config.INFLUXDB_DATABASE)
yield influx
influx.close()
def provide_influx(func):
@wraps(func)
def wrapper(*args, **kwargs):
influx_name = "influx"
func_params = func.__code__.co_varnames
session_in_args = influx_name in func_params and func_params.index(
influx_name
) < len(args)
session_in_kwargs = influx_name in kwargs
if session_in_kwargs or session_in_args:
return func(*args, **kwargs)
else:
with create_influx() as influx:
kwargs[influx_name] = influx
return func(*args, **kwargs)
return wrapper
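# Usage sketch (illustrative, not part of the original module): any function that accepts an
# ``influx`` keyword argument gets a client created and injected automatically when none is
# passed in, as the decorated helpers below demonstrate. A hypothetical example:
#
# @provide_influx
# def count_measurements(influx: InfluxDB = None) -> int:
#     return len(influx.measurement.all())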
@provide_influx
def init_influx_db(database: str, influx: InfluxDB = None) -> None:
influx.database.create(database)
@provide_influx
def get_all_pairs(influx: InfluxDB = None) -> list:
pairs = influx.measurement.all()
pairs = [re.match("[A-Z]+", m["name"])[0] for m in pairs]
pairs = [p for p in set(pairs) if p[:4] != "TEST"]
return pairs
@provide_influx
def get_candles(
pair: str,
timeframe: str,
limit: int,
last_timestamp: Optional[int] = None,
influx: InfluxDB = None,
) -> Series:
measurement = pair + timeframe
if not last_timestamp:
last_timestamp = 9000000000
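        # 9000000000 is an arbitrarily large epoch timestamp (~year 2255), so the
        # "WHERE time < ..." clause below effectively imposes no upper bound when no
        # last_timestamp is supplied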
q = f"""
SELECT * FROM (
SELECT
median(close) AS close,
median(high) AS high,
median(low) AS low,
median(open) AS open,
median(volume) AS volume
FROM {measurement}
WHERE ("exchange" = 'binance'
OR "exchange" = 'bitfinex'
OR "exchange" = 'poloniex'
OR "exchange" = 'bittrex')
GROUP BY time({timeframe}) FILL(none)
)
WHERE time < {last_timestamp}s
ORDER BY time DESC
LIMIT {limit}
"""
result = influx.query(q, epoch="s")
df = pd.DataFrame(result.get_points(measurement=measurement))
if df.shape[0] == 0:
return Series(pair=pair, timeframe=timeframe)
candles = Series(
pair=pair,
timeframe=timeframe,
time=df.time.values[::-1],
open=df.open.values[::-1],
close=df.close.values[::-1],
high=df.high.values[::-1],
low=df.low.values[::-1],
volume=df.volume.values[::-1],
)
return candles
@provide_influx
def check_last_timestamp(
measurement: str, minus: int = 10, influx: InfluxDB = None
) -> int:
"""
Returns timestamp of last point in measurement.
"""
result = influx.query(
f"SELECT * FROM {measurement} ORDER BY time DESC LIMIT 1;", epoch="s"
)
df = pd.DataFrame(result.get_points(measurement=measurement))
if df.shape == (0, 0):
# Return something old enough
return 1518176375
return int(df.time.values) - minus
@provide_influx
def check_exchanges(pair: str, influx: InfluxDB = None) -> list:
result = influx.measurement.tag_values(measurement=f"{pair}1h", key="exchange")
return list(result)
@provide_influx
def insert_candles(
candles: list, time_precision: str = None, influx: InfluxDB = None
) -> bool:
"""
Inserts point into a given measurement.
"""
result = influx.write_points(candles, time_precision=time_precision)
return result
@provide_influx
def downsample(pair: str, from_tf: str, to_tf: str, influx: InfluxDB = None) -> None:
time_now = dt.now().strftime("%Y-%m-%dT%H:%M:%SZ")
query = f"""
SELECT
first(open) AS open,
max(high) AS high,
min(low) AS low,
last(close) AS close,
sum(volume) AS volume
INTO {pair}{to_tf} FROM {pair}{from_tf} WHERE time <= '{time_now}' GROUP BY time({to_tf}), *
"""
try:
influx.query(query)
except InfluxDBClientError:
        logging.error(
            "FAILED %s downsample %s error: \n %s", to_tf, pair, traceback.format_exc()
        )
def downsample_all_timeframes(pair):
downsample(pair, from_tf="30m", to_tf="1h")
downsample(pair, from_tf="1h", to_tf="2h")
downsample(pair, from_tf="1h", to_tf="3h")
downsample(pair, from_tf="1h", to_tf="4h")
downsample(pair, from_tf="1h", to_tf="6h")
downsample(pair, from_tf="1h", to_tf="12h")
downsample(pair, from_tf="1h", to_tf="24h")
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll, Ioan Sucan, Luis G. Torres
from os.path import exists
import os
import sqlite3
import sys
from optparse import OptionParser
plottingEnabled = True
try:
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
except ImportError:
print('Matplotlib or Numpy was not found; disabling plotting capabilities...')
plottingEnabled = False
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens):
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
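# Example (illustrative): readLogValue(f, -1, {0: "Experiment"}) returns the last token of a
# line such as "Experiment my_benchmark", and returns None (rewinding the file position) if
# the first token is not "Experiment".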
def readOptionalLogValue(filevar, desired_token_index, expected_tokens={}):
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens={}):
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result is None:
raise Exception("Unable to read " + name)
return result
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:
            raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:
            raise Exception("Expected token |>>> missing")
return value
def readBenchmarkLog(dbname, filenames, moveitformat):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
if sys.version_info[0] < 3:
conn.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename, 'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname is None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version is None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
# optional experiment properties
nrexpprops = int(readOptionalLogValue(logfile, 0, \
{-2: "experiment", -1: "properties"}) or 0)
expprops = {}
for _ in range(nrexpprops):
entry = logfile.readline().strip().split('=')
nameAndType = entry[0].split(' ')
expprops[nameAndType[0]] = (entry[1], nameAndType[1])
# adding columns to experiments table
c.execute('PRAGMA table_info(experiments)')
columnNames = [col[1] for col in c.fetchall()]
for name in sorted(expprops.keys()):
# only add column if it doesn't exist
if name not in columnNames:
c.execute('ALTER TABLE experiments ADD %s %s' % (name, expprops[name][1]))
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
if moveitformat:
expsetup = readRequiredLogValue("goal name", logfile, -1, {0: "Goal", 1: "name"})
cpuinfo = None
rseed = 0
timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
{-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = 0
else:
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, \
{-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
{-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, \
{-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, \
{-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
if nrrunsOrNone != None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, \
{-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
if numEnumsOrNone != None:
numEnums = int(numEnumsOrNone)
for _ in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() is None:
for j in range(len(enum) - 1):
c.execute('INSERT INTO enums VALUES (?,?,?)', \
(enum[0], j, enum[j + 1]))
# Creating entry in experiments table
experimentEntries = [None, expname, totaltime, timelimit, memorylimit, nrruns, version,
hostname, cpuinfo, date, rseed, expsetup]
for name in sorted(expprops.keys()): # sort to ensure correct order
experimentEntries.append(expprops[name][0])
c.execute('INSERT INTO experiments VALUES (' + ','.join(
'?' for i in experimentEntries) + ')', experimentEntries)
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for _ in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)', \
(plannerName, settings,))
p = c.fetchone()
if p is None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)', \
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if not x or x == 'nan' or x == 'inf' else x \
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' % \
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if not x or x == 'nan' or x == 'inf' else x \
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing '
'ompl::tools::Benchmark::Request::timeBetweenUpdates.')
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [t[0] for t in cur.fetchall()]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [t[0] for t in cur.fetchall() if t[0] != None]
if measurement:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if not measurements:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1, measurements.shape[1]))
ind = range(measurements.shape[1])
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0], \
color=matplotlib.cm.hot(int(floor(i * 256 / numValues))), \
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop=props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0]) < 1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_', ' '))
xtickNames = plt.setp(ax, xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts) > 0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i + width / 2 if typename == 'BOOLEAN' else i + 1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_', ' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % \
(attribute, r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
# case, truncate all data series to length of shortest
# one.
fewestSamples = min(len(time[:]) for time in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if plannerNames:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0], t[1].replace('geometric_', '').replace('control_', '')) \
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
        numRuns = str(numRuns[0]) if len(set(numRuns)) == 1 else ','.join(str(n) for n in numRuns)
        plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump, 'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [str(t[0]) for t in c.fetchall()]
c.close()
last = ['experiments', 'planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION', 'COMMIT', \
'sqlite_sequence', 'CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line:
break
else:
process = True
if not process:
continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"', '`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def computeViews(dbname, moveitformat):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
if moveitformat:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
# kinodynamic paths cannot be simplified (or least not easily),
# so simplification_time may not exist as a database column
elif 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
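# Usage sketch (illustrative): after computeViews("benchmark.db", False), the summary views can
# be queried directly, e.g. with the sqlite3 CLI: SELECT * FROM bestPlannerConfigs;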
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db", \
help="Filename of benchmark database [default: %default]")
parser.add_option("-a", "--append", action="store_true", dest="append", default=False, \
help="Append data to database (as opposed to overwriting an existing database)")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False, \
help="Compute the views for best planner configurations")
if plottingEnabled:
parser.add_option("-p", "--plot", dest="plot", default=None, \
help="Create a PDF of plots")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None, \
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("--moveit", action="store_true", dest="moveit", default=False, \
help="Log files are produced by MoveIt!")
(options, args) = parser.parse_args()
if not options.append and exists(options.dbname) and args:
os.remove(options.dbname)
if args:
readBenchmarkLog(options.dbname, args, options.moveit)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname, options.moveit)
if plottingEnabled and options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from starthinker_ui.recipe.scripts import Script
from starthinker_ui.recipe.colab import script_to_colab
class Command(BaseCommand):
help = 'Generate Templates For Colab'
def handle(self, *args, **kwargs):
with open('%s/tutorials/deploy_colab.md' % settings.UI_ROOT,
'w') as readme_file:
readme_file.write('# Using Scripts As A Colab Notebook\n')
readme_file.write('\n')
readme_file.write(
'All StarThinker recipes and solutions can be run from [Google Collaboratory](https://colab.research.google.com/github/google/starthinker/blob/master). '
)
readme_file.write(
'Also visit the [Solution Gallery](google.github.io/starthinker/) or click a link below to deploy a notebook.\n'
)
readme_file.write('\n')
readme_file.write('## List Of Notebooks\n')
for script in Script.get_scripts():
if script.get_open_source():
readme_file.write('* [%s](%s) - %s\n' %
(script.get_name(), script.get_link_colab(),
script.get_description()))
readme_file.write('---\n')
readme_file.write(
'© 2019 Google Inc. - Apache License, Version 2.0\n')
for script in Script.get_scripts():
if script.get_open_source():
print('Writing: %s/colabs/%s.ipynb' %
(settings.UI_ROOT, script.get_tag()))
with open('%s/colabs/%s.ipynb' % (settings.UI_ROOT, script.get_tag()),
'w') as colab_file:
colab_file.write(
script_to_colab(
script.get_name(),
script.get_description(),
script.get_instructions(),
script.get_tasks(),
))
|
import pytest
from easel.site.defaults import Defaults
from easel.site.errors import MenuConfigError
from easel.site.menus import LinkPage, LinkURL, Spacer
def test__LinkPage__valid() -> None:
config = {
"label": "TestLinkPage",
"links-to": "/test-link-page",
}
link_page = LinkPage(**config)
repr(link_page)
assert link_page.label == "TestLinkPage"
assert link_page.links_to == "/test-link-page"
assert link_page.url == "/test-link-page"
def test__LinkPage__normalize_page_path() -> None:
normalized_page_path = "/test-link-page"
config_00 = {
"label": "TestLinkPage",
"links-to": "./contents/pages/test-link-page",
}
config_01 = {
"label": "TestLinkPage",
"links-to": "./pages/test-link-page",
}
    config_02 = {
        "label": "TestLinkPage",
        "links-to": "./test-link-page",
    }
    link_page_00 = LinkPage(**config_00)
    link_page_01 = LinkPage(**config_01)
    link_page_02 = LinkPage(**config_02)
assert link_page_00.links_to == normalized_page_path
assert link_page_01.links_to == normalized_page_path
assert link_page_02.links_to == normalized_page_path
def test__LinkPage__missing_config() -> None:
config_01 = {
"label": "TestLinkPage",
}
config_02 = {
"links-to": "/test-link-page",
}
with pytest.raises(MenuConfigError):
LinkPage()
with pytest.raises(MenuConfigError):
LinkPage(**config_01)
with pytest.raises(MenuConfigError):
LinkPage(**config_02)
def test__LinkURL__valid() -> None:
config = {
"label": "TestLinkURL",
"url": "www.test-link-url.com",
}
link_url = LinkURL(**config)
repr(link_url)
assert link_url.label == "TestLinkURL"
assert link_url.url == "www.test-link-url.com"
def test__LinkURL__missing_config() -> None:
config_01 = {
"label": "TestLinkPage",
}
config_02 = {
"url": "www.test-link-url.com",
}
with pytest.raises(MenuConfigError):
LinkURL()
with pytest.raises(MenuConfigError):
LinkURL(**config_01)
with pytest.raises(MenuConfigError):
LinkURL(**config_02)
def test__Spacer__valid() -> None:
config = {
"label": "TestSpacer",
"size": Defaults.DEFAULT_SIZE,
}
spacer = Spacer(**config)
repr(spacer)
assert spacer.label == "TestSpacer"
assert spacer.size == Defaults.DEFAULT_SIZE
def test__Spacer__valid_default() -> None:
spacer_default = Spacer()
repr(spacer_default)
    assert spacer_default.label is None
    assert spacer_default.size is None
def test__Spacer__invalid_size() -> None:
config_01 = {
"size": "invalid-size",
}
with pytest.raises(MenuConfigError):
Spacer(**config_01)
|
from rest_framework import routers
from .api import *
router = routers.DefaultRouter()
router.register('terms', TermsViewSet)
router.register('marks', MarksViewSet)
router.register('control_types', ControlTypesViewSet)
router.register('records', RecordsViewSet)
urlpatterns = router.urls
|
"""
Vortex XSPRESS3 class
Python class for Vortex using EPICS area detector IOC.
:platform: Unix
:synopsis: Python class for Vortex with xspress3
.. moduleauthor:: Douglas Araujo<douglas.araujo@lnls.br>
Luciano Carneiro Guedes<luciano.guedes@lnls.br>
"""
from py4syn.epics.StandardDevice import StandardDevice
from py4syn.epics.ICountable import ICountable
from epics import PV
from time import sleep, time
class VortexXspress3(StandardDevice, ICountable):
"""
Class to control Vortex via EPICS.
Examples
--------
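    A minimal usage sketch (the PV prefix 'XSP3' below is hypothetical)::
        vortex = VortexXspress3('vortex', 'XSP3')
        vortex.setCountTime(1.0)
        vortex.startCount()
        vortex.wait()
        counts = vortex.getValue(channel=1)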
"""
RESPONSE_TIMEOUT = 15
WAIT_ACQUIRING = 0.005
def onAcquireChange(self, value, **kw):
self._done = (value == 0)
def __init__(self, mnemonic, pv):
"""
**Constructor**
See :class:`py4syn.epics.StandardDevice`
Parameters
----------
mnemonic : `string`
A mnemonic for the detector
pv : `string`
Base name of the EPICS process variable
"""
super().__init__(mnemonic)
self.pvAcquire = PV(pv + ':Acquire')
self.pvStatus = PV(pv + ':Acquire_RBV', callback=self.onAcquireChange)
self.pvAcquireTime = PV(pv + ':AcquireTime')
self.pvClear = PV(pv + ':ERASE')
self.pvMcaCounters = []
self.pvStatusScan = PV(pv + ':Acquire_RBV.SCAN')
self.pvStatusScan.put(9)
# Channels 1-4
for i in range(1, 5):
for j in range(1, 5):
self.pvMcaCounters.append(PV('{}:C{}_ROI{}:Value_RBV'.format(pv, j, i)))
self.pvClear.put(1, wait=True)
def close(self):
"""
Stops an ongoing acquisition, if any, and puts the EPICS IOC in idle state.
"""
self._done = 1
def getIntensity(self, channel=1):
return self.pvMcaCounters[channel-1].get()
def getValue(self, **kwargs):
if(kwargs):
count = 0
value = self.getIntensity(kwargs['channel'])
while (value==0 and count<3):
sleep(.05)
value = self.getIntensity(kwargs['channel'])
count += 1
return value
def setCountTime(self, t):
"""
Sets the image acquisition time.
Parameters
----------
t : `float`
Acquisition time
"""
self.pvAcquireTime.put(t, wait=True)
def getAcquireTime(self):
return self.pvAcquireTime.get()
def setPresetValue(self, channel, val):
"""
Dummy method to set initial counter value.
"""
pass
def startCount(self):
"""
Starts acquiring
"""
if not self._done:
raise RuntimeError('Already counting')
        self.pvClear.put(1, wait=True)  # clear the ROI values before starting a new acquisition
        self.pvAcquire.put(1)
        self._done = 0  # mark as busy until the Acquire_RBV callback reports completion
def stopCount(self):
"""
Stops acquiring. This method simply calls :meth:`close`.
See: :meth:`close`
"""
self.pvAcquire.put(0)
self.close()
def canMonitor(self):
"""
Returns false indicating that vortex cannot be used as a counter monitor.
"""
return False
def canStopCount(self):
"""
Returns true indicating that vortex has a stop command.
"""
return True
def isCounting(self):
"""
Returns true if the detector is acquiring, or false otherwise.
Returns
-------
`bool`
"""
return not self._done
def wait(self):
"""
Blocks until the acquisition completes.
"""
while not self._done:
sleep(self.WAIT_ACQUIRING)
sleep(0.2)
|
from django.apps import AppConfig
class PicturesConfig(AppConfig):
name = 'pictures'
|
##################FILE NAME: INTERIOR.py########################
#================================================================
# author: Nitish Anand & Jozef Stuijt |
# :Master Student, |
# :Process and Energy Departmemt, |
# :TU Delft, |
# :The Netherlands |
# |
# email : nitish.ug2@gmail.com |
# |
# Description: MOC Routine: position and velocity calculator. |
# : Refer Thesis or Zucrow & Hoffman for the |
# : derivation of equation |
# MODULE Function: |
# a. InteriorPoint() |
# b. InteriorPoint_Prop() |
# c. AxialPoint_Prop() |
# d. Inv_Wall_pnt() |
# e. FlowProp() |
# f. EulerPredictorCorrector() |
# g. Direct_Wall_pnt() |
#================================================================
#
# ShortHands
# EPC: Euler Predictor Corrector
# PisS: PLUS is simple
# MisS: MINUS is simple
from srcMOC.CLASSES import *
import time
import pdb
import math as calc
from srcMOC.MASSCALC import *
try: # check if import possible related to cluster
import matplotlib.pyplot as plt # for plotting routines
imprtplt = True
except ImportError:
    print('\nWarning: Plotting module import unsuccessful\n')
imprtplt = False
#import matplotlib.pyplot as plt
import sys
#---------------------------------------------------------------------------------------------#
#
# Main code to compute the flow property of the third point from two points(P:Plus,M:Minus)
# includes euler corrector predictor step
#
def InteriorPoint(paramP1,paramM1,IN,PisS=0,MisS=0,EPC1=1):
    # WORKS COMPLETELY FINE: COMPLEMENTS THE SOLUTION OF BOOK Ex: 12.5 ZH-Vol2
# Added to read display
#try:INFile = sys.argv[1]
#except:INFile = 'MOC_Config.in'
#IN = ReadUserInput(INFile)
#disp = IN['PlotResults']
try:
IN['File_Name_Plot']
disp = True
except KeyError: disp = False
#disp = IN['PlotResults']
#if (disp!='YES' and disp!='NO'):
# raise IOError('Invalid Input: PlotResults')
# sys.exit()
flag = 0;
#Check if Point on AXIS
if abs(abs(paramP1.y)-abs(paramM1.y))<1e-5:
param1=AxisPoint_Prop(paramP1,paramM1,IN)
else:
param1=InteriorPoint_Prop(paramP1,paramM1,IN,'p',PisS,MisS)
# Check: Euler Predictor Corrector Yes/No
if EPC1==1:
param2 = EulerPredCorr(paramP1,paramM1,param1,IN)
else:
param2 = param1
    # PLOTTING ROUTINE: Red: Positive Characteristics, Green: Negative Characteristics
# Jozef: Added plot toggle
#if (disp == 'YES'):
if (disp and imprtplt):
plt.figure(0)
if (paramP1.y>0 or paramM1.y>0 or param2.y>0):
plt.plot([paramP1.x,param2.x],[paramP1.y,param2.y],'r')
plt.plot([paramM1.x,param2.x],[paramM1.y,param2.y],'g')
return param2
#---------------------------------------------------------------------------------------------#
#
# calculates the flow properties of the 3rd point (intersection of 2 characteristics)
#
def InteriorPoint_Prop(paramP,paramM,IN,flag='c',PisS=0,MisS=0):
NozTyp = str(IN['NozzleType'])
param = Points()
paramP.ThermoProp(IN)
# For now always PLANAR
if NozTyp == 'PLANAR':
delta = 0
else:
delta = 1
paramP.theta = calc.atan(paramP.v/paramP.u)
try:paramP.alpha = calc.asin(paramP.a/paramP.V)
except:
#print('*** Error: Mach less than 1 ***\nCHECK INPUT VARIABLE')
#pdb.set_trace()
raise ValueError('Mach less than 1 ***\nCHECK INPUT VARIABLE')
    #Check if PLUS Characteristics is a Simple Wave
if PisS == 0:
paramP.lam = calc.tan(paramP.theta+paramP.alpha)
else:
paramP.lam = calc.tan(0+paramP.alpha)
paramP.Q = paramP.u**2 - paramP.a**2
paramP.R1 = (2*paramP.u*paramP.v) - (paramP.Q*paramP.lam)
#If Axial Point
if abs(paramP.y)<=1e-9:
paramP.S = 0
else:
paramP.S = delta*(paramP.a**2 * paramP.v)/paramP.y
paramM.ThermoProp(IN)
paramM.theta = calc.atan(paramM.v/paramM.u)
paramM.alpha = calc.asin(paramM.a/paramM.V)
    #Check if MINUS Characteristics is a Simple Wave
if MisS == 0:
paramM.lam = calc.tan(paramM.theta-paramM.alpha)
else:
paramM.lam = calc.tan(0-paramM.alpha)
paramM.Q = paramM.u**2 - paramM.a**2
paramM.R1 = (2*paramM.u*paramM.v) - (paramM.Q*paramM.lam)
#If Axial Point
if abs(paramM.y)<=1e-9:
paramM.S=0
else:
paramM.S = delta*(paramM.a**2 * paramM.v)/paramM.y
#Calculate Interior Point using P and M Point
if abs(paramM.y)<=1e-9:
param.x=paramM.x
param.y = (paramP.lam*(param.x-paramP.x))+paramP.y
elif abs(paramP.y)<=1e-9:
param.x=paramP.x
param.y=(paramM.lam*(param.x-paramM.x))+paramM.y
else:
[param.x,param.y] = Intersection([paramM.x,paramM.y],[paramP.x,paramP.y],[paramM.lam,paramP.lam])
#Calculating U and V for solution point
paramP.T = paramP.S*(param.x-paramP.x)+(paramP.Q*paramP.u)+(paramP.R1*paramP.v)
paramM.T = paramM.S*(param.x-paramM.x)+(paramM.Q*paramM.u)+(paramM.R1*paramM.v)
if abs(paramM.Q)<=1e-9:
param.u = paramP.u
param.v = 0
elif abs(paramP.Q)<=1e-9:
param.u = paramM.u
param.v = 0
else:
if abs(paramP.y)<=1e-9:
param.v=0
elif abs(paramM.y)<=1e-9:
param.v=0
else:
if abs(((paramP.T*paramM.Q)-(paramM.T*paramP.Q)))<=1e-9:
param.v = 0
elif abs(((paramM.T*paramP.Q)-(paramP.T*paramM.Q)))<=1e-9:
param.v=0
else:
param.v = ((paramP.T*paramM.Q)-(paramM.T*paramP.Q))/((paramM.Q*paramP.R1)-(paramP.Q*paramM.R1))
param.u = (paramM.T - (paramM.R1*param.v))/paramM.Q
#Return x,y,u,v for solution point
return param
#---------------------------------------------------------------------------------------------#
#
# Calculates the flow properties of the Axis Point
#
def AxisPoint_Prop(paramP,paramM,IN):
NozTyp = str(IN['NozzleType'])
if NozTyp == 'PLANAR':
delta = 0
else:
delta = 1
param = Points()
# PLUS Characteristics
paramP.ThermoProp(IN)
paramP.theta = calc.atan(paramP.v/paramP.u)
paramP.alpha = calc.asin(paramP.a/paramP.V)
paramP.lam = calc.tan(paramP.theta+paramP.alpha)
paramP.Q = paramP.u**2 - paramP.a**2
paramP.R1 = (2*paramP.u*paramP.v) - (paramP.Q*paramP.lam)
paramP.S = delta*(paramP.a**2 * paramP.v)/paramP.y
# MINUS Characteristics
paramM.ThermoProp(IN)
paramM.theta = calc.atan(paramM.v/paramM.u)
paramM.alpha = calc.asin(paramM.a/paramM.V)
paramM.lam = calc.tan(paramM.theta-paramM.alpha)
paramM.Q = paramM.u**2 - paramM.a**2
paramM.R1 = (2*paramM.u*paramM.v) - (paramM.Q*paramM.lam)
paramM.S = delta*(paramM.a**2 * paramM.v)/paramM.y
# Calculating x , y , u , v for AXIAL Solution Point
[param.x,param.y] =Intersection([paramM.x,paramM.y],[paramP.x,paramP.y],[paramM.lam,paramP.lam])
param.y=0
paramM.T = paramM.S*(param.x-paramM.x)+(paramM.Q*paramM.u)+(paramM.R1*paramM.v)
param.v=0
param.u = (paramM.T)/paramM.Q
return param
#---------------------------------------------------------------------------------------------#
#
# Inverse Wall Point calculator for the initial Nozzle expansion area [Kernel Region]
#
def Inv_Wall_pnt(wall,pt33,pt11,wall_ang,IN):
Corr_v=float(IN['tolerance_v'])
flagv=0
flagu=0
pt2 =Points()
pt4 = Points()
pt44 = Points()
pt44.x = pt33.x
#Determining PLUS/MINUS Characterisitcs
if (pt33.y)<0:
C = 'n'
else:
C = 'p'
# Ref: Zucrow and Hoffman
#Finding point on line pt1 & pt3
    #Convergence Criteria : du & dv < Corr_v
while (abs(pt2.v-flagv)<Corr_v) & (abs(pt2.u-flagu)<Corr_v):
lam31 = (pt33.y-pt11.y)/(pt33.x-pt11.x)
pt2.u = pt33.u
pt2.v = pt33.v
lam42 =FlowProp(pt2,1,C,IN)
[pt2.x,pt2.y]=Intersection([pt33.x,pt33.y],[wall.x,wall.y],[lam31,lam42])
        #Raising Error: Important to Move On to next Point in the MAIN.py(Ln:129)
if pt2.x > pt11.x:
raise NameError('BeyondPointp33xIWP')
flagv=pt2.v
flagu=pt2.u
#Interpolate the U and V value
pt2.u=Inter(pt33.x,pt11.x,pt33.u,pt11.u,pt2.x)
pt2.v=Inter(pt33.x,pt11.x,pt33.v,pt11.v,pt2.x)
#Now Finding properties at the WALL(4)
pt4.m = lam42
pt4.x = wall.x
pt4.y = wall.y
    # Special FlowProp Calculator (since not all values have to be calculated)
pt2 = FlowProp(pt2,2,C,IN)
if C=='p':
factor = 1
else:
factor = -1
T = pt2.S*(pt4.x-pt2.x)+(pt2.Q*pt2.u)+(pt2.R1*pt2.v)
pt4.u= T/(pt2.Q+(pt2.R1*calc.tan(calc.radians(factor*wall_ang))))
pt4.v=calc.tan(calc.radians(factor*wall_ang))*pt4.u
flagu=0
flagv=0
#RETURN: Wall velocities
return pt4.u,pt4.v,pt2
#---------------------------------------------------------------------------------------------#
#
# Calculates flow properties (limited)
# - Used in Inv Wall Points where calculation of all properties is not required
def FlowProp(paramP,flag,Ctype,IN):
paramP.ThermoProp(IN)
NozTyp = str(IN['NozzleType'])
if NozTyp == 'PLANAR':
delta = 0
else:
delta = 1
paramP.theta = calc.atan(paramP.v/paramP.u)
paramP.alpha = calc.asin(paramP.a/paramP.V)
if Ctype == 'p':
paramP.lam = calc.tan(paramP.theta+paramP.alpha)
else:
paramP.lam = calc.tan(paramP.theta-paramP.alpha)
if flag == 1:
return paramP.lam
elif flag ==2:
paramP.Q = paramP.u**2 - paramP.a**2
paramP.R1 = (2*paramP.u*paramP.v) - (paramP.Q*paramP.lam)
if abs(paramP.v)<1e-9:
paramP.S = 0
else:
paramP.S = delta*(paramP.a**2 * paramP.v)/paramP.y
return paramP
#---------------------------------------------------------------------------------------------#
#
# UNDER INVESTIGATION: Scrapped Code!!
# NOT USED IN MAIN CODE
def Direct_Wall_pnt(wall1,pt2d,wall_ang,IN):
import math as calc
import EulerPredCorr as EPC
import matplotlib.pyplot as plt
Corr_v=float(IN['tolerance_v'])
pt4 =Points()
if (pt2d.y)<0:
C = 'n'
else:
C = 'p'
lam42 = FlowProp(pt2d,2,C,IN)
pt4.m = lam42
pt4.x = wall1.x
pt4.y = wall1.y
if C=='p':
factor = 1
else:
factor = -1
T = pt2d.S*(pt4.x-pt2d.x)+(pt2d.Q*pt2d.u)+(pt2d.R1*pt2d.v)
pt4.u= T/(pt2d.Q+(pt2d.R1*calc.tan(calc.radians(factor*wall_ang))))
pt4.v=calc.tan(calc.radians(factor*wall_ang))*pt4.u
Corr = int(IN['Corr_n_inv'])
plt.figure(0)
plt.title("Figure 0")
plt.plot(pt4.x,pt4.y,'>r')
return pt4.u,pt4.v,pt2d
#---------------------------------------------------------------------------------------------#
#
# EulerPredCorr()
# -Takes care of the corrector step in EPC
def EulerPredCorr(paramP,paramM,param6,IN):
#Property Calculator for InteriorPoint[Corrector]
#-----------------------------------------------------------------------------------------#
def LocalProp(paramP,paramM,P,M):
NozTyp = str(IN['NozzleType'])
if NozTyp == 'PLANAR':
delta = 0
else:
delta = 1
param =Points()
        #PLUS Characteristics
paramP.ThermoProp(IN)
paramM.ThermoProp(IN)
paramP.theta = calc.atan(paramP.v/paramP.u)
paramP.alpha = calc.asin(paramP.a/paramP.V)
paramP.lam = calc.tan(paramP.theta+paramP.alpha)
paramP.Q = paramP.u**2 - paramP.a**2
paramP.R1 = (2*paramP.u*paramP.v) - (paramP.Q*paramP.lam)
if abs(paramP.y)<=1e-9:
paramP.S = 0
else:
paramP.S = delta*(paramP.a**2 * paramP.v)/paramP.y
        #MINUS Characteristics
paramM.theta = calc.atan(paramM.v/paramM.u)
paramM.alpha = calc.asin(paramM.a/paramM.V)
paramM.lam = calc.tan(paramM.theta-paramM.alpha)
paramM.Q = paramM.u**2 - paramM.a**2
paramM.R1 = (2*paramM.u*paramM.v) - (paramM.Q*paramM.lam)
if abs(paramM.y)<=1e-9:
paramM.S=0
else:
paramM.S = delta*(paramM.a**2 * paramM.v)/paramM.y
#Intersection of Points
if abs(paramM.y)<=1e-9:
param.x=paramM.x
param.y = (paramP.lam*(param.x-paramP.x))+paramP.y
elif abs(paramP.y)<=1e-9:
param.x=paramP.x
param.y=(paramM.lam*(param.x-paramM.x))+paramM.y
else:
[param.x,param.y] =Intersection([M.x,M.y],[P.x,P.y],[paramM.lam,paramP.lam])
paramP.T = paramP.S*(param.x-paramP.x)+(paramP.Q*P.u)+(paramP.R1*P.v)
paramM.T = paramM.S*(param.x-paramM.x)+(paramM.Q*M.u)+(paramM.R1*M.v)
if abs(paramM.Q)<=1e-9:
param.u = P.u
param.v = 0
elif abs(paramP.Q)<=1e-9:
param.u = M.u
param.v = 0
else:
if abs(paramP.y)<=1e-9:
param.v=0
elif abs(paramM.y)<=1e-9:
param.v=0
else:
if abs(((paramP.T*paramM.Q)-(paramM.T*paramP.Q)))<=1e-9:
param.v = 0
elif abs(((paramM.T*paramP.Q)-(paramP.T*paramM.Q)))<=1e-9:
param.v=0
else:
param.v = ((paramP.T*paramM.Q)-(paramM.T*paramP.Q))/((paramM.Q*paramP.R1)-(paramP.Q*paramM.R1))
param.u = (paramM.T - (paramM.R1*param.v))/paramM.Q
return param
#Property Calculator for AxisPoint[Corrector]
#-----------------------------------------------------------------------------------------#
def LocalAxisProp(paramP,paramM,P,M):
NozTyp = str(IN['NozzleType'])
if NozTyp == 'PLANAR':
delta = 0
else:
delta = 1
param = Points()
        #PLUS Characteristics
paramP.ThermoProp(IN)
paramM.ThermoProp(IN)
paramP.theta = calc.atan(paramP.v/paramP.u)
paramP.alpha = calc.asin(paramP.a/paramP.V)
paramP.lam = calc.tan(paramP.theta+paramP.alpha)
paramP.Q = paramP.u**2 - paramP.a**2
paramP.R1 = (2*paramP.u*paramP.v) - (paramP.Q*paramP.lam)
paramP.S = delta*(paramP.a**2 * paramP.v)/paramP.y
#MINUS Characteristics
paramM.theta = calc.atan(paramM.v/paramM.u)
paramM.alpha = calc.asin(paramM.a/paramM.V)
paramM.lam = calc.tan(paramM.theta-paramM.alpha)
paramM.Q = paramM.u**2 - paramM.a**2
paramM.R1 = (2*paramM.u*paramM.v) - (paramM.Q*paramM.lam)
paramM.S = delta*(paramM.a**2 * paramM.v)/paramM.y
#Intersection of Points
[param.x,param.y] =Intersection([M.x,M.y],[P.x,P.y],[paramM.lam,paramP.lam])
paramP.T = paramP.S*(param.x-paramP.x)+(paramP.Q*P.u)+(paramP.R1*P.v)
paramM.T = paramM.S*(param.x-paramM.x)+(paramM.Q*M.u)+(paramM.R1*M.v)
param.v = 0
param.u = (paramM.T - (paramM.R1*param.v))/paramM.Q
return param
#MAIN CODE START HERE
Corr_n = 1000
tol_x = float(IN['tolerance_x'])
tol_v = float(IN['tolerance_v'])
paramP2 =Points()
paramM2 =Points()
param5 = Points()
for i in range(0,int(Corr_n)):
#[CHECK]: velocity Convergence
if (abs(param5.y-param6.y)<tol_x)&(abs(param5.u-param6.u)<tol_v)&(abs(param5.v-param6.v)<tol_v):
break
else:
param5 = param6
# Finding average properties [PLUS]
paramP2.x = paramP.x
paramP2.u = (paramP.u+param5.u)/2
paramP2.v = (paramP.v+param5.v)/2
paramP2.y = (paramP.y+param5.y)/2
# Finding average properties [MINUS]
paramM2.x = paramM.x
paramM2.u = (paramM.u+param5.u)/2
paramM2.v = (paramM.v+param5.v)/2
paramM2.y = (paramM.y+param5.y)/2
        #[CHECK]: Spatial Convergence
if abs(abs(paramP.y)-abs(paramM.y))<1e-5:
param6 = LocalAxisProp(paramP2,paramM2,paramP,paramM)
else:
param6 = LocalProp(paramP2,paramM2,paramP,paramM)
param6.ThermoProp(IN)
return param6
##
## END
##
|
from engine import TetrisEngine
width, height = 10, 20 # standard tetris friends rules
env = TetrisEngine(width, height)
# Reset the environment
state, character, features = env.clear()
def agent():
print("Please enter the rotation and column for the current tetromino:")
rotation = int(input().strip())
column = int(input().strip())
print("Rotating to {} and moving to {}".format(rotation, column))
return rotation, column
done = False
while not done:
# Get an action from a theoretical AI agent
print(str(env))
print("Current tetromino / character is {}.".format(character))
print("The current features are: {}".format(features))
rotation, column = agent()
# Sim step takes action and returns results
features, reward, done, _ = env.step((rotation, column))
|
# Generated by Django 3.1.3 on 2020-12-05 06:47
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hood', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='business',
name='business_profile',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='avatar'),
),
migrations.AddField(
model_name='neighbourhood',
name='image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='Profile pic'),
),
migrations.AddField(
model_name='post',
name='image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=cloudinary.models.CloudinaryField(default='profile.jpg', max_length=255, verbose_name='Profile pic'),
),
migrations.AlterField(
model_name='user',
name='avatar',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='avatar'),
),
]
|
import math
from .utils import clamp
def linear(t):
return t
def quadratic(t):
return t * t
def bezier(t):
return t * t * (3.0 - (2.0 * t))
def parametric(t):
t2 = t * t
return t2 / (2.0 * (t2 - t) + 1.0)
def spring(mass=1.0, stiffness=100.0, damping=10.0, velocity=0):
# Ported from https://webkit.org/demos/spring/
w0 = math.sqrt(stiffness / mass)
zeta = damping / (2.0 * math.sqrt(stiffness * mass))
if zeta < 1.0:
wd = w0 * math.sqrt(1 - zeta * zeta)
A = 1.0
B = (zeta * w0 - velocity) / wd
else:
wd = 0.0
A = 1.0
B = w0 - velocity
def solve(t):
if zeta < 1.0:
t = math.exp(-t * zeta * w0) * (A * math.cos(wd * t) + B * math.sin(wd * t))
else:
t = (A + B * t) * math.exp(-t * w0)
return 1 - t
return solve
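# Illustrative use of the spring easing factory (not part of the original module): the returned
# solver maps elapsed time to a normalized progress value that starts at 0 and settles near 1.
# solve = spring(mass=1.0, stiffness=100.0, damping=10.0)
# solve(0.0)  -> 0.0
# solve(1.0)  -> close to 1.0 once the oscillation has damped out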
class Animation:
def __init__(self, curve, duration=0.15, delay=0.0):
self.curve = curve
self.duration = duration
self.delay = delay
def interpolate(self, t):
return self.curve(clamp((t - self.delay) / self.duration, 0.0, 1.0))
def finished(self, t):
return t >= (self.duration + self.delay)
def __call__(self, old_value, new_value, modifier):
return AnimationExecutor(self, old_value, new_value, modifier)
class AnimationExecutor:
def __init__(self, animation, old_value, new_value, modifier):
self.animation = animation
self.old_value = old_value.copy()
self.new_value = new_value.copy()
self.modifier = modifier
self.t = 0.0
def step(self, dt):
self.t += dt
pct = self.animation.interpolate(self.t)
value = self.old_value.interpolate(self.new_value, pct)
self.modifier(value)
def finished(self):
return self.animation.finished(self.t)
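# Illustrative usage sketch (not part of the original module; the constants below are
# arbitrary example values): sample the spring() solver to drive a per-frame update.
def _demo_spring_samples(steps=30, fps=60.0):
    """Return `steps` eased progress values for an under-damped spring."""
    solve = spring(mass=1.0, stiffness=180.0, damping=12.0, velocity=0.0)
    return [solve(i / fps) for i in range(steps)]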
|
#!/usr/bin/env python3
"""
Compare Pipfile.lock packages and versions against a requirements.txt.
Raises AssertionError if packages and versions do not exactly match in both.
"""
import json
import re
from typing import List
import sys
"""
Copyright 2020 James Williams
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def get_requirements(file_path: str, is_piplock: bool = False) -> List[str]:
"""Return a requirements.txt or Pipfile.lock as a list.
Pipfile.lock files are parsed and returned in requirements.txt format.
"""
with open(file_path) as req_file:
if is_piplock:
# Dict of {package: {version: xxx ...}}
package_info = json.loads(req_file.read())["default"]
packages = []
for name, info in package_info.items():
try:
# Standard 'package[extras]==version ; markers'
package = name
if info.get("extras"):
package += f"[{','.join(info['extras'])}]"
package += info["version"]
if info.get("markers"):
package += f" ; {info['markers']}"
packages.append(package)
except KeyError:
# Package installed from git
url = info["git"]
branch = info["ref"]
egg = name # TODO raises NameError if used directly?
# TODO the egg may be incorrect here in certain cases
# TODO branch may be undefined
packages.append(f"git+{url}@{branch}#egg={egg}")
# TODO check if other ways of installing packages are missed
else:
# Could possibly contain lines such as '-i https://pypi.org/simple'
all_lines = req_file.read().splitlines()
# Match anything that is either a normal or git installed package
is_package = re.compile(r"[A-Za-z0-9-\[\]]+==\d+|git\+[htps]+.+")
packages = [line for line in all_lines if is_package.match(line)]
return packages
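# Example (illustrative) of the mapping performed above: a Pipfile.lock "default"
# entry such as
#     {"requests": {"version": "==2.25.0", "extras": ["security"],
#                   "markers": "python_version >= '3.6'"}}
# is rendered in requirements.txt form as
#     requests[security]==2.25.0 ; python_version >= '3.6'
# while a git-installed entry with "git" and "ref" keys becomes
#     git+<url>@<ref>#egg=<name>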
def compare(reqs_1: List[str], reqs_2: List[str]):
"""Compare 2 requirements.txt files (lists) and exit if they don't match.
Lines that don't match are printed to stderr.
"""
diff_lock = "\n".join(list(set(reqs_1) - set(reqs_2)))
diff_pip = "\n".join(list(set(reqs_2) - set(reqs_1)))
if diff_lock or diff_pip:
err_msg = "Requirements files do not match.\n\n"
if diff_lock:
err_msg += f"Found in Pipfile.lock:\n{diff_lock}\n\n"
if diff_pip:
err_msg += f"Found in requirements.txt:\n{diff_pip}"
sys.stderr.write(err_msg)
sys.exit(1)
if __name__ == "__main__":
lockfile = get_requirements("Pipfile.lock", is_piplock=True)
pip = get_requirements("requirements.txt")
compare(lockfile, pip)
|
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    # add the package directory `up` levels above this file to sys.path
    path = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
for i in dir():
if 'CreateOutLine' in i:
print(i)
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def print_page (page, page_num):
HPDF_Page_SetWidth (page, 200)
HPDF_Page_SetHeight (page, 300)
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 50, 250)
buf="Page:%d" % page_num
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
def main():
global pdf
page=[None for i in range(4)]
outline=[None for i in range(4)]
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
try:
f = open ("mbtext/sjis.txt", "rb")
except:
printf ("error: cannot open 'mbtext/sjis.txt'\n")
return 1
SAMP_TXT=f.read(2048)
f.close ()
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
# declaration for using Japanese encoding.
HPDF_UseJPEncodings (pdf)
# create default-font
font = HPDF_GetFont (pdf, "Helvetica", NULL)
# Set page mode to use outlines.
HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
# Add 3 pages to the document.
page[0] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[0], font, 20)
print_page(page[0], 1)
page[1] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[1], font, 20)
print_page(page[1], 2)
page[2] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[2], font, 20)
print_page(page[2], 3)
# create outline root.
root = HPDF_CreateOutLine (pdf, NULL, "OutlineRoot", NULL)
HPDF_Outline_SetOpened (root, HPDF_TRUE)
outline[0] = HPDF_CreateOutLine (pdf, root, "page1", NULL)
outline[1] = HPDF_CreateOutLine (pdf, root, "page2", NULL)
# create an outline item whose title is Shift-JIS encoded text
outline[2] = HPDF_CreateOutLine (pdf, root, SAMP_TXT,
HPDF_GetEncoder (pdf, "90ms-RKSJ-H"))
# create destination objects on each page
# and link them to outline items.
dst = HPDF_Page_CreateDestination (page[0])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
HPDF_Outline_SetDestination(outline[0], dst)
# HPDF_Catalog_SetOpenAction(dst)
dst = HPDF_Page_CreateDestination (page[1])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
HPDF_Outline_SetDestination(outline[1], dst)
dst = HPDF_Page_CreateDestination (page[2])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
HPDF_Outline_SetDestination(outline[2], dst)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main()
|
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/brA1_axon-dendrite.csv', header = 0, index_col = 0)
adj.columns = adj.columns.astype(int) #convert column names to int for easier indexing
inputs = pd.read_csv('VNC_interaction/data/brA1_input_counts.csv', index_col = 0)
inputs = pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input'])
pairs = pd.read_csv('VNC_interaction/data/pairs-2020-10-26.csv', header = 0) # import pairs
# %%
from connectome_tools.process_matrix import Adjacency_matrix, Promat
from datetime import date
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
pre_dVNC = pymaid.get_skids_by_annotation('mw pre-dVNC')
A1 = pymaid.get_skids_by_annotation('mw A1 neurons paired')
A1_MN = pymaid.get_skids_by_annotation('mw A1 MN')
A1_ascending = pymaid.get_skids_by_annotation('mw A1 neurons paired ascending')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
RGN = pymaid.get_skids_by_annotation('mw RGN')
dVNC_to_A1 = pymaid.get_skids_by_annotation('mw dVNC to A1')
br = pymaid.get_skids_by_annotation('mw brain neurons')
A1_local = list(np.setdiff1d(A1, A1_ascending)) # all A1 without A1_ascending
pruned_index = list(np.setdiff1d(adj.index, A1_local))
adj = adj.loc[pruned_index, pruned_index] # remove all local A1 skids from adjacency matrix
VNC_adj = Adjacency_matrix(adj.values, adj.index, pairs, inputs,'axo-dendritic')
# %%
# paths from each A1 ascending neuron
from tqdm import tqdm
threshold = 0.01
hops = 3
A1_ascending = A1_ascending + Promat.get_paired_skids(2511238, pairs) # added A00c A4 as well
A1_ascending_pairs = Promat.extract_pairs_from_list(A1_ascending, pairs)[0]
ascending_pair_paths = []
for index in tqdm(range(0, len(A1_ascending_pairs))):
ds_ascending = VNC_adj.downstream_multihop(list(A1_ascending_pairs.loc[index]), threshold, min_members = 0, hops=hops)
ascending_pair_paths.append(ds_ascending)
#A00c_path = VNC_adj.downstream_multihop(Promat.get_paired_skids(2511238, pairs), threshold, min_members=0, hops=hops) # added A00c A4 in (connectivity with A1 sensories/basins)
#ascending_pair_paths.append(A00c_path)
# %%
# plotting ascending paths
all_type_layers,all_type_layers_skids = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, br)
dVNC_A1_type_layers,_ = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, dVNC_to_A1)
dVNC_type_layers,_ = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, dVNC)
predVNC_type_layers,_ = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, pre_dVNC)
dSEZ_type_layers,_ = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, dSEZ)
RGN_type_layers,_ = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, RGN)
valid_ascendings = ~(all_type_layers.iloc[:, 0]==0)
all_type_layers = all_type_layers[valid_ascendings] # remove all ascendings with no strong brain connections
dVNC_A1_type_layers = dVNC_A1_type_layers[valid_ascendings]
dVNC_type_layers = dVNC_type_layers[valid_ascendings]
predVNC_type_layers = predVNC_type_layers[valid_ascendings]
dSEZ_type_layers = dSEZ_type_layers[valid_ascendings]
RGN_type_layers = RGN_type_layers[valid_ascendings]
fig, axs = plt.subplots(
1, 4, figsize=(6, 4)
)
ax = axs[0]
annotations = all_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(all_type_layers, annot = annotations, fmt = 's', cmap = 'Greens', ax = ax, cbar = False)
ax.set_yticks([])
ax.set_ylabel('Individual A1 Ascendings')
ax.set(title='Pathway')
ax = axs[1]
annotations = dVNC_A1_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(dVNC_A1_type_layers, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='dVNC to A1')
ax = axs[2]
annotations = dVNC_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(dVNC_type_layers, annot = annotations, fmt = 's', cmap = 'Reds', ax = ax, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='dVNC')
ax = axs[3]
annotations = predVNC_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(predVNC_type_layers, annot = annotations, fmt = 's', cmap = 'Blues', ax = ax, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='pre-dVNC')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_individual_ascending_paths_hops-{hops}.pdf', bbox_inches='tight')
# %%
# plot by individual ascending
plt.rcParams['font.size'] = 5
# split plot types by ascending pair
asc_pairs_order = [21250110, 4206755, 4595609, 2511238, 8059283, # direct to dVNCs
3571478, 10929797, 3220616, 10949382, 8057753, 2123422, 4555763, 7766016] # indirect to dVNCs
layer_types = [all_type_layers, dVNC_type_layers, dVNC_A1_type_layers, dSEZ_type_layers, RGN_type_layers]
col = ['Greens', 'Reds', 'Oranges', 'Purples', 'GnBu']
#asc_pairs = all_type_layers.index
#layer_types = [all_type_layers, dVNC_type_layers, dSEZ_type_layers, RGN_type_layers]
#col = ['Greens', 'Reds', 'Purples', 'Oranges']
asc_list = []
for pair in asc_pairs_order:
mat = np.zeros(shape=(len(layer_types), len(all_type_layers.columns)))
for i, layer_type in enumerate(layer_types):
mat[i, :] = layer_type.loc[pair]
asc_list.append(mat)
# loop through pairs to plot
for i, asc in enumerate(asc_list):
data = pd.DataFrame(asc, index = ['Total', 'dVNC', 'dVNC-A1', 'dSEZ', 'RGN'])
#data = pd.DataFrame(asc, index = ['Total', 'dVNC', 'dSEZ', 'RGN']).iloc[:, 0:2]
mask_list = []
for i_iter in range(0, len(data.index)):
mask = np.full((len(data.index),len(data.columns)), True, dtype=bool)
mask[i_iter, :] = [False]*len(data.columns)
mask_list.append(mask)
fig, axs = plt.subplots(
1, 1, figsize=(.5, .6)
)
for j, mask in enumerate(mask_list):
if((j == 0)):
vmax = 500
if((j == 2)):
vmax = 30
if((j in [1,3,4])):
vmax = 60
ax = axs
annotations = data.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(data, annot = annotations, fmt = 's', mask = mask, cmap=col[j], vmax = vmax, cbar=False, ax = ax)
plt.savefig(f'VNC_interaction/plots/individual_asc_paths/{i}_asc-{asc_pairs_order[i]}_Threshold-{threshold}_individual-path.pdf', bbox_inches='tight')
plt.rcParams['font.size'] = 6
#%%
# summary plots for main figure
dVNC_direct = [21250110, 4206755, 4595609, 2511238, 8059283]
dVNC_indirect = [3571478, 10929797, 3220616, 10949382, 8057753, 2123422, 4555763, 7766016]
asc_types_name = ['dVNC-direct', 'dVNC-indirect']
asc_types = [dVNC_direct, dVNC_indirect]
asc_types = [[Promat.get_paired_skids(x, pairs) for x in sublist] for sublist in asc_types] # convert leftid's to both skids from each pair
asc_types = [sum(x, []) for x in asc_types] # unlist nested lists
# multihop downstream
type_paths = []
for index in tqdm(range(0, len(asc_types))):
ds_asc = VNC_adj.downstream_multihop(list(asc_types[index]), threshold, min_members = 0, hops=hops)
type_paths.append(ds_asc)
#%%
# summary plot continued
# identifying different cell types in ascending pathways
all_type_layers_types,all_type_layers_skids_types = VNC_adj.layer_id(type_paths, asc_types_name, br) # include all neurons to get total number of neurons per layer
dVNC_type_layers_types,_ = VNC_adj.layer_id(type_paths, asc_types_name, dVNC)
dVNC_A1_type_layers_types,_ = VNC_adj.layer_id(type_paths, asc_types_name, dVNC_to_A1)
dSEZ_type_layers_types,_ = VNC_adj.layer_id(type_paths, asc_types_name, dSEZ)
RGN_type_layers_types,_ = VNC_adj.layer_id(type_paths, asc_types_name, RGN)
# split plot types by dVNC pair
layer_types_types = [all_type_layers_types, dVNC_type_layers_types, dVNC_A1_type_layers_types, dSEZ_type_layers_types, RGN_type_layers_types]
col = ['Greens', 'Reds', 'Oranges', 'Purples', 'GnBu']
asc_list_types = []
for types in asc_types_name:
mat = np.zeros(shape=(len(layer_types_types), len(all_type_layers_types.columns)))
for i, layer_type in enumerate(layer_types_types):
mat[i, :] = layer_type.loc[types]
asc_list_types.append(mat)
plt.rcParams['font.size'] = 5
# loop through pairs to plot
for i, asc in enumerate(asc_list_types):
data = pd.DataFrame(asc, index = ['Total', 'dVNC', 'dVNC-A1', 'dSEZ', 'RGN'])
#data = pd.DataFrame(asc, index = ['Total', 'dVNC', 'dSEZ', 'RGN']).iloc[:, 0:2]
mask_list = []
for i_iter in range(0, len(data.index)):
mask = np.full((len(data.index),len(data.columns)), True, dtype=bool)
mask[i_iter, :] = [False]*len(data.columns)
mask_list.append(mask)
fig, axs = plt.subplots(
1, 1, figsize=(.5, .6)
)
for j, mask in enumerate(mask_list):
if((j == 0)):
vmax = 800
if((j in [2,4])):
vmax = 60
if((j in [1,3])):
vmax = 100
ax = axs
annotations = data.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(data, annot = annotations, fmt = 's', mask = mask, cmap=col[j], vmax = vmax, cbar=False, ax = ax)
plt.savefig(f'VNC_interaction/plots/individual_asc_paths/Type_{i}_asc-{asc_types_name[i]}_Threshold-{threshold}_individual-path.pdf', bbox_inches='tight')
#%%
# export ds-ascending brain neurons
def readable_df(skids_list):
max_length = max([len(x) for x in skids_list])
df = pd.DataFrame()
for i, layer in enumerate(skids_list):
skids = list(layer)
if(len(layer)==0):
skids = ['']
if(len(skids) != max_length):
skids = skids + ['']*(max_length-len(skids))
df[f'Layer {i}'] = skids
return(df)
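# e.g. readable_df([[1, 2, 3], [4]]) yields columns 'Layer 0' = [1, 2, 3] and
# 'Layer 1' = [4, '', ''] (shorter layers are padded with empty strings).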
all_type_layers_skids.columns = [int(x) for x in all_type_layers_skids.columns]
asc_all_layers_readable = []
for column in all_type_layers_skids.columns:
readable = readable_df(all_type_layers_skids.loc[:,column])
asc_all_layers_readable.append(readable)
pd.concat(asc_all_layers_readable).to_csv(f'VNC_interaction/plots/individual_asc_paths/all_paths_ascending_Threshold-{threshold}.csv')
# %%
# ascending paths upstream of particular dVNC paths
from tqdm import tqdm
# setting up parameters for downstream_multihop from dVNC to A1
adj_A1 = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs_A1 = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs_A1 = pd.DataFrame(inputs_A1.values, index = inputs_A1.index, columns = ['axon_input', 'dendrite_input'])
A1_adj = Adjacency_matrix(adj_A1.values, adj_A1.index, pairs, inputs_A1,'axo-dendritic')
threshold = 0.01
source_dVNC, ds_dVNC = A1_adj.downstream(dVNC, threshold, exclude=dVNC)
edges, ds_dVNC_cleaned = A1_adj.edge_threshold(source_dVNC, ds_dVNC, threshold, direction='downstream')
edges[edges.overthres==True]
source_dVNC_cleaned = np.unique(edges[edges.overthres==True].upstream_pair_id)
source_dVNC_pairs = A1_adj.adj_inter.loc[(slice(None), source_dVNC_cleaned), :].index
source_dVNC_pairs = [x[2] for x in source_dVNC_pairs]
source_dVNC_pairs = Promat.extract_pairs_from_list(source_dVNC_pairs, pairs)[0]
source_dVNC_pair_paths = []
for index in tqdm(range(0, len(source_dVNC_pairs))):
ds_dVNC = A1_adj.downstream_multihop(list(source_dVNC_pairs.loc[index]), threshold, min_members = 0, hops=5)
source_dVNC_pair_paths.append(ds_dVNC)
# identifying ascending neurons of interest
order = [16, 0, 2, 11, 1, 5, 7, 12, 13, 8, 3, 9, 10, 15, 4, 6, 14]
group1 = [0,1,2,3]
group2 = [4, 5, 6, 7, 8]
group3 = [9, 10]
group4 = [11,12,13]
ascending_layers,ascending_skids = A1_adj.layer_id(source_dVNC_pair_paths, source_dVNC_pairs.leftid, A1_ascending)
ascending_layers = ascending_layers.iloc[order, :]
ascending_skids = ascending_skids.T.iloc[order, :]
ascending_skids_allhops = []
for index in ascending_skids.index:
skids_allhops = [x for sublist in ascending_skids.loc[index].values for x in sublist if x!='']
ascending_skids_allhops.append(skids_allhops)
# %%
# identifying ascending_paths based on ascending IDs in dVNC paths
# not yet complete
all_ascending_layers,all_ascending_layers_skids = VNC_adj.layer_id(ascending_pair_paths, A1_ascending_pairs.leftid, br)
all_ascending_layers_skids.columns = [int(x) for x in all_ascending_layers_skids.columns]
left_skids_list = []
ascending_dVNC_paths = []
for ascending_skids in ascending_skids_allhops:
left_skids = Promat.extract_pairs_from_list(ascending_skids, pairs)[0].leftid.values
path = all_ascending_layers_skids.loc[:, left_skids].values
path_combined = []
for layers in path:
layers = [x for sublist in layers for x in sublist]
#if(len(layers)>1):
# path_combined.append(np.concatenate(layers))
#if(len(layers)==1):
# path_combined.append(layers)
path_combined.append(layers)
ascending_dVNC_paths.append(path_combined)
left_skids_list.append(left_skids)
# identify neuron types
all_layers,_ = VNC_adj.layer_id(ascending_dVNC_paths, range(0, len(ascending_dVNC_paths)), VNC_adj.adj.index) # include all neurons to get total number of neurons per layer
dVNC_A1_layers,dVNC_A1_layers_skids = VNC_adj.layer_id(ascending_dVNC_paths, range(0, len(ascending_dVNC_paths)), dVNC_to_A1)
dVNC_layers,_ = VNC_adj.layer_id(ascending_dVNC_paths, range(0, len(ascending_dVNC_paths)), dVNC)
pre_dVNC_layers,_ = VNC_adj.layer_id(ascending_dVNC_paths, range(0, len(ascending_dVNC_paths)), pre_dVNC)
# ordered source dVNC pairs in same way
source_dVNC_pairs_ordered = source_dVNC_pairs.loc[order, :]
source_dVNC_pairs_ordered = source_dVNC_pairs_ordered.reset_index(drop=True)
dVNC_A1_layers_skids.columns = source_dVNC_pairs_ordered.leftid
contains_origin_list = []
for column in dVNC_A1_layers_skids.columns:
path = dVNC_A1_layers_skids.loc[:, column]
contains_origin = [True if x==column else False for sublist in path for x in sublist]
contains_origin_list.append(sum(contains_origin))
# %%
|
"""
Author: D. van Gent
License: MIT
Description: Simple Python implementation of the Ubiquiti device discovery protocol.
The protocol was "reverse engineered" by capturing traffic from the "WiFiman" Android app,
payload decoding is best-effort and not guaranteed to be correct.
Usage: python3 ubnt-discover.py {ip-address}
(default is to broadcast on 255.255.255.255)
"""
import socket
import sys
from ipaddress import IPv4Address
def ubnt_discover(addr):
''' Send discovery packets to Ubnt device '''
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(3)
for i in range(0, 3): # send sequential packets to UBNT device
port = 10001 if i else 5678 # switch ports, this mimics "WiFiman" app behavior
sock.sendto(i.to_bytes(4, byteorder='little'), (addr, port))
reply, srcaddr = sock.recvfrom(1024)
dvc_data = []
offset = 4 # data fields start at position 4 in the reply
while offset < len(reply):
ftype, dlength = reply[offset], reply[offset + 2] # field type, payload length
offset += 3
dvc_data.append((ftype, reply[offset:offset + dlength]))
offset += dlength
return dvc_data, srcaddr[0]
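# Assumed reply layout (best-effort, matching the parsing loop above):
#   bytes 0-3 : header / echoed request counter
#   followed by repeated records of [type: 1 byte][length: 2 bytes][payload: length bytes];
#   only the low length byte (reply[offset + 2]) is read, which suffices for
#   payloads shorter than 256 bytes.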
if __name__ == '__main__':
addr = sys.argv[1] if len(sys.argv) == 2 else '255.255.255.255'
dvc_data, dvc_addr = ubnt_discover(addr)
print(f' Reply from: {dvc_addr}')
# decode the fields
fields = {
None: ('Unknown', lambda d: ' '.join(f'{b:02x}' for b in d)),
0x01: ('HW addr', lambda d: ':'.join(f'{b:02x}' for b in d)),
0x02: ('Address', lambda d: f"{':'.join(f'{b:02x}' for b in d[0:6])} \
({IPv4Address(d[6:10])})"),
0x03: ('Firmware', lambda d: d.decode()),
0x0a: ('Uptime', lambda d: f"{int.from_bytes(d, byteorder='big')} seconds"),
0x0b: ('Hostname', lambda d: d.decode()),
0x0c: ('Model', lambda d: d.decode())
}
for field, data in dvc_data:
name, decode = fields.get(field, fields[None])
print(f'{name:>11s}: {decode(data)}')
# Just for fun: grab the first global/WAN IP
# This could be useful for dynamic DNS updates (through the API of your domain provider)
for f, d in dvc_data:
if f == 0x02:
ip = IPv4Address(d[6:10])
if ip.is_global:
print(f' WAN IP: {ip}')
break
|
import asyncio
import time
async def count():
print("one")
await asyncio.sleep(1)
print("two")
async def main():
await asyncio.gather(count(), count(), count())
start = time.time()
asyncio.run(main())
print(time.time() - start)
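# Expected behaviour: the three count() coroutines sleep concurrently, so the script
# prints "one" three times, then "two" three times, and the elapsed time printed at
# the end is just over 1 second rather than 3.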
|
import datetime
import json
import logging
import random
from sqlalchemy import func
from trafficdb.blueprint.api import PAGE_LIMIT
from trafficdb.models import *
from .fixtures import (
create_fake_link_aliases,
create_fake_links,
create_fake_observations,
)
from .util import ApiTestCase as TestCase, API_PREFIX, strip_url
log = logging.getLogger(__name__)
class TestLinkAliases(TestCase):
@classmethod
def create_fixtures(cls):
create_fake_links(link_count=100)
create_fake_link_aliases(alias_count=200)
def test_all_link_aliases(self):
log.info('Querying all link aliases')
n_aliases = 0
n_pages = 0
# Response should look like a JSON document of the following form:
# {
# "aliases": [
# {
# id: <string>,
# linkId: <string>,
# linkUrl: <url>,
# }
# ],
# "page": {
# "count": <number>,
# ?"next": <url>,
# },
# }
# Get all data one page at a time
url = API_PREFIX + '/aliases/'
while url is not None:
# Check we're not looping "forever"
assert n_pages < 20
log.info('GET {0}'.format(url))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsNot(response.json, None)
self.assertIn('aliases', response.json)
self.assertIn('page', response.json)
aliases = response.json['aliases']
page = response.json['page']
log.info('Got {0} aliases'.format(len(aliases)))
log.info('Page structure: {0}'.format(page))
# Check each alias
for v in aliases:
self.assertIn('id', v)
self.assertIn('linkId', v)
self.assertIn('linkUrl', v)
n_aliases += len(aliases)
self.assertTrue(page['count'] == len(aliases))
n_pages += 1
if 'next' in page:
url = strip_url(page['next'])
else:
url = None
log.info('Got information on {0} alias(es)'.format(n_aliases))
self.assertEqual(n_aliases, 200)
def test_redirect(self):
# Non-canonical links URL should re-direct
url = API_PREFIX + '/aliases'
log.info('GET {0}'.format(url))
response = self.client.get(url)
self.assertEqual(response.status_code, 301)
def test_empty_document(self):
log.info('Querying page beyond alias maximum')
# rationale: using "Z" should be "above" any
# random link alias in the db given the ordering used by Postgres.
response = self.get_link_aliases(from_='Z')
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), 0)
self.assertNotIn('next', page)
def test_integer_from(self):
# In the case of aliases, names can be just about anything and
# so they could be an integer.
log.info('Querying page with integer from')
response = self.get_link_aliases(from_=0)
page, aliases = self.parse_link_aliases_response(response)
def test_negative_count(self):
request_count = -3
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
# -ve counts should return bad request
self.assertEqual(response.status_code, 400)
def test_non_number_count(self):
request_count = 'one'
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
# non-numeric counts should return bad request
self.assertEqual(response.status_code, 400)
def test_small_counts(self):
request_count = max(1,PAGE_LIMIT >> 1)
assert PAGE_LIMIT > request_count
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), request_count)
self.assertEqual(len(aliases), page['count'])
def test_huge_counts(self):
log.info('Querying more than PAGE_LIMIT aliases (should be truncated)')
request_count = PAGE_LIMIT * 4
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), page['count'])
self.assertTrue(len(aliases) == PAGE_LIMIT)
def test_non_json_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='not a json document', content_type='application/json')
self.assert_400(response)
def test_empty_json_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{}', content_type='application/json')
self.assert_400(response)
def test_bad_alias_list_resolve_body_1(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": 3}', content_type='application/json')
self.assert_400(response)
def test_bad_alias_list_resolve_body_2(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": ["one", 3]}', content_type='application/json')
self.assert_400(response)
def test_bad_content_type_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": []}', content_type='text/plain')
self.assert_400(response)
def test_empty_resolve(self):
response = self.make_resolve_link_aliases_request([])
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
self.assertEqual(len(resolutions), 0)
def gen_alias_names(self, good_count=3, bad_count=3):
good_alias_names = set(r[0] for r in db.session.query(LinkAlias.name))
bad_alias_names = set('_bad_alias_{0}'.format(x) for x in range(bad_count))
alias_names = random.sample(sorted(good_alias_names), good_count) + list(bad_alias_names)
random.shuffle(alias_names)
return dict((n, n in good_alias_names) for n in alias_names)
def test_simple_resolve(self):
alias_name_map = self.gen_alias_names()
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_single_good_resolve(self):
alias_name_map = self.gen_alias_names(good_count=1, bad_count=0)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_single_bad_resolve(self):
alias_name_map = self.gen_alias_names(good_count=0, bad_count=1)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_too_big_resolve(self):
alias_name_map = self.gen_alias_names(good_count=PAGE_LIMIT, bad_count=PAGE_LIMIT)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_400(response)
ALIASES_PATH = API_PREFIX + '/aliases/'
class TestMutation(TestCase):
@classmethod
def create_fixtures(cls):
create_fake_links(link_count=20)
def new_alias_request(self, link_data):
return self.client.patch(ALIASES_PATH,
data=json.dumps(link_data),
content_type='application/json')
def test_empty_body_request(self):
response = self.client.patch(ALIASES_PATH, data='', content_type='application/json')
self.assert_400(response)
def test_non_json_body_request(self):
response = self.client.patch(ALIASES_PATH, data='not json', content_type='application/json')
self.assert_400(response)
def test_no_content_type_body_request(self):
response = self.client.patch(ALIASES_PATH, data='{}')
self.assert_400(response)
def test_empty_request(self):
response = self.new_alias_request({})
self.assert_200(response)
def verify_create(self, create, response):
self.assert_200(response)
self.assertIn('create', response.json)
create_resp = response.json['create']
self.assertEqual(create_resp['count'], len(create))
# Verify by resolving
response = self.make_resolve_link_aliases_request(
list(cr['name'] for cr in create)
)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
self.assertEqual(len(resolutions), len(create))
# What do we expect?
expected = {}
for cr in create:
expected[cr['name']] = cr['link']
log.info('resolutions: {0}'.format(resolutions))
log.info('expected: {0}'.format(expected))
for r_n, r_l in resolutions:
self.assertIn(r_n, expected)
self.assertEqual(r_l['id'], expected[r_n])
def test_create_single(self):
create = [
dict(name='new-alias', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.verify_create(create, response)
def test_create_multiple_identical(self):
create = [
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-1', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.assert_400(response)
def test_create_multiple(self):
create = [
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-2', link=self.get_some_link_id()),
dict(name='new-alias-3', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.verify_create(create, response)
|
from meuusuario import *
from bancodedados import salvar_usuario
def novopoder(usuario, usuario2):
_ = True
while _:
novo_poder = input('Digite seu novo poder [Administrador] ou [Usuario] : ')
if verificapoder(novo_poder):
if usuario2 == 'Administrador':
usuario.nomeclass = novo_poder
salvar_usuario(vcon, usuario, '5')
print('\n' * 150)
print('poder alterado com sucesso!')
return usuario
else:
print('Acesso negado!')
continue
def deletar(conexao, usuario, usuario2):
if usuario2.nomeclass == "Administrador":
c=conexao.cursor()
c.execute(f'DELETE FROM usuariosdb WHERE ID = "{usuario.id}"')
conexao.commit()
print('\n' * 150)
print('Usuário deletado com sucesso!')
else:
print('Acesso negado!')
def editar(usuario, usuario2):
if usuario.nomeclass == 'Administrador':
usuario_modificado = usuario2
_ = True
while _:
botao_editar_admin = input('''
| [1] Editar Usuario |
| [2] Editar Senha |
| [3] Editar Nome |
| [4] Editar e-mail |
| [5] Editar poder |
| [6] Deletar usuario|
| [7] Voltar |
--------------------
Escolha uma [opção] do menu: ''')
if botao_editar_admin not in ('1', '2', '3', '4', '5', '6', '7'):
print('Opção inválida! Tente novamente.')
continue
elif botao_editar_admin == '1':
usuario_modificado = novousuario(usuario_modificado)
return usuario_modificado
elif botao_editar_admin == '2':
usuario_modificado = forcenovasenha(usuario_modificado, usuario)
return usuario_modificado
elif botao_editar_admin == '3':
usuario_modificado = novonome(usuario_modificado)
return usuario_modificado
elif botao_editar_admin == '4':
usuario_modificado = novoemail(usuario_modificado)
return usuario_modificado
elif botao_editar_admin == '5':
usuario_modificado = novopoder(usuario_modificado, usuario.nomeclass)
return usuario_modificado
elif botao_editar_admin == '6':
deletar(vcon, usuario_modificado, usuario)
break
elif botao_editar_admin == '7':
return usuario_modificado
else:
print('Acesso negado!')
|
from nornir import InitNornir
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.functions.text import print_result
nr = InitNornir()
result = nr.run(task=netmiko_send_command, command_string="show arp")
print_result(result)
# Nornir builds its inventory from hosts.yaml; the task above runs "show arp" on every host defined there via Netmiko.
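# Illustrative hosts.yaml entry for the default SimpleInventory (field names as in
# Nornir 2.x; adjust hostnames/credentials to your environment):
#
#   rtr1:
#     hostname: 192.0.2.1
#     platform: ios
#     username: admin
#     password: secret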
|
from .. import fields, manager, model
from ..interaction import GenericInteraction
EVENT_TYPES_WITH_BLANK_VALUE = (
"lead_deleted",
"lead_restored",
"contact_deleted",
"contact_restored",
"company_deleted",
"company_restored",
"customer_deleted",
"entity_merged",
"task_added",
"task_deleted",
"task_completed",
)
EVENT_TYPE_LEAD_STATUS_CHANGE = "lead_status_changed"
class _EventValueField(fields._UnEditableField):
def on_get_instance(self, instance, value):
"""
value here is what we have in value_after/value_before field
For example
[
{
"note": {
"id": 42743871
}
}
],
"""
if instance.type in EVENT_TYPES_WITH_BLANK_VALUE:
return None
if instance.type == EVENT_TYPE_LEAD_STATUS_CHANGE:
return value[0]["lead_status"]
class Event:
value_after = _EventValueField("value_after")
value_before = _EventValueField("value_before")
class EventsInteraction(GenericInteraction):
path = "events"
class Event(model.Model):
type = fields._Field("type")
entity_id = fields._UnEditableField("entity_id")
entity_type = fields._UnEditableField("entity_type")
created_by = fields._Link("created_by", "User")
created_at = fields._DateTimeField("created_at")
account_id = fields._UnEditableField("account_id")
value_after = _EventValueField("value_after")
value_before = _EventValueField("value_before")
objects = manager.Manager(EventsInteraction())
|
import os
import sys
import json
from flask import Flask
from flask import request
from prediction import Prediction
app = Flask(__name__)
prediction = ""
aijson = ""
@app.route("/model")
def model():
return json.dumps(aijson)
@app.route("/health")
def hello():
return "aibuildpack-api"
@app.route('/', methods = ['POST'])
def new_data():
# parse the JSON request body and run it through the loaded model
req = request.get_data()
payload = json.loads(req)
target = prediction.get_prediction(payload)
return {
"prediction" : target.tolist()
}
if __name__ == "__main__":
aijson = json.load(sys.stdin)
prediction = Prediction(aijson)
if os.environ.get('VCAP_SERVICES') is None: # running locally
PORT = 8080
DEBUG = True
else: # running on CF
PORT = int(os.getenv("PORT"))
DEBUG = False
app.run(host='0.0.0.0', port=PORT, debug=DEBUG)
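# Example request (sketch; the exact input schema is defined by the Prediction class,
# which is not shown here, so the payload below is a placeholder):
#   curl -X POST http://localhost:8080/ -H 'Content-Type: application/json' \
#        -d '{"features": [[1.0, 2.0, 3.0]]}'
# The endpoint responds with {"prediction": [...]} as built in new_data() above.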
|
# Step by Step Logic Gate Solver. Execute input_equation() to start the program!
def input_equation():
print("Enter equation without whitespaces")
equation = input()
var = []
for character in equation:
if character.isalpha() and character not in var:
    var.append(character)
answer = fill_variable(var, equation)
print("The answer is:", answer)
def fill_variable(var, equation):
print("The variables are the following")
for v in var:
if(v.isalpha()):
print(v)
equation_n = equation
for v in var:
if(v.isalpha()):
print("Enter the value of:", v)
temp = input()
equation_n = equation_n.replace(v, temp)
return calculate(equation_n)
def calculate(equation):
print("Calculating now")
equation = list(equation)
equation = solve_complement(equation)
equation = solve_and(equation)
equation = solve_or(equation)
return equation
def solve_complement(equation):
unsolve = True
while(unsolve):
if("'" in equation):
ind = equation.index("'")
if equation[ind - 1] == '1':
    equation[ind - 1] = '0'
    equation.pop(ind)
else:
    equation[ind - 1] = '1'
    equation.pop(ind)
else:
print("Solved Complements")
print(equation)
unsolve = False
return equation
def solve_and(equation):
unsolve = True
while(unsolve):
if("." in equation):
ind = equation.index(".")
if(equation[ind - 1] == '1'):
if(equation[ind + 1] == '1'):
equation[ ind ] = '1'
equation.pop(ind - 1), equation.pop(ind)
else:
equation[ ind ] = '0'
equation.pop(ind - 1), equation.pop(ind)
else:
equation[ ind ] = '0'
equation.pop(ind - 1)
equation.pop(ind)
else:
print("Solved AND")
print(equation)
unsolve = False
return equation
def solve_or(equation):
unsolve = True
while(unsolve):
if("+" in equation):
ind = equation.index("+")
if(equation[ind - 1] == '0'):
if(equation[ind + 1] == '0'):
equation[ ind ] = '0'
equation.pop(ind - 1), equation.pop(ind)
else:
equation [ ind ] = '1'
equation.pop(ind - 1)
equation.pop(ind)
else:
equation[ ind ] = '1'
equation.pop(ind - 1)
equation.pop(ind)
else:
print("Solved OR")
print(equation)
unsolve = False
return equation
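# Worked example (sketch): for the equation "A.B+C'" with A=1, B=0, C=0 the solver
# substitutes values to get "1.0+0'", resolves the complement ("1.0+1"), then the
# AND ("0+1"), then the OR, leaving ['1'] as the final answer.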
input_equation()
|
# Generated by Django 3.1.5 on 2021-10-12 18:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('classes', '0004_auto_20211009_1840'),
('code_analysis', '0017_auto_20211008_2219'),
]
operations = [
migrations.AddField(
model_name='solution',
name='course',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='classes.course'),
),
]
|
# dummy/proxy tz module
import utime
from utime import mktime, localtime
TZ = 1 # UTC+1
def isdst(ts):
return bool(utime.localtime(ts)[8])
def utcoffset(ts=None):
if ts is None:
ts = utime.time()
offset = TZ * 3600
if isdst(ts):
offset += 3600
return offset
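# Usage sketch (assuming utime.time() returns a UTC timestamp):
#   local_ts = utime.time() + utcoffset()
#   print(utime.localtime(local_ts))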
|
import numpy as np
from numpy import pi
import logging
import h5py
from numpy import pi
import logging, os
from .Diagnostics import *
from .Saving import *
class Model(object):
""" Python class that represents the barotropic quasigeostrophic
pseudospectral model in a doubly periodic domain. Physical parameters
are expressed in SI units.
Parameters
-----------
nx: integer (optional)
Number of grid points in the x-direction.
The number of modes is nx/2+1.
ny: integer (optional)
Number of grid points in the y-direction.
If None, then ny=nx.
L: float (optional)
Domain size.
dt: float (optional)
Time step for time integration.
twrite: integer (optional)
Print model status to screen every twrite time steps.
tmax: float (optional)
Total time of simulation.
U: float (optional)
Uniform zonal flow
use_filter: bool (optional)
If True, then uses exponential spectral filter.
nu4: float (optional)
Fourth-order hyperdiffusivity of potential vorticity.
nu: float (optional)
Diffusivity of potential vorticity.
mu: float (optional)
Linear drag of potential vorticity.
passive_scalar: bool (optional)
If True, then calculates passive scalar solution.
nu4c: float (optional)
Fourth-order hyperdiffusivity of passive scalar.
nuc: float (optional)
Diffusivity of passive scalar.
muc: float (optional)
Linear drag of passive scalar.
dealias: bool (optional)
If True, then dealias solution using 2/3 rule.
save_to_disk: bool (optional)
If True, then save parameters and snapshots to disk.
overwrite: bool (optional)
If True, then overwrite extant files.
tsave_snapshots: integer (optional)
Save snapshots every tsave_snapshots time steps.
tdiags: integer (optional)
Calculate diagnostics every tdiags time steps.
path: string (optional)
Location for saving output files.
"""
def __init__(
self,
nx=128,
ny=None,
L=5e5,
dt=10000.,
twrite=1000,
tswrite=10,
tmax=250000.,
use_filter = True,
U = .0,
nu4=5.e9,
nu = 0,
mu = 0,
beta = 0,
passive_scalar = False,
nu4c = 5.e9,
nuc = 0,
muc = 0,
dealias = False,
save_to_disk=False,
overwrite=True,
tsave_snapshots=10,
tdiags = 10,
path = 'output/',
use_mkl=False,
nthreads=1):
self.nx = nx
self.ny = nx
self.L = L
self.W = L
self.dt = dt
self.twrite = twrite
self.tswrite = tswrite
self.tmax = tmax
self.tdiags = tdiags
self.passive_scalar = passive_scalar
self.dealias = dealias
self.U = U
self.beta = beta
self.nu4 = nu4
self.nu = nu
self.mu = mu
self.nu4c = nu4c
self.nuc = nuc
self.muc = muc
self.save_to_disk = save_to_disk
self.overwrite = overwrite
self.tsnaps = tsave_snapshots
self.path = path
self.use_filter = use_filter
self.use_mkl = use_mkl
self.nthreads = nthreads
self._initialize_logger()
self._initialize_grid()
self._allocate_variables()
self._initialize_filter()
self._initialize_etdrk4()
self._initialize_time()
initialize_save_snapshots(self, self.path)
save_setup(self, )
self.cflmax = .5
self._initialize_fft()
self._initialize_diagnostics()
def _allocate_variables(self):
""" Allocate variables so that variable addresses are close in memory.
"""
self.dtype_real = np.dtype('float64')
self.dtype_cplx = np.dtype('complex128')
self.shape_real = (self.ny, self.nx)
self.shape_cplx = (self.ny, self.nx//2+1)
# vorticity
self.q = np.zeros(self.shape_real, self.dtype_real)
self.qh = np.zeros(self.shape_cplx, self.dtype_cplx)
self.qh0 = np.zeros(self.shape_cplx, self.dtype_cplx)
self.qh1 = np.zeros(self.shape_cplx, self.dtype_cplx)
# stream function
self.p = np.zeros(self.shape_real, self.dtype_real)
self.ph = np.zeros(self.shape_cplx, self.dtype_cplx)
def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
""" Run the model for prescribed time and yields to user code.
Parameters
----------
tsnapstart : float
The timestep at which to begin yielding.
tsnapint : float
The interval (in model time) at which to yield; converted internally to time steps.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return
def run(self):
""" Run the model until the end (`tmax`).
The algorithm is:
1) Save snapshots (i.e., save the initial condition).
2) Take tmax/dt steps forward.
3) Save diagnostics.
"""
# save initial conditions
if self.save_to_disk:
if self.passive_scalar:
save_snapshots(self,fields=['t','q','c'])
else:
save_snapshots(self,fields=['t','q'])
# run the model
while(self.t < self.tmax):
self._step_forward()
# save diagnostics
if self.save_to_disk:
save_diagnostics(self)
def _step_forward(self):
""" Step solutions forwards. The algorithm is:
1) Take one time step with ETDRK4 scheme.
2) Increment diagnostics.
3) Print status.
4) Save snapshots.
"""
self._step_etdrk4()
increment_diagnostics(self,)
self._print_status()
save_snapshots(self,fields=['t','q','c'])
def _initialize_time(self):
""" Initialize model clock and other time variables.
"""
self.t=0 # time
self.tc=0 # time-step number
### initialization routines, only called once at the beginning ###
def _initialize_grid(self):
""" Create spatial and spectral grids and normalization constants.
"""
self.x,self.y = np.meshgrid(
np.arange(0.5,self.nx,1.)/self.nx*self.L,
np.arange(0.5,self.ny,1.)/self.ny*self.W )
self.dk = 2.*pi/self.L
self.dl = 2.*pi/self.L
# wavenumber grids
self.nl = self.ny
self.nk = self.nx//2+1
self.ll = self.dl*np.append( np.arange(0.,self.nx/2),
np.arange(-self.nx/2,0.) )
self.kk = self.dk*np.arange(0.,self.nk)
self.k, self.l = np.meshgrid(self.kk, self.ll)
self.ik = 1j*self.k
self.il = 1j*self.l
# physical grid spacing
self.dx = self.L / self.nx
self.dy = self.W / self.ny
# constant for spectral normalizations
self.M = self.nx*self.ny
# isotropic wavenumber^2 grid
# the inversion is not defined at kappa = 0
self.wv2 = self.k**2 + self.l**2
self.wv = np.sqrt( self.wv2 )
self.wv4 = self.wv2**2
iwv2 = self.wv2 != 0.
self.wv2i = np.zeros_like(self.wv2)
self.wv2i[iwv2] = self.wv2[iwv2]**-1
def _initialize_background(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_inversion_matrix(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_forcing(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_filter(self):
"""Set up spectral filter or dealiasing."""
if self.use_filter:
cphi=0.65*pi
wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
self.filtr = np.exp(-23.6*(wvx-cphi)**4.)
self.filtr[wvx<=cphi] = 1.
self.logger.info(' Using filter')
elif self.dealias:
self.filtr = np.ones_like(self.wv2)
self.filtr[self.nx//3:2*self.nx//3, :] = 0.
self.filtr[:, self.ny//3:2*self.ny//3] = 0.
self.logger.info(' Dealiasing with 2/3 rule')
else:
self.filtr = np.ones_like(self.wv2)
self.logger.info(' No dealiasing; no filter')
def _do_external_forcing(self):
pass
def _initialize_logger(self):
""" Initialize logger.
"""
self.logger = logging.getLogger(__name__)
fhandler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s: %(message)s')
fhandler.setFormatter(formatter)
if not self.logger.handlers:
self.logger.addHandler(fhandler)
self.logger.setLevel(10)
# this prevents the logger from propagating into the ipython notebook log
self.logger.propagate = False
self.logger.info(' Logger initialized')
def _step_etdrk4(self):
""" Take one step forward using an exponential time-dfferencing method
with a Runge-Kutta 4 scheme.
Rereferences
------------
See Cox and Matthews, J. Comp. Physics., 176(2):430-455, 2002.
Kassam and Trefethen, IAM J. Sci. Comput., 26(4):1214-233, 2005.
"""
self.qh0 = self.qh.copy()
Fn0 = -self.jacobian_psi_q()
self.qh = (self.expch_h*self.qh0 + Fn0*self.Qh)*self.filtr
self.qh1 = self.qh.copy()
if self.passive_scalar:
self.ch0 = self.ch.copy()
Fn0c = -self.jacobian_psi_c()
self.ch = (self.expch_hc*self.ch0 + Fn0c*self.Qhc)*self.filtr
self.ch1 = self.ch.copy()
self._calc_derived_fields()
c1 = self._calc_ep_c()
self._invert()
k1 = self._calc_ep_psi()
Fna = -self.jacobian_psi_q()
self.qh = (self.expch_h*self.qh0 + Fna*self.Qh)*self.filtr
if self.passive_scalar:
Fnac = -self.jacobian_psi_c()
self.ch = (self.expch_hc*self.ch0 + Fnac*self.Qhc)*self.filtr
self._calc_derived_fields()
c2 = self._calc_ep_c()
self._invert()
k2 = self._calc_ep_psi()
Fnb = -self.jacobian_psi_q()
self.qh = (self.expch_h*self.qh1 + ( 2.*Fnb - Fn0 )*self.Qh)*self.filtr
if self.passive_scalar:
Fnbc = -self.jacobian_psi_c()
self.ch = (self.expch_hc*self.ch1 + ( 2.*Fnbc - Fn0c )*self.Qhc)*self.filtr
self._calc_derived_fields()
c3 = self._calc_ep_c()
self._invert()
k3 = self._calc_ep_psi()
Fnc = -self.jacobian_psi_q()
self.qh = (self.expch*self.qh0 + Fn0*self.f0 + 2.*(Fna+Fnb)*self.fab\
+ Fnc*self.fc)*self.filtr
if self.passive_scalar:
Fncc = -self.jacobian_psi_c()
self.ch = (self.expchc*self.ch0 + Fn0c*self.f0c+ 2.*(Fnac+Fnbc)*self.fabc\
+ Fncc*self.fcc)*self.filtr
self._calc_derived_fields()
c4 = self._calc_ep_c()
self.cvar += self.dt*(c1 + 2*(c2+c3) + c4)/6.
# invert
self._invert()
# calculate q
self.q = self.ifft(self.qh).real
if self.passive_scalar:
self.c = self.ifft(self.ch).real
k4 = self._calc_ep_psi()
self.Ke += self.dt*(k1 + 2*(k2+k3) + k4)/6.
def _initialize_etdrk4(self):
""" Compute coefficients of the exponential time-dfferencing method
with a Runge-Kutta 4 scheme.
Rereferences
------------
See Cox and Matthews, J. Comp. Physics., 176(2):430-455, 2002.
Kassam and Trefethen, IAM J. Sci. Comput., 26(4):1214-233, 2005.
"""
#
# coefficients for q-equation
#
# the exponent for the linear part
c = np.zeros((self.nl,self.nk),self.dtype_cplx)
c += -self.nu4*self.wv4 - self.nu*self.wv2 - self.mu - 1j*self.k*self.U
c += self.beta*self.ik*self.wv2i
ch = c*self.dt
self.expch = np.exp(ch)
self.expch_h = np.exp(ch/2.)
self.expch2 = np.exp(2.*ch)
M = 32. # number of points for line integral in the complex plane
rho = 1. # radius for complex integration
r = rho*np.exp(2j*np.pi*((np.arange(1.,M+1))/M)) # roots for integral
LR = ch[...,np.newaxis] + r[np.newaxis,np.newaxis,...]
LR2 = LR*LR
LR3 = LR2*LR
self.Qh = self.dt*(((np.exp(LR/2.)-1.)/LR).mean(axis=-1))
self.f0 = self.dt*( ( ( -4. - LR + ( np.exp(LR)*( 4. - 3.*LR + LR2 ) ) )/ LR3 ).mean(axis=-1) )
self.fab = self.dt*( ( ( 2. + LR + np.exp(LR)*( -2. + LR ) )/ LR3 ).mean(axis=-1) )
self.fc = self.dt*( ( ( -4. -3.*LR - LR2 + np.exp(LR)*(4.-LR) )/ LR3 ).mean(axis=-1) )
if self.passive_scalar:
#
# coefficients for c-equation
#
# the exponent for the linear part
c = np.zeros((self.nl,self.nk),self.dtype_cplx)
c += -self.nu4c*self.wv4 - self.nuc*self.wv2 - self.muc
ch = c*self.dt
self.expchc = np.exp(ch)
self.expch_hc = np.exp(ch/2.)
self.expch2c = np.exp(2.*ch)
r = rho*np.exp(2j*np.pi*((np.arange(1.,M+1))/M)) # roots for integral
LR = ch[...,np.newaxis] + r[np.newaxis,np.newaxis,...]
LR2 = LR*LR
LR3 = LR2*LR
self.Qhc = self.dt*(((np.exp(LR/2.)-1.)/LR).mean(axis=-1))
self.f0c = self.dt*( ( ( -4. - LR + ( np.exp(LR)*( 4. - 3.*LR + LR2 ) ) )/ LR3 ).mean(axis=-1) )
self.fabc = self.dt*( ( ( 2. + LR + np.exp(LR)*( -2. + LR ) )/ LR3 ).mean(axis=-1) )
self.fcc = self.dt*( ( ( -4. -3.*LR - LR2 + np.exp(LR)*(4.-LR) )/ LR3 ).mean(axis=-1) )
def jacobian_psi_q(self):
""" Compute the advective term–––the Jacobian between psi and q.
Returns
-------
complex array of floats
The Fourier transform of Jacobian(psi,q)
"""
self.u, self.v = self.ifft(-self.il*self.ph).real, self.ifft(self.ik*self.ph).real
q = self.ifft(self.qh).real
return self.ik*self.fft(self.u*q) + self.il*self.fft(self.v*q)
def jacobian_psi_c(self):
""" Compute the advective term of the passive scalar equation–––the
Jacobian between psi and c.
Returns
-------
complex array of floats
The Fourier transform of Jacobian(psi,c)
"""
self.c = self.ifft(self.ch).real
return self.ik*self.fft(self.u*self.c) + self.il*self.fft(self.v*self.c)
def _invert(self):
""" Calculate the streamfunction given the potential vorticity.
"""
# invert for psi
self.ph = -self.wv2i*(self.qh)
# physical space
self.p = self.ifft(self.ph)
def set_q(self,q):
""" Initialize the potential vorticity.
Parameters
----------
q: an array of floats of dimension (nx,ny):
The potential vorticity in physical space.
"""
self.q = q
self.qh = self.fft(self.q)
self._invert()
self.Ke = self._calc_ke_qg()
def set_c(self,c):
""" Initialize the potential vorticity.
Parameters
----------
c: an array of floats of dimension (nx,ny):
The passive scalar in physical space.
"""
self.c = c
self.ch = self.fft(self.c)
self.cvar = self.spec_var(self.ch)
def _initialize_fft(self):
""" Define the two-dimensional FFT methods.
"""
# need to fix bug in mkl_fft.irfft2
if self.use_mkl:
#import mkl
#mkl.set_num_threads(self.nthreads)
#import mkl_fft
#self.fft = (lambda x : mkl_fft.rfft2(x))
#self.ifft = (lambda x : mkl_fft.irfft2(x))
pass
else:
pass
self.fft = (lambda x : np.fft.rfft2(x))
self.ifft = (lambda x : np.fft.irfft2(x))
def _print_status(self):
""" Print out the the model status.
Step: integer
Number of time steps completed
Time: float
The elapsed time.
P: float
The percentage of simulation completed.
Ke: float
The geostrophic kinetic energy.
CFL: float
The CFL number.
"""
self.tc += 1
self.t += self.dt
if (self.tc % self.twrite)==0:
self.ke = self._calc_ke_qg()
self.cfl = self._calc_cfl()
self.logger.info('Step: %i, Time: %4.3e, P: %4.3e , Ke: %4.3e, CFL: %4.3f'
, self.tc,self.t, self.t/self.tmax, self.ke, self.cfl )
assert self.cfl<self.cflmax, self.logger.error('CFL condition violated')
def _calc_ke_qg(self):
""" Compute geostrophic kinetic energy, Ke. """
return 0.5*self.spec_var(self.wv*self.ph)
def _calc_ens(self):
""" Compute geostrophic potential enstrophy. """
return 0.5*self.spec_var(self.qh)
def _calc_ep_psi(self):
""" Compute dissipation of Ke """
lap2psi = self.ifft(self.wv4*self.ph)
lapq = self.ifft(-self.wv2*self.qh)
return self.nu4*(self.q*lap2psi).mean() - self.nu*(self.p*lapq).mean()\
+ self.mu*(self.p*self.q).mean()
def _calc_ep_c(self):
""" Compute dissipation of C2 """
return -2*self.nu4c*(self.lapc**2).mean() - 2*self.nu*self.gradC2\
- 2*self.muc*self.C2
def _calc_chi_c(self):
""" Compute dissipation of gradC2 """
lap2c = self.ifft(self.wv4*self.ch)
return 2*self.nu4c*(lap2c*self.lapc).mean() - 2*self.nu*(self.lapc**2).mean()\
- 2*self.muc*self.gradC2
def _calc_chi_q(self):
"""" Calculates dissipation of geostrophic potential
enstrophy, S. """
return -self.nu4*self.spec_var(self.wv2*self.qh)
def spec_var(self, ph):
""" Compute variance of a variable `p` from its Fourier transform `ph` """
var_dens = 2. * np.abs(ph)**2 / self.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[:,0] *= 0.5
var_dens[:,-1] *= 0.5
# remove mean
var_dens[0,0] = 0
return var_dens.sum()
def _calc_cfl(self):
""" Compute the CFL number. """
# avoid destruction by fftw
self.u = self.ifft(-self.il*self.ph)
self.v = self.ifft(self.ik*self.ph)
return np.abs(np.hstack([self.u, self.v])).max()*self.dt/self.dx
def _initialize_diagnostics(self):
""" Initialize the diagnostics dictionary with each diganostic and an
entry.
"""
self.diagnostics = dict()
add_diagnostic(self,'time',
description='Time',
units='seconds',
types = 'scalar',
function = (lambda self: self.t)
)
add_diagnostic(self, 'ke_qg',
description='Quasigeostrophic Kinetic Energy',
units=r'm^2 s^{-2}',
types = 'scalar',
function = (lambda self: self._calc_ke_qg())
)
add_diagnostic(self, 'Ke',
description='Quasigeostrophic Kinetic Energy, from energy equation',
units=r'm^2 s^{-2}',
types = 'scalar',
function = (lambda self: self.Ke)
)
add_diagnostic(self,'ens',
description='Quasigeostrophic Potential Enstrophy',
units=r's^{-2}',
types = 'scalar',
function = (lambda self: 0.5*(self.q**2).mean())
)
add_diagnostic(self, 'ep_psi',
description='The hyperviscous dissipation of QG kinetic energy',
units=r'$m^2 s^{-3}$',
types = 'scalar',
function = (lambda self: self._calc_ep_psi())
)
add_diagnostic(self, 'chi_q',
description='The hyperviscous dissipation of QG potential enstrophy',
units=r'$s^{-3}$',
types = 'scalar',
function = (lambda self: self._calc_chi_q())
)
add_diagnostic(self, 'C2',
description='Passive tracer variance',
units=r'[scalar]^2',
types = 'scalar',
function = (lambda self: self.C2)
)
add_diagnostic(self, 'cvar',
description='Passive tracer variance, from variance equation',
units=r'[scalar]^2',
types = 'scalar',
function = (lambda self: self.cvar)
)
add_diagnostic(self, 'gradC2',
description='Gradient of Passive tracer variance',
units=r'[scalar]^2 / m^2',
types = 'scalar',
function = (lambda self: self.gradC2)
)
add_diagnostic(self, 'Gamma_c',
description='Rate of generation of passive tracer gradient variance',
units=r'[scalar]^2 / (m^2 s)',
types = 'scalar',
function = (lambda self: self.Gamma_c)
)
add_diagnostic(self, 'ep_c',
description='The dissipation of tracer variance',
                   units=r'[scalar]^2 / s',
types = 'scalar',
function = (lambda self: self._calc_ep_c())
)
add_diagnostic(self, 'chi_c',
description='The dissipation of tracer gradient variance',
                   units=r'[scalar]^2 / (m^2 s)',
types = 'scalar',
function = (lambda self: self._calc_chi_c())
)
def _calc_derived_fields(self):
""" Compute derived fields necessary for model diagnostics. """
if self.passive_scalar:
self.C2 = self.spec_var(self.ch)
self.gradC2 = self.spec_var(self.wv*self.ch)
self.lapc = self.ifft(-self.wv2*self.ch)
self.Gamma_c = 2*(self.lapc*self.ifft(self.jacobian_psi_c())).mean()
else:
self.C2, self.gradC2, self.cvar = 0., 0., 0.
self.c, self.ch = 0., 0.
self.lapc, self.Gamma_c = np.array([0.]), 0.
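# --- A standalone sanity check of the spectral-variance bookkeeping used by spec_var()
# above. This is only a sketch: it assumes ph = np.fft.rfft2(p) and M = nx * ny (the
# usual conventions for this kind of pseudo-spectral model), neither of which is shown
# in the fragment above.
import numpy as np

def _spec_var_sketch(ph, M):
    var_dens = 2.0 * np.abs(ph) ** 2 / M ** 2
    var_dens[:, 0] *= 0.5       # first and last columns of a real FFT are not doubled
    var_dens[:, -1] *= 0.5
    var_dens[0, 0] = 0.0        # drop the (mean)^2 contribution
    return var_dens.sum()

if __name__ == '__main__':
    ny, nx = 64, 64
    p = np.random.randn(ny, nx)
    ph = np.fft.rfft2(p)
    # By Parseval's theorem this equals the grid-point variance of p.
    print(np.allclose(_spec_var_sketch(ph, nx * ny), p.var()))   # True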
|
# Generated by Django 3.1.2 on 2020-12-24 14:21
from django.db import migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('apis', '0021_auto_20201218_2020'),
]
operations = [
migrations.AlterField(
model_name='userapimodel',
name='phone_number',
field=phonenumber_field.modelfields.PhoneNumberField(default='+12125552368', max_length=128, region=None),
),
]
|
#!/usr/bin/python
"""
meta.py
Another "thin waist" of the interpreter. It can be happen at compile time!
We are following the code <-> data pattern, and this is the "data" module.
id_kind and asdl are the "code" modules.
Usage:
from osh.meta import Id, Kind, ast, ID_SPEC
"""
from asdl import py_meta
from asdl import asdl_ as asdl
from core import id_kind
from core import util
class Id(object):
"""Token and op type.
The evaluator must consider all Ids.
NOTE: We add a bunch of class attributes that are INSTANCES of this class,
e.g. Id.Lit_Chars.
"""
def __init__(self, enum_value):
self.enum_value = enum_value
def __repr__(self):
return IdName(self)
class Kind(object):
"""A coarser version of Id, used to make parsing decisions."""
# TODO: The Kind type should be folded into ASDL. It can't print itself,
# which is inconsistent with Id.
pass
class _AsdlModule(object):
"""Dummy object to copy attributes onto."""
pass
_ID_TO_KIND = {} # int -> Kind
def LookupKind(id_):
return _ID_TO_KIND[id_.enum_value]
_ID_NAMES = {} # int -> string
def IdName(id_):
return _ID_NAMES[id_.enum_value]
# Keep one instance of each Id, to save memory and enable comparison by
# OBJECT IDENTITY.
# Do NOT create any more instances of them! Always use IdInstance().
# TODO: Fold Id into ASDL, which will enforce uniqueness?
_ID_INSTANCES = {} # int -> Id
def IdInstance(i):
return _ID_INSTANCES[i]
#
# Instantiate osh/types.asdl
#
f = util.GetResourceLoader().open('osh/types.asdl')
_asdl_module, _type_lookup = asdl.LoadSchema(f, {}) # no app_types
types = _AsdlModule()
if 0:
py_meta.MakeTypes(_asdl_module, types, _type_lookup)
else:
# Exported for the generated code to use
TYPES_TYPE_LOOKUP = _type_lookup
# Get the types from elsewhere
from _devbuild.gen import types_asdl
py_meta.AssignTypes(types_asdl, types)
f.close()
# Id -> bool_arg_type_e
BOOL_ARG_TYPES = {} # type: dict
# Used by test_builtin.py
TEST_UNARY_LOOKUP = {}
TEST_BINARY_LOOKUP = {}
TEST_OTHER_LOOKUP = {}
#
# Instantiate the spec
#
ID_SPEC = id_kind.IdSpec(Id, Kind,
_ID_NAMES, _ID_INSTANCES, _ID_TO_KIND,
BOOL_ARG_TYPES)
id_kind.AddKinds(ID_SPEC)
id_kind.AddBoolKinds(ID_SPEC, Id, types.bool_arg_type_e) # must come second
id_kind.SetupTestBuiltin(Id, Kind, ID_SPEC,
TEST_UNARY_LOOKUP, TEST_BINARY_LOOKUP,
TEST_OTHER_LOOKUP,
types.bool_arg_type_e)
# Debug
_kind_sizes = ID_SPEC.kind_sizes
APP_TYPES = {'id': asdl.UserType(Id)}
#
# Instantiate osh/osh.asdl
#
f = util.GetResourceLoader().open('osh/osh.asdl')
_asdl_module, _type_lookup = asdl.LoadSchema(f, APP_TYPES)
ast = _AsdlModule()
if 0:
py_meta.MakeTypes(_asdl_module, ast, _type_lookup)
else:
# Exported for the generated code to use
OSH_TYPE_LOOKUP = _type_lookup
# Get the types from elsewhere
from _devbuild.gen import osh_asdl
py_meta.AssignTypes(osh_asdl, ast)
f.close()
#
# Instantiate core/runtime.asdl
#
f = util.GetResourceLoader().open('core/runtime.asdl')
_asdl_module, _type_lookup = asdl.LoadSchema(f, APP_TYPES)
runtime = _AsdlModule()
if 0:
py_meta.MakeTypes(_asdl_module, runtime, _type_lookup)
else:
# Exported for the generated code to use
RUNTIME_TYPE_LOOKUP = _type_lookup
# Get the types from elsewhere
from _devbuild.gen import runtime_asdl
py_meta.AssignTypes(runtime_asdl, runtime)
f.close()
#
# Redirect Tables associated with IDs
#
# These might be osh specific.
#
REDIR_DEFAULT_FD = {
# filename
Id.Redir_Less: 0, # cat <input.txt means cat 0<input.txt
Id.Redir_Great: 1,
Id.Redir_DGreat: 1,
Id.Redir_Clobber: 1,
Id.Redir_LessGreat: 1, # TODO: What does echo <>foo do?
# descriptor
Id.Redir_GreatAnd: 1, # echo >&2 means echo 1>&2
Id.Redir_LessAnd: 0, # echo <&3 means echo 0<&3, I think
Id.Redir_TLess: 0, # here word
# here docs included
Id.Redir_DLess: 0,
Id.Redir_DLessDash: 0,
}
redir_arg_type_e = types.redir_arg_type_e
REDIR_ARG_TYPES = {
# filename
Id.Redir_Less: redir_arg_type_e.Path,
Id.Redir_Great: redir_arg_type_e.Path,
Id.Redir_DGreat: redir_arg_type_e.Path,
Id.Redir_Clobber: redir_arg_type_e.Path,
Id.Redir_LessGreat: redir_arg_type_e.Path,
# descriptor
Id.Redir_GreatAnd: redir_arg_type_e.Desc,
Id.Redir_LessAnd: redir_arg_type_e.Desc,
Id.Redir_TLess: redir_arg_type_e.Here, # here word
# note: here docs aren't included
}
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5 import QtGui
import pyqtgraph.opengl as gl
import numpy as np
from pyqtgraph import Vector
class ChartWidget3D(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.w = gl.GLViewWidget()
self.w.opts["elevation"]=90
self.w.opts["azimuth"]=0
self.w.opts['distance'] = 500
# self.w.setWindowOpacity(0)
# self.w.show()
# self.w.setBackgroundColor((88,88,88))
self.w.setParent(self)
self.w.move(0,0)
# self.show()
def setData(self,img):
data=np.array(img.convert("L"))
w,h=data.shape
step=np.random.randint(100,255)/w/255
for row in range(w):
lines = []
for col in range(h):
g = [row, col, data[row][col]]
lines.append(g)
# colors.append(img.getpixel((row,col)))
# _r/=255
_r=row*step
color=(1-_r,.1,_r,.3)
plt = gl.GLLinePlotItem(pos=np.array(lines),color=color, width=1, antialias=True)
self.w.addItem(plt)
# plt=gl.GLScatterPlotItem(pos=np.array(line), color=color, size=2, pxMode=False)
# self.w.addItem(plt)
self.w.opts['center']= Vector(w//2,h/2)
# self.gx = gl.GLGridItem()
# self.gx.setSize(255,w,1)
# self.gx.rotate(90, 0, 1, 0)
# self.gx.translate(0, h//2, 255//2)
# self.w.addItem(self.gx)
#
# self.gy = gl.GLGridItem()
# self.gy.setSize(h,255 , 1)
# self.gy.rotate(90, 1, 0, 0)
# self.gy.translate(w//2,0, 255//2)
# self.w.addItem(self.gy)
# #
# self.gz = gl.GLGridItem()
# self.gz.setSize(w, h, 1)
# self.gz.translate(w//2, h//2, 0)
# self.w.addItem(self.gz)
# data = data.T
# w,h=data.shape
# for row in range(w):
# line=[]
#
# for col in range(h):
# g=[col,row,data[row][col]]
# line.append(g)
# # colors.append(img.getpixel((row,col)))
# plt = gl.GLLinePlotItem(pos=np.array(line), color=(0.,1.,0.,1), width=1, antialias=True)
# self.w.addItem(plt)
def resizeEvent(self, event: QtGui.QResizeEvent) -> None:
self.w.setFixedWidth(self.width())
self.w.setFixedHeight(self.height())
if __name__ == '__main__':
app=QApplication(sys.argv)
cw=ChartWidget3D()
cw.show()
app.exit(app.exec_())
|
""" Cisco_IOS_XR_wdsysmon_fd_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR wdsysmon\-fd package operational data.
This module contains definitions
for the following management objects\:
system\-monitoring\: Processes operational data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SystemMonitoring(Entity):
"""
Processes operational data
.. attribute:: cpu_utilization
Processes CPU utilization information
**type**\: list of :py:class:`CpuUtilization <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wdsysmon_fd_oper.SystemMonitoring.CpuUtilization>`
"""
_prefix = 'wdsysmon-fd-oper'
_revision = '2015-11-09'
def __init__(self):
super(SystemMonitoring, self).__init__()
self._top_entity = None
self.yang_name = "system-monitoring"
self.yang_parent_name = "Cisco-IOS-XR-wdsysmon-fd-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cpu-utilization", ("cpu_utilization", SystemMonitoring.CpuUtilization))])
self._leafs = OrderedDict()
self.cpu_utilization = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(SystemMonitoring, [], name, value)
class CpuUtilization(Entity):
"""
Processes CPU utilization information
.. attribute:: node_name (key)
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: total_cpu_one_minute
Total CPU utilization in past 1 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: total_cpu_five_minute
Total CPU utilization in past 5 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: total_cpu_fifteen_minute
Total CPU utilization in past 15 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: process_cpu
Per process CPU utilization
**type**\: list of :py:class:`ProcessCpu <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wdsysmon_fd_oper.SystemMonitoring.CpuUtilization.ProcessCpu>`
"""
_prefix = 'wdsysmon-fd-oper'
_revision = '2015-11-09'
def __init__(self):
super(SystemMonitoring.CpuUtilization, self).__init__()
self.yang_name = "cpu-utilization"
self.yang_parent_name = "system-monitoring"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node_name']
self._child_classes = OrderedDict([("process-cpu", ("process_cpu", SystemMonitoring.CpuUtilization.ProcessCpu))])
self._leafs = OrderedDict([
('node_name', (YLeaf(YType.str, 'node-name'), ['str'])),
('total_cpu_one_minute', (YLeaf(YType.uint32, 'total-cpu-one-minute'), ['int'])),
('total_cpu_five_minute', (YLeaf(YType.uint32, 'total-cpu-five-minute'), ['int'])),
('total_cpu_fifteen_minute', (YLeaf(YType.uint32, 'total-cpu-fifteen-minute'), ['int'])),
])
self.node_name = None
self.total_cpu_one_minute = None
self.total_cpu_five_minute = None
self.total_cpu_fifteen_minute = None
self.process_cpu = YList(self)
self._segment_path = lambda: "cpu-utilization" + "[node-name='" + str(self.node_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(SystemMonitoring.CpuUtilization, ['node_name', u'total_cpu_one_minute', u'total_cpu_five_minute', u'total_cpu_fifteen_minute'], name, value)
class ProcessCpu(Entity):
"""
Per process CPU utilization
.. attribute:: process_name
Process name
**type**\: str
.. attribute:: process_id
Process ID
**type**\: int
**range:** 0..4294967295
.. attribute:: process_cpu_one_minute
Process CPU utilization in percent for past 1 minute
**type**\: int
**range:** 0..4294967295
**units**\: percentage
.. attribute:: process_cpu_five_minute
Process CPU utilization in percent for past 5 minute
**type**\: int
**range:** 0..4294967295
**units**\: percentage
.. attribute:: process_cpu_fifteen_minute
Process CPU utilization in percent for past 15 minute
**type**\: int
**range:** 0..4294967295
**units**\: percentage
"""
_prefix = 'wdsysmon-fd-oper'
_revision = '2015-11-09'
def __init__(self):
super(SystemMonitoring.CpuUtilization.ProcessCpu, self).__init__()
self.yang_name = "process-cpu"
self.yang_parent_name = "cpu-utilization"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('process_name', (YLeaf(YType.str, 'process-name'), ['str'])),
('process_id', (YLeaf(YType.uint32, 'process-id'), ['int'])),
('process_cpu_one_minute', (YLeaf(YType.uint32, 'process-cpu-one-minute'), ['int'])),
('process_cpu_five_minute', (YLeaf(YType.uint32, 'process-cpu-five-minute'), ['int'])),
('process_cpu_fifteen_minute', (YLeaf(YType.uint32, 'process-cpu-fifteen-minute'), ['int'])),
])
self.process_name = None
self.process_id = None
self.process_cpu_one_minute = None
self.process_cpu_five_minute = None
self.process_cpu_fifteen_minute = None
self._segment_path = lambda: "process-cpu"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(SystemMonitoring.CpuUtilization.ProcessCpu, [u'process_name', u'process_id', u'process_cpu_one_minute', u'process_cpu_five_minute', u'process_cpu_fifteen_minute'], name, value)
def clone_ptr(self):
self._top_entity = SystemMonitoring()
return self._top_entity
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import nltk
from nltk import word_tokenize
import spacy
import en_core_web_sm
from commonregex import CommonRegex
import glob
from nltk.corpus import wordnet
import sys
import os
import errno
import argparse
nltk.download('wordnet')
def openfiles(filename):
with open(filename,'r') as file:
data=file.read()
return data
def redact_text(data, lst):
if(len(lst)!=0):
for n in lst:
data=data.replace(n, "█"*len(n))
return data
def find_names(data):
names=[]
nlp=en_core_web_sm.load()
nltext=nlp(data)
for word in nltext.ents:
if word.label_=="PERSON":
names.append(word.text)
size=len(names)
stats=("The number of unique names replaced in the given file is %d \n" %size)
return names,stats
def find_locs(data):
locs=[]
nlp=en_core_web_sm.load()
nltext=nlp(data)
for word in nltext.ents:
if word.label_=="GPE" or word.label_=="LOC":
locs.append(word.text)
size=len(locs)
stats=("The number of unique locations replaced in the given file is %d \n" %size)
return locs, stats
def find_dates(data):
data1=CommonRegex(data)
date5=[]
if data1.dates!=0:
for n in data1.dates:
date5.append(n)
for n in date5:
data=data.replace(n, "█"*len(n))
size=len(date5)
stats=("The number of unique dates replaced in the given file is %d \n" %size)
return date5, stats
def find_address(data):
addresses=[]
data1=CommonRegex(data)
if data1.street_addresses!=0:
for n in data1.street_addresses:
addresses.append(n)
size=len(addresses)
stats=("The number of unique addresses replaced in the given file is %d \n" %size)
return addresses, stats
def find_numbers(data):
numbers=[]
data1=CommonRegex(data)
if data1.phones!=0:
for n in data1.phones:
numbers.append(n)
size=len(numbers)
stats=("The number of unique phone numbers replaced in the given file is %d \n" %size)
return numbers, stats
def concepts(data, word):
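    # Collect WordNet lemma names as synonyms of `word`; note that `data` is unused here.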
synonyms=[]
for n in wordnet.synsets(word):
for l in n.lemmas():
synonyms.append(l.name())
synonyms.append(word)
return synonyms
def conc_red(data, syns, word):
j=0
tokens=nltk.word_tokenize(data)
for i in range(0,len(tokens)):
for k in range(0,len(syns)):
if (tokens[i].lower()==syns[k].lower()):
tokens[i]=("█"*len(tokens[i]))
j+=1
data1=' '.join(map(str, tokens))
stats=("The number of words related to %s replaced in the given file is %d \n" %(word, j))
return data1, stats
def concepts_gen(data):
gen=['he','she','him','her','his','hers','male','female','man','woman','men','women','He','She','Him','Her','His','Hers','Male','Female','Man','Woman','Men','Women','HE','SHE','HIM','HER','HIS','HERS','MALE','FEMALE','MAN','WOMAN','MEN','WOMEN', 'Mr.', 'Mrs.', 'Ms.']
tokens=nltk.word_tokenize(data)
k=0
for i in range(0,len(tokens)):
for j in range(0,len(gen)):
if tokens[i]==gen[j]:
tokens[i]=("█"*len(tokens[i]))
k+=1
data=' '.join(map(str, tokens))
stats=("The number of gender based pronouns replaced in the given file is %d \n" %k)
return data,stats
def stats_display(names, locs, date5, address, conceptstxt, nums, gens, i, opt, name):
stats=("This is the stats for the document for the file named %s.redacted.txt\n"%(name))
stats+=names
stats+=locs
stats+=date5
stats+=address
stats+=conceptstxt
stats+=nums
stats+=gens
if (opt=="stdout"):
print(stats)
elif(opt=="store"):
textfile = ('./stats/stats%d.txt'%i)
if not os.path.exists(os.path.dirname(textfile)):
try:
os.makedirs(os.path.dirname(textfile))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(textfile, "w") as f:
f.write(stats)
def file_output(data, k, file):
text=data
textfile = ('./%s %s.redacted.txt' %(file, k))
with open(textfile, "w+") as f:
f.write(data)
f.close()
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument("--input", required=True, action="store", type=glob.glob, nargs = '+')
parser.add_argument("--names", action="store_true")
parser.add_argument("--dates", action="store_true")
parser.add_argument("--addresses", action="store_true")
parser.add_argument("--gender", action="store_true")
parser.add_argument("--numbers", action="store_true")
parser.add_argument("--locations", action="store_true")
parser.add_argument("--concepts", type=str)
parser.add_argument("--stats",choices=( "stdout", "store"))
parser.add_argument("--output", action="store")
args=parser.parse_args()
files=[]
for i in args.input:
files.extend(i)
print(files)
for i in range(0,len(files)):
filename=(files[i])
data=openfiles(filename)
if args.names==True:
(names, namestxt)=find_names(data)
data=redact_text(data, names)
else:
namestxt=("No unique names redacted\n")
if args.dates==True:
(dates, datetxt)=find_dates(data)
data=redact_text(data, dates)
else:
datetxt=("No unique dates redacted\n")
if args.addresses==True:
(address, addtxt)=find_address(data)
data=redact_text(data, address)
else:
addtxt=("No unique addresses redacted\n")
if args.gender==True:
(data, protxt)=concepts_gen(data)
else:
protxt=("No unique pronouns redacted\n")
if args.numbers==True:
(nums, numtxt)=find_numbers(data)
(data)=redact_text(data, nums)
else:
numtxt=("No unique phone numbers redacted\n")
if args.locations==True:
(loc, loctxt)=find_locs(data)
(data)=redact_text(data, loc)
else:
loctxt=("No unique locations redacted\n")
if args.concepts:
word=args.concepts
syn=concepts(data, word)
(data, contxt)=conc_red(data, syn, word)
else:
contxt=("No unique concept words redacted\n")
if args.stats:
opt=args.stats
stats_display(namestxt, loctxt, datetxt, addtxt, contxt, numtxt, protxt, i, opt, filename)
if args.output:
path=args.output
name=filename
file_output(data, name, path)
i+=1
|
import datetime
import time
import numpy as np
import tensorflow as tf
import sys
import os
from data_utils import Data
from char_cnn import CharConvNet
if __name__ == '__main__':
#execfile("config.py")
with open('config.py', 'r') as source_file:
exec(source_file.read())
#print "Loading data ....",
train_data = Data(data_source = config.train_data_source,
alphabet = config.alphabet,
l0 = config.l0,
batch_size = config.batch_size,
no_of_classes = config.no_of_classes)
train_data.loadData()
dev_data = Data(data_source = config.dev_data_source,
alphabet = config.alphabet,
l0 = config.l0,
batch_size = config.batch_size,
no_of_classes = config.no_of_classes)
dev_data.loadData()
num_batches_per_epoch = int(train_data.getLength() / config.batch_size) + 1
num_batch_dev = dev_data.getLength()
#print "Loaded"
#print "Training ===>"
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement = True,
log_device_placement = False)
sess = tf.Session(config = session_conf)
with sess.as_default():
char_cnn = CharConvNet(conv_layers = config.model.conv_layers,
fully_layers = config.model.fully_connected_layers,
l0 = config.l0,
alphabet_size = config.alphabet_size,
no_of_classes = config.no_of_classes,
th = config.model.th)
global_step = tf.Variable(0, trainable=False)
                # Piecewise-constant learning-rate schedule: start at base_rate and
                # halve it every 15000 steps. tf.train.piecewise_constant expects
                # exactly one more value than boundaries.
                br = config.training.base_rate
                boundaries = [15000 * i for i in range(1, 11)]
                values = [br / (2 ** i) for i in range(11)]
                print(values)
                print(boundaries)
                learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
#learning_rate = tf.train.exponential_decay(config.training.base_rate,
# global_step,
# config.training.decay_step,
# config.training.decay_rate,
# staircase=True)
#optimizer = tf.train.MomentumOptimizer(learning_rate, config.training.momentum)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(char_cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step = global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.histogram("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar("loss", char_cnn.loss)
acc_summary = tf.summary.scalar("accuracy", char_cnn.accuracy)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables())
#sess.run(tf.initialize_all_variables())
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
char_cnn.input_x: x_batch,
char_cnn.input_y: y_batch,
char_cnn.dropout_keep_prob: config.training.p
}
_, step, summaries, loss, accuracy = sess.run(
[train_op,
global_step,
train_summary_op,
char_cnn.loss,
char_cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
char_cnn.input_x: x_batch,
char_cnn.input_y: y_batch,
char_cnn.dropout_keep_prob: 1.0 # Disable dropout
}
step, summaries, loss, accuracy = sess.run(
[global_step,
dev_summary_op,
char_cnn.loss,
char_cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
# Passing global_step to minimize() will increment it at each step.
            # Training loop. For each batch...
for e in range(config.training.epoches):
train_data.shuffleData()
for k in range(num_batches_per_epoch):
batch_x, batch_y = train_data.getBatchToIndices(k)
train_step(batch_x, batch_y)
current_step = tf.train.global_step(sess, global_step)
if current_step % config.training.evaluate_every == 0:
xin, yin = dev_data.getBatchToIndices()
print("\nEvaluation:")
dev_step(xin, yin, writer=dev_summary_writer)
print("")
if current_step % config.training.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
|
"""This module contains a plugin to test a node's remote shutdown functionality."""
from teatime import Context, Issue, NodeType, Severity
from teatime.plugins.base import IPFSRPCPlugin
class Shutdown(IPFSRPCPlugin):
"""Attempt to list all active P2P listeners.
Severity: Critical
Endpoint: https://docs.ipfs.io/reference/http/api/#api-v0-shutdown
Anyone can shut down the IPFS daemon. This plugin has shut down the node.
This is the highest possible threat to availability. Why would you leave
this enabled? Are you insane?
"""
INTRUSIVE = True # damn right it is
def _check(self, context: Context):
if context.node_type != NodeType.IPFS:
return
payload = self.get_rpc_json(
target=context.target, route="/api/v0/shutdown", raw=True
)
context.report.add_issue(
Issue(
title="Exposed Shutdown Endpoint",
description=(
"Anyone can shut down the IPFS daemon. This plugin has shut down the node. "
"This is the highest possible threat to availability."
),
severity=Severity.CRITICAL,
raw_data=payload,
)
)
|
#! /usr/bin/env python
#adam-does# moves a certain class of files out of the way so they don't break pipeline scripts, while keeping them somewhere in case they're needed later
#adam-use# for example, after running the stellar suppression you can remove the *OCF.fits files so they don't confuse the processing of the *OCFR.fits files. But you can save the *OCF.fits files in case you add more stellar rings in later.
#adam-example# ipython -i -- adam_backup_and_rm_files.py /u/ki/awright/data/MACS0416-24/W-C-RC/SCIENCE/*OCF.fits
import sys
sys.path.append('/u/ki/awright/InstallingSoftware/pythons/')
from import_tools import *
SUBARUDIR="/u/ki/awright/data/"
backup_main="/u/ki/awright/data/backup_files/"
files2backup=imagetools.ArgCleaner(sys.argv)
fl0= files2backup[0]
backup_from_dir=fl0[:fl0.rfind('/')]
if os.path.isdir(backup_from_dir):
if backup_from_dir.startswith(SUBARUDIR):
backup_locators=backup_from_dir.replace(SUBARUDIR,'').split('/')
try:
backup_locators.remove('')
except ValueError:
pass
backup_datetime="%.2i-%.2i-%.4i_at_%.2i-%.2i" % (tm_mon,tm_mday,tm_year,tm_hour,tm_min)
backup_location=backup_main+'_'.join(backup_locators)+"_"+backup_datetime
os.mkdir(backup_location)
num_copied=0
for fl in files2backup:
if os.path.islink(fl):
                raise Exception('cannot do this backup, at least one file (%s) is a symlink\nmust copy link dest to links filename in order to do this!' % (fl))
for fl in files2backup:
print 'mv %s %s' % (fl,backup_location)
out=os.system('mv %s %s' % (fl,backup_location))
if out==0:
num_copied+=1
else:
raise Exception('attempt to copy this file: %s has failed!' % (fl))
print "\n\nbacked up %s files from %s to %s\none example is: %s" % (num_copied,backup_from_dir,backup_location,fl0)
else:
        raise Exception("directory you're backing up (%s) isn't within the SUBARUDIR (%s) directory" % (backup_from_dir,SUBARUDIR))
else:
    raise Exception("directory you're backing up (%s) doesn't exist!" % (backup_from_dir))
|
from .button import Button
from ..decorators import stere_performer, use_after, use_before
from ..field import Field
@stere_performer('select', consumes_arg=True)
class Dropdown(Field):
"""Represents a dropdown menu.
If the "option" argument is provided with a field,
use that as the dropdown item.
Else, assume a standard HTML Dropdown and use the option tag.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# If no option arg is given, assume Dropdown is a standard HTML one.
if kwargs.get('option') is None:
self.option = Button('tag', 'option')
else:
self.option = kwargs.get('option')
@property
def options(self):
"""Searches for all the elements that are an option in the dropdown.
Returns:
list
"""
self.option._element.parent_locator = self.find()
return [item for item in self.option.find_all()]
@use_after
@use_before
def select(self, value):
"""Searches for an option by its html content, then clicks the one
that matches.
Arguments:
value (str): The option value to select.
Raises:
ValueError: The provided value could not be found in the dropdown.
"""
found_options = []
for option in self.options:
found_options.append(option.html)
if option.html == value:
option.click()
break
else:
raise ValueError(
f'{value} was not found. Found values are: {found_options}')
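# A minimal usage sketch (hypothetical locators; as Button('tag', 'option') above
# shows, fields here take a (strategy, locator) pair):
#
#   ship_to = Dropdown('id', 'ship-to-country')
#   ship_to.select('Canada')   # clicks the <option> whose html content is 'Canada'
#   print(ship_to.options)     # every option element found under the dropdown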
|
#coding:utf-8
from multiprocessing.connection import Client
import socket, threading
"""IPC通信(送信用)コンソール
"""
print("IPC Connector (Client)")
def main(port):
while True:
command = input(f"localhost:{port}> ")
try:
data = str(command).encode()
length = len(data)
client.sendall(length.to_bytes(4, byteorder='big'))
client.sendall(data)
data = client.recv(4)
length = int.from_bytes(data, "big")
data = client.recv(length)
msg = data.decode()
print(msg)
except BaseException as err:
print(f"error happend.\n{err}")
if __name__ == "__main__":
print("address> localhost")
port = input("port> ")
if port == "":
print("please insert a port.")
input("Enter to exit...")
exit()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(("localhost", int(port)))
print(f"connect [localhost:{port}]!")
except BaseException as err:
print(f"error happend.\n{err}")
input("Enter to exit...")
exit()
main(port)
|
import html # clean up html strings (such as &)
import json # interact with the Tribe Events API
from datetime import datetime # convert utc time to datetime
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
# remove html encoding and convert to a string object
def clean(my_json_string):
return str(html.unescape(my_json_string))
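# e.g. clean("Budget &amp; Finance Committee") -> "Budget & Finance Committee"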
class PaDevelopmentSpider(CityScrapersSpider):
name = "pa_development"
agency = "PA Department of Community & Economic Development"
timezone = "America/New_York"
allowed_domains = ["dced.pa.gov"]
start_urls = ["https://dced.pa.gov/wp-json/tribe/events/v1/events"]
def parse(self, response):
events = json.loads(response.text)["events"]
for item in events:
meeting = Meeting(
title=self._parse_title(item),
description=self._parse_description(item),
classification=self._parse_classification(item),
start=self._parse_start(item),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item),
source=self._parse_source(item),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, item):
"""Parse or generate meeting title."""
return clean(item["title"])
def _parse_description(self, item):
"""Parse or generate meeting description."""
return clean(item["description"])
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
return BOARD
def _parse_start(self, item):
"""Parse start datetime as a naive datetime object."""
start_time = datetime.strptime(item["start_date"], "%Y-%m-%d %H:%M:%S")
return start_time
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return datetime.strptime(item["end_date"], "%Y-%m-%d %H:%M:%S")
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False."""
return item["all_day"]
def _get_street(self, item):
try:
return clean(item["venue"]["address"])
except KeyError:
return ""
def _get_city(self, item):
try:
return clean(item["venue"]["city"])
except KeyError:
return ""
def _get_state(self, item):
try:
return clean(item["venue"]["state"])
except KeyError:
return ""
def _get_zip(self, item):
try:
return clean(item["venue"]["zip"])
except KeyError:
return ""
def _parse_location(self, item):
"""Parse or generate location."""
address = self._get_street(item)
address += ", " + self._get_city(item)
address += ", " + self._get_state(item)
address += ", " + self._get_zip(item)
return {
"address": address,
"name": clean(item["venue"]["venue"]),
}
def _parse_links(self, item):
"""Parse or generate links."""
return [{"href": "", "title": ""}]
def _parse_source(self, item):
"""Parse or generate source."""
url = item["url"]
return url
|
import re
from datetime import datetime
from functools import total_ordering
from typing import Dict, List, Optional, Tuple
from rich.highlighter import RegexHighlighter
from rich.theme import Theme
class TaskHighlighter(RegexHighlighter):
"""Apply style to [Task]s."""
base_style = "task."
highlights = [
r"(?P<symbols>[(){}\[\]<>:@+-])",
r"\W(?P<numbers>\d+)",
r"(?P<quote>'.*?')",
r"(?P<quote>\".*?\")",
r"profile:(?P<profile>\w*)",
]
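    # e.g. in "(A) profile:work pay 2 bills": the parentheses and colon get the
    # "symbols" style, "2" gets "numbers", and "work" gets "profile".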
TaskTheme = Theme(
{
"task.symbols": "bold",
"task.numbers": "bold blue",
"task.quote": "#FF8700",
"task.profile": "bold cyan",
}
)
@total_ordering
class Task:
def __init__(
self,
msg: str,
*,
        complete: Optional[bool] = None,
        priority: Optional[str] = None,
        date_created: Optional[datetime] = None,
        date_completed: Optional[datetime] = None,
) -> None:
self.msg = msg.strip()
if not self.msg:
raise ValueError("Task message must not be empty.")
self.complete = complete or False
self.priority = priority or ""
self.date_created = date_created or None
self.date_completed = date_completed or None
if self.date_completed and not self.complete:
raise ValueError("Only completed task can have completion date!")
@property
def priority(self):
return self._priority
@priority.setter
def priority(self, value):
if re.fullmatch(r"[A-Z]?", value) is None:
raise ValueError(f"{value!r} is not a valid priority! ([A-Z]?)")
self._priority = value
@property
def contexts(self):
return self.get_tags(self.msg, "@")
@property
def projects(self):
return self.get_tags(self.msg, "+")
@property
def keywords(self):
return self.get_keywords(self.msg)
def __contains__(self, value):
return value in self.to_string()
def __eq__(self, other):
return self.comparison_tuple(self) == self.comparison_tuple(other)
def __hash__(self):
return hash(self.comparison_tuple(self))
def __lt__(self, other):
return self.comparison_tuple(self) < self.comparison_tuple(other)
def __repr__(self):
params = (
"msg",
"complete",
"priority",
"date_created",
"date_completed",
)
args = ", ".join(
f"{param}={getattr(self, param)!r}"
for param in params
if getattr(self, param) is not None
)
return f"Task({args})"
def __str__(self):
return self.to_string()
def contains_term(self, term, sep="/"):
return any(subterm in self for subterm in term.split(sep))
def to_string( # noqa: C901 too complex
self,
hide_contexts: bool = False,
hide_projects: bool = False,
hide_keywords: bool = False,
):
parts = []
if self.complete:
parts.append("x")
if self.priority:
parts.append(f"({self.priority})")
if self.date_completed:
parts.append(self.date_completed.strftime(r"%Y-%m-%d"))
if self.date_created:
parts.append(self.date_created.strftime(r"%Y-%m-%d"))
msg = self.msg
if hide_contexts:
for context in self.contexts:
msg = msg.replace(f"@{context}", "").replace(" ", " ", 1)
if hide_projects:
for project in self.projects:
msg = msg.replace(f"+{project}", "").replace(" ", " ", 1)
if hide_keywords:
for key, value in self.keywords.items():
msg = msg.replace(f"{key}:{value}", "").replace(" ", " ", 1)
parts.append(msg)
return " ".join(parts)
@classmethod
def comparison_tuple(cls, task):
return (
task.complete is not False,
task.complete,
task.priority == "",
task.priority,
(
task.date_created is None,
task.date_created,
),
(
task.date_completed is None,
task.date_completed,
),
task.msg,
)
@classmethod
def from_string(cls, string):
complete, remainder = cls.get_match_and_remainder(
r"([xX]) (.*)", string
)
priority, remainder = cls.get_match_and_remainder(
r"\((\S)\) (.*)", remainder
)
if complete:
try:
date_completed, remainder = cls.get_date(remainder)
except ValueError:
raise ValueError(
f'Unable to parse completion date in "{string}"!'
) from None
else:
date_completed = None
try:
date_created, remainder = cls.get_date(remainder)
except ValueError:
raise ValueError(
                f'Unable to parse creation date in "{string}"!'
) from None
return cls(
remainder,
complete=bool(complete),
priority=priority,
date_created=date_created,
date_completed=date_completed,
)
@staticmethod
def get_match_and_remainder(
expr: str, string: str
) -> Tuple[Optional[str], str]:
match = re.match(expr, string)
if match:
return match.group(1), match.group(2)
return (None, string)
@classmethod
def get_date(cls, string: str) -> Tuple[Optional[datetime], str]:
expr = r"([0-9]{4})-([0-9]{2})-([0-9]{2}) (.*)"
date, remainder = cls.get_match_and_remainder(expr, string)
if date is not None:
return datetime.strptime(date, r"%Y-%m-%d"), remainder
return None, remainder
@staticmethod
def get_tags(string: str, tag: str) -> List[str]:
tags = []
for word in string.split():
if word.startswith(tag):
tags.append(word[1:])
return tags
@staticmethod
def get_keywords(string: str) -> Dict[str, str]:
keywords: Dict[str, str] = {}
expr = re.compile(r"([^:\s]+):([^:\s]+)")
for word in string.split():
match = expr.match(word)
if match:
keywords[match.group(1)] = match.group(2)
return keywords
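# A quick illustration of the todo.txt-style parsing implemented above:
#
#   >>> t = Task.from_string("x (A) 2021-03-02 2021-03-01 call mom @phone +family due:2021-03-05")
#   >>> t.complete, t.priority
#   (True, 'A')
#   >>> t.contexts, t.projects, t.keywords
#   (['phone'], ['family'], {'due': '2021-03-05'})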
|
#!/usr/bin/env python3
import os
import datetime
from pydblite import Base
# open the db via a separate helper function
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def sort_high_scores(elem):
return (int(elem['score']), elem['duration'], elem['date'])
def update_user_table(user_id, first_name):
db = Base(os.path.join(SCRIPT_DIR, 'users.db'))
db.create('user_id', 'first_name', mode="open")
if ((len(db(user_id=user_id)) == 0) or (len(db(user_id=user_id, first_name=first_name)) == 0)):
db.insert(user_id=user_id,
first_name=first_name)
db.commit()
def get_user_name(user_id):
db = Base(os.path.join(SCRIPT_DIR, 'users.db'))
db.create('user_id', 'first_name', mode="open")
user = db(user_id=user_id)
if not user:
return None
user = user.pop()
return user['first_name']
def set_high_score(first_name, user_id, rounds, duration):
rounds = int(rounds)
print('h.set_high_score')
print(first_name, user_id, rounds, duration)
update_user_table(user_id, first_name)
db = Base(os.path.join(SCRIPT_DIR, 'high_scores.db'))
db.create('user_id', 'score', 'duration', 'date', mode="open")
db.insert(user_id=user_id,
score=rounds,
duration=duration,
date=datetime.datetime.now())
db.commit()
def get_high_scores(user_id=None):
print('get_high_scores')
db = Base(os.path.join(SCRIPT_DIR, 'high_scores.db'))
db.create('user_id', 'score', 'duration', 'date', mode="open")
global_top = [
{
'user_id': r['user_id'],
'user': get_user_name(r['user_id']),
'date': r['date'].strftime('%d-%m-%Y'),
'duration': str(r['duration']).split('.')[0],
'score': r['score'],
}
for r in sorted([ r for r in db ], key=sort_high_scores)
]
return {
'my_top': [r for r in global_top if r['user_id'] == user_id][:5],
'global_top': global_top[:5],
}
def get_print_high_scores():
    # TODO: return a dict for the website instead of strings; for printing, just join() them
return [
'{user} {score:02} {duration} {date}'.format(
user=r['user'],
score=r['score'],
date=r['date'],
duration=r['duration'],
)
for r in get_high_scores()['global_top']
]
def print_high_scores():
for score in get_print_high_scores():
print(score)
if __name__ == '__main__':
print_high_scores()
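# Example usage (hypothetical values; `duration` is stored as-is and rendered with
# str(), so a datetime.timedelta works):
#
#   set_high_score('Alice', 12345, rounds=7, duration=datetime.timedelta(minutes=3, seconds=20))
#   print_high_scores()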
|
import os
from glob import glob
import pandas as pd
"""A test to check if images are placed correctly"""
psychic_learners_dir = os.path.split(os.getcwd())[0]
image_folder = os.path.join(psychic_learners_dir, 'data', 'image', 'v1_train_nodups_240x240')
train_df = pd.read_csv(os.path.join(psychic_learners_dir, 'data', 'train_split.csv'))
for big_category in glob(os.path.join(image_folder, '*')):
if not os.path.isdir(big_category):
continue
for small_category in glob(os.path.join(big_category, '*')):
if not os.path.isdir(small_category):
continue
category = int(os.path.split(small_category)[-1])
df = train_df.loc[train_df['Category'] == category]
df['image_names'] = df['abs_image_path'].map(lambda x: os.path.split(x)[-1])
print(df['image_names'])
break
for image in glob(os.path.join(small_category, '*.jpg')):
image_name = os.path.split(image)[-1]
            # `in` on a pandas Series checks the index, so compare against the values
            if image_name not in df['image_names'].values:
print('{} placed in wrong category {}'.format(image_name, category))
else:
print('correct')
|
from avidaspatial import *
from patch_analysis import *
import sys
import shapely as shp
from shapely.geometry import MultiPoint
from descartes.patch import PolygonPatch
import pandas as pd
df = pd.read_csv("all_task_locs.csv")
env_id = sys.argv[1]
task_id=0
if len(sys.argv) > 2:
task_id = int(sys.argv[2])
draw_points = "hotspots"
if len(sys.argv) > 3:
draw_points = sys.argv[3]
env = parse_environment_file("../config/env"+env_id+".cfg", (60,60))
env = convert_world_to_phenotype(env)
env.grid, n = assign_ranks_by_cluster(env.grid, 100)
#length = get_pallete_length(hotspots)
#palette = sns.hls_palette(length, s=1)
#env.task_palette = palette
#print(len(env.task_palette))
#print(len(env.resource_palette))
env.resource_palette = sns.hls_palette(n, s=1, l=.5)
#env.resource_palette = [[0,0,0]] + env.resource_palette
env.task_palette = [(0,0,0), (0,0,0)]
print(n)
#paired_environment_phenotype_grid(env, hotspots, palette=env.task_palette)
plot_world(env, palette=env.resource_palette)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
ax = plt.gca()
colors = ["black", "black", "red", "pink", "yellow", "green", "cyan", "black", "white"] #["black", "black"] + sns.color_palette("husl", 7)#["black", "red", "orange", "yellow", "green", "cyan", "blue", "magenta", "white"]
print(colors)
ids = list(range(9,4,-1))
if task_id != 0:
ids = [task_id]
patch_handles = []
if draw_points == "hotspots":
for task_id in ids:
hotspots = load_grid_data(str(task_id)+"_"+env_id+"_hotspots.csv", "int", delim=",")
hotspots = agg_grid(hotspots, mode)
ones = []
for y in range(len(hotspots)):
for x in range(len(hotspots[y])):
if hotspots[y][x] == 0:
hotspots[y][x] = -1
else:
ones.append((x,y))
patches = traverse_core(ones, 60)
to_remove = []
for i in range(len(patches)):
if len(patches[i]) < 30:
to_remove.append(i)
continue
patches[i] = MultiPoint(patches[i])
for i in reversed(to_remove):
patches.pop(i)
patch = 0
for p in patches:
if p.convex_hull.geom_type == "Polygon":
data = p.convex_hull
else: #p.convex_hull.geom_type == "LineString":
data = p.convex_hull.buffer(.5)
patch = PolygonPatch(data, facecolor='white', lw=3, edgecolor=colors[task_id-1], alpha=1, fill=False, zorder=2, label=task_id)
ax.add_patch(patch)
patch_handles.append(patch)
elif draw_points == "points":
points = df[df.task == int(task_id)][df.environment == int(env_id)]
plt.plot(points.x, points.y, ".", color=colors[task_id-1])
elif draw_points == "paths":
pathfile = open("paths_"+str(task_id-1)+"_"+str(env_id)+".dat")
lines = pathfile.readlines()[10:15]
colors = sns.color_palette("bone", len(lines))
for i,line in enumerate(lines):
nums = eval(line.strip())
(xs, ys) = zip(*nums)
ax.add_line(plt.Line2D(xs, ys, linewidth=2, color=colors[i]))
pathfile.close()
#plt.show()
name = "all" if len(ids)>1 else str(task_id)
name += "_" + draw_points
#legend = plt.legend(handles = patch_handles, bbox_to_anchor=(0.5, -0.01),
# loc=9, borderaxespad=0., frameon=True, ncol=9)
#legend.get_frame().set_facecolor('lightgrey')
plt.savefig(str(env_id)+"_" + name+".png", bbox_inches = 'tight', pad_inches = 0)
|
from settings import Settings, SETTINGS
from ufrc.main import UFRC
def get_file(settings: Settings):
ufrc = UFRC()
ufrc.connect(settings.username, settings.password)
ufrc.get("temp.txt")
if __name__ == "__main__":
get_file(SETTINGS)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-05 04:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20170803_1446'),
]
operations = [
migrations.AlterField(
model_name='user',
name='picture',
field=models.ImageField(blank=True, default='img/default.png', height_field='height_field', null=True, upload_to='', verbose_name='profile picture', width_field='width_field'),
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('conventions', '__first__'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('sum_paid', models.IntegerField()),
('paid_via', models.CharField(help_text='How was the payment made?', max_length=50)),
('payment_time', models.DateTimeField(auto_now=True, verbose_name='Time the ticket was paid.')),
],
options={
'verbose_name_plural': 'Payments',
'verbose_name': 'Payment',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('first_name', models.CharField(null=True, max_length=100)),
('last_name', models.CharField(null=True, max_length=100)),
('email', models.EmailField(null=True, max_length=75)),
('address', models.CharField(help_text='home address', max_length=500)),
('country', django_countries.fields.CountryField(max_length=2)),
('date_of_birth', models.DateField(null=True)),
('sum_paid', models.PositiveSmallIntegerField(default=0)),
('status', models.PositiveSmallIntegerField(help_text='Status of payment.', default=0, choices=[(1, 'ordered'), (2, 'paid'), (0, 'cancelled')])),
('order_time', models.DateTimeField(help_text='Time the ticket was ordered', auto_now_add=True)),
                ('comments', models.TextField(blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TicketPool',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('max_tickets', models.PositiveSmallIntegerField()),
('convention', models.ForeignKey(to='conventions.Convention')),
],
options={
'verbose_name_plural': 'ticket pools',
'verbose_name': 'ticket pool',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TicketType',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('price', models.PositiveSmallIntegerField(default=0)),
('currency', models.CharField(max_length=3, choices=[('EUR', '€'), ('NOK', 'NOK'), ('USD', 'US $'), ('LTL', 'LTL')])),
('max_tickets', models.PositiveSmallIntegerField(null=True, blank=True)),
('description', models.CharField(help_text='Short description of the ticket type.', blank=True, max_length=200)),
('ticket_pool', models.ForeignKey(to='tickets.TicketPool')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='ticket',
name='ticket_type',
field=models.ForeignKey(to='tickets.TicketType', related_name='tickets'),
preserve_default=True,
),
migrations.AddField(
model_name='payment',
name='ticket',
field=models.ForeignKey(to='tickets.Ticket'),
preserve_default=True,
),
]
|
from core.advbase import *
from module.bleed import Bleed
from slot.a import *
from slot.d import *
def module():
return Patia
class Patia(Adv):
a1 = ('bt',0.35)
a3 = ('primed_crit_chance', 0.10, 5)
conf = {}
conf['slots.a'] = Resounding_Rendition()+Brothers_in_Arms()
conf['slots.poison.a'] = Valiant_Crown()+The_Fires_of_Hate()
conf['acl'] = """
#Patia will almost never run out of trickery stacks
`dragon.act("c3 s end"), s=1 and self.slots.tmp.d.trickery <= 1
`s3, not self.s3_buff
`s1
`s2
`s4
"""
coab = ['Blade','Bow','Tobias']
share = ['Karl']
def prerun(self):
self.bleed = Bleed('g_bleed',0).reset()
def s1_proc(self, e):
Teambuff(f'{e.name}_defense', 0.25, 15, 'defense').on()
def s2_proc(self, e):
Bleed(e.name, 1.46).on()
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
import sys
from fastapi import APIRouter, HTTPException, Request
from fastapi.templating import Jinja2Templates
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import *
from . import message_event, user_event
sys.path.append(".")
import config
line_bot_api = LineBotApi(config.LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(config.LINE_CHANNEL_SECRET)
line_app = APIRouter()
templates = Jinja2Templates(directory="templates")
@line_app.post("/callback")
async def callback(request: Request) -> str:
"""LINE Bot Webhook Callback
Args:
request (Request): Request Object.
Raises:
        HTTPException: Signature validation failed
Returns:
str: OK
"""
signature = request.headers["X-Line-Signature"]
body = await request.body()
# handle webhook body
try:
handler.handle(body.decode(), signature)
except InvalidSignatureError:
raise HTTPException(status_code=400, detail="Missing Parameter")
return "OK"
@handler.add(FollowEvent)
def handle_follow(event):
"""事件 - 新使用者加入Bot
Args:
event (LINE Event Object): Refer to https://developers.line.biz/en/reference/messaging-api/#follow-event
"""
user_event.handle_follow(event=event)
@handler.add(UnfollowEvent)
def handle_unfollow(event):
"""事件 - 新使用者封鎖Bot
Args:
event (LINE Event Object): Refer to https://developers.line.biz/en/reference/messaging-api/#unfollow-event
"""
user_event.handle_unfollow(event=event)
@handler.add(MessageEvent, message=(TextMessage))
def handle_message(event):
"""事件 - 訊息
Args:
event (LINE Event Object): Refer to https://developers.line.biz/en/reference/messaging-api/#message-event
"""
message_event.handle_message(event=event)
|
# -*- coding: utf-8 -*-
"""Handles the instrospection of REST Framework Views and ViewSets."""
import inspect
import itertools
import re
import yaml
import importlib
from .compat import OrderedDict, strip_tags, get_pagination_attribures
from abc import ABCMeta, abstractmethod
from django.http import HttpRequest
from django.contrib.admindocs.utils import trim_docstring
from django.utils.encoding import smart_text
import rest_framework
from rest_framework import viewsets
from rest_framework.compat import apply_markdown
try:
from rest_framework.fields import CurrentUserDefault
except ImportError:
    # FIXME: remove once we drop support for DRF 2.x.
CurrentUserDefault = None
from rest_framework.utils import formatting
from django.utils import six
try:
import django_filters
except ImportError:
django_filters = None
def get_view_description(view_cls, html=False, docstring=None):
if docstring is not None:
view_cls = type(
view_cls.__name__ + '_fake',
(view_cls,),
{'__doc__': docstring})
return rest_framework.settings.api_settings \
.VIEW_DESCRIPTION_FUNCTION(view_cls, html)
def get_default_value(field):
default_value = getattr(field, 'default', None)
if rest_framework.VERSION >= '3.0.0':
from rest_framework.fields import empty
if default_value == empty:
default_value = None
if callable(default_value):
if CurrentUserDefault is not None and isinstance(default_value,
CurrentUserDefault):
default_value.user = None
default_value = default_value()
return default_value
class IntrospectorHelper(object):
__metaclass__ = ABCMeta
@staticmethod
def strip_yaml_from_docstring(docstring):
"""
Strips YAML from the docstring.
"""
split_lines = trim_docstring(docstring).split('\n')
cut_off = None
for index in range(len(split_lines) - 1, -1, -1):
line = split_lines[index]
line = line.strip()
if line == '---':
cut_off = index
break
if cut_off is not None:
split_lines = split_lines[0:cut_off]
return "\n".join(split_lines)
@staticmethod
def strip_params_from_docstring(docstring):
"""
        Strips the parameter lines (i.e. "myparam -- Some param") and
        everything after them from the docstring body.
"""
params_pattern = re.compile(r' -- ')
split_lines = trim_docstring(docstring).split('\n')
cut_off = None
for index, line in enumerate(split_lines):
line = line.strip()
if params_pattern.search(line):
cut_off = index
break
if cut_off is not None:
split_lines = split_lines[0:cut_off]
return "\n".join(split_lines)
@staticmethod
def get_serializer_name(serializer):
if serializer is None:
return None
if rest_framework.VERSION >= '3.0.0':
from rest_framework.serializers import ListSerializer
assert serializer != ListSerializer, "uh oh, what now?"
if isinstance(serializer, ListSerializer):
serializer = serializer.child
if inspect.isclass(serializer):
return serializer.__name__
return serializer.__class__.__name__
@staticmethod
def get_summary(callback, docstring=None):
"""
Returns the first sentence of the first line of the class docstring
"""
description = get_view_description(
callback, html=False, docstring=docstring) \
.split("\n")[0].split(".")[0]
description = IntrospectorHelper.strip_yaml_from_docstring(
description)
description = IntrospectorHelper.strip_params_from_docstring(
description)
description = strip_tags(get_view_description(
callback, html=True, docstring=description))
return description
class BaseViewIntrospector(object):
__metaclass__ = ABCMeta
def __init__(self, callback, path, pattern, user):
self.callback = callback
self.path = path
self.pattern = pattern
self.user = user
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
return parser
@abstractmethod
def __iter__(self):
pass
def get_iterator(self):
return self.__iter__()
def get_description(self):
"""
Returns the first sentence of the first line of the class docstring
"""
return IntrospectorHelper.get_summary(self.callback)
def get_docs(self):
return get_view_description(self.callback)
class BaseMethodIntrospector(object):
__metaclass__ = ABCMeta
ENUMS = [
'choice',
'multiple choice',
]
PRIMITIVES = {
'integer': ['int32', 'int64'],
'number': ['float', 'double'],
'string': ['string', 'byte', 'date', 'date-time'],
'boolean': ['boolean'],
}
def __init__(self, view_introspector, method):
self.method = method
self.parent = view_introspector
self.callback = view_introspector.callback
self.path = view_introspector.path
self.user = view_introspector.user
@property
def is_array_response(self):
""" Support definition of array responses with the 'many' attr """
return self.get_yaml_parser().object.get('many')
def get_module(self):
return self.callback.__module__
def check_yaml_methods(self, yaml_methods):
missing_set = set()
for key in yaml_methods:
if key not in self.parent.methods():
missing_set.add(key)
if missing_set:
raise Exception(
"methods %s in class docstring are not in view methods %s"
% (list(missing_set), list(self.parent.methods())))
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
parent_parser = YAMLDocstringParser(self.parent)
self.check_yaml_methods(parent_parser.object.keys())
new_object = {}
new_object.update(parent_parser.object.get(self.method, {}))
new_object.update(parser.object)
parser.object = new_object
return parser
def get_extra_serializer_classes(self):
return self.get_yaml_parser().get_extra_serializer_classes(
self.callback)
def ask_for_serializer_class(self):
if hasattr(self.callback, 'get_serializer_class'):
view = self.create_view()
parser = self.get_yaml_parser()
mock_view = parser.get_view_mocker(self.callback)
view = mock_view(view)
if view is not None:
if parser.should_omit_serializer():
return None
try:
serializer_class = view.get_serializer_class()
except AssertionError as e:
if "should either include a `serializer_class` attribute, or override the `get_serializer_class()` method." in str(e): # noqa
serializer_class = None
else:
raise
return serializer_class
def create_view(self):
view = self.callback()
if not hasattr(view, 'kwargs'):
view.kwargs = dict()
if hasattr(self.parent.pattern, 'default_args'):
view.kwargs.update(self.parent.pattern.default_args)
view.request = HttpRequest()
view.request.user = self.user
view.request.method = self.method
return view
def get_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_serializer_class(self.callback)
if serializer is None:
serializer = self.ask_for_serializer_class()
return serializer
def get_response_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_response_serializer_class(self.callback)
if serializer is None:
serializer = self.get_serializer_class()
return serializer
def get_request_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_request_serializer_class(self.callback)
if serializer is None:
serializer = self.get_serializer_class()
return serializer
def get_summary(self):
# If there is no docstring on the method, get class docs
return IntrospectorHelper.get_summary(
self.callback,
self.get_docs() or self.parent.get_description())
def get_nickname(self):
""" Returns the APIView's nickname """
return rest_framework.settings.api_settings \
.VIEW_NAME_FUNCTION(self.callback, self.method).replace(' ', '_')
def get_notes(self):
"""
Returns the body of the docstring trimmed before any parameters are
listed. First, get the class docstring and then get the method's. The
methods will always inherit the class comments.
"""
docstring = ""
class_docs = get_view_description(self.callback)
class_docs = IntrospectorHelper.strip_yaml_from_docstring(class_docs)
class_docs = IntrospectorHelper.strip_params_from_docstring(class_docs)
method_docs = self.get_docs()
if class_docs is not None:
docstring += class_docs + " \n"
if method_docs is not None:
method_docs = formatting.dedent(smart_text(method_docs))
method_docs = IntrospectorHelper.strip_yaml_from_docstring(
method_docs
)
method_docs = IntrospectorHelper.strip_params_from_docstring(
method_docs
)
docstring += '\n' + method_docs
docstring = docstring.strip()
return do_markdown(docstring)
def get_parameters(self):
"""
Returns parameters for an API. Parameters are a combination of HTTP
query parameters as well as HTTP body parameters that are defined by
the DRF serializer fields
"""
params = []
path_params = self.build_path_parameters()
body_params = self.build_body_parameters()
form_params = self.build_form_parameters()
query_params = self.build_query_parameters()
if django_filters is not None:
query_params.extend(
self.build_query_parameters_from_django_filters())
if path_params:
params += path_params
if self.get_http_method() not in ["GET", "DELETE", "HEAD"]:
params += form_params
if not form_params and body_params is not None:
params.append(body_params)
if query_params:
params += query_params
return params
def get_http_method(self):
return self.method
@abstractmethod
def get_docs(self):
return ''
def retrieve_docstring(self):
"""
Attempts to fetch the docs for a class method. Returns None
if the method does not exist
"""
method = str(self.method).lower()
if not hasattr(self.callback, method):
return None
return get_view_description(getattr(self.callback, method))
def build_body_parameters(self):
serializer = self.get_request_serializer_class()
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
if serializer_name is None:
return
return {
'name': serializer_name,
'type': serializer_name,
'paramType': 'body',
}
def build_path_parameters(self):
"""
Gets the parameters from the URL
"""
url_params = re.findall('/{([^}]*)}', self.path)
params = []
for param in url_params:
params.append({
'name': param,
'type': 'string',
'paramType': 'path',
'required': True
})
return params
def build_query_parameters(self):
params = []
docstring = self.retrieve_docstring() or ''
docstring += "\n" + get_view_description(self.callback)
split_lines = docstring.split('\n')
for line in split_lines:
param = line.split(' -- ')
if len(param) == 2:
params.append({'paramType': 'query',
'name': param[0].strip(),
'description': param[1].strip(),
'type': 'string'})
return params
def build_query_parameters_from_django_filters(self):
"""
introspect ``django_filters.FilterSet`` instances.
"""
params = []
filter_class = getattr(self.callback, 'filter_class', None)
if (filter_class is not None and
issubclass(filter_class, django_filters.FilterSet)):
for name, filter_ in filter_class.base_filters.items():
data_type = 'string'
parameter = {
'paramType': 'query',
'name': name,
'description': filter_.label,
}
normalize_data_format(data_type, None, parameter)
multiple_choices = filter_.extra.get('choices', {})
if multiple_choices:
parameter['enum'] = [choice[0] for choice
in itertools.chain(multiple_choices)]
parameter['type'] = 'enum'
params.append(parameter)
return params
def build_form_parameters(self):
"""
Builds form parameters from the serializer class
"""
data = []
serializer_class = self.get_request_serializer_class()
if serializer_class is None:
return data
serializer = serializer_class()
fields = serializer.get_fields()
read_only_fields = getattr(getattr(serializer, 'Meta', None), 'read_only_fields', [])
for name, field in fields.items():
if getattr(field, 'read_only', False) or name in read_only_fields:
continue
data_type, data_format = get_data_type(field) or ('string', 'string')
if data_type == 'hidden':
continue
# guess format
# data_format = 'string'
# if data_type in self.PRIMITIVES:
# data_format = self.PRIMITIVES.get(data_type)[0]
choices = []
if data_type in BaseMethodIntrospector.ENUMS:
if isinstance(field.choices, list):
choices = [k for k, v in field.choices]
elif isinstance(field.choices, dict):
choices = [k for k, v in field.choices.items()]
if choices:
# guess data type and format
data_type, data_format = get_primitive_type(choices[0]) or ('string', 'string')
f = {
'paramType': 'form',
'name': name,
'description': getattr(field, 'help_text', '') or '',
'type': data_type,
'format': data_format,
'required': getattr(field, 'required', False),
'defaultValue': get_default_value(field),
}
# Swagger type is a primitive, format is more specific
if f['type'] == f['format']:
del f['format']
# defaultValue of null is not allowed, it is specific to type
if f['defaultValue'] is None:
del f['defaultValue']
# Min/Max values
max_value = getattr(field, 'max_value', None)
min_value = getattr(field, 'min_value', None)
if min_value is not None and data_type == 'integer':
f['minimum'] = min_value
if max_value is not None and data_type == 'integer':
f['maximum'] = max_value
# ENUM options
if choices:
f['enum'] = choices
data.append(f)
return data
def get_primitive_type(var):
if isinstance(var, bool):
return 'boolean', 'boolean'
elif isinstance(var, int):
return 'integer', 'int32'
elif isinstance(var, float):
return 'number', 'float'
elif isinstance(var, six.string_types):
return 'string', 'string'
else:
return 'string', 'string' # 'default'
def get_data_type(field):
# (in Swagger 2.0 we might get to use the descriptive types...)
from rest_framework import fields
if isinstance(field, fields.BooleanField):
return 'boolean', 'boolean'
elif hasattr(fields, 'NullBooleanField') and isinstance(field, fields.NullBooleanField):
return 'boolean', 'boolean'
# elif isinstance(field, fields.URLField):
# return 'string', 'string' # 'url'
# elif isinstance(field, fields.SlugField):
# return 'string', 'string', # 'slug'
elif isinstance(field, fields.ChoiceField):
return 'choice', 'choice'
# elif isinstance(field, fields.EmailField):
# return 'string', 'string' # 'email'
# elif isinstance(field, fields.RegexField):
# return 'string', 'string' # 'regex'
elif isinstance(field, fields.DateField):
return 'string', 'date'
elif isinstance(field, fields.DateTimeField):
return 'string', 'date-time' # 'datetime'
# elif isinstance(field, fields.TimeField):
# return 'string', 'string' # 'time'
elif isinstance(field, fields.IntegerField):
return 'integer', 'int64' # 'integer'
elif isinstance(field, fields.FloatField):
return 'number', 'float' # 'float'
# elif isinstance(field, fields.DecimalField):
# return 'string', 'string' #'decimal'
# elif isinstance(field, fields.ImageField):
# return 'string', 'string' # 'image upload'
# elif isinstance(field, fields.FileField):
# return 'string', 'string' # 'file upload'
# elif isinstance(field, fields.CharField):
# return 'string', 'string'
elif rest_framework.VERSION >= '3.0.0':
if isinstance(field, fields.HiddenField):
return 'hidden', 'hidden'
elif isinstance(field, fields.ListField):
return 'array', 'array'
else:
return 'string', 'string'
else:
return 'string', 'string'
class APIViewIntrospector(BaseViewIntrospector):
def __iter__(self):
for method in self.methods():
yield APIViewMethodIntrospector(self, method)
def methods(self):
return self.callback().allowed_methods
class WrappedAPIViewIntrospector(BaseViewIntrospector):
def __iter__(self):
for method in self.methods():
yield WrappedAPIViewMethodIntrospector(self, method)
def methods(self):
return self.callback().allowed_methods
def get_notes(self):
class_docs = get_view_description(self.callback)
class_docs = IntrospectorHelper.strip_yaml_from_docstring(
class_docs)
class_docs = IntrospectorHelper.strip_params_from_docstring(
class_docs)
return get_view_description(
self.callback, html=True, docstring=class_docs)
def do_markdown(docstring):
# Markdown is optional
if apply_markdown:
return apply_markdown(docstring)
else:
return docstring.replace("\n\n", "<br/>")
class APIViewMethodIntrospector(BaseMethodIntrospector):
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
class WrappedAPIViewMethodIntrospector(BaseMethodIntrospector):
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return get_view_description(self.callback)
def get_module(self):
from rest_framework_swagger.decorators import wrapper_to_func
func = wrapper_to_func(self.callback)
return func.__module__
def get_notes(self):
return self.parent.get_notes()
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
return parser
class ViewSetIntrospector(BaseViewIntrospector):
"""Handle ViewSet introspection."""
def __init__(self, callback, path, pattern, user, patterns=None):
super(ViewSetIntrospector, self).__init__(callback, path, pattern, user)
if not issubclass(callback, viewsets.ViewSetMixin):
raise Exception("wrong callback passed to ViewSetIntrospector")
self.patterns = patterns or [pattern]
def __iter__(self):
methods = self._resolve_methods()
for method in methods:
yield ViewSetMethodIntrospector(self, methods[method], method)
def methods(self):
stuff = []
for pattern in self.patterns:
if pattern.callback:
stuff.extend(self._resolve_methods(pattern).values())
return stuff
def _resolve_methods(self, pattern=None):
from .decorators import closure_n_code, get_closure_var
if pattern is None:
pattern = self.pattern
callback = pattern.callback
try:
x = closure_n_code(callback)
while getattr(x.code, 'co_name') != 'view':
# lets unwrap!
callback = get_closure_var(callback)
x = closure_n_code(callback)
freevars = x.code.co_freevars
except (AttributeError, IndexError):
raise RuntimeError(
'Unable to use callback: invalid closure/function '
'specified.')
else:
return x.closure[freevars.index('actions')].cell_contents
class ViewSetMethodIntrospector(BaseMethodIntrospector):
def __init__(self, view_introspector, method, http_method):
super(ViewSetMethodIntrospector, self) \
.__init__(view_introspector, method)
self.http_method = http_method.upper()
@property
def is_array_response(self):
""" ViewSet.list methods always return array responses """
return (self.method == 'list' or
super(ViewSetMethodIntrospector, self).is_array_response)
def get_http_method(self):
return self.http_method
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
def create_view(self):
view = super(ViewSetMethodIntrospector, self).create_view()
if not hasattr(view, 'action'):
setattr(view, 'action', self.method)
view.request.method = self.http_method
return view
def build_query_parameters(self):
parameters = super(ViewSetMethodIntrospector, self) \
.build_query_parameters()
view = self.create_view()
page_size, page_query_param, page_size_query_param = get_pagination_attribures(view)
if self.method == 'list' and page_size:
data_type = 'integer'
if page_query_param:
parameters.append({
'paramType': 'query',
'name': page_query_param,
'description': None,
})
normalize_data_format(data_type, None, parameters[-1])
if page_size_query_param:
parameters.append({
'paramType': 'query',
'name': page_size_query_param,
'description': None,
})
normalize_data_format(data_type, None, parameters[-1])
return parameters
def multi_getattr(obj, attr, default=None):
"""
Get a named attribute from an object; multi_getattr(x, 'a.b.c.d') is
equivalent to x.a.b.c.d. When a default argument is given, it is
returned when any attribute in the chain doesn't exist; without
it, an exception is raised when a missing attribute is encountered.
"""
attributes = attr.split(".")
for i in attributes:
try:
obj = getattr(obj, i)
except AttributeError:
if default is not None:
return default
else:
raise
return obj
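# Illustrative use of multi_getattr (hypothetical objects and attribute names):
# multi_getattr(view, "request.user.username") resolves
# view.request.user.username one attribute at a time, while
# multi_getattr(view, "request.user.username", default="anonymous") falls back
# to "anonymous" when any link in the chain is missing instead of raising
# AttributeError.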
def normalize_data_format(data_type, data_format, obj):
"""
sets 'type' on obj
sets a valid 'format' on obj if appropriate
uses data_format only if valid
"""
if data_type == 'array':
data_format = None
flatten_primitives = [
val for sublist in BaseMethodIntrospector.PRIMITIVES.values()
for val in sublist
]
if data_format not in flatten_primitives:
formats = BaseMethodIntrospector.PRIMITIVES.get(data_type, None)
if formats:
data_format = formats[0]
else:
data_format = None
if data_format == data_type:
data_format = None
obj['type'] = data_type
if data_format is None and 'format' in obj:
del obj['format']
elif data_format is not None:
obj['format'] = data_format
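# Example of the normalisation above (hypothetical parameter dict):
# p = {'paramType': 'query', 'name': 'page'}
# normalize_data_format('integer', None, p)
# # p is now {'paramType': 'query', 'name': 'page',
# #           'type': 'integer', 'format': 'int32'}
# # 'int32' is the first format registered for 'integer' in PRIMITIVES;
# # an unknown or redundant format is simply dropped from the dict.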
class YAMLDocstringParser(object):
"""
Docstring parser powered by YAML syntax
This parser allows you to override parts of the automatic method inspection,
whose behaviour is not always correct.
See the following documents for more information about YAML and Swagger:
- https://github.com/wordnik/swagger-core/wiki
- http://www.yaml.org/spec/1.2/spec.html
- https://github.com/wordnik/swagger-codegen/wiki/Creating-Swagger-JSON-from-YAML-files
1. Control over parameters
============================================================================
Define parameters and their properties in docstrings:
parameters:
- name: some_param
description: Foobar long description goes here
required: true
type: integer
paramType: form
minimum: 10
maximum: 100
- name: other_foo
paramType: query
- name: avatar
type: file
It is possible to override parameters discovered by the method inspector by
setting the `parameters_strategy` option to either `merge` or `replace`.
To define different strategies for different `paramType`'s use the
following syntax:
parameters_strategy:
form: replace
query: merge
By default the strategy is set to `merge`.
Sometimes the method inspector produces a wrong list of parameters that
you might not want to see in the Swagger form. To handle this situation,
define the `paramType`s that should be omitted:
omit_parameters:
- form
2. Control over serializers
============================================================================
Once in a while you use different serializers inside methods,
but the automatic method inspector cannot detect this. For that purpose there
are two explicit parameters that allow you to discard the serializer detected
by the method inspector OR replace it with another one:
serializer: some.package.FooSerializer
omit_serializer: true
3. Custom Response Class
============================================================================
If your view is not using a serializer at all but instead outputs a simple
data type such as JSON, you may define a custom response object in the method
docstring as follows:
type:
name:
required: true
type: string
url:
required: false
type: url
4. Response Messages (Error Codes)
============================================================================
If you'd like to share common response errors that your APIView might throw,
you can define them in the docstring using the following format:
responseMessages:
- code: 401
message: Not authenticated
- code: 403
message: Insufficient rights to call this procedure
5. Different models for reading and writing operations
============================================================================
Since REST Framework won't output write_only fields in responses and
does not require read_only fields to be provided, it is worth
automatically registering 2 separate models for reading and writing operations.
The discovered serializer will be registered with a `Write` or `Read` prefix.
The Response Class will be automatically adjusted if the serializer class was
detected by the method inspector.
You can also refer to these models in your parameters:
parameters:
- name: CigarSerializer
type: WriteCigarSerializer
paramType: body
SAMPLE DOCSTRING:
============================================================================
---
# API Docs
# Note: YAML always starts with `---`
type:
name:
required: true
type: string
url:
required: false
type: url
created_at:
required: true
type: string
format: date-time
serializer: .serializers.FooSerializer
omit_serializer: false
parameters_strategy: merge
omit_parameters:
- path
parameters:
- name: name
description: Foobar long description goes here
required: true
type: string
paramType: form
- name: other_foo
paramType: query
- name: other_bar
paramType: query
- name: avatar
type: file
responseMessages:
- code: 401
message: Not authenticated
"""
PARAM_TYPES = ['header', 'path', 'form', 'body', 'query']
yaml_error = None
def __init__(self, method_introspector):
self.method_introspector = method_introspector
self.object = self.load_obj_from_docstring(
docstring=self.method_introspector.get_docs())
if self.object is None:
self.object = {}
def load_obj_from_docstring(self, docstring):
"""Loads YAML from docstring"""
split_lines = trim_docstring(docstring).split('\n')
# Cut YAML from rest of docstring
for index, line in enumerate(split_lines):
line = line.strip()
if line.startswith('---'):
cut_from = index
break
else:
return None
yaml_string = "\n".join(split_lines[cut_from:])
yaml_string = formatting.dedent(yaml_string)
try:
# Safe load: docstrings should only ever contain plain YAML
return yaml.load(yaml_string, Loader=yaml.SafeLoader)
except yaml.YAMLError as e:
self.yaml_error = e
return None
def _load_class(self, cls_path, callback):
"""
Dynamically load a class from a string
"""
if not cls_path or not callback or not hasattr(callback, '__module__'):
return None
package = None
if '.' not in cls_path:
# within current module/file
class_name = cls_path
module_path = self.method_introspector.get_module()
else:
# relative or fully qualified path import
class_name = cls_path.split('.')[-1]
module_path = ".".join(cls_path.split('.')[:-1])
if cls_path.startswith('.'):
# relative lookup against current package
# ..serializers.FooSerializer
package = self.method_introspector.get_module()
class_obj = None
# Try to perform local or relative/fq import
try:
module = importlib.import_module(module_path, package=package)
class_obj = getattr(module, class_name, None)
except ImportError:
pass
# Class was not found, maybe it was imported to callback module?
# from app.serializers import submodule
# serializer: submodule.FooSerializer
if class_obj is None:
try:
module = importlib.import_module(
self.method_introspector.get_module())
class_obj = multi_getattr(module, cls_path, None)
except (ImportError, AttributeError):
# `module` may be unbound if the first import failed, so report the
# callback's module instead
raise Exception("Could not find %s, looked in %s" % (
cls_path, self.method_introspector.get_module()))
return class_obj
def get_serializer_class(self, callback):
"""
Retrieves serializer class from YAML object
"""
serializer = self.object.get('serializer', None)
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_extra_serializer_classes(self, callback):
"""
Retrieves serializer classes from pytype YAML objects
"""
parameters = self.object.get('parameters', [])
serializers = []
for parameter in parameters:
serializer = parameter.get('pytype', None)
if serializer is not None:
try:
serializer = self._load_class(serializer, callback)
serializers.append(serializer)
except (ImportError, ValueError):
pass
return serializers
def get_request_serializer_class(self, callback):
"""
Retrieves request serializer class from YAML object
"""
serializer = self.object.get('request_serializer', None)
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_response_serializer_class(self, callback):
"""
Retrieves response serializer class from YAML object
"""
serializer = self.object.get('response_serializer', None)
if isinstance(serializer, list):
serializer = serializer[0]
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_response_type(self):
"""
Docstring may define custom response class
"""
return self.object.get('type', None)
def get_consumes(self):
"""
Retrieves media type supported as input
"""
return self.object.get('consumes', [])
def get_produces(self):
"""
Retrieves media type supported as output
"""
return self.object.get('produces', [])
def get_response_messages(self):
"""
Retrieves response error codes from YAML object
"""
messages = []
response_messages = self.object.get('responseMessages', [])
for message in response_messages:
messages.append({
'code': message.get('code', None),
'message': message.get('message', None),
'responseModel': message.get('responseModel', None),
})
return messages
def get_view_mocker(self, callback):
view_mocker = self.object.get('view_mocker', lambda a: a)
if isinstance(view_mocker, six.string_types):
view_mocker = self._load_class(view_mocker, callback)
return view_mocker
def get_parameters(self, callback):
"""
Retrieves parameters from YAML object
"""
params = []
fields = self.object.get('parameters', [])
for field in fields:
param_type = field.get('paramType', None)
if param_type not in self.PARAM_TYPES:
param_type = 'form'
# Data Type & Format
# See:
# https://github.com/wordnik/swagger-core/wiki/1.2-transition#wiki-additions-2
# https://github.com/wordnik/swagger-core/wiki/Parameters
data_type = field.get('type', 'string')
pytype = field.get('pytype', None)
if pytype is not None:
try:
serializer = self._load_class(pytype, callback)
data_type = IntrospectorHelper.get_serializer_name(
serializer)
except (ImportError, ValueError):
pass
if param_type in ['path', 'query', 'header']:
if data_type not in BaseMethodIntrospector.PRIMITIVES:
data_type = 'string'
# Data Format
data_format = field.get('format', None)
f = {
'paramType': param_type,
'name': field.get('name', None),
'description': field.get('description', ''),
'required': field.get('required', False),
}
normalize_data_format(data_type, data_format, f)
if field.get('defaultValue', None) is not None:
f['defaultValue'] = field.get('defaultValue', None)
# Allow Multiple Values &f=1,2,3,4
if field.get('allowMultiple'):
f['allowMultiple'] = True
if f['type'] == 'array':
items = field.get('items', {})
elt_data_type = items.get('type', 'string')
elt_data_format = items.get('format', None)
f['items'] = {
}
normalize_data_format(elt_data_type, elt_data_format, f['items'])
uniqueItems = field.get('uniqueItems', None)
if uniqueItems is not None:
f['uniqueItems'] = uniqueItems
# Min/Max are optional
if 'minimum' in field and data_type == 'integer':
f['minimum'] = str(field.get('minimum', 0))
if 'maximum' in field and data_type == 'integer':
f['maximum'] = str(field.get('maximum', 0))
# enum options
enum = field.get('enum', [])
if enum:
f['enum'] = enum
# File support
if f['type'] == 'file':
f['paramType'] = 'body'
params.append(f)
return params
def discover_parameters(self, inspector):
"""
Applies parameters strategy for parameters discovered
from method and docstring
"""
parameters = []
docstring_params = self.get_parameters(inspector.callback)
method_params = inspector.get_parameters()
# paramType may differ, overwrite first
# so strategy can be applied
for meth_param in method_params:
for doc_param in docstring_params:
if doc_param['name'] == meth_param['name']:
if 'paramType' in doc_param:
meth_param['paramType'] = doc_param['paramType']
for param_type in self.PARAM_TYPES:
if self.should_omit_parameters(param_type):
continue
parameters += self._apply_strategy(
param_type, method_params, docstring_params
)
# PATCH requests expects all fields except path fields to be optional
if inspector.get_http_method() == "PATCH":
for param in parameters:
if param['paramType'] != 'path':
param['required'] = False
return parameters
def should_omit_parameters(self, param_type):
"""
Checks if particular parameter types should be omitted explicitly
"""
return param_type in self.object.get('omit_parameters', [])
def should_omit_serializer(self):
"""
Checks if serializer should be intentionally omitted
"""
return self.object.get('omit_serializer', False)
def _apply_strategy(self, param_type, method_params, docstring_params):
"""
Applies strategy for subset of parameters filtered by `paramType`
"""
strategy = self.get_parameters_strategy(param_type=param_type)
method_params = self._filter_params(
params=method_params,
key='paramType',
val=param_type
)
docstring_params = self._filter_params(
params=docstring_params,
key='paramType',
val=param_type
)
if strategy == 'replace':
return docstring_params or method_params
elif strategy == 'merge':
return self._merge_params(
method_params,
docstring_params,
key='name',
)
return []
@staticmethod
def _filter_params(params, key, val):
"""
Returns the parameters whose `key` equals `val`
(as a list, so emptiness checks behave correctly under Python 3)
"""
def filter_by(o):
return o.get(key, None) == val
return list(filter(filter_by, params))
@staticmethod
def _merge_params(params1, params2, key):
"""
Helper method.
Merges parameters lists by key
"""
import itertools
merged = OrderedDict()
for item in itertools.chain(params1, params2):
merged[item[key]] = item
return [val for (_, val) in merged.items()]
def get_parameters_strategy(self, param_type=None):
"""
Get behaviour strategy for parameter types.
It can be either `merge` or `replace`:
- `merge` overwrites duplicate parameters signatures
discovered by inspector with the ones defined explicitly in
docstring
- `replace` strategy completely overwrites parameters discovered
by inspector with the ones defined explicitly in docstring.
Note: Strategy can be defined per `paramType` so `path` parameters can
use `merge` strategy while `form` parameters will use `replace`
strategy.
Default strategy: `merge`
"""
default = 'merge'
strategy = self.object.get('parameters_strategy', default)
if hasattr(strategy, 'get') and param_type is not None:
strategy = strategy.get(param_type, default)
if strategy not in ['merge', 'replace']:
strategy = default
return strategy
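# Illustrative view method (hypothetical app) whose YAML docstring the parser
# above would pick up:
#
#     class CigarView(APIView):
#         def post(self, request):
#             """
#             Create a cigar.
#             ---
#             serializer: .serializers.CigarSerializer
#             omit_parameters:
#                 - form
#             responseMessages:
#                 - code: 401
#                   message: Not authenticated
#             """
#             ...
#
# get_serializer_class() would resolve the relative serializer path,
# should_omit_parameters('form') would return True, and
# get_response_messages() would yield the 401 entry.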
|
import numpy as np
from pygamoo import Player
from copy import deepcopy
class ClonalSelection(Player):
def __init__(self, num, obj_queue, repair_queue, cmd_exchange, npop, nvars, bounds, host, port, player_parm):
self.nclone = player_parm['nclone']
self.mutate_args = tuple(player_parm['mutate_args'])
super(ClonalSelection, self).__init__(num, obj_queue, repair_queue, cmd_exchange, npop, nvars, bounds, host, port)
def step(self, pop, pop_eval, pattern):
temp_pop = deepcopy(pop)
temp_pop_eval = deepcopy(pop_eval)
arg_sort = temp_pop_eval.argsort()
indices = []
better = []
better_eval = []
evaluation_counter = 0
for rank, arg in enumerate(arg_sort):
clone_num = max(int(self.nclone / (rank + 1) + 0.5), 1)
clones = np.array([self._mutate(temp_pop[arg], pattern) for _ in range(clone_num)])
clones = np.unique(clones, axis=0)
clones = clones[np.any(clones != pop[arg], axis=1)]
if clones.shape[0] > 0:
if self.repair_rpc is not None:
clones = self.evaluate_call(clones, self.repair_rpc)
clones_eval = self.evaluate_call(clones, self.obj_rpc)
evaluation_counter += clones.shape[0]
argmin = clones_eval.argmin()
if clones_eval[argmin] < temp_pop_eval[arg]:
indices.append(arg)
better.append(clones[argmin])
better_eval.append(clones_eval[argmin])
if len(better) > 0:
better = np.stack(better)
better_eval = np.stack(better_eval)
temp_pop[indices] = better
temp_pop_eval[indices] = better_eval
return temp_pop, temp_pop_eval, evaluation_counter
def _mutate(self, ind, pattern):
a, b, sigma = self.mutate_args
r = np.random.random()
if r < a:
ind = self._uniform_mutate(ind, pattern, self.bounds)
elif r < b:
ind = self._gaussian_mutate(ind, pattern, self.bounds, sigma)
else:
ind = self._bound_mutate(ind, pattern, self.bounds)
return ind
@staticmethod
def _uniform_mutate(individual, pattern, bounds):
ind = individual.copy()
if np.sum(pattern) == 0:
return ind
indx = np.where(pattern)[0]
k = np.random.choice(indx)
a = bounds[k][0]
b = bounds[k][1]
ind[k] = np.random.uniform(a, b)
return ind
@staticmethod
def _bound_mutate(individual, pattern, bounds):
ind = individual.copy()
if np.sum(pattern) == 0:
return ind
indx = np.where(pattern)[0]
k = np.random.choice(indx)
a = bounds[k][0]
b = bounds[k][1]
r = np.random.random()
r2 = np.random.uniform(0, 1)
if r < 0.5:
ind[k] = a + (ind[k] - a) * r2
else:
ind[k] = (b - ind[k]) * r2 + ind[k]
return ind
@staticmethod
def _gaussian_mutate(individual, pattern, bounds, sigma):
ind = individual.copy()
if np.sum(pattern) == 0:
return ind
indx = np.where(pattern)[0]
k = np.random.choice(indx)
a = bounds[k][0]
b = bounds[k][1]
ran = sigma * (b - a) * np.random.randn() + ind[k]
if a <= ran <= b:
ind[k] = ran
return ind
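# Rough sketch of the clone schedule implemented in step() above
# (hypothetical nclone=6): the best individual (rank 0) gets
# max(int(6/1 + 0.5), 1) = 6 clones, rank 1 gets 3, rank 2 gets 2,
# rank 3 gets 2, and every remaining rank gets at least 1 clone,
# so better-ranked individuals receive proportionally more mutated copies.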
|
#! /vagrant/analysis/py36env/bin/python3.6
import psycopg2
from contextlib import contextmanager
from pprint import pprint
def connect():
"""
:return:
"""
return psycopg2.connect("dbname=forum")
@contextmanager
def get_cursor():
conn = connect()
cursor = conn.cursor()
try:
yield cursor
except:
conn.rollback()
raise
else:
conn.commit()
finally:
cursor.close()
conn.close()
def set_url_popularity(cur):
cur.execute('''
CREATE TEMPORARY TABLE url_hits AS (
SELECT
path,
COUNT (path) AS total
FROM
log
WHERE
path is DISTINCT FROM '/'
GROUP BY
path
ORDER BY total DESC
)
''')
def set_article_popularity(cur):
cur.execute('''
CREATE TEMPORARY TABLE article_hits AS (
SELECT
title,
total,
articles.author AS author_id
FROM url_hits
LEFT JOIN articles
ON POSITION(articles.slug IN url_hits.path) > 0
)
''')
def set_authors_popularity(cur):
cur.execute('''
CREATE TEMPORARY TABLE authors_popularity AS (
SELECT
name,
total
FROM article_hits
LEFT JOIN authors
ON author_id = authors.id
)
''')
def set_requests_by_day(cur):
cur.execute('''
CREATE TEMPORARY TABLE total_requests_per_day AS (
SELECT
log.time::DATE as time,
COUNT(log.time::DATE) AS hits -- Loses timezone information
FROM log
GROUP BY log.time::DATE
)
''')
def get_most_popular_articles(cur, top=3):
cur.execute('''
SELECT *
FROM article_hits
LIMIT %s;
''', (top, ))
return cur.fetchall()
def get_most_popular_authors(cur, top=3):
cur.execute('''
SELECT
*
FROM authors_popularity
LIMIT %s
''', (top,))
return cur.fetchall()
def get_largest_error_frequency_by_day(cur, top=1):
cur.execute('''
WITH total_errors_per_day AS (
SELECT
log.time::DATE as time,
COUNT(log.time::DATE) AS hits -- Loses timezone information
FROM log
WHERE log.status ~ '4[0-9]{2}'
GROUP BY log.time::DATE
)
SELECT
total_errors_per_day.time as time,
total_errors_per_day.hits/total_requests_per_day.hits::FLOAT as percentage_errors
FROM total_errors_per_day
LEFT JOIN total_requests_per_day
ON total_requests_per_day.time = total_errors_per_day.time
WHERE total_errors_per_day.hits/total_requests_per_day.hits::FLOAT > 0.01
''')
return cur.fetchall()
if __name__ == '__main__':
with get_cursor() as cur:
print('Testing database access. Getting first ten rows from table log.')
cur.execute('SELECT * FROM log LIMIT 10;')
data = cur.fetchall()
pprint(data)
set_url_popularity(cur)
set_article_popularity(cur)
set_authors_popularity(cur)
set_requests_by_day(cur)
print('Most popular articles:')
pprint(get_most_popular_articles(cur))
print('Most popular authors:')
pprint(get_most_popular_authors(cur))
print('Largest day with errors:')
pprint(get_largest_error_frequency_by_day(cur))
|
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Display an opt_tools hist.')
parser.add_argument('hist_file', type=str, nargs='+', help="Paths to the history files to be displayed.")
parser.add_argument('--smooth', help="Smoothing for objective function.", type=int, default=3)
parser.add_argument('--logscale', help="Log scale graphs.", action="store_true")
parser.add_argument('--xaxis', help="time|iter", type=str, default="time")
args = parser.parse_args()
hs = [pd.read_pickle(hf) for hf in args.hist_file]
plt.figure()
for h, f in zip(hs, args.hist_file):
fn = os.path.splitext(os.path.split(f)[-1])[0]
plt.subplot(311)
idx = h.t / 3600 if args.xaxis == "time" else h.i
# plt.plot(idx, np.convolve(h.f, np.ones(args.smooth) / args.smooth, 'same'), label=fn, alpha=0.5)
smooth = np.ones(args.smooth) / args.smooth
plt.plot(np.convolve(idx, smooth, 'valid'), np.convolve(h.f, smooth, 'valid'), label=fn, alpha=0.5)
# plt.plot(idx, h.f, label=fn)
plt.subplot(312)
plt.plot(idx, h.learning_rate)
plt.ylabel("Learning rate")
plt.yscale('log')
plt.subplot(313)
plt.plot(idx, h.minibatch_size)
plt.ylabel("Minibatch size")
plt.yscale('log')
plt.subplot(311)
plt.ylabel("LML bound")
plt.legend()
plt.subplot(313)
plt.xlabel("Time (hrs)")
if args.logscale:
plt.xscale('log')
plt.figure()
for h in hs:
if 'lml' in h.columns:
f = h[~(h.lml == 0.0) & ~np.isnan(h.lml)]
plt.plot(f.t / 3600, f.lml, '-')
else:
plt.plot([])
plt.xlabel("Time (hrs)")
plt.ylabel("LML bound")
plt.figure()
for i, h in enumerate(hs):
try:
# f = h[~h.err.isnull()].filter(regex="model.kern.convrbf.basek*")
ss = h[~h.err.isnull()]
f = ss.filter(regex=".*(lengthscales)")
if f.shape[1] > 0:
plt.plot(f, color="C%i" % i)
f = ss.filter(regex=".*(variance)")
plt.plot(f, color="C%i" % i, alpha=0.5)
# f = h.filter(regex="model.kern.convrbf.basek*")
# plt.plot(h.t, f[~f.acc.isnull()])
except:
pass
plt.xlabel("Time (hrs)")
plt.ylabel("Some hyperparameters")
# plt.figure()
# for i, h in enumerate(hs):
# p = h[~h.acc.isnull()]
# plt.plot(p.t / 3600, p.filter(regex="variance*"), color="C%i" % i)
plt.figure()
for h in hs:
p = h[~h.acc.isnull()]
plt.plot(p.t / 3600, p.err)
# plt.ylim(0.01, 0.03)
plt.xlabel("Time (hrs)")
plt.ylabel("Error")
if args.logscale:
plt.xscale('log')
plt.figure()
for h in hs:
p = h[~h.acc.isnull()]
plt.plot(p.t / 3600, p.nlpp)
# plt.ylim(0.03, 0.08)
plt.xlabel("Time (hrs)")
plt.ylabel("nlpp")
if args.logscale:
plt.xscale('log')
plt.figure()
for h in hs:
plt.plot(h.t / h.tt)
plt.xlabel("Record")
plt.ylabel("Proportion of time optimising")
def reshape_patches_for_plot(patches):
n_patch, dimx, dimy = patches.shape
n_rows = int(np.floor(np.sqrt(n_patch)))
n_cols = int(np.ceil(n_patch / n_rows))
image = np.empty((n_rows * dimx + n_rows - 1, n_cols * dimy + n_cols - 1))
image.fill(np.nan)
for count, p in enumerate(patches):
i = count // n_cols
j = count % n_cols
image[i * (dimx + 1):i * (dimx + 1) + dimx, j * (dimy + 1):j * (dimy + 1) + dimy] = p
return image, n_rows, n_cols
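# Example of the tiling above (hypothetical shapes): 10 patches of 5x5 give
# n_rows = floor(sqrt(10)) = 3, n_cols = ceil(10 / 3) = 4, and an image of
# shape (3*5 + 2, 4*5 + 3) = (17, 23) with NaN gutters between the patches.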
patches_fig = plt.figure()
qm_fig = plt.figure()
w_fig = plt.figure()
for i, h in enumerate(hs):
if not np.any(["conv" in cn or "basekern" in cn for cn in h.columns]):
continue
nsbplt = int(np.ceil(len(hs) ** 0.5))
plt.figure(patches_fig.number)
plt.subplot(nsbplt, nsbplt, i + 1)
Zend = h[~h.acc.isnull()].iloc[-1]['model.Z' if 'model.Z' in h.columns else 'model.Z1']
patch_size = int(Zend.shape[-1] ** 0.5)
qmu = h[~h.acc.isnull()]['model.q_mu'].iloc[-1]
if qmu.shape[1] == 1:
qm = np.vstack(h[~h.acc.isnull()]['model.q_mu'].iloc[-1]).flatten()
s = np.argsort(qm)
else:
s = np.arange(len(Zend))
try:
patch_image, n_rows, n_cols = reshape_patches_for_plot(1 - Zend.reshape(-1, patch_size, patch_size)[s, :, :])
plt.imshow(patch_image, cmap="gray")
plt.clim(-0.25, 1.25)
plt.colorbar()
except:
pass
if qmu.shape[1] == 1:
plt.figure(qm_fig.number)
plt.subplot(nsbplt, nsbplt, i + 1)
plt.imshow(np.hstack((qm[s], np.zeros(n_rows * n_cols - len(qm)))).reshape(n_rows, n_cols))
plt.colorbar()
plt.figure(w_fig.number)
plt.subplot(nsbplt, nsbplt, i + 1)
Wseries = h[~h.acc.isnull()].iloc[-1].filter(regex=".*.W")
if len(Wseries) >= 1 or len(Wseries) == 2352:
patch_weights = Wseries[0].flatten()
patch_h = int(np.ceil(patch_weights.shape[-1] ** 0.5))
if patch_h ** 2.0 != patch_weights.shape[-1]:
patch_h = int(np.ceil((patch_weights.shape[-1] / 3) ** 0.5))
patch_w = int(patch_weights.shape[-1] / patch_h)
plt.imshow(patch_weights.reshape(3, patch_h, patch_h).transpose(1, 0, 2).reshape(patch_h, patch_w))
else:
plt.imshow(patch_weights.reshape(patch_h, patch_h))
plt.colorbar()
plt.show()
|
import uuid
from datetime import datetime
from flask import current_app
from api import db, bcrypt
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String(255), unique=True, nullable=False)
first_name = db.Column(db.String(120), nullable=False)
last_name = db.Column(db.String(120), nullable=False)
email = db.Column(db.String(255), nullable=False, unique=True, index=True)
phone = db.Column(db.String(20), nullable=False)
password = db.Column(db.String(255))
active = db.Column(db.Boolean, default=False, nullable=False)
app_role = db.Column(db.String(10), default="Read")
rank = db.Column(db.String(50), default="Volunteer")
first_aid = db.Column(db.String(50), default="CFR")
updated_at = db.Column(
db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
def __init__(self, first_name, last_name, email, phone, password, active):
self.uuid = str(uuid.uuid4())
self.first_name = first_name
self.last_name = last_name
self.email = email
self.phone = phone
self.password = bcrypt.generate_password_hash(
password, current_app.config["BCRYPT_LOG_ROUNDS"]
).decode()
self.active = active
def verify_password(self, password):
return bcrypt.check_password_hash(self.password, password)
def activate(self):
self.active = True
db.session.commit()
return True
def update(self, data):
self.first_name = data["first_name"]
self.last_name = data["last_name"]
self.email = data["email"]
self.phone = data["phone"]
self.active = data["active"]
self.app_role = data["app_role"]
self.rank = data["rank"]
self.first_aid = data["first_aid"]
db.session.commit()
return True
def create_user(first_name, last_name, email, phone, password, active):
user = User(first_name, last_name, email, phone, password, active)
db.session.add(user)
db.session.commit()
return user
def get_user_by_email(email):
return User.query.filter_by(email=email).first()
def get_all_users():
return User.query.all()
def get_user_by_uuid(uuid):
return User.query.filter_by(uuid=uuid).first()
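# Minimal usage sketch (assumes an application context and an initialised db):
#
#     user = create_user("Ada", "Lovelace", "ada@example.com", "555-0100",
#                        "s3cret", active=False)
#     user.verify_password("s3cret")          # True
#     user.activate()                         # flips `active` and commits
#     get_user_by_email("ada@example.com")    # returns the same user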
|
import socket
import logging
import sys
from threading import Thread
HOST = "127.0.0.1"
PORT = 8080
ADDR = (HOST, PORT)
BUFFERSIZE = 4096
def print_chat(text):
"""Imprime a mensagem de maneira bonita"""
sys.stdout.write("\u001b[1L\u001b[1A\u001b[1B\u001b[1000D")
sys.stdout.write(text)
sys.stdout.write("\u001b[1B\u001b[1000D\u001b[7C")
sys.stdout.flush()
def receive():
"""Recebe e imprime as mensagens dos outros usuário"""
while True:
try:
data = sock.recv(BUFFERSIZE)
print_chat(str(data, "utf-8"))
except Exception as e:
logging.info("Erro de conexão")
logging.error(e)
break
def send():
"""Envia a mensagem para o servidor"""
while True:
msg = input("Você > ")
data = "{}".format(msg)
sock.send(bytes(data, "utf-8"))
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
user_name = input("Username: ")
# Connect to the server and introduce this user
sock.connect(ADDR)
sock.send(bytes(user_name, "utf-8"))
logging.info("Conectado ao servidor.")
# Create and start the send/receive threads so the user is never
# blocked while sending or receiving messages
thread_send = Thread(target=send)
thread_recv = Thread(target=receive)
thread_send.start()
thread_recv.start()
|
import zmq
import threading
import queue
import time
import random
class Router():
"""
Router Limited to localhost
"""
def __init__(self, routes, CURVE=False):
self.routes = routes
# self.connectionTimeout = 2000
self.connectionTimeout = 1800
self.protocol = 'tcp'
self.serverSockets = {}
self.clientSockets = {}
self.CURVE = CURVE
self.termination = False
self.latency_range = [800, 1200]
self.drop_msg_prob = 0.95
self.drop_msg_period = 1000 * 10 # 10 seconds in milliseconds
# self.drop_msg_period = 1000 * 5 # 5 seconds in milliseconds
self.last_drop_msg_time = None
self.use_latency = False
self.use_drop_msg = True
if self.CURVE:
self.createServerSocket = self._createServerSocket_CURVE
self.createClientSocket = self._createClientSocket_CURVE
else:
self.createServerSocket = self._createServerSocket_simple
self.createClientSocket = self._createClientSocket_simple
self.changeTime = time.time() + 75
def start(self):
for routeID in range(len(self.routes)):
self.createServerSocket(routeID)
self.createClientSocket(routeID)
msgQueue = queue.Queue()
server_thread = threading.Thread(target=self.run_server, args=[routeID, msgQueue])
client_thread = threading.Thread(target=self.run_client, args=[routeID, msgQueue])
server_thread.start()
client_thread.start()
def run_server(self, routeID, msgQueue):
serverSocket = self.serverSockets[routeID]
while not self.termination:
try:
msg = serverSocket.recv_multipart()
except Exception as e:
continue
msgQueue.put([msg, time.time()])
def run_client(self, routeID, msgQueue):
clientSocket = self.clientSockets[routeID]
serverSocket = self.serverSockets[routeID]
changed = False
while not self.termination:
try:
msg, arrival_time = msgQueue.get(timeout=self.connectionTimeout/1000)
except Exception as e:
continue
delay = self.compute_latency(arrival_time)
if delay > 0:
print("total delay:", delay)
time.sleep(delay)
if self.toDropMsg():
print("Dropping Msg!")
continue
clientID = msg[0]
clientSocket.send_multipart(msg[1:])
try:
RESP = clientSocket.recv()
except Exception as e:
continue
serverSocket.send_multipart([clientID, RESP])
if self.changeTime <= time.time() and not changed:
changed = True
self.connectionTimeout = 1000
clientSocket.RCVTIMEO = self.connectionTimeout
serverSocket.RCVTIMEO = self.connectionTimeout
self.use_drop_msg = False
print("CHANGED!!!")
def compute_latency(self, arrival_time):
if self.use_latency:
latency = random.uniform(self.latency_range[0], self.latency_range[1])
current_time = time.time()
delay = arrival_time + latency/1000 - current_time
return delay
else:
return 0
def toDropMsg(self):
if not self.use_drop_msg:
return False
toDrop = False
if self.last_drop_msg_time is None:
self.last_drop_msg_time = time.time()
if time.time() - self.last_drop_msg_time > self.drop_msg_period / 1000:
self.last_drop_msg_time = time.time()
if random.random() < self.drop_msg_prob:
toDrop = True
return toDrop
def _createServerSocket_simple(self, routeID):
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.RCVTIMEO = self.connectionTimeout
port = self.routes[routeID][0]
addr = "{0}://*:{1}".format(self.protocol, port)
socket.bind(addr)
self.serverSockets[routeID] = socket
return socket
def _createClientSocket_simple(self, routeID):
context = zmq.Context()
socket = context.socket(zmq.DEALER)
routeID_bin = str.encode(chr(routeID))
socket.setsockopt(zmq.IDENTITY, routeID_bin)
IP, port = '127.0.0.1', self.routes[routeID][1]
addr = "{0}://{1}:{2}".format(self.protocol, IP, port)
socket.RCVTIMEO = self.connectionTimeout
socket.connect(addr)
self.clientSockets[routeID] = socket
return socket
def _createServerSocket_CURVE(self, routeID):
# CURVE-secured sockets are not implemented; keep the same signature
# as the simple variants so start() can call either interchangeably.
pass
def _createClientSocket_CURVE(self, routeID):
pass
def terminate(self):
self.termination = True
def createRouter(raftConfigs):
servers = {}
routes = []
for raftParams in raftConfigs:
for k,v in raftParams['servers'].items():
if k not in servers:
servers[k] = v
for raftParams in raftConfigs:
input_port = servers[raftParams['serverID']][-1]
output_port = raftParams['port']
routes.append([input_port, output_port])
router_instance = Router(routes)
return router_instance
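# Minimal usage sketch (hypothetical ports): each route is a
# [server_port, client_port] pair; the Router binds a ROUTER socket on the
# first port and forwards every request through a DEALER connected to the
# second, optionally injecting latency or dropping messages.
#
#     router = Router([[5555, 6555], [5556, 6556]])
#     router.start()      # one server thread + one client thread per route
#     ...
#     router.terminate()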
|
"""
Convenience interface for NSDictionary/NSMutableDictionary
"""
__all__ = ()
from objc._convenience_mapping import addConvenienceForBasicMapping
from objc._convenience import container_wrap, container_unwrap, addConvenienceForClass
from objc._objc import lookUpClass
import sys, os, collections.abc
NSDictionary = lookUpClass("NSDictionary")
NSMutableDictionary = lookUpClass("NSMutableDictionary")
addConvenienceForBasicMapping("NSDictionary", True)
addConvenienceForBasicMapping("NSMutableDictionary", False)
def _all_contained_in(inner, outer):
"""
Return True iff all items in ``inner`` are also in ``outer``.
"""
for v in inner:
if v not in outer:
return False
return True
def nsdict__len__(self):
return self.count()
def nsdict__iter__(self):
return iter(self.keyEnumerator())
class nsdict_view(collections.abc.Set):
__slots__ = ()
def __eq__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) == len(other):
return _all_contained_in(self, other)
else:
return False
def __ne__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) == len(other):
return not _all_contained_in(self, other)
else:
return True
def __lt__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) < len(other):
return _all_contained_in(self, other)
else:
return False
def __le__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) <= len(other):
return _all_contained_in(self, other)
else:
return False
def __gt__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) > len(other):
return _all_contained_in(other, self)
else:
return False
def __ge__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
if len(self) >= len(other):
return _all_contained_in(other, self)
else:
return False
def __and__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.intersection_update(other)
return result
def __rand__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.intersection_update(other)
return result
def __or__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.update(other)
return result
def __ror__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.update(other)
return result
def __sub__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.difference_update(other)
return result
def __rsub__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(other)
result.difference_update(self)
return result
def __xor__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.symmetric_difference_update(other)
return result
def __rxor__(self, other):
if not isinstance(other, collections.abc.Set):
return NotImplemented
result = set(self)
result.symmetric_difference_update(other)
return result
class nsdict_keys(nsdict_view):
__slots__ = ("__value",)
def __init__(self, value):
self.__value = value
def __repr__(self):
keys = list(self.__value)
return "<nsdict_keys({0})>".format(keys)
def __len__(self):
return len(self.__value)
def __iter__(self):
return iter(self.__value)
def __contains__(self, value):
return value in self.__value
class nsdict_values(nsdict_view):
__slots__ = ("__value",)
def __init__(self, value):
self.__value = value
def __repr__(self):
values = list(self)
values.sort()
return "<nsdict_values({0})>".format(values)
def __len__(self):
return len(self.__value)
def __iter__(self):
return iter(self.__value.objectEnumerator())
def __contains__(self, value):
for v in iter(self):
if value == v:
return True
return False
class nsdict_items(nsdict_view):
__slots__ = ("__value",)
def __init__(self, value):
self.__value = value
def __repr__(self):
values = list(self)
values.sort()
return "<nsdict_items({0})>".format(values)
def __len__(self):
return len(self.__value)
def __iter__(self):
for k in self.__value:
yield (k, self.__value[k])
def __contains__(self, value):
for v in iter(self):
if value == v:
return True
return False
collections.abc.KeysView.register(nsdict_keys)
collections.abc.ValuesView.register(nsdict_values)
collections.abc.ItemsView.register(nsdict_items)
collections.abc.Mapping.register(NSDictionary)
collections.abc.MutableMapping.register(NSMutableDictionary)
if int(os.uname()[2].split(".")[0]) <= 10:
# Limited functionality on OSX 10.6 and earlier
def nsdict_fromkeys(cls, keys, value=None):
keys = [container_wrap(k) for k in keys]
values = [container_wrap(value)] * len(keys)
return NSDictionary.dictionaryWithObjects_forKeys_(values, keys)
# XXX: 'nsdict_fromkeys' doesn't work on OSX 10.5
def nsmutabledict_fromkeys(cls, keys, value=None):
value = container_wrap(value)
result = NSMutableDictionary.alloc().init()
for k in keys:
result[container_wrap(k)] = value
return result
else:
def nsdict_fromkeys(cls, keys, value=None):
keys = [container_wrap(k) for k in keys]
values = [container_wrap(value)] * len(keys)
return cls.dictionaryWithObjects_forKeys_(values, keys)
def nsmutabledict_fromkeys(cls, keys, value=None):
value = container_wrap(value)
result = cls.alloc().init()
for k in keys:
result[container_wrap(k)] = value
return result
def nsdict_new(cls, *args, **kwds):
if len(args) == 0:
pass
elif len(args) == 1:
d = dict()
if isinstance(args[0], collections.abc.Mapping):
items = args[0].items()
else:
items = args[0]
for k, v in items:
d[container_wrap(k)] = container_wrap(v)
for k, v in kwds.items():
d[container_wrap(k)] = container_wrap(v)
return cls.dictionaryWithDictionary_(d)
else:
raise TypeError("dict expected at most 1 arguments, got {0}".format(len(args)))
if kwds:
d = dict()
for k, v in kwds.items():
d[container_wrap(k)] = container_wrap(v)
return cls.dictionaryWithDictionary_(d)
return cls.dictionary()
def nsdict__eq__(self, other):
if not isinstance(other, collections.abc.Mapping):
return False
return self.isEqualToDictionary_(other)
def nsdict__ne__(self, other):
return not nsdict__eq__(self, other)
def nsdict__lt__(self, other):
return NotImplemented
def nsdict__le__(self, other):
return NotImplemented
def nsdict__ge__(self, other):
return NotImplemented
def nsdict__gt__(self, other):
return NotImplemented
addConvenienceForClass(
"NSDictionary",
(
("keys", lambda self: nsdict_keys(self)),
("values", lambda self: nsdict_values(self)),
("items", lambda self: nsdict_items(self)),
),
)
addConvenienceForClass(
"NSDictionary",
(
("__new__", staticmethod(nsdict_new)),
("fromkeys", classmethod(nsdict_fromkeys)),
("__eq__", nsdict__eq__),
("__ne__", nsdict__ne__),
("__lt__", nsdict__lt__),
("__le__", nsdict__le__),
("__gt__", nsdict__gt__),
("__ge__", nsdict__ge__),
("__len__", nsdict__len__),
("__iter__", nsdict__iter__),
),
)
addConvenienceForClass(
"NSMutableDictionary",
(
("__new__", staticmethod(nsdict_new)),
("fromkeys", classmethod(nsdict_fromkeys)),
("clear", lambda self: self.removeAllObjects()),
),
)
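# Usage sketch (macOS / PyObjC only): after the registrations above an
# NSMutableDictionary behaves much like a regular dict, e.g.
#
#     d = NSMutableDictionary.dictionaryWithDictionary_({"a": 1})
#     list(d.keys())      # nsdict_keys view over the Cocoa keys
#     d.clear()           # forwards to removeAllObjects()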
|
import sys
n = int(sys.stdin.readline())
for _ in range(n):
n, k = sys.stdin.readline().strip().split()
n, k = int(n), int(k)
if n % 2 == 0:
# even
if n < k or (k % 2 == 1 and n < 2 * k):
print("NO")
else:
print("YES")
if k % 2 == 0:
digits = ["1"] * (k-1) + [str(n - (k - 1))]
else:
digits = ["2"] * (k-1) + [str(n - 2 * (k-1))]
print(" ".join(digits))
else:
# odd
if k % 2 == 0 or k > n:
print("NO")
else:
print("YES")
digits = ["1"] * (k-1) + [str(n - (k - 1))]
print(" ".join(digits))
|
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class TestPackageConan(ConanFile):
def test(self):
if not tools.cross_building(self.settings):
self.run("some_tool --version")
|
# OpenWeatherMap API Key
weather_api_key = "6edbbe4fc8bc867709594d77824c6aed"
# Google API Key
g_key = "AIzaSyC-8Y9a59V9dNpKTAOE3bwfTVS8kcMqANE"
|
import logging
from abc import ABCMeta, abstractmethod
import torch
from torch import nn
from quati import constants
logger = logging.getLogger(__name__)
class Model(torch.nn.Module):
__metaclass__ = ABCMeta
def __init__(self, fields_tuples):
super().__init__()
# Default fields and embeddings
self.fields_dict = dict(fields_tuples)
self.target_field = self.fields_dict['target']
# Building flag
self.is_built = False
# Loss function has to be defined later!
self._loss = None
@property
def nb_classes(self):
pad_shift = int(constants.PAD in self.target_field.vocab.stoi)
return len(self.target_field.vocab) - pad_shift # remove pad index
@abstractmethod
def forward(self, *args, **kwargs):
raise NotImplementedError
def load(self, path):
logger.info("Loading model weights from {}".format(path))
self.load_state_dict(
torch.load(str(path), map_location=lambda storage, loc: storage)
)
def save(self, path):
logger.info("Saving model weights to {}".format(path))
torch.save(self.state_dict(), str(path))
def build_loss(self, loss_weights=None):
if loss_weights is not None:
loss_weights = torch.tensor(loss_weights).float()
self._loss = nn.NLLLoss(weight=loss_weights,
ignore_index=constants.TARGET_PAD_ID)
def loss(self, pred, gold):
# (bs, ts, nb_classes) -> (bs*ts, nb_classes)
predicted = pred.reshape(-1, self.nb_classes)
# (bs, ts, ) -> (bs*ts, )
gold = gold.reshape(-1)
return self._loss(predicted, gold)
def predict_probas(self, batch):
log_probs = self.forward(batch)
probs = torch.exp(log_probs) # assume log softmax in the output
return probs
def predict_classes(self, batch):
classes = torch.argmax(self.predict_probas(batch), -1)
return classes
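# Minimal subclass sketch (hypothetical encoder and hidden size): a concrete
# model only has to implement forward() and return log-probabilities, e.g.
#
#     class LinearTagger(Model):
#         def __init__(self, fields_tuples, hidden_size=128):
#             super().__init__(fields_tuples)
#             self.proj = nn.Linear(hidden_size, self.nb_classes)
#             self.is_built = True
#         def forward(self, batch):
#             h = ...  # encode `batch` into a (bs, ts, hidden_size) tensor
#             return torch.log_softmax(self.proj(h), dim=-1)
#
# predict_probas(), predict_classes() and loss() then work unchanged.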
|
from enum import Enum
class Color(Enum):
NoColor = 0
Red = 1
Green = 2
Blue = 3
Yellow = 4
@classmethod
def get_from(cls, color_string: str):
if color_string == 'rouge':
return Color.Red
elif color_string == 'vert':
return Color.Green
elif color_string == 'bleu':
return Color.Blue
elif color_string == 'orange':
return Color.Yellow
else:
return Color.NoColor
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack.package import *
class AutodockVina(MakefilePackage):
"""AutoDock Vina is an open-source program for doing molecular docking"""
homepage = "http://vina.scripps.edu/"
url = "https://github.com/ccsb-scripps/AutoDock-Vina/archive/refs/tags/v1.2.3.tar.gz"
version('1.2.3', sha256='22f85b2e770b6acc363429153b9551f56e0a0d88d25f747a40d2f55a263608e0')
version('1.2.2', sha256='b9c28df478f90d64dbbb5f4a53972bddffffb017b7bb58581a1a0034fff1b400')
version('1.2.1', sha256='2d8d9871a5a95265c03c621c0584d9f06b202303116e6c87e23c935f7b694f74')
version('1.2.0', sha256='9c9a85766b4d124d7c1d92e767aa8b4580c6175836b8aa2c28f88a9c40a5b90e')
version('1.1.2', sha256='65422b2240c75d40417872a48e98043e7a7c435300dc8490af0c1f752f1ca4a2',
url='https://github.com/ccsb-scripps/AutoDock-Vina/archive/refs/tags/v1.1.2-boost-new.tar.gz')
depends_on('boost@1.50.0:1.75.0 +filesystem +program_options +serialization +system +thread', when='@1.1.2')
depends_on('boost@1.54.0: +filesystem +program_options +serialization +system +thread', when='@1.2.0:')
@property
def build_directory(self):
if sys.platform == "darwin":
return join_path('build', 'mac', 'release')
else:
return join_path('build', 'linux', 'release')
def edit(self, spec, prefix):
with working_dir(self.build_directory):
makefile = FileFilter('Makefile')
makefile.filter('BOOST_INCLUDE = .*', 'BOOST_INCLUDE = %s' %
self.spec['boost'].prefix.include)
makefile.filter('C_PLATFORM=.*', 'C_PLATFORM=-pthread')
makefile.filter('GPP=.*', 'GPP=%s' % spack_cxx)
def build(self, spec, prefix):
with working_dir(self.build_directory):
make()
def install(self, spec, prefix):
with working_dir(self.build_directory):
mkdirp(prefix.bin)
install('vina', prefix.bin)
install('vina_split', prefix.bin)
|
import pytest
from tuneit.graph import Graph, Node, Key
class String(str):
def __init__(self, val):
self.__name__ = val
self.__label__ = val
self.__dot_attrs__ = {}
def __iter__(self):
raise TypeError
def test_graph():
a = Graph()
a["foo"] = String("bar")
b = Node("foo2")
b.value = (a["foo"], "bar", {"one": 1})
with pytest.raises(KeyError):
Node("foo", (a["foo"], "bar"))
assert isinstance(a["foo"], Node)
assert "foo" in a
assert "foo" in list(a)
assert isinstance(a["foo"].value, str) and a["foo"].key == "foo"
assert isinstance(a["foo"].value, str) and a["foo"].value == "bar"
assert a["foo"] == Node("foo", a)
assert a["foo"] == "bar"
assert a["foo"] == Key(a["foo"])
assert str(a["foo"]) == "bar"
assert a["foo"] != a
assert a[a["foo"]] == a["foo"]
assert a["foo"] in a
assert repr(a).startswith("Graph")
assert repr(a["foo"]).startswith("Node")
assert repr(Key(a["foo"])).startswith("Key")
assert a == a.backend
assert a.visualize()
with pytest.raises(KeyError):
a.visualize(end="bar")
with pytest.raises(KeyError):
a.visualize(start="bar")
assert b.graph.visualize(end=b).source == b.visualize().source
assert b.visualize().source == b.visualize(start="foo").source
assert b.visualize().source != b.visualize(start=a["foo2"]).source
def test_dict_methods():
a = Graph(dict(one=1, two=2, three=3))
assert a.get("one", None) == 1
assert a.get("four", None) == None
b = a.copy()
assert a == b
assert b.pop("one") == 1
assert "one" not in b
assert "one" in a
b.update(a)
assert a == b
|