content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
import sys
for _ in range(int(input().strip())):
arr = [list(input()) for _ in range(int(input().strip()))]
new_arr = []
for ar in arr:
temp = ([ord(a) for a in ar])
new_arr.append(sorted(temp))
for i in range(0, len(new_arr) - 1):
if new_arr[i + 1][0] < new_arr[i][0] or new_arr[i + 1][-1] < new_arr[i][-1]:
print('NO')
break
else:
print('YES')
|
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import pandas as pd
import numpy as np
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=['A', 'B', 'C', 'D'])
df.iloc[2,2] = 1111
df.loc['2013-01-03', 'D'] = 2222
df.A[df.A>0] = 0
df['F'] = np.nan
df['G'] = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130101', periods=6))
print(df) |
"""
Copyright 2018, Oath Inc.
Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.
The Panoptes Context is one of the most important abstractions throughout the system. It is a thread-safe interface to
configuration and utilities throughout the system.
A Panoptes Context holds the system wide configuration, a logger and a Redis connection pool
In addition, a Context can optionally hold the following: multiple Key/Value stores, a Message Producer (with its
underlying Kafka Client) and a ZooKeeper client
The Context object, once created, would be passed between multiple objects and methods within a process
"""
import os
import inspect
import logging
import re
from logging import StreamHandler, Formatter
import kazoo.client
import kazoo.client
import redis
from kafka import KafkaClient
from kafka.common import ConnectionError
from kazoo.exceptions import LockTimeout
from . import const
from .validators import PanoptesValidators
from .configuration_manager import PanoptesConfig
from .exceptions import PanoptesBaseException
from .utilities.helpers import get_calling_module_name
from .utilities.key_value_store import PanoptesKeyValueStore
from .utilities.message_queue import PanoptesMessageQueueProducer
class PanoptesContextError(PanoptesBaseException):
    """Raised for any error encountered while creating a PanoptesContext."""
    pass
class PanoptesContextValidators(object):
    """Validators for PanoptesContext arguments."""

    @classmethod
    def valid_panoptes_context(cls, panoptes_context):
        """
        Checks if the passed object is an instance of PanoptesContext

        Args:
            panoptes_context (PanoptesContext): The object to check

        Returns:
            bool: True if the object is an instance of PanoptesContext
        """
        # isinstance already returns False for None; the original
        # 'panoptes_context and isinstance(...)' returned the falsy object
        # itself (e.g. None) instead of the documented bool
        return isinstance(panoptes_context, PanoptesContext)
class PanoptesContext(object):
    """
    A thread-safe object that parses system wide config and sets up clients to various stores like Redis, Zookeeper and
    Kafka.

    A PanoptesContext is an essential object for all Panoptes subsystems. Creating a context does the following:
    * Parses and loads the system wide Panoptes configuration file
    * Creates a Python logger hierarchy based on the logging configuration file name provided in the system wide \
    configuration
    * A Redis Connection Pool (created through a call to the PanoptesConfiguraton class)
    * (Optional) Creates one or more Key/Value stores
    * (Optional) Creates a message producer client
    * (Optional) Creates a ZooKeeper client

    Args:
        config_file (str): Absolute path to the system wide configuration file
        key_value_store_class_list (list): A list of the Key/Value classes. PanoptesContext will create KV stores
        based on each class. Default is an empty list
        create_message_producer (bool): Whether a message producer client should be created. Default is False
        async_message_producer (bool): Whether the message producer client should be asynchronous. Default is False. \
        This parameter only matters if the create_message_producer is set to True
        create_zookeeper_client (bool): Whether a ZooKeeper client should be created

    Notes:
        * If a ZooKeeper client is created, three additional threads are created by the Kazoo library
    """
    # Class-level fallback root logger, shared by every PanoptesContext instance;
    # created lazily on first __init__ so error messages have somewhere to go even
    # if configuration parsing fails
    __rootLogger = None
def __init__(self, config_file=None, key_value_store_class_list=None,
create_message_producer=False, async_message_producer=False, create_zookeeper_client=False):
assert config_file is None or PanoptesValidators.valid_nonempty_string(config_file), \
'config_file must be a non-empty string'
assert key_value_store_class_list is None or isinstance(key_value_store_class_list,
list), 'key_value_store_class_list must be a list'
self.__redis_connections = dict()
self.__kv_stores = dict()
self.__message_producer = None
"""
Setup a default root logger so that in case configuration parsing or logger hierarchy creation fails, we have
a place to send the error messages for failures
"""
if not self.__class__.__rootLogger:
try:
self.__class__.__rootLogger = logging.getLogger(const.DEFAULT_ROOT_LOGGER_NAME)
self.__class__.__rootLogger.setLevel(logging.INFO)
handler = StreamHandler()
handler.setFormatter(Formatter(fmt=const.DEFAULT_LOG_FORMAT))
self.__class__.__rootLogger.addHandler(handler)
except Exception as e:
raise PanoptesContextError('Could not create root logger: %s' % str(e))
if not config_file:
if const.CONFIG_FILE_ENVIRONMENT_VARIABLE in os.environ:
config_file = os.environ[const.CONFIG_FILE_ENVIRONMENT_VARIABLE]
else:
config_file = const.DEFAULT_CONFIG_FILE_PATH
try:
self.__logger = self.__class__.__rootLogger
self.__config = self._get_panoptes_config(config_file)
self.__logger = self._get_panoptes_logger()
except Exception as e:
raise PanoptesContextError('Could not create PanoptesContext: %s' % str(e))
self.__redis_pool = self.get_redis_connection(const.DEFAULT_REDIS_GROUP_NAME)
"""
Instantiate the KeyValueStore classes provide in the list (if any) and store the reference to the objects
created in an object dictionary called __kv_stores
"""
if key_value_store_class_list is not None:
for key_value_store_class in key_value_store_class_list:
if not inspect.isclass(key_value_store_class):
raise PanoptesContextError('Current item in key_value_store_class_list is not a class')
if not issubclass(key_value_store_class, PanoptesKeyValueStore):
raise PanoptesContextError(key_value_store_class.__name__ +
" in key_value_store_class_list does not subclass PanoptesKeyValueStore")
for key_value_store_class in key_value_store_class_list:
self.__kv_stores[key_value_store_class.__name__] = self._get_kv_store(key_value_store_class)
if create_message_producer:
self._kafka_client = self._get_kafka_client()
if create_message_producer:
self.__message_producer = self._get_message_producer(async_message_producer)
if create_zookeeper_client:
self.__zookeeper_client = self._get_zookeeper_client()
def __repr__(self):
kv_repr = 'KV Stores: [' + ','.join([str(Obj) for Obj in self.kv_stores]) + ']'
config_repr = 'Config: ' + repr(self.config_object)
redis_pool_repr = 'Redis pool set: ' + str(hasattr(self, '__redis_pool'))
message_producer_repr = 'Message producer set: ' + str(hasattr(self, '__message_producer'))
kafka_client_repr = 'Kafka client set: ' + str(hasattr(self, '_kafka_client'))
zk_client_repr = 'Zookeeper client set: ' + str(hasattr(self, '__zookeeper_client'))
return '[PanoptesContext: %s, %s, %s, %s, %s, %s]' \
% (kv_repr, config_repr, redis_pool_repr, message_producer_repr, kafka_client_repr, zk_client_repr)
def __del__(self):
"""
Attempt to do resource clean-up when the reference count for PanoptesContext goes to zero, namely:
* Delete any KV stores. Then delete __kv_stores.
* Disconnect the redis pool.
* Stop the message producer if one was requested/created.
* Flush and close the kafka client.
* Stop and close the zookeeper client if one was requested/created.
"""
try:
del self.__kv_stores
except AttributeError as e:
self.logger.error('__kv_stores attribute no longer exists: %s' % str(e))
except Exception as e:
self.logger.error('Attempt to delete _kv_stores failed: %s' % str(e))
if hasattr(self, '_' + self.__class__.__name__ + '__message_producer'):
try:
self.__message_producer.stop()
except Exception as e:
self.logger.error('Attempt to stop message producer failed: %s' % str(e))
if hasattr(self, '_kafka_client'):
try:
self._kafka_client.close()
except Exception as e:
self.logger.error('Attempt to close the Kafka client failed: %s' % str(e))
if hasattr(self, '_' + self.__class__.__name__ + '__zookeeper_client'):
try:
self.__zookeeper_client.stop()
self.__zookeeper_client.close()
except Exception as e:
self.logger.error('Attempt to stop and close the zookeeper client failed: %s' % str(e))
def _get_panoptes_config(self, config_file):
"""
Returns the system wide configuration to be used with the context
Args:
config_file (str): The path and name of the configuration file to parse
Returns:
PanoptesConfig: The Panoptes Config object that holds the system wide configuration
Raises:
PanoptesContextError: This exception is raised if any errors happen in reading or parsing the configuration
file
"""
self.__logger.info('Attempting to get Panoptes Configuration')
try:
panoptes_config = PanoptesConfig(self.__class__.__rootLogger, config_file)
except Exception as e:
raise PanoptesContextError('Could not get Panoptes Configuration object: %s' % str(e))
self.__logger.info('Got Panoptes Configuration: %s' % panoptes_config)
return panoptes_config
def _get_panoptes_logger(self):
"""
Returns the logger to be used by the context
The method attempts to guess the name of the calling module based on introspection of the stack
Returns:
logger(logger): A Python logger subsystem logger
Raises:
PanoptesContextError: This exception is raised is any errors happen trying to instantiate the logger
"""
self.__logger.info('Attempting to get logger')
try:
module = get_calling_module_name()
logger = self.__rootLogger.getChild(module)
self.__logger.info('Got logger for module %s' % module)
return logger
except Exception as e:
raise PanoptesContextError('Could not get logger: %s' % str(e))
def _get_redis_connection(self, group, shard):
"""
Create and return a Redis Connection for the given group
Returns:
redis.StrictRedis: The Redis Connection
Raises:
Exception: Passes through any exceptions that happen in trying to get the connection pool
"""
redis_group = self.__config.redis_urls_by_group[group][shard]
self.__logger.info('Attempting to connect to Redis for group "{}", shard "{}", url "{}"'.format(group, shard,
redis_group))
redis_pool = redis.BlockingConnectionPool(host=redis_group.host,
port=redis_group.port,
db=redis_group.db,
password=redis_group.password)
redis_connection = redis.StrictRedis(connection_pool=redis_pool)
self.__logger.info('Successfully connected to Redis for group "{}", shard "{}", url "{}"'.format(group, shard,
redis_group))
return redis_connection
def _get_kv_store(self, cls):
"""
Create and return a Key/Value store
Args:
cls (class): The class of the Panoptes Key/Value store to create
Returns:
PanoptesKeyValueStore: The Key/Value store object created
Raises:
PanoptesContextError: Passes through any exceptions that happen in trying to create the Key/Value store
"""
self.__logger.info('Attempting to connect to KV Store "{}"'.format(cls.__name__))
try:
key_value_store = cls(self)
except Exception as e:
raise PanoptesContextError('Could not connect to KV store "{}": {}'.format(cls.__name__, repr(e)))
self.__logger.info('Connected to KV Store "{}": {}'.format(cls.__name__, key_value_store))
return key_value_store
def _get_kafka_client(self):
"""
Create and return a Kafka Client
Returns:
KafkaClient: The created Kafka client
Raises:
PanoptesContextError: Passes through any exceptions that happen in trying to create the Kafka client
"""
# The logic of the weird check that follows is this: KafkaClient initialization can fail if there is a problem
# connecting with even one broker. What we want to do is: succeed if the client was able to connect to even one
# broker. So, we catch the exception and pass it through - and then check the number of brokers connected to the
# client in the next statement (if not kafka_client.brokers) and fail if the client is not connected to any
# broker
self.__logger.info('Attempting to connect Kafka')
config = self.__config
kafka_client = None
try:
kafka_client = KafkaClient(config.kafka_brokers)
except ConnectionError:
pass
if not kafka_client.brokers:
raise PanoptesContextError('Could not connect to any Kafka broker from this list: %s'
% config.kafka_brokers)
self.__logger.info('Successfully connected to Kafka brokers: %s' % kafka_client.brokers)
return kafka_client
def _get_message_producer(self, async):
"""
Creates and returns a Message Producer
Args:
async (bool): Whether the created message producer should be asynchronous or not
Returns:
PanoptesMessageQueueProducer: The created message producer
Raises:
PanoptesContextError: asses through any exceptions that happen in trying to create the message producer
"""
self.__logger.info('Attempting to connect to message bus')
try:
message_producer = PanoptesMessageQueueProducer(self, async)
except Exception as e:
raise PanoptesContextError('Could not connect to message bus: %s' % str(e))
self.__logger.info('Connected to message bus: %s' % message_producer)
return message_producer
def _get_zookeeper_client(self):
"""
Create and return a ZooKeeper client
Returns:
KazooClient: The created ZooKeeper client
Raises:
PanoptesContextError: Passes through any exceptions that happen in trying to create the ZooKeeper client
"""
config = self.__config
if not config.zookeeper_servers:
raise PanoptesContextError('No Zookeeper servers configured')
self.__logger.info('Attempting to connect to Zookeeper with servers: %s' % ",".join(config.zookeeper_servers))
try:
zk = kazoo.client.KazooClient(hosts=",".join(config.zookeeper_servers))
zk.start()
except Exception as e:
raise PanoptesContextError('Could not connect to Zookeeper: %s' % str(e))
self.__logger.info('Successfully connected to Zookeeper: %s' % zk)
return zk
def get_kv_store(self, key_value_store_class_name):
"""
Get the Key Value Store object associated with the provided KeyValueStore class
Args:
key_value_store_class_name (class): The class (not just the classname string) for which the object is \
desired
Returns:
KeyValueStore: An object which can be used to set/get values from the associated Key/Value store. Raises \
PanoptesContextError if an object of the specified class does not exist
"""
try:
return self.__kv_stores[key_value_store_class_name.__name__]
except KeyError:
raise PanoptesContextError(
'No Key Value Store based on class %s' % key_value_store_class_name)
def get_redis_shard_count(self, group, fallback_to_default=True):
try:
return len(self.__config.redis_urls_by_group[group])
except:
if (group != const.DEFAULT_REDIS_GROUP_NAME) and fallback_to_default:
return len(self.__config.redis_urls_by_group[const.DEFAULT_REDIS_GROUP_NAME])
else:
raise
def get_redis_connection(self, group, shard=0, fallback_to_default=True):
"""
Returns a Redis connection for the given group and shard
Args:
group (str): The name of the group for which to return the Redis connection
shard (int): The number of the shard for which to return the Redis connection
fallback_to_default (bool): If we can't find a connection for given group, whether to fallback to the \
'default` group name
Returns:
redis.StrictRedis: The Redis connection
"""
def _inner_get_redis_connection():
try:
connection = self._get_redis_connection(group, shard)
self.__redis_connections[group][shard] = connection
except:
if (group != const.DEFAULT_REDIS_GROUP_NAME) and fallback_to_default:
self.__redis_connections[group][shard] = self.redis_pool
else:
raise
if group not in self.__redis_connections:
self.__redis_connections[group] = dict()
_inner_get_redis_connection()
elif shard not in self.__redis_connections[group]:
_inner_get_redis_connection()
return self.__redis_connections[group][shard]
def get_lock(self, path, timeout, retries=1, identifier=None, listener=None):
"""
A wrapper around the kazoo library lock
Args:
path (str): A '/' separated path for the lock
timeout (int): in seconds. Must be a positive integer
retries (int): how many times to try before giving up. Zero implies try forever
identifier (str): Name to use for this lock contender. This can be useful for querying \
to see who the current lock contenders are
listener (callable): The callable to use to handle Zookeeper state changes
Returns:
kazoo.recipe.lock.Lock: lock
"""
assert isinstance(path, str) and re.search("^/\S+", path), 'path must be a non-empty string that begins with /'
assert isinstance(timeout, int) and (timeout > 0), 'timeout must be a positive integer'
assert isinstance(retries, int) and (retries > -1), 'retries must be a non-negative integer'
assert identifier and isinstance(identifier, str), 'identifier must be a non-empty string'
assert (not listener) or callable(listener), 'listener must be a callable'
logger = self.logger
calling_module = get_calling_module_name(2)
logger.info("Creating lock for module: " + calling_module + " with lock parameters: path=" + path +
",timeout=" + str(timeout) + ",retries=" + str(retries) + ",identifier=" + identifier)
try:
lock = self.zookeeper_client.Lock(path, identifier)
except Exception as e:
logger.error('Failed to create lock object: %s' % str(e))
return None
if retries == 0:
while True:
logger.info('Trying to acquire lock with client id "%s" under path %s. Other contenders: %s. '
% (identifier, path, lock.contenders()))
try:
lock.acquire(timeout=timeout)
except LockTimeout:
logger.info('Timed out after %d seconds trying to acquire lock. Retrying.' % timeout)
except Exception as e:
logger.info('Error in acquiring lock: %s. Retrying.' % str(e))
if lock.is_acquired:
break
else:
tries = 0
while tries < retries:
logger.info('Trying to acquire lock with client id "%s" under path %s. Other contenders: %s. ' %
(identifier, path, lock.contenders()))
try:
lock.acquire(timeout=timeout)
except LockTimeout:
logger.info('Timed out after %d seconds trying to acquire lock. Retrying %d more times' %
(timeout, retries - tries - 1))
except Exception as e:
logger.info('Error in acquiring lock: %s. Retrying %d more times' % (str(e), (retries - tries)))
if lock.is_acquired:
break
tries += 1
if not lock.is_acquired:
logger.warn('Unable to acquire lock after %d tries' % tries)
if lock.is_acquired:
logger.info(
'Lock acquired. Other contenders: %s' % lock.contenders())
if listener:
self.zookeeper_client.add_listener(listener)
return lock
@property
def config_object(self):
"""
The PanoptesConfig object created by the context
Returns:
PanoptesConfig
"""
return self.__config
@property
def config_dict(self):
"""
A **copy** of the system wide configuration
Returns:
ConfigObj
"""
return self.__config.get_config()
@property
def logger(self):
"""
A module-aware logger which will try and guess the right name for the calling module
Returns:
logging.logger
"""
return self.__logger
@property
def message_producer(self):
"""
The message producer object which can be used to send messages
Returns:
PanoptesMessageQueueProducer
"""
return self.__message_producer
@property
def redis_pool(self):
"""
A Redis Connection Pool
Returns:
RedisConnectionPool
"""
return self.__redis_pool
@property
def zookeeper_client(self):
"""
A Kazoo ZooKeeper client
Returns:
KazooClient
"""
return self.__zookeeper_client
@property
def kafka_client(self):
"""
A Kafka client
Returns:
KafkaClient
"""
return self._kafka_client
@property
def kv_stores(self):
"""
Dictionary of KV stores
Returns:
A dictionary of KV store name/KV store class
"""
return self.__kv_stores
|
# Intcode program (Advent of Code 2019, day 5): a flat list of opcodes and operands
# executed by computify() below.
addresses = [3, 225, 1, 225, 6, 6, 1100, 1, 238, 225, 104, 0, 1101, 32, 43, 225, 101, 68, 192, 224, 1001, 224, -160, 224, 4, 224, 102, 8, 223, 223, 1001, 224, 2, 224, 1, 223, 224, 223, 1001, 118, 77, 224, 1001, 224, -87, 224, 4, 224, 102, 8, 223, 223, 1001, 224, 6, 224, 1, 223, 224, 223, 1102, 5, 19, 225, 1102, 74, 50, 224, 101, -3700, 224, 224, 4, 224, 1002, 223, 8, 223, 1001, 224, 1, 224, 1, 223, 224, 223, 1102, 89, 18, 225, 1002, 14, 72, 224, 1001, 224, -3096, 224, 4, 224, 102, 8, 223, 223, 101, 5, 224, 224, 1, 223, 224, 223, 1101, 34, 53, 225, 1102, 54, 10, 225, 1, 113, 61, 224, 101, -39, 224, 224, 4, 224, 102, 8, 223, 223, 101, 2, 224, 224, 1, 223, 224, 223, 1101, 31, 61, 224, 101, -92, 224, 224, 4, 224, 102, 8, 223, 223, 1001, 224, 4, 224, 1, 223, 224, 223, 1102, 75, 18, 225, 102, 48, 87, 224, 101, -4272, 224, 224, 4, 224, 102, 8, 223, 223, 1001, 224, 7, 224, 1, 224, 223, 223, 1101, 23, 92, 225, 2, 165, 218, 224, 101, -3675, 224, 224, 4, 224, 1002, 223, 8, 223, 101, 1, 224, 224, 1, 223, 224, 223, 1102, 8, 49, 225, 4, 223, 99, 0, 0, 0, 677, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1105, 0, 99999, 1105, 227, 247, 1105, 1, 99999, 1005, 227, 99999, 1005, 0, 256, 1105, 1, 99999, 1106, 227, 99999, 1106, 0, 265, 1105, 1, 99999, 1006, 0, 99999, 1006, 227, 274, 1105, 1, 99999, 1105, 1, 280, 1105, 1, 99999, 1, 225, 225, 225, 1101, 294, 0, 0, 105, 1, 0, 1105, 1, 99999, 1106, 0, 300, 1105, 1, 99999, 1, 225, 225, 225, 1101, 314, 0, 0, 106, 0, 0, 1105, 1, 99999, 1107, 226, 226, 224, 1002, 223, 2, 223, 1005, 224, 329, 1001, 223, 1, 223, 1007, 677, 226, 224, 1002, 223, 2, 223, 1006, 224, 344, 1001, 223, 1, 223, 108,
677, 226, 224, 102, 2, 223, 223, 1006, 224, 359, 1001, 223, 1, 223, 7, 226, 226, 224, 1002, 223, 2, 223, 1005, 224, 374, 101, 1, 223, 223, 107, 677, 677, 224, 1002, 223, 2, 223, 1006, 224, 389, 1001, 223, 1, 223, 1007, 677, 677, 224, 1002, 223, 2, 223, 1006, 224, 404, 1001, 223, 1, 223, 1107, 677, 226, 224, 1002, 223, 2, 223, 1005, 224, 419, 1001, 223, 1, 223, 108, 226, 226, 224, 102, 2, 223, 223, 1006, 224, 434, 1001, 223, 1, 223, 1108, 226, 677, 224, 1002, 223, 2, 223, 1006, 224, 449, 1001, 223, 1, 223, 1108, 677, 226, 224, 102, 2, 223, 223, 1005, 224, 464, 1001, 223, 1, 223, 107, 226, 226, 224, 102, 2, 223, 223, 1006, 224, 479, 1001, 223, 1, 223, 1008, 226, 226, 224, 102, 2, 223, 223, 1005, 224, 494, 101, 1, 223, 223, 7, 677, 226, 224, 1002, 223, 2, 223, 1005, 224, 509, 101, 1, 223, 223, 8, 226, 677, 224, 1002, 223, 2, 223, 1006, 224, 524, 1001, 223, 1, 223, 1007, 226, 226, 224, 1002, 223, 2, 223, 1006, 224, 539, 101, 1, 223, 223, 1008, 677, 677, 224, 1002, 223, 2, 223, 1006, 224, 554, 101, 1, 223, 223, 1108, 677, 677, 224, 102, 2, 223, 223, 1006, 224, 569, 101, 1, 223, 223, 1107, 226, 677, 224, 102, 2, 223, 223, 1005, 224, 584, 1001, 223, 1, 223, 8, 677, 226, 224, 1002, 223, 2, 223, 1006, 224, 599, 101, 1, 223, 223, 1008, 677, 226, 224, 102, 2, 223, 223, 1006, 224, 614, 1001, 223, 1, 223, 7, 226, 677, 224, 1002, 223, 2, 223, 1005, 224, 629, 101, 1, 223, 223, 107, 226, 677, 224, 102, 2, 223, 223, 1005, 224, 644, 101, 1, 223, 223, 8, 677, 677, 224, 102, 2, 223, 223, 1005, 224, 659, 1001, 223, 1, 223, 108, 677, 677, 224, 1002, 223, 2, 223, 1005, 224, 674, 101, 1, 223, 223, 4, 223, 99, 226]
def getParamByMode(mode, step, index, inputs):
    """Resolve one instruction parameter.

    mode 0 = position mode (dereference the address stored at index+step);
    mode 1 = immediate mode (use the value at index+step directly).
    """
    raw = inputs[index + step]
    return inputs[raw] if mode == 0 else raw
def getParamsByMode(mode1, mode2, index, inputs):
    """Resolve the first two parameters of the instruction at *index*."""
    first = getParamByMode(mode1, 1, index, inputs)
    second = getParamByMode(mode2, 2, index, inputs)
    return first, second
def getParamModes(modes):
    """Split a zero-padded 5-character instruction string into (mode1, mode2, mode3, opcode).

    Digit layout is ABCDE: A = mode of param 3, B = mode of param 2, C = mode of
    param 1, DE = two-digit opcode. The original returned modes[2] (mode1) again
    in the mode3 slot - a copy-paste bug, latent only because mode3 is never
    used for the write-target parameters; fixed to modes[0].
    """
    return [int(mode) for mode in [modes[2], modes[1], modes[0], modes[3:]]]
def addition(mode1, mode2, index, inputs):
    """Opcode 1: store param1 + param2 at the address given by the third parameter."""
    lhs, rhs = getParamsByMode(mode1, mode2, index, inputs)
    inputs[inputs[index + 3]] = lhs + rhs
def multiply(mode1, mode2, index, inputs):
    """Opcode 2: store param1 * param2 at the address given by the third parameter."""
    lhs, rhs = getParamsByMode(mode1, mode2, index, inputs)
    inputs[inputs[index + 3]] = lhs * rhs
def less(mode1, mode2, index, inputs):
    """Opcode 7: store 1 at the target address when param1 < param2, else 0."""
    lhs, rhs = getParamsByMode(mode1, mode2, index, inputs)
    inputs[inputs[index + 3]] = int(lhs < rhs)
def equal(mode1, mode2, index, inputs):
    """Opcode 8: store 1 at the target address when param1 == param2, else 0."""
    lhs, rhs = getParamsByMode(mode1, mode2, index, inputs)
    inputs[inputs[index + 3]] = int(lhs == rhs)
def jumpIfTrue(mode1, mode2, index, inputs):
    """Opcode 5: return the jump target (param2) when param1 is non-zero, else the next instruction pointer."""
    condition, target = getParamsByMode(mode1, mode2, index, inputs)
    return index + 3 if condition == 0 else target
def jumpIfFalse(mode1, mode2, index, inputs):
    """Opcode 6: return the jump target (param2) when param1 is zero, else the next instruction pointer."""
    condition, target = getParamsByMode(mode1, mode2, index, inputs)
    return target if condition == 0 else index + 3
def computify(inputs, userInput):
    """Run the Intcode program in *inputs*, feeding *userInput* to every input
    instruction (opcode 3), and return the last diagnostic value output
    (opcode 4). Halts on opcode 99.
    """
    # Arithmetic/comparison opcodes share the same 4-word instruction shape
    four_word_ops = {1: addition, 2: multiply, 7: less, 8: equal}
    index = 0
    diagnostic = None
    while inputs[index] != 99:
        mode1, mode2, mode3, opcode = getParamModes(f"{inputs[index]:05}")
        if opcode in four_word_ops:
            four_word_ops[opcode](mode1, mode2, index, inputs)
            index += 4
        elif opcode == 3:
            inputs[inputs[index + 1]] = userInput
            index += 2
        elif opcode == 4:
            diagnostic = inputs[inputs[index + 1]]
            index += 2
        elif opcode == 5:
            index = jumpIfTrue(mode1, mode2, index, inputs)
        elif opcode == 6:
            index = jumpIfFalse(mode1, mode2, index, inputs)
    return diagnostic
# Part 1: run the diagnostic with input 1; part 2: with input 5.
# A copy of the program is passed each time because computify mutates it.
print(computify(addresses[:], 1))
print(computify(addresses[:], 5))
|
from __future__ import print_function
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
import io
@image_comparison(baseline_images=['log_scales'], remove_text=True)
def test_log_scales():
    """Draw crossing reference lines on axes with log y and symlog x scales."""
    axes = plt.subplot(122, yscale='log', xscale='symlog')
    axes.axvline(24.1)
    axes.axhline(24.1)
@image_comparison(baseline_images=['logit_scales'], remove_text=True,
                  extensions=['png'])
def test_logit_scales():
    """Plot a typical extinction curve on a logit-scaled x axis with a grid."""
    axes = plt.subplot(111, xscale='logit')
    probs = np.array([0.001, 0.003, 0.01, 0.03, 0.1, 0.2, 0.3, 0.4, 0.5,
                      0.6, 0.7, 0.8, 0.9, 0.97, 0.99, 0.997, 0.999])
    axes.plot(probs, 1.0 / probs)
    axes.grid(True)
@cleanup
def test_log_scatter():
    """Issue #1799"""
    # Scatter data that includes a negative value, then save in each vector format
    fig, axes = plt.subplots(1)
    xs = np.arange(10)
    ys = np.arange(10) - 1
    axes.scatter(xs, ys)
    for fmt in ('pdf', 'eps', 'svg'):
        fig.savefig(io.BytesIO(), format=fmt)
"""
Run A* path planning to provide a clairvoyant oracle for training data, over the set of environment scenarios
determined by target_dist: the Manhattan distance (on the triangular lattice) from the base platform to the
target position.
from absl import app, flags, logging
from pickle import Pickler
from scenario import plan_path
from truss_state import BreakableTrussState
FLAGS = flags.FLAGS
# Command-line flags for the oracle-generation run
flags.DEFINE_boolean('debug', False, 'show debug logging messages')
flags.DEFINE_integer('target_dist', 1, 'triangular lattice manhattan distance to target')
# Fixed typo in help text: 'Unlimmited' -> 'Unlimited'
flags.DEFINE_integer('eps', 0, 'number of expansions to do per stage. Unlimited if 0')
flags.DEFINE_string('train_example_path', "./data/h_net_train.pkl", 'training examples output file')
def main(_argv):
    """Plan a path for every start configuration at FLAGS.target_dist, collect the
    returned training examples and pickle them to FLAGS.train_example_path."""
    if FLAGS.debug:
        logging.set_verbosity(logging.DEBUG)
    collected_examples = []
    start_configs = BreakableTrussState.get_start_configs(FLAGS.target_dist)
    for scenario_idx, config in enumerate(start_configs):
        logging.debug("Running scenario {}".format(scenario_idx))
        examples, end_state = plan_path(
            start_state=BreakableTrussState.from_config(config),
            greedy=False,
            heuristic='Manhattan',
            eps=FLAGS.eps,
            render=False,
            return_examples=True
        )
        collected_examples.extend(examples)
        # NOTE: the damaged-node scenario pass (damage_random_node + a second
        # plan_path with the 'ManhattanBraced' heuristic) was disabled in the
        # original source and remains disabled here.
    logging.info("Saving {} training examples to {}".format(len(collected_examples), FLAGS.train_example_path))
    with open(FLAGS.train_example_path, "wb") as f:
        Pickler(f).dump(collected_examples)
# Standard absl entry point; removed the stray trailing '|' artifact
if __name__ == '__main__':
    app.run(main)
import tensorflow as tf
import numpy as np
from ..utils import WeightNormalization, shape_list, logdet as logdet_f
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet-style gated activation: sum the inputs, then multiply
    tanh(first n_channels[0] channels) by sigmoid(remaining channels)."""
    n_ch = n_channels[0]
    summed = input_a + input_b
    tanh_gate = tf.tanh(summed[:, :, :n_ch])
    sigmoid_gate = tf.sigmoid(summed[:, :, n_ch:])
    return tanh_gate * sigmoid_gate
class ConvReluNorm(tf.keras.layers.Layer):
    """A stack of (Conv1D -> LayerNorm -> ReLU -> Dropout) blocks followed by a
    zero-initialized 1x1 projection with a residual connection; the mask is
    applied before each conv and to the output."""

    def __init__(self, hidden_channels, out_channels, kernel_size, n_layers, p_dropout, name=0, **kwargs):
        super(ConvReluNorm, self).__init__(name=f'ConvReluNorm_{name}', **kwargs)
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        # The check enforces > 1, but the original message claimed 'larger than 0';
        # message fixed to match the condition
        assert self.n_layers > 1, 'Number of layers should be larger than 1.'
        self.conv_layers = []
        self.norm_layers = []
        self.relu_drop = tf.keras.Sequential([tf.keras.layers.ReLU(),
                                              tf.keras.layers.Dropout(self.p_dropout)])
        for i in range(self.n_layers):
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    self.hidden_channels,
                    self.kernel_size,
                    padding='same',
                    name='conv_._{}'.format(i),
                )
            )
            self.norm_layers.append(tf.keras.layers.LayerNormalization(
                epsilon=1e-4,
                name='LayerNorm_._{}'.format(i),
            ))
        # Zero-initialized so the residual branch initially dominates
        self.proj = tf.keras.layers.Conv1D(self.out_channels, 1,
                                           kernel_initializer='zeros', bias_initializer='zeros',
                                           padding='same')

    def call(self, x, x_mask, training=False):
        """Apply the masked conv stack, project, and add the residual input."""
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask, training=training)
            x = self.norm_layers[i](x, training=training)
            x = self.relu_drop(x, training=training)
        x = x_org + self.proj(x, training=training)
        return x * x_mask
class WN(tf.keras.layers.Layer):
    """WaveNet-style stack of weight-normalized dilated Conv1D layers with gated
    (tanh * sigmoid) activations, residual/skip connections and optional global
    conditioning via gin_channels."""

    def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers,
                 gin_channels=0, p_dropout=0, name=0, **kwargs):
        super(WN, self).__init__(name=f'WN_{name}', **kwargs)
        assert (kernel_size % 2 == 1)
        assert (hidden_channels % 2 == 0)
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        # The original read 'self.kernel_size = kernel_size,' - the stray trailing
        # comma stored a 1-tuple instead of the int; fixed
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.in_layers = []
        self.res_skip_layers = []
        self.drop = tf.keras.layers.Dropout(self.p_dropout)
        if self.gin_channels != 0:
            # One shared conditioning conv producing 2*hidden channels per layer
            cond_layer = tf.keras.layers.Conv1D(2 * self.hidden_channels * self.n_layers, 1, padding='same')
            self.cond_layer = WeightNormalization(cond_layer, data_init=False)
        for i in range(n_layers):
            dilation = dilation_rate ** i
            in_layer = tf.keras.layers.Conv1D(2 * self.hidden_channels, self.kernel_size,
                                              dilation_rate=dilation, padding='same')
            in_layer = WeightNormalization(in_layer, data_init=False)
            self.in_layers.append(in_layer)
            # Last layer emits skip channels only; earlier layers emit residual + skip
            if i < n_layers - 1:
                res_skip_channels = 2 * self.hidden_channels
            else:
                res_skip_channels = self.hidden_channels
            res_skip_layer = tf.keras.layers.Conv1D(res_skip_channels, 1)
            res_skip_layer = WeightNormalization(res_skip_layer, data_init=False)
            self.res_skip_layers.append(res_skip_layer)

    def call(self, x, x_mask=None, g=None, training=False):
        """Run the dilated stack; returns the masked sum of skip connections."""
        output = tf.zeros_like(x)
        n_channels_tensor = tf.constant([self.hidden_channels], dtype=tf.int32)
        if g is not None:
            g = self.cond_layer(g, training=training)
        for i in range(self.n_layers):
            x_in = self.in_layers[i](x, training=training)
            x_in = self.drop(x_in, training=training)
            if g is not None:
                # Slice out this layer's share of the conditioning projection
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, :, cond_offset:cond_offset + 2 * self.hidden_channels]
            else:
                g_l = tf.zeros_like(x_in)
            acts = fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            res_skip_acts = self.res_skip_layers[i](acts, training=training)
            if i < self.n_layers - 1:
                x = (x + res_skip_acts[:, :, :self.hidden_channels]) * x_mask
                output = output + res_skip_acts[:, :, self.hidden_channels:]
            else:
                output = output + res_skip_acts
        return output * x_mask
class ActNorm(tf.keras.layers.Layer):
    """Activation normalization flow layer with per-channel scale (logs) and bias.

    With ddi=True the parameters are data-dependent-initialized from the first
    batch seen in call(); otherwise they start at zero.
    """

    def __init__(self, channels, ddi=False, name=0, **kwargs):
        super(ActNorm, self).__init__(name=f'ActNorm_{name}', **kwargs)
        self.channels = channels
        # When ddi is requested, defer initialization until the first forward pass
        self.initialized = not ddi
        # NOTE: TF1-style tf.get_variable, kept as in the original
        self.logs = tf.get_variable(
            name='logs',
            shape=[1, 1, channels],
            initializer=tf.zeros_initializer(),
        )
        self.bias = tf.get_variable(
            name='bias',
            shape=[1, 1, channels],
            initializer=tf.zeros_initializer(),
        )

    def call(self, x, x_mask=None, reverse=False, **kwargs):
        """Forward: z = (bias + exp(logs) * x) * mask with log-determinant;
        reverse: invert the affine transform (logdet is None)."""
        if x_mask is None:
            x_mask = tf.ones((tf.shape(x)[0], tf.shape(x)[1], 1), dtype=x.dtype)
        x_len = tf.reduce_sum(x_mask, [2, 1])
        if not self.initialized:
            self.initialize(x, x_mask)
            self.initialized = True
        if reverse:
            z = (x - self.bias) * tf.math.exp(-self.logs) * x_mask
            logdet = None
        else:
            z = (self.bias + tf.math.exp(self.logs) * x) * x_mask
            logdet = tf.reduce_sum(self.logs) * x_len
        return z, logdet

    def initialize(self, x, x_mask):
        """Data-dependent init: choose bias/logs so the masked activations are
        (approximately) zero-mean, unit-variance after the transform."""
        denom = tf.stop_gradient(tf.reduce_sum(x_mask, [0, 1]))
        mean = tf.stop_gradient(tf.reduce_sum(x * x_mask, [0, 1]) / denom)
        mean_sq = tf.stop_gradient(tf.reduce_sum(x * x * x_mask, [0, 1]) / denom)
        variance = tf.stop_gradient(mean_sq - (mean ** 2))
        logs = tf.stop_gradient(0.5 * tf.math.log(tf.clip_by_value(variance, 1e-6, tf.reduce_max(variance))))
        bias_init = tf.stop_gradient((-mean * tf.math.exp(-logs)))
        bias_init = tf.stop_gradient(tf.reshape(bias_init, self.bias.shape))
        logs_init = tf.stop_gradient((-logs))
        self.bias = bias_init
        self.logs = logs_init

    def store_inverse(self):
        # Nothing to cache: the inverse is computed directly in call(reverse=True)
        pass

    def set_ddi(self, ddi):
        """Enable/disable data-dependent initialization on the next call."""
        self.initialized = not ddi
class InvConvNear(tf.keras.layers.Layer):
    """Invertible 1x1 convolution over groups of ``n_split`` channels
    (Glow-style flow layer).

    The weight is initialized as a random orthogonal matrix; one column
    is flipped if needed so det(W) = +1.
    """
    def __init__(self, channels, n_split=4, no_jacobian=False, name=0, **kwargs):
        super(InvConvNear, self).__init__(name=f'InvConvNear_{name}', **kwargs)
        self.channels = channels
        self.n_split = n_split
        # NOTE(review): the flag is forced off in eager mode -- confirm the
        # Jacobian really must always be computed when executing eagerly.
        self.no_jacobian = no_jacobian and not tf.executing_eagerly()
        # Random orthogonal initialization via QR decomposition.
        w_init = np.random.normal(size=(self.n_split, self.n_split))
        w_init = np.linalg.qr(w_init)[0]
        if np.linalg.det(w_init) < 0:
            # Flip one column so the determinant becomes +1.
            w_init[:, 0] = -1 * w_init[:, 0]
        w_init = tf.convert_to_tensor(w_init.astype(np.float32))
        self.weight = tf.Variable(w_init, name='w_init')
        # Cached inverse, filled by store_inverse() for fast reverse passes.
        self.weight_inv = None

    def call(self, x, x_mask=None, reverse=False, **kwargs):
        """Apply W (or W^-1 when reverse=True) to channel groups.

        x: [batch, time, channels]. Returns (z, logdet); logdet is None
        in reverse mode. ``shape_list`` and ``logdet_f`` are helpers
        defined elsewhere in this module.
        """
        # [B, T, C] -> [B, C, T]
        x = tf.transpose(x, [0, 2, 1])
        b, c, t = shape_list(x)
        if x_mask is None:
            x_mask = 1.0
            x_len = tf.ones((b,), dtype=x.dtype) * t
        else:
            x_len = tf.reduce_sum(x_mask, [2, 1])
        # [B, 2, C // N, N // 2, T]
        x = tf.reshape(x, (b, 2, c // self.n_split, self.n_split // 2, t))
        # [B, 2, N // 2, C // N, T]
        x = tf.transpose(x, (0, 1, 3, 2, 4))
        # [B, N, C // N, T]
        x = tf.reshape(x, (b, self.n_split, c // self.n_split, t))
        if reverse:
            if self.weight_inv is not None:
                weight = self.weight_inv
            else:
                weight = tf.linalg.inv(self.weight)
            logdet = None
        else:
            weight = self.weight
            if self.no_jacobian:
                logdet = 0
            else:
                # NOTE(review): logdet scaled by x_len / (c / n_split) --
                # verify against the reference Glow-TTS implementation.
                logdet = logdet_f(self.weight) / (c / self.n_split) * x_len
        weight = tf.reshape(weight, (1, 1, self.n_split, self.n_split))
        # [B, C // N, T, N]
        x = tf.transpose(x, (0, 2, 3, 1))
        z = tf.nn.conv2d(x, weight, 1, padding='SAME')
        # [B, N, C // N, T]
        z = tf.transpose(z, (0, 3, 1, 2))
        # [B, 2, N // 2, C // N, T]
        z = tf.reshape(z, (b, 2, self.n_split // 2, c // self.n_split, t))
        # [B, 2, C // N, N // 2, T]
        z = tf.transpose(z, (0, 1, 3, 2, 4))
        # [B, C, T]
        z = tf.reshape(z, (b, c, t))
        # [B, T, C]
        z = tf.transpose(z, (0, 2, 1))
        return z * x_mask, logdet

    def store_inverse(self):
        # Cache W^-1 so reverse passes avoid repeated matrix inversion.
        self.weight_inv = tf.linalg.inv(self.weight)
|
import re
from collections import Counter
from random import choices as rcs
"""
i copied from line 6 to 10
"""
def words(text):
    """Return every alphanumeric word in *text*, lowercased, in order."""
    word_pattern = re.compile(r'\w+')
    return word_pattern.findall(text.lower())
# Frequency table of every word in the corpus, built at import time.
# NOTE(review): requires 'big.txt' to exist in the working directory.
WORDS = Counter(words(open('big.txt').read()))
answer = []  # NOTE(review): never used in this file -- candidate for removal
probability = []  # filled in place by calculateProbability() below
def calculateProbability(probability, word_counts=None):
    """Append each word's relative frequency (rounded to 5 places) to
    *probability*, in the iteration order of the frequency table.

    Args:
        probability: list mutated in place (one entry per word).
        word_counts: mapping of word -> occurrence count; defaults to the
            module-level WORDS table (backward compatible with the
            original zero-argument call sites).
    """
    if word_counts is None:
        word_counts = WORDS
    # BUG FIX (performance): the original recomputed sum(WORDS.values())
    # and list(WORDS.values()) on every loop iteration -- O(n^2) overall.
    total = sum(word_counts.values())
    for count in word_counts.values():
        probability.append(round(count / total, 5))
def showData(word_counts=None, probs=None):
    """Print one line per word: probability, occurrence count, and word.

    Args:
        word_counts: mapping word -> count; defaults to the module-level
            WORDS table (backward compatible).
        probs: parallel list of probabilities; defaults to the
            module-level `probability` list filled by
            calculateProbability().
    """
    if word_counts is None:
        word_counts = WORDS
    if probs is None:
        probs = probability
    # BUG FIX (performance): the original rebuilt list(WORDS.values())
    # and list(WORDS.keys()) on every iteration -- O(n^2) overall.
    for prob, (word, count) in zip(probs, word_counts.items()):
        print(prob, "%", "|", "OC:", count, word)
"""
based on the probability of each key
pick key based on "prbability" indexes
"""
calculateProbability(probability)
showData()
|
# coding=utf-8
import sys
class Process(object):
    """Base class for a dump-processing step.

    Register instances with WDController; during the dump read it calls
    processItem() for item entries and processProperty() for property
    entries.
    """
    def __init__(self, id):
        # `id` doubles as the registration key inside WDController.process.
        self.id = id

    def processItem(self, entry):
        """
        The method that will contain the process applied to items.
        :param entry: item data
        :return:
        """
        pass

    def processProperty(self, entry):
        """
        The method that will contain the process applied to properties.
        :param entry: property data
        :return:
        """
        pass
class WDController(object):
    """Drives a Wikidata dump reader and fans each entry out to the
    registered Process instances."""

    def __init__(self, reader, *args):
        """
        :param reader: wikidata dump reader (see Reader class)
        :param args: Process instances to apply during the dump reading
            (anything that is not a Process is silently ignored)
        """
        self.reader = reader
        self.process = {proc.id: proc for proc in args if isinstance(proc, Process)}

    def process_all(self, v=True):
        """
        Read the dump and apply every registered process to every entry.

        :param v: verbose -- print a running counter every 100 entries
        :return:
        """
        parsed = 0
        while self.reader.has_next():
            entry = self.reader.next()
            if entry:
                # Ids starting with "Q" are items; everything else is a property.
                is_item = entry["id"][0] == "Q"
                for proc in self.process.values():
                    if is_item:
                        proc.processItem(entry)
                    else:
                        proc.processProperty(entry)
            parsed += 1
            if v and parsed % 100 == 0:
                sys.stdout.write("\rEntity Parsed: " + '{:,}'.format(parsed))
|
# - this file contains a Numpy implementation of the Revised Simplex Method (RSM)
# (see function "rsm_numpy")
# - for algorithmic details see:
# Hillier, Frederick S. (2014): Introduction to Operations Research.
# - RSM is used to solve Linear Programs (LP)
# - in vectorized notation, an LP optimization problem can be described as follows:
#
# parameters
# matrix A
# vector b, c
# variables
# vector x
# max
# c'*x
# st
# A*x <= b
# x >= 0
#
# - to compute with "rsm_numpy" a correct result, the LP problem formulation must comply with the following rules:
# - Minimization LP problems must be transformed into maximization LP problems
# - constraints of type "... >= b" and "... = b" need to be transformed with Big M Method
# (https://en.wikipedia.org/wiki/Big_M_method) to constraints of type "... <= b"
# - all values in b must be greater than or equal to 0 (b >= 0)
# - the identity matrix for the starting basic variables in A must be provided explicitly
# - cost c for starting basic variables can be only zero or a large negative number (Big M)
# --> see "examples.py" for some example problems and how they are transformed to meet
# the requirements for this RSM implementation
import time
import numpy as np
# helper class for returning and printing the results
class Rsm_Result:
    """Container for the outcome of one revised-simplex run.

    Holds the solver message, success flag, objective value z, decision
    variables x, slack values, iteration count and wall time. Set
    ``concise=True`` to omit x and slack from the printed form.
    """

    def __init__(self, message, success, z, x, slack, nit, sec, concise=False):
        self.message = message
        self.success = success
        self.z = z
        self.x = x
        self.slack = slack
        self.nit = nit
        self.sec = sec
        self.concise = concise

    def __str__(self):
        report = [
            "message: " + self.message,
            "success: " + str(self.success),
            "max z: " + str(self.z),
        ]
        if not self.concise:
            report.append("x: " + str(self.x))
            report.append("slack: " + str(self.slack))
        report.append("nit: " + str(self.nit))
        report.append("seconds: " + str(self.sec))
        return "\n".join(report)
# Revised Simplex Method
# find a vector x
# that maximizes c'x
# subject to Ax <= b
# and x >= 0
def rsm_numpy(c, A, b):
    """Revised Simplex Method: maximize c'x subject to Ax <= b, x >= 0.

    Expects A to already contain the identity columns for the m starting
    basic (slack / Big-M) variables, b >= 0, and Big-M costs encoded as
    large negative entries of c (see the module header for the required
    problem transformation).

    Returns an Rsm_Result with message, success flag, objective z,
    decision variables x, slack values, iteration count and wall time.
    """
    tic = time.time()
    success = False
    z = -np.inf
    x_B = []
    EPS = 1e-12  # tolerance for all zero / negativity comparisons
    m = A.shape[0]  # amount constraints
    n = A.shape[1] - m  # amount variables
    # Start from the basis formed by the m appended identity columns.
    B_idx = np.array(list(range(n, n + m)))
    c_all = c
    iteration = 1
    # Indices of Big-M (negative-cost) starting variables: once replaced
    # they must never re-enter the basis.
    lt_zero_slack = np.where(c[B_idx] < -EPS)[0] + n
    B_inv = np.identity(m)
    E = np.identity(m)  # elementary matrix reused for the basis-inverse update
    message = ""
    while True:
        B_inv_b = B_inv.dot(b)
        if len(np.where(B_inv_b < -EPS)[0]) != 0:
            # The implementation does not support finding a valid starting point
            # NOTE(review): exit() in library code kills the whole process;
            # raising an exception would be friendlier to callers.
            print("infeasible --> values in b must be >= 0")
            exit(-1)
        c_B = c_all[B_idx]
        c_B_B_inv = c_B.dot(B_inv)
        # Reduced costs: structural variables first, then slack columns.
        _c = np.hstack((c_B_B_inv.dot(A[:, list(range(n))]) - c[list(range(n))], c_B_B_inv))
        _c[B_idx] = np.inf  # set values of items in the base to infinity
        _c[lt_zero_slack] = np.inf  # negative slack (BIG M) can't become a member of the base
        c_min = np.min(_c)
        if c_min + EPS >= 0:
            # No negative reduced cost left -> current basis is optimal.
            message = "optimal solution found"
            success = True
            z = c_B.dot(B_inv_b)
            x_B = list(zip(B_idx, B_inv_b))
            # test for BIG M infeasibility
            lt_zero_in_B = list(set(lt_zero_slack) & set(B_idx))
            if lt_zero_in_B and len(np.where(B_inv_b[np.where(B_idx == lt_zero_in_B)[0]] != 0)[0]):
                message = "solution is infeasible because not all negative Big M starting basic variables could be replaced"
                success = False
            break
        # Entering variable: most negative reduced cost (Dantzig rule).
        x_enter = np.where(_c == c_min)[0][0]
        if x_enter < n:
            p_prime = B_inv.dot(A[:, x_enter])
        else:
            p_prime = B_inv[:, x_enter - n]
        idx_gt_zero = np.where(p_prime > EPS)[0]
        if len(idx_gt_zero) == 0:
            # Entering column has no positive component: objective unbounded.
            message = "unbounded maximization problem"
            z = c_B.dot(B_inv_b)
            x_B = list(zip(B_idx, B_inv_b))
            break
        # Minimum-ratio test selects the leaving variable.
        x_B_div_col = B_inv_b[idx_gt_zero] / p_prime[idx_gt_zero]
        x_leave_min = np.min(x_B_div_col)
        x_leave = idx_gt_zero[np.where(x_B_div_col == x_leave_min)[0][0]]
        B_idx[x_leave] = x_enter
        # update old inverse instead computing new one
        mult_val = np.longdouble(1) / p_prime[x_leave]
        E[:, x_leave] = p_prime * -mult_val
        E[:, x_leave][x_leave] = mult_val
        B_inv = E.dot(B_inv)
        # Reset E back to the identity for the next iteration.
        E[:, x_leave] = 0
        E[x_leave, x_leave] = 1
        iteration += 1
    toc = time.time()
    # Scatter the basic-variable values back into x and slack vectors.
    x = np.zeros(n)
    slack = np.zeros(m)
    for item in x_B:
        if item[0] < n:
            x[item[0]] = item[1]
        else:
            slack[item[0] - n] = item[1]
    return Rsm_Result(message, success, z, x, slack, iteration, toc - tic)
if __name__ == '__main__':
    # import the sample LP problems
    # (examples.py also documents how each problem was transformed to
    # meet this solver's requirements)
    from examples import *
    c, A, b = problem_basic()
    # c, A, b = problem_portfolio()
    # c, A, b = problem_dietary()
    # c, A, b = problem_c_B_B_inv_negatives_values()
    # c, A, b = problem_unbounded()
    # c, A, b = problem_infeasible()
    # c, A, b = problem_degenerated()
    # c, A, b = generate_random_LP(m=20, n=40, density=0.5, seed=0)
    # solve LP problem
    result = rsm_numpy(c, A, b)
    # if True then do not output variables
    result.concise = False
    print(result)
|
from BaseState import States
|
# -*- coding=UTF-8 -*-
"""generate typing from module help.
Usage: "$PYTHON" ./scripts/full_help.py $MODULE | "$PYTHON" ./scripts/typing_from_help.py -
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import cast_unknown as cast
import re
# Section-heading lines emitted by pydoc's help() inside a class body.
_CLASS_MRO_START = "Method resolution order:"
_CLASS_METHODS_START = "Methods defined here:"
_CLASS_CLASS_METHODS_START = "Class methods defined here:"
_CLASS_STATIC_METHODS_START = "Static methods defined here:"
_CLASS_DATA_ATTR_START = "Data and other attributes defined here:"
_CLASS_READ_ONLY_PROPERTY_START = "Readonly properties defined here:"
_CLASS_DATA_DESC_START = "Data descriptors defined here:"
# Inherited-member headings; group 1 captures the originating class name.
_CLASS_INHERITED_METHODS_START = "Methods inherited from (.+):"
_CLASS_INHERITED_CLASS_METHODS_START = "Class methods inherited from (.+):"
_CLASS_INHERITED_STATIC_METHODS_START = "Static methods inherited from (.+):"
_CLASS_INHERITED_DATA_ATTR_START = "Data and other attributes inherited from (.+):"
_CLASS_INHERITED_DATA_DESC_START = "Data descriptors inherited from (.+):"
_CLASS_INHERITED_READ_ONLY_PROPERTY_START = "Readonly properties inherited from (.+):"
# Horizontal rule that terminates a class section in help() output.
_CLASS_SECTION_END = "-{20,}"
# Maps type spellings found in docstrings/signatures to typing annotations.
TYPE_MAP = {
    "object": "",
    "exceptions.Exception": "Exception",
    "builtins.object": "object",
    "builtins.tuple": "tuple",
    "String": "typing.Text",
    "string": "typing.Text",
    "str": "typing.Text",
    "Float": "float",
    "Floating point value": "float",
    "Bool": "bool",
    "Boolean": "bool",
    "Int": "int",
    "Integer": "int",
    "integer": "int",
    "Integer value": "int",
    "void": "None",
    "list of strings or single string": "typing.Union[typing.List[typing.Text], typing.Text]",
    "List of strings": "typing.List[typing.Text]",
    "list of str": "typing.List[typing.Text]",
    "String list": "typing.List[typing.Text]",
    "List of int": "typing.List[int]",
    "list of int": "typing.List[int]",
    "[int]": "typing.List[int]",
    "List": "list",
    "(x, y, z)": "typing.Tuple",
    "list of (x,y,z) tuples": "typing.List",
    "list of floats": "typing.List[float]",
}
def _iter_class_sections(lines):
    """Split a class body from help() output into (section_type, lines) pairs.

    Yields tuples like ("docstring", [...]), ("methods", [...]); for
    inherited sections the first value in the list is the originating
    class name captured by the heading pattern.

    Refactor: the original had fourteen near-identical elif branches; the
    heading patterns are now checked in the same order from a dispatch
    table, preserving behavior exactly.
    """
    # (pattern, section type, whether group 1 captures the origin class).
    # Order matters and mirrors the original elif chain.
    dispatch = [
        (_CLASS_MRO_START, "mro", False),
        (_CLASS_METHODS_START, "methods", False),
        (_CLASS_STATIC_METHODS_START, "static-methods", False),
        (_CLASS_CLASS_METHODS_START, "class-methods", False),
        (_CLASS_DATA_ATTR_START, "data", False),
        (_CLASS_DATA_DESC_START, "data", False),
        (_CLASS_READ_ONLY_PROPERTY_START, "data", False),
        (_CLASS_INHERITED_METHODS_START, "inherited-methods", True),
        (_CLASS_INHERITED_CLASS_METHODS_START, "inherited-class-methods", True),
        (_CLASS_INHERITED_STATIC_METHODS_START, "inherited-static-methods", True),
        (_CLASS_INHERITED_DATA_ATTR_START, "inherited-data", True),
        (_CLASS_INHERITED_DATA_DESC_START, "inherited-data", True),
        (_CLASS_INHERITED_READ_ONLY_PROPERTY_START, "inherited-data", True),
        (_CLASS_SECTION_END, "", False),
    ]
    lines = iter(lines)
    section_type = "docstring"
    section_values = []
    for line in lines:
        for pattern, next_type, captures_origin in dispatch:
            match = re.match(pattern, line)
            if match:
                # A new heading closes the current section.
                yield (section_type, section_values)
                section_type = next_type
                section_values = [match.group(1)] if captures_origin else []
                break
        else:
            section_values.append(line)
    if section_values:
        yield (section_type, section_values)
def _strip_lines(lines):
return "\n".join(lines).strip("\n").splitlines()
def _parse_by_indent(lines, indent=" "):
    """Group lines into (header, indented-body) pairs.

    A line not starting with *indent* begins a new group; lines starting
    with *indent* (or equal to the indent with trailing spaces removed,
    i.e. a blank continuation line) are collected under the current
    header.
    """
    current = ""  # type: str
    collected = []
    blank_marker = indent.rstrip(" ")
    for raw in lines:
        raw = cast.text(raw)
        if raw.startswith(indent) or raw == blank_marker:
            collected.append(raw[len(indent):])
        else:
            # Close the finished group. Lines collected before the first
            # header intentionally carry over into that first group
            # (matches the original behavior).
            if current:
                yield (current, collected)
                collected = []
            current = raw
    if collected:
        yield (current, collected)
def _parse_class_data(lines):
    """Yield one parsed data-attribute dict per indent group."""
    for heading, body in _parse_by_indent(lines):
        entry = _parse_data_description(heading)
        if body:
            entry["docstring"] = _strip_lines(body)
        yield entry
def _parse_args(args):
    """Normalize a comma-separated argument string into a list of
    stub-ready argument declarations.

    Applies TYPE_MAP, collapses "..." into *args/**kwargs, and rewrites
    "(type)name" entries as "name: type".
    """
    tokens = [t.strip() for t in (args or "").split(",")]
    tokens = [TYPE_MAP.get(t, t) for t in tokens]
    tokens = [t for t in tokens if t]
    if "..." in tokens:
        tokens = ["*args", "**kwargs"]
    declarations = []
    for token in tokens:
        typed = re.match(r"\((.+)\)(.+)$", token)
        if typed:
            declarations.append("%s: %s" % (typed.group(2), typed.group(1)))
        else:
            declarations.append(token)
    return declarations
def _parse_class_method(lines):
    """Parse method entries from a pydoc "Methods ..." class section.

    Yields dicts with name, args, return_type and docstring. When the
    first docstring line repeats the signature ("name(...) -> type"),
    that signature takes precedence over the heading.
    """
    for k, v in _parse_by_indent(lines):
        # Heading looks like "name(args)" or "name(args) from origin.Class".
        match = re.match(r"^(.+?)(?:\((.+)\))?(?: from (.+))?$", k)
        if not match:
            raise NotImplementedError(k, v)
        name = match.group(1)
        args = _parse_args(match.group(2))
        docstring = v
        return_type = ""
        match = (
            len(docstring) > 0
            and re.match(
                r"(?:self\.)?" + re.escape(name) + r"\((.*)\) ?-> ?(.+?)\.? *:?$",
                docstring[0],
            )
            or None
        )
        if match:
            # Signature found in the docstring: prefer its args/return type.
            docstring = docstring[1:]
            args = _parse_args(match.group(1))
            return_type = match.group(2) or ""
        args = [i.strip() for i in args]
        args = [TYPE_MAP.get(i, i) for i in args]
        args = [i for i in args if i]
        return_type = TYPE_MAP.get(return_type, return_type)
        docstring = _strip_lines(docstring)
        yield dict(name=name, args=args, return_type=return_type, docstring=docstring)
def _iter_classes(lines):
    """Parse the CLASSES section of a help() dump.

    Yields one dict per class: either an alias entry (name / real_name)
    or a full definition with inherits, methods, data and docstring.
    """
    for class_key, class_values in _parse_by_indent(lines, " | "):
        if not class_values:
            # Ignore summary list and empty lines
            continue
        # Alias form: "Name = class RealName(Base, ...)".
        match = re.match(r"(.+?) = class (.+?)(?:\((.+)\))?$", class_key)
        if match:
            g3 = match.group(3)
            yield dict(
                name=match.group(1),
                inherits=g3.split(",") if g3 else [],
                real_name=match.group(2),
            )
            continue
        match = re.match(r"class (.+?)(?:\((.+)\))?$", class_key)
        if not match:
            raise NotImplementedError(
                "_iter_classes: %s: %s" % (class_key, class_values)
            )
        g2 = match.group(2)
        class_def = dict(
            name=match.group(1),
            inherits=g2.split(",") if g2 else [],
            static_methods=[],
            class_methods=[],
            methods=[],
            data=[],
            docstring=[],
        )
        # Route every body section to its bucket; the MRO listing and all
        # inherited members are intentionally dropped from the stub.
        for (section_key, section_values) in _iter_class_sections(class_values):
            if section_key == "" and section_values == []:
                continue
            elif section_key == "inherited-data":
                continue
            elif section_key == "inherited-methods":
                continue
            elif section_key == "inherited-class-methods":
                continue
            elif section_key == "inherited-static-methods":
                continue
            elif section_key == "mro":
                continue
            elif section_key == "docstring":
                class_def["docstring"] = section_values
            elif section_key == "data":
                class_def["data"] = list(_parse_class_data(section_values))
            elif section_key == "methods":
                class_def["methods"] = list(_parse_class_method(section_values))
            elif section_key == "static-methods":
                class_def["static_methods"] = list(_parse_class_method(section_values))
            elif section_key == "class-methods":
                class_def["class_methods"] = list(_parse_class_method(section_values))
            else:
                raise NotImplementedError(section_key, section_values)
        class_def["docstring"] = _strip_lines(class_def["docstring"])
        yield class_def
def _iter_functions(lines):
    """Parse the FUNCTIONS section of a help() dump.

    Yields alias entries ("name = real_name"), lambda entries, and
    regular function entries (name, args, return_type, docstring).
    """
    for k, v in _parse_by_indent(lines):
        # Alias form: "name = real_name(...)".
        match = re.match(r"(.+?) = (.+?)(?:\((.+)\))?$", k)
        if match:
            g3 = match.group(3)
            yield dict(
                name=match.group(1),
                inherits=g3.split(",") if g3 else [],
                real_name=match.group(2),
            )
            continue
        # Lambda form: "name lambda args".
        match = re.match(r"(.+?) lambda (.*)$", k)
        if match:
            yield dict(
                name=match.group(1),
                args=_parse_args(match.group(2)),
                docstring=_strip_lines(v),
                return_type="",
            )
            continue
        match = re.match(r"(.+?)\((.*)\)$", k)
        if not match:
            raise NotImplementedError(k, v)
        name = match.group(1)
        args = _parse_args(match.group(2))
        docstring = _strip_lines(v)
        # Prefer the "name(...) -> type" signature embedded in the docstring.
        match = (
            len(docstring) > 0
            and re.match(re.escape(name) + r"\((.*)\) ?-> ?(.+?)\.? *$", docstring[0])
            or None
        )
        return_type = ""
        if match:
            docstring = docstring[1:]
            args = _parse_args(match.group(1))
            return_type = match.group(2) or ""
        return_type = TYPE_MAP.get(return_type, return_type)
        docstring = _strip_lines(docstring)
        yield dict(name=name, args=args, docstring=docstring, return_type=return_type)
def _typing_from_class(class_def):
    """Yield stub-source lines for one parsed class definition.

    Handles alias entries first, then emits the class header, docstring,
    data attributes, static/class/instance methods, and a trailing "...".

    NOTE(review): data attributes and methods always get a docstring
    block, even when the parsed docstring list is empty -- confirm the
    resulting empty triple-quote pair is intended in the generated stub.
    """
    name = class_def["name"]
    real_name = class_def.get("real_name")
    if real_name is not None:
        # Alias entry: emit "Name = RealName" and stop.
        yield "%s = %s" % (name, real_name)
        return
    docstring = class_def["docstring"]
    inherits = class_def["inherits"]
    inherits = [TYPE_MAP.get(i, i) for i in inherits]
    inherits = [i for i in inherits if i]
    methods = class_def["methods"]
    class_methods = class_def["class_methods"]
    static_methods = class_def["static_methods"]
    data = class_def["data"]
    yield "class %s%s:" % (name, "(%s)" % ",".join(inherits) if inherits else "")
    if docstring:
        yield ' """'
        for i in docstring:
            yield (" %s" % i).rstrip()
        yield ' """'
        yield ""
    if data:
        for i in data:
            yield " %s: ...%s" % (
                i["name"],
                " = %s" % i["value"] if i["value"] else "",
            )
            yield ' """'
            for j in i["docstring"]:
                yield (" %s" % j).rstrip()
            yield ' """'
            yield ""
    if static_methods:
        for i in static_methods:
            yield " @staticmethod"
            yield " def %s(%s)%s:" % (
                i["name"],
                ", ".join(i["args"]),
                " -> %s" % i["return_type"] if i["return_type"] else "",
            )
            yield ' """'
            for j in i["docstring"]:
                yield (" %s" % j).rstrip()
            yield ' """'
            yield " ..."
            yield ""
    if class_methods:
        for i in class_methods:
            # Ensure the implicit cls parameter is present in the stub.
            if "cls" not in i["args"]:
                i["args"].insert(0, "cls")
            yield " @classmethod"
            yield " def %s(%s)%s:" % (
                i["name"],
                ", ".join(i["args"]),
                " -> %s" % i["return_type"] if i["return_type"] else "",
            )
            yield ' """'
            for j in i["docstring"]:
                yield (" %s" % j).rstrip()
            yield ' """'
            yield " ..."
            yield ""
    if methods:
        for i in methods:
            # Ensure the implicit self parameter is present in the stub.
            if "self" not in i["args"]:
                i["args"].insert(0, "self")
            yield " def %s(%s)%s:" % (
                i["name"],
                ", ".join(i["args"]),
                " -> %s" % i["return_type"] if i["return_type"] else "",
            )
            yield ' """'
            for j in i["docstring"]:
                yield (" %s" % j).rstrip()
            yield ' """'
            yield " ..."
            yield ""
    yield " ..."
def _typing_from_function(func_def):
name = func_def["name"]
real_name = func_def.get("real_name")
if real_name is not None:
yield "%s = %s" % (name, real_name)
return
args = func_def["args"]
return_type = func_def["return_type"]
docstring = func_def["docstring"]
yield "def %s(%s)%s:" % (
name,
", ".join(args),
" -> %s" % return_type if return_type else "",
)
yield ' """'
for i in docstring:
yield (" %s" % i).rstrip()
yield ' """'
yield " ..."
yield ""
def _typing_from_functions(lines):
    """Emit stub lines for every function parsed from *lines*."""
    for func_def in _iter_functions(lines):
        for stub_line in _typing_from_function(func_def):
            yield stub_line
        yield ""
def _typing_from_classes(lines):
    """Emit stub lines for every class parsed from *lines*."""
    for class_def in _iter_classes(lines):
        for stub_line in _typing_from_class(class_def):
            yield stub_line
        yield ""
def _parse_data_description(i):
match = re.match(r"^(.+?)(?: ?= ?(.+))?$", i)
if not match:
raise NotImplementedError(i)
name = match.group(1)
value = match.group(2) or ""
value_type = "..."
docstring = []
if value.endswith("..."):
docstring.append(value)
value = ""
elif value.startswith("<"):
docstring.append(value)
value = ""
elif value.startswith(("'", '"')):
docstring.append(value)
value_type = "typing.Text"
value = ""
elif value.startswith("["):
docstring.append(value)
value = ""
value_type = "list"
elif value.startswith("{"):
docstring.append(value)
value = ""
value_type = "dict"
elif value in ("True", "False"):
docstring.append(value)
value = ""
value_type = "bool"
elif re.match(r"-?\d+", value):
value_type = "int"
return dict(name=name, value=value, value_type=value_type, docstring=docstring)
def _iter_data(lines):
    """Yield a parsed description for every non-empty line."""
    for raw in lines:
        if raw:
            yield _parse_data_description(raw)
def _typing_from_datum(datum_def):
name = datum_def["name"]
value = datum_def["value"]
value_type = datum_def["value_type"]
docstring = datum_def["docstring"]
yield "%s: %s%s" % (name, value_type, " = %s" % value if value else "")
if docstring:
yield '"""'
for i in docstring:
yield i
yield '"""'
def _typing_from_data(lines):
    """Emit stub lines for every entry in a DATA section."""
    for datum in _iter_data(lines):
        for stub_line in _typing_from_datum(datum):
            yield stub_line
        yield ""
def _handle_windows_line_ending(lines):
    """Strip CR/LF line endings after coercing each item to text."""
    for raw in lines:
        yield cast.text(raw).strip("\r\n")
def iterate_typing_from_help(lines):
    """Yield typing-stub source lines for a full help() dump.

    Dispatches on the top-level pydoc section headings (NAME, DATA,
    CLASSES, FUNCTIONS, ...); unknown non-empty sections raise
    NotImplementedError.
    """
    yield "# -*- coding=UTF-8 -*-"
    yield "# This typing file was generated by typing_from_help.py"
    for k, v in _parse_by_indent(lines):
        if k == "NAME":
            # Module name/summary becomes the module docstring.
            yield '"""'
            for i in v:
                yield i
            yield '"""'
            yield ""
            yield "import typing"
            yield ""
        elif k == "DATA":
            for i in _typing_from_data(v):
                yield i
        elif k == "CLASSES":
            for i in _typing_from_classes(v):
                yield i
        elif k == "FILE":
            pass
        elif k == "PACKAGE CONTENTS":
            pass
        elif k == "DESCRIPTION":
            yield '"""'
            for i in v:
                yield i
            yield '"""'
        elif k == "SUBMODULES":
            for i in v:
                yield "from . import %s" % i
        elif k == "VERSION":
            yield "# version: %s" % cast.one(v)
        elif k == "FUNCTIONS":
            for i in _typing_from_functions(v):
                yield i
        elif not v:
            pass
        else:
            raise NotImplementedError(k, v)
def typing_from_help(text):
    """Convert a complete help() dump to typing-stub source text."""
    help_lines = cast.text(text).splitlines()
    return "\n".join(iterate_typing_from_help(help_lines))
if __name__ == "__main__":
import argparse
import sys
import codecs
parser = argparse.ArgumentParser()
_ = parser.add_argument("--type", dest="type")
_ = parser.add_argument("file")
args = parser.parse_args()
should_close = False
if args.file == "-":
f = sys.stdin
else:
f = codecs.open(args.file, "r", encoding="utf-8")
should_close = True
try:
lines = _handle_windows_line_ending(f)
if args.type == "class":
for i in _typing_from_classes(lines):
print(i)
else:
for i in iterate_typing_from_help(lines):
print(i)
finally:
if should_close:
f.close()
|
import regex
def ignore_some_dir(dir_name):
    """Return True when *dir_name* should be kept, i.e. it is neither a
    dunder directory (like ``__pycache__``) nor a dot-directory.

    NOTE: despite the name, a True result means "do not ignore".
    """
    is_dunder = bool(regex.findall(r'^__[0-9a-zA-Z]+__', dir_name, overlapped=True))
    if is_dunder:
        return False
    is_hidden = bool(regex.findall(r'^\.[\s\S]+?', dir_name))
    return not is_hidden
def extension_filter(file_list, ext_filter):
    """Map each file name to its matching extension from *ext_filter*.

    :param file_list: iterable of file names
    :param ext_filter: list of accepted extensions WITHOUT the leading
        dot (e.g. ['py', 'tar.gz']); any non-list value is treated as an
        empty filter
    :return: dict of file name -> shortest accepted extension; files with
        no accepted extension are omitted
    """
    if type(ext_filter) is not list:
        ext_filter = []
    file_ext_dict = dict()
    for file_name in file_list:
        # Overlapped search yields every dotted suffix that is preceded
        # by at least one character (e.g. 'a.tar.gz' -> ['.tar.gz', '.gz']).
        ext_cand = regex.findall(r'.(\.[\s\S]+$)', file_name, overlapped=True)
        # Drop the leading dot from each candidate.
        ext_cand = [cand[1:] for cand in ext_cand]
        # Shortest candidates first, so the most specific match wins last-to-first.
        ext_cand = sorted(ext_cand, key=len)
        ext = None
        for cand in ext_cand:
            if cand in ext_filter:
                ext = cand
                break
        if ext is not None:
            file_ext_dict[file_name] = ext
    return file_ext_dict
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from corehq.apps.data_interfaces.forms import (
is_valid_case_property_name,
validate_case_property_name,
)
class TestFormValidation(TestCase):
    """Tests for the case-property-name validation helpers in
    corehq.apps.data_interfaces.forms."""

    # Property names that must always be rejected.
    INVALID_PROPERTY_NAMES = [
        ' parent/abc ',
        ' host/abc ',
        'abc~',
        ' ',
        None,
    ]

    def test_validate_case_property_name_with_parent_case_references(self):
        """Plain and parent/host-prefixed names are trimmed and returned;
        malformed names raise ValidationError."""
        self.assertEqual(
            validate_case_property_name(' abc ', allow_parent_case_references=True),
            'abc'
        )
        self.assertEqual(
            validate_case_property_name(' parent/abc ', allow_parent_case_references=True),
            'parent/abc'
        )
        self.assertEqual(
            validate_case_property_name(' host/abc ', allow_parent_case_references=True),
            'host/abc'
        )
        with self.assertRaises(ValidationError):
            validate_case_property_name('abc~', allow_parent_case_references=True)
        with self.assertRaises(ValidationError):
            validate_case_property_name('parent/abc~', allow_parent_case_references=True)
        with self.assertRaises(ValidationError):
            validate_case_property_name(' ', allow_parent_case_references=True)
        with self.assertRaises(ValidationError):
            validate_case_property_name(None, allow_parent_case_references=True)
        with self.assertRaises(ValidationError):
            validate_case_property_name(' parent/ ', allow_parent_case_references=True)
        with self.assertRaises(ValidationError):
            validate_case_property_name('unknown/abc', allow_parent_case_references=True)

    def test_validate_case_property_name_without_parent_case_references(self):
        """With parent references disabled, only plain names validate."""
        self.assertEqual(
            validate_case_property_name(' abc ', allow_parent_case_references=False),
            'abc'
        )
        for invalid_property_name in self.INVALID_PROPERTY_NAMES:
            with self.assertRaises(ValidationError):
                validate_case_property_name(invalid_property_name, allow_parent_case_references=False)

    def test_is_valid_case_property_name(self):
        # BUG FIX: method name was misspelled "test_is_valid_caes_..."
        # (still discovered by the test runner, but hard to grep for).
        for valid_property_name in ['abc', 'foo_bar']:
            self.assertTrue(is_valid_case_property_name(valid_property_name))
        for invalid_property_name in self.INVALID_PROPERTY_NAMES + [' abc ']:
            self.assertFalse(is_valid_case_property_name(invalid_property_name))
|
import os
import lmfit
def save(obj, fname, overwrite='ask', **kwargs):
    """Save a `dump`-able object to a file.

    Designed to reduce boilerplate at the command line when saving
    objects like `ModelResult`, `Model`, and `Parameters`.

    Args:
        obj: any object exposing a callable ``dump(file, **kwargs)``
        fname: destination path
        overwrite: True to overwrite silently, False to refuse, or 'ask'
            (default) to prompt interactively when the file exists
        **kwargs: forwarded to ``obj.dump``

    Raises:
        FileExistsError: if *fname* exists and overwriting was refused.
    """
    assert hasattr(obj, 'dump') and callable(obj.dump), "Object must have a dump method"
    # 'x' mode refuses to clobber an existing file unless upgraded below.
    mode = 'x'
    if os.path.exists(fname):
        # BUG FIX: the original fell through to mode='w' in every branch,
        # so overwrite=False (and answering 'n' at the prompt) still
        # silently overwrote the file.
        if overwrite is True:
            mode = 'w'
        elif overwrite == 'ask' and input("Overwrite? (y/n)") == 'y':
            mode = 'w'
    with open(fname, mode=mode) as f:
        obj.dump(f, **kwargs)
def load(fname, kind, **kwargs):
    """Load an lmfit object from disk.

    Designed to reduce boilerplate at the command line.

    Args:
        fname: the file name to open
        kind: which kind of object is stored: 'model', 'modelresult',
            or 'parameters' (case/whitespace insensitive)
        **kwargs: passed to the underlying load functions

    Raises:
        ValueError: for any unrecognized *kind*.
    """
    kind = str(kind).lower().strip()
    if kind == 'model':
        return lmfit.model.load_model(fname, **kwargs)
    if kind == 'modelresult':
        return lmfit.model.load_modelresult(fname, **kwargs)
    if kind == 'parameters':
        with open(fname, 'r') as f:
            return lmfit.Parameters().load(f, **kwargs)
    raise ValueError(f"Unrecognized kind {kind}; try model, modelresult, or parameters")
|
# Shop catalog: each entry is rendered as one embed field by the shop
# command below, priced in the bot's virtual currency.
mainshop = [{"name":"Pencil","price":20,"description":"Write/Draw something"},
            {"name":"Watch","price":500,"description":"Check the time"},
            {"name":"iPhone","price":1000,"description":"Call people and use apps"},
            {"name":"iPad","price":2000,"description":"Use apps"},
            {"name":"Laptop","price":3000,"description":"Do work and play games"},
            {"name":"Gaming PC","price":5000,"description":"Stream to Twitch and play games"}]
@bot.command()
async def shop(ctx):
    """Reply with an embed listing every item in `mainshop`.

    NOTE(review): `bot` and `discord` are defined elsewhere (not visible
    in this file chunk).
    """
    em = discord.Embed(title="BeeMod Economy Shop")
    for item in mainshop:
        name = item["name"]
        price = item["price"]
        desc = item["description"]
        em.add_field(name = name, value = f"${price} │ {desc}")
    em.set_footer(text="Use b!buy <item> to buy it!")
    await ctx.reply(embed=em)
|
def floyd_war(matrixSize, adjMatrix):
    """Floyd-Warshall all-pairs shortest paths, printing every stage.

    Args:
        matrixSize: number of vertices.
        adjMatrix: square adjacency matrix; "no edge" entries are expected
            to already be float('inf') (the __main__ block below replaces
            values >= 90 with infinity before calling).

    BUG FIX: the original referenced a global named ``test`` that is only
    defined under ``if __name__ == "__main__"``, so importing this module
    and calling floyd_war() raised NameError; the infinity sentinel is
    now local. The caller's matrix is also no longer mutated (the
    original's shallow copies aliased its rows).
    """
    INF = float("inf")
    # Deep-copy the rows so the caller's matrix is never modified.
    D_old = [row[:] for row in adjMatrix]
    print(f"Via node 0: ")
    for row in range(matrixSize):
        for column in range(matrixSize):
            print(D_old[row][column], end=" ")
        print()
    print()
    for node in range(matrixSize):
        print(f"Via Node {node + 1}:")
        # Fresh result matrix each round keeps D_old read-only this pass.
        D_new = [[0] * matrixSize for _ in range(matrixSize)]
        for row in range(matrixSize):
            for column in range(matrixSize):
                value = min(D_old[row][column], D_old[row][node] + D_old[node][column])
                # Clamp "effectively unreachable" sums back to infinity.
                D_new[row][column] = INF if value > 90 else value
                print(D_new[row][column], end=" ")
            print()
        D_old = D_new
        print()
if __name__ == "__main__":
matrixSize = int(input("Enter adjacency matrix size: "))
print("Enter Adjacency matrix values for vertices other than diagonals...")
adjMatrix = [[int(input(f"Enter value for {row + 1, column + 1}:")) if column != row else 0
for column in range(matrixSize)]for row in range(matrixSize)]
test = float("inf")
adjMatrix = [[test if adjMatrix[i][j] >= 90 else adjMatrix[i][j] for j in range(matrixSize)]
for i in range(matrixSize)]
print()
floyd_war(matrixSize, adjMatrix)
|
import argparse
def get_args():
    """Parse command-line arguments for RL training.

    Returns:
        argparse.Namespace with optimizer, rollout, logging and
        environment settings.

    BUG FIX: ``--num-env-steps`` declared ``type=int`` but carried the
    float default ``1e6`` (so the default and any parsed value had
    different types), and its help text claimed ``10e6``. The default is
    now the int 1000000 and the help text matches.
    """
    parser = argparse.ArgumentParser(description='RL')
    parser.add_argument('--algo', default='ah-ch')
    parser.add_argument('--lr', type=float, default=7e-4, help='learning rate (default: 7e-4)')
    parser.add_argument('--bc-lr', type=float, default=1e-3, help='behavior cloning lr')
    parser.add_argument('--eps', type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer apha (default: 0.99)')
    parser.add_argument('--belief-loss-coef', type=float, default=0.0,
                        help='belief loss term coefficient (default: 0.0)')
    parser.add_argument('--n-reactive', type=int, default=1,
                        help='number of reactive steps')
    parser.add_argument('--gae-lambda', type=float, default=0.95,
                        help='gae lambda parameter (default: 0.95)')
    parser.add_argument('--entropy-coef', type=float, default=0.01,
                        help='entropy term coefficient (default: 0.01)')
    parser.add_argument('--policy-file', type=str, default=None)
    parser.add_argument('--transitions-file', type=str, default=None)
    parser.add_argument('--running-mode', type=str, default='train')
    parser.add_argument('--value-loss-coef', type=float, default=0.5,
                        help='value loss coefficient (default: 0.5)')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='max norm of gradients (default: 0.5)')
    parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
    parser.add_argument('--cuda-deterministic', action='store_true', default=False,
                        help="sets flags for determinism when using CUDA (potentially slow!)")
    parser.add_argument('--num-processes', type=int, default=16,
                        help='how many training CPU processes to use (default: 16)')
    parser.add_argument('--bc-num-epochs', type=int, default=50)
    parser.add_argument('--num-steps', type=int, default=5,
                        help='number of forward steps in A2C (default: 5)')
    parser.add_argument('--bc-batch-size', type=int, default=5)
    parser.add_argument('--log-interval', type=int, default=10,
                        help='log interval, one log per n updates (default: 10)')
    parser.add_argument('--save-interval', type=int, default=100,
                        help='save interval, one save per n updates (default: 100)')
    parser.add_argument('--eval-interval', type=int, default=None,
                        help='eval interval, one eval per n updates (default: None)')
    parser.add_argument('--num-env-steps', type=int, default=1000000,
                        help='number of environment steps to train (default: 1e6)')
    parser.add_argument('--env-name', default='PomdpBumps-v0')
    parser.add_argument('--log-dir', default='/tmp/gym/',
                        help='directory to save agent logs (default: /tmp/gym)')
    parser.add_argument('--device', default='cpu')
    parser.add_argument('--save-dir', default='./logs/',
                        help='directory to save agent logs (default: ./logs/)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--save-transitions', action='store_true', default=False,
                        help='save transitions to file')
    parser.add_argument('--use-proper-time-limits', action='store_false', default=True,
                        help='compute returns taking into account time limits')
    parser.add_argument('--use-linear-lr-decay', action='store_true', default=False,
                        help='use a linear schedule on the learning rate')
    parser.add_argument('--use-linear-entropy-decay', action='store_true', default=False,
                        help='use a linear schedule on the entropy coeff')
    args = parser.parse_args()
    return args
|
# Copyright (C) 2006 Frederic Back (fredericback@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
class ClassParserInterface:
    """Abstract base class for class parsers.

    A class parser watches gedit documents and exposes the browser tree as a
    gtk.TreeModel; the elements of that tree are referred to as 'tags'.
    Exactly one instance of each parser exists at runtime (created at startup
    in __init__.py).

    Implementations typically keep custom python objects in a gtk.TreeStore
    or gtk.ListStore and supply a cell renderer to draw them.
    """

    # ---- methods that *have* to be implemented -----------------------------

    def parse(self, geditdoc):
        """Parse *geditdoc* (a gedit.Document) and return a gtk.TreeModel."""

    def cellrenderer(self, treeviewcolumn, cellrenderertext, treemodel, it):
        """Control what the text label of a browser-tree row looks like.

        See gtk.TreeViewColumn.set_cell_data_func for the callback contract.
        """

    # ---- methods that can be implemented ------------------------------------

    def pixbufrenderer(self, treeviewcolumn, cellrendererpixbuf, treemodel, it):
        """Control the pixmap shown next to the label; defaults to no icon.

        See gtk.TreeViewColumn.set_cell_data_func for the callback contract.
        """
        cellrendererpixbuf.set_property("pixbuf", None)

    def get_tag_position(self, model, doc, path):
        """Return where a tag lives in a file, for jump-to-symbol.

        Returns a tuple of (full file uri, line number) for the tag, or None
        when the tag has no correspondence in a file.

        model -- a gtk.TreeModel (previously provided by parse())
        path  -- a tuple containing the treepath
        """

    def get_menu(self, model, path):
        """Return a list of gtk.Menu items for the specified tag (default: none).

        model -- a gtk.TreeModel (previously provided by parse())
        path  -- a tuple containing the treepath
        """
        return []

    def current_line_changed(self, model, doc, line):
        """Notification that the cursor moved to another *line* of *doc*.

        Can be used to monitor changes in the document.

        model -- a gtk.TreeModel (previously provided by parse())
        doc   -- a gedit document
        line  -- int
        """

    def get_tag_at_line(self, model, doc, linenumber):
        """Return a treepath to the tag at *linenumber*, or None if not found.

        model -- a gtk.TreeModel (previously provided by parse())
        doc   -- a gedit document
        linenumber -- int
        """
|
import unittest
from puzzlesolver.utils import halver
class TestUtilities(unittest.TestCase):
    """Unit tests for puzzlesolver.utils helpers."""

    def test_can_divide_by_two(self):
        """The halver generator yields 0.5 first."""
        self.assertEqual(next(halver()), 0.5)
|
from fastapi_utils.api_model import APIModel
from pydantic import validator
from app.services.validator import is_eml_or_msg_file
class Payload(APIModel):
    """Request payload carrying a file reference as a plain string."""

    # NOTE(review): can't tell from here whether this is a path, URL, or raw
    # content -- confirm against the endpoint that consumes this payload.
    file: str
class FilePayload(APIModel):
    """Request payload carrying raw file bytes, validated as EML/MSG content."""

    file: bytes

    @validator("file")
    def eml_file_must_be_eml(cls, v: bytes) -> bytes:
        """Reject uploads that is_eml_or_msg_file() does not recognise."""
        if not is_eml_or_msg_file(v):
            raise ValueError("Invalid file format.")
        return v
|
"""0_migrate
Revision ID: 434bc58109ac
Revises:
Create Date: 2019-11-01 18:51:24.754787
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '434bc58109ac'   # this migration's id
down_revision = None        # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema.

    Base tables (environments, permissions, users, groups) are created first,
    then the association tables that reference them via foreign keys.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('environments',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('permissions',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('code_name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_permissions_code_name'), 'permissions', ['code_name'], unique=True)
    op.create_index(op.f('ix_permissions_name'), 'permissions', ['name'], unique=True)
    op.create_table('users',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('full_name', sa.String(length=255), nullable=True),
        sa.Column('username', sa.String(length=255), nullable=True),
        sa.Column('password', sa.String(), nullable=True),
        sa.Column('email', sa.String(length=255), nullable=True),
        sa.Column('document', sa.Unicode(length=14), nullable=True),
        sa.Column('phone_number', sa.Unicode(length=20), nullable=True),
        sa.Column('is_superuser', sa.Boolean(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('document'),
        sa.UniqueConstraint('phone_number')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_full_name'), 'users', ['full_name'], unique=False)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    # Association table: user membership in environments.
    # NOTE(review): the FK columns below use postgresql.UUID() without
    # as_uuid=True, unlike the primary keys they reference -- values match in
    # the database but the Python-side types differ; confirm this is intended.
    op.create_table('env_user',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('user_id', postgresql.UUID(), nullable=False),
        sa.Column('environment_id', postgresql.UUID(), nullable=False),
        sa.Column('is_admin', sa.Boolean(), nullable=True),
        sa.Column('extra_data', sa.String(length=50), nullable=True),
        sa.ForeignKeyConstraint(['environment_id'], ['environments.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('user_id', 'environment_id')
    )
    # Groups are scoped per-environment (name unique within an environment).
    op.create_table('groups',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('environment_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.ForeignKeyConstraint(['environment_id'], ['environments.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name', 'environment_id', name='uix_name_environment')
    )
    # Association table: permissions granted directly to users.
    op.create_table('user_permission',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('permission_id', postgresql.UUID(), nullable=False),
        sa.Column('user_id', postgresql.UUID(), nullable=False),
        sa.Column('extra_data', sa.String(length=50), nullable=True),
        sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('permission_id', 'user_id')
    )
    # Association table: permissions granted to groups.
    op.create_table('group_permission',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('group_id', postgresql.UUID(), nullable=False),
        sa.Column('permission_id', postgresql.UUID(), nullable=False),
        sa.Column('extra_data_json', postgresql.JSON(none_as_null=True, astext_type=sa.Text()), nullable=True),
        sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
        sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),
        sa.PrimaryKeyConstraint('group_id', 'permission_id')
    )
    # Association table: user membership in groups.
    op.create_table('user_group',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('group_id', postgresql.UUID(), nullable=False),
        sa.Column('user_id', postgresql.UUID(), nullable=False),
        sa.Column('extra_data', sa.String(length=50), nullable=True),
        sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('group_id', 'user_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade().

    Association tables are dropped first, then base tables, in reverse
    foreign-key dependency order.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_group')
    op.drop_table('group_permission')
    op.drop_table('user_permission')
    op.drop_table('groups')
    op.drop_table('env_user')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_full_name'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_index(op.f('ix_permissions_name'), table_name='permissions')
    op.drop_index(op.f('ix_permissions_code_name'), table_name='permissions')
    op.drop_table('permissions')
    op.drop_table('environments')
    # ### end Alembic commands ###
|
#
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Common utilities for engine platforms.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
import logging
from copy import copy
from inspect import isclass
from traceback import format_exc
from collections import OrderedDict
from pkg_resources import iter_entry_points
from .node import BaseNode
log = logging.getLogger(__name__)
class NodeLoader(object):
    """
    Node loader utility class for platform engines.

    This class allows to load nodes for a platform engine using Python entry
    points.

    :param str engine_name: Name of the engine.
    :param str api_version: Version of the API for that engine node.
    :param class base_class: Base class to check against. Any class specified
     here must comply with the :class:`BaseNode`.
    :raises TypeError: If *base_class* is not a subclass of :class:`BaseNode`.
    """

    def __init__(self, engine_name, api_version='1.0', base_class=None):
        super(NodeLoader, self).__init__()
        self.entrypoint = 'topology_{engine_name}_node_{api_version}'.format(
            engine_name=engine_name, api_version=api_version.replace('.', '')
        )
        self.base_class = base_class or BaseNode
        # Validate with an explicit raise instead of ``assert`` so the check
        # still runs under ``python -O`` (assertions are stripped there).
        if not issubclass(self.base_class, BaseNode):
            raise TypeError(
                'base_class {!r} must be a subclass of BaseNode'.format(
                    self.base_class
                )
            )
        self._nodes_cache = OrderedDict()

    def __call__(self, cache=True):
        """Shorthand for :meth:`load_nodes`."""
        return self.load_nodes(cache=cache)

    def load_nodes(self, cache=True):
        """
        List all available nodes types.

        This function lists all available node types by discovering installed
        plugins registered in the entry point. This can be costly or error
        prone if a plugin misbehave. Because of this a cache is stored after
        the first call.

        :param bool cache: If ``True`` return the cached result. If ``False``
         force reload of all plugins registered for the entry point.
        :rtype: OrderedDict
        :return: An ordered dictionary associating the name of the node type
         and the class (subclass of :class:`topology.platforms.node.BaseNode`)
         implementing it.
        """
        # Return cached value if call is repeated
        if cache and self._nodes_cache:
            return copy(self._nodes_cache)

        # Add built-in node types
        available = OrderedDict()

        # Iterate over entry points
        for ep in iter_entry_points(group=self.entrypoint):
            name = ep.name
            try:
                node = ep.load()
            except Exception:
                # log.exception() already records the full traceback at ERROR
                # level; the original's extra log.debug(format_exc()) call was
                # redundant and has been dropped.
                log.exception(
                    'Unable to load node from plugin {}'.format(name)
                )
                continue

            if not isclass(node) or not issubclass(node, self.base_class):
                log.error(
                    'Ignoring node "{}" as it doesn\'t '
                    'match the required interface: '
                    'Node not a subclass of {}.'.format(
                        name, self.base_class.__name__
                    )
                )
                continue

            available[name] = node

        self._nodes_cache = available
        return copy(self._nodes_cache)
__all__ = ['NodeLoader']
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-07 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Survey.override_does_not_apply_message to the labour app.

    dependencies = [
        ('labour', '0031_surveyrecord'),
    ]

    operations = [
        migrations.AddField(
            model_name='survey',
            name='override_does_not_apply_message',
            # NOTE(review): help_text contains a typo ("don'thave"); fix it in
            # the model and a follow-up migration rather than editing this
            # historical, already-applied one.
            field=models.TextField(default='', help_text="This message will be shown to the user when they attempt to access a query they don'thave access to.", verbose_name='Message when denied access'),
        ),
    ]
|
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import base64
import struct
def format_blob(blob):
    """Decode a base64-encoded blob and return the raw bytes."""
    raw = base64.b64decode(blob)
    return raw
def encode_blob(blob):
    """Base64-encode *blob* (str or bytes) and return the result as str."""
    raw = blob.encode('utf-8') if isinstance(blob, str) else blob
    return base64.b64encode(raw).decode('utf-8')
def pack(fmt, data):
    """Pack an iterable of value tuples into one binary blob.

    Each element of *data* is a tuple whose values are packed with
    ``struct.pack`` using *fmt*; e.g. ``data = [(x0, y0), (x1, y1), ..., (xN, yN)]``.

    :param fmt: struct format string applied to every tuple.
    :param data: iterable of argument tuples.
    :return: the concatenated packed bytes (``b''`` for empty *data*).
    """
    # Dead commented-out code from the original removed; a generator feeds
    # join directly without building an intermediate list.
    return b''.join(struct.pack(fmt, *datum) for datum in data)
def unpack(blob, fmt='>ff', step=8, decode=False):
    """Inverse of :func:`pack`: split *blob* into *step*-byte chunks, unpack
    each with *fmt*, and return the values grouped column-wise.

    :param blob: packed bytes (optionally base64-encoded).
    :param fmt: struct format string describing one chunk.
    :param step: chunk size in bytes; should equal ``struct.calcsize(fmt)``.
    :param decode: when True, base64-decode *blob* first.
    :return: list of per-field tuples, e.g. ``[(x0, x1, ...), (y0, y1, ...)]``;
        for an empty blob, a list of empty lists (one per 'f' in *fmt*).
    """
    if decode:
        blob = format_blob(blob)

    if blob:
        try:
            return list(zip(*[struct.unpack(fmt, blob[i:i + step]) for i in range(0, len(blob), step)]))
        except struct.error:
            # Trailing partial chunk (or corrupt data): keep everything that
            # unpacked cleanly and stop at the first failure.
            ret = []
            for i in range(0, len(blob), step):
                try:
                    args = struct.unpack(fmt, blob[i:i + step])
                except struct.error:
                    break
                ret.append(args)
            return list(zip(*ret))
    else:
        # BUG FIX: the original did ``for _ in fmt.count('f')``, iterating an
        # int and raising TypeError on every empty blob; ``range(...)`` was
        # clearly intended.
        return [[] for _ in range(fmt.count('f'))]
# ============= EOF =============================================
|
from .netdash_modules import NetDashModule
def create_netdash_modules(modules):
    """Instantiate a NetDashModule for every name in *modules*."""
    return list(map(NetDashModule, modules))
|
"""Test of cryptography."""
from pymicropel.helper.crypto import Crypto
def test_encrypt_decrypt_test():
    """Test encoding and decoding.

    Each (password, expected) pair checks that code_string produces the
    expected ciphertext and that decode_string round-trips back to the
    original. The four copy-pasted rounds of the original are collapsed
    into a case table.
    """
    original_str = "Sww=BRDqXPgX5ytH"
    cases = [
        (1, "Ffz7WCI{MAjR hyB"),
        (999999, 'Mgf"\\BUnF@vG+ieW'),
        (0, original_str),    # password 0: crypto effectively disabled
        (-1, original_str),   # negative password: crypto effectively disabled
    ]

    cryptography = Crypto()
    for password, expected in cases:
        cryptography.crypt_init(password)
        encoded_str = cryptography.code_string(original_str)
        assert expected == encoded_str
        decoded_str = cryptography.decode_string(encoded_str)
        assert original_str == decoded_str
|
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login, logout as auth_logout, update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import update_last_login
from django.contrib.auth.signals import user_logged_in
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import View
from social_core.backends.utils import load_backends
from extras.models import ObjectChange
from extras.tables import ObjectChangeTable
from netbox.authentication import get_auth_backend_display
from netbox.config import get_config
from utilities.forms import ConfirmationForm
from .forms import LoginForm, PasswordChangeForm, TokenForm, UserConfigForm
from .models import Token
#
# Login/logout
#
class LoginView(View):
    """
    Perform user authentication via the web UI.
    """
    template_name = 'login.html'

    @method_decorator(sensitive_post_parameters('password'))
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    @staticmethod
    def _get_auth_backends():
        # Map each configured backend name to its display metadata for the
        # template.
        return {
            name: get_auth_backend_display(name)
            for name in load_backends(settings.AUTHENTICATION_BACKENDS).keys()
        }

    def get(self, request):
        form = LoginForm(request)

        # Already authenticated? Skip the form entirely.
        if request.user.is_authenticated:
            logger = logging.getLogger('netbox.auth.login')
            return self.redirect_to_next(request, logger)

        return render(request, self.template_name, {
            'form': form,
            'auth_backends': self._get_auth_backends(),
        })

    def post(self, request):
        logger = logging.getLogger('netbox.auth.login')
        form = LoginForm(request, data=request.POST)

        if form.is_valid():
            logger.debug("Login form validation was successful")

            # If maintenance mode is enabled, assume the database is read-only, and disable updating the user's
            # last_login time upon authentication.
            if get_config().MAINTENANCE_MODE:
                logger.warning("Maintenance mode enabled: disabling update of most recent login time")
                user_logged_in.disconnect(update_last_login, dispatch_uid='update_last_login')

            # Authenticate user
            auth_login(request, form.get_user())
            logger.info(f"User {request.user} successfully authenticated")
            messages.info(request, "Logged in as {}.".format(request.user))

            return self.redirect_to_next(request, logger)

        else:
            logger.debug("Login form validation failed")

        # BUG FIX: the failure path previously passed the raw backend classes
        # from load_backends(), while get() passes the display-name mapping;
        # render the same structure on both paths so the template always
        # receives consistent context.
        return render(request, self.template_name, {
            'form': form,
            'auth_backends': self._get_auth_backends(),
        })

    def redirect_to_next(self, request, logger):
        """Redirect to the 'next' URL if it is a safe local path, else home."""
        data = request.POST if request.method == "POST" else request.GET
        redirect_url = data.get('next', settings.LOGIN_REDIRECT_URL)

        if redirect_url and redirect_url.startswith('/'):
            logger.debug(f"Redirecting user to {redirect_url}")
        else:
            if redirect_url:
                logger.warning(f"Ignoring unsafe 'next' URL passed to login form: {redirect_url}")
            redirect_url = reverse('home')

        return HttpResponseRedirect(redirect_url)
class LogoutView(View):
    """
    Deauthenticate a web user.
    """
    def get(self, request):
        logger = logging.getLogger('netbox.auth.logout')

        # Capture the user object before auth_logout() resets request.user.
        username = request.user
        auth_logout(request)
        logger.info(f"User {username} has logged out")
        messages.info(request, "You have logged out.")

        # Send the user home, clearing the session key cookie (if set).
        home_url = reverse('home')
        response = HttpResponseRedirect(home_url)
        response.delete_cookie('session_key')
        return response
#
# User profiles
#
class ProfileView(LoginRequiredMixin, View):
    """Render the current user's profile page."""

    template_name = 'users/profile.html'

    def get(self, request):
        # Compile a table of this user's 20 most recent object changes.
        recent_changes = ObjectChange.objects.restrict(
            request.user, 'view'
        ).filter(
            user=request.user
        ).prefetch_related('changed_object_type')[:20]

        context = {
            'changelog_table': ObjectChangeTable(recent_changes),
            'active_tab': 'profile',
        }
        return render(request, self.template_name, context)
class UserConfigView(LoginRequiredMixin, View):
    """Display and update the current user's UI preferences."""

    template_name = 'users/preferences.html'

    def get(self, request):
        form = UserConfigForm(instance=request.user.config)
        context = {'form': form, 'active_tab': 'preferences'}
        return render(request, self.template_name, context)

    def post(self, request):
        form = UserConfigForm(request.POST, instance=request.user.config)

        # Guard clause: invalid submissions re-render the form with errors.
        if not form.is_valid():
            context = {'form': form, 'active_tab': 'preferences'}
            return render(request, self.template_name, context)

        form.save()
        messages.success(request, "Your preferences have been updated.")
        return redirect('user:preferences')
class ChangePasswordView(LoginRequiredMixin, View):
    """Allow a locally-authenticated user to change their password."""

    template_name = 'users/password.html'

    def get(self, request):
        # LDAP users cannot change their password here
        if getattr(request.user, 'ldap_username', None):
            messages.warning(request, "LDAP-authenticated user credentials cannot be changed within NetBox.")
            return redirect('user:profile')

        form = PasswordChangeForm(user=request.user)
        return render(request, self.template_name, {
            'form': form,
            'active_tab': 'password',
        })

    def post(self, request):
        # NOTE(review): unlike get(), there is no LDAP guard here, so an LDAP
        # user could still POST directly -- confirm whether that is intended.
        form = PasswordChangeForm(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            # Keep the user's session valid after the password change.
            update_session_auth_hash(request, form.user)
            messages.success(request, "Your password has been changed successfully.")
            return redirect('user:profile')

        # BUG FIX: use the same tab key as get() ('password'); the original
        # passed 'change_password' here, which no other view in this file
        # uses, so tab highlighting was inconsistent when the form
        # re-rendered with errors.
        return render(request, self.template_name, {
            'form': form,
            'active_tab': 'password',
        })
#
# API tokens
#
class TokenListView(LoginRequiredMixin, View):
    """List the API tokens belonging to the current user."""

    def get(self, request):
        context = {
            'tokens': Token.objects.filter(user=request.user),
            'active_tab': 'api-tokens',
        }
        return render(request, 'users/api_tokens.html', context)
class TokenEditView(LoginRequiredMixin, View):
    """Create a new API token or edit an existing one owned by the requester.

    A pk selects an existing token (scoped to the current user so users
    cannot edit each other's tokens); no pk means a fresh, unsaved token.
    """

    def get(self, request, pk=None):
        if pk:
            token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
        else:
            token = Token(user=request.user)

        form = TokenForm(instance=token)

        return render(request, 'generic/object_edit.html', {
            'object': token,
            'form': form,
            'return_url': reverse('user:token_list'),
        })

    def post(self, request, pk=None):
        if pk:
            token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
            form = TokenForm(request.POST, instance=token)
        else:
            token = Token(user=request.user)
            form = TokenForm(request.POST)

        if form.is_valid():
            # Save with commit=False so ownership can be (re)assigned to the
            # requesting user before the row is written.
            token = form.save(commit=False)
            token.user = request.user
            token.save()

            msg = f"Modified token {token}" if pk else f"Created token {token}"
            messages.success(request, msg)

            # '_addanother' keeps the user on the creation form.
            if '_addanother' in request.POST:
                return redirect(request.path)
            else:
                return redirect('user:token_list')

        return render(request, 'generic/object_edit.html', {
            'object': token,
            'form': form,
            'return_url': reverse('user:token_list'),
        })
class TokenDeleteView(LoginRequiredMixin, View):
    """Confirm and perform deletion of one of the current user's tokens."""

    def get(self, request, pk):
        token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
        return_url = reverse('user:token_list')
        form = ConfirmationForm(initial={'return_url': return_url})
        context = {
            'object': token,
            'form': form,
            'return_url': return_url,
        }
        return render(request, 'generic/object_delete.html', context)

    def post(self, request, pk):
        token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
        form = ConfirmationForm(request.POST)

        if form.is_valid():
            token.delete()
            messages.success(request, "Token deleted")
            return redirect('user:token_list')

        context = {
            'object': token,
            'form': form,
            'return_url': reverse('user:token_list'),
        }
        return render(request, 'generic/object_delete.html', context)
|
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
## Distance import utility class definition
class DistanceImportUtility:
    """Imports nodal distance values into a Kratos model part."""

    def __init__(self, model_part, settings):
        self.settings = settings
        self.model_part = model_part

    def ImportDistance(self):
        """Read distances according to settings["import_mode"].

        Only "from_GiD_file" is currently handled: the GiD results file named
        by settings["distance_file_name"] is parsed and each node's DISTANCE
        solution step value is set (sign-swapped, since the Kratos distance
        criterion is opposite to GiD's).
        """
        import_mode = self.settings["import_mode"].GetString()

        if import_mode == "from_GiD_file":
            distance_file_name = self.settings["distance_file_name"].GetString()

            # "with" guarantees the file is closed even if parsing raises
            # (the original left it open on error).
            with open(distance_file_name, "r") as distance_file:
                distance_reading = False
                for line in distance_file:
                    # End of a values block: stop reading.
                    # BUG FIX: the original re-tested '"Values" in line' after
                    # this check; since "Values" is a substring of
                    # "End Values", reading was switched straight back on and
                    # any later non-numeric line crashed the parsing below.
                    if "End Values" in line:
                        distance_reading = False
                        continue
                    if distance_reading:
                        # Read nodal distance: "<node_id> <distance_value>"
                        node_id = int(line[0:line.index(" ")])
                        distance_value = float(line[line.index(" ") + 1:])
                        if node_id in self.model_part.Nodes:
                            # Note that the distance sign is swapped (Kratos distance criterion is opposite to GiD one)
                            self.model_part.Nodes[node_id].SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, -distance_value)
                    elif "Values" in line:
                        # Header of a values block: start reading on the next line.
                        distance_reading = True
|
import asyncio
from asyncevents import on_event, emit, get_current_emitter, ExceptionHandling
@on_event("error")
async def oh_no(_, event: str):
    """Handler registered for "error" events; always raises, to demonstrate
    the emitter's different error-handling strategies below."""
    print(f"Goodbye after {event!r}!")
    raise ValueError("D:")
async def handle_error(_, exc: Exception, event: str):
    """Coroutine exception handler: reports which exception was handled
    for which event (used as a custom on_error strategy below)."""
    message = f"Exception {type(exc).__name__!r} from {event!r} handled!"
    print(message)
async def main():
    """Demonstrate the emitter's error-handling strategies in sequence:
    propagate (default), LOG, IGNORE, then a custom coroutine handler."""
    try:
        await emit("error")  # The error propagates
    except ValueError:
        print("Bang!")
    # Now let's try a different error handling strategy
    get_current_emitter().on_error = ExceptionHandling.LOG  # Logs the exception
    await emit("error")  # This won't raise. Yay!
    print("We're safe!")
    # And a different one again
    get_current_emitter().on_error = ExceptionHandling.IGNORE  # Silences the exception
    await emit("error")  # This won't raise nor log anything to the console. Yay x2!
    print("We're safe again!")
    # Let's try using a coroutine function as an exception handler
    get_current_emitter().on_error = handle_error
    await emit("error")  # This will call handle_error with the exception object and the event name
    print("We're safe once again!")
if __name__ == "__main__":
    # Run the demo only when executed as a script (not on import).
    asyncio.run(main())
|
import re
import mock
from django.test import TestCase
from django_universal_view_decorator import ViewDecoratorBase
def test_log(*args, **kwargs):
    """Do-nothing logging hook; the tests wrap it with mock.patch to record
    the order of decorator/view calls."""
class TestAutomaticValueOfNumRequiredArgs(TestCase):
    """ If you don't override the `ViewDecoratorBase.num_required_args` class attribute then its value is
    dependent on the number of args of the `__init__()` of the concrete instantiated subclass of
    `ViewDecoratorBase`.

    Rules:
    - If `__init__()` has only a `self` argument then `num_required_args` is None. This means that the decorator
      receives no arguments and you can't use `()` after the decorator when you are applying it to a view.
    - If `__init__()` has at least one positional argument without a default value then `num_required_args` returns
      the number of positional args without a default value. This means that in case of applying the decorator
      you always have to specify at least the required arguments.
    - If `__init__()` has no positional arguments without a default value but it has at least one of the following:
      - positional arguments with default values
      - *args
      - **kwargs
      ... then `num_required_args` is -1 that means, you can apply the decorator to a view by specifying any of the
      optional arguments but if you don't specify any default args you can decide whether you want to use the
      empty arg-list brackets `()` after the decorator name. (Both @my_decorator and @my_decorator() are valid.)

    We don't test for python3 kwonlyargs but it doesn't matter because the presence of varargs is the important
    thing.
    """

    def test_init_has_only_self(self):
        # If `__init__()` has only a `self` arg then the decorator should not accept any args.
        # This means that `num_required_args` should be `None`.
        class MyDecorator(ViewDecoratorBase):
            def __init__(self):
                super(MyDecorator, self).__init__()

        self.assertIsNone(MyDecorator.num_required_args)

    def test_init_has_only_self_because_i_havent_defined_init_in_the_subclass(self):
        # An inherited `__init__()` (only `self`) behaves the same as an explicit one.
        class MyDecorator(ViewDecoratorBase):
            pass

        self.assertIsNone(MyDecorator.num_required_args)

    def test_1_required_arg(self):
        # One positional arg without a default -> exactly one required decorator arg.
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, required):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, 1)

    def test_2_required_args(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, required_0, required_1):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, 2)

    def test_required_arg_and_default_arg(self):
        # Args with defaults don't count towards the required-arg total.
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, required, default=None):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, 1)

    def test_default_arg(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, default=None):
                super(MyDecorator, self).__init__()

        # -1 means that we have 0 required args and the usage of `()` after the decorator is optional
        # if someone instantiates the decorator by passing no arguments.
        # e.g:
        #
        # my_decorator = MyDecorator.universal_decorator
        #
        # @my_decorator         # valid
        # class ViewClass(View):
        #     ...
        #
        # @my_decorator()       # valid
        # class ViewClass(View):
        #     ...
        self.assertEqual(MyDecorator.num_required_args, -1)

    def test_varargs(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, *args):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, -1)

    def test_kwargs(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, **kwargs):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, -1)

    def test_varargs_and_kwargs(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, *args, **kwargs):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, -1)

    def test_default_arg_and_kwargs(self):
        class MyDecorator(ViewDecoratorBase):
            def __init__(self, default=None, **kwargs):
                super(MyDecorator, self).__init__()

        self.assertEqual(MyDecorator.num_required_args, -1)
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestNumRequiredArgsHasExpectedEffectOnDecoratorArgPassing(TestCase):
class MyDecoratorBase(ViewDecoratorBase):
def _call_view_function(self, decoration_instance, view_class_instance, view_function, *args, **kwargs):
test_log('decorator')
return view_function(*args, **kwargs)
def test_num_required_args_is_none(self, mock_test_log):
class MyDecorator(self.MyDecoratorBase):
pass
self.assertIsNone(MyDecorator.num_required_args)
def view_function(request):
test_log('view_function', request)
return 'response'
# decorating without decorator arguments should work
response = MyDecorator.universal_decorator(view_function)('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator'),
mock.call('view_function', 'request'),
])
# Decorating the big nothing gives a useful error message...
# This decorator expects exactly one thing: the view to decorate.
regexes = [
"takes exactly 1 argument (0 given)", # python2
"missing 1 required positional argument: 'class_or_routine'", # python3
"MyDecorator.universal_decorator() takes exactly 1 argument (0 given)", # pypy3
]
pattern = '|'.join(re.escape(regex) for regex in regexes)
self.assertRaisesRegexp(TypeError, pattern, MyDecorator.universal_decorator)
# Decorating something that isn't a function, class, or class method should fail.
self.assertRaisesRegexp(TypeError,
r"Expected a regular view function, view class, or view class method, got "
"'this_isnt_a_valid_view_to_decorate' instead\.",
MyDecorator.universal_decorator, 'this_isnt_a_valid_view_to_decorate')
def test_num_required_args_is_minus_1(self, mock_test_log):
# The decorator has no required args and MyDecorator.num_required_args == -1 that means it's optional
# to write out the empty brackets `()` when we pass no args to the decorator.
class MyDecorator(self.MyDecoratorBase):
def __init__(self, arg=None):
super(MyDecorator, self).__init__()
self.assertEqual(MyDecorator.num_required_args, -1)
def view_function(request):
test_log('view_function', request)
return 'response'
# Should work without empty brackets.
response = MyDecorator.universal_decorator(view_function)('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator'),
mock.call('view_function', 'request'),
])
mock_test_log.reset_mock()
# Empty brackets should work.
response = MyDecorator.universal_decorator()(view_function)('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator'),
mock.call('view_function', 'request'),
])
mock_test_log.reset_mock()
# Passing the default decorator arg(s) should work.
response = MyDecorator.universal_decorator('decorator_param')(view_function)('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator'),
mock.call('view_function', 'request'),
])
def test_num_required_args_is_0(self, mock_test_log):
    """num_required_args == 0: the empty brackets `()` become mandatory."""
    class MyDecorator(self.MyDecoratorBase):
        # Note: the default implementation of the num_required_args attribute never returns 0. If you
        # have no required args then it returns -1 instead of 0 to allow omitting the empty brackets `()`
        # when you specify zero args for the decorator. You can however explicitly set this attribute to
        # zero when you know that your decorator has no required arguments. By doing so you force the users
        # of your decorator to write out the empty brackets even if they don't pass any decorator arguments
        # when applying it to a view.
        num_required_args = 0
    self.assertEqual(MyDecorator.num_required_args, 0)

    def view_function(request):
        test_log('view_function', request)
        return 'response'

    # Decorating a view without passing the required zero parameters (empty parens) to
    # the decorator shouldn't work.
    self.assertRaisesRegexp(
        TypeError,
        r'This error may be the result of passing the wrong number of arguments to a view decorator',
        MyDecorator.universal_decorator,
        view_function
    )
    response = MyDecorator.universal_decorator()(view_function)('request')
    self.assertEqual(response, 'response')
    self.assertListEqual(mock_test_log.mock_calls, [
        mock.call('decorator'),
        mock.call('view_function', 'request'),
    ])
def test_num_required_args_is_1_or_more(self, mock_test_log):
    """One required decorator arg: applying the decorator with zero args must fail."""
    class MyDecorator(self.MyDecoratorBase):
        def __init__(self, arg):
            super(MyDecorator, self).__init__()
    self.assertEqual(MyDecorator.num_required_args, 1)

    def view_function(request):
        test_log('view_function', request)
        return 'response'

    # "Calling" the decorator with zero args should cause an error because the number of
    # required args is exactly 1.
    self.assertRaisesRegexp(
        TypeError,
        r'This error may be the result of passing the wrong number of arguments to a view decorator',
        MyDecorator.universal_decorator
    )
    response = MyDecorator.universal_decorator('decorator_param')(view_function)('request')
    self.assertEqual(response, 'response')
    self.assertListEqual(mock_test_log.mock_calls, [
        mock.call('decorator'),
        mock.call('view_function', 'request'),
    ])
@mock.patch.object(ViewDecoratorBase, '_is_decorator_arg', return_value='is_decorator_arg_retval')
class TestAreDecoratorArgs(TestCase):
    """ Testing the `ViewDecoratorBase._are_decorator_args()` and `ViewDecoratorBase._is_decorator_arg()` methods."""

    def test_auto_detect_doesnt_work_on_single_positional_function_arg(self, mock_test_log):
        # A single positional function is ambiguous (decorated view vs decorator
        # arg), so the decision is delegated to the patched _is_decorator_arg()
        # and its return value is passed through unchanged.
        def function_object(request):
            pass
        result = ViewDecoratorBase._are_decorator_args((function_object,), {})
        self.assertEqual(result, 'is_decorator_arg_retval')
        self.assertTrue(mock_test_log.called)

    def test_auto_detect_doesnt_work_on_single_positional_class_arg(self, mock_test_log):
        # Same ambiguity with a single class argument (class-based views).
        class MyClass(object):
            pass
        result = ViewDecoratorBase._are_decorator_args((MyClass,), {})
        self.assertEqual(result, 'is_decorator_arg_retval')
        self.assertTrue(mock_test_log.called)

    def test_auto_detect_doesnt_work_on_single_positional_method_arg(self, mock_test_log):
        # Same ambiguity with a single method argument (view class methods).
        class MyClass(object):
            def method(self):
                pass
        result = ViewDecoratorBase._are_decorator_args((MyClass.method,), {})
        self.assertEqual(result, 'is_decorator_arg_retval')
        self.assertTrue(mock_test_log.called)

    def test_passing_kwargs_implies_decorator_args(self, mock_test_log):
        # Any keyword argument means these are decorator args -- no delegation.
        def function_object(request):
            pass
        result = ViewDecoratorBase._are_decorator_args((), dict(arg=function_object))
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
        result = ViewDecoratorBase._are_decorator_args((function_object,), dict(kwarg='kwarg_value'))
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)

    def test_args_count_other_than_1_implies_decorator_args(self, mock_test_log):
        # Two or more positional args can never be a lone decorated view.
        def function_object(request):
            pass
        result = ViewDecoratorBase._are_decorator_args((function_object, function_object), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
        result = ViewDecoratorBase._are_decorator_args((function_object, 'whatever'), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)

    def test_non_routine_and_non_class_arg_implies_decorator_arg(self, mock_test_log):
        # A single positional that is neither callable nor a class must be a
        # decorator arg (strings, None, bools, ints...).
        result = ViewDecoratorBase._are_decorator_args(('non_routine_and_non_class',), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
        result = ViewDecoratorBase._are_decorator_args((None,), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
        result = ViewDecoratorBase._are_decorator_args((False,), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
        result = ViewDecoratorBase._are_decorator_args((42,), {})
        self.assertTrue(result)
        self.assertFalse(mock_test_log.called)
|
def binary_search(array, searched_value):
    """Return the index of *searched_value* in sorted *array*, or -1 if absent.

    Classic iterative binary search; runs in O(log n). The input must already
    be sorted in ascending order.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = array[mid]
        if candidate == searched_value:
            return mid
        # Discard the half that cannot contain the value.
        if candidate < searched_value:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Demo: binary search requires sorted input, so sort the sample data first.
my_array = [32,2,25,3,11,78,-2,32]
my_array.sort()
print(my_array)  # sorted order, duplicates retained
print(binary_search(my_array, 78))  # index of 78 in the sorted list
|
# -*- coding: utf-8 -*-
__author__ = 'Sal Aguinaga'
__license__ = "GPL"
__version__ = "0.1.0"
__email__ = "saguinag@nd.edu"
import pprint as pp
import pandas as pd
import numpy as np
import scipy
from nltk.cluster import KMeansClusterer, GAAClusterer, euclidean_distance
import nltk.corpus
import nltk.stem
import re
import io
import json, time, sys, csv
from HTMLParser import HTMLParser
import sys, os, argparse
import traceback
import time, datetime
import ast
import glob
from collections import defaultdict
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
global cDict;
## http://stackoverflow.com/questions/23531608/how-do-i-save-streaming-tweets-in-json-via-tweepy
## https://www.airpair.com/python/posts/top-mistakes-python-big-data-analytics
## Gen graph: https://github.com/jdevoo/twecoll
## https://wakari.io/sharing/bundle/iuliacioroianu/Text_analysis_Python_NLTK
# def jacc_dist_cluster(nK, dfrow, cDict):
# print len(cDict)
# if len(cDict) < 1:
# cDict[1] = (dfrow[0], dfrow[1])
# else:
# print 'clustering: ', dfrow[2]
# for k,v in cDict.items():
# jDist = distance.jaccard(dfrow[2], cDict[k]])
def jaccard(seq1, seq2):
    """Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
    They should contain hashable items.
    The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
    from: https://github.com/doukremt/distance
    """
    set1, set2 = set(seq1), set(seq2)
    if not set1 and not set2:
        # Two empty sequences are identical; the original divided by the size
        # of the empty union and raised ZeroDivisionError here.
        return 0.0
    return 1 - len(set1 & set2) / float(len(set1 | set2))
def cluster_doc_collection(nbrK, clust_method, pndsDF):
    """Greedily bucket tweet texts into at most *nbrK* clusters by Jaccard distance.

    Column 1 of the DataFrame holds tweet ids, column 2 the tweet text.
    Python 2 only (print statements). NOTE(review): exit() is called after
    printing the clusters, so everything below it is dead code.
    """
    if clust_method != 'jacc':
        return
    df = pndsDF
    df = df.sort_values(by=[2])
    cDict = defaultdict(list)  # cluster id -> list of tweet ids
    #df.apply(lambda row: jacc_dist_cluster(nbrK, row, cDict=cDict), axis=1)
    for dfrow in df.iterrows():
        # print dfrow[0], dfrow[1][1]
        # if dfrow[0] > 15: break
        # -- the first case: seed cluster 1 with the first row
        if len(cDict) < 1:
            cDict[1].append(dfrow[1][1])
            continue
        tid_added_bool = False
        for k in cDict.keys():
            # print "key",k, len(cDict[k])
            jDVals = []
            if len(cDict[k]) > 1:
                # Mean distance of this tweet to every member of cluster k.
                # print [df.loc[df[1] == y[1]][2].values[0].encode('utf-8') for y in cDict[k]]
                jDVals.append([jaccard(dfrow[1][2], df.loc[df[1] == y][2].values[0].encode('utf-8')) for y in cDict[k]])
                jDist = np.mean(jDVals)
            else:
                jDist = jaccard(dfrow[1][2], str(df.loc[df[1] == cDict[k][0]][2].values[0].encode('utf-8')))
            # Accept into bucket k when the distance falls inside k's band.
            if ((k-1)/float(nbrK) > jDist) and (jDist < k/float(nbrK)):
                cDict[k].append(dfrow[1][1])
                tid_added_bool = True
        # No existing bucket accepted the tweet: open a new one if room remains.
        if (len(cDict) < nbrK) and not tid_added_bool:
            cDict[len(cDict.keys())+1].append(dfrow[1][1])
        # print dfrow[0], cDict.keys()
    # print(" {}".format(cDict))
    for k,v in cDict.items():
        for x in v:
            print k, df.loc[df[1] == x][2].values[0].encode('utf-8')
    # print len(cDict.values())
    exit()
    # NOTE(review): unreachable from here on (see exit() above).
    idx = np.arange(len(pndsDF))
    np.random.shuffle(idx)
    seed_clusters = df.loc[df.index.isin(idx[:nbrK])]
    seed_clusters['cltrs'] = range(len(seed_clusters))
    # clustersDict={}
    #
    # for j,clst in enumerate(seed_clusters.index.values):
    #     # Here I need to compare strings using levenstein or Jaccard similarity
    #     jacc_dist = df.apply(lambda doc: distance.jaccard(doc[2], df.iloc[clst][2]), axis=1)
    #
    #     # clustersDict[clust]=[x for x in jacc_dist.values if ((j/float(nbrK) > x) and ((j+1.)/float(nbrK) <x))]
    #     # print [x for x in jacc_dist.values if x < 1/float(nbrK)]
    #
    #     break
    exit()
    return
def normalize_word(word):
    # Lower-case then stem; `stemmer_func` is a module global set in
    # extract_tweets_citedby_graph() before this is called.
    return stemmer_func(word.lower())
def get_words(titles):
    """Collect the distinct normalized words appearing across *titles*.

    Iterates each title's items, normalizes them, and returns the resulting
    vocabulary as a list (order unspecified, as with the original set-based
    accumulation).
    """
    return list({normalize_word(word) for title in titles for word in title})
def vectorspaced(title):
    """Binary bag-of-words vector for *title* over the global `words` vocabulary.

    NOTE(review): iterates *title* directly; if *title* is a plain string this
    normalizes individual characters, not whitespace-split words -- confirm
    against the callers. Relies on module globals `words` and `stopwords`.
    """
    # title_components = [normalize_word(word) for word in title[0].decode('utf-8').split()]
    title_components = [normalize_word(word) for word in title]
    return np.array([
        word in title_components and not word in stopwords
        for word in words], np.short)
def extract_tweets_citedby_graph(df):
    """Cluster tweet texts (column 2) with GAAC and write large-cluster rows to disk.

    Python 2 only (print statement at the end). Side effect: sets the module
    globals `stemmer_func`, `words` and `stopwords` used by vectorspaced().
    """
    global stemmer_func, words, stopwords
    stemmer_func = nltk.stem.snowball.SnowballStemmer("english").stem
    stopwords = set(nltk.corpus.stopwords.words('english'))
    words = get_words(df[2].values)
    # pp.pprint(words[:10])
    # K-Means clustering:
    # cluster = KMeansClusterer(7, euclidean_distance,avoid_empty_clusters=True)
    # GAAClusterer
    cluster = GAAClusterer(21)
    cluster.cluster([vectorspaced(title) for title in df[2].values if title],True)
    classified_examples = [cluster.classify(vectorspaced(title)) for title in df[2].values]
    # for cluster_id, title in sorted(zip(classified_examples, df[2].values)):
    #     # print "{}\t{}\t{}\n".format(cluster_id, df[0].loc[df[2] == title].values, df[1].loc[df[2] == title].values)
    #     print "{}\t{}\t{}".format(cluster_id, df[1].loc[df[2] == title].values, title)
    # Display clusters / write to disk; only clusters with id > 6 are kept --
    # presumably the "relevant" ones, confirm the threshold with the author.
    with open ('Results/clustered_relevant_users.tsv', 'w') as f:
        for cluster_id,title in sorted(zip(classified_examples, df[2].values)):
            if cluster_id>6:
                # save: docid tab userids
                f.write('{}\t{}\n'.format(df[0].loc[df[2] == title].values, df[1].loc[df[2] == title].values))
    if os.path.exists('Results/clustered_relevant_users.tsv'): print 'file saved: Results/clustered_relevant_users.tsv'
    return
def write_tweets_df_todisk(tweets_df):
    """Dump the tweets DataFrame as a headerless, index-free TSV under Results/."""
    tweets_df.to_csv('Results/tweets_collection.tsv', sep='\t', header=False, index=False)
def cluster_tweets_infile(in_tsv_fname=""):
    """Load a tweets TSV (doc id, user id, text), keep tweets close to a seed
    sentence by Jaccard distance, then cluster the survivors.

    Python 2 only (print statements, str.decode).
    """
    if not in_tsv_fname:
        print 'Not a valid filename'
        return
    # Read the input TSV file with user_names and tweet
    df = pd.read_csv( in_tsv_fname, sep='\t', header=None)
    df = df.dropna()
    df = df.drop_duplicates()
    # Relevance is distance to this fixed seed sentence; 0.52 is an empirical
    # cut-off -- TODO confirm with the distance distribution (describe() below).
    seed_filter = "Graph Isomorphism in Quasipolynomial Time Laszlo Babai"
    df['jacc_dist'] = df.apply(lambda row: jaccard(seed_filter, row[2]), axis=1)#
    #print df['jacc_dist'].describe()
    #print df.loc[df['jacc_dist']< 0.52][2]
    if 1:
        # Active path: agglomerative clustering of the filtered tweets.
        extract_tweets_citedby_graph(df.loc[df['jacc_dist']< 0.52])
    else:
        df[2] = df[2].apply(lambda tweet: tweet.decode('utf-8'))
        cluster_doc_collection(21,'jacc',pndsDF=df)
    # (An earlier inline KMeans/GAAC experiment and link-stripping scratch code
    # were commented out here; extract_tweets_citedby_graph() holds the live
    # version of that logic.)
    return
def get_parser():
    """Build the command-line parser for the tweet-clustering script."""
    description = ('procjson clust | Ex: python procjson_clust.py '
                   'Results/tweets_cleaned.tsv')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('tsvfile', metavar='TSVFILE', help='Input file: tsv.')
    parser.add_argument('--version', action='version', version=__version__)
    return parser
def main():
    """CLI entry point: parse arguments and run the clustering pipeline."""
    parser = get_parser()
    args = vars(parser.parse_args())
    cluster_tweets_infile(args['tsvfile'])
    # NOTE(review): this check runs *after* the work and can never trigger --
    # 'tsvfile' is a required positional, so argparse exits earlier if missing.
    if not args['tsvfile']:
        parser.print_help()
        os._exit(1)
if __name__=='__main__':
    main()
    print 'Done'  # Python 2 print statement
|
from conans import ConanFile
from conan_build_helper.cmake import *
from conan_build_helper.headeronly import *
from conan_build_helper.require_scm import *
class ConanCommonRecipes(ConanFile):
    """Conan package that ships shared build-helper recipes for other packages."""
    name = "conan_build_helper"
    version = "0.0.1"
    url = "https://gitlab.com/USERNAME/conan_build_helper"
    license = "MIT"
    description = "Common recipes for conan.io packages"
    # Export every python module in the package as the recipe payload.
    exports = "*.py"
|
import argparse
from defaults import EPIC_JPOSE, EPIC_MMEN
def get_JPoSE_parser(info_str):
    """Extend the base parser with JPoSE-specific options.

    Adds loss weights for the verb/noun/action embeddings and the action
    combination function; defaults come from EPIC_JPOSE.
    """
    parser = get_base_parser(info_str, EPIC_JPOSE)
    parser.add_argument('--action-weight', type=float, help='Weight of the action losses. [{}]'.format(EPIC_JPOSE.action_weight))
    parser.add_argument('--comb-func', type=str, help='What combination function to use for the action embedding. [{}]'.format(EPIC_JPOSE.comb_func))
    parser.add_argument('--comb-func-start', type=int, help='When to start using a learned combine function instead of concatenate. [{}]'.format(EPIC_JPOSE.comb_func_start))
    parser.add_argument('--noun-weight', type=float, help='Weight of the noun losses. [{}]'.format(EPIC_JPOSE.noun_weight))
    parser.add_argument('--verb-weight', type=float, help='Weight of the verb losses. [{}]'.format(EPIC_JPOSE.verb_weight))
    parser.set_defaults(
        action_weight=EPIC_JPOSE.action_weight,
        comb_func=EPIC_JPOSE.comb_func,
        comb_func_start=EPIC_JPOSE.comb_func_start,
        noun_weight=EPIC_JPOSE.noun_weight,
        verb_weight=EPIC_JPOSE.verb_weight
    )
    return parser
def get_MMEN_parser(info_str):
    """Extend the base parser with MMEN-specific options.

    Adds the required positional `caption_type`; all other defaults come from
    EPIC_MMEN via the base parser.
    """
    parser = get_base_parser(info_str, EPIC_MMEN)
    parser.add_argument('caption_type', type=str, help='Type of captions to use {caption, verb, noun}.')
    # The original called parser.set_defaults() with no arguments -- a no-op;
    # removed as dead code.
    return parser
def _str_to_bool(value):
    # argparse-friendly boolean parser. The original used `type=bool`, for
    # which any non-empty string -- including "False" -- parses as True.
    return value.strip().lower() not in ('false', '0', 'no', '')


def get_base_parser(info_str, defaults_):
    """Build the argument parser shared by all embedding-training scripts.

    Args:
        info_str: description shown in the parser's help output.
        defaults_: object whose attributes supply the default values
            (batch_size, learning_rate, loss weights, ...).

    Returns:
        argparse.ArgumentParser with all common options registered and
        defaulted from *defaults_* (gpu defaults to False).
    """
    parser = argparse.ArgumentParser(info_str)
    parser.add_argument('--batch-size', type=int, help='Size of the batch during training. [{}]'.format(defaults_.batch_size))
    parser.add_argument('--checkpoint-rate', type=int, help='How many epochs between saving a model checlpoint. [{}]'.format(defaults_.checkpoint_rate))
    parser.add_argument('--embedding-size', type=int, help='Size of the resulting embedding. [{}]'.format(defaults_.embedding_size))
    # Fixed: `type=bool` treated any non-empty string (e.g. "--gpu False") as True.
    parser.add_argument('--gpu', type=_str_to_bool, help='Whether or not to use the gpu for training. [False]')
    parser.add_argument('--learning-rate', type=float, help='Value for the learning rate. [{}]'.format(defaults_.learning_rate))
    parser.add_argument('--margin', type=float, help='The size of the margin for the triplet losses. [{}]'.format(defaults_.margin))
    parser.add_argument('--momentum', type=float, help='Momentum used for the SGD optimiser. [{}]'.format(defaults_.momentum))
    parser.add_argument('--num-epochs', type=int, help='Number of epochs to train for. [{}]'.format(defaults_.num_epochs))
    parser.add_argument('--num-layers', type=int, help='Number of layers for each embedding network. [{}]'.format(defaults_.num_layers))
    parser.add_argument('--optimiser', type=str, help='Which optimiser to use, SGD or adam. [{}]'.format(defaults_.optimiser))
    parser.add_argument('--out-dir', type=str, help='Where to save the model and outputs. [{}]'.format(defaults_.out_dir))
    parser.add_argument('--tt-weight', type=float, help='Weight of the text to text weight. [{}]'.format(defaults_.tt_weight))
    parser.add_argument('--tv-weight', type=float, help='Weight of the text to visual weight. [{}]'.format(defaults_.tv_weight))
    parser.add_argument('--vt-weight', type=float, help='Weight of the visual to text weight. [{}]'.format(defaults_.vt_weight))
    parser.add_argument('--vv-weight', type=float, help='Weight of the visual to visual weight. [{}]'.format(defaults_.vv_weight))
    parser.set_defaults(
        batch_size=defaults_.batch_size,
        checkpoint_rate=defaults_.checkpoint_rate,
        embedding_size=defaults_.embedding_size,
        gpu=False,
        learning_rate=defaults_.learning_rate,
        margin=defaults_.margin,
        momentum=defaults_.momentum,
        num_epochs=defaults_.num_epochs,
        num_layers=defaults_.num_layers,
        optimiser=defaults_.optimiser,
        out_dir=defaults_.out_dir,
        tt_weight=defaults_.tt_weight,
        tv_weight=defaults_.tv_weight,
        vt_weight=defaults_.vt_weight,
        vv_weight=defaults_.vv_weight,
    )
    return parser
|
import os
from mock import Mock, patch
import pytest
from bson.objectid import ObjectId
from torrents.downloader import Downloader
#path of torrent file
def path_to_fixture(name):
    """Absolute path of *name* inside this test module's ``fixtures`` directory."""
    fixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures')
    return os.path.join(fixtures_dir, name)
@pytest.fixture
def dl_obj():
    """A Downloader wired to a mocked libtorrent session (no real networking)."""
    ses = Mock()
    ses.listen_on = Mock()
    downloader = Downloader()
    downloader.ses = ses
    return downloader
class TestDownloader:
    """Unit tests for Downloader (the libtorrent session is mocked)."""

    # tests to see if torrent file is downloading
    def test_start(self, dl_obj):
        ses = Mock()
        ses.listen_on = Mock()
        dl_obj.start(ses)
        # Fixed: `assert mock.called_once` always passes because accessing any
        # attribute on a Mock auto-creates a truthy child Mock. Use the real
        # assertion helper instead.
        ses.listen_on.assert_called_once()

    # test to see if a torrent was successfully added
    def test_add_torrent(self, dl_obj):
        dl_obj.ses.add_torrent = Mock()
        dl_obj.start(dl_obj.ses)
        # Context manager closes the fixture file (it was leaked before).
        with open(path_to_fixture('test1.torrent'), 'rb') as fh:
            data = fh.read()
        dl_obj.add_torrent(ObjectId(), data)
        dl_obj.ses.add_torrent.assert_called_once()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from distutils.core import setup
# Distutils metadata for the kivymd package; package_data ships the image
# atlases and fonts the widgets load at runtime.
setup(name='kivymd',
      version='0.1.2',
      description='Set of widgets for Kivy inspired by Google\'s Material '
                  'Design',
      author='Andrés Rodríguez',
      author_email='andres.rodriguez@lithersoft.com',
      url='https://github.com/mixedCase/kivymd',
      packages=['kivymd'],
      package_data={'kivymd': ['images/*.png', 'images/*.jpg', 'images/*.atlas', 'fonts/*.ttf']},
      )
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.controller import Controller
from netforce.model import Model, fields, get_model
from netforce import database
import time
import datetime
from netforce import access
class ReturnPaysbuy(Controller):
    """Handles the Paysbuy payment-gateway return callback for the web shop.

    The gateway POSTs back with `cart_id`, a `result` status code and
    optionally the payment `method`; this records the payment and redirects
    the customer to a confirmation or cancellation page.
    """
    _path = "/ecom_return_paysbuy"

    def post(self):
        with database.Transaction():
            # Debug trace of the raw callback, appended to local files.
            print("POST ARGUMENT >>>>>>>>>>>>>>>>>>>")
            print(self.request.body)
            f = open("paysbuy_return", "a")
            s = "################################################################################################################" + \
                "\n"
            s += "Date : " + time.strftime("%Y-%m-%d %H:%M:%S") + "\n"
            s += "Request : " + str(self.request) + "\n"
            if self.request.body:
                s += "Body : " + str(self.request.body) + "\n"
            s += "################################################################################################################" + \
                "\n"
            f.write(s)
            f.close()
            cart_id = int(self.get_argument("cart_id"))
            result = self.get_argument("result", None)
            method = self.get_argument("method", None)
            cart = get_model("ecom.cart").browse(cart_id)
            f = open("record_return", "a")
            x= "cart_id:" +str(cart_id)
            x+= "\nrestut:" +result
            f.write(x)
            f.close()
            if method:
                # Elevate to the system user so the cart update is permitted.
                access.set_active_user(1)
                access.set_active_company(1)
                cart.update_paysbuy_method(method)
            if result.startswith("00"):  # received payment already
                if not cart.is_paid:
                    access.set_active_user(1)
                    access.set_active_company(1)
                    cart.import_paysbuy_payment()
                self.redirect("/ecom_order_confirmed?cart_id=%s" % cart_id)
            elif result.startswith("02"):  # will receive payment later
                self.redirect("/ecom_order_confirmed?cart_id=%s" % cart_id)
            else:
                cart.cancel_order()
                self.redirect("/ecom_order_cancelled?cart_id=%s" % cart_id)
            # NOTE(review): this second redirect always runs after one of the
            # branches above has already redirected -- likely a missing
            # `return` in each branch; confirm against the framework's
            # redirect semantics before changing behavior.
            self.redirect("/ecom_order_cancelled?cart_id=%s" % cart_id)


ReturnPaysbuy.register()
|
from utilities import *
from shapes import *
# Shared geometry helper used by the caliper sweep (signed-area computations).
GU = GeometryUtility()
class RotatingCaliper():
    """Rotating-calipers sweep over a convex polygon.

    Assumes `points` are the vertices of a convex polygon in order around its
    boundary -- TODO confirm orientation convention with the callers.
    Precomputes, for every vertex, the index range of its antipodal vertices,
    from which the diameter and width of the polygon are derived.
    """

    def __init__(self, points):
        self._points = points
        self._N = len(points)
        # vertex index -> [first, last] antipodal vertex index (filled below)
        self._antipodals = None
        self._calipers()

    def _next(self, i):
        # Next vertex index, wrapping around the polygon.
        return (i+1) % self._N

    def _calipers(self):
        """Compute the antipodal index range of every vertex.

        For each edge (i, j), advance q while the triangle area w.r.t. the
        edge keeps growing; the final q is antipodal to both endpoints.
        """
        q = 1
        antipodals = {}
        N = len(self._points)
        for i in range(N):
            antipodals[i] = [-1,-1]
        next = self._next
        PSet = self._points
        for i in range(N):
            j = next(i)
            while GU.area2(PSet[i], PSet[j], PSet[next(q)]) > GU.area2(PSet[i], PSet[j], PSet[q]):
                q = next(q)
            antipodals[j][0] = q
            antipodals[i][1] = q
        self._antipodals = antipodals

    def AntiPodals(self):
        """Return all antipodal vertex pairs, each pair listed only once."""
        antis = []
        PSet = self._points
        for k, v in self._antipodals.items():
            for i in range(v[0], v[1]+1):
                # Skip pairs already recorded in either order.
                if [PSet[k], PSet[i]] in antis or [PSet[i], PSet[k]] in antis:
                    continue
                antis.append([PSet[k], PSet[i]])
        return antis

    def Diameter(self):
        """Return (diameter, supporting point pair): the farthest antipodal pair."""
        antis = self.AntiPodals()
        d = -1
        support = []
        for ppair in antis:
            td = ppair[0].distance(ppair[1])
            if td > d:
                d = td
                support = ppair
        return d, support

    def Width(self):
        """Return (width, [edge start, edge end, opposite point]).

        Width is the minimum over all edges of the distance from the edge to
        its last antipodal vertex.
        """
        w = float("inf")
        support = []
        for i in range(self._N):
            fpt = self._points[i]
            tpt = self._points[(i+1) % self._N]
            lseg = LineSegment(fpt, tpt)
            j = self._antipodals[i][-1]
            pt = self._points[j]
            td = lseg.height(pt)
            if td < w:
                w = td
                support = [fpt, tpt, pt]
        return w, support
from __future__ import print_function
import logging
import os
from aeromancer import project
from aeromancer import project_filter
from aeromancer.cli.run import ProjectShellCommandBase
class Grep(ProjectShellCommandBase):
    """Search the contents of files.

    Accepts most of the arguments of git-grep, unless they conflict
    with other arguments to this command.
    """

    log = logging.getLogger(__name__)

    def _get_command(self, parsed_args):
        # Forward any extra CLI arguments straight to git-grep.
        base_command = ['git', 'grep']
        return base_command + self._extra
|
import math
import numpy as np
import torch
from torch import nn
from .backbone import conv3x3
from .transformer import Transformer
class ConvHeader(nn.Module):
    """Trajectory-prediction head over segmentation features.

    A small conv stack refines the backbone features, then a heatmap head
    produces one spatial map per trajectory waypoint. Optionally a road
    segmentation mask is predicted, and waypoints are decoded either by a
    Transformer or directly from the heatmap argmax positions.
    """

    def __init__(self, cfg):
        super(ConvHeader, self).__init__()
        self.cfg = cfg
        self.use_bn = cfg.use_bn
        self.use_transformer = cfg.use_transformer
        self.use_road_seg = cfg.use_road_seg
        # A conv bias is redundant when BatchNorm follows the conv.
        bias = not self.use_bn
        self.conv1 = conv3x3(96, 96, bias=bias)
        self.bn1 = nn.BatchNorm2d(96)
        self.conv2 = conv3x3(96, 96, bias=bias)
        self.bn2 = nn.BatchNorm2d(96)
        self.conv3 = conv3x3(96, 96, bias=bias)
        self.bn3 = nn.BatchNorm2d(96)
        self.conv4 = conv3x3(96, 96, bias=bias)
        self.bn4 = nn.BatchNorm2d(96)
        # One heatmap channel per trajectory waypoint.
        self.heatmap_head = conv3x3(96, self.cfg.num_points_per_trajectory, bias=True)
        self.softmax = nn.Softmax(dim=-1)
        npoints = self.cfg.num_points_per_trajectory
        if self.use_road_seg:
            self.road_head = conv3x3(96, 1, bias=True)
            self.conv7 = nn.Conv2d(1, npoints, 3, 2, padding=1, bias=True)
        if self.cfg.use_transformer:
            self.conv5 = nn.Conv2d(96, npoints, 3, 2, padding=1, bias=True)
            # depth-wise convolution
            if self.cfg.transformer.use_position_encoder:
                self.conv6 = nn.Conv2d(npoints, npoints, 3, 2, groups=npoints, padding=1, bias=True)
            self.waypoint_predictor = Transformer(self.cfg.transformer)
        self.road_loss_func = nn.BCELoss().cuda()
        self.heatmap_loss_func = nn.MSELoss().cuda()

    def spatial_softmax(self, x):
        """Normalize each channel over its spatial extent.

        Returns the softmaxed maps and the per-channel argmax positions,
        with x and y each scaled into [0, 1) by the map width/height.
        """
        b, c, h, w = x.shape
        x = self.softmax(x.view(b, c, -1))
        x = x.view(b, c, h, w)
        # Recover (x, y) of the per-channel peak from the flattened argmax.
        argmax = torch.argmax(x.view(b * c, -1), dim=1)
        argmax_x, argmax_y = torch.remainder(argmax, w).float(), torch.floor(torch.div(argmax.float(), float(w)))
        argmax_x = argmax_x.view((b, c, -1)) / float(w)
        argmax_y = argmax_y.view((b, c, -1)) / float(h)
        pos_pred = torch.cat([argmax_x, argmax_y], dim=2)
        return x, pos_pred

    def forward(self, batch_dict):
        """Run the head; reads 'seg_features' and fills prediction keys in batch_dict."""
        feature_map = batch_dict['seg_features']
        x = self.conv1(feature_map)
        if self.use_bn:
            x = self.bn1(x)
        x = self.conv2(x)
        if self.use_bn:
            x = self.bn2(x)
        x = self.conv3(x)
        if self.use_bn:
            x = self.bn3(x)
        x = self.conv4(x)
        if self.use_bn:
            x = self.bn4(x)
        heatmap = self.heatmap_head(x)  # [b, num_points_per_trajectory, 75, 100]
        heatmap, pred_pos = self.spatial_softmax(heatmap)
        if self.use_road_seg:
            road = torch.sigmoid(self.road_head(x))  # [b, 1, 75, 100]
            batch_dict['pred_seg'] = road.squeeze(1)
        else:
            # Without a road head, the summed waypoint heatmaps stand in for
            # the segmentation output.
            batch_dict['pred_seg'] = heatmap.sum(dim=1)
        batch_dict['pred_heatmap'] = heatmap.sum(dim=1)
        if self.cfg.use_transformer:
            waypoints_feature = self.conv5(feature_map)
            b, c, h, w = waypoints_feature.shape
            if self.cfg.transformer.use_position_encoder:
                # Positional features derived from the heatmaps themselves.
                x2 = self.conv6(heatmap)
                batch_dict['pos_feature'] = x2.view(b, c, -1)
            if self.use_road_seg:
                # Fuse the road mask into the waypoint features.
                x3 = self.conv7(road)
                waypoints_feature += x3
            waypoints_feature = waypoints_feature.view(b, c, -1)
            batch_dict['waypoints_feature'] = waypoints_feature
            batch_dict = self.waypoint_predictor(batch_dict)
        else:
            # No transformer: waypoints are simply the heatmap peak positions.
            batch_dict['waypoints_pred'] = pred_pos
        if self.training:
            # Stash predictions/targets for get_loss().
            if self.use_road_seg:
                self.road_pred = road.squeeze(1)
                self.road_target = batch_dict['img_ins']
            self.heatmap_pred = heatmap
            self.heatmap_targets = batch_dict['heatmap']
        return batch_dict

    def get_loss(self):
        """Combine heatmap MSE with optional waypoint and road losses.

        Returns (total loss, dict of per-term losses for tensorboard).
        """
        loss_heatmap = self.heatmap_loss_func(self.heatmap_pred, self.heatmap_targets)
        loss_heatmap *= self.cfg.weight_loss_heatmap
        loss = loss_heatmap
        tb_dict = {'loss_heatmap': loss_heatmap}
        if self.cfg.use_transformer:
            loss_waypoint = self.waypoint_predictor.get_loss()
            loss_waypoint *= self.cfg.weight_loss_waypoint
            loss += loss_waypoint
            tb_dict['loss_waypoint'] = loss_waypoint
        if self.use_road_seg:
            loss_road = self.road_loss_func(self.road_pred, self.road_target)
            loss_road *= self.cfg.weight_loss_road
            loss += loss_road
            tb_dict['loss_road'] = loss_road
        return loss, tb_dict

    def get_prediction(self, batch_dict):
        """Return the predicted waypoints as a numpy array."""
        pred_points = batch_dict['waypoints_pred']
        return pred_points.detach().cpu().numpy()

    def parse_predicted_waypoints(self, prediction):
        """Convert per-waypoint heatmaps into (x, y) pixel coordinates.

        Takes the argmax location of each channel; returns a float32 array of
        shape (num_points_per_trajectory, 2).
        """
        assert isinstance(prediction, torch.Tensor)
        predicted_points = []
        prediction = prediction.detach().cpu().numpy()
        c, h, w = prediction.shape[0:3]
        num_points = c
        assert num_points == self.cfg.num_points_per_trajectory
        for n in range(num_points):
            tmp = prediction[n]
            y, x = np.unravel_index(np.argmax(tmp), tmp.shape)
            point = np.array([x, y])
            predicted_points.append(point)
        return np.array(predicted_points).astype(np.float32)
|
"""A feature extractor for provider information."""
from __future__ import absolute_import
import logging
import pandas as pd
from sutter.lib import postgres
from sutter.lib.feature_extractor import FeatureExtractor
from sutter.lib.helper import format_column_title
log = logging.getLogger('feature_extraction')
class ProviderExtractor(FeatureExtractor):
    """
    Generates features related to patient's provider.
    Features:
      - `specialty_*` - Whether the patient's provider has the given specialty (47 different ones).
    """

    def extract(self):
        # One row per account/specialty pair from the feature view.
        query = """
            SELECT *
            FROM {}.bayes_vw_feature_provider
        """.format(self._schema)
        engine = postgres.get_connection()
        res = pd.read_sql(query, engine)
        log.info('The queried table has %d rows.' % len(res))
        # Join each account's specialties into one '|'-separated string and
        # one-hot encode them via str.get_dummies().
        pivoted = res.dropna() \
                     .groupby('hsp_acct_study_id') \
                     .specialty \
                     .apply(_rename_columns) \
                     .str.get_dummies()
        # Re-index over every account so accounts with no recorded specialty
        # end up with all-False feature rows.
        df = pd.DataFrame(index=res.hsp_acct_study_id.unique())
        df[pivoted.columns] = pivoted.astype('bool')
        df.fillna(False, inplace=True)
        return self.emit_df(df)
def _rename_columns(specialties):
return "|".join('specialty_' + format_column_title(s) for s in specialties)
|
# coding=utf-8
import asyncio
async def compute(x, y):
    """Asynchronously add *x* and *y* after a simulated one-second delay."""
    print("Compute %s + %s ..." % (x, y))
    await asyncio.sleep(1.0)  # compute() is suspended here until sleep() completes
    return x + y
async def print_sum(x, y):
    """Await compute() and print the resulting sum."""
    result = await compute(x, y)  # print_sum() is suspended until compute() returns
    print("%s + %s = %s" % (x, y, result))
# Drive the top-level coroutine to completion on the default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(print_sum(1, 2))
loop.close()
# ### Chain coroutines (nested coroutines)
# A nested coroutine is simply one coroutine awaiting another.
|
from datetime import datetime
class Monitor:
    """Early-stopping monitor driven by recall@20 + ndcg@20.

    Each update logs the metrics; when the tracked score fails to improve by
    at least *delta* for *max_patience* consecutive updates, update_monitor()
    returns True to signal that training should stop.
    """

    def __init__(self, max_patience=5, delta=1e-6, log_file=None):
        self.counter = 0            # number of evaluation rounds seen
        self.best_value = 0         # best recall@20 + ndcg@20 so far
        self.max_patience = max_patience
        self.patience = max_patience
        self.delta = delta
        self.log_file = log_file
        header = "time,iteration,hitrate@20,recall@20,ndcg@20,hitrate@50,recall@50,ndcg@50"
        print(header, file=self.log_file)

    def update_monitor(self, hitrate20, recall20, ndcg20, hitrate50, recall50, ndcg50):
        """Record one evaluation round; return True when patience is exhausted,
        False on improvement, None while still counting down."""
        self.counter += 1
        print("%s hitrate@20=%.4lf, recall@20=%.4lf, ndcg@20=%.4lf"
              % (datetime.now(), hitrate20, recall20, ndcg20))
        print("%s hitrate@50=%.4lf, recall@50=%.4lf, ndcg@50=%.4lf"
              % (datetime.now(), hitrate50, recall50, ndcg50))
        print("%s,%d,%f,%f,%f,%f,%f,%f"
              % (datetime.now(), self.counter, hitrate20, recall20, ndcg20,
                 hitrate50, recall50, ndcg50),
              file=self.log_file)
        score = recall20 + ndcg20
        if score >= self.best_value + self.delta:
            # Improvement: reset the countdown and remember the new best.
            self.patience = self.max_patience
            self.best_value = score
            return False
        self.patience -= 1
        print("%s the monitor counts down its patience to %d!" % (datetime.now(), self.patience))
        if self.patience == 0:
            return True
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import mimetypes
import pprint as pp
from .log import Logger
from .base import config, digest
from .nominals import para_join, nominal
from .resource import Resource
log = Logger(__name__)
class Registry(Resource):
    """Keyed store of e-mail-derived content, deduplicated through add_once()."""

    @classmethod
    def globals(cls):
        # Expose this module's namespace -- presumably consumed by the
        # Resource machinery for lookup/pickling; TODO confirm.
        return globals()

    def __init__(self, elems=None, **kw):
        super().__init__(elems, **kw)
        # Re-register every initial value; entries whose value deduplicates
        # to a falsy result are dropped.
        for k, v in tuple(self.items()):
            v = self.add_once(v)
            if v:
                self[k] = v
            else:
                del self[k]

    def __repr__(self):
        # Only show "real" entries: non-empty and not self-keyed.
        es = {k: v for k, v in self.items() if v and k is not v}
        es = pp.pformat(es, indent=4)
        return '{}({})'.format(type(self).__name__, es)

    @property
    def elems(self):
        # Values of the "real" entries (same filter as __repr__).
        return [v for k, v in self.items() if v and k is not v]

    # Keep the base-class rename reachable under a distinct name.
    rename_msg = Resource.rename

    def add_once(self, v):
        # Tuples are deduplicated element-wise.
        if isinstance(v, tuple):
            return tuple(self.add_once(i) for i in v)
        return super().add_once(v)

    def eml_content(self, part, name, _):
        # Default extraction: the decoded payload of the message part.
        return part.get_content()

    def eml_register(self, name, vs):
        """Store *vs* under *name*, asserting consistency with any prior entry."""
        vs = self.add_once(vs)
        try:
            os = self[name]
            # A re-registration must agree with what is already stored
            # (exact match, or equal after paragraph-join normalization).
            assert os == vs or nominal(para_join(os)) == nominal(para_join(vs))
        except KeyError:
            if vs:
                self[name] = vs
        return vs

    def extract(self, name, raw):
        """Walk MIME message *raw*, collect matching parts, register under *name*."""
        vs = []
        for i, p in enumerate(raw.walk()):
            if self.check_type(p):
                v = self.eml_content(p, name, i)
                if v:
                    vs.append(v)
        return self.eml_register(name, tuple(vs))
class Texts(Registry):
    """Registry of plain-text email bodies, persisted at _res_path."""
    _res_path = config.qnar_dst + 'texts.qnr'
    def check_type(self, part):
        # Only plain-text message parts are extracted.
        return part.get_content_type() == 'text/' + config.PLAIN
    def eml_register(self, name, vs):
        # Extraction does not auto-register; texts are registered explicitly
        # through register() below.
        return vs
    def register(self, name, paras):
        """Register the given paragraphs for name via the base Registry."""
        super().eml_register(name, tuple(paras))
    def expand(self, name, paras):
        """Replace any existing entry for name with the new paragraphs."""
        if name in self:
            del self[name]
        self.register(name, paras)
class Htmls(Registry):
    """Registry of HTML email bodies, keyed by content digest."""
    _res_path = config.qnar_dst + 'htmls.qnr'
    def check_type(self, part):
        # Only HTML message parts are considered.
        return part.get_content_type() == 'text/' + config.HTML
    def eml_content(self, part, name, i):
        # Records the digest of each distinct HTML body; the stored value is
        # a placeholder ('aaa') — NOTE(review): looks unfinished, confirm.
        # Implicitly returns None, so extract() collects nothing for HTML.
        v = part.get_content()
        if v:
            d = digest(v.encode())
            if d not in self:
                self[d] = 'aaa'
        # print('\nhtml', name, i, d)
        # return d
class Attms(Registry):
    """Registry of email attachments, keyed by content digest."""
    _res_path = config.qnar_dst + 'attms.qnr'
    def check_type(self, part):
        # Anything that is not a multipart container or a text body is
        # treated as an attachment.
        return part.get_content_maintype() not in ('multipart', 'text')
    def eml_content(self, part, name, i):
        # Records the digest of each distinct attachment; the stored value is
        # a placeholder ('aaa'). Returns None, so extract() collects nothing.
        try:
            v = part.get_content()
            if v:
                d = digest(v)
                if d not in self:
                    self[d] = 'aaa'
                t = mimetypes.guess_extension(part.get_content_type())
                print('\n', t, name, i, d, part.get_content_maintype(),
                      part.get_filename())
        except Exception as e:
            # Best-effort: attachment decoding failures are logged, not fatal.
            print(e)
            log.error('Error getting content {} {} {}', name,
                      part.get_content_maintype(), part.get_filename())
|
from datetime import timezone, timedelta
import dotenv
import pytest as pytest
from box import Box
from core import firestore_client
from google.cloud.firestore_v1 import Client, DocumentReference, DocumentSnapshot
from mockito import mock
PARIS_TZ = timezone(timedelta(hours=2))
@pytest.fixture(autouse=True)
def setup():
    # Load environment variables from a local .env before every test.
    dotenv.load_dotenv()
@pytest.fixture(autouse=False)
def db(when):
    """Provide a mocked Firestore Client and patch firestore_client.db()
    so production code receives the mock."""
    client_mock = mock(Client)
    when(firestore_client).db().thenReturn(client_mock)
    return client_mock
def test_push_new_account_to_slack(when, db):
    """The handler should load the pax document referenced by the event path.

    NOTE(review): exercised for side effects only — no assertion is made on
    the Slack call itself; consider verifying the outgoing message.
    """
    from main import push_new_account_to_slack
    # 'ressource' (sic) is the Firestore document path of the pax record.
    ressource = "pax/auth0|600a84b07038e20071c74950"
    # Stub the Firestore chain: db.document(path).get().to_dict() -> pax data.
    pax_ref_mock = mock(spec=DocumentReference)
    pax_doc_mock = mock({"exists": True}, spec=DocumentSnapshot)
    pax_data_mock = {
        "name": "Name",
        "state": "REGISTERED",
        "preregistration_form_entry_url":
            "https://www.cognitoforms.com/forms/coworkingcoliving30%C3%A8mecielpreregistration/entries/237",
    }
    when(db).document(ressource).thenReturn(pax_ref_mock)
    when(pax_ref_mock).get().thenReturn(pax_doc_mock)
    when(pax_doc_mock).to_dict().thenReturn(pax_data_mock)
    # Firestore v1 trigger payload: old and new field values plus the mask of
    # changed fields (here: state PENDING_REVIEW -> CANCELED).
    event = {
        "oldValue": {
            "createTime": "2021-03-05T19:54:37.991311Z",
            "fields": {
                "arrival_date": {"timestampValue": "2021-03-15T23:00:00Z"},
                "created": {"timestampValue": "2021-03-05T19:54:37.978Z"},
                "departure_date": {"timestampValue": "2021-03-19T23:00:00Z"},
                "kind": {"stringValue": "COLIVING"},
                "number_of_nights": {"integerValue": "4"},
                "state": {"stringValue": "PENDING_REVIEW"}
            },
            "name": "projects/trentiemeciel/databases/(default)/documents/pax/auth0|600a84b07038e20071c74950",
            "updateTime": "2021-03-05T21:56:55.248879Z"
        },
        "updateMask": {"fieldPaths": ["state"]},
        "value": {
            "createTime": "2021-03-05T19:54:37.991311Z",
            "fields": {
                "arrival_date": {"timestampValue": "2021-03-15T23:00:00Z"},
                "created": {"timestampValue": "2021-03-05T19:54:37.978Z"},
                "departure_date": {"timestampValue": "2021-03-19T23:00:00Z"},
                "kind": {"stringValue": "COLIVING"},
                "number_of_nights": {"integerValue": "4"},
                "state": {"stringValue": "CANCELED"}
            },
            "name": "projects/trentiemeciel/databases/(default)/documents/pax/auth0|600a84b07038e20071c74950",
            "updateTime": "2021-03-05T21:56:55.248879Z"
        }
    }
    # Act: Box gives attribute-style access to the event dict.
    push_new_account_to_slack(ressource, Box(event))
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile, os
import sys
def untar(fname, dirs):
    """
    Extract a .tar.gz archive.

    :param fname: path of the tar.gz file
    :param dirs: directory to decompress into
    :return: True on success, False on any error
    """
    try:
        # Context manager guarantees the archive is closed even if
        # extraction fails (the original leaked the file handle on error).
        with tarfile.open(name=fname, mode='r:gz') as t:
            # SECURITY: extractall() on an untrusted archive can write outside
            # `dirs` via '../' or absolute member names (CVE-2007-4559).
            # Only use on trusted archives, or pass filter="data" on 3.12+.
            t.extractall(path=dirs)
        return True
    except Exception as e:
        # Best-effort contract preserved: report the error, signal failure.
        print(e)
        return False
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module no longer runs an
    # extraction with whatever happens to be in sys.argv.
    untar(sys.argv[1], sys.argv[2])
|
import numpy as np
import pandas as pd
import pickle
import glob
from sklearn.model_selection import train_test_split
from skmultilearn.model_selection import iterative_train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.ensemble import StackingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import ClassifierChain
from sklearn.linear_model import LogisticRegression
from sklearn.cross_decomposition import CCA
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from imblearn.combine import SMOTEENN
from imblearn.ensemble import EasyEnsembleClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from timeit import default_timer
import glob
class OVR_DNN:
    """One-vs-rest stacked classifier ensemble for multi-label data.

    NOTE(review): despite the name, the models trained here are
    sklearn/imblearn ensembles — no neural networks are involved.

    Training workflow (when X_train is given):
      1. split off validation and test sets,
      2. fit the base models once to accumulate feature importances and keep
         only the features with above-average importance,
      3. refit the base models on the reduced feature set,
      4. fit a stacking model on the base models' validation probabilities,
      5. tune per-class decision thresholds on the test set (Youden's J).
    """
    def __init__(self, X_train=None, y_train=None, filename=None):
        """Train from (X_train, y_train), and/or load models from filename.

        :param X_train: 2-D array of training features, or None.
        :param y_train: 2-D binary indicator matrix of labels, or None.
        :param filename: base filename previously passed to save_models().
        """
        self._X_train = X_train
        self._y_train = y_train
        self._filename = filename
        if self._X_train is not None:
            self.find_imp_feats()
            self.train_base_models()
            self.train_stk_models()
        if self._filename is not None:
            print('Loading models...')
            self.load_models()
    def find_imp_feats(self):
        """Fit base models once, accumulate feature importances, and reduce
        the train/val/test matrices to the above-average-importance features."""
        self.split_data()
        estimators = self.get_models()
        feat_imp_sums = np.zeros(self._X_train.shape[1])
        for name, model in estimators:
            print('Training base model to find feature importances', name)
            model.fit(self._X_train, self._y_train)
            for est in model.estimators_:
                # Each per-class estimator is a pipeline; step 1 is the
                # classifier. Not all classifiers expose importances.
                try:
                    if hasattr(est[1], 'feature_importances_'):
                        print('Found estimator with feature importances!...')
                        feat_imp_sums += est[1].feature_importances_
                except Exception:
                    # Narrowed from a bare `except:` so Ctrl-C etc. still work.
                    print('DOES NOT HAVE FEATURE IMPORTANCES')
        # Boolean mask: keep features whose summed importance beats the mean.
        self._imp_feats = feat_imp_sums > np.mean(feat_imp_sums)
        self._X_train = self.limit_to_imp_feats(self._X_train)
        self._X_val = self.limit_to_imp_feats(self._X_val)
        self._X_test = self.limit_to_imp_feats(self._X_test)
    def train_base_models(self):
        """Refit fresh base models on the importance-reduced feature set."""
        estimators = self.get_models()
        for name, model in estimators:
            print('Training base model with important features', name)
            model.fit(self._X_train, self._y_train)
        self._base_models = estimators
    def get_models(self):
        """Return fresh (name, one-vs-rest model) pairs; tree-based models use
        random undersampling to counter class imbalance."""
        base_lr = make_pipeline(StandardScaler(), LogisticRegression(class_weight='balanced'))
        ovr_lr = OneVsRestClassifier(base_lr)
        base_rf = make_pipeline_with_sampler(RandomUnderSampler(), RandomForestClassifier(n_jobs=-1))
        ovr_rf = OneVsRestClassifier(base_rf)
        base_et = make_pipeline_with_sampler(RandomUnderSampler(), ExtraTreesClassifier(n_jobs=-1))
        ovr_et = OneVsRestClassifier(base_et)
        base_gbc = make_pipeline_with_sampler(RandomUnderSampler(), HistGradientBoostingClassifier())
        ovr_gbc = OneVsRestClassifier(base_gbc)
        estimators = [('lr', ovr_lr),
                      ('rf', ovr_rf),
                      ('et', ovr_et),
                      ('gbc', ovr_gbc)]
        return estimators
    def split_data(self):
        """Stratified multi-label split into train (~56%), val (~19%), test (25%)."""
        print('Splitting data into training and validation set to train DNNs...')
        X_train, y_train, X_test, y_test = iterative_train_test_split(self._X_train,
                                                                      self._y_train,
                                                                      test_size = 0.25)
        X_train, y_train, X_val, y_val = iterative_train_test_split(X_train,
                                                                    y_train,
                                                                    test_size = 0.25)
        self._X_train = X_train
        self._y_train = y_train
        self._X_val = X_val
        self._y_val = y_val
        self._X_test = X_test
        self._y_test = y_test
        print('Train data:', X_train.shape)
        print('Train labels:', y_train.shape)
        print('Val data:', X_val.shape)
        print('Val labels:', y_val.shape)
        print('Test data:', X_test.shape)
        print('Test labels:', y_test.shape)
    def _concat_base_probs(self, X):
        """Return the horizontally-stacked predict_proba outputs of all base
        models for X (shared by stacking-model training and prediction)."""
        return np.concatenate([model.predict_proba(X) for _, model in self._base_models],
                              axis=1)
    def train_stk_models(self):
        """Fit the stacking model on base-model probabilities of the validation set."""
        print('Training stacking model on validation set...')
        print('    Getting probabilities for validation set...')
        y_prob = self._concat_base_probs(self._X_val)
        stk_et = make_pipeline_with_sampler(RandomUnderSampler(), ExtraTreesClassifier(n_jobs=-1))
        ovr_stk_et = OneVsRestClassifier(stk_et)
        ovr_stk_et.fit(y_prob, self._y_val)
        self._stk_model = ovr_stk_et
        self.train_thresholds()
    def train_thresholds(self):
        """Pick a per-class probability threshold on the held-out test set by
        maximizing Youden's J statistic (tpr - fpr)."""
        print('Training threshold probability of stacking model...')
        print('    Getting probabilities for testing set...')
        y_prob_ovr = self._stk_model.predict_proba(self._concat_base_probs(self._X_test))
        threshs = []
        for i in range(y_prob_ovr.shape[1]):
            fpr, tpr, thresholds = roc_curve(self._y_test[:, i], y_prob_ovr[:, i])
            # Best threshold = the one maximizing Youden's J.
            ix = np.argmax(tpr - fpr)
            threshs.append(thresholds[ix])
        self._threshs = np.array(threshs)
    def predict(self, X_test):
        """Return thresholded 0/1 predictions for X_test (full feature set)."""
        y_pred_test = self.predict_proba(X_test)
        for i in range(y_pred_test.shape[1]):
            y_pred_test[:, i] = (y_pred_test[:, i] >= self._threshs[i]).astype(float)
        return y_pred_test
    def predict_proba(self, X_test):
        """Return stacking-model probabilities for X_test (full feature set)."""
        X_test = self.limit_to_imp_feats(X_test)
        return self._stk_model.predict_proba(self._concat_base_probs(X_test))
    def save_models(self, filename):
        """Pickle base models, feature mask, stacking model and thresholds
        to <filename>_ovr_imb_models.pickle."""
        model_dict = {
            'base_models': self._base_models,
            'imp_feats': self._imp_feats,
            'stk_model': self._stk_model,
            'threshs': self._threshs
        }
        stk_filename = filename.split('.pick')[0] + '_ovr_imb_models.pickle'
        with open(stk_filename, 'wb') as handle:
            pickle.dump(model_dict, handle)
    def load_models(self):
        """Restore the state written by save_models() for self._filename."""
        file_hash = self._filename.split('.pick')[0] + '_ovr_imb_models.pickle'
        with open(file_hash, 'rb') as handle:
            model_dict = pickle.load(handle)
        self._base_models = model_dict['base_models']
        self._imp_feats = model_dict['imp_feats']
        self._stk_model = model_dict['stk_model']
        self._threshs = model_dict['threshs']
    def limit_to_imp_feats(self, X):
        """Select only the columns flagged important by find_imp_feats()."""
        return X[:, self._imp_feats]
|
"""
A permission has to be defined first (using grok.Permission for example)
before it can be used in @grok.require().
>>> from grokcore.xmlrpc import testing
# PY2 - remove '+IGNORE_EXCEPTION_DETAIL' when dropping Python 2 support:
>>> testing.grok(__name__) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
zope.configuration.config.ConfigurationExecutionError: \
martian.error.GrokError: Undefined permission \
'doesnt.exist' in \
<class \
'grokcore.xmlrpc.tests.base.xmlrpc.missing_permission2.MissingPermission'>.\
Use grok.Permission first...
"""
import grokcore.component as grok
import grokcore.security
import grokcore.xmlrpc
import zope.interface
class MissingPermission(grokcore.xmlrpc.XMLRPC):
    # Deliberately references an undefined permission; the module docstring's
    # doctest asserts that grokking this class raises GrokError.
    grok.context(zope.interface.Interface)
    @grokcore.security.require('doesnt.exist')
    def foo(self):
        pass
|
import re
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
def process_tweet(tweet):
    """Process tweet function.
    Input:
        tweet: a string containing a tweet
    Output:
        tweets_clean: a list of words containing the processed tweet
    """
    stemmer = PorterStemmer()
    # A set makes each per-token stopword test O(1) instead of O(n)
    # over the ~180-word list.
    stopwords_english = set(stopwords.words('english'))
    # remove stock market tickers like $GE
    tweet = re.sub(r'\$\w*', '', tweet)
    # remove old style retweet text "RT"
    tweet = re.sub(r'^RT[\s]+', '', tweet)
    # remove hyperlinks
    tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
    # remove only the hash # sign; the tag word itself is kept
    tweet = re.sub(r'#', '', tweet)
    # tokenize tweets
    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
                               reduce_len=True)
    tweet_tokens = tokenizer.tokenize(tweet)
    tweets_clean = []
    for word in tweet_tokens:
        # NOTE: `word not in string.punctuation` is a substring test, so
        # multi-character tokens like '()' are also dropped — kept as-is to
        # preserve the original behavior.
        if (word not in stopwords_english and    # remove stopwords
                word not in string.punctuation):  # remove punctuation
            stem_word = stemmer.stem(word)  # stemming word
            tweets_clean.append(stem_word)
    return tweets_clean
def build_freqs(tweets, ys):
    """Build frequencies.
    Input:
        tweets: a list of tweets
        ys: an m x 1 array with the sentiment label of each tweet
            (either 0 or 1)
    Output:
        freqs: a dictionary mapping each (word, sentiment) pair to its
        frequency
    """
    # Convert np array to list since zip needs an iterable.
    # The squeeze is necessary or the list ends up with one element.
    # Also note that this is just a NOP if ys is already a list.
    yslist = np.squeeze(ys).tolist()
    # Count (word, sentiment) pairs across all processed tweets.
    freqs = {}
    for y, tweet in zip(yslist, tweets):
        for word in process_tweet(tweet):
            pair = (word, y)
            # dict.get with a default replaces the original if/else and
            # avoids the double membership lookup.
            freqs[pair] = freqs.get(pair, 0) + 1
    return freqs
def extract_features(tweet, freqs):
    '''
    Input:
        tweet: a list of words for one tweet
        freqs: a dictionary corresponding to the frequencies of each tuple (word, label)
    Output:
        x: a feature vector of dimension (1,3)
    '''
    # process_tweet tokenizes, stems, and removes stopwords
    word_l = process_tweet(tweet)
    # [bias, positive-word count, negative-word count] as a 1 x 3 vector
    x = np.zeros((1, 3))
    # bias term is set to 1
    x[0, 0] = 1
    # loop through each word in the list of words
    for word in word_l:
        # dict.get avoids `in freqs.keys()` plus a second lookup.
        x[0, 1] += freqs.get((word, 1), 0)  # positive label 1
        x[0, 2] += freqs.get((word, 0), 0)  # negative label 0
    assert(x.shape == (1, 3))
    return x
|
from uuid import uuid4
from django.db import models
from taggit.managers import TaggableManager
class Thing(models.Model):
    """A catalogued item type, optionally grouped into a ThingCategory."""
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    name = models.CharField(max_length=100)
    description = models.TextField()
    category = models.ForeignKey(
        'ThingCategory',
        related_name='things',
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )
    tags = TaggableManager()
    def __str__(self):
        # Bug fix: the original returned str(id) — the repr of the *builtin*
        # id() function — instead of this instance's primary key.
        return str(self.id)
class ThingInstance(models.Model):
    """A physical occurrence of a Thing stored in a storage Section."""
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    section = models.ForeignKey(
        'storages.Section',
        related_name='things_in_sections',
        on_delete=models.CASCADE
    )
    type = models.ForeignKey(
        'Thing',
        related_name='things_instances',
        on_delete=models.CASCADE
    )
    count = models.PositiveIntegerField()
    def __str__(self):
        # Bug fix: the original returned str(id) — the repr of the *builtin*
        # id() function — instead of this instance's primary key.
        return str(self.id)
class ThingCategory(models.Model):
    """A named grouping of Things with an associated icon."""
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    name = models.CharField(max_length=100)
    icon_label = models.IntegerField()
    def __str__(self):
        # Bug fix: the original returned str(id) — the repr of the *builtin*
        # id() function — instead of this instance's primary key.
        return str(self.id)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 18:21:04 2021
@author: ariandovald
"""
import matplotlib.pyplot as plt
import numpy as np
from tabulate import tabulate
import random as random
# Geometric distribution experiment: plot log P(first win on trial n) for a
# per-trial win probability p_win, fit a line to it, then estimate the mean
# number of trials to a win by Monte-Carlo simulation.
n_total = 0          # running sum of simulated trial counts
p_win = 0.01
p_lose = 1 - p_win
t = 0                # number of completed simulation runs
p = []
n_space = []
n = 1
# s = 0
# while n-1 <= 200:
# m = 0.99**(n)
# s += m
# n += 1
# a = 1/(1-0.99)
# print((a-s)*0.0101010101)
# Analytic curve: log of the geometric pmf p_win * p_lose^(n-1); on a log
# scale this is a straight line with slope log(p_lose).
while n <= 1000:
    n_space.append(n)
    p.append(np.log(p_win*(p_lose**(n-1))))
    n += 1
plt.plot(n_space, p, 'bo')
z = np.polyfit(n_space, p, 1)
slope, intercept = z
print(slope, intercept)
# Monte-Carlo: each run draws trials until two uniform ints in [0, 100]
# coincide. NOTE(review): that event has probability 1/101 ~ 0.0099, not
# exactly p_win = 0.01 — confirm whether the mismatch is intended.
while t <= 10000:
    p = []
    n = 1
    while True:
        p.append(p_win*(p_lose**(n-1)))
        r = random.randint(0,100)
        if r == random.randint(0,100):
            break
        n += 1
    n_total += n
    t += 1
# Empirical mean trials-to-win (expected ~ 1/p for a geometric distribution).
n_average = n_total * (t**(-1))
print(n_average)
|
import setuptools
with open("README.md", "r") as fh:
long_description=fh.read()
setuptools.setup(
name="gmsfile",
version="0.1.4",
author="Gourab Kanti Das",
author_email="gourabkanti.das@visva-bharati.ac.in",
description="A converter from XYZ to GAMESS file",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/amardrylab/gmsfile3",
packages = setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.0',
)
|
#
# This software is licensed under the Apache 2 license, quoted below.
#
# Copyright 2019 Astraea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
This module contains access to the jvm SparkContext with RasterFrameLayer support.
"""
from pyspark import SparkContext
from typing import Tuple
__all__ = ['RFContext']
class RFContext(object):
    """
    Entrypoint to RasterFrames services
    """
    def __init__(self, spark_session):
        self._spark_session = spark_session
        self._gateway = spark_session.sparkContext._gateway
        self._jvm = self._gateway.jvm
        java_session = self._spark_session._jsparkSession
        self._jrfctx = self._jvm.org.locationtech.rasterframes.py.PyRFContext(java_session)
    def list_to_seq(self, py_list):
        # Delegate to the JVM-side helper that converts a Python list to a Scala Seq.
        return self.lookup('_listToSeq')(py_list)
    def lookup(self, function_name: str):
        """Resolve a function by name on the JVM-side PyRFContext."""
        return getattr(self._jrfctx, function_name)
    def build_info(self):
        return self._jrfctx.buildInfo()
    def companion_of(self, classname: str):
        """Return the Scala companion-object singleton for the given class name."""
        suffixed = classname if classname.endswith("$") else classname + "$"
        companion_module = getattr(self._jvm, suffixed)
        return getattr(companion_module, "MODULE$")
    # NB: Tightly coupled to `org.locationtech.rasterframes.py.PyRFContext._resolveRasterRef`
    def _resolve_raster_ref(self, ref_struct):
        resolve = self.lookup("_resolveRasterRef")
        extent = ref_struct.subextent
        return resolve(
            ref_struct.source.raster_source_kryo,
            ref_struct.bandIndex,
            extent.xmin,
            extent.ymin,
            extent.xmax,
            extent.ymax,
        )
    @staticmethod
    def active():
        """
        Get the active Python RFContext and throw an error if it is not enabled for RasterFrames.
        """
        sc = SparkContext._active_spark_context
        if not hasattr(sc, '_rf_context'):
            raise AttributeError(
                "RasterFrames have not been enabled for the active session. Call 'SparkSession.withRasterFrames()'.")
        return sc._rf_context
    @staticmethod
    def call(name, *args):
        return RFContext.active().lookup(name)(*args)
    @staticmethod
    def jvm():
        """
        Get the active Scala PyRFContext and throw an error if it is not enabled for RasterFrames.
        """
        return RFContext.active()._jvm
|
#!/usr/bin/env python
"""
These classes implement various focus lock modes. They determine
all the behaviors of the focus lock.
Hazen 05/15
"""
import math
import numpy
import scipy.optimize
import tifffile
import time
from PyQt5 import QtCore
import storm_control.sc_library.halExceptions as halExceptions
import storm_control.sc_library.parameters as params
# Focus quality determination for the optimal lock.
import storm_control.hal4000.focusLock.focusQuality as focusQuality
class LockModeException(halExceptions.HalException):
    """Raised for focus-lock mode configuration or usage errors."""
    pass
#
# Mixin classes provide various locking and scanning behaviours.
# The idea is that these are more or less self-contained and setting
# the lock modes 'mode' attribute will switch between them.
#
# These are active when the 'behavior' attribute corresponds to
# their name.
#
# FIXME: Was this actually a good idea? Getting the inheritance
# to work correctly is messy. It is also a bit difficult
# to follow what exactly is going to happen when a
# a particular method like startLock() is called. Maybe
# these should just have been different class of objects?
#
class FindSumMixin(object):
    """
    This will run a find sum scan, starting at the z stage minimum and
    moving to the maximum, or until a maximum in the QPD sum signal is
    found that is larger than the requested minimum sum signal.
    """
    fsm_pname = "find_sum"
    def __init__(self, **kwds):
        super().__init__(**kwds)
        # Scan state; reset each time the behavior is (re)started.
        self.fsm_max_pos = 0.0        # z position of the largest sum seen so far.
        self.fsm_max_sum = 0.0        # largest sum signal seen so far.
        self.fsm_max_z = 0.0          # upper z stage travel limit.
        self.fsm_min_sum = 0.0        # below this the scan counts as "found nothing".
        self.fsm_min_z = 0.0          # lower z stage travel limit (scan start).
        self.fsm_mode_name = "find_sum"
        self.fsm_requested_sum = 0.0  # sum level the caller asked us to find.
        self.fsm_step_size = 0.0      # z increment per QPD update (microns).
        if not hasattr(self, "behavior_names"):
            self.behavior_names = []
        self.behavior_names.append(self.fsm_mode_name)
    @staticmethod
    def addParameters(parameters):
        """
        Add parameters specific to finding sum.
        """
        p = parameters.addSubSection(FindSumMixin.fsm_pname)
        p.add(params.ParameterRangeFloat(description = "Step size for find sum search.",
                                         name = "step_size",
                                         value = 1.0,
                                         min_value = 0.1,
                                         max_value = 10.0))
    def getFindSumMaxSum(self):
        """Return the largest sum signal seen during the last scan."""
        return self.fsm_max_sum
    def handleQPDUpdate(self, qpd_state):
        """Advance the scan by one step per QPD reading while this behavior
        is active; stop once the sum peaks past the requested level or the
        stage runs out of travel."""
        if hasattr(super(), "handleQPDUpdate"):
            super().handleQPDUpdate(qpd_state)
        if (self.behavior == self.fsm_mode_name):
            power = qpd_state["sum"]
            z_pos = self.z_stage_functionality.getCurrentPosition()
            # Check if the current power is greater than the
            # maximum we've seen so far.
            if (power > self.fsm_max_sum):
                self.fsm_max_sum = power
                self.fsm_max_pos = z_pos
            # Check if the power has started to go back down, if it has
            # then we've hopefully found the maximum.
            if (self.fsm_max_sum > self.fsm_requested_sum) and (power < (0.5 * self.fsm_max_sum)):
                self.z_stage_functionality.goAbsolute(self.fsm_max_pos)
                self.behaviorDone(True)
            else:
                # Are we at the maximum z?
                if (z_pos >= self.fsm_max_z):
                    # Did we find anything at all?
                    if (self.fsm_max_sum > self.fsm_min_sum):
                        self.z_stage_functionality.goAbsolute(self.fsm_max_pos)
                    # Otherwise just go back to the center position.
                    else:
                        self.z_stage_functionality.recenter()
                    # Emit signal for failure.
                    self.behaviorDone(False)
                # Move up one step size.
                else:
                    self.z_stage_functionality.goRelative(self.fsm_step_size)
    def startLockBehavior(self, behavior_name, behavior_params):
        """Reset the scan state and move the stage to its minimum z so the
        scan sweeps upward from there.
        behavior_params keys used: 'requested_sum' (required), 'fsm_step_size'
        (optional; falls back to the 'find_sum.step_size' parameter)."""
        if hasattr(super(), "startLockBehavior"):
            super().startLockBehavior(behavior_name, behavior_params)
        if (behavior_name == self.fsm_mode_name):
            self.fsm_max_pos = 0.0
            self.fsm_max_sum = 0.0
            self.fsm_requested_sum = behavior_params["requested_sum"]
            # 10% of the requested sum is the "found anything at all" floor.
            self.fsm_min_sum = 0.1 * self.fsm_requested_sum
            if "fsm_step_size" in behavior_params:
                self.fsm_step_size = behavior_params["fsm_step_size"]
            else:
                self.fsm_step_size = self.parameters.get(self.fsm_pname + ".step_size")
            # Move to z = 0.
            self.fsm_max_z = self.z_stage_functionality.getMaximum()
            self.fsm_min_z = self.z_stage_functionality.getMinimum()
            self.z_stage_functionality.goAbsolute(self.fsm_min_z)
class LockedMixin(object):
    """
    This will try and hold the specified lock target. It
    also keeps track of the quality of the lock.
    """
    lm_pname = "locked"
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.lm_buffer = None             # ring buffer of recent in-lock (1) / out-of-lock (0) flags.
        self.lm_buffer_length = 1         # number of consecutive good readings for a "good" lock.
        self.lm_counter = 0               # current write index into lm_buffer.
        self.lm_gain = 0.5                # proportional gain near the target.
        self.lm_max_gain = 0.7            # gain used far from the target.
        self.lm_min_sum = 0.0             # minimum sum signal to trust the QPD reading.
        self.lm_mode_name = "locked"
        self.lm_offset_threshold = 0.02   # max |offset - target| (microns) to count as locked.
        self.lm_scale = self.lm_max_gain - self.lm_gain
        self.lm_target = 0.0              # QPD offset we are trying to hold.
        if not hasattr(self, "behavior_names"):
            self.behavior_names = []
        self.behavior_names.append(self.lm_mode_name)
    @staticmethod
    def addParameters(parameters):
        """
        Add parameters specific to staying in lock.
        """
        p = parameters.addSubSection(LockedMixin.lm_pname)
        p.add(params.ParameterInt(description = "Number of repeats for the lock to be considered good.",
                                  name = "buffer_length",
                                  value = 5))
        p.add(params.ParameterRangeFloat(description = "Lock response gain (near target offset).",
                                         name = "lock_gain",
                                         value = 0.5,
                                         min_value = 0.0,
                                         max_value = 1.0))
        p.add(params.ParameterRangeFloat(description = "Lock response maximum gain (far from target offset).",
                                         name = "lock_gain_max",
                                         value = 0.7,
                                         min_value = 0.0,
                                         max_value = 1.0))
        p.add(params.ParameterFloat(description = "Maximum allowed difference to still be in lock (nm).",
                                    name = "offset_threshold",
                                    value = 20.0))
        p.add(params.ParameterFloat(description = "Minimum sum to be considered locked (AU).",
                                    name = "minimum_sum",
                                    value = -1.0))
    def controlFn(self, offset):
        """
        Returns how much to move the stage (in microns) given the
        offset (also in microns).
        """
        # Exponential with a sigma of 0.5 microns (2.0 * 0.5 * 0.5 = 0.5).
        #
        # If the offset is large than we just want to use the maximum gain
        # to get back to the target as quickly as possible. However if we
        # are near the target then we want to respond with a smaller gain
        # value.
        #
        dx = offset * offset / 0.5
        p_term = self.lm_max_gain - self.lm_scale*math.exp(-dx)
        return -1.0 * p_term * offset
    def getLockTarget(self):
        """Return the QPD offset currently being held."""
        return self.lm_target
    def handleQPDUpdate(self, qpd_state):
        """Apply one control step per QPD reading while locked, update the
        good/bad ring buffer, and emit a lock-status change when all recent
        readings flip between good and bad."""
        if hasattr(super(), "handleQPDUpdate"):
            super().handleQPDUpdate(qpd_state)
        if (self.behavior == self.lm_mode_name):
            if qpd_state["is_good"] and (qpd_state["sum"] > self.lm_min_sum):
                diff = (qpd_state["offset"] - self.lm_target)
                if (abs(diff) < self.lm_offset_threshold):
                    self.lm_buffer[self.lm_counter] = 1
                else:
                    self.lm_buffer[self.lm_counter] = 0
                # Simple proportional control.
                #dz = -1.0 * self.lm_gain * diff
                dz = self.controlFn(diff)
                self.z_stage_functionality.goRelative(dz)
            else:
                # Untrusted reading (bad QPD state or too little signal).
                self.lm_buffer[self.lm_counter] = 0
            # Lock is "good" only if every entry in the ring buffer is good.
            good_lock = bool(numpy.sum(self.lm_buffer) == self.lm_buffer_length)
            # NOTE(review): last_good_z is updated on every cycle, even when
            # the lock is currently bad — confirm this is intended.
            self.last_good_z = self.z_stage_functionality.getCurrentPosition()
            if (good_lock != self.good_lock):
                self.setLockStatus(good_lock)
            self.lm_counter += 1
            if (self.lm_counter == self.lm_buffer_length):
                self.lm_counter = 0
    def newParameters(self, parameters):
        """Refresh the cached lock parameters (thresholds are converted from
        nm to microns here)."""
        if hasattr(super(), "newParameters"):
            super().newParameters(parameters)
        p = parameters.get(self.lm_pname)
        self.lm_buffer_length = p.get("buffer_length")
        self.lm_buffer = numpy.zeros(self.lm_buffer_length, dtype = numpy.uint8)
        self.lm_counter = 0
        self.lm_gain = p.get("lock_gain")
        self.lm_max_gain = p.get("lock_gain_max")
        self.lm_min_sum = p.get("minimum_sum")
        self.lm_offset_threshold = 1.0e-3 * p.get("offset_threshold")
        self.lm_scale = self.lm_max_gain - self.lm_gain
    def startLock(self):
        """Clear the lock-quality buffer and switch into the locked behavior."""
        self.lm_counter = 0
        self.lm_buffer = numpy.zeros(self.lm_buffer_length, dtype = numpy.uint8)
        self.behavior = "locked"
    def startLockBehavior(self, behavior_name, behavior_params):
        """Start holding the lock, taking overrides for buffer length, minimum
        sum, offset threshold (nm), target offset, and a starting z position
        from behavior_params; anything absent falls back to the parameters."""
        if hasattr(super(), "startLockBehavior"):
            super().startLockBehavior(behavior_name, behavior_params)
        if (behavior_name == self.lm_mode_name):
            p = self.parameters.get(self.lm_pname)
            if "buffer_length" in behavior_params:
                self.lm_buffer_length = behavior_params["buffer_length"]
            else:
                self.lm_buffer_length = p.get("buffer_length")
            if "minimum_sum" in behavior_params:
                self.lm_min_sum = behavior_params["minimum_sum"]
            else:
                self.lm_min_sum = p.get("minimum_sum")
            if "offset_threshold" in behavior_params:
                self.lm_offset_threshold = 1.0e-3 * behavior_params["offset_threshold"]
            else:
                self.lm_offset_threshold = 1.0e-3 * p.get("offset_threshold")
            # Did the user request a target?
            if "target" in behavior_params:
                self.setLockTarget(behavior_params["target"])
            # If not, use the current QPD offset.
            else:
                self.setLockTarget(self.qpd_state["offset"])
            if "z_start" in behavior_params:
                self.z_stage_functionality.goAbsolute(behavior_params["z_start"])
            self.startLock()
class ScanMixin(object):
    """
    This will do a (local) scan for the z position with the correct
    offset.
    FIXME: Is this the right thing for this behavior to do?
    """
    sm_pname = "scan"
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.sm_min_sum = None            # minimum sum signal to trust an offset reading.
        self.sm_mode_name = "scan"
        self.sm_offset_threshold = None   # max |offset - target| (microns) to count as found.
        self.sm_target = None             # QPD offset being searched for.
        self.sm_z_end = None              # upper end of the scan range.
        self.sm_z_start = None            # lower end of the scan range.
        self.sm_z_step = None             # z increment per QPD update (microns).
        if not hasattr(self, "behavior_names"):
            self.behavior_names = []
        self.behavior_names.append(self.sm_mode_name)
    @staticmethod
    def addParameters(parameters):
        """
        Add parameters specific to scan mode.
        """
        p = parameters.addSubSection(ScanMixin.sm_pname)
        p.add(params.ParameterFloat(description = "Minimum sum for finding the correct offset (AU).",
                                    name = "minimum_sum",
                                    value = -1.0))
        p.add(params.ParameterFloat(description = "Maximum allowed difference for finding the correct offset (nm).",
                                    name = "offset_threshold",
                                    value = 100.0))
        p.add(params.ParameterFloat(description = "Scan range in microns.",
                                    name = "scan_range",
                                    value = 10.0))
        p.add(params.ParameterFloat(description = "Scan step size in microns.",
                                    name = "scan_step",
                                    value = 0.05))
    def handleQPDUpdate(self, qpd_state):
        """Advance the scan one step per QPD reading while this behavior is
        active; succeed as soon as the measured offset is within threshold of
        the target, fail (and return to last_good_z) at the end of the range."""
        if hasattr(super(), "handleQPDUpdate"):
            super().handleQPDUpdate(qpd_state)
        if (self.behavior == self.sm_mode_name):
            # Default to an out-of-threshold diff so an untrusted (low sum)
            # reading can never count as success.
            diff = 2.0 * self.sm_offset_threshold
            if (qpd_state["sum"] > self.sm_min_sum):
                diff = (qpd_state["offset"] - self.sm_target)
            #
            # If we are at a z position where we are getting the correct offset
            # then we are done.
            #
            if (abs(diff) < self.sm_offset_threshold):
                self.behaviorDone(True)
            else:
                #
                # If we hit the end of the range and did not find anything then
                # return to the last z position where we had a good lock and stop.
                #
                if (self.z_stage_functionality.getCurrentPosition() > self.sm_z_end):
                    self.z_stage_functionality.goAbsolute(self.last_good_z)
                    self.behaviorDone(False)
                #
                # Otherwise continue to move up.
                #
                else:
                    self.z_stage_functionality.goRelative(self.sm_z_step)
    def startLockBehavior(self, behavior_name, behavior_params):
        """Configure the scan (min sum, offset threshold in nm, range, step,
        center, target) from behavior_params with parameter fallbacks, clamp
        the range to the stage limits, and move to the scan start."""
        if hasattr(super(), "startLockBehavior"):
            super().startLockBehavior(behavior_name, behavior_params)
        if (behavior_name == self.sm_mode_name):
            p = self.parameters.get(self.sm_pname)
            # Set minimum sum.
            if "minimum_sum" in behavior_params:
                self.sm_min_sum = behavior_params["minimum_sum"]
            else:
                self.sm_min_sum = p.get("minimum_sum")
            # Set offset threshold.
            if "offset_threshold" in behavior_params:
                self.sm_offset_threshold = 1.0e-3 * behavior_params["offset_threshold"]
            else:
                self.sm_offset_threshold = 1.0e-3 * p.get("offset_threshold")
            # Set scan range.
            #
            # FIXME: User will specify full range or half range size?
            #
            if "scan_range" in behavior_params:
                # None means scan the entire range of the z stage.
                if behavior_params["scan_range"] is None or behavior_params["scan_range"] is False:
                    sm_z_range = self.z_stage_functionality.getMaximum() - self.z_stage_functionality.getMinimum()
                else:
                    sm_z_range = behavior_params["scan_range"]
            else:
                sm_z_range = p.get("scan_range")
            # Set z step size.
            if "scan_step" in behavior_params:
                self.sm_z_step = behavior_params["scan_step"]
            else:
                self.sm_z_step = p.get("scan_step")
            # Set z starting and ending positions.
            if "z_center" in behavior_params:
                self.sm_z_end = behavior_params["z_center"] + sm_z_range
                self.sm_z_start = behavior_params["z_center"] - sm_z_range
            else:
                self.sm_z_end = self.last_good_z + sm_z_range
                self.sm_z_start = self.last_good_z - sm_z_range
            # Fix end points is they are outside the range of the z stage.
            if (self.sm_z_end > self.z_stage_functionality.getMaximum()):
                self.sm_z_end = self.z_stage_functionality.getMaximum()
            if (self.sm_z_start < self.z_stage_functionality.getMinimum()):
                self.sm_z_start = self.z_stage_functionality.getMinimum()
            # Set target offset.
            if "target" in behavior_params:
                self.sm_target = behavior_params["target"]
            else:
                self.sm_target = self.lm_target
            # Move z stage to the starting point.
            self.z_stage_functionality.goAbsolute(self.sm_z_start)
class LockMode(QtCore.QObject):
    """
    The base class for all the lock modes.

    Modes are 'state' of the focus lock. They are called when there
    is a new QPD reading or a new frame (from the camera/feed that
    is being used to time the acquisition).

    The modes have control of the zstage to do the actual stage
    moves. Note that the requests to move the zstage could be
    sent as fast as the QPD reads and/or new frames are arriving,
    so if the zstage is slow it could get overwhelmed by move requests.

    The modes share a single parameter object. The parameters specific
    to a particular mode are stored under a mode specific attribute.

    To avoid name clashes as there are a lot of attributes (too many?),
    sub-class attribute names are all prefixed with a sub-class
    specific string.
    """
    # This signal is emitted when a mode finishes,
    # with True/False for success or failure.
    done = QtCore.pyqtSignal(bool)

    # This signal is emitted when the lock state
    # changes between bad and good.
    goodLock = QtCore.pyqtSignal(bool)

    # Emitted when the current lock target is changed.
    lockTarget = QtCore.pyqtSignal(float)

    # The current QPD state. This is a class rather than an instance
    # variable so it is still available even when we change lock modes.
    qpd_state = None

    # Z stage functionality. All the classes use the same one.
    z_stage_functionality = None

    def __init__(self, parameters = None, **kwds):
        super().__init__(**kwds)
        self.behavior = "none"        # Name of the currently running behavior.
        self.good_lock = False        # Whether the lock is currently good.
        self.last_good_z = None       # Last z position with a good lock.
        self.name = "NA"              # Display name for the mode combo box.
        self.parameters = parameters  # Shared parameters object.
        self.qpd_state = None         # Shadows the class attribute per instance.

        # Each mixin in the MRO appends the behavior names it implements;
        # the base contributes "none".
        if not hasattr(self, "behavior_names"):
            self.behavior_names = []
        self.behavior_names.append(self.behavior)

    def amLocked(self):
        """True if the 'locked' behavior is currently running."""
        return (self.behavior == "locked")

    def behaviorDone(self, success):
        """
        Behaviors that end should call this method when they have
        finished, and indicate whether they succeeded or failed.

        The mode will go into the idle state and wait for
        lockControl.LockControl to tell it what to do next.
        """
        self.behavior = "none"
        self.done.emit(success)

    def canHandleTCPMessages(self):
        """
        Modes without any of the mixins cannot handle TCP messages.
        """
        return False

    def getName(self):
        """
        Returns the name of the lock mode (as it should appear
        in the lock mode combo box).
        """
        return self.name

    def getQPDState(self):
        """Return the most recent QPD reading (None if none received yet)."""
        return self.qpd_state

    def getWaveform(self):
        """
        Hardware timed modules should return a daqModule.DaqWaveform here.
        """
        pass

    def handleNewFrame(self, frame):
        """Called for every new camera frame; the base mode ignores frames."""
        pass

    def handleQPDUpdate(self, qpd_state):
        """Record the new QPD state, then let the mixins react to it."""
        self.qpd_state = qpd_state
        if hasattr(super(), "handleQPDUpdate"):
            super().handleQPDUpdate(qpd_state)

    def isGoodLock(self):
        return self.good_lock

    def newParameters(self, parameters):
        """Install new parameters, then let the mixins update themselves."""
        self.parameters = parameters
        if hasattr(super(), "newParameters"):
            super().newParameters(parameters)

    def setLockStatus(self, status):
        """Set the good/bad lock state and notify listeners."""
        self.good_lock = status
        self.goodLock.emit(status)

    def setLockTarget(self, target):
        # Note: the signal is emitted before lm_target is updated; kept
        # as-is in case listeners rely on reading the previous target.
        self.lockTarget.emit(target)
        self.lm_target = target

    def setZStageFunctionality(self, z_stage_functionality):
        self.z_stage_functionality = z_stage_functionality

    def shouldEnableLockButton(self):
        """Whether the UI lock button should be enabled for this mode."""
        return False

    def startFilm(self):
        pass

    def startLock(self):
        self.setLockStatus(False)
        #
        # The assumption here is that the only super class that will
        # have a startLock() method is the LockedMixin class.
        #
        if hasattr(super(), "startLock"):
            super().startLock()

    def startLockBehavior(self, behavior_name, behavior_params):
        """
        Start a 'behavior' of the lock mode.
        """
        if not behavior_name in self.behavior_names:
            raise LockModeException("Unknown lock behavior '" + behavior_name + "'.")
        self.setLockStatus(False)
        #
        # Basically this searches through the various mixin classes till
        # it finds the one that implements the requested behavior.
        #
        if hasattr(super(), "startLockBehavior"):
            super().startLockBehavior(behavior_name, behavior_params)
        self.behavior = behavior_name

    def stopLock(self):
        """Stop any behavior, recenter the z stage and mark the lock bad."""
        self.behavior = "none"
        self.z_stage_functionality.recenter()
        self.setLockStatus(False)

    def stopFilm(self):
        pass
class JumpLockMode(LockMode, FindSumMixin, LockedMixin, ScanMixin):
    """
    Sub class for handling locks, jumps and combinations thereof. Basically
    every class that can lock is a sub-class of this class.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        # One-shot timer used to restart the lock shortly after a jump.
        relock_timer = QtCore.QTimer(self)
        relock_timer.setInterval(200)
        relock_timer.setSingleShot(True)
        relock_timer.timeout.connect(self.handleRelockTimer)
        self.jlm_relock_timer = relock_timer

    def canHandleTCPMessages(self):
        """
        Modes with (all) of the mixins can handle TCP messages.
        """
        return True

    def handleJump(self, jumpsize):
        """
        Jumps the piezo stage immediately if it is not locked. Otherwise it
        stops the lock, jumps the piezo stage and starts the relock timer.
        """
        was_locked = (self.behavior == "locked")
        if was_locked:
            self.behavior = "none"
            self.jlm_relock_timer.start()
        self.z_stage_functionality.goRelative(jumpsize)

    def handleRelockTimer(self):
        """
        Restarts the focus lock when the relock timer fires.
        """
        self.startLock()
#
# These are in the order that they (usually) appear in the combo box.
#
class NoLockMode(LockMode):
    """
    No focus lock.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.name = "No lock"

    def getLockTarget(self):
        # There is no lock, so the target is always zero.
        return 0.0

    def handleJump(self, jumpsize):
        """
        Jumps the piezo stage immediately by the distance jumpsize.
        """
        self.z_stage_functionality.goRelative(jumpsize)
class AutoLockMode(JumpLockMode):
    """
    Lock will be on during filming, but cannot be turned on manually.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.name = "Auto Lock"

    def startFilm(self):
        self.startLock()

    def startLock(self, target = None):
        super().startLock()
        if target is not None:
            self.setLockTarget(target)
            return
        # No explicit target: lock on the current QPD offset. The QPD state
        # can still be None if the lock is started very quickly after a mode
        # change (mostly seen in unit tests); fall back to zero in that case.
        if self.qpd_state is None:
            self.setLockTarget(0)
        else:
            self.setLockTarget(self.qpd_state["offset"])

    def stopFilm(self):
        self.stopLock()
        self.z_stage_functionality.recenter()
class AlwaysOnLockMode(AutoLockMode):
    """
    Lock will start during filming, or when the lock button is
    pressed (in which case it will always stay on)
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.aolm_film_on = False  # True while a film started the lock.
        self.name = "Always On"

    def shouldEnableLockButton(self):
        return True

    def startFilm(self):
        # If the user already locked manually, leave the lock alone.
        if self.amLocked():
            return
        self.aolm_film_on = True
        self.startLock()

    def stopFilm(self):
        # Only stop the lock if this film was the one that started it.
        if not self.aolm_film_on:
            return
        self.aolm_film_on = False
        self.stopLock()
class OptimalLockMode(AlwaysOnLockMode):
    """
    At the start of filming the stage is moved in a triangle wave.
    First it goes up to bracket_step, then down to -bracket_step
    and then finally back to zero. At each point along the way the
    focus quality & offset are recorded. When the stage returns to
    zero, the data is fit with a gaussian and the lock target is
    set to the offset corresponding to the center of the gaussian.
    """
    def __init__(self, parameters = None, **kwds):
        kwds["parameters"] = parameters
        super().__init__(**kwds)
        self.name = "Optimal"
        # 'olm' = optimal lock mode.
        self.olm_bracket_step = None     # Scan amplitude (bracket_step * 1e-3).
        self.olm_counter = 0             # Number of samples recorded so far.
        self.olm_fvalues = None          # Recorded focus quality values.
        self.olm_mode = "none"           # "optimizing" while the scan runs.
        self.olm_pname = "optimal_mode"  # Parameter sub-section name.
        self.olm_quality_threshold = 0   # Frames below this quality are skipped.
        self.olm_relative_z = None       # Current z displacement from start.
        self.olm_scan_hold = None        # Samples to take between steps.
        self.olm_scan_step = None        # Step size (scan_step * 1e-3).
        self.olm_scan_state = "na"       # "scan up" / "scan down" / "zero".
        self.olm_zvalues = None          # Recorded QPD offsets.

        # Add optimal lock specific parameters.
        p = self.parameters.addSubSection(self.olm_pname)
        p.add(params.ParameterRangeFloat(description = "Distance +- z in nanometers",
                                         name = "bracket_step",
                                         value = 1000.0,
                                         min_value = 10.0,
                                         max_value = 10000.0))
        p.add(params.ParameterRangeFloat(description = "Minimum 'quality' signal",
                                         name = "quality_threshold",
                                         value = 0.0,
                                         min_value = 0.0,
                                         max_value = 1000.0))
        p.add(params.ParameterRangeFloat(description = "Step size in z in nanometers",
                                         name = "scan_step",
                                         value = 100.0,
                                         min_value = 10.0,
                                         max_value = 1000.0))
        p.add(params.ParameterRangeInt(description = "Frames to pause between steps",
                                       name = "scan_hold",
                                       value = 10,
                                       min_value = 1,
                                       max_value = 100))

    def handleNewFrame(self, frame):
        """
        Handles a new frame from the camera. If the mode is optimizing this calculates
        the focus quality of the frame and moves the piezo to its next position.
        """
        if (self.olm_mode == "optimizing"):
            quality = focusQuality.imageGradient(frame)
            # Frames whose focus quality is at/below the threshold are skipped.
            if (quality > self.olm_quality_threshold):
                self.olm_zvalues[self.olm_counter] = self.qpd_state["offset"]
                self.olm_fvalues[self.olm_counter] = quality
                self.olm_counter += 1

                # Only move the stage every olm_scan_hold accepted samples.
                if ((self.olm_counter % self.olm_scan_hold) == 0):
                    # Scan up
                    if (self.olm_scan_state == "scan up"):
                        if (self.olm_relative_z >= self.olm_bracket_step):
                            self.olm_scan_state = "scan down"
                        else:
                            self.olm_relative_z += self.olm_scan_step
                            self.z_stage_functionality.goRelative(self.olm_scan_step)
                    # Scan back down
                    elif (self.olm_scan_state == "scan down"):
                        if (self.olm_relative_z <= -self.olm_bracket_step):
                            self.olm_scan_state = "zero"
                        else:
                            self.olm_relative_z -= self.olm_scan_step
                            self.z_stage_functionality.goRelative(-self.olm_scan_step)
                    # Scan back to zero
                    else:
                        if (self.olm_relative_z >= 0.0):
                            # Back at the start point; fit and lock.
                            n = self.olm_counter - 1
                            # Fit offset data to a 1D gaussian (lorentzian would be better?)
                            zvalues = self.olm_zvalues[0:n]
                            fvalues = self.olm_fvalues[0:n]
                            fitfunc = lambda p, x: p[0] + p[1] * numpy.exp(- (x - p[2]) * (x - p[2]) * p[3])
                            errfunc = lambda p: fitfunc(p, zvalues) - fvalues
                            p0 = [numpy.min(fvalues),
                                  numpy.max(fvalues) - numpy.min(fvalues),
                                  zvalues[numpy.argmax(fvalues)],
                                  9.0] # empirically determined width parameter
                            p1, success = scipy.optimize.leastsq(errfunc, p0[:])
                            # NOTE(review): leastsq also reports a solution for
                            # status flags 2-4; only 1 is accepted here -- confirm.
                            if (success == 1):
                                optimum = p1[2]
                            else:
                                print("> fit for optimal lock failed.")
                                # hope that this is close enough
                                optimum = zvalues[numpy.argmax(fvalues)]
                            print("> optimal Target:", optimum)
                            self.olm_mode = "none"
                            self.startLock(target = optimum)
                        else:
                            self.olm_relative_z += self.olm_scan_step
                            self.z_stage_functionality.goRelative(self.olm_scan_step)

    def initializeScan(self):
        """
        Configures all the variables that will be used during
        the scan to find the optimal lock target.
        """
        self.olm_mode = "optimizing"
        self.olm_relative_z = 0.0
        self.olm_scan_state = "scan up"
        self.olm_counter = 0
        # Generous guess at the number of samples the triangle wave needs.
        size_guess = round(self.olm_scan_hold * (self.olm_bracket_step / self.olm_scan_step) * 6)
        self.olm_fvalues = numpy.zeros(size_guess)
        self.olm_zvalues = numpy.zeros(size_guess)

    def newParameters(self, parameters):
        if hasattr(super(), "newParameters"):
            super().newParameters(parameters)
        p = parameters.get(self.olm_pname)
        # The 1e-3 factor converts the nanometer parameters to the stage's
        # units -- presumably microns; TODO confirm.
        self.olm_bracket_step = 0.001 * p.get("bracket_step")
        self.olm_quality_threshold = p.get("quality_threshold")
        self.olm_scan_step = 0.001 * p.get("scan_step")
        self.olm_scan_hold = p.get("scan_hold")

    def startFilm(self):
        # Only run the optimization scan if we are currently locked.
        if self.amLocked():
            self.behavior = "none"
            self.initializeScan()
class CalibrationLockMode(JumpLockMode):
    """
    No lock, the stage is driven through a pre-determined set of
    z positions for calibration purposes during filming.
    """
    def __init__(self, parameters = None, **kwds):
        kwds["parameters"] = parameters
        super().__init__(**kwds)
        self.clm_counter = 0     # Index of the next z move to make.
        self.clm_max_zvals = 0   # Total number of queued z moves.
        self.clm_pname = "calibrate"
        self.clm_zvals = []      # Relative z moves, one per frame.
        self.name = "Calibrate"

        # Add calibration specific parameters.
        p = self.parameters.addSubSection(self.clm_pname)
        p.add(params.ParameterRangeInt(description = "Frames to pause between steps.",
                                       name = "frames_to_pause",
                                       value = 2,
                                       min_value = 1,
                                       max_value = 100))
        p.add(params.ParameterRangeInt(description = "Frames before to pause at start.",
                                       name = "deadtime",
                                       value = 20,
                                       min_value = 1,
                                       max_value = 100))
        p.add(params.ParameterRangeFloat(description = "Distance +- z to move in nanometers.",
                                         name = "range",
                                         value = 600,
                                         min_value = 100,
                                         max_value = 5000))
        p.add(params.ParameterRangeFloat(description = "Step size in z in nanometers.",
                                         name = "step_size",
                                         value = 10,
                                         min_value = 1,
                                         max_value = 100))

    def calibrationSetup(self, z_center, deadtime, zrange, step_size, frames_to_pause):
        """
        Configure the variables that will be used to execute the z scan.

        zrange and step_size arrive in nanometers and are converted to
        microns. The resulting clm_zvals are *relative* stage moves that
        handleNewFrame() feeds to the z stage one per frame: an initial
        hold, a staircase from -zrange to +zrange, a return move, and a
        final hold.
        """
        # FIXME: Are these checks a good idea? (Currently disabled.)
        if False:
            if (deadtime <= 0):
                raise LockModeException("Deadtime is too small " + str(deadtime))
            if (zrange < 10):
                raise LockModeException("Range is too small " + str(zrange))
            if (zrange > 1000):
                raise LockModeException("Range is too large " + str(zrange))
            if (step_size <= 0.0):
                raise LockModeException("Negative / zero step size " + str(step_size))
            if (step_size > 100.0):
                # Fixed typo in message ("is to large" -> "is too large").
                raise LockModeException("Step size is too large " + str(step_size))
            if (frames_to_pause <= 0):
                # Fixed typo in message ("too smale" -> "too small").
                raise LockModeException("Frames to pause is too small " + str(frames_to_pause))

        def addZval(z_val):
            # Queue one relative z move.
            self.clm_zvals.append(z_val)
            self.clm_max_zvals += 1

        self.clm_zvals = []
        self.clm_max_zvals = 0

        # Convert to um.
        zrange = 0.001 * zrange
        step_size = 0.001 * step_size

        # Initial hold.
        for _ in range(deadtime - 1):
            addZval(z_center)

        # Staircase scan: drop to the bottom of the range, then step up,
        # holding frames_to_pause frames at each level.
        addZval(-zrange)
        z = z_center - zrange
        stop = z_center + zrange - 0.5 * step_size
        while (z < stop):
            for _ in range(frames_to_pause - 1):
                addZval(0.0)
            addZval(step_size)
            z += step_size

        # Return to the center.
        addZval(-zrange)

        # Final hold.
        for _ in range(deadtime - 1):
            addZval(z_center)

    def handleNewFrame(self, frame):
        """
        Handles a new frame from the camera. This moves to a new
        z position if the scan has not been completed.
        """
        if (self.clm_counter < self.clm_max_zvals):
            self.z_stage_functionality.goRelative(self.clm_zvals[self.clm_counter])
            self.clm_counter += 1

    def newParameters(self, parameters):
        """Rebuild the scan table from the new parameters."""
        if hasattr(super(), "newParameters"):
            super().newParameters(parameters)
        p = parameters.get(self.clm_pname)
        self.calibrationSetup(0.0,
                              p.get("deadtime"),
                              p.get("range"),
                              p.get("step_size"),
                              p.get("frames_to_pause"))

    def startFilm(self):
        # Restart the scan from the first queued move.
        self.clm_counter = 0

    def stopFilm(self):
        self.z_stage_functionality.recenter()
class HardwareZScanLockMode(AlwaysOnLockMode):
    """
    This holds a focus target. Then during filming it does a hardware
    times z scan.
    """
    def __init__(self, parameters = None, **kwds):
        kwds["parameters"] = parameters
        super().__init__(**kwds)
        self.hzs_film_off = False          # True while the lock is paused for a film.
        self.hzs_pname = "hardware_z_scan" # Parameter sub-section name.
        self.hzs_zvals = None              # Per-frame z offsets (numpy array) or None.
        self.name = "Hardware Z Scan"

        # Add hardware z scan specific parameters.
        #
        # FIXME: Should be a parameter custom? Both focuslock and illumination
        #        waveforms should be parsed / created / generated by a single
        #        module somewhere else?
        #
        section = self.parameters.addSubSection(self.hzs_pname)
        section.add(params.ParameterString(description = "Frame z steps (in microns).",
                                           name = "z_offsets",
                                           value = ""))

    def getWaveform(self):
        """
        This is called before startFilm() by lockControl.LockControl. It
        returns the waveform to use during filming as a daqModule.DaqWaveform,
        or None if there is no waveform or one shouldn't be used.
        """
        if not self.amLocked():
            return None
        if not isinstance(self.hzs_zvals, numpy.ndarray):
            return None
        waveform = self.hzs_zvals + self.z_stage_functionality.getCurrentPosition()
        return self.z_stage_functionality.getDaqWaveform(waveform)

    def setZStageFunctionality(self, z_stage_functionality):
        super().setZStageFunctionality(z_stage_functionality)
        if not self.z_stage_functionality.haveHardwareTiming():
            raise LockModeException("Z stage does not support hardware timed scans.")

    def newParameters(self, parameters):
        if hasattr(super(), "newParameters"):
            super().newParameters(parameters)
        section = parameters.get(self.hzs_pname)
        offsets_text = section.get("z_offsets")
        if len(offsets_text) > 0:
            self.hzs_zvals = numpy.array([float(v) for v in offsets_text.split(",")])
        else:
            self.hzs_zvals = None

    def shouldEnableLockButton(self):
        return True

    def startFilm(self):
        # Pause the lock during the film; the hardware waveform drives z.
        if self.amLocked() and (self.hzs_zvals is not None):
            self.behavior = "none"
            self.hzs_film_off = True

    def stopFilm(self):
        # Resume the lock if we paused it for the film.
        if self.hzs_film_off:
            self.hzs_film_off = False
            self.behavior = "locked"
class DiagnosticsLockMode(NoLockMode):
    """
    This mode is to acquire performance information for the focus lock. The
    diagnostics files are saved in the directory that HAL is running in.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.name = "Diagnostics"
        # 'ld' = lock diagnostics.
        self.ld_data_fp = None          # Text file for offset/sum/is_good samples.
        self.ld_fname_counter = 0       # Incremented per run for unique file names.
        self.ld_movie_fp = None         # TIFF writer for the QPD images.
        self.ld_take_movie = True       # Also record the QPD image stream.
        self.ld_test_start_time = None  # time.time() when acquisition started.
        self.ld_test_n_events = 0       # Number of QPD samples recorded.

    def handleQPDUpdate(self, qpd_state):
        """Record one diagnostics sample for every QPD update (while open)."""
        super().handleQPDUpdate(qpd_state)
        if self.ld_data_fp is not None:
            self.ld_test_n_events += 1
            # Progress report every 100 samples.
            if((self.ld_test_n_events%100)==0):
                print("Acquired {0:d} data points.".format(self.ld_test_n_events))
            self.ld_data_fp.write("{0:.6f} {1:.3f} {2:0d}\n".format(qpd_state["offset"],
                                                                    qpd_state["sum"],
                                                                    int(qpd_state["is_good"])))
            if self.ld_movie_fp is not None:
                self.ld_movie_fp.save(qpd_state["image"])

    def shouldEnableLockButton(self):
        return True

    def startFilm(self):
        # Start recording only if we are not already recording.
        if self.ld_data_fp is None:
            self.startLock()

    def startLock(self, target = None):
        """Open the diagnostics output files and reset the counters."""
        super().startLock()
        self.ld_test_start_time = time.time()
        self.ld_test_n_events = 0
        self.ld_fname_counter += 1
        fname_base = "dlm_{0:03d}".format(self.ld_fname_counter)
        self.ld_data_fp = open(fname_base + ".txt", "w")
        if self.ld_take_movie:
            self.ld_movie_fp = tifffile.TiffWriter(fname_base + ".tif")

    def stopFilm(self):
        if self.ld_data_fp is not None:
            self.stopLock()

    def stopLock(self):
        """
        Close the output files and print a throughput summary.

        NOTE(review): assumes startLock() ran first (ld_data_fp is open and
        ld_test_start_time is set) -- calling this otherwise would raise.
        """
        super().stopLock()
        self.ld_data_fp.close()
        self.ld_data_fp = None
        if self.ld_movie_fp is not None:
            self.ld_movie_fp.close()
            self.ld_movie_fp = None
        elapsed_time = time.time() - self.ld_test_start_time
        print("> lock performance {0:0d} samples, {1:.2f} samples/second".format(self.ld_test_n_events,
                                                                                 self.ld_test_n_events/elapsed_time))
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
from typing import List, Union
from aiocache import cached
from bson import ObjectId
from motor.core import AgnosticCollection
from pymongo.results import DeleteResult
from config.clients import redis_cache_only_kwargs, key_builder_only_kwargs, cache, pickle_serializer, logger
from config import SCHEMA_TTL
from utils.logger import Logger
class BaseCollection:
    """
    Base class for MongoDB (motor) documents with a redis cache layer.

    Subclasses set `collection` and implement create(), _save() and
    to_dict(). Instances are keyed by an ObjectId `_id`.
    """
    collection: AgnosticCollection = None
    logger: Logger = Logger(name='BaseCollection')

    def __init__(self, _id: Union[str, ObjectId]):
        # Normalize the id so lookups always use ObjectId.
        self._id: ObjectId = ObjectId(_id)
        self.create_at = None
        self.update_at = None
        # True once load() found a matching document.
        self.exists = False

    @property
    def id(self) -> ObjectId:
        return self._id

    @id.setter
    def id(self, value):
        self._id = ObjectId(value)

    async def load(self) -> None:
        """Populate instance attributes from the stored document, if any."""
        docu = await self.collection.find_one({"_id": self._id})
        if not docu:
            self.exists = False
            return
        self.exists = True
        # Copy only fields the instance declares, ignoring unknown keys.
        for attr in docu:
            if hasattr(self, attr):
                setattr(self, attr, docu[attr])

    @classmethod
    async def create(cls, *args, **kwargs):
        raise NotImplementedError

    async def save(self) -> bool:
        """Persist via _save(); returns False (and logs) on any error."""
        try:
            rst = await self._save()
        except Exception as e:
            # NOTE(review): `exceptions` is the project Logger's API --
            # confirm it exists on utils.logger.Logger.
            logger.exceptions(e)
            return False
        else:
            return rst

    async def _save(self) -> bool:
        # await self.load()  # reload from db
        # await self.rebuild_cache()
        raise NotImplementedError

    async def refresh_cache(self) -> None:
        """Evict this document's cache entry."""
        self.logger.info('delete_cache', key=self.cache_key)
        await cache.delete(key=self.cache_key)

    async def rebuild_cache(self) -> None:
        """Overwrite this document's cache entry with the current state."""
        self.logger.info('rebuild_cache', key=self.cache_key)
        # await cache.delete(key=self.cache_key)
        await cache.set(key=self.cache_key, value=self, ttl=SCHEMA_TTL)

    @property
    def cache_key(self) -> str:
        # Must match the key produced by the @cached decorator on get_by_id.
        return key_builder_only_kwargs(self.get_by_id, self, _id=self._id)

    @classmethod
    @cached(ttl=SCHEMA_TTL, serializer=pickle_serializer, **redis_cache_only_kwargs)
    async def get_by_id(cls, _id: Union[str, ObjectId]):
        """Load (or fetch from cache) the document with the given id."""
        event = cls(_id=_id)
        logger.info('real_get', collection=event.collection.name, id=_id)
        await event.load()
        return event

    @classmethod
    async def delete_many(cls, _ids: List[str]) -> DeleteResult:
        """
        Delete several documents by id and evict their cache entries.

        Bug fix: ids are normalized to ObjectId before querying. Documents
        are stored with ObjectId keys (see __init__/load), so a $in filter
        of raw strings would match nothing.
        """
        object_ids = [ObjectId(_id) for _id in _ids]
        delete_result: DeleteResult = await cls.collection.delete_many({"_id": {"$in": object_ids}})
        for _id in object_ids:
            await cls(_id).refresh_cache()
        return delete_result

    async def delete(self) -> bool:
        """Delete this document; True if exactly one document was removed."""
        delete_result: DeleteResult = await self.collection.delete_one({"_id": self._id})
        await self.refresh_cache()
        return delete_result.deleted_count == 1

    def to_dict(self) -> dict:
        raise NotImplementedError
|
# -----------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2020 Robbie Coenmans
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import unittest
import datetime
import random
from vstsclient.vstsclient import VstsClient
class FieldsTest(unittest.TestCase):
    """Integration tests for VstsClient work item field operations."""

    def setUp(self):
        # Instance url and personal access token are read from a local
        # settings file; `with` guarantees the handle is closed even if
        # a readline fails (the original leaked it on error).
        with open('./tests/vsts_settings.txt', 'r') as file:
            self.instance = file.readline().rstrip()
            self.personal_access_token = file.readline().rstrip()

    def tearDown(self):
        # Best-effort cleanup of the field created by test_create_field.
        ref_name = 'new.work.item.field'
        prj_name = 'Contoso'
        try:
            client = VstsClient(self.instance, self.personal_access_token)
            client.delete_field(ref_name, prj_name)
        except Exception:
            # The field may not exist; cleanup failures are non-fatal.
            # (Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass

    def test_create_field(self):
        # Arrange
        client = VstsClient(self.instance, self.personal_access_token)
        name = 'New work item field'
        ref_name = 'new.work.item.field'
        prj_name = 'Contoso'

        # Act
        field = client.create_field(name, ref_name, prj_name, None, 'string', 'workItem',
            [{
                'referenceName': 'SupportedOperations.Equals',
                'name': '='
            }])

        # Assert
        # Bug fix: was assertIsNone, which contradicted the attribute
        # accesses on the following lines.
        self.assertIsNotNone(field)
        self.assertEqual(name, field.name)
        self.assertEqual(ref_name, field.ref_name)

    def test_get_field(self):
        # Arrange
        client = VstsClient(self.instance, self.personal_access_token)
        ref_name = 'System.IterationPath'
        prj_name = 'Contoso'

        # Act
        field = client.get_field(ref_name, prj_name)

        # Assert
        self.assertIsNotNone(field)

    def test_delete_field(self):
        # Arrange
        client = VstsClient(self.instance, self.personal_access_token)
        ref_name = 'new.work.item.field'
        prj_name = 'Contoso'

        # Act (passes if no exception is raised).
        client.delete_field(ref_name, prj_name)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
import argparse
from pathlib import Path, PurePath
from typing import Dict
import numpy as np
import src.data
import src.file_util
import src.image_util
import src.model
def _batch_update(
    dataset: src.data.Dataset,
    batch_index: int,
    d_f: src.data.ModelFile,
    d_c: src.data.ModelFile,
    g_c: src.data.ModelFile,
    g_f: src.data.ModelFile,
    gan: src.data.ModelFile,
) -> Dict[str, float]:
    """
    Run one GAN training step on a single batch and return the losses.

    Trains, in order: both discriminators (twice), the coarse generator,
    the fine generator, and finally the combined GAN. The `trainable`
    flags are toggled before each phase so only the intended sub-models
    are updated by train_on_batch.

    Suffixes (see "data.py" docs and readme): _f/_c = fine/coarse scale,
    _r/_x = real/generated ("fake") -- TODO confirm against data.py.
    """
    batch_losses = {}
    # See "data.py" docs and readme for Hungarian notation meaning

    # ASSEMBLE DATA
    # Everything frozen while the dataset uses the generators for inference.
    d_f.model.trainable = False
    d_c.model.trainable = False
    gan.model.trainable = False
    g_c.model.trainable = False
    g_f.model.trainable = False
    data = dataset.get_batch_data(batch_index=batch_index)
    [XA_fr, XB_fr, XC_fr] = data["X_fr"]
    [y1_fr, y2_fr] = data["y_fr"]
    [XA_cr, XB_cr, XC_cr] = data["X_cr"]
    XC_cx = data["XC_cx"]
    y1_cx = data["y_cx"]
    XC_fx = data["XC_fx"]
    y1_fx = data["y_fx"]
    weights_c_to_f = data["c_to_f"]

    # UPDATE DISCRIMINATORS
    # Two passes over real and generated samples at both scales.
    d_f.model.trainable = True
    d_c.model.trainable = True
    gan.model.trainable = False
    g_c.model.trainable = False
    g_f.model.trainable = False
    for _ in range(2):
        losses = {
            "d_fr": d_f.model.train_on_batch([XA_fr, XC_fr], y1_fr)[0],
            "d_fx": d_f.model.train_on_batch([XA_fr, XC_fx], y1_fx)[0],
            "d_cr": d_c.model.train_on_batch([XA_cr, XC_cr], y2_fr)[0],
            "d_cx": d_c.model.train_on_batch([XA_cr, XC_cx], y1_cx)[0],
        }
        # Only the second pass's losses are kept.
        batch_losses.update(losses)  # type: ignore

    # UPDATE COARSE GENERATOR: _cr
    d_f.model.trainable = False
    d_c.model.trainable = False
    gan.model.trainable = False
    g_c.model.trainable = True
    g_f.model.trainable = False
    batch_losses["g_c"], _ = g_c.model.train_on_batch([XA_cr, XB_cr], [XC_cr])

    # UPDATE FINE GENERATOR: _fr
    d_f.model.trainable = False
    d_c.model.trainable = False
    gan.model.trainable = False
    g_c.model.trainable = False
    g_f.model.trainable = True
    batch_losses["g_f"] = g_f.model.train_on_batch(
        [XA_fr, XB_fr, weights_c_to_f], XC_fr
    )

    # UPDATE GAN
    # Joint update of both generators through the frozen discriminators.
    d_f.model.trainable = False
    d_c.model.trainable = False
    gan.model.trainable = True
    g_c.model.trainable = True
    g_f.model.trainable = True
    # Unpacking order must match the GAN's compiled output/loss order.
    (
        loss_gan,
        _,
        _,
        loss_fm_c,
        loss_fm_f,
        _,
        _,
        loss_g_c_reconstruct,
        loss_g_f_reconstruct,
    ) = gan.model.train_on_batch(
        [XA_fr, XA_cr, weights_c_to_f, XB_fr, XB_cr, XC_fr, XC_cr],
        [y1_fr, y2_fr, XC_fx, XC_cx, XC_cx, XC_fx, XC_cx, XC_fx],  # type: ignore
    )
    batch_losses.update(
        {
            "gan": loss_gan,
            "fm1": loss_fm_c,
            "fm2": loss_fm_f,
            "g_c_recon": loss_g_c_reconstruct,
            "g_f_recon": loss_g_f_reconstruct,
        }
    )
    return batch_losses
def train(
    dataset: src.data.Dataset,
    d_f: src.data.ModelFile,
    d_c: src.data.ModelFile,
    g_c: src.data.ModelFile,
    g_f: src.data.ModelFile,
    gan: src.data.ModelFile,
    statistics: src.data.Statistics,
    visualizations: src.data.Visualizations,
    epoch_count: int,
):
    """
    Train all models for up to epoch_count epochs.

    After every epoch: statistics and a plot are saved, all models are
    checkpointed under the "latest" version, and the two generators are
    additionally saved as "eval_<epoch>" for later evaluation.
    """
    # Resume one past the last recorded epoch.
    # NOTE(review): when latest_epoch == 0 this restarts AT epoch 0, so a
    # run interrupted after epoch 0 repeats it -- confirm the intended
    # semantics of Statistics.latest_epoch.
    start_epoch = statistics.latest_epoch
    if 0 < start_epoch:
        start_epoch += 1

    statistics.start_timer()
    print(f"starting at epoch {start_epoch} of {epoch_count}")
    print(f"epochs have {dataset.batch_count} batches of {dataset.images_per_batch}")
    for epoch in range(start_epoch, epoch_count):
        # BATCH LOOP
        for batch in range(dataset.batch_count):
            batch_losses = _batch_update(
                dataset=dataset,
                batch_index=batch,
                d_f=d_f,
                d_c=d_c,
                g_c=g_c,
                g_f=g_f,
                gan=gan,
            )
            statistics.append(epoch=epoch, batch=batch, data=batch_losses)
            print(statistics.latest_batch_to_string())
        print(statistics.latest_epoch_to_string())

        # SAVE
        print("saving epoch")
        statistics.save()
        visualizations.save_plot(epoch=epoch)
        # Rolling checkpoint used by --resume_training.
        VERSION = "latest"
        d_f.save(version=VERSION)
        d_c.save(version=VERSION)
        g_c.save(version=VERSION)
        g_f.save(version=VERSION)
        gan.save(version=VERSION)
        # Per-epoch snapshot of the generators only, for evaluation.
        VERSION = f"eval_{epoch}"
        g_c.save(version=VERSION)
        g_f.save(version=VERSION)
    print(f"training complete")
# Script entry point: parse CLI arguments, build the models, load the
# dataset, optionally resume from the "latest" checkpoints, then train.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--npz_file", type=str, required=True, help="path/to/npz/file",
    )
    parser.add_argument(
        "--save_folder",
        type=str,
        required=True,
        # NOTE(review): this default is dead -- required=True means the
        # user must always supply --save_folder.
        default="RVGAN",
    )
    parser.add_argument("--resume_training", action="store_true")
    parser.add_argument("--config_file", type=str, default="config.yaml")
    args = parser.parse_args()

    input_npz_file = PurePath(args.npz_file)
    assert src.file_util.check_file(input_npz_file)
    output_folder = PurePath(args.save_folder)
    Path(output_folder).mkdir(parents=True, exist_ok=True)
    config_file = PurePath(args.config_file)
    assert src.file_util.check_file(config_file)
    resume_training = args.resume_training

    print("loading config")
    config = src.file_util.read_yaml(path=config_file)
    input_shape_px = np.array(config["arch"]["input_size"])
    downscale_factor = config["arch"]["downscale_factor"]
    inner_weight = config["arch"]["inner_weight"]
    epoch_count = config["train"]["epochs"]
    images_per_batch = config["train"]["batch_size"]

    # Build the two discriminators, two generators and the combined GAN.
    print("building model")
    arch_factory = src.model.ArchFactory(
        input_shape_px=input_shape_px, downscale_factor=downscale_factor,
    )
    print("  d_f")
    d_f_arch = arch_factory.build_discriminator(scale_type="fine", name="D1")
    d_f = src.data.ModelFile(name="d_f", folder=output_folder, arch=d_f_arch)
    print("  d_c")
    d_c_arch = arch_factory.build_discriminator(scale_type="coarse", name="D2")
    d_c = src.data.ModelFile(name="d_c", folder=output_folder, arch=d_c_arch)
    print("  g_f")
    g_f_arch = arch_factory.build_generator(scale_type="fine")
    g_f = src.data.ModelFile(name="g_f", folder=output_folder, arch=g_f_arch)
    print("  g_c")
    g_c_arch = arch_factory.build_generator(scale_type="coarse")
    g_c = src.data.ModelFile(name="g_c", folder=output_folder, arch=g_c_arch)
    print("  gan")
    gan_arch = arch_factory.build_gan(
        d_coarse=d_c_arch,
        d_fine=d_f_arch,
        g_coarse=g_c_arch,
        g_fine=g_f_arch,
        inner_weight=inner_weight,
    )
    gan = src.data.ModelFile(name="gan", folder=output_folder, arch=gan_arch)

    print("loading dataset")
    [XA_fr, XB_fr, XC_fr] = src.data.load_npz_data(path=input_npz_file)
    dataset = src.data.Dataset(
        XA_fr=XA_fr,
        XB_fr=XB_fr,
        XC_fr=XC_fr,
        downscale_factor=downscale_factor,
        images_per_batch=images_per_batch,
        g_f_arch=g_f.model,
        g_c_arch=g_c.model,
    )

    print("initializing statistics")
    statistics = src.data.Statistics(output_folder=output_folder)

    print("initializing visualizations")
    visualizations = src.data.Visualizations(
        output_folder=output_folder,
        dataset=dataset,
        downscale_factor=downscale_factor,
        sample_count=5,
        g_c=g_c,
        g_f=g_f,
    )

    # Restore model weights and statistics from the rolling checkpoint;
    # train() picks up the epoch from statistics.latest_epoch.
    if args.resume_training:
        print("resuming training")
        VERSION = "latest"
        d_f.load(version=VERSION)
        d_c.load(version=VERSION)
        g_c.load(version=VERSION)
        g_f.load(version=VERSION)
        gan.load(version=VERSION)
        statistics.load()
    else:
        print("starting training")
    train(
        dataset=dataset,
        d_f=d_f,
        d_c=d_c,
        g_c=g_c,
        g_f=g_f,
        gan=gan,
        statistics=statistics,
        visualizations=visualizations,
        epoch_count=epoch_count,
    )
    print("Training complete")
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMsgpackNumpy(PythonPackage):
    """This package provides encoding and decoding routines
    that enable the serialization and deserialization of
    numerical and array data types provided by numpy using the
    highly efficient msgpack format. Serialization of Python's
    native complex data types is also supported."""

    homepage = "https://github.com/lebedov/msgpack-numpy"
    pypi = "msgpack-numpy/msgpack-numpy-0.4.7.1.tar.gz"

    # Checksums are the sha256 of the PyPI source tarballs.
    version('0.4.7.1', sha256='7eaf51acf82d7c467d21aa71df94e1c051b2055e54b755442051b474fa7cf5e1')
    version('0.4.7', sha256='8e975dd7dd9eb13cbf5e8cd90af1f12af98706bbeb7acfcbd8d558fd005a85d7')
    version('0.4.6', sha256='ef3c5fe3d6cbab5c9db97de7062681c18f82d32a37177aaaf58b483d0336f135')
    version('0.4.5', sha256='4e88a4147db70f69dce1556317291e04e5107ee7b93ea300f92f1187120da7ec')
    version('0.4.4.3', sha256='c7db37ce01e268190568cf66a6a65d1ad81e3bcfa55dd824103c9b324608a44e')
    version('0.4.4.2', sha256='ac3db232710070ac64d8e1c5123550a1c1fef45d77b6789d2170cbfd2ec711f3')
    version('0.4.4.1', sha256='b7641ccf9f0f4e91a533e8c7be5e34d3f12ff877480879b252113d65c510eeef')

    # Build and runtime requirements (numpy >= 1.9, msgpack >= 0.5.2).
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy@1.9:', type=('build', 'run'))
    depends_on('py-msgpack@0.5.2:', type=('build', 'run'))
|
import time
import threading
import abc
__author__ = "Yves Auad"
class EELSController(abc.ABC):
    """
    Abstract controller for an EELS spectrometer supply.

    Provides a thread-safe set_val wrapper plus a background 'wobbler'
    that periodically toggles a value between current +/- intensity.
    """
    def __init__(self):
        # Serializes set_val calls across threads.
        self.lock = threading.Lock()
        # Placeholder (never started); replaced when the wobbler starts.
        self.wobbler_thread = threading.Thread()
        # Center value/channel restored when the wobbler is switched off.
        self.last_wobbler_value = 0
        self.last_wobbler_which = 'OFF'

    @abc.abstractmethod
    def set_val(self, val, which):
        """
        Set value of the spectrometer.
        """

    def locked_set_val(self, val, which):
        """Thread-safe wrapper around set_val()."""
        with self.lock:
            self.set_val(val, which)

    def wobbler_loop(self, current, intensity, which):
        """
        Alternate the value between current +/- intensity (then back to
        current) until the thread's do_run attribute is set to False.
        """
        # threading.current_thread() replaces threading.currentThread(),
        # which is deprecated since Python 3.10 and removed in 3.13.
        self.wobbler_thread = threading.current_thread()
        sens = 1
        while getattr(self.wobbler_thread, "do_run", True):
            sens = sens * -1
            if getattr(self.wobbler_thread, "do_run", True):
                time.sleep(2. / 3.)
            self.locked_set_val(current + sens * intensity, which)
            if getattr(self.wobbler_thread, "do_run", True):
                time.sleep(2. / 3.)
            self.locked_set_val(current, which)

    def wobbler_on(self, current, intensity, which):
        """Start (or restart) the wobbler thread around `current`."""
        self.last_wobbler_which = which
        self.last_wobbler_value = current
        # Stop a previous wobbler before starting a new one.
        if self.wobbler_thread.is_alive():
            self.wobbler_off()
        if not self.wobbler_thread.is_alive():
            self.wobbler_thread = threading.Thread(target=self.wobbler_loop, args=(current, intensity, which), )
            self.wobbler_thread.start()

    def wobbler_off(self):
        """Stop the wobbler thread and restore the last center value."""
        if self.wobbler_thread.is_alive():
            self.wobbler_thread.do_run = False
            self.wobbler_thread.join()
            self.locked_set_val(self.last_wobbler_value, self.last_wobbler_which)
|
from django.conf import settings
from django.db.models.signals import post_save
from guardian.shortcuts import assign_perm
from timezone_field import TimeZoneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from task.models import Task
class CustomUserManager(BaseUserManager):
    """Manager that creates accounts keyed by email instead of username."""

    def _create_user(self, email, password,
                     is_superuser, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('The given email must be set')
        now = timezone.now()
        user = self.model(
            email=self.normalize_email(email),
            is_active=True,
            is_superuser=is_superuser,
            last_login=now,
            date_joined=now,
            **extra_fields
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular (non-superuser) account."""
        return self._create_user(email, password, False, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create and save a superuser account."""
        return self._create_user(email, password, True, **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """
    Email-keyed user model with admin-compliant permissions.

    Email and password are required; every other field is optional.
    """
    email = models.EmailField(_('email address'), max_length=254, unique=True)
    name = models.CharField(_('first name'), max_length=30, blank=True)
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = CustomUserManager()
    timezone = TimeZoneField(default='UTC')
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_absolute_url(self):
        """Return the URL of this user's profile page."""
        return "/account/profile/%d/" % self.id

    def get_full_name(self):
        """Display name: the name field when set, otherwise the email."""
        return self.name if self.name else self.email

    def get_short_name(self):
        """Short identifier (may be empty when no name is set)."""
        return self.name

    def email_user(self, subject, message, from_email=None):
        """Send an email to this user's address."""
        send_mail(subject, message, from_email, [self.email])
from django.contrib.auth.models import Group
from core.models import Department, Application, Environment
class DepartmentGroup(Group):
    """Django auth Group scoped to a Department.

    The group's global ``name`` is derived from the department id plus a
    local name, and the ``on_create_*`` signal handlers below grant
    default object-level permissions (django-guardian) whenever related
    objects are created.
    """
    department = models.ForeignKey(Department, related_name="groups")
    # Human-readable name, unique within the department.
    local_name = models.CharField(max_length=124)
    # Machine key, e.g. 'user' or 'admin' (see DEFAULT_DEPARTMENT_GROUPS).
    system_name = models.CharField(max_length=12)
    class Meta:
        ordering = ['name']
    def save(self, *args, **kwargs):
        # Group.name must be globally unique, so prefix with the department id.
        self.name = "%s_%s" % (self.department_id, self.local_name)
        super(DepartmentGroup, self).save(*args, **kwargs)
    def assign_department_perms(self, department):
        # Grant this group read access on the department object.
        assign_perm('core.view_department', self, department)
    @staticmethod
    def on_create_department(sender, instance, created, **kwargs):
        """post_save handler: create the default groups for a new department."""
        if created:
            for system_name, group_name in settings.DEFAULT_DEPARTMENT_GROUPS.items():
                group = DepartmentGroup(department=instance, local_name=group_name, system_name=system_name)
                group.save()
                # NOTE(review): instance method invoked unbound via the class;
                # works, but group.assign_department_perms(instance) is clearer.
                DepartmentGroup.assign_department_perms(group, instance)
                if system_name == 'admin':
                    assign_perm('core.change_department', group, instance)
    @staticmethod
    def on_create_application(sender, instance, created, **kwargs):
        """post_save handler: default perms for a new application."""
        if created:
            DepartmentGroup._assign_default_perms('core', 'application', instance.department, instance)
    @staticmethod
    def on_create_environment(sender, instance, created, **kwargs):
        """post_save handler: default perms for a new environment."""
        if created:
            DepartmentGroup._assign_default_perms('core', 'environment', instance.application.department, instance)
    @staticmethod
    def on_create_task(sender, instance, created, **kwargs):
        """post_save handler: default perms for a new task."""
        if created:
            DepartmentGroup._assign_default_perms('task', 'task', instance.application.department, instance)
    @staticmethod
    def _assign_default_perms(app, model, department, instance):
        """Give view/execute perms to 'user' and 'admin' groups; change to 'admin'."""
        groups = DepartmentGroup.objects.filter(department=department, system_name__in=['user', 'admin'])
        for group in groups:
            for action in ['view', 'execute']:
                assign_perm('%s.%s_%s' % (app, action, model), group, instance)
            if group.system_name == 'admin':
                assign_perm('%s.%s_%s' % (app, 'change', model), group, instance)
    def __str__(self):
        return self.local_name
# Wire the permission-bootstrapping handlers to model-creation signals.
post_save.connect(DepartmentGroup.on_create_department, sender=Department)
post_save.connect(DepartmentGroup.on_create_application, sender=Application)
post_save.connect(DepartmentGroup.on_create_environment, sender=Environment)
post_save.connect(DepartmentGroup.on_create_task, sender=Task)
#!/usr/bin/env python3
import binascii
import json
import socket
import numpy as np
import matplotlib.pyplot as plt
# Persistent connection to a local bitcoind JSON-RPC endpoint (default port).
s = socket.create_connection(('localhost', 8332))
r = s.makefile()
# RPC auth: base64-encoded cookie written by bitcoind on startup.
# NOTE(review): path is user-specific and the file handle is never closed.
cookie = binascii.b2a_base64(open('/home/roman/.bitcoin/.cookie', 'rb').read())
cookie = cookie.decode('ascii').strip()
def request(method, params_list):
    """Send a JSON-RPC batch request over the raw socket and return results.

    :param method: RPC method name, applied to every params entry.
    :param params_list: list of params lists, one per batched call.
    :return: list of 'result' fields, in request order.
    """
    obj = [{"method": method, "params": params} for params in params_list]
    request = json.dumps(obj)
    # Hand-rolled HTTP/1.1 POST.  NOTE(review): header lines use bare '\n'
    # rather than the standard '\r\n'; bitcoind appears tolerant, but this
    # is not strictly valid HTTP -- confirm.
    msg = ('POST / HTTP/1.1\nAuthorization: Basic {}\nContent-Length: {}\n\n'
           '{}'.format(cookie, len(request), request))
    s.sendall(msg.encode('ascii'))
    status = r.readline().strip()
    headers = []
    # Consume response headers up to the blank separator line.
    while True:
        line = r.readline().strip()
        if line:
            headers.append(line)
        else:
            break
    # Assumes the whole body arrives as a single line (no chunked encoding).
    data = r.readline().strip()
    replies = json.loads(data)
    # Fail loudly if any batched call returned an error.
    assert all(r['error'] is None for r in replies), replies
    return [d['result'] for d in replies]
def main():
    """Plot the fee-rate profile of the current bitcoind mempool."""
    # One batched call returning the list of mempool txids.
    txids, = request('getrawmempool', [[False]])
    # Each getmempoolentry call takes a single-txid params list.
    txids = list(map(lambda a: [a], txids))
    entries = request('getmempoolentry', txids)
    # fee is in BTC; convert to satoshis.  NOTE(review): newer bitcoind
    # versions replaced the 'size'/'fee' fields with 'vsize'/'fees' --
    # confirm against the target node version.
    entries = [{'fee': e['fee']*1e8, 'vsize': e['size']} for e in entries]
    for e in entries:
        e['rate'] = e['fee'] / e['vsize'] # sat/vbyte
    entries.sort(key=lambda e: e['rate'], reverse=True)
    # Cumulative mempool size when taking transactions best-rate first.
    vsize = np.array([e['vsize'] for e in entries]).cumsum()
    rate = np.array([e['rate'] for e in entries])
    plt.semilogy(vsize / 1e6, rate, '-')
    plt.xlabel('Mempool size (MB)')
    plt.ylabel('Fee rate (sat/vbyte)')
    plt.title('{} transactions'.format(len(entries)))
    plt.grid()
    plt.show()
if __name__ == '__main__':
    main()
|
import torch
import torch.nn as nn
import torch.nn.functional as func
from dtcwt_gainlayer.layers.shrink import SparsifyWaveCoeffs_std, mag, SoftShrink
class PassThrough(nn.Module):
    """Identity nonlinearity: forwards its input unchanged."""

    def forward(self, x):
        """Return *x* untouched."""
        return x
class WaveNonLinearity(nn.Module):
    """ Performs a wavelet-based nonlinearity.

    Args:
        C (int): Number of input channels. Some of the nonlinearities have batch
            norm, so need to know this.
        lp (str): Nonlinearity to use for the lowpass coefficients
        bp (list(str)): Nonlinearity to use for the bandpass coefficients.
        lp_q (float): Quantile value for sparsity threshold for lowpass.
            1 keeps all coefficients and 0 keeps none. Only valid if lp is
            'softshrink_std' or 'hardshrink_std'. See
            :class:`SparsifyWaveCoeffs_std`.
        bp_q (float): Quantile value for sparsity threshold for bandpass
            coefficients. Only valid if bp is 'softshrink_std' or
            'hardshrink_std'.

    The options for the lowpass are:

    - none
    - relu (as you'd expect)
    - relu2 - applies batch norm + relu
    - softshrink - applies soft shrinkage with a learnable threshold
    - hardshrink_std - applies hard shrinkage. The 'std' implies that it
      tracks the standard deviation of the activations, and sets a threshold
      attempting to reach a desired sparsity level. This assumes that the
      lowpass coefficients follow a laplacian distribution. See
      :class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
    - softshrink_std - same as hardshrink std except uses soft shrinkage.

    The options for the bandpass are:

    - none
    - relu (applied indepently to the real and imaginary components)
    - relu2 - applies batch norm + relu to the magnitude of the bandpass
      coefficients
    - softshrink - applies shoft shrinkage to the magnitude of the bp
      coefficietns with a learnable threshold
    - hardshrink_std - applies hard shrinkage by tracking the standard
      deviation. Assumes the bp distributions follow an exponential
      distribution. See
      :class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
    - softshrink_std - same as hardshrink_std but with soft shrinkage.
    """
    def __init__(self, C, lp=None, bp=(None,), lp_q=0.8, bp_q=0.8):
        super().__init__()
        if lp is None or lp == 'none':
            self.lp = PassThrough()
        elif lp == 'relu':
            self.lp = nn.ReLU()
        elif lp == 'relu2':
            self.lp = BNReLUWaveCoeffs(C, bp=False)
        elif lp == 'softshrink':
            self.lp = SoftShrink(C, complex=False)
        elif lp == 'hardshrink_std':
            self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=False)
        elif lp == 'softshrink_std':
            self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=True)
        else:
            raise ValueError("Unknown nonlinearity {}".format(lp))
        fs = []
        for b in bp:
            if b is None or b == 'none':
                f = PassThrough()
            elif b == 'relu':
                f = nn.ReLU()
            elif b == 'relu2':
                f = BNReLUWaveCoeffs(C, bp=True)
            elif b == 'softshrink':
                f = SoftShrink(C, complex=True)
            elif b == 'hardshrink_std':
                f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=False)
            elif b == 'softshrink_std':
                f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=True)
            else:
                # Fix: previously formatted `lp` here, so the exception
                # reported the (valid) lowpass choice instead of the
                # offending bandpass value `b`.
                raise ValueError("Unknown nonlinearity {}".format(b))
            fs.append(f)
        self.bp = nn.ModuleList(fs)

    def forward(self, x):
        """ Applies the selected lowpass and bandpass nonlinearities to the
        input x.

        Args:
            x (tuple): tuple of (lowpass, bandpasses)

        Returns:
            y (tuple): tuple of (lowpass, bandpasses)
        """
        yl, yh = x
        yl = self.lp(yl)
        # Empty scale tensors (shape torch.Size([0])) are passed through
        # untouched so absent scales do not break the per-scale modules.
        yh = [bp(y) if y.shape != torch.Size([0]) else y
              for bp, y in zip(self.bp, yh)]
        return (yl, yh)
class BNReLUWaveCoeffs(nn.Module):
    """ Applies batch normalization followed by a relu

    Args:
        C (int): number of channels
        bp (bool): If true, applies bn+relu to the magnitude of the bandpass
            coefficients. If false, is applying bn+relu to the lowpass coeffs.
    """
    def __init__(self, C, bp=True):
        super().__init__()
        self.bp = bp
        if bp:
            # Bandpass coefficients carry 6 orientations per channel,
            # normalized jointly after folding orientation into channels.
            self.BN = nn.BatchNorm2d(6*C)
        else:
            self.BN = nn.BatchNorm2d(C)
        self.ReLU = nn.ReLU()

    def forward(self, x):
        """ Applies nonlinearity to the input x """
        if self.bp:
            # Assumes x is a 6-D complex-as-last-dim tensor, presumably
            # (N, C, 6, H, W, 2) -- TODO confirm against the dtcwt layers.
            s = x.shape
            # Move the orientation dimension to the channel
            x = x.view(s[0], s[1]*s[2], s[3], s[4], s[5])
            # Phase of each complex coefficient (detached via .data).
            θ = torch.atan2(x.data[..., 1], x.data[..., 0])
            # Magnitude goes through BN + ReLU; phase is preserved.
            r = mag(x, complex=True)
            r_new = self.ReLU(self.BN(r))
            # Recombine the rectified magnitude with the original phase.
            y = torch.stack((r_new * torch.cos(θ), r_new * torch.sin(θ)), dim=-1)
            # Reshape to a 6D tensor again
            y = y.view(s[0], s[1], s[2], s[3], s[4], s[5])
        else:
            y = self.ReLU(self.BN(x))
        return y
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Projet : Editeur, Compilateur et Micro-Ordinateur pour
un langage assembleur.
Nom du fichier : 04-04-ROM.py
Identification : 04-04-ROM
Titre : Mémoire ROM
Auteurs : Francis Emond, Malek Khattech,
Mamadou Dia, Marc-André Jean
Date : 15-04-2017
Description : Mémoire ROM du Micro-Ordinateur.
Le module ``ROM``
================================
Ce module présente la classe ROM qui est la représentation de la
mémoire ROM du micro-ordinateur. C'est elle qui lit ou écrit en mémoire
selon l'adresse indiqué dans le bus. Si c'est une lecture, la mémoire
renvoit la valeur sur la ligne Data du bus.
"""
__author__ = "Francis Emond, Malek Khattech, Mamadou Dia, Marc-Andre Jean"
__version__ = "1.0"
__status__ = "Production"
# Import the required modules.  The module filenames start with digits,
# so a plain "import" statement cannot name them directly.
try:
    modEnum = __import__("05-Enum")
    modBus = __import__("04-01-Bus")
except ImportError:
    # Fallback when running from the project root: load from Modules/.
    import importlib
    modEnum = importlib.import_module("Modules.05-Enum")
    modBus = importlib.import_module("Modules.04-01-Bus")
# Convenience alias.
MODE = modEnum.MODE
class ROM:
    """
    class ROM
    ========================
    Read-only memory of the micro-computer.  A program can be uploaded
    into it (much like flashing a USB micro-controller) and, on every
    clock event, the component inspects the bus to serve a read or a
    write at the addressed cell.

    :example:
    >>> test = ROM(modBus.Bus())
    """

    # Constructor.
    def __init__(self, bus):
        """
        Build the ROM and attach it to *bus*.

        :example:
        >>> test = ROM(modBus.Bus())

        :param bus: Bus component of the micro-computer.
        :type bus: Bus
        """
        self.clock = False
        # Register this component as a bus listener.
        self.bus = bus
        self.bus.register(self)
        # Backing storage: one int per memory cell, addresses 0..0x40FB.
        self._data = [0] * (0x40FB + 1)
        return

    def event(self):
        """
        Handle a bus event (clock tick).

        Serves a read by publishing the addressed cell on the bus data
        line, or a write by storing the bus data line into the cell.

        :example:
        >>> bus = modBus.Bus()
        >>> test = ROM(bus)
        >>> test.event()
        >>> bus.event()
        """
        # Addresses beyond the ROM range are not ours to handle.
        if self.bus.address > 0x40FB:
            return
        if self.bus.mode == MODE.READ:
            # Publish the addressed cell on the bus data line.
            self.bus.data = self._data[self.bus.address]
        elif self.bus.mode == MODE.WRITE:
            # Store the bus data line into the addressed cell.
            self._data[self.bus.address] = self.bus.data
        return

    def uploadProgram(self, bytecode):
        """
        Load *bytecode* into the ROM.

        :example:
        >>> bus = modBus.Bus()
        >>> test = ROM(bus)
        >>> test.uploadProgram([0xFAFA, 0xAFAF, 0x0000, 0xFFFF])

        :param bytecode: executable program, a list of 16-bit ints.
        :type bytecode: int[]
        """
        size = 0x40FB + 1
        # Copy the program into the start of the ROM.
        for address, word in enumerate(bytecode[:size]):
            self._data[address] = word
        # Zero-fill whatever the program does not occupy.
        for address in range(len(bytecode), size):
            self._data[address] = 0x0000
        return
# Activation des doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
|
from unittest import TestCase
from spectrum_plot import create_fft_plot
class Test(TestCase):
def create_fft_plot(self):
"""
only checking that it compiles
:return:
"""
try:
plot = create_fft_plot()
plot.update_raw_data([1, 2, 3, 4, 5], seconds=3)
except Exception as e:
self.fail(e)
|
import random
class BasicReplayMemory:
    """Fixed-capacity ring buffer of experience frames."""

    def __init__(self, size=2000):
        self.size = size    # maximum number of stored frames
        self.memory = []    # backing store; grows until `size` is reached
        self.index = 0      # total number of frames ever saved

    def save(self, frame):
        """Store *frame*, overwriting the oldest entry once the buffer is full."""
        if len(self.memory) >= self.size:
            self.memory[self.index % self.size] = frame
        else:
            self.memory.append(frame)
        self.index += 1

    def sample(self, batch_size):
        """Return *batch_size* frames drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExtractImagePatches gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ExtractImagePatchesGradTest(test.TestCase):
  """Gradient-checking for ExtractImagePatches op."""

  # Each case gives an input shape plus the op's ksizes/strides/rates
  # attributes; cases cover unit and non-unit strides and dilation rates.
  _TEST_CASES = [
      {
          'in_shape': [2, 5, 5, 3],
          'ksizes': [1, 1, 1, 1],
          'strides': [1, 2, 3, 1],
          'rates': [1, 1, 1, 1],
      },
      {
          'in_shape': [2, 7, 7, 3],
          'ksizes': [1, 3, 3, 1],
          'strides': [1, 1, 1, 1],
          'rates': [1, 1, 1, 1],
      },
      {
          'in_shape': [2, 8, 7, 3],
          'ksizes': [1, 2, 2, 1],
          'strides': [1, 1, 1, 1],
          'rates': [1, 1, 1, 1],
      },
      {
          'in_shape': [2, 7, 8, 3],
          'ksizes': [1, 3, 2, 1],
          'strides': [1, 4, 3, 1],
          'rates': [1, 1, 1, 1],
      },
      {
          'in_shape': [1, 15, 20, 3],
          'ksizes': [1, 4, 3, 1],
          'strides': [1, 1, 1, 1],
          'rates': [1, 2, 4, 1],
      },
      {
          'in_shape': [2, 7, 8, 1],
          'ksizes': [1, 3, 2, 1],
          'strides': [1, 3, 2, 1],
          'rates': [1, 2, 2, 1],
      },
      {
          'in_shape': [2, 8, 9, 4],
          'ksizes': [1, 2, 2, 1],
          'strides': [1, 4, 2, 1],
          'rates': [1, 3, 2, 1],
      },
  ]

  @test_util.run_deprecated_v1
  def testGradient(self):
    """Numeric-vs-analytic gradient check for every case and padding mode."""
    # Set graph seed for determinism.
    random_seed = 42
    random_seed_lib.set_random_seed(random_seed)

    with self.cached_session():
      for test_case in self._TEST_CASES:
        np.random.seed(random_seed)
        in_shape = test_case['in_shape']
        in_val = constant_op.constant(
            np.random.random(in_shape), dtype=dtypes.float32)

        for padding in ['VALID', 'SAME']:
          out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
                                                    test_case['strides'],
                                                    test_case['rates'], padding)
          out_shape = out_val.get_shape().as_list()

          err = gradient_checker.compute_gradient_error(in_val, in_shape,
                                                        out_val, out_shape)

          self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testConstructGradientWithLargeImages(self):
    """Gradient graph construction must not hang on large inputs."""
    batch_size = 4
    height = 1024
    width = 1024
    ksize = 5
    images = variable_scope.get_variable('inputs',
                                         (batch_size, height, width, 1))
    patches = array_ops.extract_image_patches(images,
                                              ksizes=[1, ksize, ksize, 1],
                                              strides=[1, 1, 1, 1],
                                              rates=[1, 1, 1, 1],
                                              padding='SAME')
    # Github issue: #20146
    # tf.image.extract_image_patches() gradient very slow at graph construction
    # time
    gradients = gradients_impl.gradients(patches, images)
    # Won't time out.
    self.assertIsNotNone(gradients)

  def _VariableShapeGradient(self, test_shape_pattern):
    """Use test_shape_pattern to infer which dimensions are of
    variable size.
    """
    # Set graph seed for determinism.
    random_seed = 42
    random_seed_lib.set_random_seed(random_seed)

    with self.test_session():
      for test_case in self._TEST_CASES:
        np.random.seed(random_seed)
        in_shape = test_case['in_shape']
        # Keep a dimension's static size where the pattern is None,
        # otherwise mark it as unknown in the placeholder shape.
        test_shape = [
            x if x is None else y for x, y in zip(test_shape_pattern, in_shape)
        ]
        in_val = array_ops.placeholder(shape=test_shape, dtype=dtypes.float32)

        feed_dict = {in_val: np.random.random(in_shape)}
        for padding in ['VALID', 'SAME']:
          out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
                                                    test_case['strides'],
                                                    test_case['rates'], padding)
          out_val_tmp = out_val.eval(feed_dict=feed_dict)
          out_shape = out_val_tmp.shape

          err = gradient_checker.compute_gradient_error(in_val, in_shape,
                                                        out_val, out_shape)
          self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def test_BxxC_Gradient(self):
    """Batch and channel dims static; spatial dims unknown."""
    self._VariableShapeGradient([-1, None, None, -1])

  @test_util.run_deprecated_v1
  def test_xHWx_Gradient(self):
    """Spatial dims static; batch and channel dims unknown."""
    self._VariableShapeGradient([None, -1, -1, None])

  @test_util.run_deprecated_v1
  def test_BHWC_Gradient(self):
    """All four dims static."""
    self._VariableShapeGradient([-1, -1, -1, -1])

  @test_util.run_deprecated_v1
  def test_AllNone_Gradient(self):
    """All four dims unknown."""
    self._VariableShapeGradient([None, None, None, None])
if __name__ == '__main__':
test.main()
|
x = 6


def example():
    """Mutate the module-level ``x`` via the ``global`` statement."""
    global x
    print(x)
    x += 5
    print(x)


example()


def example2():
    """Read ``x`` without ``global``: work on a local copy and return it."""
    local_copy = x
    print(local_copy)
    local_copy += 5
    print(local_copy)
    return local_copy


x = example2()
print(x)
|
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time
import random
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
import os
import requests
import re
import sys
# NOTE(review): prompting at import time makes this module unusable as a
# library; consider moving these prompts under a __main__ guard.
username = input('Введите ваш логин: ')
password = input('Введите ваш пороль: ')
class InstagramBot():
    def __init__(self,username,password):
        """Store the credentials and start a headless Firefox driver."""
        self.username = username
        self.password = password
        options = Options()
        # Run without a visible browser window.
        options.add_argument("--headless")
        self.browser = webdriver.Firefox(options=options)
def close_browser(self):
self.browser.close()
self.browser.quit()
def login(self):
browser = self.browser
browser.get('https://www.instagram.com/')
time.sleep(random.randrange(4 ,6))
username_input = browser.find_element_by_name("username")
username_input.clear()
username_input.send_keys(username)
time.sleep(5)
password_input = browser.find_element_by_name("password")
password_input.clear()
password_input.send_keys(password)
password_input = browser.find_element_by_xpath("/html/body/div[1]/section/main/article/div[2]/div[1]/div/form/div/div[3]/button/div").click()
time.sleep(10)
    def like_photo_by_hashtag(self, hashtag):
        """Open the hashtag feed, collect post links and like the first few."""
        browser = self.browser
        browser.get(f'https://www.instagram.com/explore/tags/{hashtag}/')
        time.sleep(5)
        # Scroll a few times so more posts get loaded into the page.
        for i in range(1, 4):
            browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
            time.sleep(random.randrange(3, 5))
        hrefs = browser.find_elements_by_tag_name('a')
        # Keep only links pointing to individual posts ("/p/...").
        posts_urls = [item.get_attribute('href') for item in hrefs if "/p/" in item.get_attribute('href')]
        print(posts_urls)
        # Like only the first two posts; long sleeps to look less bot-like.
        for url in posts_urls[0:2]:
            try:
                browser.get(url)
                time.sleep(5)
                like_button = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/article/div[3]/section[1]/span[1]/button').click()
                time.sleep(random.randrange(50,60))
            except Exception as ex:
                print(ex)
        self.close_browser()
def xpath_exists(self,url):
browser = self.browser
try:
browser.find_element_by_xpath(url)
exist = True
except NoSuchElementException:
exist = False
return exist
    def put_exactly_like(self, userpost):
        """Open the single post at *userpost* and like it, if it exists."""
        browser = self.browser
        browser.get(userpost)
        time.sleep(4)
        # Header shown on Instagram's "page not found" screen.
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print("Такого поста не существует, проверьте URL")
            self.close_browser()
        else:
            print("Пост успешно найден, ставим лайк!")
            time.sleep(2)
            like_button = "/html/body/div[1]/section/main/div/div/article/div[3]/section[1]/span[1]/button"
            browser.find_element_by_xpath(like_button).click()
            time.sleep(2)
            print(f"Лайк на пост: {userpost} поставлен!")
        self.close_browser()
    # Collects links to all of the user's posts.
    def get_all_posts_urls(self,userpage):
        """Scroll the profile page and save every post URL to text files."""
        browser = self.browser
        browser.get(userpage)
        time.sleep(4)
        # Header shown on Instagram's "page not found" screen.
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print("Такого пользователя не существует, проверьте URL")
            self.close_browser()
        else:
            print("Пользователь успешно найден, ставим лайки!")
            time.sleep(2)
            posts_count = int(browser.find_element_by_xpath("/html/body/div[1]/section/main/div/header/section/ul/li[1]/span/span").text)
            # Roughly 12 posts load per scroll page.
            loops_count = int(posts_count / 12)
            print(loops_count)
            posts_urls = []
            for i in range(0, loops_count):
                hrefs = browser.find_elements_by_tag_name('a')
                # Keep only links pointing to individual posts ("/p/...").
                hrefs = [item.get_attribute('href') for item in hrefs if "/p/" in item.get_attribute('href')]
                for href in hrefs:
                    posts_urls.append(href)
                browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(random.randrange(2, 4))
                print(f"Итерация #{i}")
            file_name = userpage.split("/")[-2]
            # Raw (possibly duplicated) list of post URLs.
            with open(f'{file_name}.txt', 'a') as file:
                for post_url in posts_urls:
                    file.write(post_url + "\n")
            # Deduplicated list, written to a second "_set" file.
            set_posts_urls = set(posts_urls)
            set_posts_urls = list(set_posts_urls)
            with open(f'{file_name}_set.txt', 'a') as file:
                for post_url in set_posts_urls:
                    file.write(post_url + '\n')
    # Likes posts given a link to the user's account.
    def put_many_likes(self, userpage):
        """Collect all post URLs for *userpage* and like them one by one."""
        browser = self.browser
        # Builds the "<name>_set.txt" file read back below.
        self.get_all_posts_urls(userpage)
        file_name = userpage.split("/")[-2]
        time.sleep(3)
        browser.get(userpage)
        time.sleep(4)
        with open(f'{file_name}_set.txt') as file:
            urls_list = file.readlines()
            for post_url in urls_list[0:1000]:
                try:
                    browser.get(post_url)
                    time.sleep(2)
                    like_button = "/html/body/div[1]/section/main/div/div/article/div[3]/section[1]/span[1]/button"
                    browser.find_element_by_xpath(like_button).click()
                    # time.sleep(random.randrange(80, 100))
                    time.sleep(2)
                    print(f"Лайк на пост: {post_url} успешно поставлен!")
                except Exception as ex:
                    print(ex)
                    self.close_browser()
        self.close_browser()
    # Unsubscribes from followed users.
    def unsubscribe_for_all_users(self,username):
        """Unfollow accounts in batches of 10 per page reload."""
        browser = self.browser
        browser.get(f"https://www.instagram.com/{username}/")
        time.sleep(random.randrange(3,5))
        following_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(3) > a")
        following_count = following_button.find_element_by_tag_name("span").text
        # Counts above 999 are rendered with a thousands separator.
        if ',' in following_count:
            following_count = int(''.join(following_count.split(',')))
        else:
            following_count = int(following_count)
        print(f"Количество подписок: {following_count}")
        time.sleep(random.randrange(2,4))
        loops_count = int(following_count / 10) + 1
        print(f"Кол-во перезагрузок страницы:{loops_count}")
        following_users_dict = {}
        for loop in range(1, loops_count +1):
            count = 10
            browser.get(f"https://www.instagram.com/{username}/")
            time.sleep(random.randrange(3,5))
            # Open the "following" dialog.
            following_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(3) > a")
            following_button.click()
            time.sleep(random.randrange(3,5))
            # Collect the list of followed accounts from the dialog.
            following_div_block = browser.find_element_by_class_name("PZuss")
            following_users = following_div_block.find_elements_by_tag_name("li")
            time.sleep(random.randrange(3,5))
            for user in following_users:
                if not count:
                    break
                user_url = user.find_element_by_tag_name("a").get_attribute("href")
                user_name = user_url.split("/")[-2]
                # NOTE(review): keyed by our own `username`, so every
                # iteration overwrites one entry -- `user_name` was likely
                # intended here.
                following_users_dict[username] = user_url
                following_button = user.find_element_by_tag_name("button").click()
                time.sleep(random.randrange(3,10))
                # Confirm the unfollow in the popup dialog.
                unfollow_button = browser.find_element_by_css_selector("body > div:nth-child(20) > div > div > div > div.mt3GC > button.aOOlW.-Cab_").click()
                print(f"Итерация №{count} >>>> Отписался от пользователя {user_name}")
                count -= 1
                time.sleep(random.randrange(10,15))
        self.close_browser()
    # Subscribes to every follower of the given account.
    def get_all_followers(self,userpage):
        """Scrape the follower list of *userpage* and follow each account.

        Followed URLs are recorded in ``<name>_subscribe_list.txt`` so a
        re-run can skip accounts that were already processed.
        """
        browser = self.browser
        browser.get(userpage)
        time.sleep(4)
        file_name = userpage.split('/')[-2]
        # Per-account working directory for the url files.
        if os.path.exists(f"{file_name}"):
            print(f"Папка {file_name} уже существует!")
        else:
            print(f"Создаём папку пользователя {file_name}")
            os.mkdir(file_name)
        # Header shown on Instagram's "page not found" screen.
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print(f"Пользователя {file_name} не существует, проверьте URL")
            self.close_browser()
        else:
            print(f"Пользователь {file_name} успешно найден, начинаем скачивать ссылки на подписичиков!")
            time.sleep(2)
            followers_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(2) > a > span")
            followers_count = followers_button.text
            # Parse abbreviated counts like "1.2k" or "1,234".
            if "k" in followers_count:
                followers_count = (''.join(followers_count.split('k')))
                followers_count = int(''.join(followers_count.split('.')))
                followers_count = followers_count * 100
            elif "," in followers_count:
                followers_count = (''.join(followers_count.split(',')))
                followers_count = int(followers_count.split(' ')[0])
            else:
                followers_count = int(followers_count.split(' ')[0])
            print("Количество подписчиков " + str(followers_count))
            time.sleep(2)
            # Roughly 12 followers load per scroll of the dialog.
            loops_count = int(followers_count / 12)
            if loops_count > 300:
                loops_count = 100
            print(f"Число итераций: {loops_count}")
            time.sleep(4)
            followers_button.click()
            time.sleep(4)
            # Scrollable container of the followers dialog.
            followers_ul = browser.find_element_by_xpath("/html/body/div[5]/div/div/div[2]")
            print(followers_ul)
            try:
                followers_urls = []
                for i in range(1, loops_count + 1):
                    browser.execute_script("arguments[0].scrollTop = arguments[0].scrollHeight", followers_ul)
                    time.sleep(random.randrange(2, 4))
                    print(f"Итерация #{i}")
                all_urls_div = followers_ul.find_elements_by_tag_name("li")
                for url in all_urls_div:
                    url = url.find_element_by_tag_name("a").get_attribute("href")
                    followers_urls.append(url)
                # Save the follower links to a file.
                with open(f"{file_name}/{file_name}.txt", "a") as text_file:
                    for link in followers_urls:
                        text_file.write(link + "\n")
                with open(f"{file_name}/{file_name}.txt") as text_file:
                    users_urls = text_file.readlines()
                for user in users_urls[0:100000]:
                    try:
                        # Skip accounts already followed on a previous run.
                        try:
                            with open(f'{file_name}/{file_name}_subscribe_list.txt','r') as subscribe_list_file:
                                lines = subscribe_list_file.readlines()
                                if user in lines:
                                    print(f'Мы уже подписаны на {user}, переходим к следующему пользователю!')
                                    continue
                        except Exception as ex:
                            print('Файл со ссылками ещё не создан!')
                            # print(ex)
                        browser = self.browser
                        browser.get(user)
                        page_owner = user.split("/")[-2]
                        if self.xpath_exists("/html/body/div[1]/section/main/div/header/section/div[1]/div/a"):
                            print("Это наш профиль, уже подписан, пропускаем итерацию!")
                        elif self.xpath_exists(
                                "/html/body/div[2]/section/main/div/header/section/div[1]/div[1]/div/div[2]/div/span/span[1]/button"):
                            print(f"Уже подписаны, на {page_owner} пропускаем итерацию!")
                        else:
                            time.sleep(random.randrange(4, 8))
                            # Private accounts show a different page layout.
                            if self.xpath_exists(
                                    "/html/body/div[1]/section/main/div/div/article/div[1]/div/h2"):
                                try:
                                    follow_button = browser.find_element_by_css_selector(
                                        "#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > button").click()
                                    print(f'Запросили подписку на пользователя {page_owner}. Закрытый аккаунт!')
                                except Exception as ex:
                                    print(ex)
                            else:
                                try:
                                    if self.xpath_exists("/html/body/div[2]/section/main/div/header/section/div[1]/div[1]/div/div/div/span/span[1]/button"):
                                        follow_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > div > span > span.vBF20._1OSdk > button").click()
                                        print(f'Подписались на пользователя {page_owner}. Открытый аккаунт!')
                                    else:
                                        follow_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > div > span > span.vBF20._1OSdk > button").click()
                                        print(f'Подписались на пользователя {page_owner}. Открытый аккаунт!')
                                except Exception as ex:
                                    print(ex)
                        # Record the URL in the subscribe-list file; created on
                        # first use, appended afterwards.
                        with open(f'{file_name}/{file_name}_subscribe_list.txt','a') as subscribe_list_file:
                            subscribe_list_file.write(user)
                        time.sleep(random.randrange(8, 12))
                    except Exception as ex:
                        print(ex)
                        self.close_browser()
            except Exception as ex:
                print(ex)
                self.close_browser()
        self.close_browser()
def statistics(self,username):
browser = self.browser
browser.get(f"https://www.instagram.com/{username}/")
time.sleep(random.randrange(3,5))
posts = browser.find_element_by_css_selector("span.-nal3").text
posts = int(posts.split(' ')[0])
print(posts)
publication = browser.find_element_by_css_selector("div.Nnq7C:nth-child(1) > div:nth-child(1)").click()
likes = []
time.sleep(random.randrange(3,5))
browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button")
like = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button").text
like = int(like.split(' ')[0])
likes.append(like)
print(likes)
nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a").click()
time.sleep(random.randrange(3,5))
print(likes)
a = 1
while a < posts:
if self.xpath_exists("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button"):
like = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button").text
like = int(like.split(' ')[0])
likes.append(like)
a+=1
print(likes)
if self.xpath_exists("/html/body/div[5]/div[1]/div/div/a[2]"):
nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a[2]").click()
pass
else:
pass
time.sleep(random.randrange(4,6))
elif self.xpath_exists("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/span"):
browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/span").click()
time.sleep(2)
likes_in_video = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/div[4]").text
like = int(likes_in_video.split(' ')[0])
likes.append(like)
a+=1
print(likes)
browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/div[1]").click()
time.sleep(random.randrange(4,6))
if self.xpath_exists("/html/body/div[5]/div[1]/div/div/a[2]"):
nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a[2]").click()
pass
else:
pass
time.sleep(random.randrange(4,6))
pass
def listsum(numList):
theSum = 0
for i in numList:
theSum = theSum + i
return theSum
print("Количество лайков на странице:" + str(listsum(likes)))
self.close_browser
# --- Interactive entry point -------------------------------------------------
# Presents a small console menu and dispatches to one of the bot's features.
# NOTE(review): relies on `username` and `password` being defined earlier in
# this file (outside this section) — verify before refactoring.
print("1-Подписка на подписчиков конкурента")
print("2-Отписка от всех подписок")
print("3-статистика вашего аккаунта(Лайки + подписчики)")
function = input("Выберите одну из функций: ")
if function == "1" :
    # Option 1: follow all followers of a competitor account.
    concurent = input("Вставьте ссылку на конкурента: ")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.get_all_followers(concurent)
elif function == "2":
    # Option 2: unsubscribe from every account currently followed.
    userpage = input("Введите никнейм вашего аккаунта:")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.unsubscribe_for_all_users(userpage)
elif function == "3":
    # Option 3: collect like statistics for the given account.
    # SECURITY NOTE(review): the hard-coded credentials below override the
    # username/password gathered earlier and are exposed in source control;
    # they should be removed and the user's own credentials used instead.
    username = "bot_by_dr"
    password = "danila200342"
    name = input("Введите никнейм вашего аккаунта:")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.statistics(name)
# Generated by Django 2.2 on 2019-06-26 16:20
import curriculum.models.utils
from django.db import migrations, models
import django.db.models.deletion
# Year/month choice lists that the auto-generated migration repeated verbatim
# in every dated field; hoisted into constants so each field definition stays
# readable. The generated values are identical to the original literals:
# years run 2029 -> 1900 descending, and the month labels keep the models'
# original spelling (including the 'febuary' typo) so the migration state
# continues to match the model definitions exactly.
_YEAR_CHOICES = [(year, year) for year in range(2029, 1899, -1)]
_MONTH_CHOICES = [
    (1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'),
    (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'),
    (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december'),
]


class Migration(migrations.Migration):
    """Initial schema for the curriculum app: resumes plus their catalog
    models (Certification, Language, Project, Skill) and the per-user item
    tables that link them to a Resume/User."""

    initial = True

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Certification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='title')),
                ('authority', models.CharField(max_length=200, verbose_name='authority')),
                ('url', models.URLField(blank=True, max_length=300, verbose_name='URL')),
            ],
            options={
                'unique_together': {('title', 'authority')},
            },
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('name', models.CharField(max_length=50, primary_key=True, serialize=False, unique=True, verbose_name='name')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True, verbose_name='title')),
                ('description', models.TextField(blank=True, max_length=3000, verbose_name='description')),
                ('url', models.URLField(blank=True, max_length=300, verbose_name='URL')),
            ],
        ),
        migrations.CreateModel(
            name='Resume',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=150, verbose_name='First name')),
                ('lastname', models.CharField(max_length=150, verbose_name='Last name')),
                ('title', models.CharField(blank=True, max_length=200, null=True, verbose_name='Title')),
                ('resume', models.TextField(blank=True, help_text="Short profile's description", max_length=3000, null=True, verbose_name='resume')),
                ('image', models.ImageField(blank=True, null=True, upload_to='user_images', verbose_name='image')),
                ('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='phone')),
                ('website', models.URLField(blank=True, max_length=300, null=True, verbose_name='website')),
                ('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='email')),
                ('city', models.CharField(blank=True, max_length=100, null=True, verbose_name='city')),
                ('country', models.CharField(blank=True, max_length=100, null=True, verbose_name='country')),
                ('hobbies', models.TextField(blank=True, max_length=1000, null=True, verbose_name='hobbies')),
                ('skype', models.CharField(blank=True, max_length=100, null=True, verbose_name='Skype ID')),
                ('stackoverflow', models.IntegerField(blank=True, null=True, verbose_name='StackOverflow ID')),
                ('github', models.CharField(blank=True, max_length=300, null=True, verbose_name='GitHub ID')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='resumes', to='accounts.User')),
            ],
            options={
                'verbose_name': 'resume',
            },
        ),
        migrations.CreateModel(
            name='Skill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True, verbose_name='name')),
                ('tags', models.CharField(blank=True, max_length=500, verbose_name='tags')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Training',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('school', models.CharField(max_length=150, verbose_name='school')),
                ('degree', models.CharField(max_length=150, verbose_name='degree')),
                ('result', models.CharField(blank=True, help_text='GPA or Percentage', max_length=150, verbose_name='result')),
                ('start_year', models.IntegerField(choices=_YEAR_CHOICES, default=curriculum.models.utils.current_year, verbose_name='start year')),
                ('start_month', models.IntegerField(choices=_MONTH_CHOICES, default=curriculum.models.utils.current_month, verbose_name='start month')),
                ('end_year', models.IntegerField(blank=True, choices=_YEAR_CHOICES, null=True, verbose_name='end year')),
                ('end_month', models.IntegerField(blank=True, choices=_MONTH_CHOICES, null=True, verbose_name='end month')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='trainings', to='curriculum.Resume')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='trainings', to='accounts.User')),
            ],
        ),
        migrations.CreateModel(
            name='Experience',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='title')),
                ('entreprise', models.CharField(max_length=200, verbose_name='entreprise')),
                ('description', models.TextField(blank=True, max_length=3000, verbose_name='description')),
                ('type', models.CharField(choices=[(None, 'unknown'), ('SALAR', 'salaried'), ('CHIEF', 'founder/chief'), ('FREEL', 'freelance/chief'), ('OTHER', 'other')], max_length=5, null=True, verbose_name='type')),
                ('start_year', models.IntegerField(choices=_YEAR_CHOICES, default=curriculum.models.utils.current_year, verbose_name='start year')),
                ('start_month', models.IntegerField(choices=_MONTH_CHOICES, default=curriculum.models.utils.current_month, verbose_name='start month')),
                ('still', models.BooleanField(default=True, verbose_name='still in office')),
                ('end_year', models.IntegerField(blank=True, choices=_YEAR_CHOICES, null=True, verbose_name='end year')),
                ('end_month', models.IntegerField(blank=True, choices=_MONTH_CHOICES, null=True, verbose_name='end month')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='experiences', to='curriculum.Resume')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='experiences', to='accounts.User')),
            ],
        ),
        migrations.CreateModel(
            name='SkillItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('level', models.CharField(choices=[(None, 'unknown'), ('B', 'beginner'), ('S', 'skilled'), ('A', 'advanced'), ('E', 'expert')], max_length=1, verbose_name='level')),
                ('category', models.CharField(blank=True, max_length=50, verbose_name='category')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='skills', to='curriculum.Resume')),
                ('skill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='curriculum.Skill')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='skills', to='accounts.User')),
            ],
            options={
                'unique_together': {('skill', 'user')},
            },
        ),
        migrations.CreateModel(
            name='ProjectItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_year', models.IntegerField(choices=_YEAR_CHOICES, default=curriculum.models.utils.current_year, verbose_name='start year')),
                ('start_month', models.IntegerField(choices=_MONTH_CHOICES, default=curriculum.models.utils.current_month, verbose_name='start month')),
                ('still', models.BooleanField(default=True, verbose_name='still contributor')),
                ('end_year', models.IntegerField(blank=True, choices=_YEAR_CHOICES, null=True, verbose_name='end year')),
                ('end_month', models.IntegerField(blank=True, choices=_MONTH_CHOICES, null=True, verbose_name='end month')),
                ('weight', models.IntegerField(choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')], default=1, verbose_name='weight')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='curriculum.Project')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='curriculum.Resume')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='accounts.User')),
            ],
            options={
                'unique_together': {('user', 'project')},
            },
        ),
        migrations.CreateModel(
            name='LanguageItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('level', models.CharField(choices=[('NOT', 'Notion'), ('BAS', 'basic'), ('ADV', 'advanced'), ('PRO', 'professional'), ('BIL', 'bilingual')], default='NOT', max_length=5, verbose_name='level')),
                # BUG FIX: the generated file had on_delete='name' (a string).
                # on_delete must be a callable such as CASCADE; the string
                # would raise a TypeError when a Language row is deleted.
                # CASCADE matches every other ForeignKey in this migration.
                ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='curriculum.Language')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='languages', to='curriculum.Resume')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='languages', to='accounts.User')),
            ],
            options={
                'unique_together': {('language', 'user')},
            },
        ),
        migrations.CreateModel(
            name='CertificationItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('end_year', models.IntegerField(blank=True, choices=_YEAR_CHOICES, null=True, verbose_name='end year')),
                ('end_month', models.IntegerField(blank=True, choices=_MONTH_CHOICES, null=True, verbose_name='end month')),
                ('certification', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='curriculum.Certification')),
                ('resume', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='certifications', to='curriculum.Resume')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='certifications', to='accounts.User')),
            ],
            options={
                'unique_together': {('certification', 'user')},
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
###############################################################################
#
# RunCommand
# Executes a SQL command for a Postgres database.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RunCommand(Choreography):
    """Choreo wrapper that executes a SQL command against a Postgres database."""

    def __init__(self, temboo_session):
        """
        Build a RunCommand Choreo bound to *temboo_session*, which must
        carry a valid set of Temboo credentials.
        """
        super(RunCommand, self).__init__(temboo_session, '/Library/PostgreSQL/RunCommand')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return RunCommandInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in a RunCommandResultSet."""
        return RunCommandResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap a started execution in a RunCommandChoreographyExecution."""
        return RunCommandChoreographyExecution(session, exec_id, path)
class RunCommandInputSet(InputSet):
    """
    Input parameters for the RunCommand Choreo. Populate an instance with
    the setters below before executing the Choreo.
    """

    def set_DatabaseName(self, value):
        """(required, string) The name of the database to connect to."""
        super(RunCommandInputSet, self)._set_input('DatabaseName', value)

    def set_Password(self, value):
        """(required, password) The password for the database user."""
        super(RunCommandInputSet, self)._set_input('Password', value)

    def set_Port(self, value):
        """(optional, integer) The database port. Defaults to 5432."""
        super(RunCommandInputSet, self)._set_input('Port', value)

    def set_ResponseFormat(self, value):
        """(optional, string) Result format, json (default) or xml; only used with a SELECT statement."""
        super(RunCommandInputSet, self)._set_input('ResponseFormat', value)

    def set_SQL(self, value):
        """(required, multiline) A SQL statement to execute."""
        super(RunCommandInputSet, self)._set_input('SQL', value)

    def set_Server(self, value):
        """(required, string) The name or IP address of the database server."""
        super(RunCommandInputSet, self)._set_input('Server', value)

    def set_Username(self, value):
        """(required, string) The database username."""
        super(RunCommandInputSet, self)._set_input('Username', value)

    def set_Version(self, value):
        """(optional, decimal) The Postgres version: 8 or 9 (the default)."""
        super(RunCommandInputSet, self)._set_input('Version', value)
class RunCommandResultSet(ResultSet):
    """
    Outputs of a completed RunCommand execution; read them via the getters.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin `str`; it is kept
        # unchanged for backward compatibility with keyword callers.
        return json.loads(str)

    def get_ResultData(self):
        """
        The data returned from the database. Only populated for SELECT
        statements; formatted as JSON or XML per the ResponseFormat input.
        """
        return self._output.get('ResultData', None)

    def get_Success(self):
        """
        (boolean) Result of the database command: "true" when the SQL
        statement executed successfully.
        """
        return self._output.get('Success', None)
class RunCommandChoreographyExecution(ChoreographyExecution):
    """Tracks a running RunCommand Choreo and builds its result set."""

    def _make_result_set(self, response, path):
        return RunCommandResultSet(response, path)
|
import os
import sys
import logging
from logging.handlers import RotatingFileHandler
def get_logger():
    """Build (or fetch) the application logger configured from the environment.

    Reads ``LOGS_FOLDER_PATH`` and ``APP_NAME`` from the environment, makes
    sure the log directory exists, and attaches a stdout handler plus a
    5 MB rotating file handler (5 backups).

    Returns:
        logging.Logger: the logger named after ``APP_NAME``.
    """
    logs_folder_path = os.getenv('LOGS_FOLDER_PATH')
    app_name = os.getenv('APP_NAME')
    # makedirs(exist_ok=True) replaces the isdir/mkdir pair: no TOCTOU race
    # and it also creates missing parent directories.
    os.makedirs(logs_folder_path, exist_ok=True)
    log_file_path = os.path.join(logs_folder_path, app_name + '.log')
    logger = logging.getLogger(app_name)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: logging.getLogger returns the same object on every call, so
    # the original stacked a new pair of handlers (and duplicated every log
    # line) each time get_logger() ran. Attach handlers only once.
    if not logger.handlers:
        log_format = logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(log_format)
        logger.addHandler(console_handler)
        # RotatingFileHandler creates the file itself, so the original's
        # explicit open/close "touch" was unnecessary.
        file_handler = RotatingFileHandler(log_file_path, maxBytes=(1048576 * 5), backupCount=5)
        file_handler.setFormatter(log_format)
        logger.addHandler(file_handler)
    logger.info('logger created')
    return logger
def write_env_file(logger):
    """Persist the app's configuration environment variables to ./.env.

    Overwrites .env with KEY=value lines taken from the current process
    environment.  Missing variables are written literally as "None",
    mirroring os.getenv's default (same output as the previous hand-rolled
    version).

    Args:
        logger: a logging.Logger used to report progress.
    """
    logger.info('writing to .env file')
    # Keys are written in this exact, fixed order to keep the file stable
    # across runs (diff-friendly) and identical to the previous behavior.
    keys = (
        'APP_NAME',
        'LOGS_FOLDER_PATH',
        'DESIRED_FOLLOWING',
        'IFC_EMAIL_SENT',
        'LAN',
        'SMTP_PORT',
        'SMTP_SERVER',
        'SENDER_EMAIL',
        'EMAIL_PASSWORD',
        'RECEIVER_EMAIL',
        'IG_USERNAME',
        'IG_PASSWORD',
    )
    with open(".env", "w") as f:
        for key in keys:
            f.write(f"{key}={os.getenv(key)}\n")
|
import os
import unittest
from collections import namedtuple
import pbn
# Directory containing this test module; used to locate the test.pbn fixture.
DIR = os.path.dirname(__file__)
class PBNTest(unittest.TestCase):
    """Tests for the pbn parser, driven by the bundled test.pbn fixture."""
    def test_parse_pbn_string(self):
        """
        Parse the fixture file and spot-check boards, deals, the auction,
        the contract and the play records.
        """
        with open(os.path.join(DIR, 'test.pbn')) as handle:
            boards = pbn.parse_pbn_string(handle.read())
        self.assertEqual(len(boards), 2)
        first, second = boards
        self.assertEqual(first['board'], "1")
        self.assertEqual(second['event'], "Men's Pairs - SPRING NABC 1970")
        expected_south = ['SK', 'SQ', 'ST', 'S7', 'S4', 'HK', 'HJ', 'H7', 'H5', 'DA', 'DQ', 'D2', 'C7']
        self.assertEqual(second['deal']['S'], expected_south)
        expected_west = ['SA', 'ST', 'S2', 'H7', 'H5', 'H2', 'DA', 'D6', 'D3', 'D2', 'CQ', 'CT', 'C2']
        self.assertEqual(first['deal']['W'], expected_west)
        self.assertEqual(second['declarer'], "S")
        self.assertEqual(second['contract'], {'level': 4, 'denomination': 'H', 'risk': None})
        expected_auction = [('1NT', ''), ('Pass', ''), ('3NT', ''), ('Pass', ''), ('Pass', ''), ('Pass', '')]
        self.assertEqual(first['auction'], expected_auction)
        self.assertEqual(first['play'], [])
        self.assertEqual(len(second['play']), 13)
|
#
# ----------------------------------------------------------------------------------------------------
# DESCRIPTION
# ----------------------------------------------------------------------------------------------------
'''
ASGI config for kit_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
'''
#
# ----------------------------------------------------------------------------------------------------
# IMPORTS
# ----------------------------------------------------------------------------------------------------
import os
from django.core.asgi import get_asgi_application
#
# ----------------------------------------------------------------------------------------------------
# CODE
# ----------------------------------------------------------------------------------------------------
# Point Django at the project settings before the application is constructed;
# setdefault lets a deployment override the module via the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kit_django.settings')
## [ django.core.handlers.asgi.ASGIHandler ] - ASGI handler.
# Module-level ASGI callable that servers (uvicorn/daphne) import and run.
application = get_asgi_application()
|
""" Problem Set 5 - Problem 3 - CiphertextMessage
For this problem, the graders will use our implementation of the Message and PlaintextMessage classes, so don't worry if you did not get the previous parts correct.
Given an encrypted message, if you know the shift used to encode the message, decoding it is trivial.
If message is the encrypted message, and s is the shift used to encrypt the message, then apply_shift(message, 26-s) gives you the original plaintext message. Do you see why?
Fill in the methods in the class CiphertextMessage according to the specifications in ps6.py.
"""
class CiphertextMessage(Message):
    def __init__(self, text):
        '''
        Initializes a CiphertextMessage object
        text (string): the message's text
        a CiphertextMessage object has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        self.message_text = text
        self.valid_words = load_words(WORDLIST_FILENAME)
    def decrypt_message(self):
        '''
        Decrypt self.message_text by trying every possible shift value
        and find the "best" one. We will define "best" as the shift that
        creates the maximum number of real words when we use apply_shift(shift)
        on the message text. If s is the original shift value used to encrypt
        the message, then we would expect 26 - s to be the best shift value
        for decrypting it.
        Note: if multiple shifts are equally good such that they all create
        the maximum number of valid words, any of those shifts (and their
        corresponding decrypted messages) may be returned.
        Returns: a tuple of the best shift value used to decrypt the message
        and the decrypted message text using that shift value
        '''
        max_count = 0
        # Sensible defaults in case no shift yields even one valid word
        # (previously these were unbound and raised UnboundLocalError).
        shift_value = 0
        decrypted_msg = self.message_text
        for i in range(26):
            # Apply the candidate shift once per i, not once per word.
            candidate = super(CiphertextMessage, self).apply_shift(i)
            # BUG FIX: the word counter must restart for every candidate
            # shift; previously it accumulated across shifts, so later
            # shifts always appeared "better" regardless of content.
            word_counter = 0
            for word in candidate.split(' '):
                if is_word(self.valid_words, word):
                    word_counter += 1
            if word_counter > max_count:
                max_count = word_counter
                shift_value = i
                decrypted_msg = candidate
        return (shift_value, decrypted_msg)
|
from math import log10
from utility_functions import PSNR, str2bool, ssim
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import time
import torch.optim as optim
import os
from models import save_model
import numpy as np
from octree import OctreeNodeList
from options import *
from datasets import LocalImplicitDataset
from models import HierarchicalACORN, PositionalEncoding
import argparse
from pytorch_memlab import LineProfiler, MemReporter, profile
from torch.utils.checkpoint import checkpoint_sequential, checkpoint
class Trainer():
    """Trains a HierarchicalACORN model to fit a single dataset item."""
    def __init__(self, opt):
        """
        :param opt: option dictionary (see options.py) controlling training.
        """
        self.opt = opt
        # Fixed seed so training runs are reproducible.
        torch.manual_seed(0b10101010101010101010101010101010)
    #@profile
    def train(self, model, dataset):
        """Fit `model` to dataset[0], growing the model/octree hierarchy.

        Each sub-model is trained against the residual left by the previous
        ones; MSE/PSNR/SSIM are logged to TensorBoard and checkpoints are
        written every `save_every` epochs.

        BUG FIX: this method previously read the module-global `opt` in
        several places (PositionalEncoding, mode checks, add_model/to);
        it now consistently uses self.opt so Trainer works when imported.
        """
        print("Training on " + self.opt['device'])
        model = model.to(self.opt['device'])
        model_optim = optim.Adam(model.models[-1].parameters(), lr=self.opt["g_lr"],
            betas=(self.opt["beta_1"], self.opt["beta_2"]))
        writer = SummaryWriter(os.path.join('tensorboard_ACORN', self.opt['save_name']))
        start_time = time.time()
        loss = nn.MSELoss().to(self.opt["device"])
        step = 0
        item = dataset[0].unsqueeze(0).to(self.opt['device'])
        # MSE threshold corresponding to the target PSNR for data in [0, 1].
        target_PSNR = 40
        MSE_limit = 10 ** ((-1*target_PSNR + 20*log10(1.0))/10)
        octree = OctreeNodeList(item)
        model.residual = torch.zeros_like(octree.data, device=self.opt['device']).detach()
        pe = PositionalEncoding(self.opt)
        num_models = 1
        octree_subdiv_start = 4
        for _ in range(octree_subdiv_start):
            octree.next_depth_level()
        for model_num in range(num_models):
            for epoch in range(self.opt['epoch_number'], self.opt['epochs']):
                self.opt["epoch_number"] = epoch
                model.zero_grad()
                block_error_sum = 0
                blocks, block_positions = octree.depth_to_blocks_and_block_positions(
                    octree_subdiv_start + model_num)
                block_positions = torch.tensor(block_positions,
                    device=self.opt['device'])
                block_positions = pe(block_positions)
                feat_grids = model.models[-1].feature_encoder(block_positions)
                model.models[-1].feat_grid_shape[0] = feat_grids.shape[0]
                feat_grids = feat_grids.reshape(model.models[-1].feat_grid_shape)
                # Accumulate gradients block by block to bound peak memory.
                for b in range(len(blocks)):
                    block_output = F.interpolate(feat_grids[b:b+1], size=blocks[b].shape[2:],
                        mode='bilinear' if "2D" in self.opt['mode'] else "trilinear",
                        align_corners=False)
                    block_output = model.models[-1].feature_decoder(block_output)
                    # Add the (frozen) residual from previous models, then
                    # compare against the matching crop of the target item.
                    if('2D' in self.opt['mode']):
                        block_output += model.residual[:,:,
                            blocks[b].start_position[0]:blocks[b].start_position[0]+block_output.shape[2],
                            blocks[b].start_position[1]:blocks[b].start_position[1]+block_output.shape[3]].detach()
                        block_item = item[:,:,
                            blocks[b].start_position[0]:blocks[b].start_position[0]+block_output.shape[2],
                            blocks[b].start_position[1]:blocks[b].start_position[1]+block_output.shape[3]]
                    else:
                        block_output += model.residual[:,:,
                            blocks[b].start_position[0]:blocks[b].start_position[0]+block_output.shape[2],
                            blocks[b].start_position[1]:blocks[b].start_position[1]+block_output.shape[3],
                            blocks[b].start_position[2]:blocks[b].start_position[2]+block_output.shape[4]].detach()
                        block_item = item[:,:,
                            blocks[b].start_position[0]:blocks[b].start_position[0]+block_output.shape[2],
                            blocks[b].start_position[1]:blocks[b].start_position[1]+block_output.shape[3],
                            blocks[b].start_position[2]:blocks[b].start_position[2]+block_output.shape[4]]
                    # Scale each block's loss so the sum is a mean over blocks.
                    block_error = loss(block_output, block_item) * (1/len(blocks))
                    block_error.backward(retain_graph=True)
                    block_error_sum += block_error.detach()
                model_optim.step()
                if(step % 100 == 0):
                    # Expensive full-image evaluation every 100 steps.
                    with torch.no_grad():
                        reconstructed = model.get_full_img(octree)
                        psnr = PSNR(reconstructed, item, torch.tensor(1.0))
                        s = ssim(reconstructed, item)
                        print("Iteration %i, MSE: %0.04f, PSNR (dB): %0.02f, SSIM: %0.02f" % \
                            (epoch, block_error_sum.item(), psnr.item(), s.item()))
                        writer.add_scalar('MSE', block_error_sum.item(), step)
                        writer.add_scalar('PSNR', psnr.item(), step)
                        writer.add_scalar('SSIM', s.item(), step)
                        if(len(model.models) > 1):
                            writer.add_image("Network"+str(len(model.models)-1)+"residual",
                                ((reconstructed-model.residual)[0]+0.5).clamp_(0, 1), step)
                        writer.add_image("reconstruction", reconstructed[0].clamp_(0, 1), step)
                elif(step % 5 == 0):
                    print("Iteration %i, MSE: %0.04f" % \
                        (epoch, block_error_sum.item()))
                step += 1
                if(epoch % self.opt['save_every'] == 0):
                    save_model(model, self.opt)
                    octree.save(self.opt)
                    print("Saved model and octree")
            if(model_num < num_models-1):
                # Grow the hierarchy: freeze current output as the residual,
                # refine the octree where error is high, add a new sub-model.
                print("Adding higher-resolution model")
                with torch.no_grad():
                    model.residual = model.get_full_img(octree).detach()
                model.calculate_block_errors(octree, loss)
                model.add_model(self.opt)
                model.to(self.opt['device'])
                print("Last error: " + str(block_error_sum.item()))
                model.errors.append(1.0)
                octree.split_from_error(MSE_limit)
                model_optim = optim.Adam(model.models[-1].parameters(), lr=self.opt["g_lr"],
                    betas=(self.opt["beta_1"], self.opt["beta_2"]))
                # Freeze the previous sub-model so only the new one trains.
                for param in model.models[-2].parameters():
                    param.requires_grad = False
                self.opt['epoch_number'] = 0
                torch.cuda.empty_cache()
        end_time = time.time()
        total_time = end_time - start_time
        print("Time to train: %0.01f minutes" % (total_time/60))
        save_model(model, self.opt)
        print("Saved model")
# Command-line entry point: build the option dict from CLI overrides, then
# construct the dataset/model/trainer and run training under a line profiler.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train on an input that is 2D')
    # Every option defaults to None; only explicitly-passed flags override
    # the defaults from Options.get_default() below.
    parser.add_argument('--mode',default=None,help='The type of input - 2D, 3D')
    parser.add_argument('--feat_model',default=None,type=str,help='Feature extraction model')
    parser.add_argument('--upscale_model',default=None,type=str,help='Upscaling model')
    parser.add_argument('--residual_weighing',default=None,type=str2bool,help='Use residual weighing')
    parser.add_argument('--data_folder',default=None,type=str,help='File to train on')
    parser.add_argument('--num_training_examples',default=None,type=int,help='Frames to use from training file')
    parser.add_argument('--save_folder',default=None, help='The folder to save the models folder into')
    parser.add_argument('--save_name',default=None, help='The name for the folder to save the model')
    parser.add_argument('--num_channels',default=None,type=int,help='Number of channels to use')
    parser.add_argument('--spatial_downscale_ratio',default=None,type=float,help='Ratio for spatial downscaling')
    parser.add_argument('--min_dimension_size',default=None,type=int,help='Minimum dimension size')
    parser.add_argument('--scaling_mode',default=None,type=str,help='Scaling mode, learned, magnitude, channel, or none')
    parser.add_argument('--fine_tuning',default=None,type=str2bool,help='Only train upscaling model')
    parser.add_argument('--num_lstm_layers',default=None,type=int,help='num lstm layers')
    parser.add_argument('--training_seq_length',default=None,type=int,help='length of sequence to train LSTM with')
    parser.add_argument('--num_blocks',default=None,type=int, help='Num of conv-batchnorm-relu blocks per gen/discrim')
    parser.add_argument('--base_num_kernels',default=None,type=int, help='Num conv kernels in lowest layer')
    parser.add_argument('--pre_padding',default=None,type=str2bool, help='Padding before entering network')
    parser.add_argument('--kernel_size',default=None, type=int,help='Conv kernel size')
    parser.add_argument('--stride',default=None, type=int,help='Conv stride length')
    parser.add_argument('--scale_factor_start',default=None, type=float,help='Where to start in-dist training')
    parser.add_argument('--scale_factor_end',default=None, type=float,help='Where to stop in-dist training')
    parser.add_argument('--cropping_resolution',default=None, type=int,help='Cropping resolution')
    parser.add_argument('--time_cropping_resolution',default=None, type=int, help="Crop time")
    parser.add_argument('--x_resolution',default=None, type=int,help='x')
    parser.add_argument('--y_resolution',default=None, type=int,help='y')
    parser.add_argument('--z_resolution',default=None, type=int,help='z')
    parser.add_argument('--train_distributed',type=str2bool,default=None, help='Use distributed training')
    parser.add_argument('--device',type=str,default=None, help='Device to use')
    parser.add_argument('--gpus_per_node',default=None, type=int,help='Whether or not to save discriminators')
    parser.add_argument('--num_nodes',default=None, type=int,help='Whether or not to save discriminators')
    parser.add_argument('--ranking',default=None, type=int,help='Whether or not to save discriminators')
    parser.add_argument('--save_generators',default=None, type=str2bool,help='Whether or not to save generators')
    parser.add_argument('--save_discriminators',default=None, type=str2bool,help='Whether or not to save discriminators')
    parser.add_argument('--alpha_1',default=None, type=float,help='Reconstruction loss coefficient')
    parser.add_argument('--alpha_2',default=None, type=float,help='Adversarial loss coefficient')
    parser.add_argument('--alpha_3',default=None, type=float,help='Soft physical constraint loss coefficient')
    parser.add_argument('--alpha_4',default=None, type=float,help='mag and angle loss coeff')
    parser.add_argument('--alpha_5',default=None, type=float,help='gradient loss coeff')
    parser.add_argument('--alpha_6',default=None, type=float,help='streamline loss coeff')
    parser.add_argument('--streamline_res',default=None, type=int,help='num seeds per dim')
    parser.add_argument('--streamline_length',default=None, type=int,help='timesteps to do streamlines')
    parser.add_argument('--adaptive_streamlines',default=None, type=str2bool,help='Adaptive particle sampling for streamlines')
    parser.add_argument('--periodic',default=None, type=str2bool,help='is data periodic')
    parser.add_argument('--generator_steps',default=None, type=int,help='Number of generator steps to take')
    parser.add_argument('--discriminator_steps',default=None, type=int,help='Number of discriminator steps to take')
    parser.add_argument('--epochs',default=None, type=int,help='Number of epochs to use')
    parser.add_argument('--minibatch',default=None, type=int,help='Size of minibatch to train on')
    parser.add_argument('--num_workers',default=None, type=int,help='Number of workers for dataset loader')
    parser.add_argument('--g_lr',default=None, type=float,help='Learning rate for the generator')
    parser.add_argument('--d_lr',default=None, type=float,help='Learning rate for the discriminator')
    parser.add_argument('--beta_1',default=None, type=float,help='')
    parser.add_argument('--beta_2',default=None, type=float,help='')
    parser.add_argument('--gamma',default=None, type=float,help='')
    parser.add_argument('--regularization',default=None, type=str,help='')
    parser.add_argument('--physical_constraints',default=None,type=str,help='none, soft, or hard')
    parser.add_argument('--patch_size',default=None, type=int,help='Patch size for inference')
    parser.add_argument('--training_patch_size',default=None, type=int,help='Patch size for training')
    parser.add_argument('--upsample_mode',default=None, type=str,help='Method for upsampling')
    parser.add_argument('--zero_noise',default=None, type=str2bool,help='Whether or not to use zero noise during upscaling')
    parser.add_argument('--load_from',default=None, type=str,help='Load a model to continue training')
    parser.add_argument('--save_every',default=None, type=int,help='How often to save during training')
    args = vars(parser.parse_args())
    # Canonical project folder layout, resolved relative to this file.
    file_folder_path = os.path.dirname(os.path.abspath(__file__))
    project_folder_path = os.path.join(file_folder_path, "..")
    input_folder = os.path.join(project_folder_path, "TrainingData")
    output_folder = os.path.join(project_folder_path, "Output")
    save_folder = os.path.join(project_folder_path, "SavedModels")
    prof = LineProfiler()
    prof.enable()
    # NOTE(review): there is no else-branch for --load_from, so passing it
    # currently trains nothing — confirm whether resume support was intended.
    if(args['load_from'] is None):
        opt = Options.get_default()
        # Only non-None CLI values override the defaults.
        for k in args.keys():
            if args[k] is not None:
                opt[k] = args[k]
        dataset = LocalImplicitDataset(opt)
        model = HierarchicalACORN(opt)
        trainer = Trainer(opt)
        trainer.train(model, dataset)
    print(prof.display())
    prof.disable()
#!/usr/bin/env python3
#Written for python3.9
from random import randint
import sys, time
def backspace(count=1,out=sys.stdout):
    """Erase `count` previously printed characters from `out`.

    Emits `count` backspace characters and flushes so the erase is visible
    immediately.
    """
    out.write(count * '\b')
    out.flush()
def bogoprint(msg, delay=0.2):
    '''Print a string with a random "guessing" effect.

    Arguments:
    msg - the string to be printed (ASCII-only)
    delay - the average delay between correctly printed characters (in seconds)
    '''
    # Spread the per-character delay across the ~95 printable-ASCII guesses
    # needed on average to land on the right character.
    per_guess_delay = delay / (126-32 + 1)
    for target in (ord(ch) for ch in msg):
        while True:
            # Guess a printable ASCII character; 32-126 are all
            # single-width printables.
            guess = randint(32,126)
            sys.stdout.write(chr(guess))
            sys.stdout.flush()
            if guess == target:
                # Correct guess stays on screen; move to the next character.
                break
            backspace()
            time.sleep(per_guess_delay)
    sys.stdout.write('\n')
    sys.stdout.flush()
#function demo for direct execution.
# Usage: bogoprint.py [message] [delay_seconds]
if __name__ == "__main__":
    msg = "Hello, world!"
    delay = 0.2
    #unpack provided arguments if they exist
    if len(sys.argv) > 1:
        msg = sys.argv[1]
    if len(sys.argv) > 2:
        delay = float(sys.argv[2])
    bogoprint(msg, delay)
|
def test(first,second,third):
print(first,second,third)
test(1,2,3)
first = [1,2,3,4,5,6]
second = {
"num":1,
"save":False,
"content":[1,2,3,4,5],
}
third =tuple([3,4,5,6,7])
test(first,second,1)
def test(first,second,third):
print(first,second,third)
test(1,2,3)
first = [1,2,3,4,5,6]
second = {
"num":1,
"save":False,
"content":[1,2,3,4,5],
}
test(first,second,1)
def test_prarm(first,second,third=None):
if third == None :
print(f"third is {third}")
return
if isinstance(first,str):
print(first)
else:
raise Exception()
if isinstance(second,dict):
print(second)
first ="content"
second ={
"num":1,
"save":False,
"content":[1,2,3,4,5],
}
third = 3
test_prarm(first=first,second=second,third = 5 )
content
{'num': 1, 'save': False, 'content': [1, 2, 3, 4, 5]}
######################################################
def changeable_param(*param):
print(param)
changeable_param(1,"Your",True,[1,2,3,4],{"content":1})
##############################
def test_param(first,second,third):
print(first,second,third)
content = [1,2,3]
test_param(*content)
content = [1,2,3,4]
test_param(*content)
def test_param(first,second,third=None):
print(first,second,third)
content = [1,2]
test_param(*content) |
# Guard the torch import with a helpful install hint; the original bare
# `except:` also swallowed KeyboardInterrupt/SystemExit, so catch only
# import failures here.
try:
    import torch
except ImportError:
    print("(!) Code in `autogoal.contrib.torch` requires `pytorch==0.1.4`.")
    print("(!) You can install it with `pip install autogoal[torch]`.")
    raise
from ._bert import BertEmbedding, BertTokenizeEmbedding
|
import numpy as np
from abc import ABC, abstractmethod
def compute_iou(preds,gts):
    """
    Compute pairwise intersection over union between two lists of bounding
    boxes using broadcasting.
    :param preds: matrix of predicted bounding boxes [NP x 4]
    :param gts: matrix of ground truth bounding boxes [NG x 4]
    :return: an [NP x NG] matrix of IOU values
    """
    # Broadcast preds along axis 1 and gts along axis 0 so every pair meets:
    # preds: NP x 4 -> NP x 1 x 4 ; gts: NG x 4 -> 1 x NG x 4
    preds = np.expand_dims(preds, 1)
    gts = np.expand_dims(gts, 0)
    def box_area(boxes):
        # Inclusive-pixel convention (+1 per side); negative extents mean
        # "no overlap" and are clamped to zero area.
        widths = np.clip(boxes[..., 2] - boxes[..., 0] + 1, 0, None)
        heights = np.clip(boxes[..., 3] - boxes[..., 1] + 1, 0, None)
        return widths * heights
    # Intersection corners: max of mins, min of maxes.
    corner_min = np.maximum(gts[..., :2], preds[..., :2])
    corner_max = np.minimum(gts[..., 2:], preds[..., 2:])
    intersection = box_area(np.concatenate([corner_min, corner_max], -1))
    union = box_area(preds) + box_area(gts) - intersection
    # Small epsilon keeps the division defined for degenerate boxes.
    return intersection / (union + 1e-11)
class AbstractMeanAveragePrecision(ABC):
    """
    Abstract class for implementing mAP measures
    """
    def __init__(self, num_aps, percentages=True, count_all_classes=True, top_k=None):
        """
        Contruct the Mean Average Precision metric
        :param num_aps: number of average precision metrics to compute. E.g., we can compute different APs for different
                        IOU overlap thresholds
        :param percentages: whether the metric should return percentages (i.e., 0-100 range rather than 0-1)
        :param count_all_classes: whether to count all classes when computing mAP. If false, classes which do not have
                        any ground truth label but do have associated predictions are counted (they will have
                        an AP equal to zero), otherwise, only classes for which there is at least one ground truth
                        label will count. It is useful to set this to True for imbalanced datasets for which not
                        all classes are in the ground truth labels.
        :param top_k: the K to be considered in the top-k criterion. If None, a standard mAP will be computed
        """
        # Per-image accumulators, concatenated at evaluate() time.
        self.true_positives = []
        self.confidence_scores = []
        self.predicted_classes = []
        self.gt_classes = []
        self.num_aps = num_aps
        self.percentages = percentages
        self.count_all_classes = count_all_classes
        self.K = top_k
        self.names = []
        self.short_names = []
    def get_names(self):
        return self.names
    def get_short_names(self):
        return self.short_names
    def add(self,
            preds,
            labels
            ):
        """
        Add predictions and labels of a single image and matches predictions to ground truth boxes
        :param preds: dictionary of predictions following the format below. While "boxes" and "scores" are
                        mandatory, other properties can be added (they can be used to compute matchings).
                        {
                            'boxes' : [[245,128,589,683], [425,68,592,128]],
                            'scores' : [0.8, 0.4],
                            'nouns' : [3, 5],
                            'verbs': [8, 11],
                            'ttcs': [1.25, 1.8]
                        }
        :param labels: dictionary of labels following a similar format (no 'scores').
        :return matched: a list of pairs of predicted/matched gt boxes
        """
        matched = []
        if len(preds) > 0:
            predicted_boxes = preds['boxes']
            predicted_scores = preds['scores']
            predicted_classes = self._map_classes(preds)
            # Keep track of correctly matched boxes for the different AP metrics
            true_positives = np.zeros((len(predicted_boxes), self.num_aps))
            if len(labels) > 0:
                # get GT boxes
                gt_boxes = labels['boxes']
                # IOU between all predictions and gt boxes
                ious = compute_iou(predicted_boxes, gt_boxes)
                # keep track of GT boxes which have already been matched
                gt_matched = np.zeros((len(gt_boxes), self.num_aps))
                # greedy matching: from highest to lowest score
                for i in predicted_scores.argsort()[::-1]:
                    # get overlaps related to this prediction
                    overlaps = ious[i].reshape(-1, 1)  # NGT x 1
                    # check if this prediction can be matched to the GT labels
                    # this will give different set of matchings for the different AP metrics
                    matchings = self._match({k: p[i] for k, p in preds.items()}, labels, overlaps)  # NGT x NR
                    # replicate overlaps to match shape of matching (different AP metrics)
                    overlaps = np.tile(overlaps, [1, matchings.shape[1]])  # NGT x NR
                    # do not allow to match a matched GT box
                    matchings[gt_matched == 1] = 0  # not a valid match, NGT x NR
                    # remove overlaps corresponding to boxes which are not a match
                    overlaps[matchings == 0] = -1
                    jj = overlaps.argmax(0)  # get indexes of maxima wrt GT
                    # get values of matching obtained at maxima
                    # these indicate if the matchings are correct
                    i_matchings = matchings[jj, range(len(jj))]
                    jj_matched = jj.copy()
                    jj_matched[~i_matchings] = -1
                    # set true positive to 1 if we obtained a matching
                    true_positives[i, i_matchings] = 1
                    # set the ground truth as matched if we obtained a matching
                    gt_matched[jj, range(len(jj))] += i_matchings
                    matched.append(jj_matched)
                # remove the K highest score false positives
                if self.K is not None and self.K > 1:
                    # number of FP to remove:
                    K = (self.K - 1) * len(labels['boxes'])
                    # indexes to sort the predictions
                    order = predicted_scores.argsort()[::-1]
                    # sort the true positives labels
                    sorted_tp = (true_positives[order, :]).astype(float)
                    # invert to obtain the sorted false positive labels
                    sorted_fp = 1 - sorted_tp
                    # flag the first K false positives (NaN = discounted)
                    sorted_tp[(sorted_fp.cumsum(0) <= K) & (sorted_fp == 1)] = np.nan
                    true_positives = sorted_tp
                    predicted_scores = predicted_scores[order]
                    predicted_classes = predicted_classes[order]
                self.gt_classes.append(self._map_classes(labels))
            # append list of true positives and confidence scores
            self.true_positives.append(true_positives)
            self.confidence_scores.append(predicted_scores)
            self.predicted_classes.append(predicted_classes)
        else:
            # BUG FIX: this branch previously tested `len(preds) > 0`, which
            # is always False here, so ground truth of images with no
            # predictions was silently dropped and recall was overestimated.
            if len(labels) > 0:
                self.gt_classes.append(self._map_classes(labels))
        if len(matched) > 0:
            return np.stack(matched, 0)
        else:
            return np.zeros((0, self.num_aps))
    def _map_classes(self, preds):
        """
        Return the classes related to the predictions. These are used to specify how to compute mAP.
        :param preds: the predictions
        :return: num_ap x len(pred) array specifying the class of each prediction according to the different AP measures
        """
        return np.vstack([preds['nouns']] * self.num_aps).T
    def _compute_prec_rec(self, true_positives, confidence_scores, num_gt):
        """
        Compute precision and recall curve from a true positive list and the related scores
        :param true_positives: set of true positives
        :param confidence_scores: scores associated to the true positives
        :param num_gt: number of ground truth labels for current class
        :return: prec, rec: lists of precisions and recalls
        """
        # sort true positives by confidence score
        tps = true_positives[confidence_scores.argsort()[::-1]]
        tp = tps.cumsum()
        fp = (1 - tps).cumsum()
        # safe division which turns x/0 to zero
        prec = self._safe_division(tp, tp + fp)
        rec = self._safe_division(tp, num_gt)
        return prec, rec
    def _safe_division(self, a, b):
        """
        Divide a by b avoiding a DivideByZero exception
        Inputs:
            a, b: either vectors or scalars
        Outputs:
            either a vector or a scalar
        """
        a_array = isinstance(a, np.ndarray)
        b_array = isinstance(b, np.ndarray)
        if (not a_array) and (not b_array):
            # both scalars
            # anything divided by zero should be zero
            if b == 0:
                return 0
            # BUG FIX: the scalar/scalar path previously fell through to the
            # array-indexing code below and crashed with a TypeError; return
            # the plain quotient here.
            return a / b
        # numerator scalar, denominator vector
        if b_array and not a_array:
            # turn a into a vector
            a = np.array([a] * len(b))
        # numerator vector, denominator scalar
        if not b_array and a_array:
            # turn b into a vector
            b = np.array([b] * len(a))
        # turn all cases in which b=0 into a 0/1 division (result is 0)
        zeroden = b == 0
        b[zeroden] = 1
        a[zeroden] = 0
        return a / b
    def _compute_ap(self, prec, rec):
        """
        Python implementation of Matlab VOC AP code.
        1) Make precision monotonically decreasing. 2) Then compute AP by numerical integration.
        :param prec: vector of precision values
        :param rec: vector of recall values
        :return: average precision
        """
        # pad precision and recall
        mrec = np.concatenate(([0], rec, [1]))
        mpre = np.concatenate(([0], prec, [0]))
        # make precision monotonically decreasing
        for i in range(len(mpre) - 2, 0, -1):
            mpre[i] = np.max((mpre[i], mpre[i + 1]))
        # consider only indexes in which the recall changes
        i = np.where(mrec[1:] != mrec[:-1])[0] + 1
        # compute the area under the curve
        return np.sum((mrec[i] - mrec[i - 1]) * mpre[i])
    def _compute_mr(self, prec, rec):
        """
        Compute maximum recall
        """
        return np.max(rec)
    def evaluate(self, measure='AP'):
        """
        Compute AP/MR for all classes, then averages
        :param measure: 'AP' for average precision, 'MR' for maximum recall
        :return: a single value if num_aps == 1, otherwise a tuple of values
        """
        metrics = []
        # compute the different AP values for the different metrics
        gt_classes = np.concatenate(self.gt_classes)
        predicted_classes = np.concatenate(self.predicted_classes)
        true_positives = np.concatenate(self.true_positives)
        confidence_scores = np.concatenate(self.confidence_scores)
        for i in range(self.num_aps):
            # the different per-class AP values
            measures = []
            _gt_classes = gt_classes[:, i]
            _predicted_classes = predicted_classes[:, i]
            _true_positives = true_positives[:, i]
            _confidence_scores = confidence_scores
            if self.count_all_classes:
                classes = np.unique(np.concatenate([_gt_classes, _predicted_classes]))
            else:
                classes = np.unique(_gt_classes)
            # iterate over classes
            for c in classes:
                # get true positives and number of GT values
                tp = _true_positives[_predicted_classes == c]
                cs = _confidence_scores[_predicted_classes == c]
                ngt = np.sum(_gt_classes == c)
                # check if the list of TP is non empty
                if len(tp) > 0:
                    # remove invalid (top-k discounted) TP values and related confidence scores
                    valid = ~np.isnan(tp)
                    tp, cs = tp[valid], cs[valid]
                # if both TP and GT are non empty, then compute AP
                if len(tp) > 0 and ngt > 0:
                    prec, rec = self._compute_prec_rec(tp, cs, ngt)
                    if measure == 'AP':
                        this_measure = self._compute_ap(prec, rec)
                    elif measure == 'MR':  # maximum recall
                        this_measure = self._compute_mr(prec, rec)
                    # turn into percentage
                    if self.percentages:
                        this_measure = this_measure * 100
                    # append to the list
                    measures.append(this_measure)
                # if both are empty, the AP is zero
                elif not (len(tp) == 0 and ngt == 0):
                    measures.append(0)
            # append the mAP value
            metrics.append(np.mean(measures))
        # return single value or list of values
        values = list(metrics)
        if len(values) == 1:
            return values[0]
        else:
            return tuple(values)
    @abstractmethod
    def _match(self, pred, gt_predictions, ious):
        """
        Return matches of a given prediction to a set of GT labels
        :param pred: the prediction dictionary
        :param gt_predictions: the gt predictions dictionary
        :param ious: the computed IOU matrix (NGT x NPRED)
        :return: a num_preds x num_ap matrix specifying possible matchings depending on the prediction and metric
        """
class ObjectOnlyMeanAveragePrecision(AbstractMeanAveragePrecision):
    """mAP metric restricted to boxes and noun classes (no verbs / TTC)."""
    def __init__(self, iou_threshold=0.5, top_k=3, count_all_classes=False):
        """
        Construct the object-only mAP metric, which reports two measures:
          - Box + Noun mAP (averaged over noun classes)
          - Box AP (a single class covering all boxes)
        :param iou_threshold: minimum IOU for a predicted box to match a GT box
        :param top_k: top-K criterion forwarded to the base class
        :param count_all_classes: whether classes with no GT labels count in the average
        """
        super().__init__(2, top_k=top_k, count_all_classes=count_all_classes)
        self.iou_threshold = iou_threshold
        self.names = ["Box + Noun mAP", "Box AP"]
        self.short_names = ["map_box_noun", "ap_box"]
    def _map_classes(self, preds):
        """
        Associates each prediction to a class
        :param preds: the input predictions
        :return the matrix of classes associated to each prediction according to the evaluation measure
        """
        noun_classes = preds['nouns']
        # A constant class collapses the box-only measure into a single AP.
        single_class = np.ones(len(preds['nouns']))
        return np.vstack([noun_classes, single_class]).T
    def _match(self, pred, gt_predictions, ious):
        """
        Return matches of a given prediction to a set of GT predictions
        :param pred: the prediction dictionary
        :param gt_predictions: the gt predictions dictionary
        :param ious: the computed IOU matrix (NGT x NPRED)
        :return: a num_preds x num_ap matrix specifying possible matchings depending on the prediction and metric
        """
        noun_ok = (pred['nouns'] == gt_predictions['nouns'])
        box_ok = (ious.ravel() > self.iou_threshold)
        # Column 0: box AND noun must agree; column 1: box overlap alone.
        return np.vstack([box_ok & noun_ok, box_ok]).T
class OverallMeanAveragePrecision(AbstractMeanAveragePrecision):
    """Compute the full set of STA metrics: 8 class-agnostic APs and 4 noun-averaged mAPs."""

    def __init__(self, iou_threshold=0.5, ttc_threshold=0.25, top_k=5, count_all_classes=False):
        """
        Construct the overall mAP metric. Twelve measures are computed:
        class-agnostic APs for Box and each combination of Box with
        Noun/Verb/TTC, plus noun-averaged mAPs for Box + Noun and its
        combinations with Verb and TTC (see ``self.names``).
        :param iou_threshold: IOU threshold to check if a predicted box can be matched to a ground truth box
        :param ttc_threshold: TTC threshold to check if a predicted TTC is acceptable
        :param top_k: Top-K criterion for mAP. Discounts up to k-1 high scoring false positives
        :param count_all_classes: whether to also average across classes with no annotations. False is the default for many implementations.
        """
        super().__init__(12, top_k=top_k, count_all_classes=count_all_classes)
        self.iou_threshold = iou_threshold
        self.tti_threshold = ttc_threshold
        self.names = ['Box AP',
                      'Box + Noun AP',
                      'Box + Verb AP',
                      'Box + TTC AP',
                      'Box + Noun + Verb AP',
                      'Box + Noun + TTC AP',
                      'Box + Verb + TTC AP',
                      'Box + Noun + Verb + TTC AP',
                      'Box + Noun mAP',
                      'Box + Noun + Verb mAP',
                      'Box + Noun + TTC mAP',
                      'Box + Noun + Verb + TTC mAP']
        self.short_names = ['ap_box',
                            'ap_box_noun',
                            'ap_box_verb',
                            'ap_box_ttc',
                            'ap_box_noun_verb',
                            'ap_box_noun_ttc',
                            'ap_box_verb_ttc',
                            'ap_box_noun_verb_ttc',
                            'map_box_noun',
                            'map_box_noun_verb',
                            'map_box_noun_ttc',
                            'map_box_noun_verb_ttc']

    def _map_classes(self, preds):
        """
        Associate each prediction with a class per measure.
        :param preds: the input predictions
        :return: matrix of classes, one column per measure
        """
        noun_classes = preds['nouns']
        shared_class = np.ones(len(preds['nouns']))
        # First 8 columns: plain APs over a single shared class (no averaging);
        # last 4 columns: mAPs averaged over noun classes.
        return np.column_stack([shared_class] * 8 + [noun_classes] * 4)

    def _match(self, pred, gt_predictions, ious):
        """
        Return matches of a given prediction to a set of GT predictions.
        Rows correspond to GT entries, columns to the twelve measures.
        :param pred: the prediction dictionary
        :param gt_predictions: the gt predictions dictionary
        :param ious: the computed IOU matrix (NGT x NPRED)
        :return: boolean matrix of possible matchings per measure
        """
        noun_ok = (pred['nouns'] == gt_predictions['nouns'])
        box_ok = (ious.ravel() > self.iou_threshold)
        verb_ok = (pred['verbs'] == gt_predictions['verbs'])
        ttc_ok = (np.abs(pred['ttcs'] - gt_predictions['ttcs']) <= self.tti_threshold)
        columns = [
            box_ok,                               # ap_box
            box_ok & noun_ok,                     # ap_box_noun
            box_ok & verb_ok,                     # ap_box_verb
            box_ok & ttc_ok,                      # ap_box_ttc
            box_ok & noun_ok & verb_ok,           # ap_box_noun_verb
            box_ok & noun_ok & ttc_ok,            # ap_box_noun_ttc
            box_ok & verb_ok & ttc_ok,            # ap_box_verb_ttc
            box_ok & noun_ok & verb_ok & ttc_ok,  # ap_box_noun_verb_ttc
        ]
        # The four mAP measures reuse the noun-constrained matchings above.
        columns += [columns[1], columns[4], columns[5], columns[7]]
        return np.column_stack(columns)
class STAMeanAveragePrecision(AbstractMeanAveragePrecision):
    """Compute the four noun-averaged STA mAP metrics."""

    def __init__(self, iou_threshold=0.5, ttc_threshold=0.25, top_k=5, count_all_classes=False):
        """
        Construct the STA mAP metric. Four measures are computed:
          - Box + Noun mAP
          - Box + Noun + Verb mAP
          - Box + Noun + TTC mAP
          - Box + Noun + Verb + TTC mAP
        :param iou_threshold: IOU threshold to check if a predicted box can be matched to a ground truth box
        :param ttc_threshold: TTC threshold to check if a predicted TTC is acceptable
        :param top_k: Top-K criterion for mAP. Discounts up to k-1 high scoring false positives
        :param count_all_classes: whether to also average across classes with no annotations. False is the default for many implementations.
        """
        super().__init__(4, top_k=top_k, count_all_classes=count_all_classes)
        self.iou_threshold = iou_threshold
        self.tti_threshold = ttc_threshold
        self.names = ['Box + Noun mAP',
                      'Box + Noun + Verb mAP',
                      'Box + Noun + TTC mAP',
                      'Box + Noun + Verb + TTC mAP']
        self.short_names = ['map_box_noun',
                            'map_box_noun_verb',
                            'map_box_noun_ttc',
                            'map_box_noun_verb_ttc']

    def _map_classes(self, preds):
        """
        Associate each prediction with a class per measure.
        All four measures average over noun classes.
        :param preds: the input predictions
        :return: matrix of classes, one column per measure
        """
        noun_classes = preds['nouns']
        return np.column_stack([noun_classes] * 4)

    def _match(self, pred, gt_predictions, ious):
        """
        Return matches of a given prediction to a set of GT predictions.
        Rows correspond to GT entries, columns to the four measures.
        :param pred: the prediction dictionary
        :param gt_predictions: the gt predictions dictionary
        :param ious: the computed IOU matrix (NGT x NPRED)
        :return: boolean matrix of possible matchings per measure
        """
        noun_ok = (pred['nouns'] == gt_predictions['nouns'])
        box_ok = (ious.ravel() > self.iou_threshold)
        verb_ok = (pred['verbs'] == gt_predictions['verbs'])
        ttc_ok = (np.abs(pred['ttcs'] - gt_predictions['ttcs']) <= self.tti_threshold)
        base = box_ok & noun_ok
        return np.column_stack([
            base,                     # map_box_noun
            base & verb_ok,           # map_box_noun_verb
            base & ttc_ok,            # map_box_noun_ttc
            base & verb_ok & ttc_ok,  # map_box_noun_verb_ttc
        ])
|
import scrapy
import json
import os
from pathlib import Path
from bills.models import CommitteeDocument
# Project base directory: two levels up from this spider module.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# NOTE(review): 'commitee' is misspelled, but it must match the actual
# filename on disk — do not "fix" the string without renaming the file.
FILE_PATH = os.path.join(BASE_DIR, 'commitee_report_detail_urls.json')
class CommitteeReportSpider(scrapy.Spider):
    """Scrape committee-report detail endpoints and yield one item per report."""

    name = 'committeereport'

    def start_requests(self):
        """Yield a request for every URL in the input file that is not stored yet."""
        with open(FILE_PATH, 'r') as urls_file:
            candidate_urls = urls_file.read().split('\n')
        known_urls = CommitteeDocument.objects.values_list('request_url', flat=True).distinct()
        # Drop blanks and anything we already scraped.
        for url in set(filter(None, candidate_urls)) - set(known_urls):
            yield scrapy.Request(url)

    def parse(self, response):
        """Parse one detail response into a report item."""
        yield self.get_detail_data(response)

    def get_detail_data(self, response):
        """Extract the report fields from the JSON payload of ``response``."""
        payload = json.loads(response.text)
        report = {
            'title': payload['title'],
            'pdf_link': payload['download']['pdflink'],
        }
        # Map the API column names onto our item keys; unknown columns are ignored.
        column_keys = {
            'Category': 'category',
            'Report Type': 'report_type',
            'Report Number': 'report_number',
            'Date': 'date',
            'Committee': 'committee',
            'Associated Legislation': 'associated_legislation',
        }
        for col in payload['metadata']['columnnamevalueset']:
            key = column_keys.get(col['colname'])
            if key is None:
                continue
            value = col['colvalue']
            if key == 'date':
                # Collapse runs of whitespace inside the date string.
                value = ' '.join(value.split())
            report[key] = value
        report['request_url'] = response.request.url
        return report
#! /usr/bin/python2
from __future__ import print_function
import os
import sys
import glob
from optparse import OptionParser
# Set to True to also query the CVE-checker web service for per-check results
# (adds network round-trips; pulls in urllib + json only when enabled).
conf_latest_cve_result = False
if conf_latest_cve_result:
    try:
        import urllib2 as urllib  # Py2
    except:
        import urllib.request as urllib  # Py3: urlopen lives in urllib.request
    import json
    # Per-request network timeout in seconds.
    req_timeout = 2.0
def _data_url(url):
    """Fetch *url* and return the raw response body, or b"" on any
    network error or non-200 HTTP status."""
    try:
        response = urllib.urlopen(url, timeout=req_timeout)
    except (IOError, OSError):  # IOError is an alias of OSError on Py3
        return b""
    if response.getcode() in (None, 200):
        return response.read()
    return b""
def _json_url(url):
    """Fetch *url* and decode the body as JSON; return None if it is not valid JSON."""
    raw = _data_url(url)
    try:
        return json.loads(raw)
    except ValueError:
        return None
def log2stats(logname):
    """Parse one sync2git CVE-checker log into stats.

    Returns ``{'date': str, 'pkgs': {pkg: status}, 'mods': {mod: {pkg: status}}}``
    or None when the log is partial (no " -- End: " marker) or contains an
    unexpected line while packages are being read.
    """
    # 'mods' starts with a '' bucket that collects pkg lines until their
    # "Filtered Mod:" line names the module they belong to.
    ret = {'date' : None, 'pkgs' : {}, 'mods' : {'' : {}}}
    fn = os.path.basename(logname)
    # Derive a display date from the filename, e.g.
    # "sync2git-2021-01-01T00:00+0000.out.log" -> "2021-01-01 00:00Z".
    ret['date'] = fn[:-len(".out.log")].replace("T", " ").replace("+0000", "Z")
    if ret['date'].startswith("sync2git-"):
        ret['date'] = ret['date'][len("sync2git-"):]
    # Simple line-driven state machine: beg -> pkgs -> mods -> end.
    state = 'beg'
    for line in open(logname):
        if state == 'end':
            # Anything after the end marker means a malformed log.
            return None # Too harsh?
        if line.startswith(" -- End: "):
            state = 'end'
            continue
        if False: pass
        elif state == 'beg':
            # Skip preamble until the packages section starts.
            if line.startswith("Checking CVEs for packages:"):
                state = 'pkgs'
                continue
        elif state == 'mods':
            if False: pass
            elif line.startswith("Filtered Pkg:"):
                pkg = line[len("Filtered Pkg:"):]
                pkg = pkg.strip()
                pkg = pkg.split(': ')
                ret['mods'][''][pkg[0]] = pkg[1]
            elif line.startswith("Filtered Pkg (cached):"):
                pkg = line[len("Filtered Pkg (cached):"):]
                pkg = pkg.strip()
                pkg = pkg.split(': ')
                ret['mods'][''][pkg[0]] = pkg[1]
            elif line.startswith("Filtered Mod:"):
                # The pkgs accumulated under '' belong to this module;
                # start a fresh '' bucket for the next module.
                mod = line[len("Filtered Mod:"):]
                mod = mod.strip()
                ret['mods'][mod] = ret['mods']['']
                ret['mods'][''] = {}
        elif state == 'pkgs':
            if False: pass
            elif line.startswith("Checking CVEs for modules:"):
                state = 'mods'
            elif line.startswith("Filtered Pkg:"):
                pkg = line[len("Filtered Pkg:"):]
                pkg = pkg.strip()
                pkg = pkg.split(': ')
                ret['pkgs'][pkg[0]] = pkg[1]
            elif line.startswith("Filtered Pkg (cached):"):
                pkg = line[len("Filtered Pkg (cached):"):]
                pkg = pkg.strip()
                pkg = pkg.split(': ')
                ret['pkgs'][pkg[0]] = pkg[1]
            else:
                # Unexpected line inside the packages section: give up.
                break
    if state != 'end':
        return None # Partial files...
    # Drop the scratch bucket before returning.
    del ret['mods']['']
    return ret
def stats_subset(superset, subset):
    """Remove extra pkg/mod data from *subset* that isn't in *superset*.

    Also backfills *superset* statuses that were served from cache ("(0)...")
    with the real status recorded in *subset*.  Both dicts are modified in
    place; *subset* is returned.
    """
    kept_pkgs = {}
    for pkg, status in subset['pkgs'].items():
        if pkg in superset['pkgs']:
            kept_pkgs[pkg] = status
    # Update status for cached pkgs in the superset.
    for pkg, status in superset['pkgs'].items():
        if status.startswith("(0)") and pkg in kept_pkgs:
            superset['pkgs'][pkg] = kept_pkgs[pkg]
    subset['pkgs'] = kept_pkgs
    # Keep only modules whose namespace (name without the trailing "-stream")
    # also appears in the superset.
    namespaces = {mod.rsplit("-", 1)[0] for mod in superset['mods']}
    kept_mods = {}
    for mod, pkgs in subset['mods'].items():
        if mod.rsplit("-", 1)[0] in namespaces:
            kept_mods[mod] = pkgs
    subset['mods'] = kept_mods
    return subset
def process(logs, nlogs):
    """Parse the logs newest-first and return their stats oldest-first.

    The newest parseable log becomes the reference; every older log is
    trimmed to its contents via stats_subset().  Scanning stops at the first
    older log that shares nothing with the newest one.
    """
    collected = []
    newest = None
    for log in sorted(nlogs, reverse=True) + sorted(logs, reverse=True):
        if not log.endswith(".out.log"):
            continue
        stats = log2stats(log)
        if stats is None:
            continue
        if newest is None:
            newest = stats
        else:
            stats = stats_subset(newest, stats)
            if not stats['pkgs'] and not stats['mods']:
                break
        collected.append(stats)
    return list(reversed(collected))
def _usage(ec=1):
print('logdata4sync2git [-h] text|html dir...')
print(' text dir')
print(' html dir')
sys.exit(ec)
def output_text(stats, verbose):
    """Print a plain-text report of the pkgs/mods currently held by the CVE checker.

    :param stats: parsed log stats, oldest first; the last entry is the latest run.
    :param verbose: also print the raw held-status line for each pkg/mod,
                    and show full module names instead of namespaces.
    """
    latest = stats[-1]
    # NOTE(review): log2stats deletes the '' key, so an empty run has
    # len(mods) == 0, not 1 — confirm whether this check is still reachable.
    if not latest['pkgs'] and len(latest['mods']) == 1:
        print('No packages held by CVE checker.')
        return
    print('Latest: ' + latest['date'])
    pkgs = set(latest['pkgs'].keys())
    print("Pkgs:")
    prnt = []
    # Attribute each held pkg to the earliest run that mentions it.
    for stat in stats:
        for pkg in stat['pkgs']:
            if pkg not in pkgs:
                continue
            pkgs.remove(pkg)
            prnt.append((pkg, stat['date']))
    for pkg, date in sorted(prnt):
        print(" %-60s %s" % (pkg, date))
        if verbose:
            print(r" \_ %s" % (latest['pkgs'][pkg],))
    print("Mods:")
    # Map each namespace to the (latest) full module name carrying it.
    fmodns = {}
    for mod in latest['mods']:
        modns = mod.rsplit("-", 1)[0]
        fmodns[modns] = mod
    fmods = set(fmodns.keys())
    prnt = []
    for stat in stats:
        for mod in sorted(stat['mods']):
            modns = mod.rsplit("-", 1)[0]
            if modns not in fmods:
                continue
            fmods.remove(modns)
            modui = mod
            if not verbose:
                modui = modns
            prnt.append((modui, modns, stat['date']))
    for modui, modns, date in sorted(prnt):
        # BUG FIX: print the date of the run this module was first seen in
        # (the tuple's `date`), not `stat['date']` — the stale loop variable
        # always held the date of the last log scanned.
        print(" %-60s %s" % (modui, date))
        if verbose:
            print(r" \_ %s" % (fmodns[modns],))
        for pkg in sorted(latest['mods'][fmodns[modns]]):
            print(" %s" % (pkg,))
            if verbose:
                print(r" \_ %s" % (latest['mods'][fmodns[modns]][pkg],))
# See: https://www.datatables.net
# Page header template: auto-refresh meta tag, jQuery + DataTables includes,
# and the status colour classes (bcheck/binfo/tmout/fail/unknown/done) used
# by html_row().  Formatted with (title, heading).
html_header = """\
<html>
<head>
<meta http-equiv="refresh" content="7200">
<title>%s</title>
<script
src="https://code.jquery.com/jquery-3.3.1.slim.min.js"
integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
crossorigin="anonymous">
</script>
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.22/css/jquery.dataTables.css">
<script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.22/js/jquery.dataTables.js"></script>
<link rel="dns-prefetch" href="https://fonts.googleapis.com">
<style>
@import url('https://fonts.googleapis.com/css?family=Source+Sans+Pro:400,700');
body {
font-family:'Source Sans Pro', sans-serif;
margin:0;
}
h1,h2,h3,h4,h5,h6 {
margin:0;
}
td.dtclass, th.dtclass {
display: none;
}
.bcheck {
background: orange !important;
}
.binfo {
background: orange !important;
}
.tmout {
background: orange !important;
}
.fail {
background: red !important;
}
.unknown {
background: lightred !important;
text-decoration: line-through;
}
.done {
}
</style>
</head>
<body>
<h1>%s</h1>
"""
# Again See: https://www.datatables.net
# Opening of the data table; rows are appended by html_row().
html_table = """\
<table id="pkgdata" style="compact">
<thead>
<tr>
<th>Module</th>
<th>Package</th>
<th>Status</th>
<th>Date</th>
</tr>
</thead>
<tbody>
"""
# Closes the table and initialises DataTables; each row gets its first
# (hidden "dtclass") cell value added as a CSS class, and rows sort by date.
html_footer = """\
</tbody>
</table>
<script>
$(document).ready(
function() {
$('#pkgdata').DataTable(
{
"paging" : false,
"createdRow" : function(row, data, dataIndex) {
$(row).addClass(data[0]);
},
"order": [[ 3, "asc" ]]
}
);
}
);
</script>
</body>
</html>
"""
def html_row(fo, *args, **kwargs):
    """Write one table row to *fo*.

    :param args: cell values, one per column.
    :param lc: CSS class for the row; also emitted in a hidden "dtclass" cell
               so DataTables can re-apply it after sorting (works without JS too).
    :param links: dict mapping a cell value to a URL; matching cells become anchors.
    """
    lc = kwargs.get('lc')
    if lc is None:
        lc = ''
    links = kwargs.get('links', {})
    cells = []
    for arg in args:
        if arg in links:
            arg = '<a href="%s">%s</a>' % (links[arg], arg)
        cells.append('<td>%s</td>\n' % (arg,))
    fo.write('<tr class="%s"> <td class="dtclass">%s</td>\n' % (lc, lc))
    fo.write(''.join(cells))
    fo.write('</tr>\n')
def _cve_link(status):
if status[0] != '(':
if False:
return None, "http://nobegc"
return None, None
num = 0
snum = status[1:]
while snum and snum[0] != ')':
if snum[0] not in "0123456789":
if False:
return None, "http://nonum"
return None, None
num *= 10
num += int(snum[0])
snum = snum[1:]
if not snum:
if False:
return None, "http://noendc"
return None, None
res = None
burl = "http://cve-checker-route-centos-cve-checker.apps.ocp4.prod.psi.redhat.com/"
if conf_latest_cve_result:
url = burl + "info/" + str(num)
data = _json_url(url)
if data is not None and data['state'] == 'done':
res = ' [' + data['result'] + ']'
url = burl + "ui/" + str(num)
return res, url
def _status(status):
if False: pass
elif status.endswith("=!Bad check response!"):
return "Bad check", "bcheck"
elif status.endswith("=!Bad info response!"):
return "Bad info", "binfo"
elif status.endswith("=!Timeout!"):
return "Timeout", "tmout"
elif status.endswith("=False (!safe_nvr)"):
return "Fail", "fail"
elif status.endswith("=False"):
return "Fail", "fail"
else:
return status, "unknown"
def output_html(stats):
    """Write an HTML (DataTables) report of held pkgs/mods to stdout.

    :param stats: parsed log stats, oldest first; the last entry is the latest run.
    """
    fo = sys.stdout
    latest = stats[-1]
    h = 'CVE checker: ' + latest['date']
    fo.write(html_header % (h, h))
    # NOTE(review): log2stats deletes the '' key, so an empty run has
    # len(mods) == 0, not 1 — confirm whether this check is still reachable.
    if not latest['pkgs'] and len(latest['mods']) == 1:
        fo.write('<h3>No packages held by CVE checker.</h3>')
        fo.write(html_footer)
        return
    fo.write(html_table)
    pkgs = set(latest['pkgs'].keys())
    prnt = []
    # Attribute each held pkg to the earliest run that mentions it.
    for stat in stats:
        for pkg in stat['pkgs']:
            if pkg not in pkgs:
                continue
            pkgs.remove(pkg)
            prnt.append((pkg, stat['date']))
    for pkg, date in sorted(prnt):
        cve_res, links = _cve_link(latest['pkgs'][pkg])
        status, lc = _status(latest['pkgs'][pkg])
        if cve_res is not None:
            status = status + cve_res
        links = {status : links}
        html_row(fo, '<BaseOS>', pkg, status, date,
                 lc=lc, links=links)
    # Map each namespace to the (latest) full module name carrying it.
    fmodns = {}
    for mod in latest['mods']:
        modns = mod.rsplit("-", 1)[0]
        fmodns[modns] = mod
    fmods = set(fmodns.keys())
    prnt = []
    for stat in stats:
        for mod in stat['mods']:
            modui = mod.rsplit("-", 1)[0]
            if modui not in fmods:
                continue
            fmods.remove(modui)
            # BUG FIX: record this module's own namespace.  Previously this
            # appended the stale `modns` left over from the fmodns-building
            # loop above, skewing the sort order of the rows.
            prnt.append((modui, modui, stat['date']))
    for modui, modns, date in sorted(prnt):
        for pkg in latest['mods'][fmodns[modui]]:
            cve_res, links = _cve_link(latest['mods'][fmodns[modui]][pkg])
            status, lc = _status(latest['mods'][fmodns[modui]][pkg])
            if cve_res is not None:
                status = status + cve_res
            links = {status : links}
            html_row(fo, modui, pkg, status, date,
                     lc=lc, links=links)
    fo.write(html_footer)
def main():
    """CLI entry point: parse options, gather logs from dir and emit a text or HTML report."""
    parser = OptionParser()
    parser.add_option("-v", "--verbose",
                      help="Print out more info.", default=False, action="store_true")
    (options, args) = parser.parse_args()
    if len(args) < 2:
        _usage()
    # Plain logs plus the sync2git-prefixed variants.
    logs = sorted(glob.glob(args[1] + '/2*.log'))
    nlogs = sorted(glob.glob(args[1] + '/sync2git-2*.log'))
    stats = process(logs, nlogs)
    command = args[0]
    if command in ('text', 'txt'):
        output_text(stats, options.verbose)
    elif command in ('html',):
        output_html(stats)

if __name__ == '__main__':
    main()
|
#coding=utf-8
# Deli-shop exercise (book p.113), 2017-04-17
sandwichOrders = ['fruitSandwich','baconicSandwich','beefSandwich','pastrmiSandwich']
finishedSandwiches = []
print("Now we have")
print(sandwichOrders)
print("\nOpps!The pastramiSandwich is sold out!")
# Drop every sold-out pastrami order.  Rebuilding the list fixes the original
# remove-while-iterating pitfall (removing during iteration skips the element
# that slides into the removed slot).
sandwichOrders = [order for order in sandwichOrders if order != 'pastrmiSandwich']
# Make the remaining sandwiches, most recent order first.
while sandwichOrders:
    sandwich = sandwichOrders.pop()
    print("I made your "+sandwich+'\n')
    finishedSandwiches.append(sandwich)
print(finishedSandwiches)
|
#!/usr/bin/env python
from tincanradar import dbuvm2dbm, uvm2dbm
from argparse import ArgumentParser
def main():
    """Convert a field strength (uV/m and/or dBuV/m) at a given distance into dBm."""
    parser = ArgumentParser()
    parser.add_argument("-u", "--uvm", help="uV/m", type=float)
    parser.add_argument("-db", "--dBuvm", help="dBuV/m", type=float)
    parser.add_argument("-d", "--dist_m", help="distance (one-way) [meters]", type=float, default=3.0)
    args = parser.parse_args()
    if args.dBuvm is not None:
        print(f"{args.dBuvm} dBuV/m @ {args.dist_m} m => {dbuvm2dbm(args.dBuvm,args.dist_m):.2f} dBm")
    if args.uvm is not None:
        print(f"{args.uvm} uV/m @ {args.dist_m} m => {uvm2dbm(args.uvm,args.dist_m):.2f} dBm")

if __name__ == "__main__":
    main()
|
########################################
# this file does not use pybgs library
########################################
########################################
########################################
# This sript will take video.MP4 exract
# a given area and calculate the pixel difference
# between each frame after n frames it
# takes the mean and plot it
# SETTINGS:
# you can jump frames forward to see variable jump_frames
# if you set save_plot the plots will get saved into plot folder
########################################
########################################
########################################
#Modified by Martin to run on bash for windows
#Saves different frames now, be carefull, this might create a lot of data
########################################
# Basic libraries
import numpy as np
import os
import glob
# open cv, we have to load V3 for full list of algorithms
# https://docs.opencv.org/3.4
import cv2 as cv
# graphic
import matplotlib.pyplot as plt
############## SETTINGS #######################
# plot style
plt.style.use('fast')
# our Video file
video_file = "MVI_1224.MP4"
# create folder for plots and empty it
folder1 = "plot"      # live-plot screenshots
folder2 = "original"  # annotated input frames
folder3 = "sub"       # thresholded foreground masks
# jump frames forward
jump_frames = 5000
# save plot
save_plot = True
# threshold — per-pixel absdiff value above which a pixel counts as "changed"
threshold = 7
# mean interval for generating mean of pixel change and plotting steps, example 10 = each 10 frames make the mean and generate plot point
mean_interval = 10
# our rectangle area
x1 = 1300
x2 = 1800
y1 = 350
y2 = 850
# plot pixel color change break point
plot_breakpoint = 6
# adjust Gamma
adjustGamma = True
############## END SETTINGS #######################
# helper variables
w_pixel_array = []  # Line2D handle carried between live_plotter calls
ax = []             # Axes handle carried between live_plotter calls
x_vec = []
y_vec = []
second_count = 0 # used to count frames
n_white_pix_sum = 0 # helper variable to sum white pixel in n amount of frames
a = jump_frames  # running frame index used to name saved image files
print("########### SETTINGS ##################")
print("########### PLOTS: {}".format(save_plot))
print("########### PLOT breakpoint: {}".format(plot_breakpoint))
print("########### Ignored frames: {}".format(jump_frames))
print("########### Threshold: {}".format(threshold))
print("########### Video: {}".format(video_file))
print("########### X1: {}".format(x1))
print("########### X2: {}".format(x2))
print("########### Y1: {}".format(y1))
print("########### Y2: {}".format(y2))
print("########### Adjust Gamma: {}".format(adjustGamma))
# Check if video file exists
if(os.path.isfile(video_file) == False):
    print("Error: No video file found")
    exit()
# create output folder if not exists
# delete folder content
def del_folder_content(folder):
    """Ensure *folder* exists and contains no regular files.

    Creates the directory if it is missing; otherwise deletes every regular
    file directly inside it (subdirectories are left untouched).
    """
    try:
        os.mkdir(folder)
    except OSError:
        # Directory already exists (or cannot be created): clear its files.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                #elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception as e:
                print(e)
# Ask before wiping previous output: the frame dumps can be a lot of data.
i = input("Clear files in all output folders (y/n)? Be carefull, this might create a lot of data!")
if (i == "y"):
    del_folder_content(folder1)
    del_folder_content(folder2)
    del_folder_content(folder3)
    print("Files deleted")
else: # create output folder if not exists
    try:
        os.mkdir(folder1)
        os.mkdir(folder2)
        os.mkdir(folder3)
    except:
        pass
# load video
capture = cv.VideoCapture(video_file)
# wait till video is loaded
while not capture.isOpened():
    capture = cv.VideoCapture(video_file)
    cv.waitKey(1000)
    print("Wait for the header")
# jump forward in frames
capture.set(cv.CAP_PROP_POS_FRAMES, jump_frames)
length = int(capture.get(cv.CAP_PROP_FRAME_COUNT))
print( "Video total frames: {}".format(length) )
# https://makersportal.com/blog/2018/8/14/real-time-graphing-in-python
def live_plotter(x_vec, y1_data, line1, ax, frame, save, plot_breakpoint, mean_interval, pause_time = 0.04):
print(x_vec, y1_data, line1, ax, frame, save, plot_breakpoint, mean_interval)
if line1 == []:
# this is the call to matplotlib that allows dynamic plotting
plt.ion()
fig = plt.figure(figsize = (8 , 4))
ax = fig.add_subplot(1,1,1)
# create a variable for the line so we can later update it
line1, = ax.plot(x_vec, y1_data, '-o', alpha = 0.8)
#update plot label/title
plt.ylabel('Moved pixels [%]')
plt.xlabel('Frames [{} Frames Interval]'.format(mean_interval))
plt.ylim(0,15,0.5)
plt.show()
plt.title('Pixel change {0:.2f}%'.format(y1_data[-1]))
# change color if we drop lower than given threshold
if(y1_data[-1] < plot_breakpoint):
ax.set_facecolor('xkcd:salmon')
else:
ax.set_facecolor('white')
# after the figure, axis, and line are created, we only need to update the y-data
try:
line1.set_ydata(y1_data)
line1.set_xdata(x_vec)
except:
pass
plt.xlim(min(x_vec),max(x_vec), 5)
plt.tick_params(
axis = 'x', # changes apply to the x-axis
which = 'both', # both major and minor ticks are affected
bottom = True, # ticks along the bottom edge are off
top = False, # ticks along the top edge are off
labelbottom = True) # labels along the bottom edge are off
# adjust limits if new data goes beyond bounds
if np.min(y1_data)<=line1.axes.get_ylim()[0] or np.max(y1_data)>=line1.axes.get_ylim()[1]:
plt.ylim([np.min(0),np.max(y1_data)+np.std(y1_data)])
# this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
plt.pause(pause_time)
# if we want to save the plots
try:
if save:
fname = 'plot/'+str(frame)+'.png'
plt.savefig(fname, dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None) # return line so we can update it again in the next iteration
except:
pass
return line1, ax
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / 1.2  # inverse of the chosen gamma (gamma = 1.2)
ltable = np.array([((i / 255.0) ** invGamma) * 255
    for i in np.arange(0, 256)]).astype("uint8")
# Somehow I found the value of `gamma=1.2` to be the best in my case
def adjust_gamma(image, ltable):
    """Return *image* gamma-corrected through the precomputed lookup table."""
    # apply gamma correction using the lookup table
    return cv.LUT(image, ltable)
# Main loop: read each frame, diff the region of interest against the previous
# frame, and plot the averaged fraction of changed pixels every mean_interval frames.
while True:
    # read video
    flag, frame = capture.read()
    # check if video is read
    if flag:
        # show video in ouput
        cv.namedWindow('original',cv.WINDOW_NORMAL)
        cv.resizeWindow('original', 600, 400)
        # draws a rectangle on frame
        # x1: y1, x2: y2
        cv.rectangle(frame, (x1, y1), (x2, y2), (255,0,0), 2)
        cv.imshow('original', frame)
        n1 = "{}/original_{}.jpg".format(folder2, a)
        cv.imwrite(n1, frame)
        # crop only the area of interest for us
        # y1:y2, x1:x2
        roi = frame[y1:y2+1, x1:x2+1]
        cv.namedWindow('cropped',cv.WINDOW_NORMAL)
        cv.resizeWindow('cropped', 600, 400)
        cv.imshow('cropped', roi)
        # current position in the stream (frame index)
        pos_frame = capture.get(1)
        #### Preprocessing ######
        # change image to grayscale
        roi = cv.cvtColor(roi, cv.COLOR_BGR2GRAY)
        if (adjustGamma == True):
            roi = adjust_gamma(roi, ltable)
        # equalize grayscale image
        #roi = cv.equalizeHist(roi)
        # add gaussian to remove noise
        #roi = cv.GaussianBlur(roi, (1, 1), 0)
        #roi = cv.medianBlur(roi, 1)
        #roi = cv.GaussianBlur(roi, (7, 7), 1.5)
        #### END Preprocessing ######
        cv.namedWindow('preprocess', cv.WINDOW_NORMAL)
        cv.resizeWindow('preprocess', 600, 400)
        cv.imshow('preprocess', roi)
        # check if it was the first run otherwise img_history is same as input for first round
        try:
            img_history
        except:
            img_history = roi
        # calculate absdiff
        img_output = cv.absdiff(roi, img_history)
        #### Output Processing ######
        #img_output = cv.cvtColor(img_output, cv.COLOR_BGR2GRAY)
        #img_output = cv.equalizeHist(img_output)
        #img_output = cv.GaussianBlur(img_output, (7, 7), 1.5)
        #img_output = cv.GaussianBlur(img_output, (3, 3), 1.5)
        img_output = cv.medianBlur(img_output, 1)
        # exports a black and white image
        _, img_output = cv.threshold(img_output, threshold, 255, cv.THRESH_BINARY)
        img_history = roi
        # show foreground mask in window
        cv.namedWindow('foreground', cv.WINDOW_NORMAL)
        cv.resizeWindow('foreground', 600, 400)
        cv.imshow('foreground', img_output)
        n2 = "{}/foreground_{}.jpg".format(folder3, a)
        cv.imwrite(n2, img_output)
        a += mean_interval
        # all pixels of image
        # TODO actually we would need this only one time, as all the frames have the same pixel count
        n_all_px = img_output.size
        # get all white pixels == changed pixels
        n_white_pix = np.sum(img_output == 255)
        # save into our helper variable
        n_white_pix_sum = n_white_pix_sum + n_white_pix
        # set our frame counter forward
        second_count = second_count + 1
        # if 10 frames we output the plot
        if (second_count == mean_interval):
            # mean and relative value to all pixels of the cropped frame
            relative_white = (n_white_pix_sum / second_count) / n_all_px * 100
            # add value our vector
            y_vec.extend([relative_white])
            x_vec.extend([pos_frame])
            # create our live plot
            w_pixel_array, ax = live_plotter(x_vec, y_vec, w_pixel_array, ax, pos_frame, save_plot, plot_breakpoint, mean_interval)
            # move our vector forward (keep the plot window bounded)
            if (len(x_vec) > 250):
                y_vec.pop(0)
                x_vec.pop(0)
            # reset helper
            n_white_pix_sum = 0
            second_count = 0
            median_vec = []
            print('Number of mean white pixels: {0:.2f}%'.format(relative_white))
    else:
        #print "Frame is not ready"
        cv.waitKey(1000)
        break
    # ESC quits
    if 0xFF & cv.waitKey(10) == 27:
        break
# END
|
#!/usr/bin/env python
import io
import json
import os
# Extract moment.js longDateFormat entries (LT/LTS/L/LL/LLL/LLLL) from every
# locale source file and dump them as an importable JS module.
moment_locale_directory = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "node_modules",
    "moment",
    "src",
    "locale",
)
# Both quote styles used by the moment locale sources.
long_date_format_parts = (
    r"LT : '",
    r'LT : "',
    r"LTS : '",
    r'LTS : "',
    r"L : '",
    r'L : "',
    r"LL : '",
    r'LL : "',
    r"LLL : '",
    r'LLL : "',
    r"LLLL : '",
    r'LLLL : "',
)
locales = {}
if os.path.isdir(moment_locale_directory):
    for root, dirs, files in os.walk(moment_locale_directory):
        for file in files:
            if file.endswith(".js"):
                with io.open(os.path.join(root, file), "r", encoding="utf-8") as locale:
                    long_date_format = {}
                    for line in locale.readlines():
                        for part in long_date_format_parts:
                            if line.strip().startswith(part):
                                line = line.strip()
                                # BUG FIX: str.lstrip(part) strips a *character
                                # set*, not a prefix, and could eat the start of
                                # the format value (e.g. a value beginning with
                                # "L", ":" or a space); slice the prefix instead.
                                if part.endswith('"'):
                                    key = part.rstrip(' :"')
                                    value = line[len(part):].rstrip('", ')
                                else:
                                    key = part.rstrip(" :'")
                                    value = line[len(part):].rstrip("', ")
                                long_date_format[key] = value
                    if long_date_format:
                        locale_name = file.replace(".js", "")
                        locales[locale_name] = long_date_format
                        # moment has no plain "en" locale file; reuse en-gb.
                        if locale_name == "en-gb":
                            locales["en"] = long_date_format
with io.open(
    os.path.join(moment_locale_directory, "extracted.js"), "w", encoding="utf-8"
) as outfile:
    outfile.write(
        "export const locales = "
        + json.dumps(locales, ensure_ascii=False, sort_keys=True, indent=4)
    )
|
import pandas as pd
# Oversample a CSV: repeat each row 'Score' times and write the result out.
rows = []
df = pd.read_csv('sample_csv.csv')
for _, row in df.iterrows():
    # Repeats references to the same Series object; fine, since the new
    # DataFrame copies the values on construction.
    rows.extend([row] * row['Score'])
new_df = pd.DataFrame(rows, columns=df.columns)
new_df.to_csv('bad_csv.csv')
from model.contact import Contact
from model.group import Group
from generator import contact as f
from generator import group as t
def test_add_contact_to_group(app, orm):
    """End-to-end check that a contact can be added to a group.

    Picks (or creates) a group and a contact that is not yet in it, links
    them through the UI, then verifies the membership via the ORM.
    """
    old_contacts = None
    added_contact = None
    added_group = None
    # Make sure at least one group exists.
    if len(orm.get_group_list()) == 0:
        app.group.create(Group(name="grnew", header="res", footer="tes"))
    existing_groups = orm.get_group_list()
    # Look for a group that still has contacts outside of it.
    for group in existing_groups:
        available_contacts = orm.get_contacts_not_in_group(group=group)
        if len(available_contacts) > 0:
            added_contact = available_contacts[0]
            added_group = group
            break
    # No free contact found: create a fresh one through the UI.
    if added_contact is None:
        added_contact = Contact(firstname="faname", lastname="lname", address="address")
        old_contacts = orm.get_contact_list()
        app.contact.add(added_contact)
    if old_contacts is not None:
        # Re-read the DB so added_contact carries its database identity.
        new_contacts = orm.get_contact_list()
        added_contact = \
            [item for item in new_contacts if
             item not in old_contacts and item.firstname == added_contact.firstname][0]
    if added_group is None:
        added_group = orm.get_group_list()[0]
    app.contact.add_contact_to_group(added_contact, added_group)
    assert orm.is_contact_in_group(added_contact, added_group)
|
from datetime import datetime
from decimal import Decimal
from typing import Generator, List, Dict
import pytz
from privex.helpers import empty, sleep
from privex.jsonrpc.objects import MoneroTransfer
from privex.coin_handlers.base.objects import Deposit, Coin
from privex.jsonrpc import MoneroRPC
from privex.coin_handlers.Monero.MoneroMixin import MoneroMixin
from privex.coin_handlers.base.BaseLoader import BaseLoader
import logging
log = logging.getLogger(__name__)
class MoneroLoader(BaseLoader, MoneroMixin):
    """Deposit loader for Monero: lists incoming wallet transfers as Deposit objects."""

    def __init__(self, settings: Dict[str, dict] = None, coins: List[Coin] = None, *args, **kwargs):
        """
        :param settings: per-symbol coin-handler settings dicts
        :param coins: list of Coin objects this loader should handle
        """
        super(MoneroLoader, self).__init__(settings=settings, coins=coins, *args, **kwargs)
        self.tx_count = 1000   # default scan size (see load())
        self.loaded = False
        # Get all RPC objects
        self.rpcs = self._get_rpcs()
        # Tracks whether __enter__ has opened the wallet, so nesting is safe.
        self.wallet_opened = False
    def list_txs(self, batch=100) -> Generator[Deposit, None, None]:
        """Yield incoming ("in") wallet transfers for every handled symbol as Deposits.

        :param batch: unused here; kept for interface compatibility with BaseLoader
        """
        log.debug('Symbols: %s', self.symbols)
        for s in self.symbols:
            log.debug('Entering wallet for %s', s)
            with self.wallet(s) as w: # type: MoneroRPC
                log.debug('Looking up account ID for symbol %s', s)
                acc_id = self.account_id(symbol=s)
                log.debug('Loading transfers for account ID %s', acc_id)
                txs = w.get_transfers(account_index=acc_id)
                log.debug('Looping over "in" txs')
                for tx in txs.get('in', []):
                    try:
                        cleaned = self._clean_tx(tx=tx, symbol=s)
                        if cleaned is None:
                            continue
                        yield Deposit(**cleaned)
                    except Exception:
                        # Skip malformed transfers rather than aborting the whole scan.
                        log.exception('(skipping) Error processing Monero TX %s', tx)
                        continue
def _clean_tx(self, tx: MoneroTransfer, symbol, address=None) -> dict:
"""Filters an individual transaction. See :meth:`.clean_txs` for info"""
need_confs = self.settings[symbol].get('confirms_needed', 1)
txid = tx.txid
category = tx.type
amt = tx.decimal_amount
log.debug('Filtering/cleaning transaction, Cat: %s, Amt: %s, TXID: %s', category, amt, txid)
if category != 'in': return None # Ignore non-receive transactions
# if 'generated' in tx and tx['generated'] in [True, 'true', 1]: return None # Ignore mining transactions
# Filter by receiving address if needed
if not empty(address) and tx.address != address: return None
# If a TX has less confirmations than needed, check if we can trust unconfirmed TXs.
# If not, we can't accept this TX.
confs = int(tx.confirmations)
if confs < need_confs:
if confs < int(tx.suggested_confirmations_threshold):
log.debug('Got %s transaction %s, but only has %d confs, needs %d', symbol, txid, confs, need_confs)
return None
d = datetime.utcfromtimestamp(tx.timestamp)
d = pytz.utc.localize(d)
return dict(
txid=txid,
coin=self.coins[symbol].symbol,
vout=int(0),
tx_timestamp=d,
address=tx.address,
amount=Decimal(amt)
)
def load(self, tx_count=1000):
pass
def __enter__(self):
log.debug('%s entering with statement', self.__class__.__name__)
if self.wallet_opened:
log.debug('Wallet already open')
return self
s = self.xmr_settings
if empty(s['wallet']):
log.debug('%s entered. No wallet specified for %s. Not opening any wallet.', self.__class__.__name__)
return self
sleep(3)
log.debug('Opening wallet %s', s['wallet'])
self.rpcs['XMR'].open_wallet(filename=s['wallet'], password=s['walletpass'])
log.debug('Wallet opened')
self.wallet_opened = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
s = self.xmr_settings
if empty(s['wallet']):
log.debug('%s exiting. No wallet specified. Not closing any wallet.', self.__class__.__name__)
return self
log.debug('%s exiting. Calling store()', self.__class__.__name__)
self.rpcs['XMR'].store()
self.wallet_opened = False
|
import ADT
from LaTeX import LaTeX
import sys
import io
import os

# Usage: python <script> <inputfile>
# Reads a LaTeX-like source file, patches the \youtube macro, translates the
# document, and prints the resulting LaTeX to stdout.
filename = sys.argv[1]
firstname, extension = os.path.splitext(filename)
# FIX: use a context manager so the input file handle is always closed
# (the original opened the file and never closed it).
with io.open(filename, "r", encoding="utf-8") as infile:
    txt = infile.read()
#
# Hack below is disgusting. It overwrites the html macro \youtube for LaTeX pdf
#
txt = txt.replace("\\chapterno", "\\newcommand{\\youtube}[1]{\\href{https://www.youtube.com/embed/#1?rel=0}{Link to video}}\\chapterno")
doc = LaTeX(txt, firstname)
doc.translate()
s = doc.LaTeX()
print(s)
|
# Generated by Django 2.2.dev20190116205049 on 2019-01-16 20:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration creating the Challenge model: two players, their
    decks, a free-form status string, and an optional winner."""

    initial = True

    dependencies = [
        ('decks', '0007_auto_20190116_2054'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Challenge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Challenge lifecycle state; rows start as 'pending'.
                ('status', models.CharField(default='pending', max_length=200)),
                ('deck1', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='challenger_deck', to='decks.Deck')),
                # deck2 and winner are nullable: unset until the challenge is
                # accepted / resolved.
                ('deck2', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='challenged_deck', to='decks.Deck')),
                ('player1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challenger', to=settings.AUTH_USER_MODEL)),
                ('player2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challenged', to=settings.AUTH_USER_MODEL)),
                ('winner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='wins', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import re
from ..util import sozgebolu
class bigram(object):
    """Extracts word bigrams from (presumably Kazakh) text using POS-tag
    patterns.

    The constructor splits *text* into sentences/clauses, tokenises each
    clause via ``sozgebolu`` (external helper returning (words, tags) --
    behavior assumed from usage here, TODO confirm), then collects selected
    word pairs into ``self.newlemm`` (joined as "word1 word2") and all words
    into ``self.lastlemm``.
    """
    def __init__(self, text):
        soilemder = self.soilemgebolu(text)  # list of sentences/clauses
        self.newlemm = []    # accumulated bigrams as "word1 word2" strings
        self.lastlemm = []   # flat list of every word encountered
        for soilem in soilemder:
            soz_tag = sozgebolu(soilem)
            soz = soz_tag[0]   # words of this clause
            tag = soz_tag[1]   # corresponding POS tags
            m = len(tag)
            bigrm = self.bigrams(soz, tag, m)
            qq = map(' '.join, bigrm)
            self.newlemm.extend(list(qq))
            self.lastlemm.extend(soz)

    def utir(self, args):
        """Return the indices of every comma character in the string *args*."""
        hist = []
        for i in range(len(args)):
            if r"," == args[i]:
                hist.append(i)
        return hist

    def kombinacia(self, args):
        """Yield consecutive pairs of comma indices, padded with -1.

        [] -> (-1, -1); [i] -> (i, -1); [i, j, k] -> (i, j), (j, k).
        NOTE: the empty/single cases mutate the caller's list in place.
        """
        if len(args) == 0:
            args.append(-1)
            args.append(-1)
            yield tuple(args)
        elif len(args) == 1:
            args.append(-1)
            yield tuple(args)
        else:
            args = iter(args)
            hist = []
            hist.append(next(args))
            for i in args:
                hist.append(i)
                yield tuple(hist)
                del hist[0]  # slide the two-element window forward

    def soilemgebolu(self, text):
        """Split *text* into sentences on '.', '?', '!', then further split
        off comma-separated clauses that contain at least two spaces. Finally
        drops empty entries and strips a leading character that is not a
        Latin/Cyrillic letter, digit, space, or hyphen."""
        res = re.split(r"[.]|[?]|[!]", text)
        if res[len(res)-1] == '':
            del res[len(res)-1]
        n = len(res)
        i = 0
        while i < n:
            mas1 = self.utir(res[i])                 # comma positions in this entry
            next_utir = list(self.kombinacia(mas1))  # consecutive comma-index pairs
            for j in range(len(next_utir)):
                x = next_utir[j][0]
                y = next_utir[j][1]
                if x != -1 and y != -1:
                    pvk = res[i][x+1:y]  # text between two adjacent commas
                    kol = pvk.count(r" ")
                    if kol >= 2:
                        # Clause is long enough: split the remainder into a
                        # new entry and re-examine it on a later iteration.
                        res.insert(i+1, res[i][x+1:])
                        res[i] = res[i][:x]
                        n += 1
                        break
                elif x != -1 and y == -1:
                    # Single (or last) comma: split unconditionally.
                    res.insert(i+1, res[i][x+1:])
                    res[i] = res[i][:x]
                    n += 1
            i += 1
        i = 0
        length = len(res)
        while i < length:
            if res[i] == '':
                # Remove empty entries; step back so the shifted element at
                # this index is examined next.
                del res[i]
                length -= 1
                i -= 1
            elif str(res[i][0]).lower() not in "abcdefghigklmnopqrstuvwxyz\
аәбвгғдеёжзийкқлмнңоөпрстуұүфхһцчшщьыъіэюя1234567890- ":
                # Strip one leading punctuation/garbage character.
                res[i] = res[i][1:]
            i += 1
        return res

    def bigrams(self, arr, tag, m):
        """Yield (word, next_word) pairs whose adjacent tag pattern is one of
        adj+n, n+n, n+v, np+n, np+np (Apertium-style tags), or a numeral
        followed by a Kazakh time/date word (century, year, month names...)."""
        # UaqytAtaulary: Kazakh time/date vocabulary (year, month, weekday...)
        UaqytAtaulary = ['ғасыр', 'ғ', 'жыл', 'жылы', 'ай',
                         'күн', 'апта', 'қаңтар', 'ақпан',
                         'наурыз', 'сәуір', 'мамыр',
                         'маусым', 'шілде', 'тамыз',
                         'қыркүйек', 'қазан', 'қараша', 'желтоқсан']
        for i in range(m-1):
            if tag[i] == str(r'<adj>') and tag[i+1] == str(r'<n>'):
                yield tuple([arr[i], arr[i+1]])
            elif tag[i] == str(r'<n>') and tag[i+1] == str(r'<n>'):
                yield tuple([arr[i], arr[i+1]])
            elif tag[i] == str(r'<n>') and tag[i+1] == str(r'<v>'):
                yield tuple([arr[i], arr[i+1]])
            elif tag[i] == str(r'<np>') and tag[i+1] == str(r'<n>'):
                yield tuple([arr[i], arr[i+1]])
            elif tag[i] == str(r'<np>') and tag[i+1] == str(r'<np>'):
                yield tuple([arr[i], arr[i+1]])
            elif tag[i] == str(r'<num>') and arr[i+1] in UaqytAtaulary:
                yield tuple([arr[i], arr[i+1]])
|
from django.urls import path
from .views import *
urlpatterns = [
    # Sandbox index listing.
    path(r'', SandboxIndexView.as_view(), name='sandbox'),
    # Individual sandbox file, addressed by its path.
    # NOTE(review): both routes share the name 'sandbox'; reverse('sandbox')
    # will resolve to only one of them -- confirm this is intended.
    path(r'<path:file_path>', SandboxView.as_view(), name='sandbox'),
]
|
from .register import INFER, PREPROCESS, POSTPROCESS, METRICS
|
"""empty message
Revision ID: af9c317d2c92
Revises: 245d12695c69
Create Date: 2020-03-12 08:49:36.009020
"""
# revision identifiers, used by Alembic.
revision = 'af9c317d2c92'
down_revision = '245d12695c69'

from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from app import db
from enum import Enum
import datetime
from sqlalchemy.ext.declarative import declarative_base

# Standalone declarative base so the migration's "shadow" models below are
# registered on their own metadata, not the application's live models.
Base = declarative_base()
class Organisation(Base):
    """Shadow model of the organisation table (only what this migration needs)."""
    __tablename__ = "organisation"
    # extend_existing allows redefinition if the table is already present on
    # this metadata (common in data migrations).
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    system_name = db.Column(db.String(50), nullable=False)
    small_logo = db.Column(db.String(100), nullable=False)
    large_logo = db.Column(db.String(100), nullable=False)
    domain = db.Column(db.String(100), nullable=False)
    url = db.Column(db.String(100), nullable=False)
    email_from = db.Column(db.String(100), nullable=True)
    system_url = db.Column(db.String(100), nullable=False)
    privacy_policy = db.Column(db.String(100), nullable=False)

    def __init__(self, name, system_name, small_logo, large_logo, domain, url, email_from, system_url, privacy_policy):
        self.name = name
        self.small_logo = small_logo
        self.large_logo = large_logo
        self.domain = domain
        self.system_name = system_name
        self.url = url
        self.email_from = email_from
        self.system_url = system_url
        self.privacy_policy = privacy_policy
class Country(Base):
    """Shadow model of the country lookup table (id + name only)."""
    __tablename__ = "country"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(100), nullable=False)

    def __init__(self, name):
        self.name = name
class EventType(Enum):
    """Kind of event a row in the event table represents."""
    EVENT = 'event'
    AWARD = 'award'
class Event(Base):
    """Shadow model of the event table, including the open/close windows for
    application, review, selection, offer and registration phases."""
    __tablename__ = "event"
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    start_date = db.Column(db.DateTime(), nullable=False)
    end_date = db.Column(db.DateTime(), nullable=False)
    # Unique short identifier used to look events up (e.g. 'maathai2020').
    key = db.Column(db.String(255), nullable=False, unique=True)
    organisation_id = db.Column(db.Integer(), db.ForeignKey(
        'organisation.id'), nullable=False)
    email_from = db.Column(db.String(255), nullable=False)
    url = db.Column(db.String(255), nullable=False)
    application_open = db.Column(db.DateTime(), nullable=False)
    application_close = db.Column(db.DateTime(), nullable=False)
    review_open = db.Column(db.DateTime(), nullable=False)
    review_close = db.Column(db.DateTime(), nullable=False)
    selection_open = db.Column(db.DateTime(), nullable=False)
    selection_close = db.Column(db.DateTime(), nullable=False)
    offer_open = db.Column(db.DateTime(), nullable=False)
    offer_close = db.Column(db.DateTime(), nullable=False)
    registration_open = db.Column(db.DateTime(), nullable=False)
    registration_close = db.Column(db.DateTime(), nullable=False)
    event_type = db.Column(db.Enum(EventType), nullable=False)

    def __init__(self,
                 name,
                 description,
                 start_date,
                 end_date,
                 key,
                 organisation_id,
                 email_from,
                 url,
                 application_open,
                 application_close,
                 review_open,
                 review_close,
                 selection_open,
                 selection_close,
                 offer_open,
                 offer_close,
                 registration_open,
                 registration_close,
                 event_type
                 ):
        self.name = name
        self.description = description
        self.start_date = start_date
        self.end_date = end_date
        self.key = key
        self.organisation_id = organisation_id
        self.email_from = email_from
        self.url = url
        self.application_open = application_open
        self.application_close = application_close
        self.review_open = review_open
        self.review_close = review_close
        self.selection_open = selection_open
        self.selection_close = selection_close
        self.offer_open = offer_open
        self.offer_close = offer_close
        self.registration_open = registration_open
        self.registration_close = registration_close
        # Not a mapped column here; kept for parity with the app model.
        self.event_roles = []
        self.event_type = event_type
class ApplicationForm(Base):
    """Shadow model of an event's application form."""
    __tablename__ = 'application_form'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    event_id = db.Column(db.Integer(), db.ForeignKey('event.id'), nullable=False)
    is_open = db.Column(db.Boolean(), nullable=False)
    event = db.relationship('Event', foreign_keys=[event_id])
    # Whether this form supports nominating someone else.
    nominations = db.Column(db.Boolean(), nullable=False)

    def __init__(self, event_id, is_open, nominations):
        self.event_id = event_id
        self.is_open = is_open
        self.nominations = nominations
class Question(Base):
    """Shadow model of a single application-form question."""
    __tablename__ = 'question'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
    section_id = db.Column(db.Integer(), db.ForeignKey('section.id'), nullable=False)
    # Widget type, e.g. 'short-text', 'long-text', 'multi-choice', 'reference'.
    type = db.Column(db.String(), nullable=False)
    description = db.Column(db.String(), nullable=True)
    headline = db.Column(db.String(), nullable=False)
    placeholder = db.Column(db.String(), nullable=True)
    validation_regex = db.Column(db.String(), nullable=True)
    validation_text = db.Column(db.String(), nullable=True)
    order = db.Column(db.Integer(), nullable=False)
    options = db.Column(db.JSON(), nullable=True)
    is_required = db.Column(db.Boolean(), nullable=False)
    # Conditional display: only shown when the referenced question's answer is
    # in show_for_values. Not __init__ parameters -- set directly by callers.
    depends_on_question_id = db.Column(db.Integer(), db.ForeignKey('question.id'), nullable=True)
    show_for_values = db.Column(db.JSON(), nullable=True)

    def __init__(self, application_form_id, section_id, headline, placeholder, order, questionType, validation_regex, validation_text=None, is_required = True, description = None, options = None):
        self.application_form_id = application_form_id
        self.section_id = section_id
        self.headline = headline
        self.placeholder = placeholder
        self.order = order
        self.type = questionType
        self.description = description
        self.options = options
        self.is_required = is_required
        self.validation_regex = validation_regex
        self.validation_text = validation_text
class Section(Base):
    """Shadow model of an application-form section (a titled group of questions)."""
    __tablename__ = 'section'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer(), primary_key=True)
    application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    order = db.Column(db.Integer(), nullable=False)
    # use_alter breaks the circular FK between section and question at DDL
    # time. Like Question, these two are set directly by callers, not __init__.
    depends_on_question_id = db.Column(db.Integer(), db.ForeignKey('question.id', use_alter=True), nullable=True)
    show_for_values = db.Column(db.JSON(), nullable=True)

    def __init__(self, application_form_id, name, description, order):
        self.application_form_id = application_form_id
        self.name = name
        self.description = description
        self.order = order
def get_country_list(session):
    """Return every Country row as a choice-option dict.

    :param session: an active SQLAlchemy session bound to the migration engine
    :return: list of {'label': name, 'value': name} dicts, one per country
    """
    # Idiom: list comprehension instead of the manual append loop.
    return [{'label': country.name, 'value': country.name}
            for country in session.query(Country).all()]
def upgrade():
    """Seed the Wangari Maathai Impact Award 2020 event, its application form,
    and all sections/questions. Pure data migration -- no schema changes."""
    # ### commands auto generated by Alembic - please adjust! ###
    # pass
    # ### end Alembic commands ###
    Base.metadata.bind = op.get_bind()
    session = orm.Session(bind=Base.metadata.bind)
    # Event: positional args follow Event.__init__ (name, description, dates,
    # key, organisation_id=1, emails/urls, phase windows, type).
    maathaiimpact2020 = Event('Wangari Maathai Impact Award 2020',
    'Wangari Maathai Impact Award 2020',
    datetime.date(2020, 8, 23), datetime.date(2020, 8, 28), 'maathai2020',
    1, 'baobab@deeplearningindaba.com', 'http://www.deeplearningindaba.com',
    datetime.date(2020,3,1), datetime.date(2020,4,17), datetime.date(2020,4,25),
    datetime.date(2020,5,15),datetime.date(2020,1,1), datetime.date(2020,1,1),
    datetime.date(2020,1,1),datetime.date(2020,1,1), datetime.date(2020,1,1),
    datetime.date(2020,1,1), EventType.AWARD)
    session.add(maathaiimpact2020)
    # Commit so the autogenerated primary key is populated before use below.
    session.commit()
    event_id = maathaiimpact2020.id
    application_form = ApplicationForm(event_id, True, True)
    session.add(application_form)
    session.commit()
    app_form_id = application_form.id
    # Section 1: intro / landing section of the form.
    main_section = Section(app_form_id, 'Wangari Maathai Impact Award 2020', """
This is the official application form for the Wangari Maathai Impact Award 2020, an award to encourage and recognise work by African innovators that shows impactful application of machine learning and artificial intelligence. This award will be made at the Deep Learning Indaba in Tunis, Tunisia in August 2020.
This application will require:
- Personal details about the nominee,
- Details about the impactful work, including why it is impactful, who it impacts and why is it innovative,
- Details of 2 people other than the nominator to provide supporting letters for the nominee
For eligibility criteria for the Maathai Award, please see www.deeplearningindaba.com/maathai-2020
For any queries, please email awards@deeplearningindaba.com.
""", 1)
    session.add(main_section)
    session.commit()
    # Q1 drives conditional display of the nominator section and several
    # nominee questions (self-nomination vs nominating someone else).
    q1_nomination_capacity = Question(
        application_form_id=app_form_id,
        section_id=main_section.id,
        headline='Nominating Capacity',
        placeholder='',
        order=1,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=[
            {'label':'Self-nomination', 'value':'self'},
            {'label':'Nomination on behalf of a candidate','value':'other'}
        ]
    )
    session.add(q1_nomination_capacity)
    session.commit()
    # Section 2: only shown when nominating someone else.
    nominator_information = Section(app_form_id, 'Nominator Information',"""
Details of the person nominating an individual, team or organisation
""",2)
    nominator_information.depends_on_question_id = q1_nomination_capacity.id
    nominator_information.show_for_values = ['other']
    session.add(nominator_information)
    session.commit()
    nominator_q1 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Affiliation',
        placeholder='Affiliation',
        order=1,
        questionType='short-text',
        validation_regex=None,
        is_required=True,
        description='(university, institute, company, etc)'
    )
    nominator_q2 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Department',
        placeholder='Department',
        order=2,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominator_q3 = Question(
        application_form_id=app_form_id,
        section_id=nominator_information.id,
        headline='Describe your relationship to the nominee',
        placeholder='',
        order=3,
        questionType='long-text',
        validation_regex=None,
        is_required=True
    )
    session.add_all([nominator_q1, nominator_q2, nominator_q3])
    session.commit()
    # Section 3: nominee details. Name/contact questions are hidden for
    # self-nominations (that data comes from the user's account).
    nominee_information = Section(app_form_id, 'Nominee Information',"""
Details of the nominated individual, team or organisation to be considered for the award. For any teams/organisations, details of the principal contact should be entered below.
""",3)
    session.add(nominee_information)
    session.commit()
    nominee_q1 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Title',
        placeholder='Title',
        order=1,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q1.depends_on_question_id = q1_nomination_capacity.id
    nominee_q1.show_for_values = ['other']
    nominee_q2 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Firstname',
        placeholder='Firstname',
        order=2,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q2.depends_on_question_id = q1_nomination_capacity.id
    nominee_q2.show_for_values = ['other']
    nominee_q3 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Lastname',
        placeholder='Lastname',
        order=3,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q3.depends_on_question_id = q1_nomination_capacity.id
    nominee_q3.show_for_values = ['other']
    nominee_q4 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Email Address',
        placeholder='Email Address',
        order=4,
        questionType='short-text',
        validation_regex=None,
        is_required=True
    )
    nominee_q4.depends_on_question_id = q1_nomination_capacity.id
    nominee_q4.show_for_values = ['other']
    nominee_q5 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Affiliation',
        placeholder='Affiliation',
        order=5,
        questionType='short-text',
        validation_regex=None,
        is_required=True,
        description='(university, institute, company, etc)'
    )
    nominee_q6 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='If a team/organisation, names of team members',
        placeholder='Names of team members',
        order=6,
        questionType='short-text',
        validation_regex=None,
        is_required=False
    )
    nominee_q7 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Country of Residence',
        placeholder='Choose an option',
        order=7,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=get_country_list(session)
    )
    nominee_q8 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Nationality',
        placeholder='Choose an option',
        order=8,
        questionType='multi-choice',
        validation_regex=None,
        is_required=True,
        options=get_country_list(session)
    )
    nominee_q9 = Question(
        application_form_id=app_form_id,
        section_id=nominee_information.id,
        headline='Website (or other online presence)',
        placeholder='Enter a URL',
        order=9,
        questionType='short-text',
        validation_regex=None,
        is_required=False
    )
    session.add_all([nominee_q1,nominee_q2,nominee_q3,nominee_q4,nominee_q5,
                     nominee_q6,nominee_q7,nominee_q8,nominee_q9])
    session.commit()
    # NOTE(review): order=3 here duplicates the Nominee Information section's
    # order above -- confirm whether this should be 4 (and the next section 5).
    impact_info = Section(app_form_id, 'Information about impactful work','',3)
    session.add(impact_info)
    session.commit()
    # The validation regexes below enforce word-count ranges (whitespace-
    # separated tokens), e.g. {300,500} words.
    impact_q1 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='What impactful work or project is the team/individual doing?',
        placeholder='Enter 300-500 words',
        order=1,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){300,500}$',
        is_required=True,
        description='Describe the work/project. In particular, describe the role of machine learning and/or artificial intelligence (300-500 words)'
    )
    impact_q2 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='Who does this work impact? Say how.',
        placeholder='Enter 150-200 words',
        order=2,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){150,200}$',
        is_required=True,
        description='Describe who is benefitting from this work (location, how many people etc). Describe how this work is positively affecting this group (150-200 words)'
    )
    impact_q3 = Question(
        application_form_id=app_form_id,
        section_id=impact_info.id,
        headline='Why is this work innovative?',
        placeholder='Enter 150-200 words',
        order=3,
        questionType='long-text',
        validation_regex=r'^\s*(\S+(\s+|$)){150,200}$',
        is_required=True,
        description='Describe the novel parts of the work, what difference it is making, or how it is moving Africa forwards (150-200 words)'
    )
    session.add_all([impact_q1,impact_q2,impact_q3])
    session.commit()
    supporting_docs = Section(app_form_id, 'Supporting Documentation', """
If this is a self-nomination, two supporting letters are required, otherwise one supporting letter is sufficient. The supporting letters should describe the nature of the impactful work, why it is considered to be impactful, and in what way the candidate strengthens African machine learning, and any other relevant information. Letter writers can be from anyone familiar with the impactful work.
Supporting letters should be 600 words at most, written in English, and submitted electronically in PDF by the closing date through Baobab
""",4)
    session.add(supporting_docs)
    session.commit()
    supporting_docs_q1 = Question(
        application_form_id=app_form_id,
        section_id=supporting_docs.id,
        headline='Add the details of the 1 or 2 people who will provide supporting letters.',
        placeholder='',
        order=1,
        questionType='reference',
        validation_regex=None,
        is_required=True,
        description='Add at least two people if this is a self nomination and at least one if you are nominating someone else.',
        options={'min_num_referral': 1, 'max_num_referral': 3}
    )
    supporting_docs_q2 = Question(
        application_form_id=app_form_id,
        section_id=supporting_docs.id,
        headline='Additional comments',
        placeholder='',
        order=2,
        questionType='long-text',
        validation_regex=None,
        is_required=False,
        description='Use this space to provide any additional details which you feel are relevant to this nomination and have not been captured by this form.'
    )
    session.add_all([supporting_docs_q1, supporting_docs_q2])
    session.commit()
def downgrade():
    """Remove the Maathai 2020 event, its application form, and all related
    sections/questions (reverse of :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # pass
    # ### end Alembic commands ###
    Base.metadata.bind = op.get_bind()
    session = orm.Session(bind=Base.metadata.bind)
    event = session.query(Event).filter_by(key='maathai2020').first()
    app_form = session.query(ApplicationForm).filter_by(event_id=event.id).first()
    # Clear the section -> question reference first so the bulk question
    # delete below does not violate the foreign key constraint.
    nominator = session.query(Section).filter_by(name='Nominator Information').first()
    nominator.depends_on_question_id = None
    # Delete children before parents.
    session.query(Question).filter_by(application_form_id=app_form.id).delete()
    session.query(Section).filter_by(application_form_id=app_form.id).delete()
    session.query(ApplicationForm).filter_by(event_id=event.id).delete()
    session.query(Event).filter_by(key='maathai2020').delete()
    session.commit()
import sys
from PyQt5.QtWidgets import (QApplication, QDoubleSpinBox, QLabel, QGridLayout,
QWidget, QToolBar, QStackedWidget, QVBoxLayout,
QRadioButton, QSpacerItem)
from PyQt5.QtCore import Qt
import main
#import ab as main
app = QApplication(sys.argv)
class TestDefaultGUi:
    """Functional tests for the default (two uncoupled singlets) view of the
    pyDNMR GUI. Relies on the module-level QApplication created above."""

    def setup(self):
        # Fresh GUI instance per test; the default stacked-widget page is the
        # two-singlets view.
        self.ui = main.DnmrGui()
        self.activeView = self.ui.stackedWidget.currentWidget()
        # create lists of names for required widgets
        self.doublespinboxlist = [widget.key for widget in main.twosinglets_vars]
        labellist = [box + '_label' for box in self.doublespinboxlist]
        widgetlist = self.doublespinboxlist + labellist
        # Dictionaries match widget names to widget objects.
        # findChild can use tuples of object types, but my first pass at
        # using it failed (returned 4 copies of wb and 4 copies of k).
        # assembling piecewise:
        boxdict = {widget: self.ui.findChild(QDoubleSpinBox, widget)
                   for widget in self.doublespinboxlist}
        labeldict = {widget: self.ui.findChild(QLabel, widget)
                     for widget in labellist}
        self.widgetdict = {**boxdict, **labeldict}
        print('box list:', self.doublespinboxlist)
        print('box dict:', boxdict)
        print('label list:', labellist)
        print('label dict:', labeldict)
        print('widget list:', widgetlist)
        print('widget dict:', self.widgetdict)

    def test_title(self):
        """The user launches the app and sees that it is named 'pyDNMR'"""
        appTitle = self.ui.windowTitle()
        assert appTitle == 'pyDNMR'

    def test_find_active_view(self):
        """The user sees that the default main window is designed for the
        simulation of two singlets."""
        assert self.activeView.objectName() == 'twosingletswidget'

    def test_two_singlets_all_widgets_exist(self):
        """The user sees 5 labels and 5 'double spin box' numerical entries
        corresponding to the 5 inputs needed for the two singlets simulation.
        """
        for required_widget in main.twosinglets_vars:
            widgetBox = self.activeView.findChild(QDoubleSpinBox,
                                                  required_widget.key)
            assert widgetBox.value() == required_widget.value
            widgetLabel = self.activeView.findChild(
                QLabel, required_widget.key + '_label')
            assert widgetLabel.text() == required_widget.string

    def test_two_singlets_grid_layout(self):
        """The user sees that the widgets are in a 2 x 5 grid of
        numerical entries (bottom row) with corresponding labels (top row).
        The labels have the correct text, and the numerical entries are
        correct for the default system.
        """
        layout = self.ui.findChild(QGridLayout, 'twosingletslayout')
        widgetlist = main.twosinglets_vars
        for i, widget in enumerate(widgetlist):
            widgetLabel = layout.itemAtPosition(0, i).widget()
            widgetBox = layout.itemAtPosition(1, i).widget()
            assert widgetLabel.text() == widget.string
            assert widgetBox.value() == widget.value

    def test_graph_spans_bottom_of_frame(self):
        """The user sees a graph object below the entry widgets, filling the
        bottom of the frame. Its data matches that of the default system.
        """
        # TODO: learn how to assert a widget is a certain class
        # TODO: split into multiple tests
        # Test doesn't test for correct graph widget, just contents
        layout = self.ui.findChild(QGridLayout, 'twosingletslayout')
        widget_2_0 = layout.itemAtPosition(2, 0).widget()
        data = widget_2_0.listDataItems()
        # Can't figure out how to find out what the graph data is, to compare
        # to an accepted set. For now, let's try to assure that the data
        # returned from all 6 cells of bottom row are the same.
        # TODO: learn how to retrieve data from pyqtgraph plot
        for i in range(1, 6):
            # using 'is' instead of '==' in next line didn't quite work
            assert data == layout.itemAtPosition(2, i).widget().listDataItems()
        # test that 7th column is empty
        # NOTE(review): this check is ineffective -- itemAtPosition returns
        # None for an empty cell, so .widget() raises AttributeError exactly
        # when the column IS empty, and the bare `except Exception` also
        # swallows AssertionError, so the loop can only ever print; it can
        # never fail the test. Also, the comment says "7th column" while the
        # code inspects index 5 (the 6th), which the loop above expects to
        # hold the graph. TODO confirm intent and rewrite without try/except.
        for j in range(3):
            try:
                found_widget = layout.itemAtPosition(j, 5).widget()
                assert not found_widget
            except Exception:
                print("Unexpected widgets found in column 7")

    def test_status_bar_ready(self):
        """The user sees a 'Ready' status indicator at the bottom of the app
        window.
        """
        statusbarText = self.ui.statusBar().currentMessage()
        assert statusbarText == 'Ready'

    def test_twiddle_buttons(self):
        """The user changed values in all of the numerical entries up and
        down, and the program didn't crash.
        """
        # Net change is 0 for values > 10; smaller values clamp at the
        # spinbox minimum of 0.01 on the -20 step, ending at 10.01.
        for key in self.doublespinboxlist:
            widget = self.widgetdict[key]
            widget.setValue(widget.value() + 10)
            widget.setValue(widget.value() - 20)
            widget.setValue(widget.value() + 10)
        for widget in main.twosinglets_vars:
            if widget.value > 10:
                assert self.widgetdict[widget.key].value() == widget.value
            else:
                assert self.widgetdict[widget.key].value() == 10.01
                # k and wa/b widgets should not go below 0.01

    # tests below were used as part of debugging, but retained because they
    # may detect a drastic change to the GUI
    def test_find_stackedwidget(self):
        foundStackedWidget = self.ui.findChild(QStackedWidget, 'stackedwidget')
        print('Found stackedwidget with parent',
              foundStackedWidget.parent(),
              foundStackedWidget.parent().objectName())

    def test_find_two_singlets_widget(self):
        foundTwoSingletWidget = self.ui.findChild(QWidget, 'twosingletswidget')
        print(type(foundTwoSingletWidget))
        print('Found twosingletwidget with parent',
              foundTwoSingletWidget.parent(),
              foundTwoSingletWidget.parent().objectName())

    def test_find_two_singlets_layout(self):
        foundLayout = self.ui.findChild(QGridLayout, 'twosingletslayout')
        if foundLayout:
            print('Found layout named', foundLayout.objectName())
            print('Found layout has parent:', foundLayout.parent(),
                  foundLayout.parent().objectName())
        else:
            foundLayout = self.ui.centralWidget().layout()
            print('Did not find QGridLayout child named "twosingletlayout"')
            print('Found layout named', foundLayout.objectName())
            print('Found layout has parent:', foundLayout.parent(),
                  foundLayout.parent().objectName())

    def test_find_AB_widget(self):
        foundABWidget = self.ui.findChild(QWidget, 'abwidget')
        print(type(foundABWidget))
        print('Found ABwidget with parent',
              foundABWidget.parent(),
              foundABWidget.parent().objectName())

    def test_child_parent_map(self):
        # Walks up the parent chain from the 'va' spinbox, printing each hop,
        # until .objectName() fails (top-level parent is None).
        foundVaBox = self.ui.findChild(QDoubleSpinBox, 'va')
        current_widget = foundVaBox
        end = False
        while not end:
            try:
                print('Widget ', current_widget.objectName(), ' has parent ',
                      current_widget.parent().objectName())
                current_widget = current_widget.parent()
            except Exception:
                print('The parent of', current_widget.objectName(),
                      'is of type', type(current_widget.parent()))
                print('No more parents.')
                end = True

    def teardown(self):
        pass
class TestViewSwitch:
    """Checks the left toolbar's model-selection controls and the switch
    between the two-singlets view and the AB view."""

    def setup(self):
        self.ui = main.DnmrGui()
        self.toolbar = self.ui.findChild(QToolBar, 'lefttoolbar')

    def test_left_toolbar_exists(self):
        """The user sees that there is a bar to the left of the main window.
        """
        area = self.ui.toolBarArea(self.toolbar)
        assert area == Qt.LeftToolBarArea

    def test_left_toolbar_has_layout(self):
        """The user sees that the toolbar has a vertical layout."""
        found_layout = self.toolbar.findChild(QVBoxLayout, 'modelslayout')
        assert found_layout

    def test_find_model_selection(self):
        """The user sees a menu of radio buttons with a label saying
        'Select Model:'
        """
        models_widget = self.toolbar.findChild(QWidget, 'modelswidget')
        for child in models_widget.children():
            print(type(child))
            if isinstance(child, QRadioButton):
                print('QRadioButton', child.text())
            elif isinstance(child, QLabel):
                print('QLabel', child.text())
                assert child.text() == 'Select Model:'
            else:
                print('Also found widget of type: ', type(child))

    def test_toolbar_layout(self):
        """The user sees, centered vertically in the toolbar, a label
        instructing them to select a model, followed by a radio button to
        select the two singlet model followed by a radio button to select the
        AB model.
        """
        layout = self.toolbar.findChild(QVBoxLayout, 'modelslayout')
        assert layout.count() == 5
        # Spacer items at both ends keep the controls vertically centered.
        assert isinstance(layout.itemAt(0), QSpacerItem)
        assert isinstance(layout.itemAt(4), QSpacerItem)
        observed = [layout.itemAt(pos).widget().text() for pos in (1, 2, 3)]
        assert observed == ['Select Model:',
                            'Two uncoupled spin-1/2 nuclei',
                            'Two coupled spin-1/2 nuclei\n ("AB quartet)']

    def test_view_switch(self):
        """The user clicks on the radio button for the AB model, and sees that
        the view has changed to that for the AB model.
        """
        view_before = self.ui.stackedWidget.currentWidget()
        self.toolbar.findChild(QRadioButton, 'abbutton').click()
        view_after = self.ui.stackedWidget.currentWidget()
        assert view_after is not view_before
        assert view_after.objectName() == 'abwidget'

    # function below was used for debugging, and retained because it may detect
    # a drastic change in GUI structure
    def test_child_parent_map(self):
        node = self.toolbar.findChild(QVBoxLayout, 'modelslayout')
        while True:
            try:
                print('Widget ', node.objectName(), ' has parent ',
                      node.parent().objectName())
                node = node.parent()
            except Exception:
                print('The parent of', node.objectName(),
                      'is of type', type(node.parent()))
                print('No more parents.')
                break
class TestABGUi:
    """GUI tests for the AB (two coupled spin-1/2 nuclei) model view."""

    def setup(self):
        self.ui = main.DnmrGui()
        self.ui.stackedWidget.setCurrentIndex(1)  # set view to AB model
        self.boxlist = [widget.key for widget in main.ab_vars]
        labellist = [box + '_label' for box in self.boxlist]
        widgetlist = self.boxlist + labellist
        boxdict = {name: self.ui.findChild(QDoubleSpinBox, name)
                   for name in self.boxlist}
        labeldict = {name: self.ui.findChild(QLabel, name)
                     for name in labellist}
        self.widgetdict = {**boxdict, **labeldict}
        # Debug dump of everything that was looked up.
        for heading, obj in (('box list:', self.boxlist),
                             ('box dict:', boxdict),
                             ('label list:', labellist),
                             ('label dict:', labeldict),
                             ('widget list:', widgetlist),
                             ('widget dict:', self.widgetdict)):
            print(heading, obj)

    def test_starting_with_ab_view(self):
        """The AB view is the active page after setup()."""
        current = self.ui.stackedWidget.currentWidget()
        assert current.objectName() == 'abwidget'

    def test_ab_all_widgets_exist(self):
        """The user sees 5 labels and 5 'double spin box' numerical entries
        corresponding to the 5 inputs needed for the AB simulation.
        """
        # NOTE(review): this iterates main.twosinglets_vars inside the AB
        # test class -- confirm that is intentional and not a copy-paste
        # from the two-singlet tests.
        for var in main.twosinglets_vars:
            box = self.ui.findChild(QDoubleSpinBox, var.key)
            label = self.ui.findChild(QLabel, var.key + '_label')
            assert box.value() == var.value
            assert label.text() == var.string

    def test_two_singlet_grid_layout(self):
        """The user sees that the widgets are in a 2 x 5 grid of
        numerical entries (bottom row) with corresponding labels (top row).
        The labels have the correct text, and the numerical entries are
        correct for the default system.
        """
        layout = self.ui.findChild(QGridLayout, 'ablayout')
        for col, var in enumerate(main.ab_vars):
            label = layout.itemAtPosition(0, col).widget()
            box = layout.itemAtPosition(1, col).widget()
            assert label.text() == var.string
            assert box.value() == var.value

    def test_graph_spans_bottom_of_frame(self):
        """The user sees a graph object below the entry widgets, filling the
        bottom of the frame. Its data matches that of the default system.
        """
        # TODO: learn how to assert a widget is a certain class
        # Test doesn't check for the correct graph widget, just contents
        layout = self.ui.findChild(QGridLayout, 'ablayout')
        reference = layout.itemAtPosition(2, 0).widget().listDataItems()
        # Can't retrieve the raw graph data for comparison to an accepted
        # set, so instead verify all six cells of the bottom row agree.
        # TODO: learn how to retrieve data from pyqtgraph plot
        for col in range(1, 6):
            # using 'is' instead of '==' in next line didn't quite work
            assert reference == \
                layout.itemAtPosition(2, col).widget().listDataItems()
        # test that 7th column is empty
        for row in range(3):
            try:
                assert not layout.itemAtPosition(row, 5).widget()
            except Exception:
                print("Unexpected widgets found in column 7")

    def test_status_bar_ready(self):
        """The user sees a 'Ready' status indicator at the bottom of the app
        window.
        """
        assert self.ui.statusBar().currentMessage() == 'Ready'

    def test_twiddle_buttons(self):
        """The user changed values in all of the numerical entries up and
        down, and the program didn't crash.
        """
        for key in self.boxlist:
            box = self.widgetdict[key]
            # success of test depends on WINDNMR's default values used,
            # i.e. test that j_ab, k_ab and wa + 20 - 40 don't go below 0
            for delta in (20, -40, 20):
                box.setValue(box.value() + delta)
        for var in main.ab_vars:
            print(var.value, 'vs. ', self.widgetdict[var.key].value())
            if var.key in ('k_ab', 'wa'):
                # k and wa widgets should not go below 0.01
                assert self.widgetdict[var.key].value() == 20.01
            elif var.key == 'j_ab':
                # j_ab should not go below 0
                assert self.widgetdict[var.key].value() == 20.00
            else:
                assert self.widgetdict[var.key].value() == var.value

    def teardown(self):
        pass
import typing
from lemon.exception import GeneralException
from lemon.request import Request
from lemon.response import Response
class Context:
    """Stores the current request and response for one handler call.

    All per-request information is reachable through ``ctx`` in a handler
    function. ``ctx.body`` and ``ctx.status`` are aliases that proxy to
    ``ctx.res.body`` and ``ctx.res.status``.
    """
    def __init__(self) -> None:
        # current request; populated by the framework before handlers run
        self.req: typing.Optional[Request] = None
        # response under construction
        self.res: Response = Response()
        # store middleware communication message
        self.state: dict = {}
        # parsed route parameters, if any
        self.params: typing.Optional[dict] = None

    def __setattr__(self, key: str, value: typing.Any) -> None:
        # alias: ctx.body / ctx.status write through to the response
        if key == 'body':
            self.res.body = value
        elif key == 'status':
            self.res.status = value
        else:
            self.__dict__[key] = value

    def __getattr__(self, item: str) -> typing.Any:
        # alias: ctx.body / ctx.status read through to the response
        if item == 'body':
            return self.res.body
        elif item == 'status':
            return self.res.status
        try:
            return self.__dict__[item]
        except KeyError:
            # BUGFIX: __getattr__ must raise AttributeError, not KeyError,
            # so that hasattr() and getattr(..., default) work for missing
            # attributes instead of propagating an unexpected KeyError.
            raise AttributeError(item) from None

    @staticmethod
    def throw(status: int, body: typing.Union[str, dict] = None) -> None:
        """Abort handling by raising GeneralException with *status*/*body*."""
        raise GeneralException(status=status, body=body)
|
# Read five integers, inserting each into its proper (ascending) position
# in `numbers`, and report where each one landed.
numbers = []
last = 0  # running maximum of the values appended at the end
# Single loop counter replaces the original redundant c/n pair
# (the `for c in range(...)` variable was never used).
for n in range(5):
    num = int(input(f'{n + 1}º number: '))
    if n == 0 or num > last:
        # first value, or larger than the current maximum: append at the end
        last = num
        numbers.append(num)
        print('The number was inserted at the final of the list.')
    else:
        # scan for the first element >= num and insert in front of it
        position = 0
        while position < len(numbers):
            if num <= numbers[position]:
                numbers.insert(position, num)
                break
            position += 1
        print(f'The number was inserted at the {position + 1}º position of the list.')
print(f'The inserted numbers, in order, are: {numbers}')
|
# -*- coding: iso-8859-15 -*-
# -*- Mode: Python; py-ident-offset: 4 -*-
# vim:ts=4:sw=4:et
'''
rpm definitions
Mario Morgado (BSD) https://github.com/mjvm/pyrpm
'''
# Extract the bare revision number from the SVN keyword expansion.
__revision__ = '$Rev$'[6:-2]

# Magic byte patterns identifying the RPM lead and header sections.
# NOTE(review): these are py2-era str literals of raw byte values; under
# Python 3 they are text strings -- confirm how the parser compares them.
RPM_LEAD_MAGIC_NUMBER = '\xed\xab\xee\xdb'
RPM_HEADER_MAGIC_NUMBER = '\x8e\xad\xe8'

# Valid range for RPM header tag numbers.
RPMTAG_MIN_NUMBER = 1000
RPMTAG_MAX_NUMBER = 1146

# signature tags
RPMSIGTAG_SIZE = 1000
RPMSIGTAG_LEMD5_1 = 1001
RPMSIGTAG_PGP = 1002
RPMSIGTAG_LEMD5_2 = 1003
RPMSIGTAG_MD5 = 1004
RPMSIGTAG_GPG = 1005
RPMSIGTAG_PGP5 = 1006

# Fixed sizes of the signature digests, in bytes.
MD5_SIZE = 16 #16 bytes long
PGP_SIZE = 152 #152 bytes long

# data types definition (RPM header entry type codes)
RPM_DATA_TYPE_NULL = 0
RPM_DATA_TYPE_CHAR = 1
RPM_DATA_TYPE_INT8 = 2
RPM_DATA_TYPE_INT16 = 3
RPM_DATA_TYPE_INT32 = 4
RPM_DATA_TYPE_INT64 = 5
RPM_DATA_TYPE_STRING = 6
RPM_DATA_TYPE_BIN = 7
RPM_DATA_TYPE_STRING_ARRAY = 8
RPM_DATA_TYPE_I18NSTRING_TYPE = 9

# Tuple of the type codes recognized by the parser.
# NOTE(review): RPM_DATA_TYPE_I18NSTRING_TYPE (9) is defined above but not
# listed here -- confirm whether its omission is intentional (i.e. the
# parser deliberately does not support i18n strings).
RPM_DATA_TYPES = (RPM_DATA_TYPE_NULL,
                 RPM_DATA_TYPE_CHAR,
                 RPM_DATA_TYPE_INT8,
                 RPM_DATA_TYPE_INT16,
                 RPM_DATA_TYPE_INT32,
                 RPM_DATA_TYPE_INT64,
                 RPM_DATA_TYPE_STRING,
                 RPM_DATA_TYPE_BIN,
                 RPM_DATA_TYPE_STRING_ARRAY,)

# Subset of header tags this module cares about.
RPMTAG_NAME = 1000
RPMTAG_VERSION = 1001
RPMTAG_RELEASE = 1002
RPMTAG_DESCRIPTION = 1005
RPMTAG_COPYRIGHT = 1014
RPMTAG_URL = 1020
RPMTAG_ARCH = 1022

RPMTAGS = (RPMTAG_NAME,
           RPMTAG_VERSION,
           RPMTAG_RELEASE,
           RPMTAG_DESCRIPTION,
           RPMTAG_COPYRIGHT,
           RPMTAG_URL,
           RPMTAG_ARCH,)
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Shell commands for the log service
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import itertools
import logging
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Instantiate, PostRegistration
from pelix.misc import LOG_READER_SERVICE
from pelix.shell import SERVICE_SHELL_COMMAND
# ------------------------------------------------------------------------------
# Module version (kept in sync with the documented release, 0.6.4)
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format (used by doc tools)
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
@ComponentFactory("pelix-shell-log-factory")
@Provides(SERVICE_SHELL_COMMAND)
@Requires("_logger", LOG_READER_SERVICE, optional=True)
@Instantiate("pelix-shell-log-factory")
class ShellLogCommand(object):
    """
    Provides shell commands to print the content of the log service
    """
    def __init__(self):
        """
        Sets up members
        """
        # Log reader service (optional requirement: may remain None)
        self._logger = None
        # ServiceReference of this component, set after registration
        self.__svc_ref = None

    @PostRegistration
    def _post_register(self, svc_ref):
        """
        Called when the service has been provided

        :param svc_ref: The ServiceReference of the provided service
        """
        self.__svc_ref = svc_ref

    @staticmethod
    def get_namespace():
        """
        Returns the name space of the commands
        """
        return "log"

    def get_methods(self):
        """
        Returns the methods of the shell command
        """
        return [("log", self._log),
                ("debug", self._debug),
                ("info", self._info),
                ("warn", self._warning),
                ("warning", self._warning),
                ("error", self._error)]

    def _log(self, session, level="WARNING", count=None):
        """
        Prints the content of the log

        :param session: The shell Session object
        :param level: Minimum log level to display (name or number)
        :param count: Number of most recent entries to print; None or an
                      invalid value prints all matching entries
        """
        if self._logger is None:
            session.write_line("No LogService available.")
            return

        # Normalize the level: accept a level name, fall back to WARNING
        if isinstance(level, str):
            level = logging.getLevelName(level.upper())
        if not isinstance(level, int):
            # getLevelName() returns a string for unknown level names
            level = logging.WARNING

        # Normalize the count: 0 means "no limit" ([-0:] keeps everything)
        if count is not None:
            try:
                count = int(count)
            except (TypeError, ValueError):
                count = 0
        else:
            count = 0

        # Filter the entries and keep the last ones only
        try:
            for entry in [entry for entry in self._logger.get_log()
                          if entry.level >= level][-count:]:
                session.write_line(str(entry))
        except StopIteration:
            pass

    def _trace(self, session, level, words):
        """
        Logs a message using the log service

        :param session: The shell Session object
        :param level: Log level (integer, from the logging module)
        :param words: Message to log, as a sequence of words
        """
        if self._logger is not None:
            self._logger.log(level, ' '.join(str(word) for word in words),
                             None, self.__svc_ref)
        else:
            session.write_line("No LogService available.")

    def _debug(self, session, *message):
        """
        Logs a debug message
        """
        self._trace(session, logging.DEBUG, message)

    def _info(self, session, *message):
        """
        Logs an informative message
        """
        self._trace(session, logging.INFO, message)

    def _warning(self, session, *message):
        """
        Logs a warning message
        """
        self._trace(session, logging.WARNING, message)

    def _error(self, session, *message):
        """
        Logs an error message
        """
        self._trace(session, logging.ERROR, message)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.