content stringlengths 5 1.05M |
|---|
"""One-shot deployment script: installs requirements, sets the Hasura cluster
name, commits the project, pushes it to the 'hasura' remote, and finally opens
the deployed UI in a browser."""
import os
import time
import webbrowser

import yaml

print("Beginning set-up")
os.system("pip3 install -r requirements.txt")
print("Step1-> setting clustername")
os.system("python3 settings.py")
print("Step2-> adding the git repository")
os.system("git add .")
print("Step 3->commit and pushing to hasura cluster")
os.system("git commit -m'Initial Commit'")

# Retry the push until it succeeds; the remote may still be redeploying and
# reject the first few attempts.
status = os.system("git push hasura master")
if status != 0:
    print("Remote server: ERROR. Redeploying, it may take a few seconds...")
while status != 0:
    time.sleep(1)
    status = os.system("git push hasura master")

# Read the cluster name back from the config written by settings.py.
clustername = None
with open("clusters.yaml", "r") as stream:
    try:
        # safe_load: never use yaml.load (arbitrary object construction)
        # on files that could be tampered with.
        clustername = yaml.safe_load(stream)[0]['name']
        print(clustername)
    except yaml.YAMLError as exc:
        print(exc)

print("Deployment successful!!")
if clustername is not None:
    url = "https://ui." + clustername + ".hasura-app.io/"
    print("Trying to open the url..Hold on.........")
    time.sleep(10)
    try:
        webbrowser.open(url)
    except webbrowser.Error:
        print("Opening failed.Try " + url + " opening manually..")
|
from torchvision import models, transforms
from torchvision.datasets import ImageFolder
from medcam import medcam
import torch
import cv2
from torch.utils.data import DataLoader
import gc
import shutil
import os
import unittest
# When True, each test deletes its results directory after running.
CLEAR = True
class TestClassification(unittest.TestCase):
    """Smoke tests for medcam attention-map backends on a pretrained ResNet-152.

    Each test injects medcam with one backend, runs every image in the local
    ``data`` folder through the model once so the maps get written to disk,
    and then (when ``CLEAR`` is set) removes the results directory again.
    """

    def __init__(self, *args, **kwargs):
        super(TestClassification, self).__init__(*args, **kwargs)
        self.DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.current_path = os.path.dirname(os.path.abspath(__file__))
        self.dataset = ImageFolder(os.path.join(self.current_path, 'data'), loader=self.load_image)
        self.model = models.resnet152(pretrained=True)
        self.model.to(device=self.DEVICE)
        self.model.eval()

    def load_image(self, image_path):
        """Load an image as a (normalized tensor, raw BGR image) pair."""
        raw_image = cv2.imread(image_path)
        raw_image = cv2.resize(raw_image, (224,) * 2)
        image = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ]
        )(raw_image[..., ::-1].copy())  # BGR -> RGB before normalizing
        image = image.to(self.DEVICE)
        return image, raw_image

    def _run_backend(self, test_name, backend, overlay=False):
        """Inject medcam with *backend*, run the dataset once, then clean up.

        Args:
            test_name: subdirectory name under results/resnet152.
            backend: medcam backend key ('gbp', 'gcam', 'ggcam', 'gcampp').
            overlay: when True, also pass the raw image for overlay rendering.
        """
        model = medcam.inject(
            self.model,
            output_dir=os.path.join(self.current_path, 'results/resnet152', test_name),
            backend=backend,
            evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
        model.eval()
        data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
        for i, batch in enumerate(data_loader):
            if overlay:
                _ = model(batch[0][0], raw_input=batch[0][1])
            else:
                _ = model(batch[0][0])
        # Free the injected copy before the next backend runs.
        del model
        gc.collect()
        torch.cuda.empty_cache()
        if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
            shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))

    def test_gbp(self):
        self._run_backend('test_gbp', 'gbp')

    def test_gcam(self):
        self._run_backend('test_gcam', 'gcam')

    def test_gcam_overlay(self):
        self._run_backend('test_gcam_overlay', 'gcam', overlay=True)

    def test_ggcam(self):
        self._run_backend('test_ggcam', 'ggcam')

    def test_gcampp(self):
        self._run_backend('test_gcampp', 'gcampp')
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
import phonenumbers
from phonenumbers import carrier, geocoder
from termcolor import colored
import auxillary
class Local:
    """Offline lookup of a phone number via the `phonenumbers` library.

    Parses the number once on construction and exposes its formatted
    variants plus country and carrier information.
    """

    # initializes the instance attributes
    def __init__(self, phone_no) -> None:
        self.phone_no = phone_no
        self.parse_details = phonenumbers.parse(self.phone_no)
        self.national_no = phonenumbers.format_number(
            self.parse_details, phonenumbers.PhoneNumberFormat.NATIONAL
        )
        self.international_no = phonenumbers.format_number(
            self.parse_details, phonenumbers.PhoneNumberFormat.INTERNATIONAL
        )
        self.e164_no = phonenumbers.format_number(
            self.parse_details, phonenumbers.PhoneNumberFormat.E164
        )
        self.country_code = self.parse_details.country_code
        self.country = geocoder.description_for_number(self.parse_details, "en")
        self.carrier = carrier.name_for_number(self.parse_details, "en")

    # runs the Local lookup process
    def display_results(self, colors):
        """Print the lookup results to the terminal.

        Args:
            colors: three-element sequence of termcolor color names for
                the heading, the labels, and the values respectively.
        """
        print()
        auxillary.line()
        print(colored("Local Lookup Results:", colors[0]))
        auxillary.line()
        print(
            colored("National format : ", colors[1]),
            colored(self.national_no, colors[2]),
        )
        print(
            colored("E164 format : ", colors[1]),
            colored(self.e164_no, colors[2]),
        )
        print(
            colored("International format: ", colors[1]),
            colored(self.international_no, colors[2]),
        )
        print(
            colored("Country Code : ", colors[1]),
            colored(self.country_code, colors[2]),
        )
        print(
            colored("Country : ", colors[1]),
            colored(self.country, colors[2]),
        )
        print(
            colored("Carrier : ", colors[1]),
            colored(self.carrier, colors[2]),
        )
        auxillary.line()

    # sets results to be displayed in the web-UI
    # creation of a dictionary for easier referencing
    def set_results(self):
        self.heading = "Local Lookup"
        # BUG FIX: a trailing comma previously turned this assignment into a
        # 1-tuple wrapping the dict; the mapping itself is what callers need.
        self.dictionary = {
            "Number": self.phone_no,
            "National format": self.national_no,
            "International format": self.international_no,
            "E164 format": self.e164_no,
            "Country code": self.country_code,
            "Country": self.country,
            "Carrier": self.carrier,
        }

    # returns results to be displayed in the web-UI
    # NOTE: the returned value is a tuple consisting of the heading for the lookup and a dictionary (for mapping)
    def get_results(self):
        return (self.heading, self.dictionary)
|
from collections import deque
import torch
# Let cuDNN auto-tune convolution algorithms for fixed-size inputs.
torch.backends.cudnn.benchmark = True
import torch.nn.functional as F
import numpy as np
from core.network import Network
from core.optimizer import Optimizer
from core.buffer import PERBuffer
from .rainbow import Rainbow
class RainbowIQN(Rainbow):
    """Rainbow IQN agent.

    Rainbow variant whose value head outputs quantile estimates at sampled
    points tau (Implicit Quantile Networks) instead of a fixed distribution.

    Args:
        state_size (int): dimension of state.
        action_size (int): dimension of action.
        hidden_size (int): dimension of hidden unit.
        network (str): key of network class in _network_dict.txt.
        head (str): key of head in _head_dict.txt.
        optim_config (dict): dictionary of the optimizer info.
            (key: 'name', value: name of optimizer)
        gamma (float): discount factor.
        buffer_size (int): the size of the memory buffer.
        batch_size (int): the number of samples in the one batch.
        start_train_step (int): steps to start learning.
        target_update_period (int): period to update the target network. (unit: step)
        n_step: number of steps in multi-step Q learning.
        alpha (float): prioritization exponent.
        beta (float): initial value of degree to use importance sampling.
        learn_period (int): period to train (unit: step)
        uniform_sample_prob (float): ratio of uniform random sampling.
        noise_type (str): NoisyNet noise type. One of ['factorized', 'independent']
            ('factorized': Factorized Gaussian Noise, else: Independent Gaussian Noise)
        num_sample (int): number of sample points
        embedding_dim (int): dimension of sample point embedding.
        sample_min (float): quantile minimum thresholds (tau_min).
        sample_max (float): quantile maximum thresholds (tau_max).
        device (str): device to use. (e.g. 'cpu' or 'gpu'. None can also be used, and in this case, the cpu is used.)
        run_step (int): number of run step.
    """

    def __init__(
        self,
        state_size,
        action_size,
        hidden_size=512,
        network="rainbow_iqn",
        head="mlp",
        optim_config={"name": "adam"},
        gamma=0.99,
        explore_ratio=0.1,
        buffer_size=50000,
        batch_size=64,
        start_train_step=2000,
        target_update_period=500,
        # MultiStep
        n_step=4,
        # PER
        alpha=0.6,
        beta=0.4,
        learn_period=4,
        uniform_sample_prob=1e-3,
        # Noisy
        noise_type="factorized",  # [independent, factorized]
        # IQN
        num_sample=64,
        embedding_dim=64,
        sample_min=0.0,
        sample_max=1.0,
        device=None,
        run_step=1e6,
        **kwargs,
    ):
        # Fall back to CUDA when available, CPU otherwise.
        self.device = (
            torch.device(device)
            if device
            else torch.device("cuda" if torch.cuda.is_available() else "cpu")
        )
        self.action_size = action_size
        # Online network.
        self.network = Network(
            network,
            state_size,
            action_size,
            embedding_dim,
            num_sample,
            noise_type,
            D_hidden=hidden_size,
            head=head,
        ).to(self.device)
        # Target network, initialized to a copy of the online weights.
        self.target_network = Network(
            network,
            state_size,
            action_size,
            embedding_dim,
            num_sample,
            noise_type,
            D_hidden=hidden_size,
            head=head,
        ).to(self.device)
        self.target_network.load_state_dict(self.network.state_dict())
        self.optimizer = Optimizer(**optim_config, params=self.network.parameters())
        self.gamma = gamma
        self.explore_step = run_step * explore_ratio
        self.batch_size = batch_size
        self.start_train_step = start_train_step
        self.target_update_stamp = 0
        self.target_update_period = target_update_period
        self.num_learn = 0
        self.time_t = 0
        # MultiStep
        self.n_step = n_step
        self.tmp_buffer = deque(maxlen=n_step)
        # PER
        self.alpha = alpha
        self.beta = beta
        self.learn_period = learn_period
        self.learn_period_stamp = 0
        self.uniform_sample_prob = uniform_sample_prob
        # beta is annealed linearly to 1 over the whole run.
        self.beta_add = (1 - beta) / run_step
        # IQN
        self.num_sample = num_sample
        self.embedding_dim = embedding_dim
        self.sample_min = sample_min
        self.sample_max = sample_max
        # MultiStep
        self.memory = PERBuffer(buffer_size, uniform_sample_prob)

    @torch.no_grad()
    def act(self, state, training=True):
        """Select an action: random before training warm-up, greedy otherwise."""
        self.network.train(training)
        # During training the full quantile range [0, 1] is sampled; at
        # evaluation time the configured [sample_min, sample_max] is used.
        sample_min = 0 if training else self.sample_min
        sample_max = 1 if training else self.sample_max
        if training and self.memory.size < max(self.batch_size, self.start_train_step):
            batch_size = (
                state[0].shape[0] if isinstance(state, list) else state.shape[0]
            )
            action = np.random.randint(0, self.action_size, size=(batch_size, 1))
        else:
            logits, _ = self.network(
                self.as_tensor(state), training, sample_min, sample_max
            )
            _, q_action = self.logits2Q(logits)
            action = torch.argmax(q_action, -1, keepdim=True).cpu().numpy()
        return {"action": action}

    def learn(self):
        """Run one training step on a prioritized batch; return metrics dict."""
        transitions, weights, indices, sampled_p, mean_p = self.memory.sample(
            self.beta, self.batch_size
        )
        for key in transitions.keys():
            transitions[key] = self.as_tensor(transitions[key])
        state = transitions["state"]
        action = transitions["action"]
        reward = transitions["reward"]
        next_state = transitions["next_state"]
        done = transitions["done"]
        # Get Theta Pred, Tau
        logit, tau = self.network(state, True)
        logits, q_action = self.logits2Q(logit)
        action_eye = torch.eye(self.action_size, device=self.device)
        action_onehot = action_eye[action.long()]
        # Select the quantile estimates of the taken actions.
        theta_pred = action_onehot @ logits
        tau = torch.transpose(tau, 1, 2).contiguous()
        with torch.no_grad():
            # Get Theta Target
            # Double-DQN style: the online network picks the argmax action,
            # the target network supplies its quantile values.
            logit_next, _ = self.network(next_state, True)
            _, q_next = self.logits2Q(logit_next)
            logit_target, _ = self.target_network(next_state, True)
            logits_target, _ = self.logits2Q(logit_target)
            max_a = torch.argmax(q_next, axis=-1, keepdim=True)
            max_a_onehot = action_eye[max_a.long()]
            theta_target = torch.squeeze(max_a_onehot @ logits_target, 1)
            # Fold the n-step rewards backwards into the bootstrap target.
            for i in reversed(range(self.n_step)):
                theta_target = (
                    reward[:, i] + (1 - done[:, i]) * self.gamma * theta_target
                )
            theta_target = torch.unsqueeze(theta_target, 2)
        error_loss = theta_target - theta_pred
        huber_loss = F.smooth_l1_loss(
            *torch.broadcast_tensors(theta_pred, theta_target), reduction="none"
        )
        # Get Loss
        # Quantile regression weight: |tau - 1{error < 0}| applied to Huber loss.
        loss = torch.where(error_loss < 0.0, 1 - tau, tau) * huber_loss
        loss = torch.mean(torch.sum(loss, axis=2), axis=1)
        max_Q = torch.max(q_action).item()
        max_logit = torch.max(logit).item()
        min_logit = torch.min(logit).item()
        # PER
        # New priorities are the per-sample losses raised to alpha.
        p_j = torch.pow(loss, self.alpha)
        for i, p in zip(indices, p_j):
            self.memory.update_priority(p.item(), i)
        # Importance-sampling correction before reducing to a scalar loss.
        weights = torch.unsqueeze(torch.FloatTensor(weights).to(self.device), -1)
        loss = (weights * loss).mean()
        self.optimizer.zero_grad(set_to_none=True)
        loss.backward()
        self.optimizer.step()
        self.num_learn += 1
        result = {
            "loss": loss.item(),
            "beta": self.beta,
            "max_Q": max_Q,
            "max_logit": max_logit,
            "min_logit": min_logit,
            "sampled_p": sampled_p,
            "mean_p": mean_p,
        }
        return result

    def logits2Q(self, logits):
        """Convert quantile logits to (transposed logits, mean Q per action)."""
        _logits = torch.transpose(logits, 1, 2).contiguous()
        # Q(s, a) is the mean over the sampled quantiles.
        q_action = torch.mean(_logits, dim=-1)
        return _logits, q_action
|
import cirq
import kc_examples.kc_bell_inequality
import kc_examples.kc_bernstein_vazirani
import kc_examples.kc_grover
# import examples.place_on_bristlecone
# import kc_examples.kc_hello_qubit
import kc_examples.kc_quantum_fourier_transform
# import examples.bcs_mean_field
import kc_examples.kc_phase_estimator
import kc_examples.kc_basic_arithmetic
import kc_examples.kc_quantum_teleportation
import kc_examples.kc_superdense_coding
# Standard test runs do not include performance benchmarks.
# coverage: ignore
def test_example_runs_bernstein_vazirani_perf(benchmark):
    """Benchmark the Bernstein-Vazirani example and sanity-check its oracle."""
    benchmark(kc_examples.kc_bernstein_vazirani.main, qubit_count=3)
    # Check empty oracle case. Cover both biases.
    a = cirq.NamedQubit('a')
    assert list(kc_examples.kc_bernstein_vazirani.make_oracle([], a, [], False)) == []
    assert list(kc_examples.kc_bernstein_vazirani.make_oracle([], a, [], True)) == [cirq.X(a)]
# def test_example_runs_hello_line_perf(benchmark):
# benchmark(examples.place_on_bristlecone.main)
# def test_example_runs_hello_qubit_perf(benchmark):
# benchmark(examples.hello_qubit.main)
def test_example_runs_bell_inequality_perf(benchmark):
    """Benchmark the Bell inequality example end-to-end."""
    benchmark(kc_examples.kc_bell_inequality.main)
def test_example_runs_quantum_fourier_transform_perf(benchmark):
    """Benchmark the quantum Fourier transform example end-to-end."""
    benchmark(kc_examples.kc_quantum_fourier_transform.main)
# def test_example_runs_bcs_mean_field_perf(benchmark):
# benchmark(examples.bcs_mean_field.main)
def test_example_runs_grover_perf(benchmark):
    """Benchmark the Grover search example end-to-end."""
    benchmark(kc_examples.kc_grover.main)
# def test_example_runs_phase_estimator_perf(benchmark):
# benchmark(kc_examples.kc_phase_estimator.main, qnums=(2,), repetitions=2)
def test_example_runs_quantum_teleportation(benchmark):
    """Benchmark the quantum teleportation example end-to-end."""
    benchmark(kc_examples.kc_quantum_teleportation.main)
def test_example_runs_superdense_coding(benchmark):
    """Benchmark the superdense coding example end-to-end."""
    benchmark(kc_examples.kc_superdense_coding.main)
|
import os
import sys
from urllib.parse import urlparse
import click
import filelock
import structlog
from eth_utils import encode_hex, to_canonical_address, to_checksum_address, to_normalized_address
from requests.exceptions import ConnectTimeout
from web3 import HTTPProvider, Web3
from raiden.constants import SQLITE_MIN_REQUIRED_VERSION, Environment
from raiden.exceptions import (
AddressWithoutCode,
AddressWrongContract,
ContractVersionMismatch,
EthNodeCommunicationError,
EthNodeInterfaceError,
RaidenError,
)
from raiden.message_handler import MessageHandler
from raiden.network.blockchain_service import BlockChainService
from raiden.network.discovery import ContractDiscovery
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.throttle import TokenBucket
from raiden.network.transport import MatrixTransport, UDPTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.settings import (
DEFAULT_MATRIX_KNOWN_SERVERS,
DEFAULT_NAT_KEEPALIVE_RETRIES,
DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
)
from raiden.storage.sqlite import RAIDEN_DB_VERSION, assert_sqlite_version
from raiden.utils import is_supported_client, pex, split_endpoint, typing
from raiden.utils.cli import get_matrix_servers
from raiden_contracts.constants import (
CONTRACT_ENDPOINT_REGISTRY,
CONTRACT_SECRET_REGISTRY,
CONTRACT_TOKEN_NETWORK_REGISTRY,
ID_TO_NETWORKNAME,
)
from raiden_contracts.contract_manager import (
ContractManager,
contracts_precompiled_path,
get_contracts_deployed,
)
from .prompt import prompt_account
from .sync import check_discovery_registration_gas, check_synced
# Module-level structured logger.
log = structlog.get_logger(__name__)
def handle_contract_version_mismatch(mismatch_exception: ContractVersionMismatch) -> None:
    """Report a contract version mismatch in red and terminate the process."""
    message = f'{str(mismatch_exception)}. Please update your Raiden installation.'
    click.secho(message, fg='red')
    sys.exit(1)
def handle_contract_no_code(name: str, address: typing.Address) -> None:
    """Exit with an error when the contract at *address* holds no code."""
    checksummed = to_checksum_address(address)
    click.secho(f'Error: Provided {name} {checksummed} contract does not contain code', fg='red')
    sys.exit(1)
def handle_contract_wrong_address(name: str, address: typing.Address) -> None:
    """Exit with an error when *address* does not hold the expected contract."""
    checksummed = to_checksum_address(address)
    message = (
        f'Error: Provided address {checksummed} for {name} contract'
        ' does not contain expected code.'
    )
    click.secho(message, fg='red')
    sys.exit(1)
def _assert_sql_version():
    """Abort startup if the available SQLite is older than the required minimum."""
    if assert_sqlite_version():
        return
    minimum = '{}.{}.{}'.format(*SQLITE_MIN_REQUIRED_VERSION)
    log.error('SQLite3 should be at least version {}'.format(minimum))
    sys.exit(1)
def _setup_web3(eth_rpc_endpoint):
    """Connect to the ethereum node and verify its RPC interface and version."""
    web3 = Web3(HTTPProvider(eth_rpc_endpoint))
    try:
        node_version = web3.version.node  # pylint: disable=no-member
    except ConnectTimeout:
        raise EthNodeCommunicationError("Couldn't connect to the ethereum node")
    except ValueError:
        raise EthNodeInterfaceError(
            'The underlying ethereum node does not have the web3 rpc interface '
            'enabled. Please run it with --rpcapi eth,net,web3,txpool for geth '
            'and --jsonrpc-apis=eth,net,web3,parity for parity.',
        )
    supported, _ = is_supported_client(node_version)
    if supported:
        return web3
    click.secho(
        'You need a Byzantium enabled ethereum node. Parity >= 1.7.6 or Geth >= 1.7.2',
        fg='red',
    )
    sys.exit(1)
def _setup_udp(
        config,
        blockchain_service,
        address,
        contracts,
        endpoint_registry_contract_address,
):
    """Build the UDP transport together with its on-chain endpoint discovery."""
    check_discovery_registration_gas(blockchain_service, address)
    try:
        registry_address = endpoint_registry_contract_address or to_canonical_address(
            contracts[CONTRACT_ENDPOINT_REGISTRY]['address'],
        )
        discovery_proxy = blockchain_service.discovery(registry_address)
        discovery = ContractDiscovery(
            blockchain_service.node_address,
            discovery_proxy,
        )
    except ContractVersionMismatch as e:
        handle_contract_version_mismatch(e)
    except AddressWithoutCode:
        handle_contract_no_code('Endpoint Registry', endpoint_registry_contract_address)
    except AddressWrongContract:
        handle_contract_wrong_address('Endpoint Registry', endpoint_registry_contract_address)

    udp_config = config['transport']['udp']
    throttle_policy = TokenBucket(
        udp_config['throttle_capacity'],
        udp_config['throttle_fill_rate'],
    )
    transport = UDPTransport(
        address,
        discovery,
        config['socket'],
        throttle_policy,
        udp_config,
    )
    return transport, discovery
def _setup_matrix(config):
    """Create the matrix transport, fetching the known-server list if missing."""
    matrix_config = config['transport']['matrix']
    if matrix_config.get('available_servers') is None:
        # fetch list of known servers from raiden-network/raiden-tranport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config['environment_type']]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug('Fetching available matrix servers', available_servers=available_servers)
        matrix_config['available_servers'] = available_servers
    try:
        return MatrixTransport(matrix_config)
    except RaidenError as ex:
        click.secho(f'FATAL: {ex}', fg='red')
        sys.exit(1)
def run_app(
        address,
        keystore_path,
        gas_price,
        eth_rpc_endpoint,
        tokennetwork_registry_contract_address,
        secret_registry_contract_address,
        endpoint_registry_contract_address,
        listen_address,
        mapped_socket,
        max_unresponsive_time,
        api_address,
        rpc,
        sync_check,
        console,
        password_file,
        web_ui,
        datadir,
        transport,
        matrix_server,
        network_id,
        environment_type,
        unrecoverable_error_should_crash,
        config=None,
        extra_config=None,
        **kwargs,
):
    """Assemble and start a Raiden App from CLI arguments.

    Validates the environment (SQLite, ethereum node, network id), unlocks
    the account, resolves the on-chain contracts, builds the chosen transport
    (udp or matrix), then constructs and starts the App.

    Returns the started ``raiden.app.App`` instance; exits the process on any
    fatal configuration or connectivity error.

    NOTE(review): ``config`` defaults to None but is mutated unconditionally
    below — callers are presumably expected to always pass a dict; confirm.
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements,unused-argument
    from raiden.app import App

    _assert_sql_version()

    # UDP needs a NAT-mapped socket prepared by the caller.
    if transport == 'udp' and not mapped_socket:
        raise RuntimeError('Missing socket')

    if datadir is None:
        datadir = os.path.join(os.path.expanduser('~'), '.raiden')

    # Unlock the account and normalize the address forms.
    address_hex = to_normalized_address(address) if address else None
    address_hex, privatekey_bin = prompt_account(address_hex, keystore_path, password_file)
    address = to_canonical_address(address_hex)

    (listen_host, listen_port) = split_endpoint(listen_address)
    (api_host, api_port) = split_endpoint(api_address)

    # --- populate the runtime config dict ---
    config['transport']['udp']['host'] = listen_host
    config['transport']['udp']['port'] = listen_port
    config['console'] = console
    config['rpc'] = rpc
    config['web_ui'] = rpc and web_ui  # web UI requires the RPC API
    config['api_host'] = api_host
    config['api_port'] = api_port
    if mapped_socket:
        config['socket'] = mapped_socket.socket
        config['transport']['udp']['external_ip'] = mapped_socket.external_ip
        config['transport']['udp']['external_port'] = mapped_socket.external_port
    config['transport_type'] = transport
    config['transport']['matrix']['server'] = matrix_server
    config['transport']['udp']['nat_keepalive_retries'] = DEFAULT_NAT_KEEPALIVE_RETRIES
    # Spread the allowed unresponsive time evenly over the keepalive retries.
    timeout = max_unresponsive_time / DEFAULT_NAT_KEEPALIVE_RETRIES
    config['transport']['udp']['nat_keepalive_timeout'] = timeout
    config['privatekey_hex'] = encode_hex(privatekey_bin)
    config['unrecoverable_error_should_crash'] = unrecoverable_error_should_crash

    # Default to http:// when no scheme was given for the eth node endpoint.
    parsed_eth_rpc_endpoint = urlparse(eth_rpc_endpoint)
    if not parsed_eth_rpc_endpoint.scheme:
        eth_rpc_endpoint = f'http://{eth_rpc_endpoint}'

    web3 = _setup_web3(eth_rpc_endpoint)

    rpc_client = JSONRPCClient(
        web3,
        privatekey_bin,
        gas_price_strategy=gas_price,
        block_num_confirmations=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
        uses_infura='infura.io' in eth_rpc_endpoint,
    )

    blockchain_service = BlockChainService(
        privatekey_bin=privatekey_bin,
        jsonrpc_client=rpc_client,
        # Not giving the contract manager here, but injecting it later
        # since we first need blockchain service to calculate the network id
    )

    # The CLI-selected network id must match what the connected node reports.
    given_network_id = network_id
    node_network_id = blockchain_service.network_id
    known_given_network_id = given_network_id in ID_TO_NETWORKNAME
    known_node_network_id = node_network_id in ID_TO_NETWORKNAME

    if node_network_id != given_network_id:
        if known_given_network_id and known_node_network_id:
            click.secho(
                f"The chosen ethereum network '{ID_TO_NETWORKNAME[given_network_id]}' "
                f"differs from the ethereum client '{ID_TO_NETWORKNAME[node_network_id]}'. "
                "Please update your settings.",
                fg='red',
            )
        else:
            click.secho(
                f"The chosen ethereum network id '{given_network_id}' differs "
                f"from the ethereum client '{node_network_id}'. "
                "Please update your settings.",
                fg='red',
            )
        sys.exit(1)

    config['chain_id'] = given_network_id

    # interpret the provided string argument
    if environment_type == Environment.PRODUCTION:
        # Safe configuration: restrictions for mainnet apply and matrix rooms have to be private
        config['environment_type'] = Environment.PRODUCTION
        config['transport']['matrix']['private_rooms'] = True
    else:
        config['environment_type'] = Environment.DEVELOPMENT

    environment_type = config['environment_type']
    print(f'Raiden is running in {environment_type.value.lower()} mode')

    chain_config = {}
    contract_addresses_known = False
    contracts = dict()
    # Development uses the 'pre_limits' contract build; production the default.
    contracts_version = 'pre_limits' if environment_type == Environment.DEVELOPMENT else None
    config['contracts_path'] = contracts_precompiled_path(contracts_version)

    # Load pre-deployed contract addresses for known networks.
    if node_network_id in ID_TO_NETWORKNAME and ID_TO_NETWORKNAME[node_network_id] != 'smoketest':
        deployment_data = get_contracts_deployed(node_network_id, contracts_version)
        not_allowed = (  # for now we only disallow mainnet with test configuration
            network_id == 1 and
            environment_type == Environment.DEVELOPMENT
        )
        if not_allowed:
            click.secho(
                f'The chosen network ({ID_TO_NETWORKNAME[node_network_id]}) is not a testnet, '
                'but the "development" environment was selected.\n'
                'This is not allowed. Please start again with a safe environment setting '
                '(--environment production).',
                fg='red',
            )
            sys.exit(1)
        contracts = deployment_data['contracts']
        contract_addresses_known = True

    blockchain_service.inject_contract_manager(ContractManager(config['contracts_path']))

    if sync_check:
        check_synced(blockchain_service, known_node_network_id)

    # Contract addresses must come either from the CLI or from deployment data.
    contract_addresses_given = (
        tokennetwork_registry_contract_address is not None and
        secret_registry_contract_address is not None and
        endpoint_registry_contract_address is not None
    )
    if not contract_addresses_given and not contract_addresses_known:
        click.secho(
            f"There are no known contract addresses for network id '{given_network_id}'. "
            "Please provide them on the command line or in the configuration file.",
            fg='red',
        )
        sys.exit(1)

    # Resolve the token network registry proxy (handlers below call sys.exit).
    try:
        token_network_registry = blockchain_service.token_network_registry(
            tokennetwork_registry_contract_address or to_canonical_address(
                contracts[CONTRACT_TOKEN_NETWORK_REGISTRY]['address'],
            ),
        )
    except ContractVersionMismatch as e:
        handle_contract_version_mismatch(e)
    except AddressWithoutCode:
        handle_contract_no_code('token network registry', tokennetwork_registry_contract_address)
    except AddressWrongContract:
        handle_contract_wrong_address(
            'token network registry',
            tokennetwork_registry_contract_address,
        )

    # Resolve the secret registry proxy.
    try:
        secret_registry = blockchain_service.secret_registry(
            secret_registry_contract_address or to_canonical_address(
                contracts[CONTRACT_SECRET_REGISTRY]['address'],
            ),
        )
    except ContractVersionMismatch as e:
        handle_contract_version_mismatch(e)
    except AddressWithoutCode:
        handle_contract_no_code('secret registry', secret_registry_contract_address)
    except AddressWrongContract:
        handle_contract_wrong_address('secret registry', secret_registry_contract_address)

    # Database path is unique per node address, network id and registry.
    database_path = os.path.join(
        datadir,
        f'node_{pex(address)}',
        f'netid_{given_network_id}',
        f'network_{pex(token_network_registry.address)}',
        f'v{RAIDEN_DB_VERSION}_log.db',
    )
    config['database_path'] = database_path
    print(
        '\nYou are connected to the \'{}\' network and the DB path is: {}'.format(
            ID_TO_NETWORKNAME.get(given_network_id, given_network_id),
            database_path,
        ),
    )

    # Build the selected transport.
    discovery = None
    if transport == 'udp':
        transport, discovery = _setup_udp(
            config,
            blockchain_service,
            address,
            contracts,
            endpoint_registry_contract_address,
        )
    elif transport == 'matrix':
        transport = _setup_matrix(config)
    else:
        raise RuntimeError(f'Unknown transport type "{transport}" given')

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    try:
        # Start scanning the chain from the registry deployment block if known.
        if 'contracts' in chain_config:
            start_block = chain_config['contracts']['TokenNetworkRegistry']['block_number']
        else:
            start_block = 0
        raiden_app = App(
            config=config,
            chain=blockchain_service,
            query_start_block=start_block,
            default_registry=token_network_registry,
            default_secret_registry=secret_registry,
            transport=transport,
            raiden_event_handler=raiden_event_handler,
            message_handler=message_handler,
            discovery=discovery,
        )
    except RaidenError as e:
        click.secho(f'FATAL: {e}', fg='red')
        sys.exit(1)

    try:
        raiden_app.start()
    except RuntimeError as e:
        click.secho(f'FATAL: {e}', fg='red')
        sys.exit(1)
    except filelock.Timeout:
        # The database directory is lock-protected: only one instance per
        # account and network may run at a time.
        name_or_id = ID_TO_NETWORKNAME.get(given_network_id, given_network_id)
        click.secho(
            f'FATAL: Another Raiden instance already running for account {address_hex} on '
            f'network id {name_or_id}',
            fg='red',
        )
        sys.exit(1)
    return raiden_app
|
from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
# Public API of this module: only the concrete response class is exported.
__all__ = ['DirectRecentRecipientsResponse']
class DirectRecentRecipientsResponseInterface(ApiResponseInterface):
    """Field declarations for the direct recent-recipients API response."""
    # NOTE(review): both fields are untyped (AnyType); the upstream payload
    # schema is not visible here — confirm against the API before narrowing.
    expiration_interval: AnyType
    recent_recipients: AnyType
class DirectRecentRecipientsResponse(ApiResponse, DirectRecentRecipientsResponseInterface):
    """Concrete response type combining the mapper base with the interface."""
    pass
|
# Configuration placeholders (Ellipsis sentinels) — replace with real values
# before running. NOTE(review): names suggest a chat-bot token and channel
# ids, but the consuming code is not visible here — confirm expected types.
token = ...
welcome_channel_id = ...
birthday_channel_id = ...
|
from parasolr.django import AliasedSolrQuerySet
class WorkSolrQuerySet(AliasedSolrQuerySet):
    """':class:`~parasolr.django.AliasedSolrQuerySet` for
    :class:`~mep.book.models.Item`"""

    #: always filter to item records
    filter_qs = ['item_type:work']

    #: map readable field names to actual solr fields
    field_aliases = {
        'pk': 'pk_i',
        'title': 'title_t',
        'sort_title': 'sort_title_isort',
        'authors': 'authors_t',
        'sort_authors': 'sort_authors_t',
        'creators': 'creators_t',
        'pub_date': 'pub_date_i',
        'format': 'format_s_lower',
        'slug': 'slug_s',
        'notes': 'notes_txt_en',
        'account_start': 'account_start_i',
        'account_end': 'account_end_i',
        'is_uncertain': 'is_uncertain_b',
        'event_count': 'event_count_i',
        'event_years': 'event_years_is',
    }

    # edismax alias for searching on admin work pseudo-field; the actual
    # query term is bound to the $work_query parameter at request time
    admin_work_qf = '{!qf=$admin_work_qf pf=$admin_work_pf v=$work_query}'

    def search_admin_work(self, search_term):
        """Search the admin work pseudo-field for *search_term*."""
        return self.search(self.admin_work_qf) \
            .raw_query_parameters(work_query=search_term)
|
"""Test add node to graph."""
import pytest
import vt_graph_api
import vt_graph_api.errors
# Shared graph fixture, mutated by the tests below (test order matters).
test_graph = vt_graph_api.VTGraph(
    "Dummy api key", verbose=False, private=False, name="Graph test",
    user_editors=["agfernandez"], group_viewers=["virustotal"])
def test_add_link(mocker):
    """Test add link."""
    mocker.patch.object(test_graph, "_fetch_node_information")
    source = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa",
        "file", label="Investigation node")
    target = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41bb",
        "file", label="Investigation node")
    test_graph.add_link(source.node_id, target.node_id, "compressed_parents")
    link_key = (source.node_id, target.node_id, "compressed_parents")
    assert test_graph.links[link_key]
    mocker.resetall()
def test_add_link_not_existing_node():
    """Test link between not existing nodes."""
    expected_error = r"Node 'dummy id 1' not found in nodes."
    with pytest.raises(vt_graph_api.errors.NodeNotFoundError, match=expected_error):
        test_graph.add_link("dummy id 1", "dummy id 2", "compressed_parents")
def test_add_link_between_the_same_node():
    """Test add link between the same node."""
    dummy_id = "dummy id"
    expected_error = (
        r"It is no possible to add links between the same node; id: {gid}."
        .format(gid=dummy_id)
    )
    with pytest.raises(vt_graph_api.errors.SameNodeError, match=expected_error):
        test_graph.add_link(dummy_id, dummy_id, "compressed_parents")
def test_add_links_if_match(mocker):
    """Test add links if match."""
    mocker.patch.object(test_graph, "_fetch_node_information")
    stubbed_connections = [[(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa",
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41cc",
        "similar_files",
        "file"
    )]]
    mocker.patch("vt_graph_api.VTGraph._search_connection",
                 return_value=stubbed_connections)
    source = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa",
        "file", label="Investigation node")
    target = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41cc",
        "file", label="Investigation node")
    assert test_graph.add_links_if_match(source.node_id, target.node_id)
    assert test_graph.links[(source.node_id, target.node_id, "similar_files")]
    mocker.resetall()
def test_add_links_if_match_link_already_exists(mocker):
    """Test add links if match if link already exists."""
    mocker.patch.object(test_graph, "_fetch_node_information")
    source = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa",
        "file", label="Investigation node")
    target = test_graph.add_node(
        "ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41bb",
        "file", label="Investigation node")
    test_graph.add_link(source.node_id, target.node_id, "compressed_parents")
    assert test_graph.add_links_if_match(source.node_id, target.node_id)
    # The pre-existing link is kept and no new similar_files link is created.
    assert test_graph.links[(source.node_id, target.node_id, "compressed_parents")]
    assert not test_graph.links.get(
        (source.node_id, target.node_id, "similar_files"))
    mocker.resetall()
def test_add_links_if_match_not_existing_node():
    """add_links_if_match must fail when the source node is unknown."""
    expected_msg = r"Node 'dummy id 1' not found in nodes."
    with pytest.raises(vt_graph_api.errors.NodeNotFoundError,
                       match=expected_msg):
        test_graph.add_links_if_match("dummy id 1", "dummy id 2")
def test_add_links_if_match_between_the_same_node():
    """add_links_if_match from a node to itself must raise SameNodeError."""
    node_id = "dummy id"
    expected_msg = (
        r"It is no possible to add links between the same node; id: {gid}."
        .format(gid=node_id)
    )
    with pytest.raises(vt_graph_api.errors.SameNodeError, match=expected_msg):
        test_graph.add_links_if_match(node_id, node_id, "compressed_parents")
def test_connect_with_graph_and_found(mocker):
    """Test connect_with_graph when connections are found.

    _search_connection is patched to return two expansion levels; all three
    reported connections must be registered as links after a single search.
    """
    # Two expansion levels of
    # (source, target, connection_type, target_type) tuples.
    search_connection_response = [
        [
            (
                "b3b7d8a4daee86280c7e54b0ff3283afe3579480",
                "26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
                "execution_parents",
                "file"
            ),
            (
                "26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
                "nsis.sf.net",
                "embedded_domains",
                "domain"
            )
        ],
        [
            (
                "b3b7d8a4daee86280c7e54b0ff3283afe3579480",
                "www.openssl.org",
                "embedded_domains",
                "domain"
            )
        ]
    ]
    mocker.patch("vt_graph_api.VTGraph._search_connection",
                 return_value=search_connection_response)
    mocker.spy(test_graph, "_search_connection")
    # Third positional argument False presumably skips node-info fetching —
    # confirm against the add_node signature.
    test_graph.add_node("b3b7d8a4daee86280c7e54b0ff3283afe3579480", "file", False)
    test_graph.add_node("nsis.sf.net", "domain", False)
    test_graph.add_node(
        "26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
        "file", False)
    test_graph.add_node("www.openssl.org", "domain", False)
    assert test_graph.connect_with_graph(
        "b3b7d8a4daee86280c7e54b0ff3283afe3579480",
        max_api_quotas=1000, max_depth=10)
    # A single search must be enough to resolve every connection.
    assert test_graph._search_connection.call_count == 1
    # All three mocked connections are now links of the graph.
    assert test_graph.links[
        (
            "b3b7d8a4daee86280c7e54b0ff3283afe3579480",
            "26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
            "execution_parents"
        )
    ]
    assert test_graph.links[
        (
            "26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
            "nsis.sf.net",
            "embedded_domains"
        )
    ]
    assert test_graph.links[
        (
            "b3b7d8a4daee86280c7e54b0ff3283afe3579480",
            "www.openssl.org",
            "embedded_domains"
        )
    ]
    mocker.resetall()
def test_connect_with_graph_and_not_found(mocker):
    """Test connect_with_graph when no connection can be resolved."""
    # Empty response: the search finds nothing to connect.
    search_connection_response = []
    mocker.patch("vt_graph_api.VTGraph._search_connection",
                 return_value=search_connection_response)
    mocker.spy(test_graph, "_search_connection")
    test_graph.add_node("98374253453454352345fdgdsfg3grgh", "file", False)
    assert not test_graph.connect_with_graph(
        "98374253453454352345fdgdsfg3grgh",
        max_api_quotas=1000, max_depth=10)
    # Exactly one search attempt should have been made.
    assert test_graph._search_connection.call_count == 1
    mocker.resetall()
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('.')
from common import TestCase
import unittest
import fatuv as uv
import tempfile
import os
U_TIME = (4200, 4200)
class TestFSPoll(TestCase):
    """Exercise fatuv's FSPoll handle: change detection, stop, and closed state."""

    def test_fs_poll_change(self):
        """A one-shot timer touches the temp file; FSPoll must fire successfully."""
        def on_change(fs_poll, status, previous_stat, current_stat):
            #self.assert_not_equal(previous_stat.mtim, current_stat.mtim) #TODO
            self.assert_equal(status, uv.error.STATUS_SUCCESS)
            # Closing the handle lets loop.run() return.
            fs_poll.close()
        def on_timeout(timer):
            # Bump atime/mtime so the next poll observes a stat change.
            os.utime(self.temp_file.name, U_TIME)
            timer.close()
        self.fs_poll = uv.FSPoll(self.loop, interval=2000, callback=on_change)
        self.timer = uv.Timer(self.loop)
        with tempfile.NamedTemporaryFile() as temp_file:
            self.temp_file = temp_file
            self.fs_poll.path = temp_file.name
            self.fs_poll.start()
            # Fire once after 1 ms, no repeat.
            self.timer.start(on_timeout,1,0)
            self.loop.run()

    def test_fs_poll_stop(self):
        """Starting then stopping the poll must leave nothing keeping the loop alive."""
        self.fs_poll = uv.FSPoll(self.loop)
        with tempfile.NamedTemporaryFile() as temp_file:
            self.fs_poll.path = temp_file.name
            self.fs_poll.start()
            self.fs_poll.stop()
            self.loop.run()

    def test_closed(self):
        """start() on a closed handle raises; stop() is a silent no-op."""
        self.fs_poll = uv.FSPoll(self.loop)
        self.fs_poll.close()
        self.assert_raises(uv.HandleClosedError, self.fs_poll.start)
        self.assert_is(self.fs_poll.stop(), None)

    def test_path_none(self):
        """start() without a path configured must raise ArgumentError."""
        self.fs_poll = uv.FSPoll(self.loop)
        self.assert_raises(uv.error.ArgumentError, self.fs_poll.start)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
class InputStream:
    """Marker base class for streams that bring new data into the system.

    Examples: user keyboard input or socket data. To mark a stream as an
    input stream, inherit from InputStream in addition to BasicStream.
    """
|
from django.contrib import admin
# Register your models here.
from .models import Cart
# Expose the Cart model in the Django admin using the default ModelAdmin.
admin.site.register(Cart)
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
class Database():
    """Thin wrapper around an SQLAlchemy engine plus a lazily-created scoped session.

    Usage: construct (optionally with a database URI) or call init_app() to
    configure from a Flask-style app's config.
    """

    def __init__(self, uri=None):
        """Create the declarative base; connect immediately if *uri* is given.

        Args:
            uri: optional database URI passed straight to create_engine().
        """
        self.base = declarative_base()
        self.session = None
        if uri is not None:
            # BUG FIX: this previously called self.set_engine(uri), a method
            # that does not exist, so any Database(uri) raised AttributeError.
            # The intended method is set_uri().
            self.set_uri(uri)

    def set_uri(self, uri, echo=False):
        """Create the engine for *uri* and wire the base's query property.

        Args:
            uri: database URI for create_engine().
            echo: forwarded to create_engine(); logs SQL when True.
        """
        print("Using database engine: {}".format(uri))
        self.engine = create_engine(uri, echo=echo)
        session = self.get_session()
        # Gives every model a Model.query shortcut bound to this session.
        self.base.query = session.query_property()

    def get_session(self):
        """Return the scoped session, creating it lazily on first use.

        Note: assumes set_uri() has run first, so self.engine exists.
        """
        if self.session is None:
            self.session = scoped_session(
                sessionmaker(autocommit=False,
                             autoflush=False,
                             bind=self.engine))
        return self.session

    def init_app(self, app):
        """Configure the database from a Flask app and register session teardown.

        Reads SQLALCHEMY_DATABASE_URI (required) and SQLALCHEMY_ECHO
        (optional) from app.config.
        """
        uri = app.config["SQLALCHEMY_DATABASE_URI"]
        # Call set_uri with configuration from app if available
        if "SQLALCHEMY_ECHO" in app.config:
            self.set_uri(uri, app.config["SQLALCHEMY_ECHO"])
        else:
            self.set_uri(uri)

        @app.teardown_appcontext
        def close_session(exception=None):
            # Uses the module-level singleton, matching the original behavior.
            db.session.remove()

        if uri == 'sqlite://':
            # Pure in-memory database: create the schema right away since
            # nothing persists between runs.
            self.init_db()

    def init_db(self):
        """Create all tables for every model registered on the metadata."""
        print("Init database")
        # import all modules here that might define models so that
        # they will be registered properly on the metadata. Otherwise
        # you will have to import them first before calling init_db()
        import itviec.models  # noqa
        self.base.metadata.create_all(bind=self.engine)
# Module-level singleton; init_app()'s teardown hook removes its session.
db = Database()
|
from . import plot
from . import fit
from . import parfit
from . import score
from . import crossval
__all__ = (plot.__all__ + fit.__all__ + score.__all__ + crossval.__all__ + parfit.__all__)
|
import os
import glob
import numpy as np
import pandas as pd
from skimage import io
import torch
from torch.utils.data import Dataset
import zipfile
from util import utils_np
'''
'''
class ImageStackDatasetSim(Dataset):
    """Dataset of stacked grayscale image channels from simulated scenes on disk."""

    def __init__(self, csv_path, root_dir, channel_per_image, transform=None, T_channel=False):
        '''
        Args:
            csv_path: Path to the CSV file with dataset info.
            root_dir: Directory with all image folders.
                root_dir - obj_folder - obj & env
            channel_per_image: 1 -> flat layout; 2 -> images split into
                'obj' and 'env' subfolders.
            transform: optional callable applied to each sample dict.
            T_channel: if True, append a constant channel filled with info['T'].
        '''
        super().__init__()
        self.info_frame = pd.read_csv(csv_path)
        self.root_dir = root_dir
        self.tr = transform
        self.with_T = T_channel
        self.cpi = channel_per_image
        # The CSV has 4 non-image columns; the remaining 'f{i}' columns are
        # image file names.
        self.nc = len(list(self.info_frame))-4 # number of image channels in total
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self,idx):
        """Return {'image', 'label', 'index', 'traj'[, 'time']} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Start with an empty (H, W, 0) stack and concatenate channels onto it.
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        self.T = info['T']
        index = info['index']
        traj = []
        for i in range(self.nc):
            img_name = info['f{}'.format(i)]
            # File names appear to be "<objid>_<t>_<x>_<y>.<ext>" — TODO confirm.
            obj_id = img_name.split('_')[0]
            if self.cpi == 1:
                img_path = os.path.join(self.root_dir, obj_id, img_name)
                this_x = float(img_name[:-4].split('_')[2])
                this_y = float(img_name[:-4].split('_')[3])
                traj.append([this_x,this_y])
            elif self.cpi == 2:
                # 5 underscore-separated parts -> object image; otherwise env.
                if len(img_name.split('_'))==5:
                    img_path = os.path.join(self.root_dir, obj_id, 'obj', img_name)
                    time_step = int(img_name[:-4].split('_')[1])
                    this_x = float(img_name[:-4].split('_')[2])
                    this_y = float(img_name[:-4].split('_')[3])
                    traj.append([this_x,this_y])
                else:
                    img_path = os.path.join(self.root_dir, obj_id, 'env', img_name)
            image = self.togray(io.imread(img_path))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
        if self.with_T:
            # Constant extra channel encoding T.
            T_channel = np.ones(shape=[self.img_shape[0],self.img_shape[1],1])*self.T # T_channel
            input_img = np.concatenate((input_img, T_channel), axis=2) # T_channel
        label = {'x':info['x'], 'y':info['y']}
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        if self.cpi == 2:
            # NOTE(review): time_step is only bound if an 'obj' image occurred
            # in the loop above — confirm every cpi==2 row contains one.
            sample['time'] = time_step
        return sample

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first image of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['f0']
        obj_id = img_name.split('_')[0]
        if self.cpi == 1:
            img_path = os.path.join(self.root_dir, obj_id, img_name)
        elif self.cpi == 2:
            img_path = os.path.join(self.root_dir, obj_id, 'obj', img_name)
        image = self.togray(io.imread(img_path))
        return image.shape
class ImageStackDatasetSim_ZIP(Dataset):
    """Same as ImageStackDatasetSim, but every file is read from a single ZIP archive."""

    def __init__(self, zip_path, csv_path, root_dir, channel_per_image, transform=None, T_channel=False):
        '''
        Args:
            zip_path: Path to the ZIP file with everything.
            csv_path: Path (inside the archive) to the CSV file with dataset info.
            root_dir: Directory (inside the archive) with all image folders.
                root_dir - obj_folder - obj & env
            channel_per_image: 1 -> flat layout; 2 -> obj/env subfolders.
            transform: optional callable applied to each sample dict.
            T_channel: if True, append a constant channel filled with info['T'].
        '''
        super().__init__()
        # The archive handle stays open for the dataset's lifetime.
        self.archive = zipfile.ZipFile(zip_path, 'r')
        self.info_frame = pd.read_csv(self.archive.open(csv_path))
        self.root_dir = root_dir
        self.tr = transform
        self.with_T = T_channel
        self.cpi = channel_per_image
        # The CSV has 4 non-image columns; the rest are image-file columns.
        self.nc = len(list(self.info_frame))-4 # number of image channels in total
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self,idx):
        """Return {'image', 'label', 'index', 'traj'[, 'time']} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        self.T = info['T']
        index = info['index']
        traj = []
        for i in range(self.nc):
            img_name = info['f{}'.format(i)]
            # File names appear to be "<objid>_<t>_<x>_<y>.<ext>" — TODO confirm.
            obj_id = img_name.split('_')[0]
            if self.cpi == 1:
                img_path = os.path.join(self.root_dir, obj_id, img_name)
                this_x = float(img_name[:-4].split('_')[2])
                this_y = float(img_name[:-4].split('_')[3])
                traj.append([this_x,this_y])
            elif self.cpi == 2:
                # 5 underscore-separated parts -> object image; otherwise env.
                if len(img_name.split('_'))==5:
                    img_path = os.path.join(self.root_dir, obj_id, 'obj', img_name)
                    time_step = int(img_name[:-4].split('_')[1])
                    this_x = float(img_name[:-4].split('_')[2])
                    this_y = float(img_name[:-4].split('_')[3])
                    traj.append([this_x,this_y])
                else:
                    img_path = os.path.join(self.root_dir, obj_id, 'env', img_name)
            # Read the image bytes out of the archive instead of the filesystem.
            image = self.togray(io.imread(self.archive.open(img_path)))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
        if self.with_T:
            T_channel = np.ones(shape=[self.img_shape[0],self.img_shape[1],1])*self.T # T_channel
            input_img = np.concatenate((input_img, T_channel), axis=2) # T_channel
        label = {'x':info['x'], 'y':info['y']}
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        if self.cpi == 2:
            # NOTE(review): time_step is only bound if an 'obj' image occurred
            # in the loop above — confirm every cpi==2 row contains one.
            sample['time'] = time_step
        return sample

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first image of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['f0']
        obj_id = img_name.split('_')[0]
        if self.cpi == 1:
            img_path = os.path.join(self.root_dir, obj_id, img_name)
        elif self.cpi == 2:
            img_path = os.path.join(self.root_dir, obj_id, 'obj', img_name)
        image = self.togray(io.imread(self.archive.open(img_path)))
        return image.shape
class ImageStackDatasetSDD(Dataset):
    """Drone-video dataset: stacks each frame with a Gaussian object-position map."""

    def __init__(self, csv_path, root_dir, ext='.jpg', channel_per_image=None, transform=None, T_channel=False):
        '''
        Args:
            csv_path: Path to the CSV file with dataset info.
            root_dir: Directory with all image folders.
                root_dir - video_folder - imgs
            ext: image file extension appended to the frame number.
            channel_per_image: unused here; kept for signature parity.
            transform: optional callable applied to each sample dict.
            T_channel: if True, append a constant channel filled with info['T'].
        '''
        super().__init__()
        self.info_frame = pd.read_csv(csv_path)
        self.root_dir = root_dir
        self.tr = transform
        self.with_T = T_channel
        self.ext = ext
        # The CSV has 5 non-frame columns; the rest are 't{i}' frame columns.
        self.nc = len(list(self.info_frame))-5 # number of image channels in half
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self, idx):
        """Return {'image', 'label', 'index', 'traj', 'time'} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        self.T = info['T']
        index = info['index']
        traj = []
        for i in range(self.nc):
            # 't{i}' cells appear to be "<frame>_<x>_<y>" — TODO confirm.
            img_name = info[f't{i}'].split('_')[0] + self.ext
            video_idx = info['index']
            img_path = os.path.join(self.root_dir, video_idx, img_name)
            # The per-video CSV is named "<H>_<W>.csv", encoding the original scale.
            csv_name = glob.glob(os.path.join(self.root_dir, video_idx, '*.csv'))
            original_scale = os.path.basename(csv_name[0]).split('.')[0]
            original_scale = (int(original_scale.split('_')[0]), int(original_scale.split('_')[1])) # HxW
            time_step = int(info[f't{i}'].split('_')[0])
            this_x = float(info[f't{i}'].split('_')[1])
            this_y = float(info[f't{i}'].split('_')[2])
            traj.append([this_x,this_y])
            image = self.togray(io.imread(img_path))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
            # Gaussian heat-map channel centered on the object position.
            white_canvas = np.zeros_like(image)
            # obj_coords = self.rescale_label((this_x, this_y), original_scale)
            obj_coords = (this_x, this_y)
            obj_map = utils_np.np_gaudist_map(obj_coords, white_canvas, sigmas=[20,20])
            input_img = np.concatenate((input_img, obj_map[:,:,np.newaxis]), axis=2)
        if self.with_T:
            T_channel = np.ones(shape=[self.img_shape[0],self.img_shape[1],1])*self.T # T_channel
            input_img = np.concatenate((input_img, T_channel), axis=2) # T_channel
        label = {'x':info['x'], 'y':info['y']}
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        # 'time' is the frame number of the last stacked channel.
        sample['time'] = time_step
        return sample

    def rescale_label(self, label, original_scale): # x,y & HxW
        """Map an (x, y) label from the original video scale to the stored image scale."""
        current_scale = self.check_img_shape()
        rescale = (current_scale[0]/original_scale[0] , current_scale[1]/original_scale[1])
        return (label[0]*rescale[1], label[1]*rescale[0])

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first frame of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['t0'].split('_')[0] + self.ext
        video_folder = info['index']
        img_path = os.path.join(self.root_dir, video_folder, img_name)
        image = self.togray(io.imread(img_path))
        return image.shape
class ImageStackDatasetSDD_ZIP(Dataset):
    """Same as ImageStackDatasetSDD, but every file is read from a single ZIP archive."""

    def __init__(self, zip_path, csv_path, root_dir, ext='.jpg', channel_per_image=None, transform=None, T_channel=False):
        '''
        Args:
            zip_path: Path (absolute) to the ZIP file with everything.
            csv_path: Path (relative) to the CSV file with dataset info.
            root_dir: Directory (relative) with all image folders.
                root_dir - obj_folder - obj & other
            ext: image file extension appended to the frame number.
            channel_per_image: unused here; kept for signature parity.
            transform: optional callable applied to each sample dict.
            T_channel: if True, append a constant channel filled with info['T'].
        '''
        super().__init__()
        # The archive handle stays open for the dataset's lifetime.
        self.archive = zipfile.ZipFile(zip_path, 'r')
        self.info_frame = pd.read_csv(self.archive.open(csv_path))
        self.root_dir = root_dir
        self.tr = transform
        self.with_T = T_channel
        self.cpi = channel_per_image
        self.ext = ext
        # The CSV has 5 non-frame columns; the rest are 't{i}' frame columns.
        self.nc = len(list(self.info_frame))-5 # number of image channels in half
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self, idx):
        """Return {'image', 'label', 'index', 'traj', 'time'} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        self.T = info['T']
        index = info['index']
        traj = []
        for i in range(self.nc):
            # 't{i}' cells appear to be "<frame>_<x>_<y>" — TODO confirm.
            img_name = info[f't{i}'].split('_')[0] + self.ext
            video_idx = info['index']
            img_path = os.path.join(self.root_dir, video_idx, img_name)
            # Locate the per-video "<H>_<W>.csv" (original scale) in the archive.
            csv_name = [x for x in self.archive.namelist() if ((video_idx in x)&('csv' in x))]
            original_scale = os.path.basename(csv_name[0]).split('.')[0]
            original_scale = (int(original_scale.split('_')[0]), int(original_scale.split('_')[1])) # HxW
            time_step = int(info[f't{i}'].split('_')[0])
            this_x = float(info[f't{i}'].split('_')[1])
            this_y = float(info[f't{i}'].split('_')[2])
            traj.append([this_x,this_y])
            image = self.togray(io.imread(self.archive.open(img_path)))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
            # Gaussian heat-map channel centered on the object position.
            white_canvas = np.zeros_like(image)
            # obj_coords = self.rescale_label((this_x, this_y), original_scale)
            obj_coords = (this_x, this_y)
            obj_map = utils_np.np_gaudist_map(obj_coords, white_canvas, sigmas=[20,20])
            input_img = np.concatenate((input_img, obj_map[:,:,np.newaxis]), axis=2)
        if self.with_T:
            T_channel = np.ones(shape=[self.img_shape[0],self.img_shape[1],1])*self.T # T_channel
            input_img = np.concatenate((input_img, T_channel), axis=2) # T_channel
        label = {'x':info['x'], 'y':info['y']}
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        # 'time' is the frame number of the last stacked channel.
        sample['time'] = time_step
        return sample

    def rescale_label(self, label, original_scale): # x,y & HxW
        """Map an (x, y) label from the original video scale to the stored image scale."""
        current_scale = self.check_img_shape()
        rescale = (current_scale[0]/original_scale[0] , current_scale[1]/original_scale[1])
        return (label[0]*rescale[1], label[1]*rescale[0])

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first frame of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['t0'].split('_')[0] + self.ext
        video_folder = info['index']
        img_path = os.path.join(self.root_dir, video_folder, img_name)
        image = self.togray(io.imread(self.archive.open(img_path)))
        return image.shape
class ImageStackDatasetSDDtr(Dataset): # for trajectory
    """Trajectory variant of ImageStackDatasetSDD: labels are the 'T*' waypoint columns."""

    def __init__(self, csv_path, root_dir, ext='.jpg', transform=None, T_channel=None):
        '''
        Args:
            csv_path: Path to the CSV file with dataset info.
            root_dir: Directory with all image folders.
                root_dir - video_folder - imgs
            ext: image file extension appended to the frame number.
            transform: optional callable applied to each sample dict.
            T_channel: unused here; kept for signature parity.
        '''
        super().__init__()
        self.info_frame = pd.read_csv(csv_path)
        self.root_dir = root_dir
        self.tr = transform
        self.ext = ext
        # Count the 't…' frame columns by name rather than by fixed offset.
        self.nc = len([x for x in list(self.info_frame) if 't' in x]) # number of image channels in half
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self, idx):
        """Return {'image', 'label', 'index', 'traj', 'time'} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        index = info['index']
        traj = []
        for i in range(self.nc):
            # 't{i}' cells appear to be "<frame>_<x>_<y>" — TODO confirm.
            img_name = info[f't{i}'].split('_')[0] + self.ext
            video_idx = info['index']
            img_path = os.path.join(self.root_dir, video_idx, img_name)
            # The per-video CSV is named "<H>_<W>.csv", encoding the original scale.
            csv_name = glob.glob(os.path.join(self.root_dir, video_idx, '*.csv'))
            original_scale = os.path.basename(csv_name[0]).split('.')[0]
            original_scale = (int(original_scale.split('_')[0]), int(original_scale.split('_')[1])) # HxW
            time_step = int(info[f't{i}'].split('_')[0])
            this_x = float(info[f't{i}'].split('_')[1])
            this_y = float(info[f't{i}'].split('_')[2])
            traj.append([this_x,this_y])
            image = self.togray(io.imread(img_path))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
            # Gaussian heat-map channel centered on the object position.
            white_canvas = np.zeros_like(image)
            # obj_coords = self.rescale_label((this_x, this_y), original_scale)
            obj_coords = (this_x, this_y)
            obj_map = utils_np.np_gaudist_map(obj_coords, white_canvas, sigmas=[20,20])
            input_img = np.concatenate((input_img, obj_map[:,:,np.newaxis]), axis=2)
        # Labels are all 'T*' columns, each "<x>_<y>", parsed to float tuples.
        label_name_list = [x for x in list(self.info_frame) if 'T' in x]
        label_list = list(info[label_name_list].values)
        label_list = [(float(x.split('_')[0]), float(x.split('_')[1])) for x in label_list]
        label = dict(zip(label_name_list, label_list))
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        # 'time' is the frame number of the last stacked channel.
        sample['time'] = time_step
        return sample

    def rescale_label(self, label, original_scale): # x,y & HxW
        """Map an (x, y) label from the original video scale to the stored image scale."""
        current_scale = self.check_img_shape()
        rescale = (current_scale[0]/original_scale[0] , current_scale[1]/original_scale[1])
        return (label[0]*rescale[1], label[1]*rescale[0])

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first frame of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['t0'].split('_')[0] + self.ext
        video_folder = info['index']
        img_path = os.path.join(self.root_dir, video_folder, img_name)
        image = self.togray(io.imread(img_path))
        return image.shape
class ImageStackDatasetSDDtr_ZIP(Dataset): # for trajectory
    """Same as ImageStackDatasetSDDtr, but every file is read from a single ZIP archive."""

    def __init__(self, zip_path, csv_path, root_dir, ext='.jpg', transform=None, T_channel=None):
        '''
        Args:
            zip_path: Path (absolute) to the ZIP file with everything.
            csv_path: Path (relative) to the CSV file with dataset info.
            root_dir: Directory (relative) with all image folders.
                root_dir - obj_folder - obj & other
            ext: image file extension appended to the frame number.
            transform: optional callable applied to each sample dict.
            T_channel: unused here; kept for signature parity.
        '''
        super().__init__()
        # The archive handle stays open for the dataset's lifetime.
        self.archive = zipfile.ZipFile(zip_path, 'r')
        self.info_frame = pd.read_csv(self.archive.open(csv_path))
        self.root_dir = root_dir
        self.tr = transform
        self.ext = ext
        # Count the 't…' frame columns by name rather than by fixed offset.
        self.nc = len([x for x in list(self.info_frame) if 't' in x]) # number of image channels in half
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self, idx):
        """Return {'image', 'label', 'index', 'traj', 'time'} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        index = info['index']
        traj = []
        for i in range(self.nc):
            # 't{i}' cells appear to be "<frame>_<x>_<y>" — TODO confirm.
            img_name = info[f't{i}'].split('_')[0] + self.ext
            video_idx = info['index']
            img_path = os.path.join(self.root_dir, video_idx, img_name)
            # Locate the per-video "<H>_<W>.csv" (original scale) in the archive.
            csv_name = [x for x in self.archive.namelist() if ((video_idx in x)&('csv' in x))]
            original_scale = os.path.basename(csv_name[0]).split('.')[0]
            original_scale = (int(original_scale.split('_')[0]), int(original_scale.split('_')[1])) # HxW
            time_step = int(info[f't{i}'].split('_')[0])
            this_x = float(info[f't{i}'].split('_')[1])
            this_y = float(info[f't{i}'].split('_')[2])
            traj.append([this_x,this_y])
            image = self.togray(io.imread(self.archive.open(img_path)))
            input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
            # Gaussian heat-map channel centered on the object position.
            white_canvas = np.zeros_like(image)
            # obj_coords = self.rescale_label((this_x, this_y), original_scale)
            obj_coords = (this_x, this_y)
            obj_map = utils_np.np_gaudist_map(obj_coords, white_canvas, sigmas=[20,20])
            input_img = np.concatenate((input_img, obj_map[:,:,np.newaxis]), axis=2)
        # Labels are all 'T*' columns, each "<x>_<y>", parsed to float tuples.
        label_name_list = [x for x in list(self.info_frame) if 'T' in x]
        label_list = list(info[label_name_list].values)
        label_list = [(float(x.split('_')[0]), float(x.split('_')[1])) for x in label_list]
        label = dict(zip(label_name_list, label_list))
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        # 'time' is the frame number of the last stacked channel.
        sample['time'] = time_step
        return sample

    def rescale_label(self, label, original_scale): # x,y & HxW
        """Map an (x, y) label from the original video scale to the stored image scale."""
        current_scale = self.check_img_shape()
        rescale = (current_scale[0]/original_scale[0] , current_scale[1]/original_scale[1])
        return (label[0]*rescale[1], label[1]*rescale[0])

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the first frame of the first row and return its (H, W) shape."""
        info = self.info_frame.iloc[0]
        img_name = info['t0'].split('_')[0] + self.ext
        video_folder = info['index']
        img_path = os.path.join(self.root_dir, video_folder, img_name)
        image = self.togray(io.imread(self.archive.open(img_path)))
        return image.shape
class MaskStackWithSegSDD(Dataset):
    """Stacks ONE fixed segmentation image plus a Gaussian map per trajectory step.

    Unlike ImageStackDatasetSDD there are no per-frame photos: the first
    channel is the segmentation image, followed by one heat-map channel per
    't{i}' column.
    """

    def __init__(self, csv_path, seg_path, channel_per_image=None, transform=None, T_channel=False):
        '''
        Args:
            csv_path: Path to the CSV file with dataset info.
            seg_path: Path to the segmentation image.
            channel_per_image: unused here; kept for signature parity.
            transform: optional callable applied to each sample dict.
            T_channel: if True, append a constant channel filled with info['T'].
        '''
        super().__init__()
        self.info_frame = pd.read_csv(csv_path)
        self.seg_path = seg_path
        self.tr = transform
        self.with_T = T_channel
        # The CSV has 5 non-frame columns; the rest are 't{i}' columns.
        self.nc = len(list(self.info_frame))-5 # number of image channels in half
        self.img_shape = self.check_img_shape()

    def __len__(self):
        return len(self.info_frame)

    def __getitem__(self, idx):
        """Return {'image', 'label', 'index', 'traj', 'time'} for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        input_img = np.empty(shape=[self.img_shape[0],self.img_shape[1],0])
        info = self.info_frame.iloc[idx]
        self.T = info['T']
        index = info['index']
        traj = []
        # Channel 0: the (shared) segmentation image.
        img_path = self.seg_path
        image = self.togray(io.imread(img_path))
        input_img = np.concatenate((input_img, image[:,:,np.newaxis]), axis=2)
        for i in range(self.nc):
            # img_name = info[f't{i}'].split('_')[0] + self.ext
            # video_idx = info['index']
            # 't{i}' cells appear to be "<frame>_<x>_<y>" — TODO confirm.
            time_step = int(info[f't{i}'].split('_')[0])
            this_x = float(info[f't{i}'].split('_')[1])
            this_y = float(info[f't{i}'].split('_')[2])
            traj.append([this_x,this_y])
            # One Gaussian heat-map channel per trajectory step.
            white_canvas = np.zeros_like(image)
            obj_coords = (this_x, this_y)
            obj_map = utils_np.np_gaudist_map(obj_coords, white_canvas, sigmas=[20,20])
            input_img = np.concatenate((input_img, obj_map[:,:,np.newaxis]), axis=2)
        if self.with_T:
            T_channel = np.ones(shape=[self.img_shape[0],self.img_shape[1],1])*self.T # T_channel
            input_img = np.concatenate((input_img, T_channel), axis=2) # T_channel
        label = {'x':info['x'], 'y':info['y']}
        sample = {'image':input_img, 'label':label}
        if self.tr:
            sample = self.tr(sample)
        sample['index'] = index
        sample['traj'] = traj
        # 'time' is the frame number of the last trajectory step.
        sample['time'] = time_step
        return sample

    def togray(self, image):
        """Convert an image array to single-channel grayscale (equal-weight mean)."""
        if (len(image.shape)==2):
            return image
        elif (len(image.shape)==3) and (image.shape[2]==1):
            return image[:,:,0]
        else:
            image = image[:,:,:3] # ignore alpha
            img = image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3
            return img

    def check_img_shape(self):
        """Read the segmentation image and return its (H, W) shape."""
        image = self.togray(io.imread(self.seg_path))
        return image.shape
|
# environment variables
ATOM_PROGRAM = '/home/physics/bin/atm'
ATOM_UTILS_DIR ='/home/physics/bin/pseudo'
element = "Fe"
equil_volume = 11.3436  # equilibrium volume; units not stated here — TODO confirm
# general calculation parameters
calc = {"element": element,
        "lattice": "BCC",
        "xc": "pb",
        "n_core": 5,
        "n_val": 2,
        "is_spin_pol": True,
        "core": True,
        }
# pseudopotential parameters
# electrons: list of (occupation, spin?) pairs — presumably valence shells; TODO confirm
electrons = [(2,0), (6,0)]
radii = [2., 2.25, 2., 2., 0.7]
# SIESTA calculation parameters
siesta_calc = {"element": element,
               "title": element + " SIESTA calc",
               "xc_f": "GGA",
               "xc": "PBE"
               }
# electronic configurations
configs = [[(1,0),(7,0)],
           [(2,0),(6,0)],
           [(1.5,0),(6.5,0)],
           [(0,0),(8,0)],
           [(1,0),(6,0),(1,0)],
           [(1,0),(5,0),(2,0)]]
# number of atoms in cubic cell
_nat_cell = {"SC": 1,
             "BCC": 2,
             "FCC": 4}
nat = _nat_cell[calc["lattice"]]
#!/usr/bin/env python3
# Demonstrate pickling several objects into one file and reading them back
# in the same order they were written.
import os
import pickle

a = b'12345678'
b = b'9abcdef'
print('a = {}; b = {}'.format(a, b))

# Serialize both values, in order, into a single binary file in the cwd.
cwd = os.getcwd()
file_name = os.path.join(cwd, 'program_state.bin')
with open(file_name, 'wb+') as f:
    for value in (a, b):
        pickle.dump(value, f)

# Clobber the names so the reload below provably restores them.
a, b = b'9abcdef', b'12345678'
print('a = {}; b = {}'.format(a, b))

# Objects come back in exactly the order they were dumped.
with open(file_name, 'rb') as f:
    a = pickle.load(f)
    b = pickle.load(f)
print('a = {}; b = {}'.format(a, b))
|
"""
Created on Sep 14, 2015
@author: Mikhail
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import *
# Project's imports
from mera.selenium_training_automation.pages.page import Page
__author__ = 'Mikhail'
class HomePage(Page):
    """Page object for the Yahoo home page (sign-in / sign-up entry points)."""

    def __init__(self, driver, url):
        Page.__init__(self, driver, url)
        # NOTE(review): this hard-coded URL overrides the *url* argument just
        # passed to Page.__init__ — confirm that is intended.
        self.url = "http://www.yahoo.com"
        self.sign_in_css_selector = ".tab-label.fz-xs.accent.sign-in"
        self.sign_in_xpath = "//a[@class='tab-label fz-xs accent sign-in ']"
        self.sign_up_css_selector = ".y-hdr-link.sign-up"

    def open_home_page(self):
        """Navigate the driver to the home page URL."""
        self.open_page(self.url)

    @property
    def sign_in_field(self):
        """WebElement for the sign-in link (located by XPath)."""
        # return self.driver.find_element_by_css_selector(self.sign_in_css_selector)
        return self.driver.find_element_by_xpath(self.sign_in_xpath)

    def go_to_registration_page(self):
        """Hover over sign-in, wait for the pop-up, then click sign-up."""
        # put mouse cursor on it
        webdriver.ActionChains(self.driver).move_to_element(self.sign_in_field).perform()
        # wait until pop-up appear
        sign_up_element = self.wait.until(visibility_of_element_located((By.CSS_SELECTOR, self.sign_up_css_selector)))
        sign_up_element.click()
|
import bpy
class GESIO_OT_Paths(bpy.types.PropertyGroup):
footage_path: bpy.props.StringProperty(name="footage_000",
subtype='FILE_PATH',
default=r"โช")
json_path: bpy.props.StringProperty(name="JSON",
subtype='FILE_PATH',
default=r"โช")
scale: bpy.props.FloatProperty(name="Scale", default=10)
class GESIO_PT_Panel(bpy.types.Panel):
    """Sidebar panel (View3D > UI > GES) for importing a Google Earth Studio scene."""
    bl_idname = "gesio_PT_panel"
    bl_label = "Google Earth Studio"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "GES"

    def draw(self, context):
        """Lay out footage path, JSON path, scale factor, and the import button."""
        layout = self.layout
        # layout.prop(gesio.scene_select, "filepath", text="")
        # layout.operator("gesio.scene_select")
        col = layout.column()
        row = col.row()
        row.label(text="First Image (*_000):", icon="IMAGE_DATA")
        row = col.row()
        row.prop(bpy.context.scene.GESIO_OT_Paths, "footage_path", text="")
        col = layout.column()
        row = col.row()
        row.label(text="3D Tracking Data (.json):", icon="FILE_CACHE")
        row = col.row()
        row.prop(bpy.context.scene.GESIO_OT_Paths, "json_path", text="")
        row = col.row()
        # The duplicated row() calls act as vertical spacers.
        row = col.row()
        row.prop(bpy.context.scene.GESIO_OT_Paths,
                 "scale",
                 text="Shrink Factor")
        row = col.row()
        row = col.row()
        row.operator("gesio.importscene", text="Import Scene", icon="IMPORT")
def register():
    """Attach the GESIO_OT_Paths property group to every Scene as Scene.GESIO_OT_Paths."""
    bpy.types.Scene.GESIO_OT_Paths = bpy.props.PointerProperty(
        type=GESIO_OT_Paths)
|
def flatten_struct(df):
    """Flatten one level of struct columns in a Spark DataFrame.

    Every field ``s.f`` of a struct column ``s`` becomes a top-level column
    named ``s_f``; the original struct columns are then dropped. DataFrames
    without struct columns are returned unchanged.

    NOTE(review): relies on ``F`` (pyspark.sql.functions) being in scope at
    module level — confirm the import exists elsewhere in this file.
    """
    struct_cols = [name for name, dtype in df.dtypes if 'struct' in dtype]
    if not struct_cols:
        return df
    # Resolve all (source, target) column names before mutating the frame.
    rename_pairs = [
        (f'{parent}.{child}', f'{parent}_{child}')
        for parent in struct_cols
        for child in df.select(f'{parent}.*').columns
    ]
    for src, dst in rename_pairs:
        df = df.withColumn(dst, F.col(src))
    for parent in struct_cols:
        df = df.drop(parent)
    return df
|
from django.db import models
from datetime import datetime
from users.models import UserProfile
# Create your models here.
class Projects(models.Model):
    """Project record: owners, domain field, status, version, progress, description.

    NOTE(review): several label strings below are mojibake from a bad encoding
    conversion (some even contain stray line breaks); they are runtime values
    and preserved exactly as found.
    """
    PROJECT_FIELD = (
        ('1', "้่ไบงๅ"),
        ('2', "็ตไฟกไบงๅ"),
        ('3', "็ฉ่็ฝ็ธๅ
ณ"),
    )
    PROJECT_STATUS = (
        ('1', "็ซ้กน้ถๆฎต"),
        ('2', "ๅผๅ้ถๆฎต"),
        ('3', "ๆต่ฏ้ถๆฎต"),
        ('4', "็ป้กน"),
    )
    # Developers assigned to this project (many-to-many).
    user = models.ManyToManyField(UserProfile, verbose_name="้กน็ฎไธป่ฆๅผๅ่
")
    name = models.CharField(max_length=30, null=True, blank=True, verbose_name="้กน็ฎๅ็งฐ")
    field = models.CharField(max_length=30, choices=PROJECT_FIELD, default="ๆช่ฎพ็ฝฎ", verbose_name="้กน็ฎ็ฑปๅ")
    status = models.CharField(max_length=30, choices=PROJECT_STATUS, default="ๆชๅผๅง", verbose_name="้กน็ฎ้ถๆฎต")
    version = models.CharField(max_length=30, null=True, blank=True, verbose_name="้กน็ฎ็ๆฌ")
    progress = models.IntegerField(default=0, verbose_name="้กน็ฎ่ฟๅบฆ")
    desc = models.TextField(max_length=500, verbose_name="้กน็ฎๆ่ฟฐ")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")

    class Meta:
        verbose_name = "้กน็ฎไฟกๆฏ"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name

    def get_this_project_all_staffs(self):
        """Return every assigned developer's username as one comma-joined string."""
        return ",".join([user.username for user in self.user.all()])

    def set_progress(self):
        """Recompute self.progress as the equal-weight average of stage progress.

        NOTE(review): the running total is a float assigned to an IntegerField
        and is not save()d here — confirm the caller rounds and persists it.
        """
        all_stages = self.stages_set.all()
        # Compute the current project's progress as an equal-weight average.
        self.progress = 0
        for stage in all_stages:
            self.progress += stage.progress * (1/all_stages.count())
class Stages(models.Model):
    """
    Project sub-stage.
    """
    name = models.CharField(max_length=30, null=True, blank=True, verbose_name="้ถๆฎตๅ")
    project = models.ForeignKey(Projects, null=True, blank=True, verbose_name="้กน็ฎ")
    progress = models.IntegerField(default=0, verbose_name="้ถๆฎต่ฟๅบฆ")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")

    class Meta:
        verbose_name = "้ถๆฎต"
        verbose_name_plural = verbose_name

    def __str__(self):
        return str(self.project.name + ':' + self.name)

    def get_all_missions(self):
        """Return the queryset of all missions that belong to this stage."""
        return self.missions_set.all()

    def get_progress(self):
        """Recompute self.progress as the equal-weight average of mission progress.

        NOTE(review): despite the get_ prefix this mutates and returns nothing,
        and the value is not save()d — confirm callers expect that.
        """
        all_missions = self.missions_set.all()
        # Compute the current stage's progress as an equal-weight average.
        self.progress = 0
        for mission in all_missions:
            self.progress += mission.progress * (1/all_missions.count())
class Missions(models.Model):
    """
    Project sub-stage task.
    """
    # People responsible for this task (many-to-many).
    user = models.ManyToManyField(UserProfile, verbose_name="ไปปๅก่ด่ดฃไบบ")
    stage = models.ForeignKey(Stages, null=True, blank=True, verbose_name="ๆๅฑ้ถๆฎต")
    name = models.CharField(max_length=30, null=True, blank=True, verbose_name="ไปปๅกๅ")
    progress = models.IntegerField(default=0, verbose_name="ไปปๅก่ฟๅบฆ")
    content = models.TextField(verbose_name="ไปปๅกๆ่ฟฐ", null=True, blank=True)
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")
    end_time = models.DateTimeField(default=datetime.now, verbose_name=u"้ข่ฎกๅฎๆๆถ้ด")

    class Meta:
        verbose_name = "ๅญไปปๅก"
        verbose_name_plural = verbose_name

    def __str__(self):
        return str(self.stage.project.name + ':' + self.name)

    def get_this_mission_all_staffs(self):
        """Return every responsible user's username as one comma-joined string."""
        return ",".join([user.username for user in self.user.all()])
class Records(models.Model):
    """Project -> stage -> task -> record: a log entry attached to a task."""
    # Author of the record.
    user = models.ForeignKey(UserProfile, null=True, blank=True, verbose_name="่ฎฐๅฝ็ป่ฎฐไบบ")
    # Task this record belongs to.
    mission = models.ForeignKey(Missions, null=True, blank=True, verbose_name="ๆๅฑไปปๅก")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")
    content = models.TextField(verbose_name="่ฎฐๅฝๆ่ฟฐ", null=True, blank=True)

    class Meta:
        verbose_name = "ๅญ่ฎฐๅฝ"
        verbose_name_plural = verbose_name

    def __str__(self):
        # "task:author"; assumes both FKs are set despite being nullable.
        return str(self.mission.name + ':' + self.user.username)
# class ProjectUsers(models.Model):
# """
    # project-member association table
# """
# user = models.ForeignKey(UserProfile, null=True, blank=True, verbose_name="ๆๅ")
# project = models.ForeignKey(Projects, null=True, blank=True, verbose_name="้กน็ฎ")
# add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")
#
# class Meta:
    #         verbose_name = "project-member association table"
# verbose_name_plural = verbose_name
#
# def __str__(self):
# return str(self.id)
# class MissionUsers(models.Model):
# """
    # task-member association table
# """
# user = models.ForeignKey(UserProfile, null=True, blank=True, verbose_name="ๆๅ")
# mission = models.ForeignKey(Missions, null=True, blank=True, verbose_name="ไปปๅก")
# add_time = models.DateTimeField(default=datetime.now, verbose_name=u"ๆทปๅ ๆถ้ด")
#
# class Meta:
    #         verbose_name = "task-member association table"
# verbose_name_plural = verbose_name
#
# def __str__(self):
# return str(self.id)
|
import pygame
from Bullet import Pocisk
from Alien import Alien
from kropla import drop
from Boss import boss
from BulletBoss import PociskBoss
from time import sleep
from ExtraBullet import BulletPlus
from Animations import Explosion
clock = pygame.time.Clock()
pygame.init()
pygame.mixer.pre_init(44100, -16, 2, 512)
import sys
import os
def resource_path(relative_path):
    """Resolve a bundled-resource path, honoring PyInstaller's _MEIPASS dir.

    Falls back to the current working directory when not running frozen.
    """
    base = getattr(sys, '_MEIPASS', None)
    if base is None:
        base = os.path.abspath(".")
    return os.path.join(base, relative_path)
def update_bullets(ai_settings, stats, sb, new_bullet, aliens):
    """Advance player bullets, cull off-screen ones, and score alien hits.

    Fixes: the original Polish comment was mojibake-split across two lines,
    leaving a bare non-comment line that broke the file; the collision loop
    variable also shadowed the `aliens` sprite group.
    """
    new_bullet.update()
    # Remove bullets that have left the screen horizontally.
    for bullet in new_bullet.copy():
        if bullet.rect.left <= 0 or ai_settings.SCREEN_WIDTH < bullet.rect.right:
            new_bullet.remove(bullet)
    # groupcollide(True, True): bullet and alien are both killed on contact;
    # each dict value is the list of aliens destroyed by one bullet.
    collisions = pygame.sprite.groupcollide(new_bullet, aliens, True, True)
    if collisions:
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
            sb.prep_score()
def boss_bullet_update(ai_settings, boss_bullet):
    """Advance boss bullets and discard those outside the horizontal play area.

    NOTE(review): both bounds test rect.left (unlike update_bullets, which
    tests rect.right on the far side); preserved as-is.
    """
    boss_bullet.update()
    width = ai_settings.SCREEN_WIDTH
    for shot in boss_bullet.copy():
        off_left = shot.rect.left <= 0
        off_right = width < shot.rect.left
        if off_left or off_right:
            boss_bullet.remove(shot)
def extra_bullet_update(ai_settings, extra_bullet):
    """Advance the boss's extra bullets and drop any that left the screen."""
    extra_bullet.update()
    limit = ai_settings.SCREEN_WIDTH
    for shot in extra_bullet.copy():
        if shot.rect.right <= 0 or limit < shot.rect.left:
            extra_bullet.remove(shot)
def check_keydown_events(event, ai_settings, screen, player, new_bullet, stats):
    """Handle one KEYDOWN event: quit, vertical movement, shooting, pause toggle."""
    key = event.key
    if key == pygame.K_ESCAPE:
        sys.exit(0)
    elif key == pygame.K_DOWN:
        # Start moving down only while above the bottom edge.
        if player.rect.bottom < player.screen_rect.bottom:
            player.moving_down = True
    elif key == pygame.K_UP:
        # Start moving up only while below the top edge.
        if player.rect.top > 0:
            player.moving_up = True
    elif key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, player, new_bullet)
    elif key == pygame.K_z:
        # Toggle pause.
        stats.game_active = not stats.game_active
def fire_bullet(ai_settings, screen, player, new_bullet):
    """Fire a new player bullet if the configured limit has not been reached.

    Fixes: the original `<=` comparison allowed bullets_allowed + 1 bullets in
    flight, contradicting its own comment ("limit of 3"); it also compared the
    bullet count against SCREEN_WIDTH, which is meaningless and effectively
    always true, so that clause is removed.
    """
    # TODO: hoist the Sound object out of the per-shot path to avoid re-loading.
    shoot_sound = pygame.mixer.Sound(resource_path('hit2.wav'))
    if len(new_bullet) < ai_settings.bullets_allowed:
        new_bullet.add(Pocisk(ai_settings, screen, player))
        shoot_sound.play()
def fire_boss_bullet(ai_settings, screen, bosss, boss_bullet, player, pos, HPbar):
    """Let the boss shoot while under its bullet cap; a dead boss fires nothing."""
    if len(boss_bullet) > ai_settings.boss_bullets_allowed:
        return
    shot = PociskBoss(ai_settings, screen, bosss, player, pos)
    boss_bullet.add(shot)
    # A boss whose health dropped below zero immediately retracts the shot.
    if HPbar.Health < 0:
        shot.kill()
def fire_extra_bullet(ai_settings, screen, bosss, player, pos, extra_bullet, HPbar):
    """Spawn the boss's secondary projectile while under its cap.

    NOTE(review): the cap is compared via float() as in the original --
    presumably the setting may be stored as a string; confirm.
    """
    if len(extra_bullet) > float(ai_settings.extra_bullet_allowed):
        return
    shot = BulletPlus(ai_settings, screen, bosss, player, pos)
    extra_bullet.add(shot)
    if HPbar.Health < 0:
        shot.kill()
def AniExplosion(explosion, bosss):
    """Spawn an explosion animation over the boss and register it for drawing."""
    explosion.add(Explosion(bosss))
def check_keyup_events(event, player):
    """Stop vertical movement when the corresponding arrow key is released."""
    key = event.key
    if key == pygame.K_DOWN:
        player.moving_down = False
    elif key == pygame.K_UP:
        player.moving_up = False
def check_events(ai_settings, screen, player, new_bullet, boss_bullet, bosss, pos, HPbar,
                 extra_bullet, stats):
    """Poll the pygame event queue and dispatch input and boss-shot timer events.

    Two USEREVENT timers drive the boss's regular and extra projectiles.
    """
    milliseconds_delay = 1200  # boss-shot period in ms (1.2 s; old "0.5 seconds" comment was wrong)
    bullet_event = pygame.USEREVENT + 1
    # NOTE(review): armed first with delay // 60 (20 ms), then re-armed with the
    # full delay inside the loop on every polled event -- presumably so the boss
    # fires quickly right after this function starts being called; confirm intent.
    pygame.time.set_timer(bullet_event, milliseconds_delay // 60)
    milliseconds_delay2 = 1850
    bullet_event2 = pygame.USEREVENT + 2
    pygame.time.set_timer(bullet_event2, milliseconds_delay2 // 60)
    # Wait for key presses, mouse clicks, and timer ticks.
    for event in pygame.event.get():
        # Re-arm both timers with their full periods whenever any event arrives.
        pygame.time.set_timer(bullet_event, milliseconds_delay)
        pygame.time.set_timer(bullet_event2, milliseconds_delay2)
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, player, new_bullet, stats)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, player)
        elif event.type == bullet_event:
            fire_boss_bullet(ai_settings, screen, bosss, boss_bullet, player, pos, HPbar)
        elif event.type == bullet_event2:
            fire_extra_bullet(ai_settings, screen, bosss, player, pos, extra_bullet, HPbar)
def get_number_rows(ai_settings, player_width, alien_width):
    """Compute how many rows of aliens fit on the screen.

    NOTE(review): alien_width is used as a vertical size here -- presumably the
    sprites are square; confirm.
    """
    usable = ai_settings.SCREEN_HEIGHT - (4 * alien_width) - player_width
    return int(usable / (2 * alien_width))
def get_number_rain(ai_settings, player_width, kropla_height):
    """Compute how many sparse rows of raindrops fit vertically on the screen."""
    usable = ai_settings.SCREEN_HEIGHT - (3 * kropla_height) - player_width
    # 128x spacing keeps the rain layer very sparse.
    return int(usable / (128 * kropla_height))
def get_number_aliens_x(ai_settings, alien_width):
    """Compute how many aliens fit in one column of the fleet.

    Fix: the original Polish comment was mojibake-split across two lines,
    leaving a bare non-comment line that broke the file; restored here.
    """
    # NOTE(review): reads SCREEN_HEIGHT and subtracts 2 / alien_width -- probably
    # intended SCREEN_WIDTH - 2 * alien_width; behavior preserved pending review.
    available_space_x = ai_settings.SCREEN_HEIGHT - 2 / alien_width
    number_aliens_x = int(available_space_x / (2 * alien_width))
    return number_aliens_x
def get_randomnumber_rain(ai_settings, kropla_height):
    """Compute how many raindrops fit across the screen width."""
    usable = ai_settings.SCREEN_WIDTH - 3 / kropla_height
    return int(usable / (3 * kropla_height))
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
    """Spawn one alien positioned by its (slot, row) indices and add it to the group."""
    newcomer = Alien(ai_settings, screen)
    height = newcomer.rect.height
    # Vertical slot: one alien-height margin plus 2.5 heights per slot.
    newcomer.y = height + 2.5 * height * alien_number
    newcomer.rect.y = newcomer.y
    # Successive rows march leftwards from the alien's default x position.
    newcomer.rect.x -= newcomer.rect.width + 2 * newcomer.rect.height * row_number
    aliens.add(newcomer)
def create_boss(ai_settings, screen, bosss):
    """Spawn the boss one boss-height down from the top and register it."""
    new_boss = boss(ai_settings, screen, bosss)
    new_boss.y = new_boss.rect.height
    new_boss.rect.y = new_boss.y
    bosss.add(new_boss)
def create_fleet(ai_settings, screen, player, aliens):
    """Create the full fleet of alien ships.

    A template alien determines spacing; the number of columns and rows is
    derived from the screen and player sizes. Spacing between aliens equals
    one alien width.

    Fix: the original Polish comments were mojibake-split across lines,
    leaving bare non-comment lines that broke the file; restored in English.
    """
    alien = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
    number_rows = get_number_rows(ai_settings, player.rect.height,
                                  alien.rect.width)
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            # Create an alien and place it in its row.
            create_alien(ai_settings, screen, aliens, alien_number,
                         row_number)
def create_fleet_drop(ai_settings, screen, player, deszcz):
    """Fill the background rain layer with drops arranged on a sparse grid."""
    template = drop(ai_settings, screen)
    columns = get_randomnumber_rain(ai_settings, template.rect.width)
    rows = get_number_rain(ai_settings, player.rect.width,
                           template.rect.width)
    for row_index in range(rows):
        for column_index in range(columns):
            create_rain(ai_settings, screen, deszcz, column_index, row_index)
def create_rain(ai_settings, screen, deszcz, rain_number, rownumberss):
    """Place one raindrop on the horizontal grid and add it to the rain group."""
    droplet = drop(ai_settings, screen)
    # NOTE(review): the original named this 'kropla_width' but reads rect.height.
    spacing = droplet.rect.height
    droplet.x = spacing + 12 * spacing * rain_number
    droplet.rect.x = droplet.x
    droplet.rect.x += droplet.rect.width + 12 * droplet.rect.width * rownumberss
    deszcz.add(droplet)
def change_fleet_direction(ai_settings, aliens):
    """Shift the whole fleet left by the drop speed and flip its travel direction."""
    step = ai_settings.fleet_drop_speed
    for member in aliens.sprites():
        member.rect.x -= step
    # Sign flip; `/= -1` kept from the original (it yields a float in Python 3).
    ai_settings.fleet_direction /= -1
def check_fleet_edges(ai_settings, aliens):
    """If any alien touches a screen edge, turn the whole fleet around (once)."""
    for member in aliens.sprites():
        if not member.check_edges():
            continue
        change_fleet_direction(ai_settings, aliens)
        break
def check_boss_fleet_edges(ai_settings, bosss):
    """If the boss touches a screen edge, reverse the boss group's direction."""
    for member in bosss.sprites():
        if not member.check_edges():
            continue
        change_boss_fleet_direction(ai_settings, bosss)
        break
def change_boss_fleet_direction(ai_settings, bosss):
    """Move the boss group up by the drop speed and flip the fleet direction."""
    step = ai_settings.fleet_drop_speed
    for member in bosss.sprites():
        member.rect.y -= step
    ai_settings.fleet_direction /= -1
def check_rain_down(ai_settings, deszcz):
    """Drop the whole rain layer once per drop that reports an edge crossing.

    Note: no break here (unlike check_fleet_edges) -- each triggered droplet
    causes another shift, matching the original behavior.
    """
    for droplet in deszcz.sprites():
        if droplet.check_rain_edges():
            drop_rain_down(ai_settings, deszcz)
def drop_rain_down(ai_settings, deszcz):
    """Move every raindrop down by the configured rain speed."""
    speed = ai_settings.speed_drop_rain
    for droplet in deszcz.sprites():
        droplet.rect.y += speed
def update_rain(ai_settings, deszcz):
    """Advance the rain layer: handle edge wrap, update drops, cull strays."""
    check_rain_down(ai_settings, deszcz)
    deszcz.update(deszcz, ai_settings)
    height = ai_settings.SCREEN_HEIGHT
    for droplet in deszcz.copy():
        if droplet.rect.top <= 0 or height < droplet.rect.top:
            deszcz.remove(droplet)
def update_aliens(ai_settings, stats, sb, screen, player, aliens, new_bullet):
    """Move the alien fleet, then resolve alien/player contact and escapes."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    collision_sound = pygame.mixer.Sound(resource_path('hit.wav'))
    # Player touched by an alien: play the hit sound and lose a ship.
    if pygame.sprite.spritecollideany(player, aliens):
        collision_sound.play()
        alien_hit_player(ai_settings, stats, sb, screen, player, aliens, new_bullet)
    # Aliens that slipped past the left edge also cost a ship.
    check_aliens_left(ai_settings, stats, sb, screen, player, aliens, new_bullet)
def update_boss(ai_settings, bosss):
    """Advance the boss group after handling screen-edge bounces."""
    check_boss_fleet_edges(ai_settings, bosss)
    bosss.update()
def update_animation(explosion):
    """Step the explosion animation group by one frame."""
    explosion.update()
def alien_hit_player(ai_settings, stats, sb, screen, player, aliens, new_bullet):
    """Handle the player being hit: spend a ship, reset the board, re-center."""
    if stats.ships_left <= 0:
        return
    stats.ships_left -= 1
    sb.prep_ships()
    # Clear the board before rebuilding.
    aliens.empty()
    new_bullet.empty()
    sleep(0.5)  # brief pause so the hit registers visually
    # Rebuild the alien fleet from scratch.
    create_fleet(ai_settings, screen, player, aliens)
    player.player_center()
def create_bosss(ai_settings, screen, aliens, bosss, boss_bullet, new_bullet, extra_bullet, HPbar, player, explosion):
    """Run one frame of the boss phase: draw boss, bullets and HP bar, resolve
    hits, and tear the boss down with an explosion and sound once its health
    is nearly gone.

    NOTE(review): the draw/hit ordering below is the boss-fight render/update
    pipeline and is order-sensitive; left untouched.
    """
    # Loaded every frame; could be hoisted to module scope -- TODO confirm cost.
    destroy = pygame.mixer.Sound(resource_path('BossDestroyed.wav'))
    aliens.empty()  # no regular aliens during the boss fight
    bosss.draw(screen)
    boss_bullet.draw(screen)
    extra_bullet.draw(screen)
    HPbar.blitme(bosss)
    # HP bar resolves player bullets against the boss; player takes boss bullets.
    HPbar.hit(bosss, new_bullet, boss_bullet, ai_settings, extra_bullet)
    player.hit(boss_bullet, extra_bullet)
    if HPbar.Health <= 4:
        explosion.draw(screen)
        # Boss is nearly dead: any further bullet overlap finishes it off.
        hit = pygame.sprite.groupcollide(new_bullet, bosss, False, False)
        if hit:
            AniExplosion(explosion, bosss)
            bosss.empty()
            boss_bullet.empty()
            extra_bullet.empty()
            destroy.play()
def check_aliens_left(ai_settings, stats, sb, screen, player, aliens, new_bullet):
    """Treat an alien crossing the screen's left edge like a player hit."""
    left_edge = screen.get_rect().left
    for member in aliens.sprites():
        if member.rect.left <= left_edge:
            alien_hit_player(ai_settings, stats, sb, screen, player, aliens, new_bullet)
            break
def time_txt(screen, txt, youlost):
    """Blit the 'you lost' banner at the origin and the timer text below it."""
    for surface, position in ((youlost, (0, 0)), (txt, (480, 500))):
        screen.blit(surface, position)
def update_screen(ai_settings, screen, player, bosss, aliens, new_bullet, boss_bullet,
                  HPbar, extra_bullet, explosion, stats, sb, pause_button):
    """Draw one frame and, when the fleet is cleared, advance to the next level
    (or the boss fight after level 3), then flip the display buffer.

    NOTE(review): draw calls are order-sensitive (later draws paint on top).
    """
    player.blitme(HPbar, screen)
    sb.show_score()
    for bullets in new_bullet.sprites():
        bullets.draw_pocisk()
    # deszcz.draw(screen)  # rain layer disabled
    aliens.draw(screen)
    if not stats.game_active:
        # Paused: show the pause button overlay.
        pause_button.draw_button()
    if len(aliens) == 0:
        # Fleet cleared: speed up and bump the level counter.
        ai_settings.increase_speed()
        stats.level += 1
        sb.prep_ships()
        aliens.empty()
        if stats.level > 3 and stats.ships_left >= 0:
            # Past level 3 the boss phase takes over the frame.
            create_bosss(ai_settings, screen, aliens, bosss, boss_bullet, new_bullet, extra_bullet, HPbar, player,
                         explosion)
        else:
            sleep(0.2)  # short beat between waves
            new_bullet.empty()
            create_fleet(ai_settings, screen, player, aliens)
            player.player_center()
    # Present the freshly drawn frame.
    pygame.display.flip()
|
import torch
import math
import numpy as np
from utils import LeastSquares
def split_coeff(coeff):
    """Slice a [1, 257] 3DMM coefficient vector into its semantic parts.

    Layout: 80 identity | 64 expression | 80 texture | 3 euler angles |
            27 SH lighting (9 per RGB channel) | 3 translation.
    """
    id_coeff = coeff[:, 0:80]      # identity (shape) basis weights
    ex_coeff = coeff[:, 80:144]    # expression basis weights
    tex_coeff = coeff[:, 144:224]  # texture (albedo) basis weights
    angles = coeff[:, 224:227]     # euler rotation angles (x, y, z)
    gamma = coeff[:, 227:254]      # SH lighting coefficients
    translation = coeff[:, 254:]   # 3D translation
    return id_coeff, ex_coeff, tex_coeff, angles, gamma, translation
class _need_const:
    """Constants shared by the SH illumination and image-geometry code."""
    # Spherical-harmonics band attenuation factors (a_l) ...
    a0 = np.pi
    a1 = 2 * np.pi / np.sqrt(3.0)
    a2 = 2 * np.pi / np.sqrt(8.0)
    # ... and normalized basis constants (c_l, d0).
    c0 = 1 / np.sqrt(4 * np.pi)
    c1 = np.sqrt(3.0) / np.sqrt(4 * np.pi)
    c2 = 3 * np.sqrt(5.0) / np.sqrt(12 * np.pi)
    d0 = 0.5 / np.sqrt(3.0)
    # Bundled in the order illumination_layer unpacks them.
    illu_consts = [a0, a1, a2, c0, c1, c2, d0]
    # Image geometry: source image size, network input size, camera z position.
    origin_size = 300
    target_size = 224
    camera_pos = 10.0
def shape_formation(id_coeff, ex_coeff, facemodel):
    """Build the face shape S = mean + B_id @ alpha + B_exp @ beta, re-centered.

    id_coeff: [B, 80], ex_coeff: [B, 64] -> face shape [B, N, 3] (BFM model).
    """
    batch = id_coeff.size(0)
    identity_part = torch.einsum('ij,aj->ai', facemodel.idBase, id_coeff)
    expression_part = torch.einsum('ij,aj->ai', facemodel.exBase, ex_coeff)
    flat_shape = identity_part + expression_part + facemodel.meanshape
    face_shape = flat_shape.view(batch, -1, 3)
    # Re-center by subtracting the centroid of the MEAN shape (not this shape).
    centroid = facemodel.meanshape.view(1, -1, 3).mean(dim=1, keepdim=True)
    return face_shape - centroid
def texture_formation(tex_coeff, facemodel):
    """Build per-vertex albedo T = mean_texture + B_tex @ gamma.

    tex_coeff: [B, 80] -> face texture [B, N, 3], RGB order, range 0-255.
    """
    batch = tex_coeff.size(0)
    offsets = torch.einsum('ij,aj->ai', facemodel.texBase, tex_coeff)
    flat_texture = offsets + facemodel.meantex
    return flat_texture.view(batch, -1, 3)
def compute_norm(face_shape, facemodel):
    """Per-vertex normals from one-ring neighborhoods (up to 8 adjacent faces).

    face_shape: [1, N, 3] -> v_norm: [1, N, 3], unit length.
    https://fredriksalomonsson.files.wordpress.com/2010/10/mesh-data-structuresv2.pdf
    """
    # Triangle vertex indices [F, 3]; the model stores them 1-based.
    face_id = facemodel.tri - 1
    # For each vertex, indices of its (up to 8) adjacent faces [N, 8]; 1-based too.
    point_id = facemodel.point_buf - 1
    shape = face_shape
    v1 = shape[:, face_id[:, 0], :]
    v2 = shape[:, face_id[:, 1], :]
    v3 = shape[:, face_id[:, 2], :]
    e1 = v1 - v2
    e2 = v2 - v3
    face_norm = e1.cross(e2)  # per-face normal from two edge vectors
    # Normalize face normals first so each face contributes equally to the sum.
    face_norm = torch.nn.functional.normalize(face_norm, p=2, dim=2)
    empty = torch.zeros((face_norm.size(0), 1, 3),
                        dtype=face_norm.dtype, device=face_norm.device)
    # Append a zero normal at the end; presumably "missing neighbor" slots in
    # point_buf index this sentinel so they add nothing -- TODO confirm.
    face_norm = torch.cat((face_norm, empty), 1)
    # Sum the adjacent-face normals for each vertex, then renormalize.
    v_norm = face_norm[:, point_id, :].sum(dim=2)
    v_norm = torch.nn.functional.normalize(v_norm, p=2, dim=2)  # unit length
    return v_norm
def compute_rotation_matrix(angles):
    """Euler angles [B, 3] -> transposed rotation matrices [B, 3, 3].

    Returns (Rz @ Ry @ Rx)^T so a row-vector face shape can be rotated as
    face_shape @ R.  See https://www.cnblogs.com/larry-xia/p/11926121.html
    """
    batch = angles.size(0)
    device = angles.device
    one = torch.ones([batch, 1]).to(device)
    zero = torch.zeros([batch, 1]).to(device)
    zero3 = torch.zeros([batch, 3]).to(device)

    def col(values):
        # Reshape a [B] vector into a [B, 1] column for concatenation.
        return torch.reshape(values, [batch, 1])

    cx, sx = torch.cos(angles[:, 0]), torch.sin(angles[:, 0])
    cy, sy = torch.cos(angles[:, 1]), torch.sin(angles[:, 1])
    cz, sz = torch.cos(angles[:, 2]), torch.sin(angles[:, 2])

    # Row-major entries of the canonical X/Y/Z axis rotation matrices.
    rot_x = torch.cat([one, zero3, col(cx), -col(sx), zero, col(sx), col(cx)], dim=1)
    rot_y = torch.cat([col(cy), zero, col(sy), zero, one, zero,
                       -col(sy), zero, col(cy)], dim=1)
    rot_z = torch.cat([col(cz), -col(sz), zero, col(sz), col(cz), zero3, one], dim=1)

    rot_x = rot_x.reshape([batch, 3, 3])
    rot_y = rot_y.reshape([batch, 3, 3])
    rot_z = rot_z.reshape([batch, 3, 3])
    # Compose R = Rz @ Ry @ Rx, then transpose for right-multiplication.
    rotation = rot_z.bmm(rot_y).bmm(rot_x)
    return rotation.permute(0, 2, 1)
def projection_layer(face_shape, fx=1015.0, fy=1015.0, px=112.0, py=112.0):
    """Project 3D face vertices onto the image plane with a pinhole camera.

    face_shape: [B, N, 3] -> (face_projection [B, N, 2], z_buffer [B, N, 1]).
    Focal length and camera position are chosen empirically.

    Fix: the original wrote into face_shape[:, :, 2] in place, silently
    mutating the caller's tensor; this version operates on a clone.
    """
    cam_pos = 10  # camera sits at z = 10 looking toward the face
    # Intrinsics matrix [[fx, 0, px], [0, fy, py], [0, 0, 1]].
    p_matrix = np.concatenate([[fx], [0.0], [px], [0.0], [fy], [py], [0.0], [0.0], [1.0]],
                              axis=0).astype(np.float32)
    p_matrix = torch.from_numpy(np.reshape(p_matrix, [1, 3, 3]))
    n_b, nV, _ = face_shape.size()
    if face_shape.is_cuda:
        p_matrix = p_matrix.cuda()
    p_matrix = p_matrix.expand(n_b, 3, 3)
    # Clone so the caller's tensor is left untouched.
    face_shape = face_shape.clone()
    face_shape[:, :, 2] = cam_pos - face_shape[:, :, 2]  # depth relative to camera
    aug_projection = face_shape.bmm(p_matrix.permute(0, 2, 1))
    # Perspective divide by the camera-space depth.
    face_projection = aug_projection[:, :, 0:2] / aug_projection[:, :, 2:]
    z_buffer = cam_pos - aug_projection[:, :, 2:]
    return face_projection, z_buffer
def illumination_layer(face_texture, norm, gamma):
    """Shade per-vertex albedo with a 9-band spherical-harmonics lighting model.

    face_texture: [B, N, 3], norm: [B, N, 3], gamma: [B, 27] ->
    (face_color [B, N, 3] in RGB 0-255 scale, lighting [B, N, 3]).
    """
    n_b, num_vertex, _ = face_texture.size()
    n_v_full = n_b * num_vertex
    # 9 SH coefficients per color channel; boost the DC term as in the original.
    gamma = gamma.view(-1, 3, 9).clone()
    gamma[:, :, 0] += 0.8
    gamma = gamma.permute(0, 2, 1)
    a0, a1, a2, c0, c1, c2, d0 = _need_const.illu_consts
    Y0 = torch.ones(n_v_full).float() * a0 * c0
    if gamma.is_cuda:
        Y0 = Y0.cuda()
    norm = norm.view(-1, 3)
    nx, ny, nz = norm[:, 0], norm[:, 1], norm[:, 2]
    # SH basis evaluated at each normal; ordering must match gamma's layout.
    basis = [
        Y0,
        -a1 * c1 * ny,
        a1 * c1 * nz,
        -a1 * c1 * nx,
        a2 * c2 * nx * ny,
        -a2 * c2 * ny * nz,
        a2 * c2 * d0 * (3 * nz.pow(2) - 1),
        -a2 * c2 * nx * nz,
        a2 * c2 * 0.5 * (nx.pow(2) - ny.pow(2)),
    ]
    Y = torch.stack(basis, 1).view(n_b, num_vertex, 9)
    lighting = Y.bmm(gamma)            # [B, N, 3] per-channel irradiance
    face_color = face_texture * lighting
    return face_color, lighting
def rigid_transform(face_shape, rotation, translation):
    """Apply v' = v @ R + t per batch (R is already transposed by its producer)."""
    batch = face_shape.shape[0]
    rotated = face_shape.bmm(rotation)
    return rotated + translation.view(batch, 1, 3)
def compute_landmarks(face_shape, facemodel):
    """Select the landmark vertices from the full face shape.

    facemodel.keypoints holds 1-based vertex ids; converted to 0-based here.
    """
    idx = facemodel.keypoints - 1
    return face_shape[:, idx, :]
def compute_3d_landmarks(face_shape, facemodel, angles, translation):
    """Rigidly pose the face, then pick out its 3D landmark vertices."""
    rotation = compute_rotation_matrix(angles)
    posed = rigid_transform(face_shape, rotation, translation)
    return compute_landmarks(posed, facemodel)
def transform_face_shape(face_shape, angles, translation):
    """Return the face shape rigidly posed by the given rotation/translation."""
    rotation = compute_rotation_matrix(angles)
    return rigid_transform(face_shape, rotation, translation)
def render_img(face_shape, face_color, facemodel, image_size=224, fx=1015.0, fy=1015.0, px=112.0, py=112.0, device='cuda:0'):
    '''
    ref: https://github.com/facebookresearch/pytorch3d/issues/184
    The rendering function (just for test)
    Input:
        face_shape:  Tensor[1, 35709, 3]
        face_color:  Tensor[1, 35709, 3] in [0, 1]
        facemodel:   contains `tri` (triangles[70789, 3], index start from 1)
    Returns the rendered images clamped to [0, 1].
    '''
    # Imported lazily so the module loads without pytorch3d unless rendering is used.
    from pytorch3d.structures import Meshes
    from pytorch3d.renderer.mesh.textures import TexturesVertex
    from pytorch3d.renderer import (
        PerspectiveCameras,
        PointLights,
        RasterizationSettings,
        MeshRenderer,
        MeshRasterizer,
        SoftPhongShader,
        BlendParams
    )
    face_color = TexturesVertex(verts_features=face_color.to(device))
    face_buf = torch.from_numpy(facemodel.tri - 1)  # model indices are 1-based
    face_idx = face_buf.unsqueeze(0)
    mesh = Meshes(face_shape.to(device), face_idx.to(device), face_color)
    # Mirror the x-axis to map the model's camera convention onto pytorch3d's.
    R = torch.eye(3).view(1, 3, 3).to(device)
    R[0, 0, 0] *= -1.0
    T = torch.zeros([1, 3]).to(device)
    # Convert pixel-space intrinsics to the NDC form PerspectiveCameras expects.
    half_size = (image_size - 1.0) / 2
    focal_length = torch.tensor([fx / half_size, fy / half_size], dtype=torch.float32).reshape(1, 2).to(device)
    principal_point = torch.tensor([(half_size - px) / half_size, (py - half_size) / half_size], dtype=torch.float32).reshape(1, 2).to(device)
    cameras = PerspectiveCameras(
        device=device,
        R=R,
        T=T,
        focal_length=focal_length,
        principal_point=principal_point
    )
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=0.0,
        faces_per_pixel=1
    )
    # Pure ambient light: vertex colors pass through without diffuse/specular shading.
    lights = PointLights(
        device=device,
        ambient_color=((1.0, 1.0, 1.0),),
        diffuse_color=((0.0, 0.0, 0.0),),
        specular_color=((0.0, 0.0, 0.0),),
        location=((0.0, 0.0, 1e5),)
    )
    blend_params = BlendParams(background_color=(0.0, 0.0, 0.0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
            blend_params=blend_params
        )
    )
    images = renderer(mesh)
    images = torch.clamp(images, 0.0, 1.0)
    return images
def estimate_intrinsic(landmarks_2d, transform_params, z_buffer, face_shape, facemodel, angles, translation):
    """Estimate camera intrinsics (fx, px, fy, py) from 2D/3D landmark pairs.

    NOTE(review): mutates `landmarks_2d` in place while mapping it back to the
    un-cropped image frame; callers should not reuse it afterwards.
    """
    def re_convert(landmarks_2d, trans_params, origin_size=_need_const.origin_size, target_size=_need_const.target_size):
        # Undo the preprocessing crop/scale: map landmark pixel coordinates from
        # the 224x224 network frame back to the original image frame.
        w = (origin_size * trans_params[2]).astype(np.int32)
        h = (origin_size * trans_params[2]).astype(np.int32)
        landmarks_2d[:, :, 1] = target_size - 1 - landmarks_2d[:, :, 1]  # flip y
        landmarks_2d[:, :, 0] = landmarks_2d[:, :, 0] + w / 2 - target_size / 2
        landmarks_2d[:, :, 1] = landmarks_2d[:, :, 1] + h / 2 - target_size / 2
        landmarks_2d = landmarks_2d / trans_params[2]  # undo scaling
        landmarks_2d[:, :, 0] = landmarks_2d[:, :, 0] + trans_params[3] - origin_size / 2
        landmarks_2d[:, :, 1] = landmarks_2d[:, :, 1] + trans_params[4] - origin_size / 2
        landmarks_2d[:, :, 1] = origin_size - 1 - landmarks_2d[:, :, 1]  # flip y back
        return landmarks_2d

    def POS(xp, x):
        # Solve the least-squares system relating image points xp to 3D points x
        # for the pinhole parameters [fx, px, fy, py].
        # ref https://github.com/pytorch/pytorch/issues/27036
        ls = LeastSquares()
        npts = xp.shape[1]
        # Interleave rows: even rows constrain (fx, px), odd rows (fy, py).
        A = torch.zeros([2*npts, 4]).to(x.device)
        A[0:2*npts-1:2, 0:2] = x[0, :, [0, 2]]
        A[1:2*npts:2, 2:4] = x[0, :, [1, 2]]
        b = torch.reshape(xp[0], [2*npts, 1])
        k = ls.lstq(A, b, 0.010)  # 0.010: regularization weight -- TODO confirm
        fx = k[0, 0]
        px = k[1, 0]
        fy = k[2, 0]
        py = k[3, 0]
        return fx, px, fy, py

    # Map landmarks back to the un-cropped frame, flip y, and scale by depth
    # so the projection equations become linear in the intrinsics.
    landmarks_2d = re_convert(landmarks_2d, transform_params)
    landmarks_2d[:, :, 1] = _need_const.origin_size - 1.0 - landmarks_2d[:, :, 1]
    landmarks_2d[:, :, :2] = landmarks_2d[:, :, :2] * (_need_const.camera_pos - z_buffer[:, :, :])
    # Pose the 3D landmarks with the predicted rotation/translation.
    landmarks_3d = compute_3d_landmarks(face_shape, facemodel, angles, translation)
    # Express depth relative to the camera before solving.
    landmarks_3d_ = landmarks_3d.clone()
    landmarks_3d_[:, :, 2] = _need_const.camera_pos - landmarks_3d_[:, :, 2]
    fx, px, fy, py = POS(landmarks_2d, landmarks_3d_)
    return fx, px, fy, py
def reconstruction(coeff, facemodel):
    """Full 3DMM decode: coefficients -> shape, texture, lit color, 2D landmarks.

    The image plane is 224x224; landmark y is flipped into image coordinates.
    """
    id_coeff, ex_coeff, tex_coeff, angles, gamma, translation = split_coeff(coeff)
    face_shape = shape_formation(id_coeff, ex_coeff, facemodel)  # geometry
    face_texture = texture_formation(tex_coeff, facemodel)       # albedo
    face_norm = compute_norm(face_shape, facemodel)              # vertex normals
    rotation = compute_rotation_matrix(angles)
    rotated_norm = face_norm.bmm(rotation)
    # Rigidly pose the face, then project its landmark vertices onto the image.
    posed_shape = rigid_transform(face_shape, rotation, translation)
    posed_landmarks = compute_landmarks(posed_shape, facemodel)
    landmarks_2d, z_buffer = projection_layer(posed_landmarks)
    landmarks_2d[:, :, 1] = _need_const.target_size - 1.0 - landmarks_2d[:, :, 1]
    # Shade the albedo with the SH lighting approximation.
    face_color, lighting = illumination_layer(face_texture, rotated_norm, gamma)
    return face_shape, face_texture, face_color, landmarks_2d, z_buffer, angles, translation, gamma
|
import logger
import time
import unittest
import threading
from threading import Thread
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.cluster import Cluster
from basetestcase import BaseTestCase
from remote.remote_util import RemoteMachineShellConnection
import json
import sys
from basetestcase import BaseTestCase
from membase.helper.spatial_helper import SpatialHelper
class SpatialQueryTests(BaseTestCase):
    def setUp(self):
        """Build the spatial-query fixture: helper, cluster handle, server list.

        NOTE(review): self.helper is created before super().setUp() runs --
        presumably SpatialHelper only records references at construction time;
        confirm the ordering is intentional.
        """
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers
    def tearDown(self):
        """Delegate all cleanup to the BaseTestCase teardown."""
        super(SpatialQueryTests, self).tearDown()
    def test_simple_dataset_limit_queries(self):
        """Limit queries against the simple (single-dimension) dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        """Skip (and limit) queries against the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        """Bounding-box queries against the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_range_queries(self):
        """Range queries against the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)
    def test_multidim_dataset_limit_queries(self):
        """Limit queries against the multidimensional dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_skip_queries(self):
        """Skip (and limit) queries against the multidimensional dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_queries(self):
        """Range queries against the multidimensional dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a "
                      "multidimensional with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_and_limit_queries(self):
        """Range queries with limits against the multidimensional dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init(data_set)
    # --- Rebalance-in variants: same query suites, run while nodes are added ---
    def test_rebalance_in_simple_dataset_limit_queries(self):
        """Rebalance-in while running limit queries on the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_skip_queries(self):
        """Rebalance-in while running skip/limit queries on the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_bbox_queries(self):
        """Rebalance-in while running bounding-box queries on the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_range_queries(self):
        """Rebalance-in while running range queries on the simple dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_limit_queries(self):
        """Rebalance-in while running limit queries on the multidim dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_skip_queries(self):
        """Rebalance-in while running skip/limit queries on the multidim dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_queries(self):
        """Rebalance-in while running range queries on the multidim dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
        """Rebalance-in while running range+limit queries on the multidim dataset."""
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))
        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)
#Rebalance Out
def test_rebalance_out_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_simple_dataset_skip_queries(self):
    """Rebalance nodes out while skip/limit queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                  "simple dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_simple_dataset_bbox_queries(self):
    """Rebalance nodes out while bounding-box queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and bounding box queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_bbox_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_simple_dataset_range_queries(self):
    """Rebalance nodes out while range queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and range queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_multidim_dataset_limit_queries(self):
    """Rebalance nodes out while limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and limit queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_limit_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_multidim_dataset_skip_queries(self):
    """Rebalance nodes out while skip/limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_multidim_dataset_range_queries(self):
    """Rebalance nodes out while range queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and range queries on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._rebalance_cluster(dataset)
def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
    """Rebalance nodes out while range+limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Rebalance Out and range queries with limits on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_and_limit_queries()
    self._rebalance_cluster(dataset)
# Warmup Tests
def test_warmup_simple_dataset_limit_queries(self):
    """Warmup while running limit queries on a simple dataset.

    Bug fix: the description logged "skip and limit queries" although this
    test only registers limit queries; the message now matches the behavior.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with limit queries on a simple "
                  "dataset with {0} docs".format(num_docs))
    data_set = SimpleDataSet(self.helper, num_docs)
    data_set.add_limit_queries()
    self._query_test_init_integration(data_set)
def test_warmup_simple_dataset_skip_queries(self):
    """Warmup while skip/limit queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with skip (and limit) queries on a "
                  "simple dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._query_test_init_integration(dataset)
def test_warmup_simple_dataset_bbox_queries(self):
    """Warmup while bounding-box queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with bounding box queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_bbox_queries()
    self._query_test_init_integration(dataset)
def test_warmup_simple_dataset_range_queries(self):
    """Warmup while range queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with range queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._query_test_init_integration(dataset)
def test_warmup_multidim_dataset_limit_queries(self):
    """Warmup while limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with limit queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_limit_queries()
    self._query_test_init_integration(dataset)
def test_warmup_multidim_dataset_skip_queries(self):
    """Warmup while skip/limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with skip (and limit) queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._query_test_init_integration(dataset)
def test_warmup_multidim_dataset_range_queries(self):
    """Warmup while range queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with range queries on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._query_test_init_integration(dataset)
def test_warmup_multidim_dataset_range_and_limit_queries(self):
    """Warmup while range+limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Warmup with range queries with limits on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_and_limit_queries()
    self._query_test_init_integration(dataset)
# Reboot Tests
def test_reboot_simple_dataset_limit_queries(self):
    """Reboot nodes while limit queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot and limit queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_limit_queries()
    self._query_test_init_integration(dataset)
def test_reboot_simple_dataset_skip_queries(self):
    """Reboot nodes while skip/limit queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with skip (and limit) queries on a "
                  "simple dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._query_test_init_integration(dataset)
def test_reboot_simple_dataset_bbox_queries(self):
    """Reboot nodes while bounding-box queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with bounding box queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_bbox_queries()
    self._query_test_init_integration(dataset)
def test_reboot_simple_dataset_range_queries(self):
    """Reboot nodes while range queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with range queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._query_test_init_integration(dataset)
def test_reboot_multidim_dataset_limit_queries(self):
    """Reboot nodes while limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with limit queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_limit_queries()
    self._query_test_init_integration(dataset)
def test_reboot_multidim_dataset_skip_queries(self):
    """Reboot nodes while skip/limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with skip (and limit) queries on a "
                  "multidimensional dataset with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_skip_queries()
    self._query_test_init_integration(dataset)
def test_reboot_multidim_dataset_range_queries(self):
    """Reboot nodes while range queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with range queries on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_queries()
    self._query_test_init_integration(dataset)
def test_reboot_multidim_dataset_range_and_limit_queries(self):
    """Reboot nodes while range+limit queries run on a multidim dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Reboot with range queries with limits on a "
                  "multidimensional with {0} docs".format(doc_count))
    dataset = MultidimDataSet(self.helper, doc_count)
    dataset.add_range_and_limit_queries()
    self._query_test_init_integration(dataset)
# Failover Tests
def test_failover_simple_dataset_limit_queries(self):
    """Failover nodes while limit queries run on a simple dataset."""
    doc_count = self.helper.input.param("num-docs")
    self.log.info("description : Failover and limit queries on a simple "
                  "dataset with {0} docs".format(doc_count))
    dataset = SimpleDataSet(self.helper, doc_count)
    dataset.add_limit_queries()
    self._failover_cluster(dataset)
def test_failover_simple_dataset_skip_queries(self):
    """Failover nodes while skip (and limit) queries run on a simple dataset.

    Bug fix: the logged description said "Rebalance Out" (copy-paste from the
    rebalance tests); it now says "Failover" to match what this test does.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and skip (and limit) queries on a "
                  "simple dataset with {0} docs".format(num_docs))
    data_set = SimpleDataSet(self.helper, num_docs)
    data_set.add_skip_queries()
    self._failover_cluster(data_set)
def test_failover_simple_dataset_bbox_queries(self):
    """Failover nodes while bounding-box queries run on a simple dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and bounding box queries on a simple "
                  "dataset with {0} docs".format(num_docs))
    data_set = SimpleDataSet(self.helper, num_docs)
    data_set.add_bbox_queries()
    self._failover_cluster(data_set)
def test_failover_simple_dataset_range_queries(self):
    """Failover nodes while range queries run on a simple dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and range queries on a simple "
                  "dataset with {0} docs".format(num_docs))
    data_set = SimpleDataSet(self.helper, num_docs)
    data_set.add_range_queries()
    self._failover_cluster(data_set)
def test_failover_multidim_dataset_limit_queries(self):
    """Failover nodes while limit queries run on a multidim dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and limit queries on a "
                  "multidimensional dataset with {0} docs".format(num_docs))
    data_set = MultidimDataSet(self.helper, num_docs)
    data_set.add_limit_queries()
    self._failover_cluster(data_set)
def test_failover_multidim_dataset_skip_queries(self):
    """Failover nodes while skip (and limit) queries run on a multidim dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and skip (and limit) queries on a "
                  "multidimensional dataset with {0} docs".format(num_docs))
    data_set = MultidimDataSet(self.helper, num_docs)
    data_set.add_skip_queries()
    self._failover_cluster(data_set)
def test_failover_multidim_dataset_range_queries(self):
    """Failover nodes while range queries run on a multidim dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and range queries on a "
                  "multidimensional with {0} docs".format(num_docs))
    data_set = MultidimDataSet(self.helper, num_docs)
    data_set.add_range_queries()
    self._failover_cluster(data_set)
def test_failover_multidim_dataset_range_and_limit_queries(self):
    """Failover nodes while range+limit queries run on a multidim dataset.

    Bug fix: corrected the copy-pasted "Rebalance Out" description.
    """
    num_docs = self.helper.input.param("num-docs")
    self.log.info("description : Failover and range queries with limits on a "
                  "multidimensional with {0} docs".format(num_docs))
    data_set = MultidimDataSet(self.helper, num_docs)
    data_set.add_range_and_limit_queries()
    self._failover_cluster(data_set)
###
# load the data defined for this dataset.
# create views and query the data as it loads.
# verification is optional, and best practice is to
# set to False if you plan on running _query_all_views()
# later in the test case
###
def _query_test_init(self, data_set, verify_results = True):
    """Load the dataset on a background thread while querying all views.

    When verify_results is False only the view integrity is checked at the
    end; best practice is to pass False when _query_all_views() is run later.
    """
    views = data_set.views
    loader = Thread(target=data_set.load, name="load_data_set", args=())
    loader.start()
    # Hammer the views with (unverified) queries until loading completes.
    while loader.is_alive():
        self._query_all_views(views, False)
        time.sleep(5)
    loader.join()
    if verify_results:
        self._query_all_views(views, verify_results)
    else:
        self._check_view_intergrity(views)
def _query_test_init_integration(self, data_set, verify_results = True):
    """Load the dataset, run the configured cluster operation, then query
    all views on a background thread while repeatedly rebalancing.

    Bug fix: the original passed `self._query_all_views(views, False)` as the
    Thread target -- that CALLED the method inline and handed its return value
    (None) to Thread, so the thread did nothing.  The callable and its args
    are now passed separately.  The dead `target_fn`/`inserted_keys` locals
    were removed.
    """
    views = data_set.views
    data_set.load()
    # Run the configured cluster disruption first (reboot/warmup/rebalance).
    if self.helper.num_nodes_reboot >= 1:
        self._reboot_cluster(data_set)
    elif self.helper.num_nodes_warmup >= 1:
        self._warmup_cluster(data_set)
    elif self.helper.num_nodes_to_add >= 1 or self.helper.num_nodes_to_remove >= 1:
        self._rebalance_cluster(data_set)
    t = Thread(target=self._query_all_views, args=(views, False))
    t.start()
    # Keep rebalancing while the query thread is running.
    while t.is_alive():
        self._rebalance_cluster(data_set)
        time.sleep(5)
    t.join()
    if verify_results:
        self._query_all_views(views, verify_results)
    else:
        # NOTE(review): views are View objects, not query threads -- confirm
        # _check_view_intergrity() is meant to accept them (same as original).
        self._check_view_intergrity(views)
##
# run all queries for all views in parallel
##
def _query_all_views(self, views, verify_results = True):
    """Run every registered query for every view, one thread per view."""
    workers = [RunQueriesThread(view, verify_results) for view in views]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    self._check_view_intergrity(workers)
##
# If an error occured loading or querying data for a view
# it is queued and checked here. Fail on the first one that
# occurs.
##
def _check_view_intergrity(self, thread_results):
    """Fail the test on the first queued error or failure from any thread."""
    for outcome in thread_results:
        errors = outcome.test_results.errors
        if errors:
            self.fail(errors[0][1])
        failures = outcome.test_results.failures
        if failures:
            self.fail(failures[0][1])
###
# Rebalance
###
def _rebalance_cluster(self, data_set):
    """Rebalance nodes in or out while loading/querying the dataset.

    Bug fix: the rebalance-out branch sliced the server list with
    `num_nodes_to_add` instead of `num_nodes_to_remove`, so the wrong set of
    nodes was removed whenever the two parameters differed.
    """
    if self.helper.num_nodes_to_add >= 1:
        rebalance = self.cluster.async_rebalance(self.servers[:1],
            self.servers[1:self.helper.num_nodes_to_add + 1],
            [])
        self._query_test_init(data_set)
        rebalance.result()
    elif self.helper.num_nodes_to_remove >= 1:
        rebalance = self.cluster.async_rebalance(self.servers[:1], [],
            self.servers[1:self.helper.num_nodes_to_remove + 1])
        self._query_test_init(data_set)
        rebalance.result()
def _failover_cluster(self, data_set):
    """Fail over node(s), rebalance them out, and verify spatial queries.

    NOTE(review): failover_nodes is computed from failover_factor but the
    actual failover/rebalance below is hard-coded to self.servers[1:2]
    (see the commented-out call) -- confirm whether the factor should apply.
    """
    failover_nodes = self.servers[1 : self.helper.failover_factor + 1]
    try:
        # failover and verify loaded data
        #self.cluster.failover(self.servers, failover_nodes)
        self.cluster.failover(self.servers, self.servers[1:2])
        self.log.info("120 seconds sleep after failover before invoking rebalance...")
        time.sleep(120)
        # Rebalance the failed-over node out of the cluster while querying.
        rebalance = self.cluster.async_rebalance(self.servers,
            [], self.servers[1:2])
        self._query_test_init(data_set)
        msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
        self.assertTrue(rebalance.result(), msg=msg)
        #verify queries after failover
        self._query_test_init(data_set)
    finally:
        self.log.info("Completed the failover testing for spatial querying")
###
# Warmup
###
def _warmup_cluster(self, data_set):
    """Restart the first num_nodes_warmup servers, wait, then load/query."""
    for server in self.servers[0:self.helper.num_nodes_warmup]:
        shell = RemoteMachineShellConnection(server)
        shell.stop_server()
        shell.start_server()
        shell.disconnect()
        self.log.info("Node {0} should be warming up ".format(server.ip))
    # Give the restarted nodes time to warm up before querying.
    time.sleep(120)
    self._query_test_init(data_set)
# REBOOT
def _reboot_cluster(self, data_set):
    """Reboot the first num_nodes_reboot servers and wait for them to return.

    NOTE(review): the `finally` block references the loop variable `server`,
    which is unbound if num_nodes_reboot selects no servers -- confirm callers
    always pass at least one.  The log format string there also has no
    placeholder for server.ip.
    """
    try:
        for server in self.servers[0:self.helper.num_nodes_reboot]:
            shell = RemoteMachineShellConnection(server)
            if shell.extract_remote_info().type.lower() == 'windows':
                o, r = shell.execute_command("shutdown -r -f -t 0")
                shell.log_command_output(o, r)
                shell.disconnect()
                self.log.info("Node {0} is being stopped".format(server.ip))
            elif shell.extract_remote_info().type.lower() == 'linux':
                o, r = shell.execute_command("reboot")
                shell.log_command_output(o, r)
                shell.disconnect()
                self.log.info("Node {0} is being stopped".format(server.ip))
                # Wait for the node to come back, then flush firewall rules
                # so the cluster can reach it again.
                time.sleep(120)
                shell = RemoteMachineShellConnection(server)
                command = "/sbin/iptables -F"
                o, r = shell.execute_command(command)
                shell.log_command_output(o, r)
                shell.disconnect()
                self.log.info("Node {0} backup".format(server.ip))
    finally:
        self.log.info("Warming-up server ..".format(server.ip))
        time.sleep(100)
class View:
    """A single spatial view plus the queries registered against it."""

    def __init__(self, helper, index_size, fn_str, name='dev_test_view',
                 create_on_init=True):
        self.helper = helper
        self.index_size = index_size
        self.name = name
        self.log = logger.Logger.get_logger()
        # Queries to run against this view; populated by the data sets.
        self.queries = []
        # Failures collected here must be added explicitly, otherwise the
        # failed assertions will not make the whole test fail.
        self._test_results = unittest.TestResult()
        if create_on_init:
            self.helper.create_index_fun(name, fn_str)
class SimpleDataSet:
    """Dataset of docs with a geometry/name field and one 2-D spatial view.

    The query tables below pair spatial query parameters with the exact
    number of documents each query is expected to return.
    """
    def __init__(self, helper, num_docs):
        self.helper = helper
        self.num_docs = num_docs
        self.views = self._create_views()
        self.name = "simple_dataset"
    def _create_views(self):
        # One spatial view emitting (geometry, name) for each document.
        view_fn = 'function (doc) {if(doc.geometry !== undefined || doc.name !== undefined ) { emit(doc.geometry, doc.name);}}'
        return [View(self.helper, self.num_docs, fn_str = view_fn)]
    def load(self):
        """Insert num_docs documents and return their keys."""
        inserted_keys = self.helper.insert_docs(self.num_docs, self.name)
        return inserted_keys
    def add_limit_queries(self):
        """Register limit queries; a limit above index_size is clamped."""
        for view in self.views:
            view.queries += [
                QueryHelper({"limit": 10}, 10),
                QueryHelper({"limit": 3417}, 3417),
                QueryHelper({"limit": view.index_size}, view.index_size),
                QueryHelper({"limit": 5*view.index_size}, view.index_size)]
    def add_skip_queries(self):
        """Register skip (and skip+limit) queries, including a comparison of
        one big page against two adjacent smaller pages."""
        for view in self.views:
            view.queries += [
                QueryHelper({"skip": 10}, view.index_size-10),
                QueryHelper({"skip": 2985}, view.index_size-2985),
                QueryHelper({"skip": view.index_size}, 0),
                QueryHelper({"skip": 5*view.index_size}, 0),
                QueryHelper({"skip": 2985, "limit": 1539}, 1539),
                QueryHelper({"skip": view.index_size-120, "limit": 1539}, 120),
                QueryCompareHelper([{"skip": 6210, "limit": 1592}],
                                   [{"skip": 6210, "limit": 1086},
                                    {"skip": 7296, "limit": 506}])
                ]
    def add_bbox_queries(self):
        """Register bounding-box queries; a bbox covering the whole plane must
        match every document, and split boxes must union to the full box."""
        for view in self.views:
            view.queries += [
                QueryHelper({"bbox": "-180,-90,180,90"}, view.index_size),
                QueryHelper({"bbox": "-900,-900,900,900"}, view.index_size),
                QueryHelper({}, view.index_size),
                QueryHelper({"bbox": "-900,-900,900,900"}, view.index_size),
                QueryCompareHelper([{"bbox": "-900,-900,900,900"}],
                                   [{}]),
                QueryCompareHelper([{"bbox": "-117,-76,34,43"}],
                                   [{"bbox": "-117,-76,34,-5"},
                                    {"bbox": "-117,-5,34,43"}]),
                ]
    def add_range_queries(self):
        """Register start_range/end_range queries mirroring the bbox cases."""
        for view in self.views:
            view.queries += [
                QueryHelper(
                    {"start_range": [-180, -90], "end_range": [180, 90]},
                    view.index_size),
                QueryHelper(
                    {"start_range": [-900, -900], "end_range": [900, 900]},
                    view.index_size),
                QueryCompareHelper([{"start_range": [-900, -900],
                                     "end_range": [900, 900]}],
                                   [{}]),
                QueryCompareHelper([{"start_range": [-117, -76],
                                     "end_range": [34, 43]}],
                                   [{"start_range": [-117, -76],
                                     "end_range": [34, -5]},
                                    {"start_range": [-117, -5],
                                     "end_range": [34, 43]}])
                ]
    def add_all_query_sets(self):
        """Register every query category on every view."""
        self.add_limit_queries()
        self.add_skip_queries()
        self.add_bbox_queries()
        self.add_range_queries()
class MultidimDataSet:
    """Dataset with a multidimensional spatial view over age/height/bloom.

    Bug fix: the limit values in add_range_and_limit_queries() used
    `num_docs / 2`, which is integer division on Python 2 but produces a
    float on Python 3; `num_docs // 2` is behavior-identical for ints on
    both versions.
    """
    def __init__(self, helper, num_docs):
        self.helper = helper
        self.num_docs = num_docs
        self.views = self._create_views()
        self.name = "multidim_dataset"
    def _create_views(self):
        # Emit a 3-part key: age, height, and a [bloom, shed_leaves] range.
        view_fn = '''function (doc) {
            if (doc.age !== undefined || doc.height !== undefined ||
                    doc.bloom !== undefined || doc.shed_leaves !== undefined) {
                emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]], doc.name);
            }}'''
        return [View(self.helper, self.num_docs, fn_str = view_fn)]
    def load(self):
        """Insert num_docs documents and return their keys."""
        inserted_keys = self.helper.insert_docs(self.num_docs, self.name)
        return inserted_keys
    def add_limit_queries(self):
        """Register limit queries; a limit above index_size is clamped."""
        for view in self.views:
            view.queries += [
                QueryHelper({"limit": 10}, 10),
                QueryHelper({"limit": 3417}, 3417),
                QueryHelper({"limit": view.index_size}, view.index_size),
                QueryHelper({"limit": 5*view.index_size}, view.index_size)]
    def add_skip_queries(self):
        """Register skip (and skip+limit) queries plus a paging comparison."""
        for view in self.views:
            view.queries += [
                QueryHelper({"skip": 10}, view.index_size-10),
                QueryHelper({"skip": 2985}, view.index_size-2985),
                QueryHelper({"skip": view.index_size}, 0),
                QueryHelper({"skip": 5*view.index_size}, 0),
                QueryHelper({"skip": 2985, "limit": 1539}, 1539),
                QueryHelper({"skip": view.index_size-120, "limit": 1539}, 120),
                QueryCompareHelper([{"skip": 6210, "limit": 1592}],
                                   [{"skip": 6210, "limit": 1086},
                                    {"skip": 7296, "limit": 506}])
                ]
    def add_range_queries(self):
        """Register multidimensional range queries; None means open-ended.

        The fixed expected counts (2066, 2562) assume the standard document
        generator distribution -- confirm if the generator changes.
        """
        for view in self.views:
            view.queries += [
                QueryHelper(
                    {"start_range": [0, 0, 0],
                     "end_range": [1001, 13001, 13]},
                    view.index_size),
                QueryHelper(
                    {"start_range": [None, 0, None],
                     "end_range": [1001, None, None]},
                    view.index_size),
                QueryHelper(
                    {"start_range": [500, 2000, 3],
                     "end_range": [800, 11111, 9]},
                    2066),
                QueryHelper(
                    {"start_range": [500, -500, 3],
                     "end_range": [800, 11111, 9]},
                    2562),
                QueryCompareHelper(
                    [{"start_range": [500, -500, 3],
                      "end_range": [800, 11111, 9]}],
                    [{"start_range": [500, None, 3],
                      "end_range": [800, 11111, 9]}]),
                QueryCompareHelper(
                    [{"start_range": [500, -500, 3],
                      "end_range": [800, 11111, 9]}],
                    [{"start_range": [500, None, 3],
                      "end_range": [800, None, 9]}]),
                QueryCompareHelper(
                    [{"start_range": [500, 2000, 3],
                      "end_range": [800, 11111, 9]}],
                    [{"start_range": [500, 2000, 3],
                      "end_range": [600, 8000, 9]},
                     {"start_range": [500, 8000, 3],
                      "end_range": [600, 11111, 9]},
                     {"start_range": [600, 2000, 3],
                      "end_range": [800, 11111, 9]}])
                ]
    def add_range_and_limit_queries(self):
        """Register range queries combined with limit/skip paging."""
        for view in self.views:
            view.queries += [
                QueryHelper(
                    {"start_range": [0, 0, 0],
                     "end_range": [1001, 13001, 13],
                     "limit": self.num_docs // 2},
                    self.num_docs // 2),
                QueryHelper(
                    {"start_range": [None, 0, None],
                     "end_range": [1001, None, None],
                     "limit": self.num_docs // 2},
                    self.num_docs // 2),
                QueryHelper(
                    {"start_range": [500, 2000, 3],
                     "end_range": [800, 11111, 9],
                     "limit": 1000},
                    1000),
                QueryHelper(
                    {"start_range": [500, -500, 3],
                     "end_range": [800, 11111, 9],
                     "limit": 5},
                    5),
                QueryCompareHelper(
                    [{"start_range": [500, 1800, 3],
                      "end_range": [800, 11111, 9]}],
                    [{"start_range": [500, 1800, 3],
                      "end_range": [800, 11111, 9],
                      "limit": 700},
                     {"start_range": [500, 1800, 3],
                      "end_range": [800, 11111, 9],
                      "skip": 700,
                      "limit": 100},
                     {"start_range": [500, 1800, 3],
                      "end_range": [800, 11111, 9],
                      "skip": 800,
                      "limit": 10000},
                     ]),
                ]
    def add_all_query_sets(self):
        """Register every query category on every view."""
        self.add_limit_queries()
        self.add_skip_queries()
        self.add_range_queries()
        self.add_range_and_limit_queries()
class QueryHelper:
    """Bundle query parameters with the number of docs the query must return."""

    def __init__(self, params, expected_num_docs):
        # Raw view query parameters (limit/skip/bbox/ranges).
        self.params = params
        # Number of documents this query is expected to return.
        self.expected_num_docs = expected_num_docs
# Put in two lists of queries, it will then join the results of the
# individual queries and compare both
class QueryCompareHelper:
    """Two lists of queries whose joined result sets must be equal."""

    def __init__(self, queries_a, queries_b):
        # The results of queries_a are unioned and compared to queries_b's.
        self.queries_a = queries_a
        self.queries_b = queries_b
class RunQueriesThread(threading.Thread):
    """Worker thread that runs all queries registered on a single view.

    Exceptions are captured into `test_results` instead of propagating, so
    the owning test can fail once all threads are joined.
    """
    def __init__(self, view, verify_results = False):
        threading.Thread.__init__(self)
        self.view = view
        self.verify_results = verify_results
        # The last retrieved results, useful when an exception happened
        self._last_results = None
        # Store failures in here. So we can make the whole test fail,
        # normally only this thread will fail
        self.test_results = unittest.TestResult()
        self.helper = self.view.helper
        self.log = self.view.log
    def run(self):
        """Thread entry point: run the queries, recording any failure."""
        if not len(self.view.queries) > 0 :
            self.log.info("No queries to run for this view")
            return
        try:
            self._run_queries()
        except Exception:
            # Log the last raw result to aid debugging, then queue the
            # failure for _check_view_intergrity() to report.
            self.log.error("Last query result:\n\n{0}\n\n"\
                               .format(json.dumps(self._last_results,
                                                  sort_keys=True)))
            self.test_results.addFailure(self.helper.testcase, sys.exc_info())
    def _run_queries(self):
        """Dispatch each registered query by its helper type."""
        for query in self.view.queries:
            # Simple query
            if isinstance(query, QueryHelper):
                if self.verify_results:
                    self._last_results = self._run_query(
                        query.params, query.expected_num_docs)
                else:
                    self._last_results = self._run_query(query.params)
            # Compare queries, don't verify the individual queries
            # but only the final result
            elif isinstance(query, QueryCompareHelper):
                result_keys_a = []
                result_keys_b = []
                for params in query.queries_a:
                    self._last_results = self._run_query(params)
                    result_keys_a.extend(
                        self.helper.get_keys(self._last_results))
                for params in query.queries_b:
                    self._last_results = self._run_query(params)
                    result_keys_b.extend(
                        self.helper.get_keys(self._last_results))
                if self.verify_results:
                    # Joined result sets must match exactly.
                    diff = set(result_keys_a) - set(result_keys_b)
                    self.helper.testcase.assertEqual(diff, set())
            else:
                self.helper.testcase.fail("no queries specified")
    # If expected_num_docs is given, the results are verified
    def _run_query(self, query_params, expected_num_docs=None):
        """Run one view query (always with debug=True) and optionally verify
        the returned key count against expected_num_docs."""
        params = {"debug": True}
        params.update(query_params)
        if expected_num_docs is not None:
            self.log.info("Quering view {0} with params: {1}".format(
                              self.view.name, params));
            results = self.helper.get_results(self.view.name, None, params)
            num_keys = len(self.helper.get_keys(results))
            self.log.info("{0}: retrieved value {1} expected: {2}"\
                              .format(self.view.name, num_keys,
                                      expected_num_docs));
            if(num_keys != expected_num_docs):
                error = "Query failed: {0} Documents Retrieved, "\
                    "expected {1}".format(num_keys, expected_num_docs)
                try:
                    # Log the mismatch before re-raising the assertion.
                    self.helper.testcase.assertEquals(num_keys,
                                                      expected_num_docs,
                                                      error)
                except Exception:
                    self.log.error(error)
                    raise
            else:
                return results
        else:
            # query without verification
            self.log.info("Quering view {0} with params: {1}"\
                              .format(self.view.name, params));
            return self.helper.get_results(self.view.name, None, params)
|
from random import randint
class Dice():
    """A class that represents a unique dice"""

    def __init__(self, num_sides=6):
        """Create a die; defaults to the common six-sided one."""
        self.num_sides = num_sides

    def roll(self):
        """Returns a random value between 1 and the number of sides"""
        return randint(1, self.num_sides)
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import subprocess
import sys
from typing import List, Optional
from fontmake.errors import FontmakeError, TTFAError
def _which_ttfautohint() -> Optional[List[str]]:
# First check if ttfautohint-py is installed, else try to find the standalone
# ttfautohint command-line tool, or None if neither is found.
try:
import ttfautohint # noqa: F401
except ImportError:
ttfautohint_path = shutil.which("ttfautohint")
return [ttfautohint_path] if ttfautohint_path else None
else:
return [sys.executable, "-m", "ttfautohint"]
def ttfautohint(in_file, out_file, args=None, **kwargs):
    """Thin wrapper around the ttfautohint command line tool.

    Can take in command line arguments directly as a string, or spelled out as
    Python keyword arguments.
    """
    file_args = [in_file, out_file]
    # Renamed local (was `ttfautohint`) so it no longer shadows this function.
    ttfa_cmd = _which_ttfautohint()
    if ttfa_cmd is None:
        raise FontmakeError(
            "ttfautohint not found; try `pip install ttfautohint-py`", in_file
        )

    if args is not None:
        if kwargs:
            raise TypeError("Should not provide both cmd args and kwargs.")
        try:
            status = subprocess.call(ttfa_cmd + args.split() + file_args)
        except OSError as e:
            raise FontmakeError(
                "Could not launch ttfautohint (is it installed?)", in_file
            ) from e
        if status != 0:
            raise TTFAError(status, in_file)
        return

    boolean_options = (
        "debug",
        "composites",
        "dehint",
        "help",
        "ignore_restrictions",
        "detailed_info",
        "no_info",
        "adjust_subglyphs",
        "symbol",
        "ttfa_table",
        "verbose",
        "version",
        "windows_compatibility",
    )
    other_options = (
        "default_script",
        "fallback_script",
        "family_suffix",
        "hinting_limit",
        "fallback_stem_width",
        "hinting_range_min",
        "control_file",
        "hinting_range_max",
        "strong_stem_width",
        "increase_x_height",
        "x_height_snapping_exceptions",
    )

    # Translate keyword arguments into command-line flags.
    cli_args = []
    for option in boolean_options:
        if kwargs.pop(option, False):
            cli_args.append("--" + option.replace("_", "-"))
    for option in other_options:
        value = kwargs.pop(option, None)
        if value is not None:
            cli_args.append("--{}={}".format(option.replace("_", "-"), value))
    if kwargs:
        raise TypeError("Unexpected argument(s): " + ", ".join(kwargs.keys()))

    status = subprocess.call(ttfa_cmd + cli_args + file_args)
    if status != 0:
        raise TTFAError(status, in_file)
|
"""
Wrapper around the standard logging module to provide a simpler initiation and
consistent formatting.
The differences from the standard are;
- we always log to file
- we use a standardised format
- our default logging level is INFO, not WARN
- the formatter does not include the log name '%(name)s' as we assume that each
log is echoed to its own file
- we currently open the log file in append mode ('a', the FileHandler
default); this may change to overwriting the file on each run
This module should really just contain a class which sub-classes of
logging.Logger. In the first instance we are nice and simple and
just provide a function which returns the logger instance properly
set up.
"""
__author__ = "Andy Todd <andy47@halfcooked.com>"
__date__ = (2006, 3, 15)
__version__ = (1, 4, 0)
import logging
# Cache of already-configured loggers, keyed by name.
LOGS = {}

def get_log(log_name='', file_name=None, level='INFO'):
    """Instantiate log_name and make sure its output is written to file_name

    Set the (output) level according to the value of level
    If no file_name is provided then output should go to the screen
    """
    # We don't want to use the root logger so we will substitute 'default'
    if log_name == '':
        log_name = 'default'
    # Return the cached logger so handlers are not added twice.
    if log_name in LOGS:
        return LOGS[log_name]
    log = logging.getLogger(log_name)
    log.setLevel(getattr(logging, level))
    # Define the handler and formatter
    if file_name:
        # NOTE(review): mode "a" appends, which contradicts the module
        # docstring's claim of rewriting the file each run -- confirm intent.
        handler = logging.FileHandler(file_name, "a")
    else:
        handler = logging.StreamHandler()
    # Attach the formatter to the handler and the handler to the log
    formatter = logging.Formatter("%(asctime)s %(levelname)s:: %(message)s", "%Y.%m.%d %H:%M:%S")
    handler.setFormatter(formatter)
    log.addHandler(handler)
    LOGS[log_name] = log
    return log
def set_level(log_name, level):
    """Set the level on <log_name> to level

    See the standard documentation for the valid list of levels. They are
    all implemented as module attributes so we just use getattr
    """
    target = LOGS[log_name]
    target.setLevel(getattr(logging, level))
|
import socket
import re
import multiprocessing
import sys
# import dynamic.mini_frame
class WSGIServer(object):
def __init__(self, ip, port, application, static_path):
# 1. ๅๅปบๅฅๆฅๅญ
self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# 2. ็ปๅฎ
self.tcp_server_socket.bind((ip, port))
# 3. ๅไธบ็ๅฌๅฅๆฅๅญ
self.tcp_server_socket.listen(128)
self.application = application
self.static_path = static_path
def service_client(self, new_socket):
"""ไธบ่ฟไธชๅฎขๆท็ซฏ่ฟๅๆฐๆฎ"""
# 1. ๆฅๆถๆต่งๅจๅ้่ฟๆฅ็่ฏทๆฑ ๏ผๅณhttp่ฏทๆฑ
# GET / HTTP/1.1
# .....
request = new_socket.recv(1024).decode("utf-8")
# print(">>>"*50)
# print(request)
request_lines = request.splitlines() # ๅฝๅฎขๆท็ซฏไธปๅจๅ
ณ้ญ๏ผ ไผๆถๅฐ็ฉบๅญ็ฌฆไธฒๅนถ่งฃ้ปๅก๏ผ ่ฟ้ไผ็ๆ็ฉบๅ่กจ
if not request_lines:
return
# GET /index.html HTTP/1.1
# get post put del
file_name = ""
ret = re.match(r"[^/]+(/[^ ]*)", request_lines[0])
if ret:
file_name = ret.group(1)
# print("*"*50, file_name)
if file_name == "/":
file_name = "/index.html"
# 2. ่ฟๅhttpๆ ผๅผ็ๆฐๆฎ๏ผ็ปๆต่งๅจ
# 2.1 ๅฆๆ่ฏทๆฑ็่ตๆบไธๆฏไปฅ.py็ปๅฐพ๏ผ้ฃไนๅฐฑ่ฎคไธบๆฏ้ๆ่ตๆบ๏ผhtml/css/js/png๏ผjpg็ญ๏ผ
if not file_name.endswith(".py"):
try:
f = open(self.static_path + file_name, "rb")
except:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "------file not found-----"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 2.1 ๅๅคๅ้็ปๆต่งๅจ็ๆฐๆฎ---header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
# 2.2 ๅๅคๅ้็ปๆต่งๅจ็ๆฐๆฎ---boy
# response += "hahahhah"
# ๅฐresponse headerๅ้็ปๆต่งๅจ
new_socket.send(response.encode("utf-8"))
# ๅฐresponse ic.mini_frame.applicationbodyๅ้็ปๆต่งๅจ
new_socket.send(html_content)
else:
# 2.2 ๅฆๆๆฏไปฅ.py็ปๅฐพ๏ผ้ฃไนๅฐฑ่ฎคไธบๆฏๅจๆ่ตๆบ็่ฏทๆฑ
env = dict() # ่ฟไธชๅญๅ
ธไธญๅญๆพ็ๆฏwebๆๅกๅจ่ฆไผ ้็ป webๆกๆถ็ๆฐๆฎไฟกๆฏ
env['PATH_INFO'] = file_name
# {"PATH_INFO": "/index.py"}
# body = dynamic.mini_frame.application(env, self.set_response_header)
body = self.application(env, self.set_response_header)
header = "HTTP/1.1 %s\r\n" % self.status
for temp in self.headers:
header += "%s:%s\r\n" % (temp[0], temp[1])
header += "\r\n"
response = header + body
# ๅ้response็ปๆต่งๅจ
new_socket.send(response.encode("utf-8"))
# ๅ
ณ้ญๅฅๆฅ
new_socket.close()
def set_response_header(self, status, headers):
self.status = status
self.headers = [("server", "mini_web v8.8")]
self.headers += headers
def run_forever(self):
    """Main server loop: accept clients forever, serving each one in a
    child process."""
    while True:
        # 4. Wait for a new client connection.
        new_socket, client_addr = self.tcp_server_socket.accept()
        # 5. Serve this client in a separate process so the loop can keep
        #    accepting new connections.
        p = multiprocessing.Process(target=self.service_client, args=(new_socket,))
        p.start()
        # The child process holds its own copy of the socket; close the
        # parent's reference so the connection fully closes when the child
        # is done with it.
        new_socket.close()
    # Close the listening socket (unreachable while the loop runs forever).
    self.tcp_server_socket.close()
def main():
    """Entry point: parse command-line arguments, load the web config and
    the application framework, then start the WSGI server.

    Expected invocation:
        python3 web_server.py [ip:]port frame_name:application_name
    """
    print(sys.argv)  # sys.argv is a list; its elements are strings
    if len(sys.argv) == 3:
        try:
            addr = sys.argv[1].split(":")
            # "ip:port" gives two parts; a bare "port" gives one.
            ip = addr[0] if len(addr) == 2 else ""
            port = int(addr[1]) if len(addr) == 2 else int(addr[0])
            # Second argument has the form frame_name:app_name
            ret = re.match(r"([^:]+):(.*)", sys.argv[2])
            if ret:
                frame_name_str = ret.group(1)
                app_name_str = ret.group(2)
            else:
                raise Exception("ไผ ๅ้่ฏฏ")
        except Exception as e:
            print("่ฏทๆ็
ง๏ผpython3 web_server.py [ip:][port] frame_name:application_name ่ฐ็จๆๅกๅจ")
        else:
            # Read the configuration file (a Python dict literal).
            with open("web_config.conf") as f:
                conf = f.read()
            # NOTE(review): eval() of a config file executes arbitrary code;
            # only use with trusted configuration files.
            conf_dict = eval(conf)
            # Import the framework module by name, i.e. frame_name.py, from
            # the configured dynamic path.
            sys.path.append(conf_dict["DYNAMIC_PATH"])
            frame_name = __import__(frame_name_str)
            app_name = getattr(frame_name, app_name_str)
            wsgi_server = WSGIServer(ip, port, app_name, conf_dict["STATIC_PATH"])
            wsgi_server.run_forever()
    else:
        print("่ฏทๆ็
ง๏ผpython3 web_server.py [ip:][port] frame_name:application_name ่ฐ็จๆๅกๅจ")


if __name__ == "__main__":
    main()
|
from flask import Flask
from flask import jsonify, render_template, make_response
from utils import test_user_input, add_result
from config import CACHE
# Flask application instance for the lookup service.
app = Flask(__name__)
# Render the start page.
@app.route('/', methods=['GET'])
def main():
    """Serve the landing page."""
    page = render_template('index.html')
    return page, 200
# Result of a user lookup by email.
@app.route('/askAPI/find_info/<user_input>', methods=['GET'])
def email_acc_exists_response(user_input: str):
    """Look up *user_input* (an email address) and return the result as JSON.

    Successful lookups are cached and persisted, so repeated requests for
    the same address are answered from the cache without querying ask.fm.
    """
    # Check the cache first; fetch once instead of the previous
    # get-then-get double lookup.
    cached = CACHE.get(user_input)
    if cached is not None:
        return jsonify(cached)
    response = test_user_input(user_input)
    if response.get('reason'):
        # Lookup failed: report the reason, do not cache failures.
        return jsonify(response)
    CACHE.set(user_input, response)  # cache the successful lookup
    add_result(response)  # persist the result to the database
    return jsonify(response)
# 404 error
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 response for unknown routes."""
    payload = {'server_response': 'error', 'reason': 'page_not_found'}
    return make_response(jsonify(payload), 404)
if __name__ == '__main__':
    # Listen on all interfaces; port 80 normally requires elevated privileges.
    app.run(host='0.0.0.0', port=80)
|
# -*- coding: utf-8 -*-
""" Module to implement acoustic shadowing along with spherical spreading
of sound
Created on Mon Jun 17 16:08:50 2019
@author: tbeleyur
"""
import time
import sys
sys.path.append('..//bridson//')
sys.path.append('..//')
import numpy as np
import pandas as pd
import scipy.spatial as spatial
import statsmodels.api as sm
def soundprop_w_acoustic_shadowing(start_point, end_point, all_other_points,
                                   **kwargs):
    '''Calculate the received level of a sound emitted at the start point
    and reaching the end point after potentially passing through other
    points on the way.

    Each point the sound passes through causes a drop in the intensity
    because of acoustic shadowing.

    Parameters
    ----------
    start_point : 1 x 2 array like
    end_point : 1 x 2 array like
    all_other_points : Nbats-2 x 2 array like
        xy coordinates of all points between start and end point
        in a rectangular area of given width

    Keyword Arguments
    -----------------
    implement_shadowing : Boolean. If True then shadowing calculations are
        done, else only simple spherical spreading.
    R : float >0. Straight line distance between source and receiver; used
        for the baseline spherical-spreading received level.
    rectangle_width : float >0. Width of the rectangle.
    emitted_source_level : dictionary with keys 'dBSPL' and 'ref_distance',
        indicating source level in dB SPL re 20muPa and reference distance
        in metres.
    acoustic_shadowing_model : statsmodels object that allows calculation
        of how much shadowing will be observed.
    min_spacing : float > 0.

    Returns
    -------
    received_level : float.
        Received level of the sound.
    '''
    if kwargs.get('implement_shadowing'):
        # Obstacles are the points falling inside a rectangle joining
        # source and receiver.
        all_points_between = get_points_in_between(start_point, end_point,
                                                   all_other_points, **kwargs)
        # Baseline: spherical spreading over the direct path.
        received_level = calc_RL(kwargs['R'],kwargs['emitted_source_level']['dBSPL'],
                                 kwargs['emitted_source_level']['ref_distance'])
        num_obstacles = all_points_between.shape[0]
        if num_obstacles >= 1:
            # Shadowing is a dB correction added onto the baseline level.
            acoustic_shadowing = calculate_acoustic_shadowing(num_obstacles,
                                                              **kwargs)
            received_level += acoustic_shadowing
    else:
        # Shadowing disabled: spherical spreading only.
        received_level = calc_RL(kwargs['R'],kwargs['emitted_source_level']['dBSPL'],
                                 kwargs['emitted_source_level']['ref_distance'])
    return(received_level)
def get_distances_between_points(xy_between, start, end):
    '''Distances between consecutive points, ordered by their distance
    from the start point.

    Parameters
    ----------
    xy_between : N x 2 array like. Points between start and end.
    start, end : 1 x 2 array like. Endpoints of the path.

    Returns
    -------
    point_2_point_distances : 1 x (N+1) np.array.
        Euclidean distance between each consecutive pair of points after
        sorting all points by their distance from *start*.
    '''
    # np.row_stack was removed in NumPy 2.0; np.vstack is the equivalent.
    all_xy = np.vstack((start, xy_between, end))
    distance_matrix = spatial.distance_matrix(all_xy, all_xy)
    # Order every point by its straight-line distance from the start point.
    distance_to_source = np.argsort(distance_matrix[:, 0])
    points_sorted = all_xy[distance_to_source, :]
    distances_sorted = spatial.distance_matrix(points_sorted, points_sorted)
    # Read off the distance between each consecutive pair of sorted points
    # (the redundant enumerate-over-zip-of-ranges loop is now a plain range).
    num_distances = all_xy.shape[0] - 1
    point_2_point_distances = np.zeros(num_distances)
    for i in range(num_distances):
        point_2_point_distances[i] = distances_sorted[i, i + 1]
    return(point_2_point_distances)
def calculate_acoustic_shadowing(num_obstacles,
                                 **kwargs):
    '''Calculate the change in received level due to acoustic shadowing,
    in dB.

    The reduction is obtained from a fitted statistical model as the
    difference between the level predicted with num_obstacles obstacles
    and the level predicted with zero obstacles, at the given spacing.

    Parameters
    ----------
    num_obstacles : int >1 .
        Number of obstacles between receiver and emitter.

    Keyword Arguments
    -----------------
    acoustic_shadowing_model : statsmodels object
        A statistical model that allows calculation of
        the amount of acoustic shadowing in dB.
        For predictions the model accepts a
        pd.DataFrame which has the following
        columns (this might depend on the exact model
        loaded too!)
        obstacles
        spacing
    min_spacing : float>0.
        Separation between bats/obstacles
        see the_cocktail_party_nightmare

    Returns
    -------
    shadowing_reduction : float.
        Reduction of received level due to shadowing in dB.
    '''
    # Model inputs: the same spacing with zero vs. num_obstacles obstacles.
    no_obstacle = pd.DataFrame(data={'obstacles':[0],
                                     'spacing':[kwargs['min_spacing']],
                                     })
    with_obstacles = pd.DataFrame(data={'obstacles':[num_obstacles],
                                        'spacing':[kwargs['min_spacing']],
                                        })
    #convert_to_categorical(no_obstacle, 'spacing')
    #convert_to_categorical(with_obstacles, 'spacing')

    level_w_obstacles = kwargs['acoustic_shadowing_model'].predict(with_obstacles)
    level_wo_obstacles = kwargs['acoustic_shadowing_model'].predict(no_obstacle)
    # predict() is called on single-row frames; float() extracts the scalar
    # difference.
    shadowing_reduction = float(level_w_obstacles - level_wo_obstacles)
    return(shadowing_reduction)
def convert_to_categorical(df, column):
    '''Cast *column* of *df* to the pandas Categorical dtype in place and
    return the (mutated) DataFrame.
    '''
    categorical_values = pd.Categorical(df[column])
    df[column] = categorical_values
    return(df)
def calc_RL(distance, SL, ref_dist, **kwargs):
    '''Received level from spherical spreading alone (plus optional
    atmospheric absorption).

    Parameters
    -----------
    distance : float>0. Receiver distance from source in metres.
    SL : float. Source level in dB SPL re 20 muPa at the reference distance.
    ref_dist : float >0. Distance at which the source level was measured,
        in metres. Typically 1 metre by convention.

    Keyword Arguments
    -----------------
    atmospheric_attenuation : float <= 0.
        Atmospheric attenuation in dB/m.
        This has to be a negative number.
        Defaults to no atmospheric attenuation (0 dB/m).

    Returns
    -------
    RL : received level in dB SPL re 20muPa.
    '''
    spreading_loss = 20 * np.log10(float(distance / ref_dist))
    absorption = kwargs.get('atmospheric_attenuation', 0) * distance
    return(SL - spreading_loss + absorption)
def get_points_in_between(start_point, end_point, all_other_points,
                          **kwargs):
    '''Return the subset of *all_other_points* lying inside a rectangle
    spanning from *start_point* to *end_point*.

    Parameters
    ----------
    start_point : 1x2 array like
        xy coordinates of the starting point.
    end_point : 1x2 array like
        xy coordinates of the end point.
    all_other_points : N x 2 array like
        xy coordinates of all other points.

    Keyword Arguments
    -----------------
    rectangle_width : float >0.
        The width of the rectangle between the start and end point.

    Returns
    -------
    points_between : Mpoints_between x 2 np.array where Mpoints can be >= 0.
    '''
    # Build the rectangle (described in a rotated frame) and the rotation
    # that maps points into that frame, then filter the candidates.
    corner_limits, rotation_matrix = make_rectangle_between_2_points(
        start_point, end_point, **kwargs)
    return(get_points_in_rectangle(corner_limits, start_point,
                                   all_other_points, rotation_matrix))
def get_points_in_between_thecircleversion(start_point, end_point,
                                           all_other_points,**kwargs):
    '''Take 2 at getting perhaps a faster version of the
    previous get_points_in_between function.

    It is fast *and* dirty ... and doesn't quite apply when many bats are
    packed tightly together ... as long as the 'rectangle_width' is decently
    large -- then it should be okay..
    '''
    # get line equation from A to B
    diff_x_y = end_point-start_point
    vertical, m = calculate_slope(diff_x_y)
    numpoints = 100 # choose a default density for now
    points_along_line = np.zeros((numpoints, 2))
    if not vertical:
        # Sample x uniformly between the endpoints and use y = m*x + c.
        points_along_line[:,0] = np.linspace(start_point[0],end_point[0],
                                             numpoints) # x coordinates
        c = solve_for_intercept(start_point,end_point,m)
        points_along_line[:,1] = m*points_along_line[:,0] + c # y coordinates
    else:
        # Vertical line: x is constant; sample y between the endpoints.
        points_along_line[:,0] = start_point[0] # x coordinates
        points_along_line[:,1] = np.linspace(start_point[1], end_point[1],
                                             numpoints)# y coordinates
    # Distance from each sampled line point to every candidate point; a
    # candidate counts if it is within 'rectangle_width' of ANY line sample.
    distance_from_line = spatial.distance_matrix(points_along_line,
                                                 all_other_points)
    within_r_dist_from_line = distance_from_line<= kwargs['rectangle_width']
    point_ids = np.argwhere(np.any(within_r_dist_from_line, axis=0))
    return(all_other_points[point_ids])
def calculate_slope(deltax_deltay):
    '''Classify the orientation of a line from its coordinate differences.

    Parameters
    ----------
    deltax_deltay : 1 x 2 array-like. [X2-X1, Y2-Y1]

    Returns
    -------
    vertical : boolean. True if the line is vertically oriented (deltaX==0).
    slope : float. Slope of the line; np.nan for vertical lines.

    Raises
    ------
    ValueError
        If both differences are zero (the two points coincide, so no slope
        is defined). The previous implementation looked this case up in
        slope_dict and *returned* an Exception instance instead of raising,
        which made callers fail obscurely while tuple-unpacking the result.
    '''
    delta_x, delta_y = deltax_deltay[0], deltax_deltay[1]
    if delta_x == 0 and delta_y == 0:
        raise ValueError('Zero slopes for both not possible!!')
    if delta_x == 0:
        # x does not change: vertical line, slope undefined.
        return(True, np.nan)
    # x changes: ordinary slope (0.0 when y does not change).
    return(False, delta_y / float(delta_x))
# Lookup table mapping (deltax_is_zero, deltay_is_zero) to (vertical, slope)
# for degenerate line orientations.
# NOTE(review): the (True, True) entry stores an Exception *instance*;
# calculate_slope returns it as a value rather than raising it, so callers
# crash while tuple-unpacking instead of seeing the intended error message.
slope_dict = {(True,False) : (True, np.nan) ,# x doesn't change, y does: vertical
              (False, True): (False, 0.0), # y doesn't change, x does: horizontal
              (True, True): Exception('Zero slopes for both not possible!!')}
def solve_for_intercept(x1y1, x2y2, m):
    '''Intercept of a line of slope *m* through two points, taken as the
    average of the intercepts implied by each point.
    '''
    x1, y1 = x1y1
    x2, y2 = x2y2
    intercept_from_p1 = y1 - m * x1
    intercept_from_p2 = y2 - m * x2
    return((intercept_from_p1 + intercept_from_p2) / 2.0)
def make_rectangle_between_2_points(A, B, **kwargs):
    '''First calculate the relative coordinates of B with reference to A.
    Then draw a straight line between (0,0) and B_rel and 'undo' the
    slope. To this now-vertical line apply rectangular bands of half the
    rectangle width on the left and right.

    Outputs the corner limits of the vertical rectangle together with the
    rotation matrix used to undo the slope of the (0,0)-B_rel line, for
    application to other points.

    Parameters
    ----------
    A, B : 1x2 array like. xy coordinates of start(A) and end(B) points.

    Keyword Arguments
    -----------------
    rectangle_width : float>0. The width of the rectangle between A and B.

    Returns
    -------
    corner_limits : list of four entries [x0, x1, y0, y1] describing the
        rectangle in the rotated frame (A at the origin).
    rotation_matrix : 2x2 np.array that rotates points into the
        rectangle's frame of reference.
    '''
    # treat A as origin, calculate slope between B and A
    B_rel = B-A
    # 'un-rotate' B and thus form a vertical rectangle easily
    theta = np.arctan2(B_rel[1], B_rel[0])
    #theta_tobe_rotated = np.remainder(theta, np.pi/2)
    theta_tobe_rotated = np.pi/2.0 - theta
    rotation_matrix = rot_mat(theta_tobe_rotated)
    B_rotated = np.dot(rotation_matrix, B_rel)
    # Width is centred on the (now vertical) A-B line.
    x0, x1 = -kwargs['rectangle_width']*0.5, kwargs['rectangle_width']*0.5
    y0, y1 = 0, B_rotated[1]
    return([x0,x1,y0,y1], rotation_matrix)
def get_points_in_rectangle(corner_limits, startpt,
                            many_points, rotn_matrix):
    '''Check which of many_points lie within the rectangle defined by the
    bottom-left point (x0, y0) and the top-right corner (x1, y1).

    Parameters
    ----------
    corner_limits : tuple with four entries (x0, x1, y0, y1).
        x0, x1 : x coordinates defining the width of the rectangle.
        y0, y1 : y coordinates defining the height of the rectangle.
    startpt : 1x2 array like. Origin the candidate points are taken
        relative to before rotation.
    many_points : N x 2 array like. Candidate points.
    rotn_matrix : 2 x 2 np.array. Rotation matrix that rotates the points
        into the same frame of reference as the rectangle, in the form
        described at https://en.wikipedia.org/wiki/Rotation_matrix

    Returns
    -------
    The subset of many_points that fall inside the rectangle.
    '''
    x0,x1,y0,y1 = corner_limits
    # Express candidates relative to the start point, then rotate each row
    # into the rectangle's frame of reference.
    relative_posns = many_points - startpt
    rotated_pts = np.apply_along_axis(dot_product_for_rows, 1, relative_posns,
                                      rotn_matrix)
    # min/max guards make the bounds correct whichever way round the corner
    # coordinates were given.
    within_x = np.logical_and(rotated_pts[:,0] >= np.min([x0,x1]),
                              rotated_pts[:,0] <= np.max([x1,x0]))
    within_y = np.logical_and(rotated_pts[:,1] >= np.min([y0,y1]),
                              rotated_pts[:,1] <= np.max([y0,y1]))
    within_pts = np.logical_and(within_x, within_y)
    return(many_points[within_pts])
def dot_product_for_rows(xy_row, rotation_matrix):
    '''Rotate one xy row vector: rotation_matrix @ xy_row.'''
    rotated_row = np.dot(rotation_matrix, xy_row)
    return(rotated_row)
def dot_product_w_sum(xy_row, rotation_matrix):
    '''Matrix-vector product written as an elementwise multiply plus a
    row-wise sum.'''
    elementwise = rotation_matrix * xy_row
    return(np.sum(elementwise, axis=1))
def rot_mat(theta):
    '''2x2 anticlockwise rotation matrix for angle *theta* (radians),
    returned as float32.
    '''
    # np.row_stack was removed in NumPy 2.0; np.vstack is the equivalent.
    rotation_matrix = np.float32(np.vstack(([np.cos(theta), -np.sin(theta)],
                                            [np.sin(theta), np.cos(theta)])))
    return(rotation_matrix)
if __name__ == '__main__':
    # Manual smoke test: received level at the farthest bat from a focal
    # bat at the origin, with shadowing switched off (pure spherical
    # spreading over R = 2 m). The pickled statsmodels shadowing model is
    # still loaded from disk even though shadowing is disabled here.
    # (The commented block below was a timing benchmark of
    # get_points_in_between at increasing point counts.)
#    kwargs = {'rectangle_width':0.1, 'implement_shadowing':True,
#              'emitted_source_level': {'dBSPL':90, 'ref_distance':1.0}}
#    kwargs['shadow_TS'] = [-15]
#    np.random.seed(82319)
#    #otherpts = np.random.normal(0,5,2000).reshape(-1,2)
#
#    #otherpts = np.array(([1,0],[1,0.05]))
#    #print(get_points_in_between(np.array([2,0]), np.array([0,0]), otherpts, **kwargs ) )
#    start = time.time()
#    numpoints = [5,90, 100, 1000, 2000, 4000, 8000, 10000, 100000]
#    for num_points in numpoints:
#        y_coods = np.random.choice(np.arange(0.1, 5, 0.01),num_points)
#        x_coods = np.tile(0.05,num_points)
#
#        between_points = np.column_stack((x_coods, y_coods))
#        q=get_points_in_between(np.array([0,0]), np.array([0,10]), between_points,
#                                **kwargs)
#    print(time.time()-start)
    kwargs = {}
    kwargs['bats_xy'] = np.array(([0,0],
                                  [1,0],
                                  [2,0]))
    kwargs['focal_bat'] = np.array([0,0])
    kwargs['R'] = 2.0
    kwargs['implement_shadowing'] = False
    kwargs['rectangle_width'] = 0.3
    kwargs['acoustic_shadowing_model'] = sm.load('../data/acoustic_shadowing_model.pkl')
    kwargs['min_spacing'] = 1.0
    kwargs['emitted_source_level'] = {'dBSPL':90, 'ref_distance':1.0}
    A = soundprop_w_acoustic_shadowing( kwargs['focal_bat'],
                                        kwargs['bats_xy'][-1,:],
                                        kwargs['bats_xy'],
                                        **kwargs)
    print(A)
class Debug:
    """Mutable debug flag exposed as a callable object.

    ``Set()`` / ``Unset()`` turn the flag on and off; calling the instance
    returns the current value.
    """

    def __init__(self):
        self.DEBUG = False

    def Set(self):
        """Turn debugging on."""
        self.DEBUG = True

    def Unset(self):
        """Turn debugging off."""
        self.DEBUG = False

    def __call__(self):
        """Return the current debug state."""
        return self.DEBUG
# Module-level shared flag instance; importers call DEBUG() to query it.
DEBUG=Debug()
# NOTE(review): Set() then Unset() leaves the flag False — presumably a
# leftover from manual testing; confirm before removing.
DEBUG.Set()
DEBUG.Unset()
#!/usr/bin/env python
__author__ = 'Leland Taylor'
__date__ = '2020-03-13'
__version__ = '0.0.1'
import argparse
import os
import pandas as pd
def main():
    """Run CLI.

    Collects per-sample barcode/feature/matrix files (named
    ``<sample_id>---<kind>``), groups them by sample, hard-links them into
    ``tenx_input_data/<sample_id>/`` in 10x layout, and writes a TSV
    mapping experiment_id -> data_path_10x_format.
    """
    parser = argparse.ArgumentParser(
        description="""
            Filter and merge 10x data. Save to AnnData object.
            """
    )

    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {version}'.format(version=__version__)
    )

    parser.add_argument(
        '--barcodes_list',
        action='store',
        dest='barcodes_list',
        required=True,
        help='List of barcode files in the following format: \
            <sample_id1>---barcodes.tsv.gz,<sample_id2>---barcodes.tsv.gz'
    )

    parser.add_argument(
        '--features_list',
        action='store',
        dest='features_list',
        required=True,
        help='List of features files in the following format: \
            <sample_id1>---features.tsv.gz,<sample_id2>---features.tsv.gz'
    )

    parser.add_argument(
        '--matrix_list',
        action='store',
        dest='matrix_list',
        required=True,
        help='List of matrix files in the following format: \
            <sample_id1>---matrix.mtx.gz,<sample_id2>---matrix.mtx.gz'
    )

    parser.add_argument(
        '-txd', '--tenxdata_file',
        action='store',
        dest='txd',
        required=False,
        help='File with the following headers: experiment_id\
            data_path_10x_format.'
    )

    parser.add_argument(
        '-of', '--output_file',
        action='store',
        dest='of',
        default='nf_prepped__file_paths_10x.tsv',
        help='Basename of output anndata file, assuming output in current \
            working directory. Will have .h5ad appended.\
            (default: %(default)s)'
    )

    options = parser.parse_args()

    # Group the three input file lists by experiment id. The three loops
    # differed only in suffix and key, so they are folded into one.
    input_dict = {}
    file_groups = (
        (options.barcodes_list.split(','), '---barcodes.tsv.gz', 'barcode'),
        (options.features_list.split(','), '---features.tsv.gz', 'feature'),
        (options.matrix_list.split(','), '---matrix.mtx.gz', 'matrix'),
    )
    for files, suffix, key in file_groups:
        for path in files:
            experiment_id = path.replace(suffix, '')
            input_dict.setdefault(experiment_id, {})[key] = path

    # Check to make sure we have an entry in the dict for every
    # experiment_id in the original txd file.
    # BUGFIX: options.txd defaults to None when the flag is omitted; the
    # previous check (options.txd != '') then passed None to pd.read_csv
    # and crashed. Only validate when a file was actually provided.
    if options.txd:
        df = pd.read_csv(options.txd, sep='\t')
        for i in df['experiment_id']:
            if i not in input_dict:
                raise Exception('Missing experiment_id:\t{}'.format(
                    i
                ))

    output_dict = {}
    output_dict['experiment_id'] = []
    output_dict['data_path_10x_format'] = []
    # Canonical 10x file name for each input kind.
    out_names = {
        'barcode': 'barcodes.tsv.gz',
        'feature': 'features.tsv.gz',
        'matrix': 'matrix.mtx.gz',
    }
    for experiment_id, value in input_dict.items():
        out_dir = 'tenx_input_data/{}'.format(experiment_id)
        os.makedirs(out_dir, exist_ok=True)
        output_dict['experiment_id'].append(experiment_id)
        output_dict['data_path_10x_format'].append(os.path.abspath(out_dir))
        # Hard-link (not symlink) each input into the 10x-format directory,
        # skipping links that already exist from a previous run.
        for key, out_name in out_names.items():
            dst = os.path.join(out_dir, out_name)
            if not os.path.exists(dst):
                os.link(value[key], dst)

    output_df = pd.DataFrame(output_dict)
    output_df = output_df.sort_values(
        by=['experiment_id'],
        ascending=[True]
    )
    # NOTE(review): the DataFrame index column is written as well (pandas
    # default); left unchanged in case downstream consumers rely on it.
    output_df.to_csv(options.of, sep='\t')


if __name__ == '__main__':
    main()
|
import json
import logging
import multiprocessing as mp
import pickle
import time
from collections import Counter
from copy import deepcopy
from typing import List, Union, Tuple
import numpy as np
from flare.env import AtomicEnvironment
from flare.gp_algebra import (
get_like_from_mats,
get_neg_like_grad,
get_ky_mat_update,
_global_training_data,
_global_training_labels,
_global_training_structures,
_global_energy_labels,
get_Ky_mat,
get_kernel_vector,
en_kern_vec,
efs_kern_vec,
)
from flare.kernels.utils import (
str_to_kernel_set,
from_mask_to_args,
kernel_str_to_array,
)
from flare.output import Output, set_logger
from flare.parameters import Parameters
from flare.struc import Structure
from flare.utils.element_coder import NumpyEncoder, Z_to_element
from numpy.random import random
from scipy.linalg import solve_triangular
from scipy.optimize import minimize
class GaussianProcess:
"""Gaussian process force field. Implementation is based on Algorithm 2.1
(pg. 19) of "Gaussian Processes for Machine Learning" by Rasmussen and
Williams.
Methods within GaussianProcess allow you to make predictions on
AtomicEnvironment objects (see env.py) generated from
FLARE Structures (see struc.py), and after data points are added,
optimize hyperparameters based on available training data (train method).
Args:
kernels (list, optional): Determine the type of kernels. Example:
['twbody', 'threebody'], ['2', '3', 'mb'], ['2']. Defaults to [
'twboody', 'threebody']
component (str, optional): Determine single- ("sc") or multi-
component ("mc") kernel to use. Defaults to "mc"
hyps (np.ndarray, optional): Hyperparameters of the GP.
cutoffs (Dict, optional): Cutoffs of the GP kernel. For simple hyper-
parameter setups, formatted like {"twobody":7, "threebody":4.5},
etc.
hyp_labels (List, optional): List of hyperparameter labels. Defaults
to None.
opt_algorithm (str, optional): Hyperparameter optimization algorithm.
Defaults to 'L-BFGS-B'.
maxiter (int, optional): Maximum number of iterations of the
hyperparameter optimization algorithm. Defaults to 10.
parallel (bool, optional): If True, the covariance matrix K of the GP is
computed in parallel. Defaults to False.
n_cpus (int, optional): Number of cpus used for parallel
calculations. Defaults to 1 (serial)
n_sample (int, optional): Size of submatrix to use when parallelizing
predictions.
output (Output, optional): Output object used to dump hyperparameters
during optimization. Defaults to None.
hyps_mask (dict, optional): hyps_mask can set up which hyper parameter
is used for what interaction. Details see kernels/mc_sephyps.py
name (str, optional): Name for the GP instance which dictates global
memory access.
"""
def __init__(
    self,
    kernels: List[str] = None,
    component: str = "mc",
    hyps: "ndarray" = None,
    cutoffs: dict = None,
    hyps_mask: dict = None,
    hyp_labels: List = None,
    opt_algorithm: str = "L-BFGS-B",
    maxiter: int = 10,
    parallel: bool = False,
    per_atom_par: bool = True,
    n_cpus: int = 1,
    n_sample: int = 100,
    output: Output = None,
    name="default_gp",
    energy_noise: float = 0.01,
    **kwargs,
):
    """Initialize GP parameters and training data.

    See the class docstring for the meaning of each argument. Remaining
    **kwargs are translated by backward_arguments for compatibility with
    older argument names.
    """

    # load arguments into attributes
    self.name = name
    self.output = output
    self.opt_algorithm = opt_algorithm
    self.per_atom_par = per_atom_par
    self.maxiter = maxiter

    # set up parallelization
    self.n_cpus = n_cpus
    self.n_sample = n_sample
    self.parallel = parallel

    self.component = component
    # Normalize the kernel specification to a canonical list of names.
    self.kernels = (
        ["twobody", "threebody"]
        if kernels is None
        else kernel_str_to_array("".join(kernels))
    )
    self.cutoffs = {} if cutoffs is None else cutoffs
    self.hyp_labels = hyp_labels
    self.hyps_mask = {} if hyps_mask is None else hyps_mask
    self.hyps = hyps

    # Translate deprecated keyword arguments / attribute names in place.
    GaussianProcess.backward_arguments(kwargs, self.__dict__)
    GaussianProcess.backward_attributes(self.__dict__)

    # ------------ "computed" attributes ------------

    if self.output is None:
        self.logger_name = self.name + "GaussianProcess"
        set_logger(self.logger_name, stream=True, fileout_name=None, verbose="info")
    else:
        self.logger_name = self.output.basename + "log"

    if self.hyps is None:
        # If no hyperparameters are passed in, assume 2 hyps for each
        # kernel, plus one noise hyperparameter, and use a guess value
        self.hyps = np.array([0.1] * (1 + 2 * len(self.kernels)))
    else:
        self.hyps = np.array(self.hyps, dtype=np.float64)

    # Resolve the kernel callables implied by the kernel names, component
    # and mask.
    kernel, grad, ek, efk, efs_e, efs_f, efs_self = str_to_kernel_set(
        self.kernels, self.component, self.hyps_mask
    )
    self.kernel = kernel
    self.kernel_grad = grad
    self.energy_force_kernel = efk
    self.energy_kernel = ek
    self.efs_energy_kernel = efs_e
    self.efs_force_kernel = efs_f
    self.efs_self_kernel = efs_self
    # Re-derive the kernel list from the resolved kernel's name.
    self.kernels = kernel_str_to_array(kernel.__name__)

    # parallelization
    if self.parallel:
        if self.n_cpus is None:
            self.n_cpus = mp.cpu_count()
        else:
            self.n_cpus = n_cpus
    else:
        self.n_cpus = 1

    self.training_data = []  # Atomic environments
    self.training_labels = []  # Forces acting on central atoms
    self.training_labels_np = np.empty(
        0,
    )
    self.n_envs_prev = len(self.training_data)

    # Attributes to accomodate energy labels:
    self.training_structures = []  # Environments of each structure
    self.energy_labels = []  # Energies of training structures
    self.energy_labels_np = np.empty(
        0,
    )
    self.energy_noise = energy_noise

    # Concatenation of force and energy labels.
    self.all_labels = np.empty(
        0,
    )

    # Parameters set during training
    self.ky_mat = None
    self.force_block = None
    self.energy_block = None
    self.force_energy_block = None
    self.l_mat = None
    self.l_mat_inv = None
    self.alpha = None
    self.ky_mat_inv = None
    self.likelihood = None
    self.likelihood_gradient = None
    self.bounds = None

    # File used for reading / writing model if model is large
    self.ky_mat_file = None
    # Flag if too-big warning has been printed for this model
    self.large_warning = False

    # NOTE(review): self.logger_name was assigned unconditionally above, so
    # this branch appears to be dead code — confirm before removing.
    if self.logger_name is None:
        if self.output is None:
            self.logger_name = self.name + "GaussianProcess"
            set_logger(
                self.logger_name, stream=True, fileout_name=None, verbose="info"
            )
        else:
            self.logger_name = self.output.basename + "log"
    logger = logging.getLogger(self.logger_name)

    if self.cutoffs == {}:
        # If no cutoffs are passed in, assume 7 A for 2 body, 3.5 for
        # 3-body.
        cutoffs = {}
        if "twobody" in self.kernels:
            cutoffs["twobody"] = 7
        if "threebody" in self.kernels:
            cutoffs["threebody"] = 3.5
        if "manybody" in self.kernels:
            raise ValueError(
                "No cutoff was set for the manybody kernel."
                "A default value will not be set by default."
            )
        self.cutoffs = cutoffs

        logger.warning(
            "Warning: No cutoffs were set for your GP."
            "Default values have been assigned but you "
            "should think carefully about which are "
            "appropriate for your use case."
        )

    self.check_instantiation()
@property
def force_noise(self):
    """Force-noise hyperparameter, extracted through the hyps mask."""
    return Parameters.get_noise(self.hyps_mask, self.hyps, constraint=False)
@property
def hyps_and_labels(self):
    """Hyperparameters together with their labels, as resolved through
    the hyps mask."""
    return Parameters.get_hyps(
        self.hyps_mask, self.hyps, constraint=False, label=True
    )
def check_instantiation(self):
    """
    Runs a series of checks to ensure that the user has not supplied
    contradictory arguments which will result in undefined behavior
    with multiple hyperparameters.

    Also makes sure this instance's name does not collide with a different
    GP already registered in global memory, renaming the instance if so.
    :return:
    """
    logger = logging.getLogger(self.logger_name)

    # check whether it's be loaded before: the global slots for this name
    # exist but point at some other GP's label arrays.
    loaded = False
    if self.name in _global_training_labels:
        if (
            _global_training_labels.get(self.name, None)
            is not self.training_labels_np
        ):
            loaded = True
    if self.name in _global_energy_labels:
        if _global_energy_labels.get(self.name, None) is not self.energy_labels_np:
            loaded = True

    if loaded:
        base = f"{self.name}"
        count = 2
        while self.name in _global_training_labels and count < 100:
            # Random sub-second sleep reduces the chance of two processes
            # claiming the same fallback name simultaneously.
            time.sleep(random())
            self.name = f"{base}_{count}"
            logger.debug(
                "Specified GP name is present in global memory; "
                "Attempting to rename the "
                f"GP instance to {self.name}"
            )
            count += 1
        if self.name in _global_training_labels:
            # Last resort: a time-derived, effectively unique suffix.
            milliseconds = int(round(time.time() * 1000) % 10000000)
            self.name = f"{base}_{milliseconds}"
            logger.debug(
                "Specified GP name still present in global memory: "
                f"renaming the gp instance to {self.name}"
            )
        logger.debug(f"Final name of the gp instance is {self.name}")

    # Register this GP's data under its (possibly renamed) name.
    self.sync_data()

    # Validate hyps / cutoffs / kernels against the mask; the mask may be
    # normalized by this call.
    self.hyps_mask = Parameters.check_instantiation(
        hyps=self.hyps,
        cutoffs=self.cutoffs,
        kernels=self.kernels,
        param_dict=self.hyps_mask,
    )

    self.bounds = deepcopy(self.hyps_mask.get("bounds", None))
def update_kernel(
    self,
    kernels: List[str],
    component: str = "mc",
    hyps=None,
    cutoffs: dict = None,
    hyps_mask: dict = None,
):
    """Reset the kernel callables (and optionally hyps, cutoffs and mask)
    from a new kernel specification.

    Args:
        kernels: List of kernel names, e.g. ['twobody', 'threebody'].
        component: Single- ("sc") or multi-component ("mc") kernel.
        hyps: New hyperparameters. If None and hyps_mask carries a
            'hyps' key, that value is used instead.
        cutoffs: New cutoffs. If None and hyps_mask carries a 'cutoffs'
            key, that value is used instead.
        hyps_mask: New hyperparameter-mask dictionary.
    """
    kernel, grad, ek, efk, _, _, _ = str_to_kernel_set(
        kernels, component, hyps_mask
    )
    self.kernel = kernel
    self.kernel_grad = grad
    self.energy_force_kernel = efk
    self.energy_kernel = ek
    self.kernels = kernel_str_to_array(kernel.__name__)

    if hyps_mask is not None:
        self.hyps_mask = hyps_mask
    # Cutoffs argument will override hyps mask's cutoffs key, if present
    if isinstance(hyps_mask, dict) and cutoffs is None:
        cutoffs = hyps_mask.get("cutoffs", None)

    if cutoffs is not None:
        if self.cutoffs != cutoffs:
            # Cutoff change invalidates stored environments; rebuild them
            # without retraining.
            self.adjust_cutoffs(cutoffs, train=False, new_hyps_mask=hyps_mask)
        self.cutoffs = cutoffs

    if isinstance(hyps_mask, dict) and hyps is None:
        hyps = hyps_mask.get("hyps", None)

    if hyps is not None:
        self.hyps = hyps
def update_db(
    self,
    struc: Structure,
    forces: "ndarray" = None,
    custom_range: List[int] = (),
    energy: float = None,
    stress: "ndarray" = None,
):
    """Given a structure and forces, add local environments from the
    structure to the training set of the GP. If energy is given, add the
    entire structure to the training set.

    Args:
        struc (Structure): Input structure. Local environments of atoms
            in this structure will be added to the training set of the GP.

        forces (np.ndarray): Forces on atoms in the structure.

        custom_range (List[int]): Indices of atoms whose local
            environments will be added to the training set of the GP.

        energy (float): Energy of the structure.

        stress (np.ndarray): Stress tensor of the structure. The stress
            tensor components should be given in the following order:
            xx, xy, xz, yy, yz, zz.
            NOTE(review): stress is currently accepted but never stored;
            only its presence suppresses the "no labels" warning below.
    """

    # By default, use all atoms in the structure
    noa = len(struc.positions)
    update_indices = custom_range or list(range(noa))

    # If forces are given, update the environment list.
    if forces is not None:
        for atom in update_indices:
            env_curr = AtomicEnvironment(
                struc, atom, self.cutoffs, cutoffs_mask=self.hyps_mask
            )
            forces_curr = np.array(forces[atom])

            self.training_data.append(env_curr)
            self.training_labels.append(forces_curr)

        # create numpy array of training labels
        self.training_labels_np = np.hstack(self.training_labels)

    # If an energy is given, update the structure list.
    if energy is not None:
        structure_list = []  # Populate with all environments of the struc
        for atom in range(noa):
            env_curr = AtomicEnvironment(
                struc, atom, self.cutoffs, cutoffs_mask=self.hyps_mask
            )
            structure_list.append(env_curr)

        self.energy_labels.append(energy)
        self.training_structures.append(structure_list)
        self.energy_labels_np = np.array(self.energy_labels)

    if forces is None and energy is None and stress is None:
        logger = logging.getLogger(self.logger_name)
        # logger.warn is a deprecated alias; use warning().
        logger.warning(
            "Update DB method called with data but no labels!"
            "The GP has not been updated with data!"
        )

    # update list of all labels
    self.all_labels = np.concatenate(
        (self.training_labels_np, self.energy_labels_np)
    )
    self.sync_data()
def add_one_env(
    self,
    env: AtomicEnvironment,
    force: "np.ndarray" = None,
    train: bool = False,
    **kwargs,
):
    """Add a single local environment to the training set of the GP.

    Args:
        env (AtomicEnvironment): Local environment to be added to the
            training set of the GP.
        force (np.ndarray): Force on the central atom of the local
            environment in the form of a 3-component Numpy array
            containing the x, y, and z components. If None, env.force
            is used as the label instead.
        train (bool): If True, the GP is trained after the local
            environment is added; **kwargs are forwarded to train().
    """
    self.training_data.append(env)
    # Use the environment's own stored force when no explicit label is given.
    if force is None:
        self.training_labels.append(env.force)
    else:
        self.training_labels.append(force)
    self.training_labels_np = np.hstack(self.training_labels)
    # Refresh the global-memory view of this GP's data.
    self.sync_data()

    # update list of all labels
    self.all_labels = np.concatenate(
        (self.training_labels_np, self.energy_labels_np)
    )

    if train:
        self.train(**kwargs)
#KIT important, the function where the training occurs
def train(
self,
logger_name: str = None,
custom_bounds=None,
grad_tol: float = 1e-4,
x_tol: float = 1e-5,
line_steps: int = 20,
print_progress: bool = False,
):
"""Train Gaussian Process model on training data. Tunes the
hyperparameters to maximize the likelihood, then computes L and alpha
(related to the covariance matrix of the training set).
Args:
logger (logging.logger): logger object specifying where to write the
progress of the optimization.
custom_bounds (np.ndarray): Custom bounds on the hyperparameters.
grad_tol (float): Tolerance of the hyperparameter gradient that
determines when hyperparameter optimization is terminated.
x_tol (float): Tolerance on the x values used to decide when
Nelder-Mead hyperparameter optimization is terminated.
line_steps (int): Maximum number of line steps for L-BFGS
hyperparameter optimization.
:param logger_name:
:param print_progress:
"""
verbose = "warning"
if print_progress:
verbose = "info"
if logger_name is None:
set_logger(
"gp_algebra",
stream=True,
fileout_name="log.gp_algebra",
verbose=verbose,
)
logger_name = "gp_algebra"
disp = print_progress
if (
max(len(self.training_data), len(self.training_labels)) > 5000
and not self.large_warning
):
self.large_warning = True
warning_message = (
"WARNING! Your GP is very large (>5000 atomic "
"environments). The hyperparameter optimization process "
"does not scale favorably with increasing atomic "
"environments"
" (roughly N^2)"
"and so your GP may take a very long time to train."
"Consider finding a way to reduce the number of atomic "
"environments in your model if you want to optimize the "
"hyperparameters or optimize them by a different route."
)
logger = logging.getLogger(self.logger_name)
logger.warning(warning_message)
if len(self.training_data) == 0 or len(self.training_labels) == 0:
raise Warning(
"You are attempting to train a GP with no "
"training data. Add environments and forces "
"to the GP and try again."
)
x_0 = self.hyps
args = (
self.name,
self.kernel_grad,
logger_name,
self.cutoffs,
self.hyps_mask,
self.n_cpus,
self.n_sample,
)
res = None
if self.opt_algorithm == "L-BFGS-B":
# bound signal noise below to avoid overfitting
if self.bounds is None:
bounds = np.array([(1e-6, np.inf)] * len(x_0))
bounds[-1, 0] = 1e-3
else:
bounds = self.bounds
#KIT the minimize function is called to minimize the negation of the likelihood, thus tuning the
#hyperparameters to maximize the likelihood
# Catch linear algebra errors and switch to BFGS if necessary
try:
res = minimize(
get_neg_like_grad,
x_0,
args,
method="L-BFGS-B",
jac=True,
bounds=bounds,
options={
"disp": disp,
"gtol": grad_tol,
"maxls": line_steps,
"maxiter": self.maxiter,
},
)
except np.linalg.LinAlgError:
logger = logging.getLogger(self.logger_name)
logger.warning(
"Algorithm for L-BFGS-B failed. Changing to "
"BFGS for remainder of run."
)
self.opt_algorithm = "BFGS"
if custom_bounds is not None:
res = minimize(
get_neg_like_grad,
x_0,
args,
method="L-BFGS-B",
jac=True,
bounds=custom_bounds,
options={
"disp": disp,
"gtol": grad_tol,
"maxls": line_steps,
"maxiter": self.maxiter,
},
)
elif self.opt_algorithm == "BFGS":
res = minimize(
get_neg_like_grad,
x_0,
args,
method="BFGS",
jac=True,
options={"disp": disp, "gtol": grad_tol, "maxiter": self.maxiter},
)
if res is None:
raise RuntimeError("Optimization failed for some reason.")
#KIT res is a scipy.optimize.OptimizeResult object
#KIT res.x is the optimized hyperparameters
self.hyps = res.x
self.set_L_alpha()
#KIT res.fun is the value of the objective function, it is also the loss function in this case
#which is why we take the negation of it to be our likelihood
self.likelihood = -res.fun
#KIT res.jac is the jacobian of the objective/loss function. The Jacobian is the matrix of all of
#a vector valued function's first order derivatives.
self.likelihood_gradient = -res.jac
return res
def check_L_alpha(self):
    """
    Check that the alpha vector is up to date with the training set. If
    not, update_L_alpha is called (or set_L_alpha for a full rebuild when
    the label count shrank or otherwise disagrees).
    """
    # Expected number of rows: three force components per environment plus
    # one energy label per training structure.
    expected_rows = len(self.training_data) * 3 + len(self.training_structures)

    # Empty model: nothing to reconcile.
    if expected_rows == 0:
        return

    if self.alpha is None or expected_rows > self.alpha.shape[0]:
        # Never built, or new data appended: incremental update suffices.
        self.update_L_alpha()
    elif expected_rows != self.alpha.shape[0]:
        # Any other mismatch (e.g. data removed): rebuild from scratch.
        self.set_L_alpha()
def predict(self, x_t: AtomicEnvironment, d: int) -> [float, float]:
    """
    Predict a force component of the central atom of a local environment.

    Args:
        x_t (AtomicEnvironment): Input local environment.
        d (int): Force component to be predicted (1 is x, 2 is y, and
            3 is z).

    Return:
        (float, float): Mean and epistemic variance of the prediction.

    Raises:
        ValueError: If d is not 1, 2, or 3.
    """
    if d not in [1, 2, 3]:
        raise ValueError("d should be 1, 2, or 3")

    # Kernel vector allows for evaluation of atomic environments.
    # Only spread the kernel-vector computation over processes when
    # per-atom parallelism is disabled; otherwise the caller is already
    # parallelizing over atoms.
    if self.parallel and not self.per_atom_par:
        n_cpus = self.n_cpus
    else:
        n_cpus = 1

    # Publish training data to the module-level registries used by workers.
    self.sync_data()

    # Kernel vector between x_t (component d) and all training labels.
    k_v = get_kernel_vector(
        self.name,
        self.kernel,
        self.energy_force_kernel,
        x_t,
        d,
        self.hyps,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=n_cpus,
        n_sample=self.n_sample,
    )

    # Guarantee that alpha is up to date with training set
    self.check_L_alpha()

    # get predictive mean: k_v^T @ alpha
    pred_mean = np.matmul(k_v, self.alpha)

    # get predictive variance without cholesky (possibly faster):
    # var = k(x,x) - k_v^T K^-1 k_v
    # pass args to kernel based on if mult. hyperparameters in use
    args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)

    self_kern = self.kernel(x_t, x_t, d, d, *args)
    pred_var = self_kern - np.matmul(np.matmul(k_v, self.ky_mat_inv), k_v)

    return pred_mean, pred_var
def predict_force_xyz(self, x_t: AtomicEnvironment) -> ("np.ndarray", "np.ndarray"):
    """
    Convenience wrapper around predict() that evaluates all three
    Cartesian force components of a local environment in one call.

    :param x_t: input local environment
    :return: (means, variances) as two length-3 arrays; the second array
        holds the per-component epistemic variances returned by predict().
    """
    # predict() returns (mean, variance) for one component; 1/2/3 = x/y/z.
    predictions = [self.predict(x_t, component) for component in (1, 2, 3)]
    means, variances = zip(*predictions)
    return np.array(means), np.array(variances)
def predict_local_energy(self, x_t: AtomicEnvironment) -> float:
    """Predict the local energy of a local environment.

    Args:
        x_t (AtomicEnvironment): Input local environment.

    Return:
        float: Local energy predicted by the GP.
    """
    # Only use multiple processes here when per-atom parallelism is off;
    # otherwise the caller already parallelizes over atoms.
    if self.parallel and not self.per_atom_par:
        n_cpus = self.n_cpus
    else:
        n_cpus = 1

    # Publish training data to the module-level registries used by workers.
    self.sync_data()

    # Energy/force kernel vector between x_t and the training set.
    k_v = en_kern_vec(
        self.name,
        self.energy_force_kernel,
        self.energy_kernel,
        x_t,
        self.hyps,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=n_cpus,
        n_sample=self.n_sample,
    )

    # Predictive mean: k_v^T @ alpha.
    # NOTE(review): unlike predict(), alpha is not refreshed here via
    # check_L_alpha(); presumably callers ensure it is current — confirm.
    pred_mean = np.matmul(k_v, self.alpha)

    return pred_mean
def predict_local_energy_and_var(self, x_t: AtomicEnvironment):
    """Predict the local energy of a local environment and its
    uncertainty.

    Args:
        x_t (AtomicEnvironment): Input local environment.

    Return:
        (float, float): Mean and predictive variance predicted by the GP.
    """
    # Only use multiple processes when per-atom parallelism is off.
    if self.parallel and not self.per_atom_par:
        n_cpus = self.n_cpus
    else:
        n_cpus = 1

    # Publish training data to the module-level registries used by workers.
    self.sync_data()

    # get kernel vector between x_t and the training set
    k_v = en_kern_vec(
        self.name,
        self.energy_force_kernel,
        self.energy_kernel,
        x_t,
        self.hyps,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=n_cpus,
        n_sample=self.n_sample,
    )

    # get predictive mean: k_v^T @ alpha
    pred_mean = np.matmul(k_v, self.alpha)

    # get predictive variance via the Cholesky factor:
    # var = k(x,x) - ||L^-1 k_v||^2, computed with a triangular solve.
    v_vec = solve_triangular(self.l_mat, k_v, lower=True)
    args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)

    self_kern = self.energy_kernel(x_t, x_t, *args)

    pred_var = self_kern - np.matmul(v_vec, v_vec)

    return pred_mean, pred_var
def predict_efs(self, x_t: AtomicEnvironment):
    """Predict the local energy, forces, and partial stresses of an
    atomic environment and their predictive variances.

    Args:
        x_t (AtomicEnvironment): Input local environment.

    Return:
        tuple: (energy mean, force means, stress means,
        energy variance, force variances, stress variances).
    """
    # Kernel vector allows for evaluation of atomic environments.
    # Only use multiple processes when per-atom parallelism is off.
    if self.parallel and not self.per_atom_par:
        n_cpus = self.n_cpus
    else:
        n_cpus = 1

    # Publish training data to the module-level registries used by workers.
    self.sync_data()

    # Kernel vectors of x_t against the training set for the energy,
    # force, and stress channels.
    energy_vector, force_array, stress_array = efs_kern_vec(
        self.name,
        self.efs_force_kernel,
        self.efs_energy_kernel,
        x_t,
        self.hyps,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=n_cpus,
        n_sample=self.n_sample,
    )

    # Check that alpha is up to date with training set.
    self.check_L_alpha()

    # Compute mean predictions: k^T @ alpha per channel.
    en_pred = np.matmul(energy_vector, self.alpha)
    force_pred = np.matmul(force_array, self.alpha)
    stress_pred = np.matmul(stress_array, self.alpha)

    # Compute uncertainties: self-kernel minus explained variance
    # k^T K^-1 k (diagonal only for the vector-valued channels).
    args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)
    self_en, self_force, self_stress = self.efs_self_kernel(x_t, *args)

    en_var = self_en - np.matmul(
        np.matmul(energy_vector, self.ky_mat_inv), energy_vector
    )

    force_var = self_force - np.diag(
        np.matmul(np.matmul(force_array, self.ky_mat_inv), force_array.transpose())
    )

    stress_var = self_stress - np.diag(
        np.matmul(
            np.matmul(stress_array, self.ky_mat_inv), stress_array.transpose()
        )
    )

    return en_pred, force_pred, stress_pred, en_var, force_var, stress_var
def set_L_alpha(self):
    """
    Invert the covariance matrix, setting L (a lower triangular
    matrix s.t. L L^T = (K + sig_n^2 I)) and alpha, the inverse
    covariance matrix multiplied by the vector of training labels.
    The forces and variances are later obtained using alpha.
    """
    # Publish training data to the module-level registries used by workers.
    self.sync_data()

    # Full covariance matrix over all training labels (forces + energies).
    ky_mat = get_Ky_mat(
        self.hyps,
        self.name,
        self.kernel,
        self.energy_kernel,
        self.energy_force_kernel,
        self.energy_noise,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=self.n_cpus,
        n_sample=self.n_sample,
    )

    # Cholesky factorization; inverting the triangular factor gives
    # K^-1 = L^-T L^-1, then alpha = K^-1 y.
    l_mat = np.linalg.cholesky(ky_mat)
    l_mat_inv = np.linalg.inv(l_mat)
    ky_mat_inv = l_mat_inv.T @ l_mat_inv
    alpha = np.matmul(ky_mat_inv, self.all_labels)

    # Cache factors for prediction and incremental updates.
    self.ky_mat = ky_mat
    self.l_mat = l_mat
    self.alpha = alpha
    self.ky_mat_inv = ky_mat_inv

    self.likelihood = get_like_from_mats(ky_mat, l_mat, alpha, self.name)
    # Remember training-set size so update_L_alpha knows which rows are new.
    self.n_envs_prev = len(self.training_data)
def update_L_alpha(self):
    """
    Update the GP's L matrix and alpha vector without recalculating
    the entire covariance matrix K: only the rows/columns for training
    points added since the last update are computed.
    """
    # Set L matrix and alpha if set_L_alpha has not been called yet.
    # BUG FIX: the original second condition was
    # `np.array(self.ky_mat) is np.array(None)`, which is always False
    # because `is` compares the identities of two freshly created arrays;
    # compare against None directly instead.
    if self.l_mat is None or self.ky_mat is None:
        self.set_L_alpha()
        return

    # Reset global variables.
    self.sync_data()

    # Extend the cached covariance matrix with the new rows/columns.
    ky_mat = get_ky_mat_update(
        self.ky_mat,
        self.n_envs_prev,
        self.hyps,
        self.name,
        self.kernel,
        self.energy_kernel,
        self.energy_force_kernel,
        self.energy_noise,
        cutoffs=self.cutoffs,
        hyps_mask=self.hyps_mask,
        n_cpus=self.n_cpus,
        n_sample=self.n_sample,
    )

    # Re-factorize and recompute alpha = K^-1 y.
    l_mat = np.linalg.cholesky(ky_mat)
    l_mat_inv = np.linalg.inv(l_mat)
    ky_mat_inv = l_mat_inv.T @ l_mat_inv
    alpha = np.matmul(ky_mat_inv, self.all_labels)

    self.ky_mat = ky_mat
    self.l_mat = l_mat
    self.alpha = alpha
    self.ky_mat_inv = ky_mat_inv

    self.n_envs_prev = len(self.training_data)
def __str__(self):
    """String representation of the GP model: core settings followed by the
    hyperparameters (labeled individually when labels are available)."""
    thestr = ""
    thestr += f"Number of cpu cores: {self.n_cpus}\n"
    thestr += f"Kernel: {self.kernels}\n"
    thestr += f"Training points: {len(self.training_data)}\n"
    thestr += f"Cutoffs: {self.cutoffs}\n"
    thestr += f"Number of hyperparameters: {len(self.hyps)}\n"
    thestr += f"Hyperparameter array: {str(self.hyps)}\n"

    # BUG FIX: previously, when hyp_labels was None the hyperparameter
    # array was appended a second time onto the "Hyperparameter array"
    # line, duplicating it. The array is already printed above, so only
    # the labeled listing is added here.
    if self.hyp_labels is not None:
        for hyp, label in zip(self.hyps, self.hyp_labels):
            thestr += f"{label}: {hyp} \n"

    return thestr
def as_dict(self):
    """Dictionary representation of the GP model.

    Refreshes alpha first so the serialized matrices are consistent with
    the training set; environments are converted via their own as_dict,
    and callables (kernels, output) are stripped since they cannot be
    serialized.
    """
    self.check_L_alpha()

    # Shallow copy of the instance attributes.
    out_dict = dict(vars(self))

    # Serialize each training environment.
    out_dict["training_data"] = [env.as_dict() for env in self.training_data]

    # Write training structures (which are just list of environments)
    out_dict["training_structures"] = []
    for n, env_list in enumerate(self.training_structures):
        out_dict["training_structures"].append([])
        for env_curr in env_list:
            out_dict["training_structures"][n].append(env_curr.as_dict())

    # Remove the callables
    for key in [
        "kernel",
        "kernel_grad",
        "energy_kernel",
        "energy_force_kernel",
        "efs_energy_kernel",
        "efs_force_kernel",
        "efs_self_kernel",
        "output",
    ]:
        out_dict.pop(key)

    return out_dict
def sync_data(self):
    """Publish this GP's training data and labels into the module-level
    registries, keyed by the GP's name, so worker processes can reach them."""
    registry = (
        (_global_training_data, self.training_data),
        (_global_training_labels, self.training_labels_np),
        (_global_training_structures, self.training_structures),
        (_global_energy_labels, self.energy_labels_np),
    )
    for store, payload in registry:
        store[self.name] = payload
@staticmethod
def from_dict(dictionary):
    """Create GP object from dictionary representation.

    Args:
        dictionary (dict): output of as_dict (possibly produced by an
            older version; renamed arguments/attributes are translated
            in place first).

    Returns:
        GaussianProcess: the reconstructed model.
    """
    GaussianProcess.backward_arguments(dictionary, dictionary)
    GaussianProcess.backward_attributes(dictionary)

    new_gp = GaussianProcess(**dictionary)

    # Save time by attempting to load in computed attributes.
    if "training_data" in dictionary:
        new_gp.training_data = [
            AtomicEnvironment.from_dict(env) for env in dictionary["training_data"]
        ]
        new_gp.training_labels = deepcopy(dictionary["training_labels"])
        new_gp.training_labels_np = deepcopy(dictionary["training_labels_np"])
        new_gp.sync_data()

    # Reconstruct training structures.
    if "training_structures" in dictionary:
        new_gp.training_structures = []
        for n, env_list in enumerate(dictionary["training_structures"]):
            new_gp.training_structures.append([])
            for env_curr in env_list:
                new_gp.training_structures[n].append(
                    AtomicEnvironment.from_dict(env_curr)
                )
        new_gp.energy_labels = deepcopy(dictionary["energy_labels"])
        new_gp.energy_labels_np = deepcopy(dictionary["energy_labels_np"])
        new_gp.sync_data()

    new_gp.all_labels = np.concatenate(
        (new_gp.training_labels_np, new_gp.energy_labels_np)
    )

    new_gp.likelihood = dictionary.get("likelihood", None)
    new_gp.likelihood_gradient = dictionary.get("likelihood_gradient", None)
    new_gp.n_envs_prev = len(new_gp.training_data)

    # Save time by attempting to load in computed matrices.
    if dictionary.get("ky_mat_file"):
        try:
            new_gp.ky_mat = np.load(dictionary["ky_mat_file"])
            new_gp.compute_matrices()
            new_gp.ky_mat_file = None
        except FileNotFoundError:
            new_gp.ky_mat = None
            new_gp.l_mat = None
            new_gp.alpha = None
            new_gp.ky_mat_inv = None
            filename = dictionary.get("ky_mat_file")
            logger = logging.getLogger(new_gp.logger_name)
            # BUG FIX: the original message hard-coded "(unknown)" and left
            # the `filename` variable unused; interpolate the real path.
            logger.warning(
                "the covariance matrices are not loaded "
                f"because {filename} cannot be found"
            )
    else:
        # BUG FIX: ky_mat was previously restored twice from the same
        # dictionary entry; the duplicate assignment has been removed.
        new_gp.ky_mat = (
            np.array(dictionary["ky_mat"])
            if dictionary.get("ky_mat") is not None
            else None
        )
        new_gp.ky_mat_inv = (
            np.array(dictionary["ky_mat_inv"])
            if dictionary.get("ky_mat_inv") is not None
            else None
        )
        new_gp.l_mat = (
            np.array(dictionary["l_mat"])
            if dictionary.get("l_mat") is not None
            else None
        )
        new_gp.alpha = (
            np.array(dictionary["alpha"])
            if dictionary.get("alpha") is not None
            else None
        )

    return new_gp
def compute_matrices(self):
    """
    When covariance matrix is known, reconstruct other matrices
    (Cholesky factor, its inverse, K^-1 and alpha).
    Used in re-loading large GPs.
    :return:
    """
    ky_mat = self.ky_mat

    if ky_mat is None or (isinstance(ky_mat, np.ndarray) and not np.any(ky_mat)):
        # BUG FIX: the original constructed a Warning object without raising
        # or logging it — a silent no-op. Emit a real log message, matching
        # the logging style used elsewhere in this class.
        logger = logging.getLogger(self.logger_name)
        logger.warning(
            "Covariance matrix was not loaded but "
            "compute_matrices was called. Computing covariance "
            "matrix and proceeding..."
        )
        self.set_L_alpha()
    else:
        # K = L L^T  =>  K^-1 = L^-T L^-1, alpha = K^-1 y.
        self.l_mat = np.linalg.cholesky(ky_mat)
        self.l_mat_inv = np.linalg.inv(self.l_mat)
        self.ky_mat_inv = self.l_mat_inv.T @ self.l_mat_inv
        self.alpha = np.matmul(self.ky_mat_inv, self.all_labels)
def adjust_cutoffs(
    self,
    new_cutoffs: Union[list, tuple, "np.ndarray"] = None,
    reset_L_alpha=True,
    train=True,
    new_hyps_mask=None,
):
    """
    Loop through atomic environment objects stored in the training data,
    and re-compute cutoffs for each. Useful if you want to gauge the
    impact of cutoffs given a certain training set! Unless you know
    *exactly* what you are doing for some development or test purpose,
    it is **highly** suggested that you call set_L_alpha and
    re-optimize your hyperparameters afterwards as is default here.

    A helpful way to update the cutoffs and kernel for an extant
    GP is to perform the following commands:
    >> hyps_mask = pm.as_dict()
    >> hyps = hyps_mask['hyps']
    >> cutoffs = hyps_mask['cutoffs']
    >> kernels = hyps_mask['kernels']
    >> gp_model.update_kernel(kernels, 'mc', hyps, cutoffs, hyps_mask)

    :param reset_L_alpha: rebuild the covariance matrix and alpha.
    :param train: re-optimize hyperparameters after adjusting.
    :param new_hyps_mask: replacement hyperparameter mask (optional).
    :param new_cutoffs: new cutoffs; taken from the mask when omitted.
    :return:
    """
    if new_hyps_mask is not None:
        hm = new_hyps_mask
        self.hyps_mask = new_hyps_mask
    else:
        hm = self.hyps_mask

    # Fall back to the cutoffs recorded in the hyperparameter mask.
    if new_cutoffs is None:
        try:
            new_cutoffs = hm["cutoffs"]
        except KeyError:
            raise KeyError(
                "New cutoffs not found in the hyps_mask"
                "dictionary via call to 'cutoffs' key."
            )

    # update environment: recompute every stored environment with the new
    # cutoffs and mask so the covariance matrix can be rebuilt consistently.
    nenv = len(self.training_data)
    for i in range(nenv):
        self.training_data[i].cutoffs = new_cutoffs
        self.training_data[i].cutoffs_mask = hm
        self.training_data[i].setup_mask(hm)
        self.training_data[i].compute_env()

    # Ensure that training data and labels are still consistent
    self.sync_data()

    self.cutoffs = new_cutoffs

    if reset_L_alpha:
        # Drop stale factors before rebuilding from scratch.
        del self.l_mat
        del self.ky_mat
        self.set_L_alpha()

    if train:
        self.train()
def remove_force_data(
    self, indexes: Union[int, List[int]], update_matrices: bool = True
) -> Tuple[List[Structure], List["ndarray"]]:
    """
    Remove force components from the model. Convenience function which
    deletes individual data points.

    Matrices should *always* be updated if you intend to use the GP to make
    predictions afterwards. This might be time consuming for large GPs,
    so, it is provided as an option, but, only do so with extreme caution.
    (Undefined behavior may result if you try to make predictions and/or
    add to the training set afterwards).

    Returns training data which was removed akin to a pop method, in order
    of lowest to highest index passed in.

    :param indexes: Indexes of envs in training data to remove.
    :param update_matrices: If false, will not update the GP's matrices
        afterwards (which can be time consuming for large models).
        This should essentially always be true except for niche development
        applications.
    :return: (removed environments, removed labels), lowest index first.
    """
    # Listify input even if one integer
    if isinstance(indexes, int):
        indexes = [indexes]

    # BUG FIX: the empty check must come first — the original called
    # max(indexes) before it, which raises ValueError on an empty list.
    if len(indexes) == 0:
        return [], []

    # BUG FIX: valid indexes run from 0 to len - 1, so an index equal to
    # len(...) is already out of range (the original used `>` and let it
    # through to fail inside pop()).
    if max(indexes) >= len(self.training_data):
        raise ValueError("Index out of range of data")

    # Get in reverse order so that modifying higher indexes doesn't affect
    # lower indexes
    indexes.sort(reverse=True)
    removed_data = []
    removed_labels = []
    for i in indexes:
        removed_data.append(self.training_data.pop(i))
        removed_labels.append(self.training_labels.pop(i))

    # Rebuild the flattened label array; np.hstack raises on an empty
    # list, so fall back to an empty array when all data was removed.
    if self.training_labels:
        self.training_labels_np = np.hstack(self.training_labels)
    else:
        self.training_labels_np = np.empty(0)
    self.all_labels = np.concatenate(
        (self.training_labels_np, self.energy_labels_np)
    )

    self.sync_data()

    if update_matrices:
        self.set_L_alpha()

    # Put removed data in order of lowest to highest index
    removed_data.reverse()
    removed_labels.reverse()

    return removed_data, removed_labels
def write_model(
    self, name: str, format: str = None, split_matrix_size_cutoff: int = 5000
):
    """
    Write model in a variety of formats to a file for later re-use.
    JSON files are open to visual inspection and are easier to use
    across different versions of FLARE or GP implementations. However,
    they are larger and loading them in takes longer (by setting up a
    new GP from the specifications). Pickled files can be faster to
    read & write, and they take up less memory.

    Args:
        name (str): Output name.
        format (str): Output format ("json", "pickle", or "binary");
            detected from `name` when omitted, defaulting to json.
        split_matrix_size_cutoff (int): If there are more than this
            number of training points in the set, save the matrices seperately.
    """
    # For very large models, save the covariance matrix to a separate .npy
    # file and detach all matrices so they are not duplicated inside the
    # serialized model; they are re-attached at the end of this method.
    if len(self.training_data) > split_matrix_size_cutoff:
        np.save(f"{name}_ky_mat.npy", self.ky_mat)
        self.ky_mat_file = f"{name}_ky_mat.npy"

        temp_ky_mat = self.ky_mat
        temp_l_mat = self.l_mat
        temp_alpha = self.alpha
        temp_ky_mat_inv = self.ky_mat_inv

        self.ky_mat = None
        self.l_mat = None
        self.alpha = None
        self.ky_mat_inv = None

    # Automatically detect output format from name variable
    for detect in ["json", "pickle", "binary"]:
        if detect in name.lower():
            format = detect
            break

    if format is None:
        format = "json"

    supported_formats = ["json", "pickle", "binary"]

    if format.lower() == "json":
        # Append the extension when missing.
        if ".json" != name[-5:]:
            name += ".json"
        with open(name, "w") as f:
            json.dump(self.as_dict(), f, cls=NumpyEncoder)

    elif format.lower() == "pickle" or format.lower() == "binary":
        if ".pickle" != name[-7:]:
            name += ".pickle"
        with open(name, "wb") as f:
            pickle.dump(self, f)

    else:
        raise ValueError(
            "Output format not supported: try from {}".format(supported_formats)
        )

    # Re-attach the matrices that were detached above.
    if len(self.training_data) > split_matrix_size_cutoff:
        self.ky_mat = temp_ky_mat
        self.l_mat = temp_l_mat
        self.alpha = temp_alpha
        self.ky_mat_inv = temp_ky_mat_inv
@staticmethod
def from_file(filename: str, format: str = ""):
    """
    One-line convenience method to load a GP from a file stored using
    write_file

    Args:
        filename (str): path to GP model
        format (str): json or pickle if format is not in filename

    Raises:
        ValueError: if the format cannot be determined.
    :return: the loaded GaussianProcess
    """
    if ".json" in filename or "json" in format:
        with open(filename, "r") as f:
            gp_model = GaussianProcess.from_dict(json.loads(f.readline()))

    elif ".pickle" in filename or "pickle" in format:
        with open(filename, "rb") as f:
            gp_model = pickle.load(f)

            # Translate renamed arguments/attributes from older versions.
            GaussianProcess.backward_arguments(gp_model.__dict__, gp_model.__dict__)
            GaussianProcess.backward_attributes(gp_model.__dict__)

            if hasattr(gp_model, "ky_mat_file") and gp_model.ky_mat_file:
                try:
                    gp_model.ky_mat = np.load(
                        gp_model.ky_mat_file, allow_pickle=True
                    )
                    gp_model.compute_matrices()
                except FileNotFoundError:
                    gp_model.ky_mat = None
                    gp_model.l_mat = None
                    gp_model.alpha = None
                    gp_model.ky_mat_inv = None
                    # BUG FIX: a bare Warning(...) was constructed here but
                    # never raised or logged; emit a real log message.
                    logging.getLogger(gp_model.logger_name).warning(
                        "the covariance matrices are not loaded, "
                        "this can take a long time to recompute"
                    )
    else:
        # BUG FIX: corrected the typo "unspecieified" in the error message.
        raise ValueError(
            "Warning: Format unspecified or file is not .json or .pickle format."
        )

    gp_model.check_instantiation()

    return gp_model
def __len__(self):
    """Number of force-component training environments stored in the GP."""
    return len(self.training_data)
@property
def training_statistics(self) -> dict:
    """
    Return a dictionary with statistics about the current training data.
    Useful for quickly summarizing info about the GP.
    :return: dict with keys "N", "species", and "envs_by_species".
    """
    # Element symbol of the central atom of every stored environment;
    # zipping with the labels mirrors the original data/label pairing.
    species_seen = [
        Z_to_element(env.structure.coded_species[env.atom])
        for env, _ in zip(self.training_data, self.training_labels)
    ]

    return {
        "N": len(self.training_data),
        "species": list(set(species_seen)),
        "envs_by_species": dict(Counter(species_seen)),
    }
@property
def par(self):
    """
    Backwards compatibility attribute: read-only alias for ``parallel``.

    :return: the value of ``self.parallel``.
    """
    return self.parallel
def __deepcopy__(self, memo):
    """Deep copy via a dict round-trip (as_dict -> from_dict)."""
    # this way can also deepcopy the training data in _global_training dicts
    return GaussianProcess.from_dict(self.as_dict())
def __del__(self):
    """Remove this GP's entries from the module-level registries when the
    object is garbage collected, so they do not leak memory."""
    # NOTE: the original guard `if self is None: return` could never
    # trigger (self is always a bound instance inside __del__) and has
    # been removed as dead code.
    if self.name in _global_training_labels:
        return (
            _global_training_data.pop(self.name, None),
            _global_training_labels.pop(self.name, None),
            _global_training_structures.pop(self.name, None),
            _global_energy_labels.pop(self.name, None),
        )
@staticmethod
def backward_arguments(kwargs, new_args={}):
"""
update the initialize arguments that were renamed
"""
if "kernel_name" in kwargs:
DeprecationWarning("kernel_name is being replaced with kernels")
new_args["kernels"] = kernel_str_to_array(kwargs["kernel_name"])
kwargs.pop("kernel_name")
if "nsample" in kwargs:
DeprecationWarning("nsample is being replaced with n_sample")
new_args["n_sample"] = kwargs["nsample"]
kwargs.pop("nsample")
if "par" in kwargs:
DeprecationWarning("par is being replaced with parallel")
new_args["parallel"] = kwargs["par"]
kwargs.pop("par")
if "no_cpus" in kwargs:
DeprecationWarning("no_cpus is being replaced with n_cpu")
new_args["n_cpus"] = kwargs["no_cpus"]
kwargs.pop("no_cpus")
if "multihyps" in kwargs:
DeprecationWarning("multihyps is removed")
kwargs.pop("multihyps")
return new_args
@staticmethod
def backward_attributes(dictionary):
    """
    add new attributes to old instance
    or update attribute types

    Mutates `dictionary` in place, filling in defaults for attributes
    introduced after older models were serialized so that from_dict can
    rebuild them.
    """
    if "name" not in dictionary:
        dictionary["name"] = "default_gp"
    if "per_atom_par" not in dictionary:
        dictionary["per_atom_par"] = True
    if "opt_algorithm" not in dictionary:
        dictionary["opt_algorithm"] = "L-BFGS-B"
    if "hyps_mask" not in dictionary:
        dictionary["hyps_mask"] = None
    if "parallel" not in dictionary:
        dictionary["parallel"] = False
    if "component" not in dictionary:
        dictionary["component"] = "mc"

    if "training_structures" not in dictionary:
        # Environments of each structure
        dictionary["training_structures"] = []
        dictionary["energy_labels"] = []  # Energies of training structures
        dictionary["energy_labels_np"] = np.empty(
            0,
        )

    if "training_labels" not in dictionary:
        dictionary["training_labels"] = []
        dictionary["training_labels_np"] = np.empty(
            0,
        )

    if "energy_noise" not in dictionary:
        dictionary["energy_noise"] = 0.01

    # Older models stored cutoffs as an array; convert to dict form.
    if not isinstance(dictionary["cutoffs"], dict):
        dictionary["cutoffs"] = Parameters.cutoff_array_to_dict(
            dictionary["cutoffs"]
        )

    # Normalize the hyperparameter mask to the current format.
    # NOTE(review): indentation was ambiguous in the reviewed copy; this
    # placement (unconditional) matches upstream FLARE — confirm.
    dictionary["hyps_mask"] = Parameters.backward(
        dictionary["kernels"], deepcopy(dictionary["hyps_mask"])
    )

    if "logger_name" not in dictionary:
        dictionary["logger_name"] = None
|
import unittest
from import_new_tournaments.process_hh_files.process.hands.extract.main_pot_winner import main_pot_winner
from GLOBAL_VARIABLES import TEST_RANDOM_HAND_HISTORIES_FOLDER
from import_new_tournaments.process_hh_files.process.tournament.extract.hands import get_hands_in_list
class test(unittest.TestCase):
    """Regression test for main_pot_winner over a recorded tournament file."""

    def test_level(self):
        """Every hand in the fixture history must report the expected main-pot winner."""
        # Fixture: one sit-and-go hand-history file. The unusual characters in
        # the name ({FULLSTOP}, 'ย') are part of the actual file name on disk
        # — do not "fix" them.
        hands = get_hands_in_list(TEST_RANDOM_HAND_HISTORIES_FOLDER, ["HH20201217 SITGOID-G23140753T3 TN-$0{FULLSTOP}50ย Hold'Em Turbo - On Demand GAMETYPE-Hold'em LIMIT-no CUR-REAL OND-T BUYIN-0.txt"])
        # Expected winner of the main pot for each hand, in file order.
        # NOTE(review): "Burn Card" presumably reflects how the parser labels
        # one particular hand — confirm against the fixture.
        expected_main_pot_winner = [
            "bacchus5555",
            "WBRoy",
            "PotNoodle99912",
            "OffMyMedz",
            "PotNoodle99912",
            "PotNoodle99912",
            "Burn Card",
            "WBRoy",
            "bacchus5555",
            "WBRoy",
            "OffMyMedz",
        ]
        for idx, h in enumerate(hands):
            self.assertEqual(main_pot_winner(h), expected_main_pot_winner[idx])
|
import os
from setuptools import find_packages, setup
def read(fname):
    """Return the text contents of *fname*, resolved relative to this
    file's directory (so ``python setup.py`` works from any cwd).

    Args:
        fname (str): file name relative to this setup script.

    Returns:
        str: the file's contents.
    """
    # BUG FIX: the original called open() without closing the handle,
    # leaking the file descriptor; a context manager closes it promptly.
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# This call to setup() does all the work of building and registering the
# distribution metadata for the pypokemontcg package.
setup(
    name="pypokemontcg",
    version="0.0.0",  # NOTE(review): placeholder version — bump before release
    description="SDK for pokemontcg APIs",
    # README.md is rendered as the PyPI long description.
    long_description=read('README.md'),
    long_description_content_type="text/markdown",
    url="https://github.com/eduardo-prjadko/pypokemontcg.git",
    author="Eduardo Prjadko",
    author_email="eduardoabp@gmail.com",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
    # Ship every discovered package except the test suite.
    packages=find_packages(exclude=("tests",))
)
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
"""This module define the scored label entity."""
import datetime
from ote_sdk.entities.color import Color
from ote_sdk.entities.id import ID
from ote_sdk.entities.label import Domain, LabelEntity
class ScoredLabel:
    """
    A label together with the probability that it applies.

    This is used inside :class:`Annotation`.

    :param label: the wrapped label. See :class:`Label`
    :param probability: a float denoting the probability of the shape belonging to the label.
    """

    def __init__(self, label: LabelEntity, probability: float = 0.0):
        self.label = label
        self.probability = probability

    @property
    def name(self) -> str:
        """Name of the wrapped label."""
        return self.label.name

    @property
    def id(self) -> ID:
        """ID of the wrapped label."""
        return self.label.id

    @property
    def color(self) -> Color:
        """Color of the wrapped label."""
        return self.label.color

    @property
    def hotkey(self) -> str:
        """Hotkey of the wrapped label."""
        return self.label.hotkey

    @property
    def domain(self) -> Domain:
        """Domain of the wrapped label."""
        return self.label.domain

    @property
    def is_empty(self) -> bool:
        """Whether the wrapped label is the empty label."""
        return self.label.is_empty

    @property
    def creation_date(self) -> datetime.datetime:
        """Creation date of the wrapped label."""
        return self.label.creation_date

    def get_label(self) -> LabelEntity:
        """Return the label that this ScoredLabel was initialized with."""
        return self.label

    def __repr__(self):
        return (
            f"ScoredLabel({self.id}, name={self.name}, probability={self.probability}, "
            f"domain={self.domain}, color={self.color}, hotkey={self.hotkey})"
        )

    def __eq__(self, other):
        # Only another ScoredLabel can compare equal; all exposed fields
        # plus the probability must match.
        if not isinstance(other, ScoredLabel):
            return False
        mine = (self.id, self.name, self.color, self.hotkey, self.probability, self.domain)
        theirs = (other.id, other.name, other.color, other.hotkey, other.probability, other.domain)
        return mine == theirs

    def __hash__(self):
        return hash(str(self))
|
import asyncio
from functools import partial
from typing import Any
import aiohttp
from python_keycloak_client.aio.abc import AsyncInit
from python_keycloak_client.client import KeycloakClient as SyncKeycloakClient
from python_keycloak_client.exceptions import KeycloakClientError
__all__ = (
'KeycloakClient',
)
class KeycloakClient(AsyncInit, SyncKeycloakClient):
    """Asynchronous (aiohttp-based) Keycloak client.

    Wraps the synchronous client, sharing its configuration handling while
    performing HTTP requests through a lazily created aiohttp session.
    Instances are finalized via ``__async_init__`` (AsyncInit protocol).
    """

    # Populated in __init__ / __async_init__.
    _lock = None
    _loop = None
    _session_factory = None

    def __init__(self, server_url, *, headers, logger=None, loop=None,
                 session_factory=aiohttp.client.ClientSession,
                 **session_params):
        """
        :param server_url: base URL of the Keycloak server.
        :param headers: default headers sent with every request.
        :param logger: optional logger; forwarded to the sync base class.
        :param loop: optional event loop; defaults to the running loop.
        :param session_factory: callable producing the aiohttp session.
        :param session_params: extra keyword args for the session factory.
        """
        super().__init__(server_url, headers=headers, logger=logger)
        self._lock = asyncio.Lock()
        self._loop = loop or asyncio.get_event_loop()
        # NOTE(review): passing `loop` to ClientSession was removed in newer
        # aiohttp releases — presumably this targets an older aiohttp; confirm.
        session_params['loop'] = self._loop
        session_params['headers'] = self._headers
        self._session_factory = partial(session_factory, **session_params)

    @property
    def loop(self):
        """Event loop this client is bound to."""
        return self._loop

    @property
    def session(self):
        """The active aiohttp session; raises if __async_init__ has not run."""
        if not self._session:
            raise RuntimeError
        return self._session

    async def _handle_response(self, req_ctx) -> Any:
        """
        Await a request context, translating HTTP errors into
        KeycloakClientError and decoding the body as JSON when possible.

        :param aiohttp.client._RequestContextManager req_ctx
        :return: parsed JSON payload, or raw bytes when not valid JSON.
        """
        async with req_ctx as response:
            try:
                response.raise_for_status()
            except aiohttp.client.ClientResponseError as cre:
                # Log the full request/response context before wrapping the
                # error in the library's own exception type.
                text = await response.text(errors='replace')
                self.logger.debug('{cre}; '
                                  'Request info: {cre.request_info}; '
                                  'Response headers: {cre.headers}; '
                                  'Response status: {cre.status}; '
                                  'Content: {text}'.format(cre=cre, text=text))
                raise KeycloakClientError(original_exc=cre)
            try:
                # content_type=None disables aiohttp's mimetype check.
                result = await response.json(content_type=None)
            except ValueError:
                result = await response.read()
        return result

    async def __async_init__(self) -> 'KeycloakClient':
        # Lock guards against two coroutines creating the session at once.
        async with self._lock:
            if self._session is None:
                self._session = self._session_factory()
                await self._session.__aenter__()
        return self

    async def close(self) -> None:
        """Close the underlying aiohttp session, if one was created."""
        if self._session is not None:
            await self._session.close()
            self._session = None
|
import os
import sys

# Directory containing this test file; also where chromedriver.exe lives.
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
# Make the package under test (one level up) importable without installing it.
sys.path.insert(0, os.path.abspath(os.path.dirname(DATA_DIR)))

from selenium import webdriver
from Screenshot import Screenshot_Clipping

# Absolute path to the bundled ChromeDriver binary (Windows build).
chromedriver_path = os.path.abspath(DATA_DIR + '/chromedriver.exe')
def test_full_screenshot():
    """Capture a full-page screenshot of a live site, then clean up."""
    screenshotter = Screenshot_Clipping.Screenshot()
    browser = webdriver.Chrome(executable_path=chromedriver_path)
    target = 'http://yandex.ru'
    browser.get(target)
    # Wait for runtime-loaded content before capturing the full page.
    saved_image = screenshotter.full_Screenshot(
        browser,
        save_path=r'.',
        image_name='Myimage.png',
        is_load_at_runtime=True,
        load_wait_time=3,
    )
    # Remove the artifact so repeated runs stay clean.
    os.remove(saved_image)
    browser.close()
    browser.quit()
def test_element_screenshot():
    """Capture a screenshot of a single page element, then clean up."""
    screenshotter = Screenshot_Clipping.Screenshot()
    browser = webdriver.Chrome(executable_path=chromedriver_path)
    target = "https://github.com/sam4u3/Selenium_Screenshot/blob/master/Screenshot/Screenshot_Clipping.py"
    browser.get(target)
    signup_prompt = browser.find_element_by_class_name('signup-prompt')
    saved_image = screenshotter.get_element(browser, signup_prompt, r'.')
    # Remove the artifact so repeated runs stay clean.
    os.remove(saved_image)
    browser.close()
    browser.quit()
def test_hide_element():
    """Capture a full-page screenshot with selected elements hidden."""
    screenshotter = Screenshot_Clipping.Screenshot()
    browser = webdriver.Chrome(executable_path=chromedriver_path)
    target = "https://github.com/sam4u3"
    browser.get(target)
    # Elements are addressed as "class=<full class attribute value>".
    hidden = ['class=avatar width-full height-full avatar-before-user-status']  # Use full class name
    saved_image = screenshotter.full_Screenshot(
        browser,
        save_path=r'.',
        elements=hidden,
        image_name='Myimage.png',
    )
    # Remove the artifact so repeated runs stay clean.
    os.remove(saved_image)
    browser.close()
    browser.quit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2020-02-19 20:56:46
# @Author  : ZubinGou (zebgou@gmail.com)
# @Link    : https://github.com/ZubinGou
# @Version : $Id$

# Minimal sanity check that TensorFlow imports and can build a constant
# tensor.
from __future__ import print_function
import tensorflow as tf

# NOTE(review): under TF2 this prints the Tensor object, not the string
# value; under TF1 a Session.run would be needed to materialize it.
# Printing the object is presumably intentional here — confirm.
hello = tf.constant("Heyyyy, TensorFlow!")
print(hello)
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Paper-style typography for the publication figure.
sns.set_context("paper", rc={'font.sans-serif': 'Helvetica',
                             'font.size': 12})

# Per-band training histories (metrics per epoch) of the DCSTFN model.
df_green = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-green/train/history.csv')
df_red = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-red/train/history.csv')
df_nir = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-nir/train/history.csv')

# Plot only the first 50 epochs of each run.
df_green = df_green.head(50)
df_red = df_red.head(50)
df_nir = df_nir.head(50)

epoch = df_green['epoch']
metrics = ('r2', 'val_r2')        # training vs. validation R^2 columns
labels = ('Green', 'Red', 'NIR')  # one curve per spectral band
colors = ('green', 'red', 'orange')
linestyles = ('-', '--')          # solid = training, dashed = validation

fig, ax = plt.subplots()
# Six curves: (train, val) x (green, red, nir). The plot order matters:
# the legend code below indexes ax.get_lines() by position.
for metric, linestyle in zip(metrics, linestyles):
    score = (df_green[metric], df_red[metric], df_nir[metric])
    for i in range(3):
        ax.plot(epoch + 1, score[i], label=labels[i], color=colors[i],
                linestyle=linestyle)

ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel(r'$R^2$', fontsize=12)
ax.tick_params(axis='both', which='both', labelsize=9)
ax.set_xticks(range(0, epoch.size + 1, 10))
ax.set_ylim([0.5, 0.9])
ax.grid(True, color=(0.95, 0.95, 0.95))

# Two invisible black lines serve purely as legend handles for the
# training-vs-validation (linestyle) legend.
for i in range(2):
    ax.plot([], [], color='black', linestyle=linestyles[i])
ax.grid(True)

lines = ax.get_lines()
# Band (color) legend uses the first three plotted lines; the linestyle
# legend uses the two dummy handles appended last.
color_legend = ax.legend(handles=[lines[i] for i in range(3)], labels=labels,
                         loc=4, bbox_to_anchor=(0.967, 0.0), fontsize=10, frameon=False)
line_legend = ax.legend(handles=[lines[i] for i in range(-2, 0)], labels=('Training', 'Validation'),
                        loc=4, bbox_to_anchor=(0.778, 0.0), fontsize=10, frameon=False)
# A second legend() call replaces the first, so re-add both explicitly.
ax.add_artist(color_legend)
ax.add_artist(line_legend)

ax.set_title('Fitted Curve', fontsize=14, fontweight='bold')
plt.savefig('r2.png', dpi=900)
plt.close()
|
import numpy
import os
def get_minimums_and_risks(height_map):
    """Find all local minima of the height map and their total risk level.

    A cell is a local minimum when it is strictly lower than every
    in-bounds 4-neighbour; its risk contribution is its height plus one.

    Returns:
        (list of [x, y] minimum positions, total risk level).
    """
    minima = []
    total_risk = 0
    max_x = len(height_map[0]) - 1
    max_y = len(height_map) - 1

    # Scan every cell of the grid, row by row.
    for y, row in enumerate(height_map):
        for x, value in enumerate(row):
            neighbour_vals = get_neighb_values(
                get_neighb_indexs(x, y, max_x, max_y), height_map
            )
            if is_position_minimum(value, neighbour_vals):
                minima.append([x, y])
                total_risk += value + 1

    return minima, total_risk
def get_neighb_values(neighb_indexs, height_map):
    """Return the height values at the given [x, y] index pairs."""
    # Positions are [x, y]; the grid is indexed row-first as [y][x].
    return [height_map[yy][xx] for xx, yy in neighb_indexs]
def get_neighb_indexs(x, y, max_x, max_y):
    """Return the in-bounds 4-neighbourhood of (x, y) as [x, y] pairs.

    Order is left, right, y-1, y+1; neighbours falling outside
    [0, max_x] x [0, max_y] are omitted (matching the original clamping
    followed by self-filtering).
    """
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    neighbours = []
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        if 0 <= nx <= max_x and 0 <= ny <= max_y:
            neighbours.append([nx, ny])
    return neighbours
def is_position_minimum(pos_val, neighb):
    """Return True if *pos_val* is strictly smaller than every neighbour.

    An empty neighbour list is vacuously a minimum, matching the original
    counting logic where ``0 >= len([])`` held.
    """
    # all() replaces the manual "count strictly-smaller comparisons and
    # compare against len()" loop — same result, C-speed, short-circuits.
    return all(pos_val < neighb_val for neighb_val in neighb)
def get_basin_size(x, y, height_map):
    """Flood-fill the basin containing (x, y) and return its size.

    *height_map* must be pre-processed so open cells are 0 and walls are 9;
    visited cells are marked 1 in place.  Implemented with an explicit
    stack instead of recursion so a large basin cannot blow Python's
    recursion limit (the recursive original recursed once per cell).
    """
    len_x = len(height_map[0])
    len_y = len(height_map)
    basin_size = 0
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if height_map[cy][cx] != 0:
            # Wall (9) or already visited (1) — skip.
            continue
        height_map[cy][cx] = 1  # mark visited
        basin_size += 1
        if cx > 0:
            stack.append((cx - 1, cy))
        if cx < len_x - 1:
            stack.append((cx + 1, cy))
        if cy > 0:
            stack.append((cx, cy - 1))
        if cy < len_y - 1:
            stack.append((cx, cy + 1))
    return basin_size
def main():
    """Solve AoC 2021 day 9: read input.txt and print both puzzle answers."""
    # Read the puzzle grid and convert each character to an int.
    with open("input.txt", "r", encoding="utf-8") as file:
        # strip out the new line
        puzzle_lines = [line.rstrip('\n') for line in file]
    height_map = [[int(chars) for chars in elem] for elem in puzzle_lines]

    # Part 1: sum of risk levels over all local minima.
    mins_index, risk_level = get_minimums_and_risks(height_map)
    result_p1 = risk_level
    print("Part1: " + str(result_p1))

    # All heights except 9 belong to some basin, so collapse the grid to
    # 0 (open) / 9 (wall); the flood fill marks visited cells with 1.
    height_map = [[0 if num != 9 else 9 for num in line]
                  for line in height_map]

    # Basin size for every minimum found in part 1.  The loop variable is
    # named "minimum" so it no longer shadows the builtin min().
    basins_len_list = []
    for minimum in mins_index:
        basins_len_list.append(get_basin_size(minimum[0], minimum[1], height_map))

    # Part 2: product of the three largest basins.
    basin_list_sort = sorted(basins_len_list)
    mul_lbasins = basin_list_sort[-3] * \
        basin_list_sort[-2] * basin_list_sort[-1]
    result_p2 = mul_lbasins
    print("Part2: " + str(result_p2))


if __name__ == "__main__":
    main()
|
from functools import lru_cache
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from menus.menu_pool import menu_pool
def get_admin_name(model, name):
    """Build the '<app_label>_<model_name>_<name>' admin url name."""
    opts = model._meta
    return '{}_{}_{}'.format(opts.app_label, opts.model_name, name)
def reverse_admin_name(model, name, args=None, kwargs=None):
    """Reverse the admin url named '<app_label>_<model_name>_<name>'."""
    admin_name = get_admin_name(model, name)
    return reverse('admin:{}'.format(admin_name), args=args, kwargs=kwargs)
@lru_cache(maxsize=1)
def supported_models(model):
    """Return the navigation-enabled models registered for *model*'s app.

    Falls back to an empty dict when the app config cannot be looked up.
    """
    try:
        app_config = apps.get_app_config(model._meta.app_label)
    except LookupError:
        return {}
    return app_config.cms_extension.navigation_apps_models
@lru_cache(maxsize=1)
def supported_content_type_pks(model):
    """Return the ContentType pks of the app's navigation models."""
    app_config = apps.get_app_config(model._meta.app_label)
    supported = app_config.cms_extension.navigation_apps_models
    ctype_map = ContentType.objects.get_for_models(*supported)
    return [ctype.pk for ctype in ctype_map.values()]
@lru_cache(maxsize=1)
def is_model_supported(app_model, model):
    """Return bool value if model is in supported_models"""
    # Membership on the dict itself — the previous ".keys()" was redundant.
    return model in supported_models(app_model)
def get_versionable_for_content(content):
    """Return the versionable registered for *content*, or None.

    None is returned when djangocms_versioning is not installed, or when
    the content class is not registered as versionable.
    """
    try:
        from djangocms_versioning import versionables
    except ImportError:
        return None
    try:
        return versionables.for_content(content)
    except KeyError:
        return None
def purge_menu_cache(site_id=None, language=None):
    """Clear the menu cache, optionally scoped to one site and/or language."""
    menu_pool.clear(site_id=site_id, language=language)
|
import numpy as np
def apply_threshold(output, speech_threshold=0.5, music_threshold=0.5):
    """Binarise the speech (row 0) and music (row 1) activations.

    Values at or above the row's threshold become 1, the rest 0.
    NOTE: rows 0 and 1 of *output* are overwritten in place before the
    int copy is returned.
    """
    for row, threshold in ((0, speech_threshold), (1, music_threshold)):
        output[row] = np.where(output[row] >= threshold, 1, 0)
    return output.astype(int)
|
from stationdata import build_station_list, update_water_levels
from utils import sorted_by_key
def stations_level_over_threshold(stations, tol):
    """Return (station, relative level) tuples above *tol*, highest first.

    Stations with an inconsistent typical range or no latest reading are
    skipped.
    """
    over_threshold = []
    for station in stations:
        if not station.typical_range_consistent():
            continue
        # latest_level is tested first so relative_water_level() is only
        # evaluated for stations that actually have a reading.
        if station.latest_level is not None and station.relative_water_level() > tol:
            over_threshold.append((station, station.relative_water_level()))
    return sorted_by_key(over_threshold, 1, True)
def stations_highest_rel_level(stations, N):
    """Return the N stations with the highest relative water level.

    Stations with an inconsistent typical range or no relative level are
    skipped; the result is ordered from highest to lowest level.
    """
    levelled = []
    for station in stations:
        if station.typical_range_consistent() and station.relative_water_level() is not None:
            levelled.append((station, station.relative_water_level()))
    top = sorted_by_key(levelled, 1, True)[:N]
    return [station for station, _ in top]
|
from django.test import TestCase
import json
from app.tests.test_modules.loader import test_flow, TestCaseFlow, TestCaseFlowRunner
from module.MicrocloudchipException.base_exception import MicrocloudchipException
from module.data.user_data import UserData
from app.tests.test_modules.testing_user_module import *
class UserUnittest(TestCase):
    """User-creation tests.

    Only user creation is tested here; modification and deletion are
    exercised at the Manager level.
    """
    CONFIG_FILE_ROOT: str = ["server", "config.json"]
    TEST_CASE_FILE_ROOT: str = ["app", "tests", "test-input-data", "user", "test-case.json"]
    EXAMPLE_IMAGE_ROOT: str = ["app", "tests", "test-input-data", "user"]
    static_id_list: list[str] = []

    # Settings come from the registered config file, so migrations must be
    # run before these tests are executed.
    config: SystemConfig = SystemConfig(os.path.join(*CONFIG_FILE_ROOT))

    @classmethod
    def setUpClass(cls):
        # Verify that the test case file actually exists.
        super(UserUnittest, cls).setUpClass()
        test_case_file_root_str = os.path.join(*UserUnittest.TEST_CASE_FILE_ROOT)
        if not os.path.isfile(test_case_file_root_str):
            raise NotADirectoryError(f"{test_case_file_root_str} is not exist")

    def test_add_user(self):
        """Notes on the test cases (user/test-case.json):
        1. Requests with missing fields (all of them must fail parsing).
        2. Validation checks (all of these are false as well).
        3. Data that passes validation; some entries are still rejected
           by the duplicate check.
        """
        # Only valid requests may be saved to the DB; cases flagged True
        # are actually persisted.  Image upload itself is tested at the
        # API level — a sample image is enough here.

        # Load the test cases.
        test_case_file_root_str = os.path.join(*UserUnittest.TEST_CASE_FILE_ROOT)
        with open(test_case_file_root_str) as _f:
            test_cases = json.load(_f)["test-case"]
        for idx, test_case in enumerate(test_cases):
            user_builder = UserBuilder()
            req = test_case['request']
            if not test_case["is-passed"]:
                # Failure case: creation must raise.
                self.assertRaises(MicrocloudchipException,
                                  lambda: test_add_user(user_builder, req, self.config.system_root))
            else:
                # Success
                test_add_user(user_builder, req, self.config.system_root)
                # Remember the static_id for the following tests.
                self.static_id_list.append(user_builder.static_id)
        # Reset the created directories.
        test_reset_because_of_failed_upload_failed(self.static_id_list, self.config.get_system_root())

    @test_flow("app/tests/test-input-data/user/test_modify_user.json")
    def test_modify_user(self, test_flow: TestCaseFlow):
        def __cmd_add_user(
                name: str,
                pswd: str,
                email: str,
                volume_type: str,
                is_admin: bool
        ):
            # Create a user directly via the builder.
            UserBuilder().set_name(name) \
                .set_email(email) \
                .set_password(pswd) \
                .set_is_admin(is_admin) \
                .set_volume_type(volume_type) \
                .set_static_id() \
                .set_system_root(self.config.system_root) \
                .save()

        def __cmd_modify_username(target_email: str, n_name: str, is_succeed: bool, exception_str: str):
            try:
                d: UserData = UserData(email=target_email, system_root=self.config.system_root)()
                d.update(new_name=n_name)
            except MicrocloudchipException as e:
                # Failed
                self.assertFalse(is_succeed)
                self.assertEqual(type(e).__name__, exception_str)
            except Exception as e:
                print(f"[External Error]: {type(e)}:{e}")
                print(f"[Req]: {target_email}:{n_name}:{is_succeed}:{exception_str}")
            else:
                # Succeed
                self.assertTrue(is_succeed, msg=n_name)

        def __cmd_modify_password(target_email: str, new_password: str, is_succeed: bool, exception_str: str):
            try:
                d: UserData = UserData(email=target_email, system_root=self.config.system_root)()
                d.update(new_password=new_password)
            except MicrocloudchipException as e:
                # Failed
                self.assertFalse(is_succeed)
                self.assertEqual(type(e).__name__, exception_str)
            else:
                # Succeed
                self.assertTrue(is_succeed)

        def __cmd_modify_volumetype(target_email: str, volume_type: str, is_succeed: bool, exception_str: str):
            try:
                d: UserData = UserData(email=target_email, system_root=self.config.system_root)()
                d.update(new_volume_type=volume_type)
            except MicrocloudchipException as e:
                # Failed
                self.assertFalse(is_succeed)
                self.assertEqual(type(e).__name__, exception_str)
            else:
                # Succeed
                self.assertTrue(is_succeed)

        def __cmd_modify_image(target_email: str, img_name: str, is_succeed: bool, exception_str: str):
            img_root: str = os.path.join(*(self.EXAMPLE_IMAGE_ROOT + [img_name]))
            with open(img_root, 'rb') as _f:
                img_raw_data = _f.read()
            try:
                d: UserData = UserData(email=target_email, system_root=self.config.system_root)()
                d.update(will_change_image=True, img_extension=img_root.split('.')[-1], img_raw_data=img_raw_data)
            except MicrocloudchipException as e:
                self.assertFalse(is_succeed)
                self.assertEqual(type(e).__name__, exception_str)
            else:
                self.assertTrue(is_succeed)

        # Run Test Code
        TestCaseFlowRunner(test_flow).set_process('add-user', __cmd_add_user) \
            .set_process("modify-username", __cmd_modify_username) \
            .set_process("modify-password", __cmd_modify_password) \
            .set_process("modify-volumetype", __cmd_modify_volumetype) \
            .set_process("modify-image", __cmd_modify_image) \
            .run()
|
# Read n and the values, then print the maximum and minimum of the first
# n values using the builtins instead of two manual scan loops.
n = int(input())
temp = list(map(int, input().split()))
# Only the first n values participate, matching the original index loop;
# when n == 0 both answers fall back to temp[0], exactly as the original
# initialisation (maxx = minn = temp[0]) did.
values = temp[:n] if n > 0 else temp[:1]
print(max(values), min(values))
|
# -*- coding: utf-8 -*-
COUNTRIES_LIST = [(u'Afghanistan', u'AF', u'AFG', 4, u'ISO 3166-2:AF'),
(u'\xc5land Islands', u'AX', u'ALA', 248, u'ISO 3166-2:AX'),
(u'Albania', u'AL', u'ALB', 8, u'ISO 3166-2:AL'),
(u'Algeria', u'DZ', u'DZA', 12, u'ISO 3166-2:DZ'),
(u'American Samoa', u'AS', u'ASM', 16, u'ISO 3166-2:AS'),
(u'Andorra', u'AD', u'AND', 20, u'ISO 3166-2:AD'),
(u'Angola', u'AO', u'AGO', 24, u'ISO 3166-2:AO'),
(u'Anguilla', u'AI', u'AIA', 660, u'ISO 3166-2:AI'),
(u'Antarctica', u'AQ', u'ATA', 10, u'ISO 3166-2:AQ'),
(u'Antigua and Barbuda', u'AG', u'ATG', 28, u'ISO 3166-2:AG'),
(u'Argentina', u'AR', u'ARG', 32, u'ISO 3166-2:AR'),
(u'Armenia', u'AM', u'ARM', 51, u'ISO 3166-2:AM'),
(u'Aruba', u'AW', u'ABW', 533, u'ISO 3166-2:AW'),
(u'Australia', u'AU', u'AUS', 36, u'ISO 3166-2:AU'),
(u'Austria', u'AT', u'AUT', 40, u'ISO 3166-2:AT'),
(u'Azerbaijan', u'AZ', u'AZE', 31, u'ISO 3166-2:AZ'),
(u'Bahamas', u'BS', u'BHS', 44, u'ISO 3166-2:BS'),
(u'Bahrain', u'BH', u'BHR', 48, u'ISO 3166-2:BH'),
(u'Bangladesh', u'BD', u'BGD', 50, u'ISO 3166-2:BD'),
(u'Barbados', u'BB', u'BRB', 52, u'ISO 3166-2:BB'),
(u'Belarus', u'BY', u'BLR', 112, u'ISO 3166-2:BY'),
(u'Belgium', u'BE', u'BEL', 56, u'ISO 3166-2:BE'),
(u'Belize', u'BZ', u'BLZ', 84, u'ISO 3166-2:BZ'),
(u'Benin', u'BJ', u'BEN', 204, u'ISO 3166-2:BJ'),
(u'Bermuda', u'BM', u'BMU', 60, u'ISO 3166-2:BM'),
(u'Bhutan', u'BT', u'BTN', 64, u'ISO 3166-2:BT'),
(u'Bolivia', u'BO', u'BOL', 68, u'ISO 3166-2:BO'),
(u'Bosnia and Herzegovina', u'BA', u'BIH', 70, u'ISO 3166-2:BA'),
(u'Botswana', u'BW', u'BWA', 72, u'ISO 3166-2:BW'),
(u'Bouvet Island', u'BV', u'BVT', 74, u'ISO 3166-2:BV'),
(u'Brazil', u'BR', u'BRA', 76, u'ISO 3166-2:BR'),
(u'British Indian Ocean Territory', u'IO', u'IOT', 86, u'ISO 3166-2:IO'),
(u'Brunei Darussalam', u'BN', u'BRN', 96, u'ISO 3166-2:BN'),
(u'Bulgaria', u'BG', u'BGR', 100, u'ISO 3166-2:BG'),
(u'Burkina Faso', u'BF', u'BFA', 854, u'ISO 3166-2:BF'),
(u'Burundi', u'BI', u'BDI', 108, u'ISO 3166-2:BI'),
(u'Cambodia', u'KH', u'KHM', 116, u'ISO 3166-2:KH'),
(u'Cameroon', u'CM', u'CMR', 120, u'ISO 3166-2:CM'),
(u'Canada', u'CA', u'CAN', 124, u'ISO 3166-2:CA'),
(u'Cape Verde', u'CV', u'CPV', 132, u'ISO 3166-2:CV'),
(u'Cayman Islands', u'KY', u'CYM', 136, u'ISO 3166-2:KY'),
(u'Central African Republic', u'CF', u'CAF', 140, u'ISO 3166-2:CF'),
(u'Chad', u'TD', u'TCD', 148, u'ISO 3166-2:TD'),
(u'Chile', u'CL', u'CHL', 152, u'ISO 3166-2:CL'),
(u'China', u'CN', u'CHN', 156, u'ISO 3166-2:CN'),
(u'Christmas Island', u'CX', u'CXR', 162, u'ISO 3166-2:CX'),
(u'Cocos (Keeling) Islands', u'CC', u'CCK', 166, u'ISO 3166-2:CC'),
(u'Colombia', u'CO', u'COL', 170, u'ISO 3166-2:CO'),
(u'Comoros', u'KM', u'COM', 174, u'ISO 3166-2:KM'),
(u'Congo', u'CG', u'COG', 178, u'ISO 3166-2:CG'),
(u'Congo, Democratic Republic of the', u'CD', u'COD', 180, u'ISO 3166-2:CD'),
(u'Cook Islands', u'CK', u'COK', 184, u'ISO 3166-2:CK'),
(u'Costa Rica', u'CR', u'CRI', 188, u'ISO 3166-2:CR'),
(u"Cote d'Ivoire C\xf4te d'Ivoire", u'CI', u'CIV', 384, u'ISO 3166-2:CI'),
(u'Croatia', u'HR', u'HRV', 191, u'ISO 3166-2:HR'),
(u'Cuba', u'CU', u'CUB', 192, u'ISO 3166-2:CU'),
(u'Cyprus', u'CY', u'CYP', 196, u'ISO 3166-2:CY'),
(u'Czech Republic', u'CZ', u'CZE', 203, u'ISO 3166-2:CZ'),
(u'Denmark', u'DK', u'DNK', 208, u'ISO 3166-2:DK'),
(u'Djibouti', u'DJ', u'DJI', 262, u'ISO 3166-2:DJ'),
(u'Dominica', u'DM', u'DMA', 212, u'ISO 3166-2:DM'),
(u'Dominican Republic', u'DO', u'DOM', 214, u'ISO 3166-2:DO'),
(u'Ecuador', u'EC', u'ECU', 218, u'ISO 3166-2:EC'),
(u'Egypt', u'EG', u'EGY', 818, u'ISO 3166-2:EG'),
(u'El Salvador', u'SV', u'SLV', 222, u'ISO 3166-2:SV'),
(u'Equatorial Guinea', u'GQ', u'GNQ', 226, u'ISO 3166-2:GQ'),
(u'Eritrea', u'ER', u'ERI', 232, u'ISO 3166-2:ER'),
(u'Estonia', u'EE', u'EST', 233, u'ISO 3166-2:EE'),
(u'Ethiopia', u'ET', u'ETH', 231, u'ISO 3166-2:ET'),
(u'Falkland Islands (Malvinas)', u'FK', u'FLK', 238, u'ISO 3166-2:FK'),
(u'Faroe Islands', u'FO', u'FRO', 234, u'ISO 3166-2:FO'),
(u'Fiji', u'FJ', u'FJI', 242, u'ISO 3166-2:FJ'),
(u'Finland', u'FI', u'FIN', 246, u'ISO 3166-2:FI'),
(u'France', u'FR', u'FRA', 250, u'ISO 3166-2:FR'),
(u'French Guiana', u'GF', u'GUF', 254, u'ISO 3166-2:GF'),
(u'French Polynesia', u'PF', u'PYF', 258, u'ISO 3166-2:PF'),
(u'French Southern Territories', u'TF', u'ATF', 260, u'ISO 3166-2:TF'),
(u'Gabon', u'GA', u'GAB', 266, u'ISO 3166-2:GA'),
(u'Gambia', u'GM', u'GMB', 270, u'ISO 3166-2:GM'),
(u'Georgia', u'GE', u'GEO', 268, u'ISO 3166-2:GE'),
(u'Germany', u'DE', u'DEU', 276, u'ISO 3166-2:DE'),
(u'Ghana', u'GH', u'GHA', 288, u'ISO 3166-2:GH'),
(u'Gibraltar', u'GI', u'GIB', 292, u'ISO 3166-2:GI'),
(u'Greece', u'GR', u'GRC', 300, u'ISO 3166-2:GR'),
(u'Greenland', u'GL', u'GRL', 304, u'ISO 3166-2:GL'),
(u'Grenada', u'GD', u'GRD', 308, u'ISO 3166-2:GD'),
(u'Guadeloupe', u'GP', u'GLP', 312, u'ISO 3166-2:GP'),
(u'Guam', u'GU', u'GUM', 316, u'ISO 3166-2:GU'),
(u'Guatemala', u'GT', u'GTM', 320, u'ISO 3166-2:GT'),
(u'Guernsey', u'GG', u'GGY', 831, u'ISO 3166-2:GG'),
(u'Guinea', u'GN', u'GIN', 324, u'ISO 3166-2:GN'),
(u'Guinea-Bissau', u'GW', u'GNB', 624, u'ISO 3166-2:GW'),
(u'Guyana', u'GY', u'GUY', 328, u'ISO 3166-2:GY'),
(u'Haiti', u'HT', u'HTI', 332, u'ISO 3166-2:HT'),
(u'Heard Island and McDonald Islands', u'HM', u'HMD', 334, u'ISO 3166-2:HM'),
(u'Holy See (Vatican City State)', u'VA', u'VAT', 336, u'ISO 3166-2:VA'),
(u'Honduras', u'HN', u'HND', 340, u'ISO 3166-2:HN'),
(u'Hong Kong', u'HK', u'HKG', 344, u'ISO 3166-2:HK'),
(u'Hungary', u'HU', u'HUN', 348, u'ISO 3166-2:HU'),
(u'Iceland', u'IS', u'ISL', 352, u'ISO 3166-2:IS'),
(u'India', u'IN', u'IND', 356, u'ISO 3166-2:IN'),
(u'Indonesia', u'ID', u'IDN', 360, u'ISO 3166-2:ID'),
(u'Iran, Islamic Republic of', u'IR', u'IRN', 364, u'ISO 3166-2:IR'),
(u'Iraq', u'IQ', u'IRQ', 368, u'ISO 3166-2:IQ'),
(u'Ireland', u'IE', u'IRL', 372, u'ISO 3166-2:IE'),
(u'Isle of Man', u'IM', u'IMN', 833, u'ISO 3166-2:IM'),
(u'Israel', u'IL', u'ISR', 376, u'ISO 3166-2:IL'),
(u'Italy', u'IT', u'ITA', 380, u'ISO 3166-2:IT'),
(u'Jamaica', u'JM', u'JAM', 388, u'ISO 3166-2:JM'),
(u'Japan', u'JP', u'JPN', 392, u'ISO 3166-2:JP'),
(u'Jersey', u'JE', u'JEY', 832, u'ISO 3166-2:JE'),
(u'Jordan', u'JO', u'JOR', 400, u'ISO 3166-2:JO'),
(u'Kazakhstan', u'KZ', u'KAZ', 398, u'ISO 3166-2:KZ'),
(u'Kenya', u'KE', u'KEN', 404, u'ISO 3166-2:KE'),
(u'Kiribati', u'KI', u'KIR', 296, u'ISO 3166-2:KI'),
(u"Korea, Democratic People's Republic of",
u'KP',
u'PRK',
408,
u'ISO 3166-2:KP'),
(u'Korea, Republic of', u'KR', u'KOR', 410, u'ISO 3166-2:KR'),
(u'Kuwait', u'KW', u'KWT', 414, u'ISO 3166-2:KW'),
(u'Kyrgyzstan', u'KG', u'KGZ', 417, u'ISO 3166-2:KG'),
(u"Lao People's Democratic Republic", u'LA', u'LAO', 418, u'ISO 3166-2:LA'),
(u'Latvia', u'LV', u'LVA', 428, u'ISO 3166-2:LV'),
(u'Lebanon', u'LB', u'LBN', 422, u'ISO 3166-2:LB'),
(u'Lesotho', u'LS', u'LSO', 426, u'ISO 3166-2:LS'),
(u'Liberia', u'LR', u'LBR', 430, u'ISO 3166-2:LR'),
(u'Libyan Arab Jamahiriya', u'LY', u'LBY', 434, u'ISO 3166-2:LY'),
(u'Liechtenstein', u'LI', u'LIE', 438, u'ISO 3166-2:LI'),
(u'Lithuania', u'LT', u'LTU', 440, u'ISO 3166-2:LT'),
(u'Luxembourg', u'LU', u'LUX', 442, u'ISO 3166-2:LU'),
(u'Macao', u'MO', u'MAC', 446, u'ISO 3166-2:MO'),
(u'Macedonia, the former Yugoslav Republic of',
u'MK',
u'MKD',
807,
u'ISO 3166-2:MK'),
(u'Madagascar', u'MG', u'MDG', 450, u'ISO 3166-2:MG'),
(u'Malawi', u'MW', u'MWI', 454, u'ISO 3166-2:MW'),
(u'Malaysia', u'MY', u'MYS', 458, u'ISO 3166-2:MY'),
(u'Maldives', u'MV', u'MDV', 462, u'ISO 3166-2:MV'),
(u'Mali', u'ML', u'MLI', 466, u'ISO 3166-2:ML'),
(u'Malta', u'MT', u'MLT', 470, u'ISO 3166-2:MT'),
(u'Marshall Islands', u'MH', u'MHL', 584, u'ISO 3166-2:MH'),
(u'Martinique', u'MQ', u'MTQ', 474, u'ISO 3166-2:MQ'),
(u'Mauritania', u'MR', u'MRT', 478, u'ISO 3166-2:MR'),
(u'Mauritius', u'MU', u'MUS', 480, u'ISO 3166-2:MU'),
(u'Mayotte', u'YT', u'MYT', 175, u'ISO 3166-2:YT'),
(u'Mexico', u'MX', u'MEX', 484, u'ISO 3166-2:MX'),
(u'Micronesia, Federated States of', u'FM', u'FSM', 583, u'ISO 3166-2:FM'),
(u'Moldova, Republic of', u'MD', u'MDA', 498, u'ISO 3166-2:MD'),
(u'Monaco', u'MC', u'MCO', 492, u'ISO 3166-2:MC'),
(u'Mongolia', u'MN', u'MNG', 496, u'ISO 3166-2:MN'),
(u'Montenegro', u'ME', u'MNE', 499, u'ISO 3166-2:ME'),
(u'Montserrat', u'MS', u'MSR', 500, u'ISO 3166-2:MS'),
(u'Morocco', u'MA', u'MAR', 504, u'ISO 3166-2:MA'),
(u'Mozambique', u'MZ', u'MOZ', 508, u'ISO 3166-2:MZ'),
(u'Myanmar', u'MM', u'MMR', 104, u'ISO 3166-2:MM'),
(u'Namibia', u'NA', u'NAM', 516, u'ISO 3166-2:NA'),
(u'Nauru', u'NR', u'NRU', 520, u'ISO 3166-2:NR'),
(u'Nepal', u'NP', u'NPL', 524, u'ISO 3166-2:NP'),
(u'Netherlands', u'NL', u'NLD', 528, u'ISO 3166-2:NL'),
(u'Netherlands Antilles', u'AN', u'ANT', 530, u'ISO 3166-2:AN'),
(u'New Caledonia', u'NC', u'NCL', 540, u'ISO 3166-2:NC'),
(u'New Zealand', u'NZ', u'NZL', 554, u'ISO 3166-2:NZ'),
(u'Nicaragua', u'NI', u'NIC', 558, u'ISO 3166-2:NI'),
(u'Niger', u'NE', u'NER', 562, u'ISO 3166-2:NE'),
(u'Nigeria', u'NG', u'NGA', 566, u'ISO 3166-2:NG'),
(u'Niue', u'NU', u'NIU', 570, u'ISO 3166-2:NU'),
(u'Norfolk Island', u'NF', u'NFK', 574, u'ISO 3166-2:NF'),
(u'Northern Mariana Islands', u'MP', u'MNP', 580, u'ISO 3166-2:MP'),
(u'Norway', u'NO', u'NOR', 578, u'ISO 3166-2:NO'),
(u'Oman', u'OM', u'OMN', 512, u'ISO 3166-2:OM'),
(u'Pakistan', u'PK', u'PAK', 586, u'ISO 3166-2:PK'),
(u'Palau', u'PW', u'PLW', 585, u'ISO 3166-2:PW'),
(u'Palestinian Territory, Occupied', u'PS', u'PSE', 275, u'ISO 3166-2:PS'),
(u'Panama', u'PA', u'PAN', 591, u'ISO 3166-2:PA'),
(u'Papua New Guinea', u'PG', u'PNG', 598, u'ISO 3166-2:PG'),
(u'Paraguay', u'PY', u'PRY', 600, u'ISO 3166-2:PY'),
(u'Peru', u'PE', u'PER', 604, u'ISO 3166-2:PE'),
(u'Philippines', u'PH', u'PHL', 608, u'ISO 3166-2:PH'),
(u'Pitcairn', u'PN', u'PCN', 612, u'ISO 3166-2:PN'),
(u'Poland', u'PL', u'POL', 616, u'ISO 3166-2:PL'),
(u'Portugal', u'PT', u'PRT', 620, u'ISO 3166-2:PT'),
(u'Puerto Rico', u'PR', u'PRI', 630, u'ISO 3166-2:PR'),
(u'Qatar', u'QA', u'QAT', 634, u'ISO 3166-2:QA'),
(u'Reunion R\xe9union', u'RE', u'REU', 638, u'ISO 3166-2:RE'),
(u'Romania', u'RO', u'ROU', 642, u'ISO 3166-2:RO'),
(u'Russian Federation', u'RU', u'RUS', 643, u'ISO 3166-2:RU'),
(u'Rwanda', u'RW', u'RWA', 646, u'ISO 3166-2:RW'),
(u'Saint Barth\xe9lemy', u'BL', u'BLM', 652, u'ISO 3166-2:BL'),
(u'Saint Helena', u'SH', u'SHN', 654, u'ISO 3166-2:SH'),
(u'Saint Kitts and Nevis', u'KN', u'KNA', 659, u'ISO 3166-2:KN'),
(u'Saint Lucia', u'LC', u'LCA', 662, u'ISO 3166-2:LC'),
(u'Saint Martin (French part)', u'MF', u'MAF', 663, u'ISO 3166-2:MF'),
(u'Saint Pierre and Miquelon', u'PM', u'SPM', 666, u'ISO 3166-2:PM'),
(u'Saint Vincent and the Grenadines', u'VC', u'VCT', 670, u'ISO 3166-2:VC'),
(u'Samoa', u'WS', u'WSM', 882, u'ISO 3166-2:WS'),
(u'San Marino', u'SM', u'SMR', 674, u'ISO 3166-2:SM'),
(u'Sao Tome and Principe', u'ST', u'STP', 678, u'ISO 3166-2:ST'),
(u'Saudi Arabia', u'SA', u'SAU', 682, u'ISO 3166-2:SA'),
(u'Senegal', u'SN', u'SEN', 686, u'ISO 3166-2:SN'),
(u'Serbia', u'RS', u'SRB', 688, u'ISO 3166-2:RS'),
(u'Seychelles', u'SC', u'SYC', 690, u'ISO 3166-2:SC'),
(u'Sierra Leone', u'SL', u'SLE', 694, u'ISO 3166-2:SL'),
(u'Singapore', u'SG', u'SGP', 702, u'ISO 3166-2:SG'),
(u'Slovakia', u'SK', u'SVK', 703, u'ISO 3166-2:SK'),
(u'Slovenia', u'SI', u'SVN', 705, u'ISO 3166-2:SI'),
(u'Solomon Islands', u'SB', u'SLB', 90, u'ISO 3166-2:SB'),
(u'Somalia', u'SO', u'SOM', 706, u'ISO 3166-2:SO'),
(u'South Africa', u'ZA', u'ZAF', 710, u'ISO 3166-2:ZA'),
(u'South Georgia and the South Sandwich Islands',
u'GS',
u'SGS',
239,
u'ISO 3166-2:GS'),
(u'Spain', u'ES', u'ESP', 724, u'ISO 3166-2:ES'),
(u'Sri Lanka', u'LK', u'LKA', 144, u'ISO 3166-2:LK'),
(u'Sudan', u'SD', u'SDN', 736, u'ISO 3166-2:SD'),
(u'Suriname', u'SR', u'SUR', 740, u'ISO 3166-2:SR'),
(u'Svalbard and Jan Mayen', u'SJ', u'SJM', 744, u'ISO 3166-2:SJ'),
(u'Swaziland', u'SZ', u'SWZ', 748, u'ISO 3166-2:SZ'),
(u'Sweden', u'SE', u'SWE', 752, u'ISO 3166-2:SE'),
(u'Switzerland', u'CH', u'CHE', 756, u'ISO 3166-2:CH'),
(u'Syrian Arab Republic', u'SY', u'SYR', 760, u'ISO 3166-2:SY'),
(u'Taiwan, Province of China', u'TW', u'TWN', 158, u'ISO 3166-2:TW'),
(u'Tajikistan', u'TJ', u'TJK', 762, u'ISO 3166-2:TJ'),
(u'Tanzania, United Republic of', u'TZ', u'TZA', 834, u'ISO 3166-2:TZ'),
(u'Thailand', u'TH', u'THA', 764, u'ISO 3166-2:TH'),
(u'Timor-Leste', u'TL', u'TLS', 626, u'ISO 3166-2:TL'),
(u'Togo', u'TG', u'TGO', 768, u'ISO 3166-2:TG'),
(u'Tokelau', u'TK', u'TKL', 772, u'ISO 3166-2:TK'),
(u'Tonga', u'TO', u'TON', 776, u'ISO 3166-2:TO'),
(u'Trinidad and Tobago', u'TT', u'TTO', 780, u'ISO 3166-2:TT'),
(u'Tunisia', u'TN', u'TUN', 788, u'ISO 3166-2:TN'),
(u'Turkey', u'TR', u'TUR', 792, u'ISO 3166-2:TR'),
(u'Turkmenistan', u'TM', u'TKM', 795, u'ISO 3166-2:TM'),
(u'Turks and Caicos Islands', u'TC', u'TCA', 796, u'ISO 3166-2:TC'),
(u'Tuvalu', u'TV', u'TUV', 798, u'ISO 3166-2:TV'),
(u'Uganda', u'UG', u'UGA', 800, u'ISO 3166-2:UG'),
(u'Ukraine', u'UA', u'UKR', 804, u'ISO 3166-2:UA'),
(u'United Arab Emirates', u'AE', u'ARE', 784, u'ISO 3166-2:AE'),
(u'United Kingdom', u'GB', u'GBR', 826, u'ISO 3166-2:GB'),
(u'United States', u'US', u'USA', 840, u'ISO 3166-2:US'),
(u'United States Minor Outlying Islands',
u'UM',
u'UMI',
581,
u'ISO 3166-2:UM'),
(u'Uruguay', u'UY', u'URY', 858, u'ISO 3166-2:UY'),
(u'Uzbekistan', u'UZ', u'UZB', 860, u'ISO 3166-2:UZ'),
(u'Vanuatu', u'VU', u'VUT', 548, u'ISO 3166-2:VU'),
(u'Venezuela', u'VE', u'VEN', 862, u'ISO 3166-2:VE'),
(u'Viet Nam', u'VN', u'VNM', 704, u'ISO 3166-2:VN'),
(u'Virgin Islands, British', u'VG', u'VGB', 92, u'ISO 3166-2:VG'),
(u'Virgin Islands, U.S.', u'VI', u'VIR', 850, u'ISO 3166-2:VI'),
(u'Wallis and Futuna', u'WF', u'WLF', 876, u'ISO 3166-2:WF'),
(u'Western Sahara', u'EH', u'ESH', 732, u'ISO 3166-2:EH'),
(u'Yemen', u'YE', u'YEM', 887, u'ISO 3166-2:YE'),
(u'Zambia', u'ZM', u'ZMB', 894, u'ISO 3166-2:ZM'),
(u'Zimbabwe', u'ZW', u'ZWE', 716, u'ISO 3166-2:ZW')]
COUNTRIES_SELECTION_LIST = [(u'Afghanistan', u'AF', u'AFG', 4),
(u'\xc5land Islands', u'AX', u'ALA', 248),
(u'Albania', u'AL', u'ALB', 8),
(u'Algeria', u'DZ', u'DZA', 12),
(u'American Samoa', u'AS', u'ASM', 16),
(u'Andorra', u'AD', u'AND', 20),
(u'Angola', u'AO', u'AGO', 24),
(u'Anguilla', u'AI', u'AIA', 660),
(u'Antarctica', u'AQ', u'ATA', 10),
(u'Antigua and Barbuda', u'AG', u'ATG', 28),
(u'Argentina', u'AR', u'ARG', 32),
(u'Armenia', u'AM', u'ARM', 51),
(u'Aruba', u'AW', u'ABW', 533),
(u'Australia', u'AU', u'AUS', 36),
(u'Austria', u'AT', u'AUT', 40),
(u'Azerbaijan', u'AZ', u'AZE', 31),
(u'Bahamas', u'BS', u'BHS', 44),
(u'Bahrain', u'BH', u'BHR', 48),
(u'Bangladesh', u'BD', u'BGD', 50),
(u'Barbados', u'BB', u'BRB', 52),
(u'Belarus', u'BY', u'BLR', 112),
(u'Belgium', u'BE', u'BEL', 56),
(u'Belize', u'BZ', u'BLZ', 84),
(u'Benin', u'BJ', u'BEN', 204),
(u'Bermuda', u'BM', u'BMU', 60),
(u'Bhutan', u'BT', u'BTN', 64),
(u'Bolivia', u'BO', u'BOL', 68),
(u'Bosnia and Herzegovina', u'BA', u'BIH', 70),
(u'Botswana', u'BW', u'BWA', 72),
(u'Bouvet Island', u'BV', u'BVT', 74),
(u'Brazil', u'BR', u'BRA', 76),
(u'British Indian Ocean Territory', u'IO', u'IOT', 86),
(u'Brunei Darussalam', u'BN', u'BRN', 96),
(u'Bulgaria', u'BG', u'BGR', 100),
(u'Burkina Faso', u'BF', u'BFA', 854),
(u'Burundi', u'BI', u'BDI', 108),
(u'Cambodia', u'KH', u'KHM', 116),
(u'Cameroon', u'CM', u'CMR', 120),
(u'Canada', u'CA', u'CAN', 124),
(u'Cape Verde', u'CV', u'CPV', 132),
(u'Cayman Islands', u'KY', u'CYM', 136),
(u'Central African Republic', u'CF', u'CAF', 140),
(u'Chad', u'TD', u'TCD', 148),
(u'Chile', u'CL', u'CHL', 152),
(u'China', u'CN', u'CHN', 156),
(u'Christmas Island', u'CX', u'CXR', 162),
(u'Cocos (Keeling) Islands', u'CC', u'CCK', 166),
(u'Colombia', u'CO', u'COL', 170),
(u'Comoros', u'KM', u'COM', 174),
(u'Congo', u'CG', u'COG', 178),
(u'Congo, Democratic Republic of the', u'CD', u'COD', 180),
(u'Cook Islands', u'CK', u'COK', 184),
(u'Costa Rica', u'CR', u'CRI', 188),
(u"Cote d'Ivoire C\xf4te d'Ivoire", u'CI', u'CIV', 384),
(u'Croatia', u'HR', u'HRV', 191),
(u'Cuba', u'CU', u'CUB', 192),
(u'Cyprus', u'CY', u'CYP', 196),
(u'Czech Republic', u'CZ', u'CZE', 203),
(u'Denmark', u'DK', u'DNK', 208),
(u'Djibouti', u'DJ', u'DJI', 262),
(u'Dominica', u'DM', u'DMA', 212),
(u'Dominican Republic', u'DO', u'DOM', 214),
(u'Ecuador', u'EC', u'ECU', 218),
(u'Egypt', u'EG', u'EGY', 818),
(u'El Salvador', u'SV', u'SLV', 222),
(u'Equatorial Guinea', u'GQ', u'GNQ', 226),
(u'Eritrea', u'ER', u'ERI', 232),
(u'Estonia', u'EE', u'EST', 233),
(u'Ethiopia', u'ET', u'ETH', 231),
(u'Falkland Islands (Malvinas)', u'FK', u'FLK', 238),
(u'Faroe Islands', u'FO', u'FRO', 234),
(u'Fiji', u'FJ', u'FJI', 242),
(u'Finland', u'FI', u'FIN', 246),
(u'France', u'FR', u'FRA', 250),
(u'French Guiana', u'GF', u'GUF', 254),
(u'French Polynesia', u'PF', u'PYF', 258),
(u'French Southern Territories', u'TF', u'ATF', 260),
(u'Gabon', u'GA', u'GAB', 266),
(u'Gambia', u'GM', u'GMB', 270),
(u'Georgia', u'GE', u'GEO', 268),
(u'Germany', u'DE', u'DEU', 276),
(u'Ghana', u'GH', u'GHA', 288),
(u'Gibraltar', u'GI', u'GIB', 292),
(u'Greece', u'GR', u'GRC', 300),
(u'Greenland', u'GL', u'GRL', 304),
(u'Grenada', u'GD', u'GRD', 308),
(u'Guadeloupe', u'GP', u'GLP', 312),
(u'Guam', u'GU', u'GUM', 316),
(u'Guatemala', u'GT', u'GTM', 320),
(u'Guernsey', u'GG', u'GGY', 831),
(u'Guinea', u'GN', u'GIN', 324),
(u'Guinea-Bissau', u'GW', u'GNB', 624),
(u'Guyana', u'GY', u'GUY', 328),
(u'Haiti', u'HT', u'HTI', 332),
(u'Heard Island and McDonald Islands', u'HM', u'HMD', 334),
(u'Holy See (Vatican City State)', u'VA', u'VAT', 336),
(u'Honduras', u'HN', u'HND', 340),
(u'Hong Kong', u'HK', u'HKG', 344),
(u'Hungary', u'HU', u'HUN', 348),
(u'Iceland', u'IS', u'ISL', 352),
(u'India', u'IN', u'IND', 356),
(u'Indonesia', u'ID', u'IDN', 360),
(u'Iran, Islamic Republic of', u'IR', u'IRN', 364),
(u'Iraq', u'IQ', u'IRQ', 368),
(u'Ireland', u'IE', u'IRL', 372),
(u'Isle of Man', u'IM', u'IMN', 833),
(u'Israel', u'IL', u'ISR', 376),
(u'Italy', u'IT', u'ITA', 380),
(u'Jamaica', u'JM', u'JAM', 388),
(u'Japan', u'JP', u'JPN', 392),
(u'Jersey', u'JE', u'JEY', 832),
(u'Jordan', u'JO', u'JOR', 400),
(u'Kazakhstan', u'KZ', u'KAZ', 398),
(u'Kenya', u'KE', u'KEN', 404),
(u'Kiribati', u'KI', u'KIR', 296),
(u"Korea, Democratic People's Republic of", u'KP', u'PRK', 408),
(u'Korea, Republic of', u'KR', u'KOR', 410),
(u'Kuwait', u'KW', u'KWT', 414),
(u'Kyrgyzstan', u'KG', u'KGZ', 417),
(u"Lao People's Democratic Republic", u'LA', u'LAO', 418),
(u'Latvia', u'LV', u'LVA', 428),
(u'Lebanon', u'LB', u'LBN', 422),
(u'Lesotho', u'LS', u'LSO', 426),
(u'Liberia', u'LR', u'LBR', 430),
(u'Libyan Arab Jamahiriya', u'LY', u'LBY', 434),
(u'Liechtenstein', u'LI', u'LIE', 438),
(u'Lithuania', u'LT', u'LTU', 440),
(u'Luxembourg', u'LU', u'LUX', 442),
(u'Macao', u'MO', u'MAC', 446),
(u'Macedonia, the former Yugoslav Republic of', u'MK', u'MKD', 807),
(u'Madagascar', u'MG', u'MDG', 450),
(u'Malawi', u'MW', u'MWI', 454),
(u'Malaysia', u'MY', u'MYS', 458),
(u'Maldives', u'MV', u'MDV', 462),
(u'Mali', u'ML', u'MLI', 466),
(u'Malta', u'MT', u'MLT', 470),
(u'Marshall Islands', u'MH', u'MHL', 584),
(u'Martinique', u'MQ', u'MTQ', 474),
(u'Mauritania', u'MR', u'MRT', 478),
(u'Mauritius', u'MU', u'MUS', 480),
(u'Mayotte', u'YT', u'MYT', 175),
(u'Mexico', u'MX', u'MEX', 484),
(u'Micronesia, Federated States of', u'FM', u'FSM', 583),
(u'Moldova, Republic of', u'MD', u'MDA', 498),
(u'Monaco', u'MC', u'MCO', 492),
(u'Mongolia', u'MN', u'MNG', 496),
(u'Montenegro', u'ME', u'MNE', 499),
(u'Montserrat', u'MS', u'MSR', 500),
(u'Morocco', u'MA', u'MAR', 504),
(u'Mozambique', u'MZ', u'MOZ', 508),
(u'Myanmar', u'MM', u'MMR', 104),
(u'Namibia', u'NA', u'NAM', 516),
(u'Nauru', u'NR', u'NRU', 520),
(u'Nepal', u'NP', u'NPL', 524),
(u'Netherlands', u'NL', u'NLD', 528),
(u'Netherlands Antilles', u'AN', u'ANT', 530),
(u'New Caledonia', u'NC', u'NCL', 540),
(u'New Zealand', u'NZ', u'NZL', 554),
(u'Nicaragua', u'NI', u'NIC', 558),
(u'Niger', u'NE', u'NER', 562),
(u'Nigeria', u'NG', u'NGA', 566),
(u'Niue', u'NU', u'NIU', 570),
(u'Norfolk Island', u'NF', u'NFK', 574),
(u'Northern Mariana Islands', u'MP', u'MNP', 580),
(u'Norway', u'NO', u'NOR', 578),
(u'Oman', u'OM', u'OMN', 512),
(u'Pakistan', u'PK', u'PAK', 586),
(u'Palau', u'PW', u'PLW', 585),
(u'Palestinian Territory, Occupied', u'PS', u'PSE', 275),
(u'Panama', u'PA', u'PAN', 591),
(u'Papua New Guinea', u'PG', u'PNG', 598),
(u'Paraguay', u'PY', u'PRY', 600),
(u'Peru', u'PE', u'PER', 604),
(u'Philippines', u'PH', u'PHL', 608),
(u'Pitcairn', u'PN', u'PCN', 612),
(u'Poland', u'PL', u'POL', 616),
(u'Portugal', u'PT', u'PRT', 620),
(u'Puerto Rico', u'PR', u'PRI', 630),
(u'Qatar', u'QA', u'QAT', 634),
(u'Reunion R\xe9union', u'RE', u'REU', 638),
(u'Romania', u'RO', u'ROU', 642),
(u'Russian Federation', u'RU', u'RUS', 643),
(u'Rwanda', u'RW', u'RWA', 646),
(u'Saint Barth\xe9lemy', u'BL', u'BLM', 652),
(u'Saint Helena', u'SH', u'SHN', 654),
(u'Saint Kitts and Nevis', u'KN', u'KNA', 659),
(u'Saint Lucia', u'LC', u'LCA', 662),
(u'Saint Martin (French part)', u'MF', u'MAF', 663),
(u'Saint Pierre and Miquelon', u'PM', u'SPM', 666),
(u'Saint Vincent and the Grenadines', u'VC', u'VCT', 670),
(u'Samoa', u'WS', u'WSM', 882),
(u'San Marino', u'SM', u'SMR', 674),
(u'Sao Tome and Principe', u'ST', u'STP', 678),
(u'Saudi Arabia', u'SA', u'SAU', 682),
(u'Senegal', u'SN', u'SEN', 686),
(u'Serbia', u'RS', u'SRB', 688),
(u'Seychelles', u'SC', u'SYC', 690),
(u'Sierra Leone', u'SL', u'SLE', 694),
(u'Singapore', u'SG', u'SGP', 702),
(u'Slovakia', u'SK', u'SVK', 703),
(u'Slovenia', u'SI', u'SVN', 705),
(u'Solomon Islands', u'SB', u'SLB', 90),
(u'Somalia', u'SO', u'SOM', 706),
(u'South Africa', u'ZA', u'ZAF', 710),
(u'South Georgia and the South Sandwich Islands', u'GS', u'SGS', 239),
(u'Spain', u'ES', u'ESP', 724),
(u'Sri Lanka', u'LK', u'LKA', 144),
(u'Sudan', u'SD', u'SDN', 736),
(u'Suriname', u'SR', u'SUR', 740),
(u'Svalbard and Jan Mayen', u'SJ', u'SJM', 744),
(u'Swaziland', u'SZ', u'SWZ', 748),
(u'Sweden', u'SE', u'SWE', 752),
(u'Switzerland', u'CH', u'CHE', 756),
(u'Syrian Arab Republic', u'SY', u'SYR', 760),
(u'Taiwan, Province of China', u'TW', u'TWN', 158),
(u'Tajikistan', u'TJ', u'TJK', 762),
(u'Tanzania, United Republic of', u'TZ', u'TZA', 834),
(u'Thailand', u'TH', u'THA', 764),
(u'Timor-Leste', u'TL', u'TLS', 626),
(u'Togo', u'TG', u'TGO', 768),
(u'Tokelau', u'TK', u'TKL', 772),
(u'Tonga', u'TO', u'TON', 776),
(u'Trinidad and Tobago', u'TT', u'TTO', 780),
(u'Tunisia', u'TN', u'TUN', 788),
(u'Turkey', u'TR', u'TUR', 792),
(u'Turkmenistan', u'TM', u'TKM', 795),
(u'Turks and Caicos Islands', u'TC', u'TCA', 796),
(u'Tuvalu', u'TV', u'TUV', 798),
(u'Uganda', u'UG', u'UGA', 800),
(u'Ukraine', u'UA', u'UKR', 804),
(u'United Arab Emirates', u'AE', u'ARE', 784),
(u'United Kingdom', u'GB', u'GBR', 826),
(u'United States', u'US', u'USA', 840),
(u'United States Minor Outlying Islands', u'UM', u'UMI', 581),
(u'Uruguay', u'UY', u'URY', 858),
(u'Uzbekistan', u'UZ', u'UZB', 860),
(u'Vanuatu', u'VU', u'VUT', 548),
(u'Venezuela', u'VE', u'VEN', 862),
(u'Viet Nam', u'VN', u'VNM', 704),
(u'Virgin Islands, British', u'VG', u'VGB', 92),
(u'Virgin Islands, U.S.', u'VI', u'VIR', 850),
(u'Wallis and Futuna', u'WF', u'WLF', 876),
(u'Western Sahara', u'EH', u'ESH', 732),
(u'Yemen', u'YE', u'YEM', 887),
(u'Zambia', u'ZM', u'ZMB', 894),
(u'Zimbabwe', u'ZW', u'ZWE', 716)]
# Country display names in ISO 3166-1 order, index-aligned with
# ISO_ALPHA_2_CODES, ISO_ALPHA_3_CODES and ISO_3166_2_CODES defined
# below (entry i of each list describes the same country).
# NOTE(review): auto-generated data; a few names carry a duplicated
# ASCII + accented form (e.g. u'Reunion R\xe9union',
# u"Cote d'Ivoire C\xf4te d'Ivoire") — an artifact of the generator.
# These strings are used as dict keys elsewhere in this module, so do
# not "fix" them here without regenerating the whole file.
COUNTRY_NAMES = [u'Afghanistan',
u'\xc5land Islands',
u'Albania',
u'Algeria',
u'American Samoa',
u'Andorra',
u'Angola',
u'Anguilla',
u'Antarctica',
u'Antigua and Barbuda',
u'Argentina',
u'Armenia',
u'Aruba',
u'Australia',
u'Austria',
u'Azerbaijan',
u'Bahamas',
u'Bahrain',
u'Bangladesh',
u'Barbados',
u'Belarus',
u'Belgium',
u'Belize',
u'Benin',
u'Bermuda',
u'Bhutan',
u'Bolivia',
u'Bosnia and Herzegovina',
u'Botswana',
u'Bouvet Island',
u'Brazil',
u'British Indian Ocean Territory',
u'Brunei Darussalam',
u'Bulgaria',
u'Burkina Faso',
u'Burundi',
u'Cambodia',
u'Cameroon',
u'Canada',
u'Cape Verde',
u'Cayman Islands',
u'Central African Republic',
u'Chad',
u'Chile',
u'China',
u'Christmas Island',
u'Cocos (Keeling) Islands',
u'Colombia',
u'Comoros',
u'Congo',
u'Congo, Democratic Republic of the',
u'Cook Islands',
u'Costa Rica',
u"Cote d'Ivoire C\xf4te d'Ivoire",
u'Croatia',
u'Cuba',
u'Cyprus',
u'Czech Republic',
u'Denmark',
u'Djibouti',
u'Dominica',
u'Dominican Republic',
u'Ecuador',
u'Egypt',
u'El Salvador',
u'Equatorial Guinea',
u'Eritrea',
u'Estonia',
u'Ethiopia',
u'Falkland Islands (Malvinas)',
u'Faroe Islands',
u'Fiji',
u'Finland',
u'France',
u'French Guiana',
u'French Polynesia',
u'French Southern Territories',
u'Gabon',
u'Gambia',
u'Georgia',
u'Germany',
u'Ghana',
u'Gibraltar',
u'Greece',
u'Greenland',
u'Grenada',
u'Guadeloupe',
u'Guam',
u'Guatemala',
u'Guernsey',
u'Guinea',
u'Guinea-Bissau',
u'Guyana',
u'Haiti',
u'Heard Island and McDonald Islands',
u'Holy See (Vatican City State)',
u'Honduras',
u'Hong Kong',
u'Hungary',
u'Iceland',
u'India',
u'Indonesia',
u'Iran, Islamic Republic of',
u'Iraq',
u'Ireland',
u'Isle of Man',
u'Israel',
u'Italy',
u'Jamaica',
u'Japan',
u'Jersey',
u'Jordan',
u'Kazakhstan',
u'Kenya',
u'Kiribati',
u"Korea, Democratic People's Republic of",
u'Korea, Republic of',
u'Kuwait',
u'Kyrgyzstan',
u"Lao People's Democratic Republic",
u'Latvia',
u'Lebanon',
u'Lesotho',
u'Liberia',
u'Libyan Arab Jamahiriya',
u'Liechtenstein',
u'Lithuania',
u'Luxembourg',
u'Macao',
u'Macedonia, the former Yugoslav Republic of',
u'Madagascar',
u'Malawi',
u'Malaysia',
u'Maldives',
u'Mali',
u'Malta',
u'Marshall Islands',
u'Martinique',
u'Mauritania',
u'Mauritius',
u'Mayotte',
u'Mexico',
u'Micronesia, Federated States of',
u'Moldova, Republic of',
u'Monaco',
u'Mongolia',
u'Montenegro',
u'Montserrat',
u'Morocco',
u'Mozambique',
u'Myanmar',
u'Namibia',
u'Nauru',
u'Nepal',
u'Netherlands',
u'Netherlands Antilles',
u'New Caledonia',
u'New Zealand',
u'Nicaragua',
u'Niger',
u'Nigeria',
u'Niue',
u'Norfolk Island',
u'Northern Mariana Islands',
u'Norway',
u'Oman',
u'Pakistan',
u'Palau',
u'Palestinian Territory, Occupied',
u'Panama',
u'Papua New Guinea',
u'Paraguay',
u'Peru',
u'Philippines',
u'Pitcairn',
u'Poland',
u'Portugal',
u'Puerto Rico',
u'Qatar',
u'Reunion R\xe9union',
u'Romania',
u'Russian Federation',
u'Rwanda',
u'Saint Barth\xe9lemy',
u'Saint Helena',
u'Saint Kitts and Nevis',
u'Saint Lucia',
u'Saint Martin (French part)',
u'Saint Pierre and Miquelon',
u'Saint Vincent and the Grenadines',
u'Samoa',
u'San Marino',
u'Sao Tome and Principe',
u'Saudi Arabia',
u'Senegal',
u'Serbia',
u'Seychelles',
u'Sierra Leone',
u'Singapore',
u'Slovakia',
u'Slovenia',
u'Solomon Islands',
u'Somalia',
u'South Africa',
u'South Georgia and the South Sandwich Islands',
u'Spain',
u'Sri Lanka',
u'Sudan',
u'Suriname',
u'Svalbard and Jan Mayen',
u'Swaziland',
u'Sweden',
u'Switzerland',
u'Syrian Arab Republic',
u'Taiwan, Province of China',
u'Tajikistan',
u'Tanzania, United Republic of',
u'Thailand',
u'Timor-Leste',
u'Togo',
u'Tokelau',
u'Tonga',
u'Trinidad and Tobago',
u'Tunisia',
u'Turkey',
u'Turkmenistan',
u'Turks and Caicos Islands',
u'Tuvalu',
u'Uganda',
u'Ukraine',
u'United Arab Emirates',
u'United Kingdom',
u'United States',
u'United States Minor Outlying Islands',
u'Uruguay',
u'Uzbekistan',
u'Vanuatu',
u'Venezuela',
u'Viet Nam',
u'Virgin Islands, British',
u'Virgin Islands, U.S.',
u'Wallis and Futuna',
u'Western Sahara',
u'Yemen',
u'Zambia',
u'Zimbabwe']
# ISO 3166-1 alpha-2 (two-letter) country codes, index-aligned with
# COUNTRY_NAMES above: ISO_ALPHA_2_CODES[i] is the alpha-2 code for
# COUNTRY_NAMES[i].
ISO_ALPHA_2_CODES = [u'AF',
u'AX',
u'AL',
u'DZ',
u'AS',
u'AD',
u'AO',
u'AI',
u'AQ',
u'AG',
u'AR',
u'AM',
u'AW',
u'AU',
u'AT',
u'AZ',
u'BS',
u'BH',
u'BD',
u'BB',
u'BY',
u'BE',
u'BZ',
u'BJ',
u'BM',
u'BT',
u'BO',
u'BA',
u'BW',
u'BV',
u'BR',
u'IO',
u'BN',
u'BG',
u'BF',
u'BI',
u'KH',
u'CM',
u'CA',
u'CV',
u'KY',
u'CF',
u'TD',
u'CL',
u'CN',
u'CX',
u'CC',
u'CO',
u'KM',
u'CG',
u'CD',
u'CK',
u'CR',
u'CI',
u'HR',
u'CU',
u'CY',
u'CZ',
u'DK',
u'DJ',
u'DM',
u'DO',
u'EC',
u'EG',
u'SV',
u'GQ',
u'ER',
u'EE',
u'ET',
u'FK',
u'FO',
u'FJ',
u'FI',
u'FR',
u'GF',
u'PF',
u'TF',
u'GA',
u'GM',
u'GE',
u'DE',
u'GH',
u'GI',
u'GR',
u'GL',
u'GD',
u'GP',
u'GU',
u'GT',
u'GG',
u'GN',
u'GW',
u'GY',
u'HT',
u'HM',
u'VA',
u'HN',
u'HK',
u'HU',
u'IS',
u'IN',
u'ID',
u'IR',
u'IQ',
u'IE',
u'IM',
u'IL',
u'IT',
u'JM',
u'JP',
u'JE',
u'JO',
u'KZ',
u'KE',
u'KI',
u'KP',
u'KR',
u'KW',
u'KG',
u'LA',
u'LV',
u'LB',
u'LS',
u'LR',
u'LY',
u'LI',
u'LT',
u'LU',
u'MO',
u'MK',
u'MG',
u'MW',
u'MY',
u'MV',
u'ML',
u'MT',
u'MH',
u'MQ',
u'MR',
u'MU',
u'YT',
u'MX',
u'FM',
u'MD',
u'MC',
u'MN',
u'ME',
u'MS',
u'MA',
u'MZ',
u'MM',
u'NA',
u'NR',
u'NP',
u'NL',
u'AN',
u'NC',
u'NZ',
u'NI',
u'NE',
u'NG',
u'NU',
u'NF',
u'MP',
u'NO',
u'OM',
u'PK',
u'PW',
u'PS',
u'PA',
u'PG',
u'PY',
u'PE',
u'PH',
u'PN',
u'PL',
u'PT',
u'PR',
u'QA',
u'RE',
u'RO',
u'RU',
u'RW',
u'BL',
u'SH',
u'KN',
u'LC',
u'MF',
u'PM',
u'VC',
u'WS',
u'SM',
u'ST',
u'SA',
u'SN',
u'RS',
u'SC',
u'SL',
u'SG',
u'SK',
u'SI',
u'SB',
u'SO',
u'ZA',
u'GS',
u'ES',
u'LK',
u'SD',
u'SR',
u'SJ',
u'SZ',
u'SE',
u'CH',
u'SY',
u'TW',
u'TJ',
u'TZ',
u'TH',
u'TL',
u'TG',
u'TK',
u'TO',
u'TT',
u'TN',
u'TR',
u'TM',
u'TC',
u'TV',
u'UG',
u'UA',
u'AE',
u'GB',
u'US',
u'UM',
u'UY',
u'UZ',
u'VU',
u'VE',
u'VN',
u'VG',
u'VI',
u'WF',
u'EH',
u'YE',
u'ZM',
u'ZW']
# ISO 3166-1 alpha-3 (three-letter) country codes, index-aligned with
# COUNTRY_NAMES and ISO_ALPHA_2_CODES above: ISO_ALPHA_3_CODES[i] is
# the alpha-3 code for COUNTRY_NAMES[i].
ISO_ALPHA_3_CODES = [u'AFG',
u'ALA',
u'ALB',
u'DZA',
u'ASM',
u'AND',
u'AGO',
u'AIA',
u'ATA',
u'ATG',
u'ARG',
u'ARM',
u'ABW',
u'AUS',
u'AUT',
u'AZE',
u'BHS',
u'BHR',
u'BGD',
u'BRB',
u'BLR',
u'BEL',
u'BLZ',
u'BEN',
u'BMU',
u'BTN',
u'BOL',
u'BIH',
u'BWA',
u'BVT',
u'BRA',
u'IOT',
u'BRN',
u'BGR',
u'BFA',
u'BDI',
u'KHM',
u'CMR',
u'CAN',
u'CPV',
u'CYM',
u'CAF',
u'TCD',
u'CHL',
u'CHN',
u'CXR',
u'CCK',
u'COL',
u'COM',
u'COG',
u'COD',
u'COK',
u'CRI',
u'CIV',
u'HRV',
u'CUB',
u'CYP',
u'CZE',
u'DNK',
u'DJI',
u'DMA',
u'DOM',
u'ECU',
u'EGY',
u'SLV',
u'GNQ',
u'ERI',
u'EST',
u'ETH',
u'FLK',
u'FRO',
u'FJI',
u'FIN',
u'FRA',
u'GUF',
u'PYF',
u'ATF',
u'GAB',
u'GMB',
u'GEO',
u'DEU',
u'GHA',
u'GIB',
u'GRC',
u'GRL',
u'GRD',
u'GLP',
u'GUM',
u'GTM',
u'GGY',
u'GIN',
u'GNB',
u'GUY',
u'HTI',
u'HMD',
u'VAT',
u'HND',
u'HKG',
u'HUN',
u'ISL',
u'IND',
u'IDN',
u'IRN',
u'IRQ',
u'IRL',
u'IMN',
u'ISR',
u'ITA',
u'JAM',
u'JPN',
u'JEY',
u'JOR',
u'KAZ',
u'KEN',
u'KIR',
u'PRK',
u'KOR',
u'KWT',
u'KGZ',
u'LAO',
u'LVA',
u'LBN',
u'LSO',
u'LBR',
u'LBY',
u'LIE',
u'LTU',
u'LUX',
u'MAC',
u'MKD',
u'MDG',
u'MWI',
u'MYS',
u'MDV',
u'MLI',
u'MLT',
u'MHL',
u'MTQ',
u'MRT',
u'MUS',
u'MYT',
u'MEX',
u'FSM',
u'MDA',
u'MCO',
u'MNG',
u'MNE',
u'MSR',
u'MAR',
u'MOZ',
u'MMR',
u'NAM',
u'NRU',
u'NPL',
u'NLD',
u'ANT',
u'NCL',
u'NZL',
u'NIC',
u'NER',
u'NGA',
u'NIU',
u'NFK',
u'MNP',
u'NOR',
u'OMN',
u'PAK',
u'PLW',
u'PSE',
u'PAN',
u'PNG',
u'PRY',
u'PER',
u'PHL',
u'PCN',
u'POL',
u'PRT',
u'PRI',
u'QAT',
u'REU',
u'ROU',
u'RUS',
u'RWA',
u'BLM',
u'SHN',
u'KNA',
u'LCA',
u'MAF',
u'SPM',
u'VCT',
u'WSM',
u'SMR',
u'STP',
u'SAU',
u'SEN',
u'SRB',
u'SYC',
u'SLE',
u'SGP',
u'SVK',
u'SVN',
u'SLB',
u'SOM',
u'ZAF',
u'SGS',
u'ESP',
u'LKA',
u'SDN',
u'SUR',
u'SJM',
u'SWZ',
u'SWE',
u'CHE',
u'SYR',
u'TWN',
u'TJK',
u'TZA',
u'THA',
u'TLS',
u'TGO',
u'TKL',
u'TON',
u'TTO',
u'TUN',
u'TUR',
u'TKM',
u'TCA',
u'TUV',
u'UGA',
u'UKR',
u'ARE',
u'GBR',
u'USA',
u'UMI',
u'URY',
u'UZB',
u'VUT',
u'VEN',
u'VNM',
u'VGB',
u'VIR',
u'WLF',
u'ESH',
u'YEM',
u'ZMB',
u'ZWE']
# Full ISO 3166-2 identifiers, one per country, index-aligned with
# COUNTRY_NAMES / ISO_ALPHA_2_CODES above.  Each entry in the original
# literal was exactly u'ISO 3166-2:' followed by the aligned alpha-2
# code, so the list is derived here instead of repeating all 246
# strings — keeping it automatically consistent with ISO_ALPHA_2_CODES.
ISO_3166_2_CODES = [u'ISO 3166-2:' + _alpha2 for _alpha2 in ISO_ALPHA_2_CODES]
# Bidirectional lookup table: alpha-2 code -> country name AND
# country name -> alpha-2 code (492 entries; the two key spaces never
# collide because codes are always exactly two uppercase letters).
# The original literal was precisely the union of the forward and
# reverse pairings of the two index-aligned lists above, written in
# sorted-key order; it is rebuilt here from those lists, and sorted()
# reproduces the literal's insertion order.
COUNTRY_NAME_ISO_ALPHA_2_TABLE = dict(
    sorted(list(zip(ISO_ALPHA_2_CODES, COUNTRY_NAMES)) +
           list(zip(COUNTRY_NAMES, ISO_ALPHA_2_CODES))))
COUNTRY_NAME_ISO_ALPHA_3_TABLE = {u'ABW': u'Aruba',
u'AFG': u'Afghanistan',
u'AGO': u'Angola',
u'AIA': u'Anguilla',
u'ALA': u'\xc5land Islands',
u'ALB': u'Albania',
u'AND': u'Andorra',
u'ANT': u'Netherlands Antilles',
u'ARE': u'United Arab Emirates',
u'ARG': u'Argentina',
u'ARM': u'Armenia',
u'ASM': u'American Samoa',
u'ATA': u'Antarctica',
u'ATF': u'French Southern Territories',
u'ATG': u'Antigua and Barbuda',
u'AUS': u'Australia',
u'AUT': u'Austria',
u'AZE': u'Azerbaijan',
u'Afghanistan': u'AFG',
u'Albania': u'ALB',
u'Algeria': u'DZA',
u'American Samoa': u'ASM',
u'Andorra': u'AND',
u'Angola': u'AGO',
u'Anguilla': u'AIA',
u'Antarctica': u'ATA',
u'Antigua and Barbuda': u'ATG',
u'Argentina': u'ARG',
u'Armenia': u'ARM',
u'Aruba': u'ABW',
u'Australia': u'AUS',
u'Austria': u'AUT',
u'Azerbaijan': u'AZE',
u'BDI': u'Burundi',
u'BEL': u'Belgium',
u'BEN': u'Benin',
u'BFA': u'Burkina Faso',
u'BGD': u'Bangladesh',
u'BGR': u'Bulgaria',
u'BHR': u'Bahrain',
u'BHS': u'Bahamas',
u'BIH': u'Bosnia and Herzegovina',
u'BLM': u'Saint Barth\xe9lemy',
u'BLR': u'Belarus',
u'BLZ': u'Belize',
u'BMU': u'Bermuda',
u'BOL': u'Bolivia',
u'BRA': u'Brazil',
u'BRB': u'Barbados',
u'BRN': u'Brunei Darussalam',
u'BTN': u'Bhutan',
u'BVT': u'Bouvet Island',
u'BWA': u'Botswana',
u'Bahamas': u'BHS',
u'Bahrain': u'BHR',
u'Bangladesh': u'BGD',
u'Barbados': u'BRB',
u'Belarus': u'BLR',
u'Belgium': u'BEL',
u'Belize': u'BLZ',
u'Benin': u'BEN',
u'Bermuda': u'BMU',
u'Bhutan': u'BTN',
u'Bolivia': u'BOL',
u'Bosnia and Herzegovina': u'BIH',
u'Botswana': u'BWA',
u'Bouvet Island': u'BVT',
u'Brazil': u'BRA',
u'British Indian Ocean Territory': u'IOT',
u'Brunei Darussalam': u'BRN',
u'Bulgaria': u'BGR',
u'Burkina Faso': u'BFA',
u'Burundi': u'BDI',
u'CAF': u'Central African Republic',
u'CAN': u'Canada',
u'CCK': u'Cocos (Keeling) Islands',
u'CHE': u'Switzerland',
u'CHL': u'Chile',
u'CHN': u'China',
u'CIV': u"Cote d'Ivoire C\xf4te d'Ivoire",
u'CMR': u'Cameroon',
u'COD': u'Congo, Democratic Republic of the',
u'COG': u'Congo',
u'COK': u'Cook Islands',
u'COL': u'Colombia',
u'COM': u'Comoros',
u'CPV': u'Cape Verde',
u'CRI': u'Costa Rica',
u'CUB': u'Cuba',
u'CXR': u'Christmas Island',
u'CYM': u'Cayman Islands',
u'CYP': u'Cyprus',
u'CZE': u'Czech Republic',
u'Cambodia': u'KHM',
u'Cameroon': u'CMR',
u'Canada': u'CAN',
u'Cape Verde': u'CPV',
u'Cayman Islands': u'CYM',
u'Central African Republic': u'CAF',
u'Chad': u'TCD',
u'Chile': u'CHL',
u'China': u'CHN',
u'Christmas Island': u'CXR',
u'Cocos (Keeling) Islands': u'CCK',
u'Colombia': u'COL',
u'Comoros': u'COM',
u'Congo': u'COG',
u'Congo, Democratic Republic of the': u'COD',
u'Cook Islands': u'COK',
u'Costa Rica': u'CRI',
u"Cote d'Ivoire C\xf4te d'Ivoire": u'CIV',
u'Croatia': u'HRV',
u'Cuba': u'CUB',
u'Cyprus': u'CYP',
u'Czech Republic': u'CZE',
u'DEU': u'Germany',
u'DJI': u'Djibouti',
u'DMA': u'Dominica',
u'DNK': u'Denmark',
u'DOM': u'Dominican Republic',
u'DZA': u'Algeria',
u'Denmark': u'DNK',
u'Djibouti': u'DJI',
u'Dominica': u'DMA',
u'Dominican Republic': u'DOM',
u'ECU': u'Ecuador',
u'EGY': u'Egypt',
u'ERI': u'Eritrea',
u'ESH': u'Western Sahara',
u'ESP': u'Spain',
u'EST': u'Estonia',
u'ETH': u'Ethiopia',
u'Ecuador': u'ECU',
u'Egypt': u'EGY',
u'El Salvador': u'SLV',
u'Equatorial Guinea': u'GNQ',
u'Eritrea': u'ERI',
u'Estonia': u'EST',
u'Ethiopia': u'ETH',
u'FIN': u'Finland',
u'FJI': u'Fiji',
u'FLK': u'Falkland Islands (Malvinas)',
u'FRA': u'France',
u'FRO': u'Faroe Islands',
u'FSM': u'Micronesia, Federated States of',
u'Falkland Islands (Malvinas)': u'FLK',
u'Faroe Islands': u'FRO',
u'Fiji': u'FJI',
u'Finland': u'FIN',
u'France': u'FRA',
u'French Guiana': u'GUF',
u'French Polynesia': u'PYF',
u'French Southern Territories': u'ATF',
u'GAB': u'Gabon',
u'GBR': u'United Kingdom',
u'GEO': u'Georgia',
u'GGY': u'Guernsey',
u'GHA': u'Ghana',
u'GIB': u'Gibraltar',
u'GIN': u'Guinea',
u'GLP': u'Guadeloupe',
u'GMB': u'Gambia',
u'GNB': u'Guinea-Bissau',
u'GNQ': u'Equatorial Guinea',
u'GRC': u'Greece',
u'GRD': u'Grenada',
u'GRL': u'Greenland',
u'GTM': u'Guatemala',
u'GUF': u'French Guiana',
u'GUM': u'Guam',
u'GUY': u'Guyana',
u'Gabon': u'GAB',
u'Gambia': u'GMB',
u'Georgia': u'GEO',
u'Germany': u'DEU',
u'Ghana': u'GHA',
u'Gibraltar': u'GIB',
u'Greece': u'GRC',
u'Greenland': u'GRL',
u'Grenada': u'GRD',
u'Guadeloupe': u'GLP',
u'Guam': u'GUM',
u'Guatemala': u'GTM',
u'Guernsey': u'GGY',
u'Guinea': u'GIN',
u'Guinea-Bissau': u'GNB',
u'Guyana': u'GUY',
u'HKG': u'Hong Kong',
u'HMD': u'Heard Island and McDonald Islands',
u'HND': u'Honduras',
u'HRV': u'Croatia',
u'HTI': u'Haiti',
u'HUN': u'Hungary',
u'Haiti': u'HTI',
u'Heard Island and McDonald Islands': u'HMD',
u'Holy See (Vatican City State)': u'VAT',
u'Honduras': u'HND',
u'Hong Kong': u'HKG',
u'Hungary': u'HUN',
u'IDN': u'Indonesia',
u'IMN': u'Isle of Man',
u'IND': u'India',
u'IOT': u'British Indian Ocean Territory',
u'IRL': u'Ireland',
u'IRN': u'Iran, Islamic Republic of',
u'IRQ': u'Iraq',
u'ISL': u'Iceland',
u'ISR': u'Israel',
u'ITA': u'Italy',
u'Iceland': u'ISL',
u'India': u'IND',
u'Indonesia': u'IDN',
u'Iran, Islamic Republic of': u'IRN',
u'Iraq': u'IRQ',
u'Ireland': u'IRL',
u'Isle of Man': u'IMN',
u'Israel': u'ISR',
u'Italy': u'ITA',
u'JAM': u'Jamaica',
u'JEY': u'Jersey',
u'JOR': u'Jordan',
u'JPN': u'Japan',
u'Jamaica': u'JAM',
u'Japan': u'JPN',
u'Jersey': u'JEY',
u'Jordan': u'JOR',
u'KAZ': u'Kazakhstan',
u'KEN': u'Kenya',
u'KGZ': u'Kyrgyzstan',
u'KHM': u'Cambodia',
u'KIR': u'Kiribati',
u'KNA': u'Saint Kitts and Nevis',
u'KOR': u'Korea, Republic of',
u'KWT': u'Kuwait',
u'Kazakhstan': u'KAZ',
u'Kenya': u'KEN',
u'Kiribati': u'KIR',
u"Korea, Democratic People's Republic of": u'PRK',
u'Korea, Republic of': u'KOR',
u'Kuwait': u'KWT',
u'Kyrgyzstan': u'KGZ',
u'LAO': u"Lao People's Democratic Republic",
u'LBN': u'Lebanon',
u'LBR': u'Liberia',
u'LBY': u'Libyan Arab Jamahiriya',
u'LCA': u'Saint Lucia',
u'LIE': u'Liechtenstein',
u'LKA': u'Sri Lanka',
u'LSO': u'Lesotho',
u'LTU': u'Lithuania',
u'LUX': u'Luxembourg',
u'LVA': u'Latvia',
u"Lao People's Democratic Republic": u'LAO',
u'Latvia': u'LVA',
u'Lebanon': u'LBN',
u'Lesotho': u'LSO',
u'Liberia': u'LBR',
u'Libyan Arab Jamahiriya': u'LBY',
u'Liechtenstein': u'LIE',
u'Lithuania': u'LTU',
u'Luxembourg': u'LUX',
u'MAC': u'Macao',
u'MAF': u'Saint Martin (French part)',
u'MAR': u'Morocco',
u'MCO': u'Monaco',
u'MDA': u'Moldova, Republic of',
u'MDG': u'Madagascar',
u'MDV': u'Maldives',
u'MEX': u'Mexico',
u'MHL': u'Marshall Islands',
u'MKD': u'Macedonia, the former Yugoslav Republic of',
u'MLI': u'Mali',
u'MLT': u'Malta',
u'MMR': u'Myanmar',
u'MNE': u'Montenegro',
u'MNG': u'Mongolia',
u'MNP': u'Northern Mariana Islands',
u'MOZ': u'Mozambique',
u'MRT': u'Mauritania',
u'MSR': u'Montserrat',
u'MTQ': u'Martinique',
u'MUS': u'Mauritius',
u'MWI': u'Malawi',
u'MYS': u'Malaysia',
u'MYT': u'Mayotte',
u'Macao': u'MAC',
u'Macedonia, the former Yugoslav Republic of': u'MKD',
u'Madagascar': u'MDG',
u'Malawi': u'MWI',
u'Malaysia': u'MYS',
u'Maldives': u'MDV',
u'Mali': u'MLI',
u'Malta': u'MLT',
u'Marshall Islands': u'MHL',
u'Martinique': u'MTQ',
u'Mauritania': u'MRT',
u'Mauritius': u'MUS',
u'Mayotte': u'MYT',
u'Mexico': u'MEX',
u'Micronesia, Federated States of': u'FSM',
u'Moldova, Republic of': u'MDA',
u'Monaco': u'MCO',
u'Mongolia': u'MNG',
u'Montenegro': u'MNE',
u'Montserrat': u'MSR',
u'Morocco': u'MAR',
u'Mozambique': u'MOZ',
u'Myanmar': u'MMR',
u'NAM': u'Namibia',
u'NCL': u'New Caledonia',
u'NER': u'Niger',
u'NFK': u'Norfolk Island',
u'NGA': u'Nigeria',
u'NIC': u'Nicaragua',
u'NIU': u'Niue',
u'NLD': u'Netherlands',
u'NOR': u'Norway',
u'NPL': u'Nepal',
u'NRU': u'Nauru',
u'NZL': u'New Zealand',
u'Namibia': u'NAM',
u'Nauru': u'NRU',
u'Nepal': u'NPL',
u'Netherlands': u'NLD',
u'Netherlands Antilles': u'ANT',
u'New Caledonia': u'NCL',
u'New Zealand': u'NZL',
u'Nicaragua': u'NIC',
u'Niger': u'NER',
u'Nigeria': u'NGA',
u'Niue': u'NIU',
u'Norfolk Island': u'NFK',
u'Northern Mariana Islands': u'MNP',
u'Norway': u'NOR',
u'OMN': u'Oman',
u'Oman': u'OMN',
u'PAK': u'Pakistan',
u'PAN': u'Panama',
u'PCN': u'Pitcairn',
u'PER': u'Peru',
u'PHL': u'Philippines',
u'PLW': u'Palau',
u'PNG': u'Papua New Guinea',
u'POL': u'Poland',
u'PRI': u'Puerto Rico',
u'PRK': u"Korea, Democratic People's Republic of",
u'PRT': u'Portugal',
u'PRY': u'Paraguay',
u'PSE': u'Palestinian Territory, Occupied',
u'PYF': u'French Polynesia',
u'Pakistan': u'PAK',
u'Palau': u'PLW',
u'Palestinian Territory, Occupied': u'PSE',
u'Panama': u'PAN',
u'Papua New Guinea': u'PNG',
u'Paraguay': u'PRY',
u'Peru': u'PER',
u'Philippines': u'PHL',
u'Pitcairn': u'PCN',
u'Poland': u'POL',
u'Portugal': u'PRT',
u'Puerto Rico': u'PRI',
u'QAT': u'Qatar',
u'Qatar': u'QAT',
u'REU': u'Reunion R\xe9union',
u'ROU': u'Romania',
u'RUS': u'Russian Federation',
u'RWA': u'Rwanda',
u'Reunion R\xe9union': u'REU',
u'Romania': u'ROU',
u'Russian Federation': u'RUS',
u'Rwanda': u'RWA',
u'SAU': u'Saudi Arabia',
u'SDN': u'Sudan',
u'SEN': u'Senegal',
u'SGP': u'Singapore',
u'SGS': u'South Georgia and the South Sandwich Islands',
u'SHN': u'Saint Helena',
u'SJM': u'Svalbard and Jan Mayen',
u'SLB': u'Solomon Islands',
u'SLE': u'Sierra Leone',
u'SLV': u'El Salvador',
u'SMR': u'San Marino',
u'SOM': u'Somalia',
u'SPM': u'Saint Pierre and Miquelon',
u'SRB': u'Serbia',
u'STP': u'Sao Tome and Principe',
u'SUR': u'Suriname',
u'SVK': u'Slovakia',
u'SVN': u'Slovenia',
u'SWE': u'Sweden',
u'SWZ': u'Swaziland',
u'SYC': u'Seychelles',
u'SYR': u'Syrian Arab Republic',
u'Saint Barth\xe9lemy': u'BLM',
u'Saint Helena': u'SHN',
u'Saint Kitts and Nevis': u'KNA',
u'Saint Lucia': u'LCA',
u'Saint Martin (French part)': u'MAF',
u'Saint Pierre and Miquelon': u'SPM',
u'Saint Vincent and the Grenadines': u'VCT',
u'Samoa': u'WSM',
u'San Marino': u'SMR',
u'Sao Tome and Principe': u'STP',
u'Saudi Arabia': u'SAU',
u'Senegal': u'SEN',
u'Serbia': u'SRB',
u'Seychelles': u'SYC',
u'Sierra Leone': u'SLE',
u'Singapore': u'SGP',
u'Slovakia': u'SVK',
u'Slovenia': u'SVN',
u'Solomon Islands': u'SLB',
u'Somalia': u'SOM',
u'South Africa': u'ZAF',
u'South Georgia and the South Sandwich Islands': u'SGS',
u'Spain': u'ESP',
u'Sri Lanka': u'LKA',
u'Sudan': u'SDN',
u'Suriname': u'SUR',
u'Svalbard and Jan Mayen': u'SJM',
u'Swaziland': u'SWZ',
u'Sweden': u'SWE',
u'Switzerland': u'CHE',
u'Syrian Arab Republic': u'SYR',
u'TCA': u'Turks and Caicos Islands',
u'TCD': u'Chad',
u'TGO': u'Togo',
u'THA': u'Thailand',
u'TJK': u'Tajikistan',
u'TKL': u'Tokelau',
u'TKM': u'Turkmenistan',
u'TLS': u'Timor-Leste',
u'TON': u'Tonga',
u'TTO': u'Trinidad and Tobago',
u'TUN': u'Tunisia',
u'TUR': u'Turkey',
u'TUV': u'Tuvalu',
u'TWN': u'Taiwan, Province of China',
u'TZA': u'Tanzania, United Republic of',
u'Taiwan, Province of China': u'TWN',
u'Tajikistan': u'TJK',
u'Tanzania, United Republic of': u'TZA',
u'Thailand': u'THA',
u'Timor-Leste': u'TLS',
u'Togo': u'TGO',
u'Tokelau': u'TKL',
u'Tonga': u'TON',
u'Trinidad and Tobago': u'TTO',
u'Tunisia': u'TUN',
u'Turkey': u'TUR',
u'Turkmenistan': u'TKM',
u'Turks and Caicos Islands': u'TCA',
u'Tuvalu': u'TUV',
u'UGA': u'Uganda',
u'UKR': u'Ukraine',
u'UMI': u'United States Minor Outlying Islands',
u'URY': u'Uruguay',
u'USA': u'United States',
u'UZB': u'Uzbekistan',
u'Uganda': u'UGA',
u'Ukraine': u'UKR',
u'United Arab Emirates': u'ARE',
u'United Kingdom': u'GBR',
u'United States': u'USA',
u'United States Minor Outlying Islands': u'UMI',
u'Uruguay': u'URY',
u'Uzbekistan': u'UZB',
u'VAT': u'Holy See (Vatican City State)',
u'VCT': u'Saint Vincent and the Grenadines',
u'VEN': u'Venezuela',
u'VGB': u'Virgin Islands, British',
u'VIR': u'Virgin Islands, U.S.',
u'VNM': u'Viet Nam',
u'VUT': u'Vanuatu',
u'Vanuatu': u'VUT',
u'Venezuela': u'VEN',
u'Viet Nam': u'VNM',
u'Virgin Islands, British': u'VGB',
u'Virgin Islands, U.S.': u'VIR',
u'WLF': u'Wallis and Futuna',
u'WSM': u'Samoa',
u'Wallis and Futuna': u'WLF',
u'Western Sahara': u'ESH',
u'YEM': u'Yemen',
u'Yemen': u'YEM',
u'ZAF': u'South Africa',
u'ZMB': u'Zambia',
u'ZWE': u'Zimbabwe',
u'Zambia': u'ZMB',
u'Zimbabwe': u'ZWE',
u'\xc5land Islands': u'ALA'}
# Bidirectional ISO 3166-1 alpha-2 <-> alpha-3 country-code lookup: every
# alpha-2 key maps to its alpha-3 counterpart and vice versa, so a single
# dict lookup converts in either direction.
COUNTRY_ISO_ALPHA_TABLE = {u'ABW': u'AW',
u'AD': u'AND',
u'AE': u'ARE',
u'AF': u'AFG',
u'AFG': u'AF',
u'AG': u'ATG',
u'AGO': u'AO',
u'AI': u'AIA',
u'AIA': u'AI',
u'AL': u'ALB',
u'ALA': u'AX',
u'ALB': u'AL',
u'AM': u'ARM',
u'AN': u'ANT',
u'AND': u'AD',
u'ANT': u'AN',
u'AO': u'AGO',
u'AQ': u'ATA',
u'AR': u'ARG',
u'ARE': u'AE',
u'ARG': u'AR',
u'ARM': u'AM',
u'AS': u'ASM',
u'ASM': u'AS',
u'AT': u'AUT',
u'ATA': u'AQ',
u'ATF': u'TF',
u'ATG': u'AG',
u'AU': u'AUS',
u'AUS': u'AU',
u'AUT': u'AT',
u'AW': u'ABW',
u'AX': u'ALA',
u'AZ': u'AZE',
u'AZE': u'AZ',
u'BA': u'BIH',
u'BB': u'BRB',
u'BD': u'BGD',
u'BDI': u'BI',
u'BE': u'BEL',
u'BEL': u'BE',
u'BEN': u'BJ',
u'BF': u'BFA',
u'BFA': u'BF',
u'BG': u'BGR',
u'BGD': u'BD',
u'BGR': u'BG',
u'BH': u'BHR',
u'BHR': u'BH',
u'BHS': u'BS',
u'BI': u'BDI',
u'BIH': u'BA',
u'BJ': u'BEN',
u'BL': u'BLM',
u'BLM': u'BL',
u'BLR': u'BY',
u'BLZ': u'BZ',
u'BM': u'BMU',
u'BMU': u'BM',
u'BN': u'BRN',
u'BO': u'BOL',
u'BOL': u'BO',
u'BR': u'BRA',
u'BRA': u'BR',
u'BRB': u'BB',
u'BRN': u'BN',
u'BS': u'BHS',
u'BT': u'BTN',
u'BTN': u'BT',
u'BV': u'BVT',
u'BVT': u'BV',
u'BW': u'BWA',
u'BWA': u'BW',
u'BY': u'BLR',
u'BZ': u'BLZ',
u'CA': u'CAN',
u'CAF': u'CF',
u'CAN': u'CA',
u'CC': u'CCK',
u'CCK': u'CC',
u'CD': u'COD',
u'CF': u'CAF',
u'CG': u'COG',
u'CH': u'CHE',
u'CHE': u'CH',
u'CHL': u'CL',
u'CHN': u'CN',
u'CI': u'CIV',
u'CIV': u'CI',
u'CK': u'COK',
u'CL': u'CHL',
u'CM': u'CMR',
u'CMR': u'CM',
u'CN': u'CHN',
u'CO': u'COL',
u'COD': u'CD',
u'COG': u'CG',
u'COK': u'CK',
u'COL': u'CO',
u'COM': u'KM',
u'CPV': u'CV',
u'CR': u'CRI',
u'CRI': u'CR',
u'CU': u'CUB',
u'CUB': u'CU',
u'CV': u'CPV',
u'CX': u'CXR',
u'CXR': u'CX',
u'CY': u'CYP',
u'CYM': u'KY',
u'CYP': u'CY',
u'CZ': u'CZE',
u'CZE': u'CZ',
u'DE': u'DEU',
u'DEU': u'DE',
u'DJ': u'DJI',
u'DJI': u'DJ',
u'DK': u'DNK',
u'DM': u'DMA',
u'DMA': u'DM',
u'DNK': u'DK',
u'DO': u'DOM',
u'DOM': u'DO',
u'DZ': u'DZA',
u'DZA': u'DZ',
u'EC': u'ECU',
u'ECU': u'EC',
u'EE': u'EST',
u'EG': u'EGY',
u'EGY': u'EG',
u'EH': u'ESH',
u'ER': u'ERI',
u'ERI': u'ER',
u'ES': u'ESP',
u'ESH': u'EH',
u'ESP': u'ES',
u'EST': u'EE',
u'ET': u'ETH',
u'ETH': u'ET',
u'FI': u'FIN',
u'FIN': u'FI',
u'FJ': u'FJI',
u'FJI': u'FJ',
u'FK': u'FLK',
u'FLK': u'FK',
u'FM': u'FSM',
u'FO': u'FRO',
u'FR': u'FRA',
u'FRA': u'FR',
u'FRO': u'FO',
u'FSM': u'FM',
u'GA': u'GAB',
u'GAB': u'GA',
u'GB': u'GBR',
u'GBR': u'GB',
u'GD': u'GRD',
u'GE': u'GEO',
u'GEO': u'GE',
u'GF': u'GUF',
u'GG': u'GGY',
u'GGY': u'GG',
u'GH': u'GHA',
u'GHA': u'GH',
u'GI': u'GIB',
u'GIB': u'GI',
u'GIN': u'GN',
u'GL': u'GRL',
u'GLP': u'GP',
u'GM': u'GMB',
u'GMB': u'GM',
u'GN': u'GIN',
u'GNB': u'GW',
u'GNQ': u'GQ',
u'GP': u'GLP',
u'GQ': u'GNQ',
u'GR': u'GRC',
u'GRC': u'GR',
u'GRD': u'GD',
u'GRL': u'GL',
u'GS': u'SGS',
u'GT': u'GTM',
u'GTM': u'GT',
u'GU': u'GUM',
u'GUF': u'GF',
u'GUM': u'GU',
u'GUY': u'GY',
u'GW': u'GNB',
u'GY': u'GUY',
u'HK': u'HKG',
u'HKG': u'HK',
u'HM': u'HMD',
u'HMD': u'HM',
u'HN': u'HND',
u'HND': u'HN',
u'HR': u'HRV',
u'HRV': u'HR',
u'HT': u'HTI',
u'HTI': u'HT',
u'HU': u'HUN',
u'HUN': u'HU',
u'ID': u'IDN',
u'IDN': u'ID',
u'IE': u'IRL',
u'IL': u'ISR',
u'IM': u'IMN',
u'IMN': u'IM',
u'IN': u'IND',
u'IND': u'IN',
u'IO': u'IOT',
u'IOT': u'IO',
u'IQ': u'IRQ',
u'IR': u'IRN',
u'IRL': u'IE',
u'IRN': u'IR',
u'IRQ': u'IQ',
u'IS': u'ISL',
u'ISL': u'IS',
u'ISR': u'IL',
u'IT': u'ITA',
u'ITA': u'IT',
u'JAM': u'JM',
u'JE': u'JEY',
u'JEY': u'JE',
u'JM': u'JAM',
u'JO': u'JOR',
u'JOR': u'JO',
u'JP': u'JPN',
u'JPN': u'JP',
u'KAZ': u'KZ',
u'KE': u'KEN',
u'KEN': u'KE',
u'KG': u'KGZ',
u'KGZ': u'KG',
u'KH': u'KHM',
u'KHM': u'KH',
u'KI': u'KIR',
u'KIR': u'KI',
u'KM': u'COM',
u'KN': u'KNA',
u'KNA': u'KN',
u'KOR': u'KR',
u'KP': u'PRK',
u'KR': u'KOR',
u'KW': u'KWT',
u'KWT': u'KW',
u'KY': u'CYM',
u'KZ': u'KAZ',
u'LA': u'LAO',
u'LAO': u'LA',
u'LB': u'LBN',
u'LBN': u'LB',
u'LBR': u'LR',
u'LBY': u'LY',
u'LC': u'LCA',
u'LCA': u'LC',
u'LI': u'LIE',
u'LIE': u'LI',
u'LK': u'LKA',
u'LKA': u'LK',
u'LR': u'LBR',
u'LS': u'LSO',
u'LSO': u'LS',
u'LT': u'LTU',
u'LTU': u'LT',
u'LU': u'LUX',
u'LUX': u'LU',
u'LV': u'LVA',
u'LVA': u'LV',
u'LY': u'LBY',
u'MA': u'MAR',
u'MAC': u'MO',
u'MAF': u'MF',
u'MAR': u'MA',
u'MC': u'MCO',
u'MCO': u'MC',
u'MD': u'MDA',
u'MDA': u'MD',
u'MDG': u'MG',
u'MDV': u'MV',
u'ME': u'MNE',
u'MEX': u'MX',
u'MF': u'MAF',
u'MG': u'MDG',
u'MH': u'MHL',
u'MHL': u'MH',
u'MK': u'MKD',
u'MKD': u'MK',
u'ML': u'MLI',
u'MLI': u'ML',
u'MLT': u'MT',
u'MM': u'MMR',
u'MMR': u'MM',
u'MN': u'MNG',
u'MNE': u'ME',
u'MNG': u'MN',
u'MNP': u'MP',
u'MO': u'MAC',
u'MOZ': u'MZ',
u'MP': u'MNP',
u'MQ': u'MTQ',
u'MR': u'MRT',
u'MRT': u'MR',
u'MS': u'MSR',
u'MSR': u'MS',
u'MT': u'MLT',
u'MTQ': u'MQ',
u'MU': u'MUS',
u'MUS': u'MU',
u'MV': u'MDV',
u'MW': u'MWI',
u'MWI': u'MW',
u'MX': u'MEX',
u'MY': u'MYS',
u'MYS': u'MY',
u'MYT': u'YT',
u'MZ': u'MOZ',
u'NA': u'NAM',
u'NAM': u'NA',
u'NC': u'NCL',
u'NCL': u'NC',
u'NE': u'NER',
u'NER': u'NE',
u'NF': u'NFK',
u'NFK': u'NF',
u'NG': u'NGA',
u'NGA': u'NG',
u'NI': u'NIC',
u'NIC': u'NI',
u'NIU': u'NU',
u'NL': u'NLD',
u'NLD': u'NL',
u'NO': u'NOR',
u'NOR': u'NO',
u'NP': u'NPL',
u'NPL': u'NP',
u'NR': u'NRU',
u'NRU': u'NR',
u'NU': u'NIU',
u'NZ': u'NZL',
u'NZL': u'NZ',
u'OM': u'OMN',
u'OMN': u'OM',
u'PA': u'PAN',
u'PAK': u'PK',
u'PAN': u'PA',
u'PCN': u'PN',
u'PE': u'PER',
u'PER': u'PE',
u'PF': u'PYF',
u'PG': u'PNG',
u'PH': u'PHL',
u'PHL': u'PH',
u'PK': u'PAK',
u'PL': u'POL',
u'PLW': u'PW',
u'PM': u'SPM',
u'PN': u'PCN',
u'PNG': u'PG',
u'POL': u'PL',
u'PR': u'PRI',
u'PRI': u'PR',
u'PRK': u'KP',
u'PRT': u'PT',
u'PRY': u'PY',
u'PS': u'PSE',
u'PSE': u'PS',
u'PT': u'PRT',
u'PW': u'PLW',
u'PY': u'PRY',
u'PYF': u'PF',
u'QA': u'QAT',
u'QAT': u'QA',
u'RE': u'REU',
u'REU': u'RE',
u'RO': u'ROU',
u'ROU': u'RO',
u'RS': u'SRB',
u'RU': u'RUS',
u'RUS': u'RU',
u'RW': u'RWA',
u'RWA': u'RW',
u'SA': u'SAU',
u'SAU': u'SA',
u'SB': u'SLB',
u'SC': u'SYC',
u'SD': u'SDN',
u'SDN': u'SD',
u'SE': u'SWE',
u'SEN': u'SN',
u'SG': u'SGP',
u'SGP': u'SG',
u'SGS': u'GS',
u'SH': u'SHN',
u'SHN': u'SH',
u'SI': u'SVN',
u'SJ': u'SJM',
u'SJM': u'SJ',
u'SK': u'SVK',
u'SL': u'SLE',
u'SLB': u'SB',
u'SLE': u'SL',
u'SLV': u'SV',
u'SM': u'SMR',
u'SMR': u'SM',
u'SN': u'SEN',
u'SO': u'SOM',
u'SOM': u'SO',
u'SPM': u'PM',
u'SR': u'SUR',
u'SRB': u'RS',
u'ST': u'STP',
u'STP': u'ST',
u'SUR': u'SR',
u'SV': u'SLV',
u'SVK': u'SK',
u'SVN': u'SI',
u'SWE': u'SE',
u'SWZ': u'SZ',
u'SY': u'SYR',
u'SYC': u'SC',
u'SYR': u'SY',
u'SZ': u'SWZ',
u'TC': u'TCA',
u'TCA': u'TC',
u'TCD': u'TD',
u'TD': u'TCD',
u'TF': u'ATF',
u'TG': u'TGO',
u'TGO': u'TG',
u'TH': u'THA',
u'THA': u'TH',
u'TJ': u'TJK',
u'TJK': u'TJ',
u'TK': u'TKL',
u'TKL': u'TK',
u'TKM': u'TM',
u'TL': u'TLS',
u'TLS': u'TL',
u'TM': u'TKM',
u'TN': u'TUN',
u'TO': u'TON',
u'TON': u'TO',
u'TR': u'TUR',
u'TT': u'TTO',
u'TTO': u'TT',
u'TUN': u'TN',
u'TUR': u'TR',
u'TUV': u'TV',
u'TV': u'TUV',
u'TW': u'TWN',
u'TWN': u'TW',
u'TZ': u'TZA',
u'TZA': u'TZ',
u'UA': u'UKR',
u'UG': u'UGA',
u'UGA': u'UG',
u'UKR': u'UA',
u'UM': u'UMI',
u'UMI': u'UM',
u'URY': u'UY',
u'US': u'USA',
u'USA': u'US',
u'UY': u'URY',
u'UZ': u'UZB',
u'UZB': u'UZ',
u'VA': u'VAT',
u'VAT': u'VA',
u'VC': u'VCT',
u'VCT': u'VC',
u'VE': u'VEN',
u'VEN': u'VE',
u'VG': u'VGB',
u'VGB': u'VG',
u'VI': u'VIR',
u'VIR': u'VI',
u'VN': u'VNM',
u'VNM': u'VN',
u'VU': u'VUT',
u'VUT': u'VU',
u'WF': u'WLF',
u'WLF': u'WF',
u'WS': u'WSM',
u'WSM': u'WS',
u'YE': u'YEM',
u'YEM': u'YE',
u'YT': u'MYT',
u'ZA': u'ZAF',
u'ZAF': u'ZA',
u'ZM': u'ZMB',
u'ZMB': u'ZM',
u'ZW': u'ZWE',
u'ZWE': u'ZW'}
|
from django.urls import path
from django.conf.urls import url
from . import views
# URL routes for the student-management app.
urlpatterns = [
    path('', views.index, name='index'),
    path('contact/', views.contact, name='contact'),
    path('submit/', views.submit, name='submit'),
    path('studentmanager/', views.studentmanager, name='studentmanager'),
    path('assignments/', views.assignments, name="assignments"),
    path('studenthome/', views.studentHomeTable.as_view(), name='studenthome'),
    path('studentcontact/', views.studentContact, name='studentcontact'),
    path('studentsubmit/', views.studentSubmit, name='studentsubmit'),
    path('studentassignments/', views.studentAssignments, name="studentassignments"),
    path('submissionviewer/', views.submissionViewer, name='submissionViewer'),
    # `django.conf.urls.url()` is deprecated since Django 3.1 and removed in
    # 4.0; path() provides the equivalent route. (The old unanchored regex
    # 'home/' could also match inside longer paths; path() matches exactly.)
    path('home/', views.homeTable.as_view(), name='home'),
]
import json
def convert_sudoku(sudoku):
    """Convert one parsed puzzle dict ({'number': int, 'puzzle': [lines]})
    into a dict mapping grid index 0-8 to a 9x9 grid rendered as a string
    (cells space-separated, rows newline-terminated; unknown cells are '-1').

    Each line of sudoku['puzzle'] is read positionally: item[0] = grid id,
    item[2] = row, item[4] = column, item[6] = value (value is shifted down
    by one).  Rows/columns appear to be 0-based in the input files -- TODO
    confirm against the actual puzzle file format.
    """
    output = {i: "" for i in range(9)}
    string_output = {i: "" for i in range(9)}
    sudoku_row = [str(-1) for i in range(9)]
    sudoku_mat = [sudoku_row.copy() for i in range(9)]
    grid = 0
    for item in sudoku["puzzle"]:
        if int(item[0]) == grid:
            # Still inside the current grid: record this given.
            grid = int(item[0])
            given_row = item[2]
            given_col = item[4]
            given = item[6]
            sudoku_mat[int(given_row)][int(given_col)] = str(int(given) - 1)
        else:
            # A new grid id started: flush the finished matrix and reset.
            # NOTE(review): the finished matrix is stored under the NEW id's
            # 0-based index (int(item[0]) - 1); this is only correct if grid
            # ids in the file are 0-based -- verify with a sample file.
            output[int(item[0]) - 1] = sudoku_mat
            sudoku_row = [str(-1) for i in range(9)]
            sudoku_mat = [sudoku_row.copy() for i in range(9)]
            grid = int(item[0])
            given_row = item[2]
            given_col = item[4]
            given = item[6]
            sudoku_mat[int(given_row)][int(given_col)] = str(int(given) - 1)
    output[8] = sudoku_mat  # the last grid is never flushed inside the loop
    for grid, puzzle in output.items():
        for row in puzzle:
            string_output[int(grid)] += ' '.join(row) + '\n'
    return string_output
def read_sudoku(n, puzzle_dir, save_dir=""):
    """Read `n` puzzle files named '1.txt'..'<n>.txt' located under
    `puzzle_dir` (used as a string prefix, as before), convert each with
    convert_sudoku(), and optionally dump everything to
    '<save_dir>/3d_sudoku_strings.json'.

    Returns a dict mapping 0-based puzzle index -> converted puzzle.
    Missing files are reported and yield an empty puzzle.
    """
    import os  # local import keeps this fix self-contained

    puzzles = []
    for i in range(n):
        try:
            with open(puzzle_dir + str(i + 1) + ".txt", "r") as infile:
                puzzle = infile.readlines()
        except FileNotFoundError:
            puzzle = []
            print(str(i + 1) + ".txt" + " Not found")
        puzzles.append({"number": i + 1, "puzzle": puzzle})
    data = {}
    for p in puzzles:
        print("Reading puzzle ", p["number"])
        data[p["number"] - 1] = convert_sudoku(p)
    if save_dir:
        # os.path.join is portable; the original hard-coded Windows '\\'
        # separators and produced a doubled backslash in the output path
        # (and shadowed the `outfile` name with the file object).
        out_path = os.path.join(save_dir, "3d_sudoku_strings.json")
        with open(out_path, "w") as outfile:
            json.dump(data, outfile, indent=4, separators=(",", ":"))
    return data
|
from src.genotype.neat.operators.mutators.mutation_report import MutationReport
class Mutagen:
    """Base class for any mutate-able (evolvable) property."""
    def __init__(self, name: str, mutation_chance: float):
        # Human-readable identifier, also used to check kind compatibility.
        self.name = name
        # Probability that mutate() actually perturbs the value.
        self.mutation_chance = mutation_chance
    def __repr__(self):
        return "%s: %s" % (self.name, repr(self.get_current_value()))
    # Read-only convenience accessor mirroring get_current_value().
    value = property(lambda self: self.get_current_value())
    def get_current_value(self):
        """Return the current value; subclasses must override."""
        raise NotImplementedError("method must be implemented in sub class")
    def __call__(self):
        # Calling the mutagen is shorthand for reading its current value.
        return self.get_current_value()
    def mutate(self) -> MutationReport:
        """Randomly perturb the value; subclasses must override."""
        raise NotImplementedError("method must be implemented in sub class")
    def set_value(self, value):
        """Overwrite the current value; subclasses must override."""
        raise NotImplementedError("method must be implemented in sub class")
    def interpolate(self, other):
        """Blend this mutagen with another of the same kind; subclasses must override."""
        raise NotImplementedError("method must be implemented in sub class")
def interpolate(mutagen_a: Mutagen, mutagen_b: Mutagen) -> Mutagen:
    """Blend two mutagens of the same kind into a new mutagen.

    Raises:
        ValueError: if the two mutagens have different names (kinds).
            ValueError subclasses Exception, so callers catching the old
            bare Exception keep working.
    """
    if mutagen_a.name != mutagen_b.name:
        raise ValueError(
            "cannot interpolate different types of mutagens: "
            f"{mutagen_a.name} and {mutagen_b.name}"
        )
    return mutagen_a.interpolate(mutagen_b)
|
# -*- coding: utf-8 -*-
import ctypes
import time
from ctypes import c_short, c_char, c_uint8, c_int32, c_int, c_uint, c_uint32, c_long, byref, Structure, CFUNCTYPE, POINTER
from ctypes.wintypes import DWORD, BOOL, HHOOK, MSG, LPWSTR, WCHAR, WPARAM, LPARAM
LPMSG = POINTER(MSG)  # pointer-to-MSG type used by the message-loop APIs below
import atexit
from ._mouse_event import ButtonEvent, WheelEvent, MoveEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE, WHEEL, HORIZONTAL, VERTICAL
#https://github.com/boppreh/mouse/issues/1
# Use a private WinDLL handle (not the shared ctypes.windll cache) with
# use_last_error=True so ctypes.get_last_error() reports Win32 failures.
#user32 = ctypes.windll.user32
user32 = ctypes.WinDLL('user32', use_last_error = True)
class MSLLHOOKSTRUCT(Structure):
    # ctypes mirror of the Win32 MSLLHOOKSTRUCT delivered to WH_MOUSE_LL
    # hooks; 'data' is mouseData (wheel delta / X-button id in the high word).
    # NOTE(review): the official struct declares time as DWORD and ends with
    # a ULONG_PTR dwExtraInfo field omitted here -- confirm this truncated,
    # read-only layout is sufficient.
    _fields_ = [("x", c_long),
                ("y", c_long),
                ('data', c_int32),
                ('reserved', c_int32),
                ("flags", DWORD),
                ("time", c_int),
                ]
# Callback signature for the low-level mouse hook: (nCode, wParam, lParam).
LowLevelMouseProc = CFUNCTYPE(c_int, WPARAM, LPARAM, POINTER(MSLLHOOKSTRUCT))
SetWindowsHookEx = user32.SetWindowsHookExA
#SetWindowsHookEx.argtypes = [c_int, LowLevelMouseProc, c_int, c_int]
SetWindowsHookEx.restype = HHOOK
CallNextHookEx = user32.CallNextHookEx
#CallNextHookEx.argtypes = [c_int , c_int, c_int, POINTER(MSLLHOOKSTRUCT)]
CallNextHookEx.restype = c_int
UnhookWindowsHookEx = user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = [HHOOK]
UnhookWindowsHookEx.restype = BOOL
GetMessage = user32.GetMessageW
GetMessage.argtypes = [LPMSG, c_int, c_int, c_int]
GetMessage.restype = BOOL
TranslateMessage = user32.TranslateMessage
TranslateMessage.argtypes = [LPMSG]
TranslateMessage.restype = BOOL
# NOTE(review): DispatchMessageA (ANSI) is paired with GetMessageW (wide);
# presumably harmless for mouse messages, but worth confirming.
DispatchMessage = user32.DispatchMessageA
DispatchMessage.argtypes = [LPMSG]
# This one was compiled from experience and may be incomplete.
WM_MOUSEMOVE = 0x200
WM_LBUTTONDOWN = 0x201
WM_LBUTTONUP = 0x202
WM_LBUTTONDBLCLK = 0x203
WM_RBUTTONDOWN = 0x204
WM_RBUTTONUP = 0x205
WM_RBUTTONDBLCLK = 0x206
WM_MBUTTONDOWN = 0x207
WM_MBUTTONUP = 0x208
WM_MBUTTONDBLCLK = 0x209
WM_MOUSEWHEEL = 0x20A
WM_XBUTTONDOWN = 0x20B
WM_XBUTTONUP = 0x20C
WM_XBUTTONDBLCLK = 0x20D
WM_NCXBUTTONDOWN = 0x00AB
WM_NCXBUTTONUP = 0x00AC
WM_NCXBUTTONDBLCLK = 0x00AD
WM_MOUSEHWHEEL = 0x20E
WM_LBUTTONDOWN = 0x0201
WM_LBUTTONUP = 0x0202
WM_MOUSEMOVE = 0x0200
WM_MOUSEWHEEL = 0x020A
WM_MOUSEHWHEEL = 0x020E
WM_RBUTTONDOWN = 0x0204
WM_RBUTTONUP = 0x0205
buttons_by_wm_code = {
WM_LBUTTONDOWN: (DOWN, LEFT),
WM_LBUTTONUP: (UP, LEFT),
WM_LBUTTONDBLCLK: (DOUBLE, LEFT),
WM_RBUTTONDOWN: (DOWN, RIGHT),
WM_RBUTTONUP: (UP, RIGHT),
WM_RBUTTONDBLCLK: (DOUBLE, RIGHT),
WM_MBUTTONDOWN: (DOWN, MIDDLE),
WM_MBUTTONUP: (UP, MIDDLE),
WM_MBUTTONDBLCLK: (DOUBLE, MIDDLE),
WM_XBUTTONDOWN: (DOWN, X),
WM_XBUTTONUP: (UP, X),
WM_XBUTTONDBLCLK: (DOUBLE, X),
}
MOUSEEVENTF_ABSOLUTE = 0x8000
MOUSEEVENTF_MOVE = 0x1
MOUSEEVENTF_WHEEL = 0x800
MOUSEEVENTF_HWHEEL = 0x1000
MOUSEEVENTF_LEFTDOWN = 0x2
MOUSEEVENTF_LEFTUP = 0x4
MOUSEEVENTF_RIGHTDOWN = 0x8
MOUSEEVENTF_RIGHTUP = 0x10
MOUSEEVENTF_MIDDLEDOWN = 0x20
MOUSEEVENTF_MIDDLEUP = 0x40
MOUSEEVENTF_XDOWN = 0x0080
MOUSEEVENTF_XUP = 0x0100
simulated_mouse_codes = {
(WHEEL, HORIZONTAL): MOUSEEVENTF_HWHEEL,
(WHEEL, VERTICAL): MOUSEEVENTF_WHEEL,
(DOWN, LEFT): MOUSEEVENTF_LEFTDOWN,
(UP, LEFT): MOUSEEVENTF_LEFTUP,
(DOWN, RIGHT): MOUSEEVENTF_RIGHTDOWN,
(UP, RIGHT): MOUSEEVENTF_RIGHTUP,
(DOWN, MIDDLE): MOUSEEVENTF_MIDDLEDOWN,
(UP, MIDDLE): MOUSEEVENTF_MIDDLEUP,
(DOWN, X): MOUSEEVENTF_XDOWN,
(UP, X): MOUSEEVENTF_XUP,
}
NULL = c_int(0)
WHEEL_DELTA = 120
init = lambda: None
def listen(queue):
    """Install a WH_MOUSE_LL hook and pump the Windows message loop,
    pushing ButtonEvent/WheelEvent/MoveEvent objects into `queue` (any
    object exposing .put()).  Blocks; intended to run in its own thread.
    """
    def low_level_mouse_handler(nCode, wParam, lParam):
        struct = lParam.contents
        # Can't use struct.time because it's usually zero.
        t = time.time()
        if wParam == WM_MOUSEMOVE:
            event = MoveEvent(struct.x, struct.y, t)
        elif wParam == WM_MOUSEWHEEL:
            # The wheel delta lives in the high word of `data`; this
            # normalizes one notch to +/-1.
            event = WheelEvent(struct.data / (WHEEL_DELTA * (2<<15)), t)
        elif wParam in buttons_by_wm_code:
            type, button = buttons_by_wm_code.get(wParam, ('?', '?'))
            if wParam >= WM_XBUTTONDOWN:
                # Distinguish X1/X2 via the high word of `data`.
                button = {0x10000: X, 0x20000: X2}[struct.data]
            event = ButtonEvent(type, button, t)
        # NOTE(review): if wParam matches none of the branches above (e.g.
        # WM_MOUSEHWHEEL), `event` is unbound here and queue.put raises
        # NameError inside the hook -- verify intended message coverage.
        queue.put(event)
        return CallNextHookEx(NULL, nCode, wParam, lParam)
    WH_MOUSE_LL = c_int(14)
    # Keep mouse_callback referenced for the hook's lifetime, or ctypes
    # would garbage-collect the trampoline.
    mouse_callback = LowLevelMouseProc(low_level_mouse_handler)
    mouse_hook = SetWindowsHookEx(WH_MOUSE_LL, mouse_callback, NULL, NULL)
    # Register to remove the hook when the interpreter exits. Unfortunately a
    # try/finally block doesn't seem to work here.
    atexit.register(UnhookWindowsHookEx, mouse_hook)
    # NOTE(review): msg is a NULL LPMSG pointer and the loop condition is
    # inverted relative to the usual GetMessage idiom; confirm on Windows
    # that message pumping (and therefore hook delivery) actually occurs --
    # the conventional form is msg = MSG(); GetMessage(byref(msg), ...).
    msg = LPMSG()
    while not GetMessage(msg, NULL, NULL, NULL):
        TranslateMessage(msg)
        DispatchMessage(msg)
def _translate_button(button):
    """Map an extended button (X/X2) to the (X, mouse_data) pair expected by
    mouse_event(); ordinary buttons pass through with mouse_data == 0."""
    extra_data = {X: 0x10000, X2: 0x20000}
    if button in (X, X2):
        return X, extra_data[button]
    return button, 0
def press(button=LEFT):
    """Simulate a button-down event for `button` (default: left button)."""
    translated, extra_data = _translate_button(button)
    flag = simulated_mouse_codes[(DOWN, translated)]
    user32.mouse_event(flag, 0, 0, extra_data, 0)
def release(button=LEFT):
    """Simulate a button-up event for `button` (default: left button)."""
    translated, extra_data = _translate_button(button)
    flag = simulated_mouse_codes[(UP, translated)]
    user32.mouse_event(flag, 0, 0, extra_data, 0)
def wheel(delta=1):
    """Scroll the vertical wheel by `delta` notches (negative scrolls down)."""
    amount = int(delta * WHEEL_DELTA)
    user32.mouse_event(simulated_mouse_codes[(WHEEL, VERTICAL)], 0, 0, amount, 0)
def move_to(x, y):
    """Place the cursor at the absolute screen position (x, y)."""
    xi, yi = int(x), int(y)
    user32.SetCursorPos(xi, yi)
def move_relative(x, y):
    """Shift the cursor by (x, y) pixels from its current position."""
    dx, dy = int(x), int(y)
    user32.mouse_event(MOUSEEVENTF_MOVE, dx, dy, 0, 0)
class POINT(Structure):
    # ctypes mirror of the Win32 POINT structure (screen coordinates).
    _fields_ = [("x", c_long), ("y", c_long)]
def get_position():
    """Return the current cursor position as an (x, y) tuple."""
    pos = POINT()
    user32.GetCursorPos(byref(pos))
    return (pos.x, pos.y)
if __name__ == '__main__':
    # Manual smoke test: print every mouse event as it is captured.
    # listen() expects a queue-like object with a .put() method; the
    # original passed a bare function, which crashed with AttributeError
    # inside the hook callback as soon as the first event arrived.
    class _PrintSink:
        @staticmethod
        def put(event):
            print(event)
    listen(_PrintSink())
|
from typing import List
from collections import Counter
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent word in `paragraph` that is not banned.

        Words are maximal runs of ASCII letters; comparison is
        case-insensitive.  Ties are broken by first occurrence (same as the
        original dict scan).  Returns '' when every word is banned -- the
        original could raise NameError (`ans` unbound) in that case.
        """
        banned_words = {word.lower() for word in banned}
        # Replace every non-letter with a space so split() yields clean words
        # (equivalent to the original per-character '.' substitution).
        cleaned = "".join(
            ch if ch.isascii() and ch.isalpha() else " " for ch in paragraph
        )
        counts = Counter(
            word for word in cleaned.lower().split() if word not in banned_words
        )
        if not counts:
            return ""
        # most_common(1) -> [(word, count)] for the highest count; Counter
        # sorting is stable, preserving first-occurrence tie-breaking.
        return counts.most_common(1)[0][0]
# Manual check for Solution.mostCommonWord.  (The original bound
# `paragraph, banned` twice in a row; the first, dead binding is removed.)
paragraph, banned = "Bob. hIt, baLl", ["bob", "hit"]
solu = Solution()
print(solu.mostCommonWord(paragraph, banned))  # expected output: ball
import torch
from torch.distributions import Bernoulli
from torch.distributions.continuous_bernoulli import ContinuousBernoulli
from distributions.conditional import ConditionalDistribution
from utils import sum_except_batch
class ConditionalBernoulli(ConditionalDistribution):
    """A Bernoulli-style distribution whose parameters come from a
    conditioning network.

    A ContinuousBernoulli is used under the hood so log_prob is well
    defined for values in the open unit interval, not just {0, 1}.
    """
    def __init__(self, net):
        # `net` maps a context tensor to per-element success probabilities.
        super(ConditionalBernoulli, self).__init__(net)
    def forward(self, context):
        """Return the success probabilities predicted from `context`."""
        return self.net(context)
    def cond_dist(self, context=None):
        """Build the context-conditioned distribution object."""
        probs = self.net(context)
        return ContinuousBernoulli(probs=probs)
    def log_prob(self, x, context):
        """Log-likelihood of `x` given `context`, summed over non-batch dims."""
        dist = self.cond_dist(context=context)
        return sum_except_batch(dist.log_prob(x.float()))
    def log_prob_wihout_context(self, x, probs):
        # NOTE: method name (including the typo) kept for caller compatibility.
        dist = ContinuousBernoulli(probs=probs)
        return sum_except_batch(dist.log_prob(x.float()))
    def logits(self, context):
        """Logits of the context-conditioned distribution."""
        return self.cond_dist(context=context).logits
    def probs(self, context):
        """Probabilities of the context-conditioned distribution."""
        return self.cond_dist(context=context).probs
    def mean(self, context):
        """Mean of the context-conditioned distribution."""
        return self.cond_dist(context=context).mean
"""
Read namelists
"""
from f90nml import Namelist, Parser
import multiprocessing as mp
import re
from typing import List, Union, Tuple
_nml_types = Union[int, float, bool, str]  # scalar types a namelist value may take
# Matches an array-element reference like 'var(3)'; any trailing text after
# the ')' is captured in group 5 so callers can reject invalid forms.
_array_rg = re.compile(
    "((?:[a-z][a-z0-9_]*))(\\()(\\d+)(\\))(.*)", re.IGNORECASE | re.DOTALL
)
###############################################################################
def _get_array_index(s: str) -> Tuple[int, str]:
"""
If the variable name represents an array element (e.g., 'VAR(1)'),
then return the array index (1-based) and the variable name.
Otherwise, return None.
"""
# initial quick check:
if "(" not in s:
return None, None
m = _array_rg.search(s.strip())
if m:
if m.group(5) == "":
return int(m.group(3)), m.group(1) # index, arrayname
else:
# invalid array string
return None, None
else:
return None, None
###############################################################################
def _pathSet(dictionary: dict, path: str, value: _nml_types, sep: str = "%"):
    """
    Sets a variable in a dictionary, given the namelist path string
    (e.g. 'grp%sub%var(2)').
    Assumes the input path uses Fortran-style 1-based indexing of arrays.
    Intermediate groups and array slots are created on demand.
    """
    path = path.split(sep)
    key = path[-1]
    # Walk (creating as needed) everything up to the final path component.
    dictionary = _pathGet(dictionary, sep.join(path[:-1]), sep=sep)
    i, arrayname = _get_array_index(key)
    if i is not None:
        # it is an array element:
        if arrayname not in dictionary:
            dictionary[arrayname] = [None]
        x = dictionary[arrayname]
        lenx = len(x)
        if lenx < i:
            # have to add this element
            # (pad with None up to the 1-based index i)
            for j in range(lenx, i):
                x.append(None)
        x[i - 1] = value
    else:
        # it is just a normal variable:
        dictionary[key] = value
###############################################################################
def _pathGet(
    dictionary: dict, path: str, sep: str = "%"
) -> Union[_nml_types, dict, list]:
    """
    Returns an item in a dictionary given the namelist path string.
    Assumes the input path uses Fortran-style 1-based indexing of arrays.

    Missing groups and array elements are created on the fly (as empty
    Namelist objects / None padding), so this also serves as a
    "make path" helper for _pathSet.
    """
    for item in path.split(sep):
        i, arrayname = _get_array_index(item)
        if i is not None:
            # it is an array element:
            # create this item since it isn't there
            if arrayname not in dictionary:
                dictionary[arrayname] = [None]
            d = dictionary[arrayname]
            lenx = len(d)
            if lenx < i:
                # have to add this element
                for j in range(lenx, i):
                    d.append(None)
            # make sure it's a dict:
            if not isinstance(d[i - 1], dict):
                d[i - 1] = Namelist({})
            dictionary = d[i - 1]
        else:
            # it is just a normal variable:
            # make sure it's a dict first
            if not isinstance(dictionary, dict):
                dictionary = Namelist({})
            if item not in dictionary:
                dictionary[item] = Namelist({})
            dictionary = dictionary[item]
    return dictionary
###############################################################################
def _nml_value_to_python_value(value: str) -> _nml_types:
"""
Convert the namelist value to a Python value.
"""
value_str = value.strip()
value_str_bool = value_str.lower().strip(".")
if value_str_bool == "t" or value_str_bool == "true":
# logical
return True
elif value_str_bool == "f" or value_str_bool == "false":
# logical
return False
elif value_str[0] == '"' and value_str[-1] == '"':
# string
# fortran to python convention
return value_str[1:-1].replace('""', '"')
elif value_str[0] == "'" and value_str[-1] == "'":
# string
# fortran to python convention
return value_str[1:-1].replace("''", "'")
else:
# int or double:
try:
return int(value_str)
except ValueError:
return float(value_str)
###############################################################################
def _read_single_namelist(lines: List[str], parser: Parser, simple: bool) -> Namelist:
    """
    Read a namelist from its (already comment/blank-stripped) lines.

    * With ``simple=True`` a fast line-based parser is tried first; it
      assumes one assignment per line, e.g. `val%a(2)%b = value,`.
    * Otherwise (or if the simple parser raises anything) it defaults
      to using f90nml to read it.
    """
    nml = None
    if simple:
        try:
            namelist_name = lines[0].lstrip("&").strip().lower()
            nml = Namelist({namelist_name: Namelist({})})
            for line in lines[1:]:
                d = line.split("=", 1)
                if len(d) == 1:
                    if d[0].strip() == "/":
                        break  # end of the namelist
                    else:
                        # something else - not valid
                        raise Exception("invalid line")
                elif len(d) >= 2:
                    if d[0][0] == "'" or d[0][0] == '"':
                        # = in a string - not valid
                        raise Exception("invalid line")
                    else:
                        path = d[0].strip()
                        if ":" in path:
                            raise Exception(
                                "invalid line"
                            )  # can't read multiple entries at once - not valid
                        else:
                            # warning: it will still read lines like
                            # this: `a = 1,2,3` as a single string
                            # convert the string to a Python value:
                            value = _nml_value_to_python_value(d[1].rstrip(", "))
                            # add this value to the namelist:
                            _pathSet(nml[namelist_name], path, value)
        except Exception:
            # Any parse problem (including ValueError/IndexError from the
            # value conversion) discards partial results and falls through
            # to the full f90nml parser below.
            nml = None
    if nml is None:
        nml = parser.reads("\n".join(lines))  # f90nml 1.1 and above
    return nml
###############################################################################
# def split_namelist_str(s: str):
# """alternate version of split_namelist_file with a string as an input"""
# namelists = []
# i = -1
# started = False
# f = s.split('\n')
# for line in f:
# line = line.strip()
# if (len(line) > 0):
# if (line[0] != '!'):
# if (line[0] == '&'): # start a namelist
# i = i + 1
# namelists.append([])
# started = True
# elif (line[0:1] == '/'): # end a namelist
# started = False
# namelists[i].append(line)
# continue
# if started:
# namelists[i].append(line)
# return namelists
###############################################################################
def _split_namelist_file(filename: str) -> List[str]:
"""split a namelist file into an array of namelist strings"""
namelists = list()
i = -1
started = False
with open(filename, "r") as f:
for line in f:
line = line.strip()
if len(line) > 0:
if line[0] != "!":
if line[0] == "&": # start a namelist
i = i + 1
namelists.append(list())
started = True
elif line[0:1] == "/": # end a namelist
started = False
namelists[i].append(line)
continue
if started:
namelists[i].append(line)
return namelists
###############################################################################
def read_namelist(
    filename: str, *, n_threads: int = 0, parser: Parser = None, simple: bool = True
) -> Namelist:
    """
    Read a namelist quickly.

    For threaded use, set `n_threads` to the number of workers (clamped to
    [1, cpu_count]; despite the name it uses a process pool).  With
    ``simple=True`` each namelist is first tried with the fast line-based
    parser, falling back to f90nml only when that fails.
    """
    nml = Namelist({})
    def _loop_over_results(r):
        # Merge one parsed namelist into the result; repeated group names
        # accumulate into a list of namelists (matching f90nml behavior).
        for key, value in r.items():
            if key in nml:
                # array of namelists:
                if isinstance(nml[key], list):
                    nml[key].append(value)
                else:
                    nml[key] = [nml[key], value]
            else:
                nml[key] = value
    if not parser:
        parser = Parser()
    namelists = _split_namelist_file(filename)
    results = list()
    results_append = results.append  # bind once for the loops below
    if n_threads:
        n_threads = max(1, min(mp.cpu_count(), n_threads))
        pool = mp.Pool(processes=n_threads)
        pool_apply_async = pool.apply_async
        for lines in namelists:
            results_append(
                pool_apply_async(_read_single_namelist, (lines, parser, simple))
            )
        pool.close()
        pool.join()
        # merge in submission order to keep output deterministic
        for r in results:
            _loop_over_results(r.get())
    else:
        for lines in namelists:
            results_append(_read_single_namelist(lines, parser, simple))
        for r in results:
            _loop_over_results(r)
    return nml
|
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=protected-access
"""Unit test for worker_manager module"""
import asyncio
import hashlib
import importlib
import multiprocessing
import os
import secrets
import time
import typing
import unittest
import unittest.mock
import aiounittest
import google.auth
import google.cloud.container
import kubernetes
import sanic
from parallel_accel.shared import redis, schemas
from src import worker_manager
class TestASICWorkerManager(aiounittest.AsyncTestCase):
"""Tests ASICWorkerManager class behavior."""
API_KEY: str = secrets.token_hex(16)
API_KEY_HASH: str = None
    @classmethod
    def setUpClass(cls) -> None:
        """See base class documentation.

        Order matters here: the kubernetes client classes are patched and
        the environment variables are set *before* worker_manager is
        reloaded, so the reloaded module binds the mocks and settings.
        """
        # Compute API key hash
        hasher = hashlib.sha1()
        hasher.update(cls.API_KEY.encode())
        cls.API_KEY_HASH = hasher.hexdigest()
        # Patch imports
        cls.patchers = []
        # Each mock is wired so that instantiating the patched class
        # returns the mock itself (return_value = mock).
        cls.mocked_appsv1api = unittest.mock.Mock(
            spec=kubernetes.client.AppsV1Api
        )
        cls.mocked_appsv1api.return_value = cls.mocked_appsv1api
        patcher = unittest.mock.patch(
            "kubernetes.client.AppsV1Api", cls.mocked_appsv1api
        )
        cls.patchers.append(patcher)
        cls.mocked_corev1api = unittest.mock.Mock(
            spec=kubernetes.client.CoreV1Api
        )
        cls.mocked_corev1api.return_value = cls.mocked_corev1api
        patcher = unittest.mock.patch(
            "kubernetes.client.CoreV1Api", cls.mocked_corev1api
        )
        cls.patchers.append(patcher)
        cls.mocked_watch = unittest.mock.Mock(spec=kubernetes.watch.Watch)
        cls.mocked_watch.return_value = cls.mocked_watch
        patcher = unittest.mock.patch(
            "kubernetes.watch.Watch", cls.mocked_watch
        )
        cls.patchers.append(patcher)
        for patcher in cls.patchers:
            patcher.start()
        cls.mocked_event_loop = unittest.mock.Mock(
            spec=asyncio.AbstractEventLoop
        )
        cls.mocked_event_loop.run_in_executor = unittest.mock.AsyncMock()
        cls.mocked_redis_store = unittest.mock.Mock(
            spec=redis.WorkersRedisStore
        )
        # Environment the worker_manager module reads at import time;
        # reload so it re-reads these values under the active patches.
        os.environ["GKE_CLUSTER"] = "test-cluster"
        os.environ["GCP_PROJECT"] = "test-project"
        importlib.reload(worker_manager)
        cls.mocked_sanic_app = unittest.mock.Mock(spec=sanic.Sanic)
        cls.mocked_sanic_app.loop = cls.mocked_event_loop
        cls.manager = worker_manager.ASICWorkerManager(cls.mocked_redis_store)
        cls.manager._app = cls.mocked_sanic_app
@classmethod
def tearDownClass(cls) -> None:
"""See base class documentation."""
del os.environ["GKE_CLUSTER"]
for patcher in cls.patchers:
patcher.stop()
def tearDown(self) -> None:
"""See base class documentation."""
for mock in [x for x in dir(self) if x.startswith("mocked_")]:
getattr(self, mock).reset_mock()
async def test_handler_start_command(self) -> None:
"""Tests START worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [{"type": "ADDED"}]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
self.manager._credentials.valid = True
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.START
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 1}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"ADDED",
),
),
(
(
None,
self.manager._wait_for_asic_cluster_readiness,
self.API_KEY,
),
),
]
)
async def test_handler_stop_command(self) -> None:
"""Tests STOP worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.STOP
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 0}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"DELETED",
),
),
]
)
async def test_handler_restart_command(self) -> None:
"""Tests RESTART worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-asic_cluster-1")
asic_cluster = kubernetes.client.V1ASICCluster(metadata=meta)
asic_cluster_list = kubernetes.client.V1ASICClusterList(items=[asic_cluster])
list_namespaced_asic_cluster_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_asic_cluster_thread.get.return_value = asic_cluster_list
self.mocked_corev1api.list_namespaced_asic_cluster.return_value = (
list_namespaced_asic_cluster_thread
)
delete_namespaced_asic_cluster_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_corev1api.delete_namespaced_asic_cluster.return_value = (
delete_namespaced_asic_cluster_thread
)
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.RESTART
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_corev1api.list_namespaced_asic_cluster.assert_called_once_with(
**kwargs
)
kwargs = {"async_req": True, "namespace": "default"}
self.mocked_corev1api.delete_namespaced_asic_cluster.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_asic_cluster_thread.wait,
),
),
(
(
None,
delete_namespaced_asic_cluster_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"ADDED",
),
),
(
(
None,
self.manager._wait_for_asic_cluster_readiness,
self.API_KEY,
),
),
]
)
@unittest.mock.patch("kubernetes.client.Configuration")
@unittest.mock.patch("google.cloud.container.ClusterManagerAsyncClient")
@unittest.mock.patch("google.cloud.container.GetClusterRequest")
@unittest.mock.patch("google.auth.transport.requests.Request")
@unittest.mock.patch("google.auth.default")
async def test_initialize(
self,
_mocked_default: unittest.mock.Mock,
mocked_request: unittest.mock.Mock,
mocked_get_cluster_request: unittest.mock.Mock,
mocked_cluster_manager: unittest.mock.Mock,
mocked_configuration: unittest.mock.Mock,
) -> None:
"""Tests initialize method behavior."""
cluster_name = "cluster-name"
gcp_project = "gcp-project"
mocked_request.return_value = mocked_request
mocked_credentials = unittest.mock.Mock(
spec=google.auth.compute_engine.Credentials
)
mocked_credentials.token = "API-TOKEN"
mocked_credentials.expired = False
mocked_credentials.valid = False
self.mocked_event_loop.run_in_executor.return_value = (
mocked_credentials,
None,
)
mocked_get_cluster_request.return_value = mocked_get_cluster_request
mocked_cluster = unittest.mock.Mock(spec=google.cloud.container.Cluster)
mocked_cluster.endpoint = "1.2.3.4"
mocked_cluster_manager.return_value = mocked_cluster_manager
mocked_cluster_manager.get_cluster = unittest.mock.AsyncMock(
return_value=mocked_cluster
)
mocked_configuration.get_default_copy.return_value = (
mocked_configuration
)
# Run test
await self.manager.initialize(
gcp_project, cluster_name, self.mocked_sanic_app
)
# Verification
self.assertEqual(self.mocked_event_loop.run_in_executor.call_count, 2)
mocked_get_cluster_request.assert_called_once_with()
self.assertEqual(
mocked_get_cluster_request.name,
f"projects/{gcp_project}/locations/us-central1/clusters/{cluster_name}",
)
mocked_cluster_manager.assert_called_once_with(
credentials=mocked_credentials,
)
mocked_cluster_manager.get_cluster.assert_called_once_with(
mocked_get_cluster_request
)
self.assertEqual(mocked_configuration.get_default_copy.call_count, 2)
self.assertEqual(mocked_configuration.set_default.call_count, 2)
self.assertEqual(
mocked_configuration.api_key,
{"authorization": f"Bearer {mocked_credentials.token}"},
)
self.assertEqual(
mocked_configuration.host, f"https://{mocked_cluster.endpoint}:443"
)
self.assertFalse(mocked_configuration.verify_ssl)
async def test_stop_idling_workers(self) -> None:
"""Tests stop_idling_workers method behavior."""
# Test setup
self.mocked_redis_store.get_workers_ids.return_value = [self.API_KEY]
self.mocked_redis_store.get_worker.return_value = (
schemas.WorkerInternal(schemas.WorkerState.IDLE)
)
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.stop_idling_workers()
# Verification
self.mocked_redis_store.get_workers_ids.assert_called_once()
self.mocked_redis_store.get_worker.assert_called_once_with(self.API_KEY)
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 0}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"DELETED",
),
),
]
)
async def test_stop_idling_workers_no_matches(self) -> None:
"""Tests stop_idling_workers method behavior: no worker is idling"""
self.mocked_redis_store.get_workers_ids.return_value = [
self.API_KEY
] * 2
self.mocked_redis_store.get_worker.side_effect = [
schemas.WorkerInternal(
schemas.WorkerState.IDLE, job_timestamp=int(time.time())
),
schemas.WorkerInternal(schemas.WorkerState.OFFLINE),
]
# Run test
await self.manager.stop_idling_workers()
# Verification
self.mocked_redis_store.get_workers_ids.assert_called_once()
call_args_list = [((self.API_KEY,),)] * 2
self.assertEqual(
self.mocked_redis_store.get_worker.call_args_list, call_args_list
)
self.mocked_appsv1api.list_namespaced_deployment.assert_not_called()
def test_wait_for_asic_cluster_events_added(self) -> None:
"""Tests _wait_for_asic_cluster_events method behavior: listen for ADDED asic_clusters"""
# Test setup
self.mocked_watch.stream.return_value = [{"type": "ADDED"}]
# Run test
self.manager._wait_for_asic_cluster_event(self.API_KEY, "ADDED")
# Verification
self._verify_watch(timeout_seconds=60)
self._verify_redis_store("set_booting")
def test_wait_for_asic_cluster_events_deleted(self) -> None:
"""Tests _wait_for_asic_cluster_events method behavior: listen for DELETED asic_clusters"""
# Test setup
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
# Run test
self.manager._wait_for_asic_cluster_event(self.API_KEY, "DELETED")
# Verification
self._verify_watch(timeout_seconds=60)
self._verify_redis_store("set_offline")
def test_wait_for_asic_cluster_readiness(self) -> None:
"""Tests _wait_for_asic_cluster_readiness method behavior"""
# Test setup
mocked_object = unittest.mock.Mock()
mocked_object.status = unittest.mock.Mock()
mocked_object.status.phase = "Running"
self.mocked_watch.stream.return_value = [
{"object": mocked_object, "type": "MODIFIED"}
]
# Run test
self.manager._wait_for_asic_cluster_readiness(self.API_KEY)
# Verification
self._verify_watch()
def _verify_run_in_executor(
self, call_args_list: typing.List[unittest.mock._Call]
) -> None:
"""Verifies calls to the mocked_event_loop.run_in_executor mock.
Args:
funcs: List of mocked AsyncResult object that were passed to the
ASICWorkerManager._wait_for_thread method.
"""
self.assertEqual(
self.mocked_event_loop.run_in_executor.call_args_list,
call_args_list,
)
def _verify_redis_store(self, func: str) -> None:
"""Verifies calls to mocked_redis_store mock.
Args:
func: Expected called mocked method.
"""
getattr(self.mocked_redis_store, func).assert_called_once_with(
self.API_KEY
)
def _verify_watch(self, **kwargs) -> None:
"""Verifies calls to the mocked_watch mock."""
self.mocked_watch.assert_called_once_with()
self.mocked_watch.stream.assert_called_once_with(
func=self.mocked_corev1api.list_namespaced_asic_cluster,
label_selector=f"id={self.API_KEY_HASH}",
namespace="default",
**kwargs,
)
self.mocked_watch.stop.assert_called_once_with()
|
from flask_pymongo import PyMongo
from flask import Flask, flash, render_template, redirect, request, url_for, \
session, flash, Markup
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
import os
from os import path
if path.exists("env.py"):
import env
app = Flask(__name__)
# Mongo connection string comes from the environment (set locally by env.py).
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
# NOTE(review): a fresh random SECRET_KEY per process invalidates all
# existing session cookies on every restart — confirm this is intended.
app.config['SECRET_KEY'] = os.urandom(32)
mongo = PyMongo(app)
# Home page: show every book alongside all stored comments.
@app.route('/')
def index():
    '''Render the home page listing all books and their comments.'''
    all_books = [book for book in mongo.db.bookInfo.find()]
    all_comments = [comment for comment in mongo.db.comments.find()]
    return render_template('index.html', books=all_books, comments=all_comments)
# Register Page
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new user account.

    GET renders the registration form.  POST checks that the requested
    username is free, stores the new user with a hashed password, logs the
    user in via the session cookie, and redirects to the home page.
    """
    if request.method == "POST":
        username = request.form.get("username").lower()
        existing_user = mongo.db.users.find_one({"username": username})
        # If the username already exists a flash message will notify the user
        if existing_user:
            flash("Username already taken")
            return redirect(url_for("register"))
        # Store the username and hashed password (never the plain text).
        # Renamed from `register`, which shadowed this view function.
        new_user = {
            "username": username,
            "password": generate_password_hash(request.form.get("password"))
        }
        mongo.db.users.insert_one(new_user)
        # Save user's details into the session cookie
        session["user"] = username
        # Fixed user-facing typo: "sucessfull" -> "successful".
        flash("Registration successful")
        return redirect(url_for("index", username=session["user"]))
    return render_template("register.html")
# Login Page
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate an existing user and start a session."""
    if request.method == "POST":
        submitted_name = request.form.get("username").lower()
        user_record = mongo.db.users.find_one({"username": submitted_name})
        if not user_record:
            # Unknown username: report invalid credentials without
            # revealing whether the account exists.
            flash("Incorrect credentials")
            return redirect(url_for("login"))
        # Compare the submitted password against the stored hash.
        if check_password_hash(
                user_record["password"], request.form.get("password")):
            session["user"] = submitted_name
            flash("Nice to see you, {}!".format(
                request.form.get("username")))
            return redirect(url_for(
                "index", username=session["user"]))
        # Wrong password: report invalid credentials.
        flash("Incorrect credentials.")
        return redirect(url_for("login"))
    return render_template('login.html')
# User Profile Page
@app.route("/profile/<username>", methods=['GET', 'POST'])
def profile(username):
    """Render the profile page for the currently logged-in user.

    Fix: the original indexed session["user"] unconditionally, raising
    KeyError for anonymous visitors; they are now sent to the login page.
    """
    if not session.get("user"):
        return redirect(url_for("login"))
    # Get the session user's username from the database
    username = mongo.db.users.find_one(
        {"username": session["user"]})["username"]
    # Display user's username on the page
    return render_template("profile.html", username=username)
# Logout
@app.route("/logout")
def logout():
    """End the current session and return to the login page."""
    # Remove user from current session cookie
    flash("You have been logged out. See you soon!")
    session.pop("user")
    return redirect(url_for("login"))
# Delete profile
@app.route("/delete-profile/<user_id>", methods=["GET", "POST"])
def delete_profile(user_id):
    """Delete the logged-in user's account and clear the session.

    Uses delete_one instead of the deprecated Collection.remove.
    """
    # Take the session user's username and remove the account.
    mongo.db.users.delete_one({"username": session["user"]})
    # Clear the session cookie after the account has been deleted.
    session.clear()
    flash("Your profile has been deleted.")
    return redirect(url_for("index"))
# Add comment to the books
@app.route("/add-comment/<book_id>", methods=["GET", "POST"])
def add_comment(book_id):
    """Attach a new comment, written by the session user, to a book."""
    # Look up the book being commented on.
    book = mongo.db.bookInfo.find_one({"_id": ObjectId(book_id)})
    if request.method != "POST":
        return render_template("add-comment.html", book=book)
    # Persist the comment in the shape the comments collection expects.
    mongo.db.comments.insert_one({
        "title": book["title"],
        "comment": request.form.get("comment"),
        "username": session["user"]
    })
    flash("Comment added")
    return redirect(url_for("index"))
# Delete comment
@app.route("/delete-comment/<comment_id>", methods=["GET", "POST"])
def delete_comment(comment_id):
    """Delete a single comment identified by its ObjectId.

    Uses delete_one instead of the deprecated Collection.remove.
    """
    mongo.db.comments.delete_one({"_id": ObjectId(comment_id)})
    flash("Comment deleted")
    return redirect(url_for("index"))
# Update comment
@app.route("/update-comment/<comment_id>", methods=["GET", "POST"])
def update_comment(comment_id):
    """Edit an existing comment's text, keeping its title and author."""
    comments = mongo.db.comments.find_one({"_id": ObjectId(comment_id)})
    if request.method == "POST":
        # replace_one supersedes the deprecated Collection.update, which
        # performed a full-document replacement when given no operators.
        mongo.db.comments.replace_one({'_id': ObjectId(comment_id)}, {
            "title": comments["title"],
            "comment": request.form.get("comment"),
            "username": session["user"]
        })
        flash("Comment updated")
        return redirect(url_for("index"))
    return render_template("update-comment.html", comments=comments)
if __name__ == '__main__':
    # Default the port so a missing PORT env var no longer crashes int(None).
    app.run(host=os.environ.get('IP'),
            port=int(os.environ.get('PORT', 5000)),
            debug=False)
|
# Done by Lauro Ribeiro (12/02/2021)
# Tutorial 11 - AND/OR
import sqlite3

# Connect to the database.
conn = sqlite3.connect('customer.db')
try:
    # Create a cursor
    c = conn.cursor()
    # Query the database - AND/OR.  'OR' would match rows satisfying either
    # condition, instead of requiring both to be true.
    c.execute("SELECT rowid, * FROM customers WHERE last_name LIKE '%Ribeiro' \
AND email LIKE '%gmail.com'")
    for item in c.fetchall():
        print(item)
finally:
    # A read-only SELECT needs no commit; just guarantee the connection is
    # released even if the query raises.
    conn.close()
<info descr="null">async</info> def <info descr="null">foo</info>():
<info descr="null">await</info> x
await = 0
|
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from utils.utils import bbox_iou, merge_bboxes
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from nets.yolo_training import Generator
import cv2
import matplotlib.pyplot as plt
from random import uniform
class YoloDataset(Dataset):
    """Dataset for YOLO training.

    Loads annotation lines of the form "path x1,y1,x2,y2,cls ...", applies
    random augmentation (optionally mosaic), and yields normalized CHW image
    arrays plus boxes encoded as (cx, cy, w, h, class) fractions.
    """

    def __init__(self, train_lines, image_size, mosaic=True, is_train=True, max_val=255):
        # train_lines: one annotation line per image.
        # image_size: target (h, w, ...) shape.
        # max_val: pixel scale — 255 for 8-bit input, larger for 16-bit data.
        super(YoloDataset, self).__init__()
        self.train_lines = train_lines
        self.train_batches = len(train_lines)
        self.image_size = image_size
        self.mosaic = mosaic
        # Alternates each __getitem__ call to interleave mosaic samples.
        self.flag = True
        self.is_train = is_train
        self.max_val = max_val

    def __len__(self):
        return self.train_batches

    def rand(self, a=0, b=1):
        """Uniform random float in [a, b)."""
        return np.random.rand() * (b - a) + a

    def get_16_bit_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):
        """Random preprocessing for real-time augmentation of 16-bit images."""
        line = annotation_line.split()
        # cv2.imread with -1 keeps the original bit depth.
        image = cv2.imread(line[0], -1)
        # NOTE(review): cv2 shape is (height, width, channels), so `iw`/`ih`
        # are swapped relative to their names; the placement code below uses
        # the same swapped convention consistently — confirm before changing.
        iw, ih, _ = image.shape
        h, w = input_shape
        box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
        nw = iw
        nh = ih
        # Random placement offset of the source image inside the canvas.
        dx = int(uniform(0, w - iw))
        dy = int(uniform(0, h - ih))
        # Fill the canvas with random 16-bit noise, then paste the image.
        new_image = np.random.randint(0, self.max_val, (w, h, 3), dtype=np.uint16)
        new_image[dx:nw + dx, dy:nh + dy, ] = image
        image = new_image
        flip = False
        # Hue/saturation/value jitter in HSV space, scaled by max_val.
        hue = self.rand(-hue, hue)
        sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)
        val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)
        x = cv2.cvtColor(np.array(image, np.float32) / self.max_val, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue * 360
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x[:, :, 0] > 360, 0] = 360
        x[:, :, 1:][x[:, :, 1:] > 1] = 1
        x[x < 0] = 0
        image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * self.max_val
        # Shift target box coordinates by the placement offset and clip.
        box_data = np.zeros((len(box), 5))
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] + dy
            box[:, [1, 3]] = box[:, [1, 3]] + dx
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1, box_h > 1)]  # keep valid boxes
            box_data = np.zeros((len(box), 5))
            box_data[:len(box)] = box
        return image_data, box_data

    def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):
        """Random preprocessing for real-time data augmentation."""
        line = annotation_line.split()
        image = Image.open(line[0])
        iw, ih = image.size
        h, w = input_shape
        box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
        if not random:
            # Deterministic letterbox resize (used for validation).
            scale = min(w / iw, h / ih)
            nw = int(iw * scale)
            nh = int(ih * scale)
            dx = (w - nw) // 2
            dy = (h - nh) // 2
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image, np.float32)
            # Adjust target box coordinates.
            box_data = np.zeros((len(box), 5))
            if len(box) > 0:
                np.random.shuffle(box)
                box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
                box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
                box[:, 0:2][box[:, 0:2] < 0] = 0
                box[:, 2][box[:, 2] > w] = w
                box[:, 3][box[:, 3] > h] = h
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w > 1, box_h > 1)]  # keep valid boxes
                box_data = np.zeros((len(box), 5))
                box_data[:len(box)] = box
            return image_data, box_data
        # Resize the image with random aspect-ratio jitter and scale.
        new_ar = w / h * self.rand(1 - jitter, 1 + jitter) / self.rand(1 - jitter, 1 + jitter)
        scale = self.rand(.25, 2)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)
        # Place the image at a random offset on a random-color canvas.
        dx = int(self.rand(0, w - nw))
        dy = int(self.rand(0, h - nh))
        new_image = Image.new('RGB', (w, h), (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)))
        new_image.paste(image, (dx, dy))
        image = new_image
        # Random horizontal flip.
        flip = self.rand() < .5
        if flip:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        # Hue/saturation/value jitter in HSV space.
        hue = self.rand(-hue, hue)
        sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)
        val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)
        x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue * 360
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x[:, :, 0] > 360, 0] = 360
        x[:, :, 1:][x[:, :, 1:] > 1] = 1
        x[x < 0] = 0
        image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255
        # Adjust target box coordinates for the resize/placement/flip.
        box_data = np.zeros((len(box), 5))
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            if flip:
                box[:, [0, 2]] = w - box[:, [2, 0]]
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1, box_h > 1)]  # keep valid boxes
            box_data = np.zeros((len(box), 5))
            box_data[:len(box)] = box
        return image_data, box_data

    def get_random_data_with_Mosaic(self, annotation_line, input_shape, hue=.1, sat=1.5, val=1.5):
        """Mosaic augmentation: combine four annotated images into one."""
        h, w = input_shape
        min_offset_x = 0.3
        min_offset_y = 0.3
        scale_low = 1 - min(min_offset_x, min_offset_y)
        scale_high = scale_low + 0.2
        image_datas = []
        box_datas = []
        index = 0
        # Paste anchors for the four quadrants.
        place_x = [0, 0, int(w * min_offset_x), int(w * min_offset_x)]
        place_y = [0, int(h * min_offset_y), int(h * min_offset_y), 0]
        for line in annotation_line:
            # Split each annotation line.
            line_content = line.split()
            # Open the image.
            image = Image.open(line_content[0])
            image = image.convert("RGB")
            # Image size.
            iw, ih = image.size
            # Box positions.
            box = np.array([np.array(list(map(int, box.split(',')))) for box in line_content[1:]])
            # Random horizontal flip (only when boxes exist).
            flip = self.rand() < .5
            if flip and len(box) > 0:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
                box[:, [0, 2]] = iw - box[:, [2, 0]]
            # Scale the incoming image.
            new_ar = w / h
            scale = self.rand(scale_low, scale_high)
            if new_ar < 1:
                nh = int(scale * h)
                nw = int(nh * new_ar)
            else:
                nw = int(scale * w)
                nh = int(nw / new_ar)
            image = image.resize((nw, nh), Image.BICUBIC)
            # Hue/saturation/value jitter.
            hue = self.rand(-hue, hue)
            sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)
            val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)
            x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
            x[..., 0] += hue * 360
            x[..., 0][x[..., 0] > 1] -= 1
            x[..., 0][x[..., 0] < 0] += 1
            x[..., 1] *= sat
            x[..., 2] *= val
            x[x[:, :, 0] > 360, 0] = 360
            x[:, :, 1:][x[:, :, 1:] > 1] = 1
            x[x < 0] = 0
            image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)  # numpy array, 0 to 1
            image = Image.fromarray((image * 255).astype(np.uint8))
            # Paste the image at the quadrant position for this index.
            dx = place_x[index]
            dy = place_y[index]
            new_image = Image.new('RGB', (w, h),
                                  (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)
            index = index + 1
            box_data = []
            # Re-map the boxes to the pasted position.
            if len(box) > 0:
                np.random.shuffle(box)
                box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
                box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
                box[:, 0:2][box[:, 0:2] < 0] = 0
                box[:, 2][box[:, 2] > w] = w
                box[:, 3][box[:, 3] > h] = h
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w > 1, box_h > 1)]
                box_data = np.zeros((len(box), 5))
                box_data[:len(box)] = box
            image_datas.append(image_data)
            box_datas.append(box_data)
        # Crop the four images at a random split point and stitch them.
        cutx = np.random.randint(int(w * min_offset_x), int(w * (1 - min_offset_x)))
        cuty = np.random.randint(int(h * min_offset_y), int(h * (1 - min_offset_y)))
        new_image = np.zeros([h, w, 3])
        new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
        new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
        new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
        new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
        # Merge boxes that straddle the stitch boundaries.
        new_boxes = np.array(merge_bboxes(box_datas, cutx, cuty))
        return new_image, new_boxes

    def __getitem__(self, index):
        lines = self.train_lines
        n = self.train_batches
        index = index % n
        if self.mosaic:
            # Alternate between mosaic and plain augmentation via self.flag.
            if self.flag and (index + 4) < n:
                img, y = self.get_random_data_with_Mosaic(lines[index:index + 4], self.image_size[0:2])
            else:
                img, y = self.get_random_data(lines[index], self.image_size[0:2], random=self.is_train)
            self.flag = bool(1 - self.flag)
        else:
            if self.max_val == 255:
                img, y = self.get_random_data(lines[index], self.image_size[0:2], random=self.is_train)
            else:
                img, y = self.get_16_bit_random_data(lines[index], self.image_size[0:2], random=self.is_train)
        if len(y) != 0:
            # Convert corner coordinates to 0-1 fractions of the image size.
            boxes = np.array(y[:, :4], dtype=np.float32)
            boxes[:, 0] = boxes[:, 0] / self.image_size[1]
            boxes[:, 1] = boxes[:, 1] / self.image_size[0]
            boxes[:, 2] = boxes[:, 2] / self.image_size[1]
            boxes[:, 3] = boxes[:, 3] / self.image_size[0]
            boxes = np.maximum(np.minimum(boxes, 1), 0)
            # (x1, y1, x2, y2) -> (cx, cy, bw, bh).
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
            boxes[:, 0] = boxes[:, 0] + boxes[:, 2] / 2
            boxes[:, 1] = boxes[:, 1] + boxes[:, 3] / 2
            y = np.concatenate([boxes, y[:, -1:]], axis=-1)
        img = np.array(img, dtype=np.float32)
        # NOTE(review): divides by 255.0 even when max_val != 255, so 16-bit
        # data is not normalized to [0, 1] here — confirm this is intended.
        tmp_inp = np.transpose(img / 255.0, (2, 0, 1))
        tmp_targets = np.array(y, dtype=np.float32)
        return tmp_inp, tmp_targets
# Used as collate_fn for the DataLoader.
def yolo_dataset_collate(batch):
    """Collate a batch: stack images into one ndarray, keep boxes as a list.

    Boxes stay a Python list because each image may carry a different
    number of target boxes.
    """
    images = [pair[0] for pair in batch]
    bboxes = [pair[1] for pair in batch]
    return np.array(images), bboxes
"""
This example runs a dummy 1D bayesian optimization.
1. set up data
2. Then file runs the run_one_1d_bayesian_optimization function.
The function returns a grid of predicted mean/variance/etc.
3. Finally step plots the predicted mean & variance.
"""
from src.bayesian import run_one_1d_bayesian_optimization
import plotly.graph_objects as go
# 1. set up data
data = [{"x": 1, "y": 0},
{"x": 1.5, "y": 1.5},
{"x": 2, "y": 1},
{"x": 3, "y": -1}]
# 2. run bayesian optimization to get suggestion
mu, sigma, grid, s = run_one_1d_bayesian_optimization(data=data, limits={"x": (0, 4)})
# mu represents the expected return and
# sigma represents the standard deviation on this estimate.
# 3. plot
traces = []
traces.append(go.Scatter(x=grid, y=mu, mode='lines', name='mu'))
traces.append(go.Scatter(
x=[s,s],
y=[-2,3],
mode='lines',name="sugestion"))
traces.append(go.Scatter(x=grid, y=mu-sigma,
fill=None,
mode='lines',
line_color='indigo', name='muยฑsigma'
))
traces.append(go.Scatter(
x=grid,
y=mu+sigma,
fill='tonexty', # fill area between trace0 and trace1
mode='lines', line_color='indigo',showlegend=False))
fig = go.Figure(data=traces)
fig.update_layout(
title="1D Bayesian Optimization",
xaxis_title="Optimization space",
yaxis_title="Maximum",
)
fig.show()
|
"""
Code to develop and test the sinking algorithm.
"""
import numpy as np
import matplotlib.pyplot as plt
# z-coordinates (bottom to top, positive up)
H = 50 # max depth [m]
N = 25 # number of vertical grid cells
C = np.exp(-np.linspace(-2,2,N)**2)
Wsink = .1#80 # m per day
dt = .1 # time step in days
Dz = H/N
z_w = np.arange(-H,Dz,Dz)
z_rho = z_w[:-1] + Dz/2
# new algorithm
h = Wsink * dt
nn = int(np.floor(h / Dz))
delta = h - nn * Dz
Next = nn + 2
NN = N + Next
Cext = np.concatenate((C, np.zeros(Next)))
Zwext = np.concatenate((z_w, np.arange(Dz,Next*Dz + Dz,Dz)))
Zext = Zwext[:-1] + Dz/2
Cnew = np.zeros(N)
for ii in range(N):
Cnew[ii] = Cext[ii + nn]*(Dz - delta)/Dz + Cext[ii + nn + 1]*(delta/Dz)
Cnet_old = Dz * np.sum(C)
Cnet_new = Dz * np.sum(Cnew)
Cnet_lost = Cnet_old - Cnet_new
print('Cnet_old = %0.7f' % (Cnet_old))
print('Cnet_new = %0.7f' % (Cnet_new))
print('Cnet_lost = %0.7f' % (Cnet_lost))
plt.close('all')
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
ax.plot(Cext,Zext,'-og')
# ax.plot(Cb,Zb,'-b')
# ax.plot(Cb,Zb_new,'--b')
ax.plot(Cnew,z_rho,'-or')
plt.show()
|
import os
from django.forms.fields import FileField
from crispy_forms.layout import HTML
from django_modals.modal_upload import ModalAjaxFileMixin
from django_modals.forms import CrispyForm
from django_modals.modals import FormModal
from django_modals.helper import progress_bar
from .views import MainMenuTemplateView
class Upload(ModalAjaxFileMixin, MainMenuTemplateView):
    """Page that accepts AJAX file uploads and lists the files in /media/."""
    template_name = 'example_views/upload.html'

    @staticmethod
    def upload_files(filename, _size, file, **_kwargs):
        """Persist an uploaded file under /media/.

        Only the base name of the client-supplied filename is used: a raw
        join would allow path traversal (e.g. '../../etc/x') out of /media/.
        """
        path = '/media/' + os.path.basename(filename)
        with open(path, 'wb+') as destination:
            destination.write(file.read())

    def get_context_data(self, **kwargs):
        """Add the current /media/ directory listing to the template context."""
        context = super().get_context_data(**kwargs)
        context['files'] = os.listdir('/media/')
        return context
class UploadForm(CrispyForm):
    """Single-file upload form with a progress bar."""
    File = FileField()

    class Meta:
        # Enables the upload progress-bar handling in the modal framework.
        progress_bar = True

    def post_init(self, *args, **kwargs):
        """Return the crispy layout: the file field followed by a progress bar."""
        return ['File', HTML(progress_bar())]
class UploadModal(FormModal):
    """Modal dialog wrapping UploadForm."""
    form_class = UploadForm
    modal_title = 'Upload Form'

    # Due to timeout in modals.js this will not upload large files
    def form_valid(self, form):
        # NOTE(review): only echoes the cleaned data and returns None, so
        # whatever default response FormModal provides applies — verify.
        print(form.cleaned_data)
|
import numpy as np


def draw_line(xyz1, xyz2, num=1000):
    """Return `num` points linearly interpolated between two 3-D points.

    Args:
        xyz1, xyz2: start and end points (any sequence; first 3 entries used).
        num: number of interpolated points, endpoints included.

    Returns:
        (num, 3) float array running from xyz1 to xyz2.
    """
    start = np.asarray(xyz1, dtype=float)[:3]
    end = np.asarray(xyz2, dtype=float)[:3]
    # np.linspace broadcasts over the coordinate axis, producing (num, 3)
    # directly instead of three per-axis linspace calls plus concatenate.
    return np.linspace(start, end, num)


def draw_3d_box_pcds(center, lx, ly, lz, pointnum):
    """Return a point cloud tracing the 12 edges of an axis-aligned box.

    Args:
        center: (x, y, z) box center.
        lx, ly, lz: full box extents along x, y and z.
        pointnum: number of points per edge.

    Returns:
        (12 * pointnum, 3) array; edge order matches the original
        implementation (pt0..pt7 corner numbering).
    """
    cx, cy, cz = center[0], center[1], center[2]
    hx, hy, hz = lx / 2.0, ly / 2.0, lz / 2.0
    corners = np.array([
        [cx - hx, cy - hy, cz - hz],  # pt0
        [cx + hx, cy - hy, cz - hz],  # pt1
        [cx + hx, cy - hy, cz + hz],  # pt2
        [cx - hx, cy - hy, cz + hz],  # pt3
        [cx - hx, cy + hy, cz - hz],  # pt4
        [cx + hx, cy + hy, cz - hz],  # pt5
        [cx + hx, cy + hy, cz + hz],  # pt6
        [cx - hx, cy + hy, cz + hz],  # pt7
    ])
    # Face at y = cy - hy, face at y = cy + hy, then the connecting edges.
    edges = [(0, 1), (1, 2), (2, 3), (3, 0),
             (4, 5), (5, 6), (6, 7), (7, 4),
             (0, 4), (1, 5), (2, 6), (3, 7)]
    return np.concatenate(
        [draw_line(corners[a], corners[b], pointnum) for a, b in edges], axis=0)


def draw_3d_box_pcds2(center, lx1, lx2, ly1, ly2, lz1, lz2, pointnum):
    """Edge point cloud for a box with asymmetric extents about `center`.

    The box spans [center - l?1, center + l?2] along each axis; the call is
    re-centered and forwarded to draw_3d_box_pcds.
    """
    xmin, xmax = center[0] - lx1, center[0] + lx2
    ymin, ymax = center[1] - ly1, center[1] + ly2
    zmin, zmax = center[2] - lz1, center[2] + lz2
    mid = [(xmin + xmax) / 2.0, (ymin + ymax) / 2.0, (zmin + zmax) / 2.0]
    return draw_3d_box_pcds(center=mid, lx=lx1 + lx2, ly=ly1 + ly2,
                            lz=lz1 + lz2, pointnum=pointnum)
def write_ply_color(points, labels, out_filename, num_classes=None):
    """Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file.

    Args:
        points: (N, 3) array of xyz coordinates.
        labels: (N,) array of integer class labels.
        out_filename: output path.
        num_classes: number of classes; inferred as max(labels) + 1 when None.
    """
    import matplotlib.pyplot as pyplot
    labels = labels.astype(int)
    N = points.shape[0]
    if num_classes is None:
        num_classes = np.max(labels) + 1
    else:
        assert num_classes > np.max(labels)
    # One distinct HSV hue per class.
    colors = [pyplot.cm.hsv(i / float(num_classes)) for i in range(num_classes)]
    # Context manager closes the file even if a write fails (the original
    # left the handle open on error).
    with open(out_filename, 'w') as fout:
        for i in range(N):
            c = colors[labels[i]]
            c = [int(x * 255) for x in c]
            fout.write('v %f %f %f %d %d %d\n' % (points[i,0], points[i,1], points[i,2], c[0], c[1], c[2]))
def write_ply_rgb(points, colors, out_filename):
    """Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file.

    Args:
        points: (N, 3) array of xyz coordinates.
        colors: (N, 3) array of RGB values in [0, 255].
        out_filename: output path.
    """
    colors = colors.astype(int)
    N = points.shape[0]
    # Context manager replaces manual open/close so the handle is always
    # released, even on a partial write failure.
    with open(out_filename, 'w') as fout:
        for i in range(N):
            c = colors[i, :]
            fout.write('v %f %f %f %d %d %d\n' % (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
# Interactive census: counts adults (> 18), registered men, and women
# under 20. Prompts remain in Portuguese (user-facing strings).
pessoas_maiores_idade = 0
homens = 0
mulheres_menos_20_anos = 0
while True:
    print('-' * 30)
    print('{:^30}'.format('CADASTRE UMA PESSOA'))
    print('-' * 30)
    idade = int(input('Idade: '))
    if idade > 18:
        pessoas_maiores_idade += 1
    sexo = ""
    while sexo != 'M' and sexo != 'F':
        # [:1] instead of [0]: an empty answer yields "" and re-prompts
        # instead of raising IndexError.
        sexo = input('Sexo [M/F]: ').strip().upper()[:1]
    print('-' * 30)
    if sexo == 'M':
        homens += 1
    else:
        if idade < 20:
            mulheres_menos_20_anos += 1
    continuar = ""
    while continuar != 'S' and continuar != 'N':
        continuar = input('Quer continuar [S/N]? ').strip().upper()[:1]
    if continuar == 'N':
        break
print(f"""Total de pessoas com mais de 18 anos: {pessoas_maiores_idade}
Homens cadastrados: {homens}
Mulheres com menos de 20 anos: {mulheres_menos_20_anos}""")
from .internal import NeighborDistanceNormalization
import flowws
from flowws import Argument as Arg
from geometric_algebra_attention import keras as gala
from tensorflow import keras
import numpy as np
import tensorflow as tf
# Maps a normalization name to a factory producing the list of layers to
# insert; each factory receives the attention rank (unused by these).
NORMALIZATION_LAYERS = {
    None: lambda _: [],
    'none': lambda _: [],
    'batch': lambda _: [keras.layers.BatchNormalization()],
    'layer': lambda _: [keras.layers.LayerNormalization()],
    'momentum': lambda _: [gala.MomentumNormalization()],
}
# Help-text suffix listing the valid normalization choices.
NORMALIZATION_LAYER_DOC = ' (any of {})'.format(
    ','.join(map(str, NORMALIZATION_LAYERS))
)
class VAESampler(keras.layers.Layer):
    """Reparameterization-trick sampler with a built-in KL loss term.

    Projects its input to a mean and log-variance of dimension `dim`,
    registers the scaled KL divergence as a loss and metric, and returns
    a sample from N(mean, exp(log_var)).
    """

    def __init__(self, dim, loss_sum_axes=(-1,), loss_scale=1.0, *args, **kwargs):
        self.dim = dim  # latent dimensionality
        self.loss_sum_axes = loss_sum_axes  # axes summed before taking the mean
        self.loss_scale = loss_scale  # weight of the KL term
        self.z_mean_projection = keras.layers.Dense(self.dim)
        self.z_log_var_projection = keras.layers.Dense(self.dim)
        super().__init__(*args, **kwargs)

    def call(self, inputs):
        z_mean = self.z_mean_projection(inputs)
        z_log_var = self.z_log_var_projection(inputs)
        shape = tf.shape(z_mean)
        # Standard VAE KL divergence against a unit Gaussian prior.
        loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
        loss = tf.reduce_mean(tf.reduce_sum(loss, self.loss_sum_axes))
        loss = loss * self.loss_scale
        self.add_metric(loss, name='kl_loss')
        self.add_loss(loss)
        # Reparameterized sample: mean + std * eps.
        return tf.random.normal(shape, z_mean, tf.exp(0.5 * z_log_var))

    def get_config(self):
        # Serialize constructor arguments for model save/load.
        result = super().get_config()
        result['dim'] = self.dim
        result['loss_sum_axes'] = tuple(self.loss_sum_axes)
        result['loss_scale'] = self.loss_scale
        return result
def expand_neighborhood_dim(x):
    """Insert a singleton axis just before the last (feature) axis of `x`."""
    return x[..., np.newaxis, :]
def stack_vector_layers(
    AttentionVector,
    r,
    v,
    n_vectors,
    scorefun,
    valuefun,
    scalefun,
    join_fun,
    merge_fun,
    n_dim=32,
    rank=2,
    invar_mode='full',
    covar_mode='full',
):
    """Apply `n_vectors` independent reducing attention layers to (r, v)
    and concatenate their outputs along a new neighborhood axis."""

    def one_vector_output():
        # Each output vector gets its own freshly-constructed attention layer.
        attention = AttentionVector(
            scorefun(),
            valuefun(n_dim),
            scalefun(1),
            reduce=True,
            invariant_mode=invar_mode,
            covariant_mode=covar_mode,
            merge_fun=merge_fun,
            join_fun=join_fun,
            rank=rank,
        )
        reduced = attention([r, v])
        return keras.layers.Lambda(expand_neighborhood_dim)(reduced)

    pieces = [one_vector_output() for _ in range(n_vectors)]
    return keras.layers.Concatenate(axis=-2)(pieces)
class SVDLayer(keras.layers.Layer):
    """Replace stacked vectors with the orthonormal factor of their SVD.

    Computes u @ v^T (singular values discarded); when fewer vectors than
    dimensions are given, a third row is appended via the cross product of
    the first two rows.
    """

    def build(self, input_shape):
        # Number of stacked vectors and their dimensionality.
        self.num_vectors, self.n_dim = input_shape[-2:]
        return super().build(input_shape)

    def call(self, inputs):
        s, u, v = tf.linalg.svd(inputs)
        # Orthonormal factor of the polar decomposition.
        # NOTE(review): this can be a reflection (det = -1); confirm
        # downstream use tolerates that.
        result = tf.matmul(u, v, adjoint_b=True)
        if self.num_vectors < self.n_dim:
            # Complete the frame with the cross product of the first two rows.
            cross = tf.linalg.cross(result[..., 0, :], result[..., 1, :])[..., None, :]
            result = tf.concat([result, cross], axis=-2)
        return result
class NormalizeLayer(keras.layers.Layer):
    """Scale each vector along the last axis to unit L2 norm."""

    def call(self, inputs):
        normalized, _norms = tf.linalg.normalize(inputs, axis=-1)
        return normalized
class StaticEmbedding(keras.layers.Embedding):
    """Embedding that ignores its input and returns the whole table —
    effectively a learned constant of shape (1, input_dim, output_dim)."""

    def call(self, inputs):
        # `inputs` only ties this layer into the graph; the lookup always
        # covers every index 0..input_dim-1.
        return super().call(tf.range(0, self.input_dim)[None, :])
@flowws.add_stage_arguments
class GalaBottleneckAutoencoder(flowws.Stage):
    """Reproduce point clouds after a bottleneck layer"""

    ARGS = [
        Arg('n_dim', '-n', int, 32, help='Working dimension of model'),
        Arg('dilation_factor', None, float, 2.0, help='Width factor for MLPs'),
        Arg(
            'block_nonlinearity',
            '-b',
            bool,
            True,
            help='Add a nonlinearity to each block',
        ),
        Arg(
            'residual',
            '-r',
            bool,
            True,
            help='Use residual connections over each block',
        ),
        Arg(
            'join_fun',
            '-j',
            str,
            'concat',
            help='Function to use for joining invariant and node-level signals',
        ),
        Arg(
            'merge_fun',
            '-m',
            str,
            'concat',
            help='Function to use for merging node-level signals',
        ),
        Arg('dropout', '-d', float, 0, help='Dropout probability within network'),
        Arg('num_blocks', None, int, 1, help='Number of blocks to use'),
        Arg(
            'num_vector_blocks',
            None,
            int,
            1,
            help='Number of vector-valued blocks to use',
        ),
        Arg('rank', None, int, 2, help='Attention calculation rank'),
        Arg(
            'activation',
            '-a',
            str,
            'relu',
            help='Activation function to use within network',
        ),
        Arg(
            'normalize_distances',
            None,
            str,
            help='Create scale-invariant networks by normalizing neighbor distances (mean/min)',
        ),
        Arg('invar_mode', '-i', str, 'full', help='Rotation-invariant mode switch'),
        Arg('covar_mode', '-c', str, 'full', help='Rotation-covariant mode switch'),
        Arg(
            'score_normalization',
            None,
            str,
            'layer',
            help=(
                'Normalizations to apply to score (attention) function'
                + NORMALIZATION_LAYER_DOC
            ),
        ),
        Arg(
            'value_normalization',
            None,
            str,
            'layer',
            help=(
                'Normalizations to apply to value function' + NORMALIZATION_LAYER_DOC
            ),
        ),
        Arg(
            'block_normalization',
            None,
            str,
            'layer',
            help=(
                'Normalizations to apply to the output of each attention block'
                + NORMALIZATION_LAYER_DOC
            ),
        ),
        Arg(
            'invariant_value_normalization',
            None,
            str,
            'momentum',
            help=(
                'Normalizations to apply to value function, before MLP layers'
                + NORMALIZATION_LAYER_DOC
            ),
        ),
        Arg(
            'equivariant_value_normalization',
            None,
            str,
            'layer',
            help=(
                'Normalizations to apply to equivariant results'
                + NORMALIZATION_LAYER_DOC
            ),
        ),
        Arg('variational', '-v', bool, True, help='If True, make a VAE'),
        Arg('vae_dim', None, int, 8, help='Dimensionality of latent space for VAE'),
        Arg(
            'vae_scale',
            None,
            float,
            1e-5,
            help='Loss term scale for variational component',
        ),
        Arg(
            'num_reference_vectors',
            None,
            int,
            2,
            help='Number of reference vectors to produce',
        ),
        Arg(
            'use_multivectors',
            None,
            bool,
            False,
            help='If True, use multivector intermediates for calculations',
        ),
        Arg(
            'cross_attention',
            None,
            bool,
            True,
            help='If True, generate embeddings using cross-attention between the '
            'generated basis set and vector intermediates',
        ),
    ]

    def run(self, scope, storage):
        """Build the autoencoder graph and publish it into `scope`.

        Produces `scope['model']` (full autoencoder), `scope['output']`
        (reconstructed vectors) and `scope['embedding_model']` (inputs ->
        bottleneck embedding).
        """
        use_weights = scope.get('use_bond_weights', False)
        n_dim = self.arguments['n_dim']
        dilation = self.arguments['dilation_factor']
        block_nonlin = self.arguments['block_nonlinearity']
        residual = self.arguments['residual']
        join_fun = self.arguments['join_fun']
        merge_fun = self.arguments['merge_fun']
        dropout = self.arguments['dropout']
        num_blocks = self.arguments['num_blocks']
        rank = self.arguments['rank']
        activation = self.arguments['activation']
        distance_norm = self.arguments.get('normalize_distances', None)
        invar_mode = self.arguments['invar_mode']
        covar_mode = self.arguments['covar_mode']
        n_ref = self.arguments['num_reference_vectors']
        DropoutLayer = scope.get('dropout_class', keras.layers.Dropout)
        # Look up the configured normalization layers for a given slot name.
        normalization_getter = lambda key: (
            NORMALIZATION_LAYERS[self.arguments.get(key + '_normalization', None)](rank)
        )
        if self.arguments['use_multivectors']:
            Attention = gala.MultivectorAttention
            AttentionVector = gala.Multivector2MultivectorAttention
            AttentionLabeled = gala.LabeledMultivectorAttention
            maybe_upcast_vector = gala.Vector2Multivector()
            maybe_downcast_vector = gala.Multivector2Vector()
        else:
            Attention = gala.VectorAttention
            AttentionVector = gala.Vector2VectorAttention
            AttentionLabeled = gala.LabeledVectorAttention
            # Identity casts when staying in plain-vector mode.
            maybe_upcast_vector = lambda x: x
            maybe_downcast_vector = lambda x: x
        type_dim = 2 * scope.get('max_types', 1)
        x_in = keras.layers.Input((None, 3), name='rij')
        v_in = keras.layers.Input((None, type_dim), name='tij')
        w_in = None
        inputs = [x_in, v_in]
        if use_weights:
            # Optional per-bond weight input.
            w_in = keras.layers.Input((None,), name='wij')
            inputs = [x_in, v_in, w_in]
        dilation_dim = int(np.round(n_dim * dilation))

        def make_scorefun():
            """MLP producing a scalar attention score."""
            layers = [keras.layers.Dense(dilation_dim)]
            layers.extend(normalization_getter('score'))
            layers.append(keras.layers.Activation(activation))
            if dropout:
                layers.append(DropoutLayer(dropout))
            layers.append(keras.layers.Dense(1))
            return keras.models.Sequential(layers)

        def make_valuefun(dim, in_network=True):
            """MLP producing a `dim`-dimensional value signal."""
            layers = []
            if in_network:
                layers.extend(normalization_getter('invariant_value'))
            layers.append(keras.layers.Dense(dilation_dim))
            layers.extend(normalization_getter('value'))
            layers.append(keras.layers.Activation(activation))
            if dropout:
                layers.append(DropoutLayer(dropout))
            layers.append(keras.layers.Dense(dim))
            return keras.models.Sequential(layers)

        def make_block(last_x, last):
            """One attention block updating (equivariant, invariant) signals."""
            residual_in_x = last_x
            residual_in = last
            if self.arguments['use_multivectors']:
                arg = [last_x, last, w_in] if use_weights else [last_x, last]
                last_x = gala.Multivector2MultivectorAttention(
                    make_scorefun(),
                    make_valuefun(n_dim),
                    make_valuefun(1),
                    False,
                    rank=rank,
                    join_fun=join_fun,
                    merge_fun=merge_fun,
                    invariant_mode=invar_mode,
                    covariant_mode=covar_mode,
                )(arg)
            arg = [last_x, last, w_in] if use_weights else [last_x, last]
            last = Attention(
                make_scorefun(),
                make_valuefun(n_dim),
                False,
                rank=rank,
                join_fun=join_fun,
                merge_fun=merge_fun,
                invariant_mode=invar_mode,
                covariant_mode=covar_mode,
            )(arg)
            if block_nonlin:
                last = make_valuefun(n_dim, in_network=False)(last)
            if residual:
                last = last + residual_in
            for layer in normalization_getter('block'):
                last = layer(last)
            if self.arguments['use_multivectors']:
                last_x = residual_in_x + last_x
                for layer in normalization_getter('equivariant_value'):
                    last_x = layer(last_x)
            return last_x, last

        def make_vector_block(rs, vs):
            """One vector-valued attention block with optional residual."""
            residual_in = rs
            rs = AttentionVector(
                make_scorefun(),
                make_valuefun(n_dim),
                make_valuefun(1),
                False,
                rank=rank,
                join_fun=join_fun,
                invariant_mode=invar_mode,
                covariant_mode=covar_mode,
                merge_fun=merge_fun,
            )([rs, vs])
            if residual:
                rs = rs + residual_in
            for layer in normalization_getter('equivariant_value'):
                rs = layer(rs)
            return rs

        def make_labeled_block(last):
            """Decode vectors from the bottleneck using the reference frame."""
            last_x = AttentionLabeled(
                make_scorefun(),
                make_valuefun(n_dim),
                make_valuefun(1),
                True,
                rank=rank,
                join_fun=join_fun,
                merge_fun=merge_fun,
                invariant_mode=invar_mode,
                covariant_mode=covar_mode,
            )([last, (reference_last_x, reference_embedding)])
            return last_x

        last_x = x_in
        if distance_norm in ('mean', 'min'):
            last_x = NeighborDistanceNormalization(distance_norm)(last_x)
        elif distance_norm:
            raise NotImplementedError(distance_norm)
        last_x = maybe_upcast_vector(last_x)
        last = keras.layers.Dense(n_dim)(v_in)
        # Encoder: stacked attention blocks.
        for _ in range(num_blocks):
            last_x, last = make_block(last_x, last)
        # Build an orthonormal reference frame from learned vectors.
        reference_last_x = stack_vector_layers(
            AttentionVector,
            last_x,
            last,
            n_ref,
            make_scorefun,
            make_valuefun,
            make_valuefun,
            join_fun,
            merge_fun,
            n_dim,
            rank,
            invar_mode,
            covar_mode,
        )
        reference_last_x = SVDLayer()(maybe_downcast_vector(reference_last_x))
        reference_last_x = maybe_upcast_vector(reference_last_x)
        # SVDLayer pads to 3 frame vectors, so the embedding table must too.
        n_ref = max(n_ref, 3)
        reference_embedding = StaticEmbedding(n_ref, n_dim)(x_in)
        embedding = last
        if self.arguments['cross_attention']:
            # Attend between intermediates and the generated reference basis.
            arg = [last_x, last, w_in] if use_weights else [last_x, last]
            arg = [arg, [reference_last_x, reference_embedding]]
            embedding = last = Attention(
                make_scorefun(),
                make_valuefun(n_dim),
                False,
                rank=rank,
                join_fun=join_fun,
                merge_fun=merge_fun,
                invariant_mode=invar_mode,
                covariant_mode=covar_mode,
            )(arg)
        if self.arguments['variational']:
            # VAE bottleneck; the embedding model exposes the mean, not samples.
            samp = VAESampler(
                self.arguments['vae_dim'], (-1,), self.arguments['vae_scale']
            )
            embedding = samp.z_mean_projection(last)
            last = samp(last)
            if n_dim != self.arguments['vae_dim']:
                last = keras.layers.Dense(n_dim)(last)
        # Decoder: labeled attention then vector-valued refinement blocks.
        last_x = make_labeled_block(last)
        for _ in range(self.arguments['num_vector_blocks']):
            last_x = make_vector_block(last_x, last)
        last_x = maybe_downcast_vector(last_x)
        embedding_model = keras.models.Model(inputs, embedding)
        scope['input_symbol'] = inputs
        scope['output'] = last_x
        scope['model'] = keras.models.Model(inputs, scope['output'])
        scope['embedding_model'] = embedding_model
|
๏ปฟfrom dataloader.DataloaderApi import *
import torch.nn as nn
import torch.optim
import time
import difflib
import torch.nn.functional as F
import datetime
from collections import Counter
from GPT.RAdam.radam.radam import RAdam
from math import log
from numpy import array
from numpy import argmax
from collections import defaultdict
import copy
import re
class Candidate:
    """A partial beam-search hypothesis."""

    def __init__(self, pre_ids, pro, is_complete):
        self.pre_ids = pre_ids  # subword ids generated so far
        self.pro = pro  # cumulative probability
        self.is_complete = is_complete  # whether the hypothesis ends a word
class BestToken:
    """A completed beam-search hypothesis (word-final token sequence)."""

    def __init__(self, pre_ids, pro):
        self.pre_ids = pre_ids  # subword ids of the completed word
        self.pro = pro  # cumulative probability
class Classifier:
def __init__(self, model, model_lstm, args, vocab, word_vocab, Rvocab, tokenizer):
    """Bundle the GPT model, an auxiliary LSTM model, vocabularies and the
    tokenizer used for training and evaluation."""
    self.model = model  # main language model
    self.model_lstm = model_lstm  # auxiliary LSTM model
    self.vocab = vocab
    self.args = args  # hyperparameters / runtime options
    self.word_vocab = word_vocab
    self.Rvocab = Rvocab  # presumably the reverse (id -> token) vocab — verify
    self.counter = Counter()
    self.next_api = 0
    self.pad_id = tokenizer.pad_token_id
    self.eos_id = tokenizer.eos_token_id
    self.tokenizer = tokenizer
    self.ep = 0  # last effective training epoch index
    self.control_num = 0
# Print the model structure.
def summary(self):
    """Print the underlying model's representation."""
    print(self.model)
# Training.
def train(self, train_data, dev_data, test_data, args_device, arg, epoch, search_word_dict, train_batch_len=None,
          dev_batch_len=None):
    """Train self.model with RAdam, validating after each epoch.

    Early-stops once dev accuracy has failed to improve for more than two
    epochs; optionally saves the model when ``arg.is_save`` is set.

    Returns:
        (best_dev_acc, perplexity, effective_epoch_index)
    """
    # Only optimize parameters that require gradients.
    optimizer = RAdam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.args.lr,
                      weight_decay=self.args.weight_decay)
    train_loss_list, train_acc_list = [], []
    best_acc = 0
    dev_loss_list, dev_acc_list = [], []
    patenice = 0  # early-stopping patience counter (sic)
    for ep in range(self.args.epoch):
        self.ep = ep
        train_data_num = 1
        train_num = 1
        train_acc = 0
        train_acc_1 = 0
        train_loss = 0
        word_acc = 0
        start_time = datetime.datetime.now()
        self.model.train()
        print("start train")
        for onebatch in get_batch_train(train_data, self.args.batch_size, arg, train_batch_len):
            words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
            # Next-token prediction: targets are the input shifted by one.
            targets = words[:, 1:].contiguous()
            pred = self.model(words)
            pred = pred[:, :-1].contiguous()
            loss = self.compuate_loss(targets.view(-1), pred.view(-1, pred.shape[-1]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.data.item()
            acc_1, num = self.compuate_acc(targets.view(-1), pred.view(-1, pred.shape[-1]))
            train_acc += acc_1
            train_data_num += num
        end_time = datetime.datetime.now()
        during_time = end_time - start_time
        print(len(dev_data))
        dev_acc, dev_loss, dev_data_num, dev_word_acc, dev_num, perplexity = self.validate(dev_data, args_device,
                                                                                           arg, dev_batch_len,
                                                                                           False)
        train_acc /= train_data_num
        train_loss /= train_data_num
        train_loss_list.append(train_loss)
        if patenice > 2:
            # No improvement for 3 epochs: roll the epoch counter back and stop.
            self.ep = self.ep - patenice
            break
        if dev_acc > best_acc:
            print(dev_acc, best_acc)
            dev_acc_list.append(dev_acc)
            best_acc = dev_acc_list[-1]
            patenice = 0
        else:
            patenice += 1
            print(patenice)
        print("[Epoch {}] train loss :{} train_acc:{} % Time:{} train_word_acc:{} train data num:{} train data vocab num:{}".format(
            ep + 1, train_loss, train_acc * 100, during_time, word_acc / train_num, train_data_num, train_num))
        print(
            "[Epoch {}] dev loss :{} dev_acc:{} dev_word_acc:{} % train data num:{} train data vocab num:{} perplexity:{}".format(ep + 1,
                                                                                                                                 dev_loss,
                                                                                                                                 dev_acc * 100,
                                                                                                                                 dev_word_acc / dev_num,
                                                                                                                                 dev_data_num,
                                                                                                                                 dev_num,
                                                                                                                                 perplexity))
    s = datetime.datetime.now()
    print(datetime.datetime.now() - s)
    if arg.is_save:
        # NOTE(review): saves the whole module object, not just state_dict.
        torch.save(self.model, "data/API/data/trained_GPT_{}_swing_bpe_1".format(-1))
    return dev_acc_list[-1], perplexity, self.ep
def validate(self, dev_data, args_device, arg, batch_len, is_refine):
    """Compute loss, top-2 accuracy and perplexity over `dev_data`.

    Returns:
        (dev_acc, dev_loss, dev_data_num, dev_word_acc, num_1, perplexity)
        — dev_word_acc / num_1 are not accumulated here (word-level
        evaluation lives in `evluate`).
    """
    dev_loss = 0
    dev_acc = 0
    batch_num = 1
    dev_data_num = 1
    perplexity = 0.0
    dev_word_acc = 0
    num_1 = 1
    num_2 = 0
    self.model.eval()
    dev_num = 1
    refine_num = 0
    no_refine_num = 0
    true_test_seq = []
    pred_api_seq = []
    acc_2 = 0
    acc_3 = 0
    # NOTE(review): runs without torch.no_grad(), so gradients are tracked.
    for line_num, onebatch in enumerate(get_batch_train(dev_data, arg.batch_size, arg, None)):
        batch_num += 1
        words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
        # Next-token targets: the input shifted by one position.
        targets = words[:, 1:].contiguous()
        pred = self.model(words)
        pred = pred[:, :-1].contiguous()
        loss = self.compuate_loss(targets.view(-1), pred.view(-1, pred.shape[-1]))
        dev_loss += loss.data.item()
        acc_1, num = self.compuate_acc(targets.view(-1), pred.view(-1, pred.shape[-1]))
        perplexity += torch.exp(loss).data.item()
        dev_acc += acc_1
        dev_data_num += num
        print(acc_2, acc_3, dev_word_acc)
    dev_acc /= dev_data_num
    dev_loss /= dev_data_num
    perplexity /= batch_num
    print("num_1:", num_1)
    print("num_2:", num_2)
    return dev_acc, dev_loss, dev_data_num, dev_word_acc, num_1, perplexity
# Evaluate the model (word-level beam search over subword tokens).
def evluate(self, dev_data, dev_data_1, args_device, arg, batch_len, is_refine, search_word_dict):
    """Word-level top-k evaluation via constrained beam search.

    First pass over `dev_data_1` collects the set of ground-truth APIs
    (`b_api_counter`); the second pass over `dev_data` beam-searches each
    word position, restricting completed words to `search_word_dict`
    entries (or control-node strings) and accumulating top-1/3/5/10
    accuracies plus cross-domain/coverage statistics.

    NOTE(review): the original indentation was ambiguous in places (see
    inline notes); the reconstruction below follows visible statement
    order — verify against the original file.
    """
    reject_token = ["[EOS]", "[BOS]", "[PAD]", "[UNK]"]
    # Structural pseudo-tokens allowed when no class prefix is present.
    appendControlNodesStrings = [
        "IF", "CONDITION", "THEN", "ELSE",
        "WHILE", "BODY",
        "TRY", "TRYBLOCK", "CATCH", "FINALLY",
        "FOR", "INITIALIZATION", "COMPARE", "UPDATE",
        "FOREACH", "VARIABLE", "ITERABLE",
    ]
    length_pro = 0
    length_pro_1 = 0
    length_pro_2 = 0
    control_node = 0
    k = 10  # number of completed candidates kept per word
    top1_len = Counter()
    top1_ground_true_len = Counter()
    top1_len_info = Counter()
    top3_len = Counter()
    top3_ground_true_len = Counter()
    beam_size = arg.boundary
    batch_num = 0
    dev_data_num = 0
    perplexity = 0.0
    dev_word_acc_top1 = 0
    dev_word_acc_top10 = 0
    dev_word_acc_top3 = 0
    dev_word_acc_top5 = 0
    dev_word_acc_class_1 = 0
    dev_word_acc_class_3 = 0
    dev_word_acc_class_5 = 0
    dev_word_acc_class_10 = 0
    # non-control-node count
    num_1 = 1
    # control-node count
    num_2 = 0
    # correct non-control-node count
    num_3 = 0
    num_4 = 0
    num_5 = 0
    new_num = 0
    self.model.eval()
    dev_num = 1
    refine_num = 0
    no_refine_num = 0
    domain_count = 1
    a_api_counter = set()
    b_api_counter = set()
    c_api_counter = set()
    cross_domain = 0
    # Pass 1: collect the set of in-domain ground-truth APIs from dev_data_1.
    for line_num, onebatch in enumerate(get_batch_train(dev_data_1, 1, arg, None)):
        words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
        targets = words[:, 1:].contiguous()
        true_seq = "".join(self.tokenizer.convert_ids_to_tokens(onebatch[0].input_ids))
        pred_index_1 = 0
        pred_index = 0
        for word_loc, word_len in enumerate(onebatch[0].word_index):
            cur_word = words.contiguous().clone()
            cur_word = cur_word.expand(beam_size, 512)
            true_token = []
            if word_loc == 0:
                # First word: just advance the subword cursor.
                true_token1 = [self.tokenizer.convert_id_to_token(targets[0, index:index + 1].item()) for index
                               in range(word_len)]
                pred_index_1 = word_len
                pred_index = word_len
                continue
            pred_index = pred_index_1
            varible_cut_dot = 0
            for word_dex in range(word_len):
                # Remember where the last '.' falls (class/method split).
                if self.tokenizer.convert_id_to_token(targets[0, pred_index_1].item()) == "โ.":
                    varible_cut_dot = word_dex + 1
                true_token.append(
                    self.tokenizer.convert_id_to_token(targets[0, pred_index_1:pred_index_1 + 1].item()))
                pred_index_1 += 1
            true_api = "".join(true_token).replace("โ", "")
            if onebatch[0].tags[word_loc] != 1:
                continue
            else:
                if true_api.find(".new") != -1:
                    # Constructor calls are excluded from the API set.
                    continue
                else:
                    b_api_counter.add(true_api)
    # Pass 2: beam-search evaluation on dev_data.
    for line_num, onebatch in enumerate(get_batch_train(dev_data, 1, arg, None)):
        batch_num += 1
        words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
        targets = words[:, 1:].contiguous()
        if is_refine:
            true_seq = "".join(self.tokenizer.convert_ids_to_tokens(onebatch[0].input_ids))
        # NOTE(review): original scoping of this line under `if is_refine`
        # was ambiguous; placed at loop level so both paths define it.
        cahe_list = true_seq.replace("[BOS]", "").replace("[PAD]", "").replace("โ", "").replace("</t>", " ").replace("[EOS]", "").split(" ")
        pred_index_1 = 0
        pred_index = 0
        for word_loc, word_len in enumerate(onebatch[0].word_index):
            candidate_list = []      # open (incomplete) hypotheses
            bestToken_list = []      # completed word hypotheses (top-k)
            beam_candidate_list = [] # current beam being expanded
            tokensDone = 0
            iter = 0
            count = 0
            token_pro_sum = 0.0
            # if The probability of the best candidate is less than the worst current complete top-k tokens
            hope = True
            cur_word = words.contiguous().clone()
            cur_word = cur_word.expand(beam_size, 512)
            true_token = []
            if word_loc == 0:
                true_token1 = [self.tokenizer.convert_id_to_token(targets[0, index:index + 1].item()) for index
                               in range(word_len)]
                pred_index_1 = word_len
                pred_index = word_len
                continue
            # Keep a context window of up to 3 previous words.
            if word_loc <= 3:
                cur_cahe_list = cahe_list[:word_loc]
            else:
                cur_cahe_list = cahe_list[word_loc - 3:word_loc]
            class_cahe_list = [class_method.split(".") for class_method in cur_cahe_list]
            pred_index = pred_index_1
            varible_cut_dot = 0
            for word_dex in range(word_len):
                if self.tokenizer.convert_id_to_token(targets[0, pred_index_1].item()) == "โ.":
                    varible_cut_dot = word_dex + 1
                true_token.append(
                    self.tokenizer.convert_id_to_token(targets[0, pred_index_1:pred_index_1 + 1].item()))
                pred_index_1 += 1
            true_api = "".join(true_token).replace("โ", "")
            if true_api.find(".new") != -1:
                # Skip constructor calls, but count in-domain occurrences.
                if onebatch[0].tags[word_loc] == 1:
                    new_num += 1
                continue
            dev_num += 1
            if onebatch[0].tags[word_loc] == 1:
                domain_count += 1
                c_api_counter.add(true_api)
            else:
                continue
            # Strip parenthesized/bracketed argument text for the "no
            # parameters" comparison variant.
            a = re.sub(u"\\(.*?\\)|\\{.*?}|\\[.*?]|\\<.*?>", "", true_api)
            true_api_nop = re.sub(u"\\(\\)|\\{|\\[\\]|\\>|\\<", "", a)
            method_len = word_len - varible_cut_dot
            # Subword ids of the class-name prefix (up to the last '.').
            class_name = [words[0, index].item() for index in range(pred_index + 1, pred_index + varible_cut_dot + 1)]
            if len(class_name) == 0:
                class_name_var = "".join(self.tokenizer.convert_ids_to_tokens(class_name)).replace("โ", "")
            else:
                class_name_var = "".join(self.tokenizer.convert_ids_to_tokens(class_name)).replace("โ", "")
            pred_index = pred_index + varible_cut_dot
            append_info = [[words[0, pred_index].item(), 1]]
            # Beam search until the candidate budget is exhausted or no open
            # candidate can beat the worst completed one.
            while ((tokensDone <= 5000) and hope):
                iter += 1
                if len(beam_candidate_list) > 1:
                    if count >= k:
                        break
                    for i in range(len(beam_candidate_list)):
                        if pred_index + len(beam_candidate_list[i].pre_ids) >= 512:
                            print("over the limit")
                            count += 1
                            continue
                        cur_word[i, pred_index:pred_index + len(beam_candidate_list[i].pre_ids)] = torch.tensor(
                            beam_candidate_list[i].pre_ids, dtype=torch.long)
                        currt_pred = self.model(cur_word[i:i + 1, :])
                        singel_word_pred = currt_pred[:, pred_index + len(beam_candidate_list[i].pre_ids) - 1, :].clone()
                        singel_word_pred = F.softmax(singel_word_pred, dim=1)
                        subword_pro_order = torch.argsort(singel_word_pred, dim=1, descending=True)[0][:beam_size]
                        for pred_subword in subword_pro_order:
                            if self.tokenizer.convert_id_to_token(pred_subword.item()).find("</t>") != -1 or self.tokenizer.convert_id_to_token(pred_subword.item()) in reject_token:
                                # Word boundary (or special token) reached.
                                if self.tokenizer.convert_id_to_token(pred_subword.item()) in reject_token:
                                    continue
                                tokensDone += 1
                                update_list = [index for index in beam_candidate_list[i].pre_ids]
                                update_list.append(pred_subword.item())
                                method_name = "".join([self.tokenizer.convert_id_to_token(index) for index in
                                                       update_list]).replace("โ", "").replace("</t>", "").replace(
                                    "[EOS]", "").replace("[UNK]", "").replace("[PAD]", "").replace("[BOS]", "")
                                if class_name_var != "":
                                    # Only accept methods known for this class.
                                    if method_name not in search_word_dict[class_name_var.replace(".", "")]:
                                        continue
                                    bestToken_list.append(BestToken(update_list,
                                                                    beam_candidate_list[i].pro * singel_word_pred[0][
                                                                        pred_subword.item()].item()))
                                else:
                                    # No class prefix: only control-node words.
                                    if method_name not in appendControlNodesStrings:
                                        continue
                                    bestToken_list.append(BestToken(update_list,
                                                                    beam_candidate_list[i].pro *
                                                                    singel_word_pred[0][
                                                                        pred_subword.item()].item()))
                                bestToken_list = sorted(bestToken_list, key=lambda x: x.pro, reverse=True)
                                if len(bestToken_list) > k:
                                    bestToken_list.pop(-1)
                            else:
                                # Still inside a word: keep as open candidate.
                                update_list = [index for index in beam_candidate_list[i].pre_ids]
                                update_list.append(pred_subword.item())
                                candidate_list.append(Candidate(update_list,
                                                                beam_candidate_list[i].pro * singel_word_pred[0][
                                                                    pred_subword.item()].item(), False))
                    candidate_list = sorted(candidate_list, key=lambda x: x.pro, reverse=True)
                    token_pro_sum = sum([token.pro for token in bestToken_list])
                    # Stop when no open candidate can beat the worst kept word.
                    if len(bestToken_list) >= 1 and len(candidate_list) != 0:
                        if candidate_list[0].pro < bestToken_list[-1].pro:
                            hope = False
                    if len(candidate_list) < beam_size:
                        print(len(candidate_list))
                        for i in range(len(candidate_list), 0, -1):
                            beam_candidate_list[i - 1] = candidate_list.pop(i - 1)
                    else:
                        for i in range(beam_size, 0, -1):
                            beam_candidate_list[i - 1] = candidate_list.pop(i - 1)
                else:
                    # First expansion: seed the beam from the current position.
                    cur_word[0, pred_index] = append_info[0][0]
                    currt_pred = self.model(cur_word[0:1, :])
                    init_candidate_list, init_bestTokens = self.compuate_acc_2(
                        currt_pred[0, pred_index:pred_index + 1, :], append_info, k, reject_token, search_word_dict, class_name_var, appendControlNodesStrings, beam_size=beam_size)
                    candidate_list = [data for data in init_candidate_list]
                    bestToken_list = [data for data in init_bestTokens]
                    beam_candidate_list = [data for data in init_candidate_list]
                    if len(bestToken_list) >= 1 and len(candidate_list) != 0:
                        if candidate_list[0].pro < bestToken_list[-1].pro:
                            hope = False
                    pred_index += 1
                    for i in range(beam_size, 0, -1):
                        candidate_list.pop(i - 1)
            # Decode the kept hypotheses into API strings.
            final_result = []
            final_result_check = []
            final_result_nop = []
            final_class_result = []
            bestToken_list = sorted(bestToken_list, key=lambda x: x.pro, reverse=True)
            for best_token in bestToken_list[:10]:
                final_result.append(class_name_var + "".join([self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("โ", "").replace("</t>", "").replace("[EOS]", "").replace("[UNK]", "").replace("[PAD]", ""))
                final_result_check.append(best_token.pro)
                final_class_result.append("".join(
                    [self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("โ",
                                                                                                         "").replace(
                    "</t>", "").replace("[EOS]", "").replace("[UNK]", "").split(".")[0])
                raw_api = class_name_var + "".join([self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("โ", "").replace("</t>", "").replace("[EOS]", "").replace("[UNK]", "").replace("[PAD]", "")
                a = re.sub(u"\\(.*?\\)|\\{.*?}|\\[.*?]|\\<.*?>", "", raw_api)
                true_api_nop_can = re.sub(u"\\(\\)|\\{|\\[\\]|\\>|\\<", "", a)
                final_result_nop.append(true_api_nop_can)
            # Top-1 / top-3 / top-5 / top-10 accounting.
            if true_api.replace("</t>", "") in final_result[:1]:
                dev_word_acc_top1 += 1
                if onebatch[0].tags[word_loc] == 1:
                    dev_word_acc_class_1 += 1
            else:
                if true_api.replace("</t>", "") not in appendControlNodesStrings:
                    pass
                else:
                    control_node += 1
                    pass
            if true_api.replace("</t>", "") in final_result[:3]:
                dev_word_acc_top3 += 1
                if onebatch[0].tags[word_loc] == 1:
                    dev_word_acc_class_3 += 1
            else:
                m3 = 0
            if true_api.replace("</t>", "") in final_result[:5]:
                dev_word_acc_top5 += 1
                if onebatch[0].tags[word_loc] == 1:
                    dev_word_acc_class_5 += 1
            if true_api.replace("</t>", "") in final_result:
                if true_api.replace("</t>", "") not in appendControlNodesStrings:
                    num_3 += 1
                else:
                    num_4 += 1
                dev_word_acc_top10 += 1
                # A hit on an API unseen in dev_data_1 counts as cross-domain.
                if true_api not in b_api_counter:
                    if onebatch[0].tags[word_loc] == 1:
                        a_api_counter.add(true_api)
                        cross_domain += 1
                if onebatch[0].tags[word_loc] == 1:
                    dev_word_acc_class_10 += 1
            else:
                if true_api.replace("</t>", "") not in appendControlNodesStrings:
                    num_1 += 1
                    if true_api_nop.replace("</t>", "") in final_result_nop:
                        pass
                    else:
                        if onebatch[0].tags[word_loc] == 1:
                            num_5 += 1
                else:
                    num_2 += 1
    word_acc_1 = dev_word_acc_top1 / dev_num
    word_acc_3 = dev_word_acc_top3 / dev_num
    word_acc_5 = dev_word_acc_top5 / dev_num
    word_acc_10 = dev_word_acc_top10 / dev_num
    dev_1 = dev_word_acc_class_1 / domain_count
    dev_3 = dev_word_acc_class_3 / domain_count
    dev_5 = dev_word_acc_class_5 / domain_count
    dev_10 = dev_word_acc_class_10 / domain_count
    print("class acc: top1:{} top3:{} top5:{} top10:{}".format(dev_1, dev_3, dev_5, dev_10))
    d_api_counter = b_api_counter & c_api_counter
    print("------------------------------")
    print(dev_num)
    print(domain_count)
    print("cross domain")
    print(a_api_counter)
    print(len(a_api_counter))
    print(cross_domain / domain_count)
    if len(c_api_counter) == 0:
        print("coverage:", len(d_api_counter) / 1)
    else:
        print("coverage:", len(d_api_counter) / len(c_api_counter))
    return word_acc_1, word_acc_3, word_acc_5, word_acc_10, dev_num, [dev_1, dev_3, dev_5, dev_10], domain_count
def compuate_acc(self, true_tags, logit):
    """Top-2 accuracy over the positions whose gold tag is non-zero.

    :param true_tags: 1-D tensor of gold tag ids; 0 marks positions to skip.
    :param logit: 2-D tensor of unnormalized scores, one row per position.
    :return: (hits, evaluated) — number of rows whose gold tag is in the
        top-2 predictions, and the number of rows evaluated. (0, 0) when
        every tag is 0.
    """
    keep = [idx for idx in range(logit.shape[0]) if true_tags[idx].item() != 0]
    if not keep:
        return 0, 0
    index = torch.tensor(keep).long().to(self.args.device)
    logit = torch.index_select(logit, 0, index)
    true_tags = torch.index_select(true_tags, 0, index)
    probs = F.softmax(logit, dim=1)
    hits = 0
    for row_idx in range(probs.shape[0]):
        ranked = torch.argsort(probs[row_idx], descending=True)
        if true_tags[row_idx] in ranked[:2]:
            hits += 1
    return hits, true_tags.shape[0]
def compuate_acc_1(self, true_tags, logit):
    """Top-5 accuracy over ALL positions, plus beam-seed token ids.

    :param true_tags: 1-D tensor of gold token ids (no positions filtered,
        unlike compuate_acc).
    :param logit: 2-D tensor of unnormalized scores, one row per position.
    :return: (correct_num, total, append_info) where append_info holds the
        top ``self.args.boundary`` token ids of the LAST row's distribution.
    NOTE(review): both branches of the top-5 check append identical ids, so
    the if/else split has no effect on append_info; the inner loop also
    shadows the outer loop variable ``i`` — confirm this is intentional.
    """
    correct_num = 0
    select_index = []
    append_info = []
    # Select every row index (identity selection).
    for i in range(true_tags.shape[0]):
        select_index.append(i)
    logit = torch.index_select(logit, 0, torch.tensor(select_index).long().to(self.args.device))
    true_tags = torch.index_select(true_tags, 0, torch.tensor(select_index).long().to(self.args.device))
    logit = F.softmax(logit, dim=1)
    for i in range(logit.shape[0]):
        if true_tags[i].item() in torch.argsort(logit[i], descending=True)[: 5].tolist():
            correct_num += 1
            for i in range(self.args.boundary):
                append_info.append(torch.argsort(logit[-1], descending=True)[i].item())
        else:
            for i in range(self.args.boundary):
                append_info.append(torch.argsort(logit[-1], descending=True)[i].item())
    return correct_num, true_tags.shape[0], append_info
def compuate_acc_2(self, logit, pre_info, k, reject_token, search_dict, class_name, control_lable, beam_size, target=None):
    """One beam-search expansion step with API-validity filtering.

    Builds up to ``beam_size`` continuation candidates and up to ``k``
    completed "best" tokens, filtered against ``reject_token``, the
    ``control_lable`` set (when no class context) or ``search_dict`` entries
    for ``class_name``.

    :return: (pre_candidate, bestTokens) with bestTokens sorted by
        probability, descending.
    NOTE(review): in the ``len(pre_info) != 1`` branch, ``append_info`` is
    never defined in this function, so that path raises NameError — the
    append was presumably meant to go to ``pre_candidate`` or a fresh list.
    """
    bestTokens = []
    pre_candidate = []
    lowest_pro = 0.0  # NOTE(review): unused
    logit = F.softmax(logit, dim=1)
    sort = torch.argsort(logit, dim=1, descending=True)
    flag1 = False  # set when the candidate beam is full
    flag2 = False  # set when the best-token list is full
    if len(pre_info) != 1:
        # Expanding an existing beam: combine token probability with the
        # accumulated probability of its parent hypothesis.
        for i in range(logit.shape[0]):
            for j in range(self.args.boundary):
                append_info.append((sort[-1][j].item() % self.tokenizer.vocab_size,
                                    logit[-1][sort[-1][j].item()].item() *
                                    pre_info[int(sort[-1][j].item() / self.tokenizer.vocab_size)][1]))
                pre_candidate.append(pre_info[int(sort[-1][j].item() / self.tokenizer.vocab_size)][0])
    else:
        # First step: scan the vocabulary in score order.
        # for i in range(logit.shape[0]):
        for j in range(logit.shape[1]):
            if flag1 and flag2:
                break
            if len(pre_candidate) < beam_size:
                # Tokens without the "</t>" end marker continue the beam.
                if self.tokenizer.convert_id_to_token(sort[0][j].item()).find(
                        "</t>") == -1 and self.tokenizer.convert_id_to_token(
                        sort[0][j].item()) not in reject_token:
                    pre_candidate.append(
                        Candidate([sort[0][j].item()], logit[0][sort[0][j].item()].item(), False))
            else:
                flag1 = True
            if len(bestTokens) < k:
                method_name = self.tokenizer.convert_id_to_token(sort[0][j].item()).replace("โ", "")
                if class_name == "":
                    # No receiver class: accept only control-flow labels.
                    # print(method_name)
                    if method_name.replace("</t>", "") in control_lable:
                        bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
                else:
                    if method_name.find(
                            "</t>") != -1 or method_name in reject_token:
                        if method_name in reject_token:
                            continue
                        # bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
                    else:
                        # Accept only methods known for the receiver class.
                        if method_name.replace("</t>", "") in search_dict[class_name]:
                            bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
            else:
                flag2 = True
    bestTokens = sorted(bestTokens, key=lambda x: x.pro, reverse=True)
    return pre_candidate, bestTokens
def compuate_loss(self, true_tags, logit):
    """Mean cross-entropy between ``logit`` rows and ``true_tags``,
    ignoring positions whose tag equals the padding id."""
    criterion = nn.CrossEntropyLoss(ignore_index=self.pad_id)
    return criterion(logit, true_tags)
def is_validate(self, pred_sub_word_order, validate_class_name, search_word_dict):
    """Try to extend a partial API name with one of the predicted sub-words
    so that it remains a prefix of a known API.

    :param pred_sub_word_order: 1-D tensor of candidate sub-word token ids,
        in preference order.
    :param validate_class_name: pair of (current partial API string,
        iterable of candidate class names to search under).
    :param search_word_dict: maps a class name to its known API strings.
    :return: (currt_api, is_complete, currt_api_index, currt_api_search_path)
        — the (possibly extended) API string, whether the chosen sub-word
        carried the "โ" word-boundary marker (treated as completing the
        name), the chosen token id, and the class it matched under.
    """
    currt_api = validate_class_name[0]
    currt_api_index = 0
    currt_api_search_path = None
    is_complete = False
    flag = False  # set once a sub-word successfully extends the API
    for i in range(pred_sub_word_order.shape[0]):
        if flag:
            break
        for validate_class in validate_class_name[1]:
            if flag:
                break
            for c_class in search_word_dict[validate_class]:
                # Does any known API contain "partial + candidate sub-word"?
                if c_class.find(
                        currt_api + self.tokenizer.convert_id_to_token(pred_sub_word_order[i].item()).replace("โ",
                                                                                                             "")) != -1:
                    currt_api_index = pred_sub_word_order[i].item()
                    currt_api = currt_api + self.tokenizer.convert_id_to_token(pred_sub_word_order[i].item())
                    currt_api_search_path = validate_class
                    # The "โ" marker denotes a word boundary: the API name is done.
                    if self.tokenizer.convert_id_to_token(currt_api_index).find("โ") != -1:
                        currt_api = currt_api.replace("โ", "")
                        is_complete = True
                    flag = True
                    break
                else:
                    continue
    return currt_api, is_complete, currt_api_index, currt_api_search_path
def found_validate_class(self, pred_subword, all_classes, n):
    """Fuzzy-match a predicted sub-word against known class names.

    :param pred_subword: tensor scalar token id of the predicted sub-word.
    :param all_classes: dict whose keys are the known class names.
    :param n: number of class-name candidates to return.
    :return: list of candidate class names, padded with "null".
    """
    # validate_class_name = ()
    refine_words = []
    # print(all_classes)
    pred_subword = self.tokenizer.convert_id_to_token(pred_subword.item()).replace("โ", "")
    if len(pred_subword) < 4:
        # Short fragment: require the candidate to START with it.
        refine_api = difflib.get_close_matches(pred_subword, all_classes.keys(), 100, 0.1)
        for can_dai in refine_api:
            if len(refine_words) == n:
                break
            if can_dai.startswith(pred_subword, 0, len(pred_subword)) and len(can_dai) > 4:
                refine_words.append(can_dai)
    else:
        # Longer fragment: accept the candidate if it CONTAINS it anywhere.
        refine_api = difflib.get_close_matches(pred_subword, all_classes.keys(), 100, 0.1)
        for can_dai in refine_api:
            if len(refine_words) == n:
                break
            if can_dai.find(pred_subword) != -1:
                refine_words.append(can_dai)
        # NOTE(review): this block appends every fuzzy match again, so names
        # already collected above appear twice — confirm this is intended.
        if len(refine_api) == 0:
            refine_words = n * ["null"]
        else:
            for r_api in refine_api:
                refine_words.append(r_api)
            refine_words = refine_words + (n - len(refine_api)) * ["null"]
    return refine_words
def refine(self, pred, true_tags):
    """Reassemble candidate words from per-position top-10 sub-word
    predictions and compare them with the gold word.

    For rank i in 0..9, the i-th word is built by concatenating the i-th
    best sub-word at every position; each word is also fuzzy-matched
    against ``self.word_vocab``.

    :param pred: 2-D tensor of sub-word logits (positions x vocab).
    :param true_tags: 1-D tensor of gold sub-word token ids.
    :return: (true_word, tokens) — the gold word and the 10 candidates.
    NOTE(review): ``refine_words`` and ``flag`` are computed but never
    returned or otherwise used — dead work unless callers rely on
    ``self.control_num`` only.
    """
    appendControlNodesStrings = [
        "IF", "CONDITION", "THEN", "ELSE",
        "WHILE", "BODY",
        "TRY", "TRYBLOCK", "CATCH", "FINALLY",
        "FOR", "INITIALIZATION", "COMPARE", "UPDATE",
        "FOREACH", "VARIABLE", "ITERABLE",
    ]
    tokens = []
    true_token = []
    flag = False
    refine_words = []
    pred = F.softmax(pred, dim=1)
    # print(true_tags.shape,pred.shape)
    # Decode the gold sub-words, stripping the "โ" word-boundary marker.
    for j in range(pred.shape[0]):
        true_token.append(self.tokenizer.convert_id_to_token(true_tags[j].item()).replace("โ", ""))
    for i in range(10):
        token = []
        for j in range(pred.shape[0]):
            top5 = torch.argsort(pred[j], descending=True)[: 10]
            token.append(self.tokenizer.convert_id_to_token(top5[i].item()).replace("โ", ""))
        word = "".join(token)
        tokens.append(word)
        # print(true_word)
        # Snap the concatenated word to the closest vocabulary entry.
        refine_word = difflib.get_close_matches(word, self.word_vocab, 1, 0.6)
        if len(refine_word) == 0:
            refine_words.append("null")
        else:
            refine_words.append(refine_word[0])
    true_word = "".join(true_token)
    # Count how often the gold word is a control-flow node.
    if true_word in appendControlNodesStrings:
        self.control_num += 1
    # print(word, true_word, refine_word)
    # if len(refine_word) != 0:
    if true_word in refine_words:
        # print(true_word,tokens)
        # print(true_word, refine_words)
        flag = True
    else:
        # print(true_word, refine_words)
        pass
    return true_word, tokens
def refine_for_rec(self, pred):
    """For each predicted word, collect up to 10 fuzzy matches from
    ``self.word_vocab``.

    :param pred: iterable of predicted word strings (may carry the "โ"
        word-boundary marker, which is stripped before matching).
    :return: flat list of matches; a word with no match contributes a
        single "null" entry.
    """
    # Cleaned up: removed an unused local list of control-node strings and
    # a dead commented-out try/except.
    refine_words = []
    for word in pred:
        matches = difflib.get_close_matches(word.replace("โ", ""), self.word_vocab, 10, 0.05)
        if not matches:
            refine_words.append("null")
        else:
            refine_words.extend(matches)
    return refine_words
def beam_search_decoder(self, data, k):
    """Beam-search over a (steps x vocab) logit matrix.

    :param data: 2-D tensor of unnormalized scores, one row per step.
    :param k: beam width.
    :return: list of up to k [token_id_sequence, score] pairs, lowest score
        first.
    NOTE(review): scores are combined as ``score * -log(p)`` (a product of
    negative logs) rather than the conventional sum — preserved as-is
    because callers rank by this quantity.
    """
    data = F.softmax(data, dim=1)
    # walk over each step in sequence
    data = data.cpu().detach().numpy().tolist()
    sequences = [[list(), 1.0]]
    for row in data:
        all_candidates = list()
        # expand each current candidate by every vocabulary entry
        for seq, score in sequences:
            for j, prob in enumerate(row):
                # Fixed: the original used a bare ``except:``; math.log only
                # raises ValueError here (prob can underflow to 0.0).
                try:
                    step_cost = -log(prob)
                except ValueError:
                    step_cost = -log(0.00001)
                all_candidates.append([seq + [j], score * step_cost])
        # order all candidates by score and keep the k best
        ordered = sorted(all_candidates, key=lambda tup: tup[1])
        sequences = ordered[:k]
    return sequences
def cache(self, top20_result, class_cahe_list):
    """Re-weight beam candidates against the per-file class cache.

    Every candidate's probability is scaled by 0.7; candidates whose class
    prefix (text before the first '.') appears in ``class_cahe_list`` get a
    +0.3 bonus on top.

    :param top20_result: list of candidates with ``pre_ids`` and ``pro``.
    :param class_cahe_list: class names seen in the current cache context.
    :return: the same list, probabilities updated in place.
    """
    markers = ["โ", "</t>", "[EOS]", "[UNK]", "[PAD]"]
    for idx, candidate in enumerate(top20_result):
        text = "".join(self.tokenizer.convert_ids_to_tokens(candidate.pre_ids))
        for marker in markers:
            text = text.replace(marker, "")
        bonus = 0.3 if text.split(".")[0] in class_cahe_list else 0.0
        top20_result[idx].pro = 0.7 * top20_result[idx].pro + bonus
    return top20_result
|
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

from virtool_workflow.data_model import HMM


class AbstractHMMsProvider(ABC):
    """Interface for accessing HMM annotation data during a workflow run."""

    # Filesystem location associated with the HMM data.
    path: Path

    @abstractmethod
    async def get(self, hmm_id: str):
        """Get the HMM annotation with the given ID."""
        ...

    @abstractmethod
    async def hmm_list(self) -> List[HMM]:
        """Get a list of all HMM annotations."""
        ...

    @abstractmethod
    async def get_profiles(self) -> Path:
        """Get the profiles.hmm file."""
        ...
|
import datetime
import os
from tempfile import TemporaryFile
import webbrowser
from django.core.files.temp import gettempdir
from django.core.management.base import BaseCommand, CommandError
from canvas.models import Comment
from django.conf import settings
class Command(BaseCommand):
    """Management command: render and print the footer HTML for a single
    canvas Comment, identified by its id given as the first argument."""
    args = 'filename'
    help = ''

    def handle(self, *args, **options):
        # First positional argument is the Comment primary key.
        comment_id = args[0]
        cmt = Comment.objects.get(id=comment_id)
        html = cmt.footer.render_html()
        # Fixed: Python 2 `print html` statement replaced with the function
        # call form, which is valid in both Python 2 and 3.
        print(html)
|
from __future__ import print_function
import argparse
import random
import numpy as np
import time
import math
from collections import OrderedDict, defaultdict
from pybullet_tools.utils import load_model, TURTLEBOT_URDF, joints_from_names, \
set_joint_positions, HideOutput, get_bodies, sample_placement, pairwise_collision, \
set_point, Point, create_box, stable_z, TAN, GREY, connect, PI, OrderedSet, \
wait_if_gui, dump_body, set_all_color, BLUE, child_link_from_joint, link_from_name, draw_pose, Pose, pose_from_pose2d, \
get_random_seed, get_numpy_seed, set_random_seed, set_numpy_seed, plan_joint_motion, plan_nonholonomic_motion, \
joint_from_name, safe_zip, draw_base_limits, BodySaver, WorldSaver, LockRenderer, elapsed_time, disconnect, flatten, \
INF, wait_for_duration, get_unbuffered_aabb, draw_aabb, DEFAULT_AABB_BUFFER, get_link_pose, get_joint_positions, \
get_subtree_aabb, get_pairs, get_distance_fn, get_aabb, set_all_static, step_simulation, get_bodies_in_region, \
AABB, update_scene, Profiler, pairwise_link_collision, BASE_LINK, get_collision_data, draw_pose2d, \
normalize_interval, wrap_angle, CIRCULAR_LIMITS, wrap_interval, Euler, rescale_interval, adjust_path
# Name of the robot base link in the turtlebot URDF.
BASE_LINK_NAME = 'base_link'
# Planar joints controlling the mobile base (x, y translation and yaw).
BASE_JOINTS = ['x', 'y', 'theta']
DRAW_Z = 1e-3  # height at which 2D poses are drawn by default
DRAW_LENGTH = 0.5  # axis length used when drawing poses
MIN_AABB_VOLUME = DEFAULT_AABB_BUFFER**3  # smallest AABB volume of interest

##################################################
def create_custom_base_limits(robot, base_limits):
    """Map the x/y base joints of *robot* to their (lower, upper) limits.

    :param base_limits: pair of (lower_corner, upper_corner) 2D arrays.
    :return: dict {joint_id: (lower, upper)} for the two translational joints.
    """
    custom_limits = {}
    per_joint_limits = zip(*base_limits)
    for joint_name, limits in safe_zip(BASE_JOINTS[:2], per_joint_limits):
        custom_limits[joint_from_name(robot, joint_name)] = limits
    return custom_limits
def sample_placements(body_surfaces, obstacles=None, savers=None, min_distances=None):
    """Place each body on its assigned surface without collisions.

    :param body_surfaces: dict mapping body -> supporting surface body.
    :param obstacles: bodies to avoid; defaults to every body except the
        entries of body_surfaces.
    :param savers: BodySaver instances to restore between attempts.
    :param min_distances: dict of per-body clearances, or a single scalar
        applied to every body.
    :return: True when every body was placed, False when sampling failed
        (world state is restored in that case).
    """
    # Fixed: the original used mutable default arguments (savers=[] and
    # min_distances={}), which are shared across calls.
    if savers is None:
        savers = []
    if min_distances is None:
        min_distances = {}
    if obstacles is None:
        obstacles = OrderedSet(get_bodies()) - set(body_surfaces)
    obstacles = list(obstacles)
    savers = list(savers) + [BodySaver(obstacle) for obstacle in obstacles]
    if not isinstance(min_distances, dict):
        min_distances = {body: min_distances for body in body_surfaces}
    # TODO: max attempts here
    for body, surface in body_surfaces.items(): # TODO: shuffle
        min_distance = min_distances.get(body, 0.)
        while True:
            pose = sample_placement(body, surface)
            if pose is None:
                # Could not place this body: restore saved states and fail.
                for saver in savers:
                    saver.restore()
                return False
            for saver in savers:
                obstacle = saver.body
                if obstacle in [body, surface]:
                    continue
                saver.restore()
                if pairwise_collision(body, obstacle, max_distance=min_distance):
                    break
            else:
                # No collision: the placed body becomes an obstacle for the rest.
                obstacles.append(body)
                break
    for saver in savers:
        saver.restore()
    return True
def draw_path(path2d, z=DRAW_Z, **kwargs):
    """Draw a 2D base path, encoding each pose's wrapped yaw as a height
    offset so orientation changes are visible along the path.

    :param path2d: sequence of (x, y, yaw) poses, or None.
    :param z: nominal draw height. NOTE(review): currently unused — the
        hard-coded ``base_z`` below overrides it; confirm before relying on it.
    :return: list of debug-drawing handles ([] when path2d is None).
    """
    if path2d is None:
        return []
    #return list(flatten(draw_pose(pose_from_pose2d(pose2d, z=z), **kwargs) for pose2d in path2d))
    #return list(flatten(draw_pose2d(pose2d, z=z, **kwargs) for pose2d in path2d))
    base_z = 1.
    start = path2d[0]
    mid_yaw = start[2]
    #mid_yaw = wrap_interval(mid_yaw)
    # Center the wrapping interval on the initial yaw so the height channel
    # is continuous around the start orientation.
    interval = (mid_yaw - PI, mid_yaw + PI)
    #interval = CIRCULAR_LIMITS
    draw_pose(pose_from_pose2d(start, z=base_z), length=1, **kwargs)
    # TODO: draw the current pose
    # TODO: line between orientations when there is a jump
    # Each pose is drawn at base_z plus an offset in [-0.5, 0.5] proportional
    # to its yaw wrapped into `interval`.
    return list(flatten(draw_pose2d(pose2d, z=base_z+rescale_interval(
        wrap_interval(pose2d[2], interval=interval), old_interval=interval, new_interval=(-0.5, 0.5)), **kwargs)
                        for pose2d in path2d))
def plan_motion(robot, joints, goal_positions, attachments=None, obstacles=None, holonomic=False, reversible=False, **kwargs):
    """Plan a motion of the given joints to ``goal_positions``.

    :param attachments: bodies attached to the robot (move with it and are
        excluded from the obstacle set).
    :param obstacles: bodies to avoid; defaults to all bodies minus the
        moving ones.
    :param holonomic: plan with unconstrained joint motion; otherwise use
        the nonholonomic (differential-drive) planner.
    :param reversible: allow backwards motion in the nonholonomic planner.
    :return: joint path as returned by the underlying planner.
    """
    # Fixed: mutable default argument (attachments=[]) replaced with None.
    if attachments is None:
        attachments = []
    attached_bodies = [attachment.child for attachment in attachments]
    moving_bodies = [robot] + attached_bodies
    if obstacles is None:
        obstacles = get_bodies()
    obstacles = OrderedSet(obstacles) - set(moving_bodies)
    if holonomic:
        return plan_joint_motion(robot, joints, goal_positions,
                                 attachments=attachments, obstacles=obstacles, **kwargs)
    # TODO: just sample the x, y waypoint and use the resulting orientation
    # TODO: remove overlapping configurations/intervals due to circular joints
    return plan_nonholonomic_motion(robot, joints, goal_positions, reversible=reversible,
                                    linear_tol=1e-6, angular_tol=0.,
                                    attachments=attachments, obstacles=obstacles, **kwargs)
##################################################
def problem1(n_obstacles=10, wall_side=0.1, obst_width=0.25, obst_height=0.5):
    """Build the demo world: a square floor enclosed by four walls, random
    box obstacles, and a turtlebot with mirrored start/goal configurations.

    :param n_obstacles: number of random box obstacles to scatter.
    :param wall_side: wall thickness/height.
    :param obst_width: obstacle footprint side length.
    :param obst_height: obstacle height.
    :return: (robot, base_limits, goal_conf, obstacles).
    """
    floor_extent = 5.0
    base_limits = (-floor_extent/2.*np.ones(2),
                   +floor_extent/2.*np.ones(2))
    floor_height = 0.001
    floor = create_box(floor_extent, floor_extent, floor_height, color=TAN)
    set_point(floor, Point(z=-floor_height/2.))
    # Four walls enclosing the floor.
    wall1 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall1, Point(y=floor_extent/2., z=wall_side/2.))
    wall2 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall2, Point(y=-floor_extent/2., z=wall_side/2.))
    wall3 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall3, Point(x=floor_extent/2., z=wall_side/2.))
    wall4 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall4, Point(x=-floor_extent/2., z=wall_side/2.))
    walls = [wall1, wall2, wall3, wall4]
    # Obstacles start on the floor; their final poses are sampled below.
    initial_surfaces = OrderedDict()
    for _ in range(n_obstacles):
        body = create_box(obst_width, obst_width, obst_height, color=GREY)
        initial_surfaces[body] = floor
    obstacles = walls + list(initial_surfaces)
    # Goal is the start configuration mirrored through the origin.
    initial_conf = np.array([+floor_extent/3, -floor_extent/3, 3*PI/4])
    goal_conf = -initial_conf
    with HideOutput():
        robot = load_model(TURTLEBOT_URDF)
        base_joints = joints_from_names(robot, BASE_JOINTS)
        # base_link = child_link_from_joint(base_joints[-1])
        base_link = link_from_name(robot, BASE_LINK_NAME)
        set_all_color(robot, BLUE)
    dump_body(robot)
    set_point(robot, Point(z=stable_z(robot, floor)))
    draw_pose(Pose(), parent=robot, parent_link=base_link, length=0.5)
    set_joint_positions(robot, base_joints, initial_conf)
    # Sample obstacle placements that leave both the start pose and (via the
    # saver) the goal pose collision-free.
    sample_placements(initial_surfaces, obstacles=[robot] + walls,
                      savers=[BodySaver(robot, joints=base_joints, positions=goal_conf)],
                      min_distances=10e-2)
    return robot, base_limits, goal_conf, obstacles
##################################################
def iterate_path(robot, joints, path, step_size=None): # 1e-2 | None
    """Replay *path* on *robot*, either stepping manually via the GUI
    (step_size=None) or advancing automatically every ``step_size`` seconds.

    Does nothing when path is None.
    """
    if path is None:
        return
    path = adjust_path(robot, joints, path)
    with LockRenderer():
        drawn_handles = draw_path(path)
    wait_if_gui(message='Begin?')
    for index, configuration in enumerate(path):
        set_joint_positions(robot, joints, configuration)
        if step_size is None:
            wait_if_gui(message='{}/{} Continue?'.format(index, len(path)))
        else:
            wait_for_duration(duration=step_size)
    wait_if_gui(message='Finish?')
def main():
    """Demo entry point: parse CLI flags, build the turtlebot obstacle
    world, plan a base motion to the mirrored goal configuration, and
    replay the resulting path in the viewer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cfree', action='store_true',
                        help='When enabled, disables collision checking.')
    # parser.add_argument('-p', '--problem', default='test_pour', choices=sorted(problem_fn_from_name),
    #                     help='The name of the problem to solve.')
    parser.add_argument('--holonomic', action='store_true', # '-h',
                        help='')
    parser.add_argument('-l', '--lock', action='store_false',
                        help='')
    parser.add_argument('-s', '--seed', default=None, type=int,
                        help='The random seed to use.')
    parser.add_argument('-v', '--viewer', action='store_false',
                        help='')
    args = parser.parse_args()
    connect(use_gui=args.viewer)
    # Seed both Python's and numpy's RNGs for reproducibility.
    seed = args.seed
    #seed = 0
    #seed = time.time()
    set_random_seed(seed=seed) # None: 2147483648 = 2**31
    set_numpy_seed(seed=seed)
    print('Random seed:', get_random_seed(), random.random())
    print('Numpy seed:', get_numpy_seed(), np.random.random())
    #########################
    robot, base_limits, goal_conf, obstacles = problem1()
    draw_base_limits(base_limits)
    custom_limits = create_custom_base_limits(robot, base_limits)
    base_joints = joints_from_names(robot, BASE_JOINTS)
    base_link = link_from_name(robot, BASE_LINK_NAME)
    if args.cfree:
        # Collision-free mode: plan as if there were no obstacles.
        obstacles = []
    # for obstacle in obstacles:
    #     draw_aabb(get_aabb(obstacle)) # Updates automatically
    resolutions = None
    #resolutions = np.array([0.05, 0.05, math.radians(10)])
    set_all_static() # Doesn't seem to affect
    region_aabb = AABB(lower=-np.ones(3), upper=+np.ones(3))
    draw_aabb(region_aabb)
    step_simulation() # Need to call before get_bodies_in_region
    #update_scene() # TODO: https://github.com/bulletphysics/bullet3/pull/3331
    bodies = get_bodies_in_region(region_aabb)
    print(len(bodies), bodies)
    # https://github.com/bulletphysics/bullet3/search?q=broadphase
    # https://github.com/bulletphysics/bullet3/search?p=1&q=getCachedOverlappingObjects&type=&utf8=%E2%9C%93
    # https://andysomogyi.github.io/mechanica/bullet.html
    # http://www.cs.kent.edu/~ruttan/GameEngines/lectures/Bullet_User_Manual
    #draw_pose(get_link_pose(robot, base_link), length=0.5)
    for conf in [get_joint_positions(robot, base_joints), goal_conf]:
        draw_pose(pose_from_pose2d(conf, z=DRAW_Z), length=DRAW_LENGTH)
    aabb = get_aabb(robot)
    #aabb = get_subtree_aabb(robot, base_link)
    draw_aabb(aabb)
    for link in [BASE_LINK, base_link]:
        print(link, get_collision_data(robot, link), pairwise_link_collision(robot, link, robot, link))
    #########################
    saver = WorldSaver()
    start_time = time.time()
    profiler = Profiler(field='tottime', num=50) # tottime | cumtime | None
    profiler.save()
    with LockRenderer(lock=args.lock):
        # Plan with rendering locked, then restore the pre-planning state.
        path = plan_motion(robot, base_joints, goal_conf, holonomic=args.holonomic, obstacles=obstacles,
                           custom_limits=custom_limits, resolutions=resolutions,
                           use_aabb=True, cache=True, max_distance=0.,
                           restarts=2, iterations=20, smooth=20) # 20 | None
        saver.restore()
    #wait_for_duration(duration=1e-3)
    profiler.restore()
    #########################
    solved = path is not None
    length = INF if path is None else len(path)
    # NOTE(review): cost is computed before the `path is None` early return
    # below — presumably get_pairs(None) misbehaves when planning fails;
    # confirm and guard if so.
    cost = sum(get_distance_fn(robot, base_joints, weights=resolutions)(*pair) for pair in get_pairs(path))
    print('Solved: {} | Length: {} | Cost: {:.3f} | Runtime: {:.3f} sec'.format(
        solved, length, cost, elapsed_time(start_time)))
    if path is None:
        disconnect()
        return
    iterate_path(robot, base_joints, path)
    disconnect()
# Run the demo only when this module is executed directly.
if __name__ == '__main__':
    main()
|
from __future__ import print_function
from cloudmesh.config.cm_config import cm_config
from cloudmesh.shell.cm_cloud import CloudManage
from cloudmesh_base.logger import LOGGER
from tabulate import tabulate
from cmd3.console import Console
from cloudmesh.shell.cm_cloud import shell_command_cloud
from docopt import docopt
from cloudmesh.shell.shellutil import shell_commands_dict_output, get_command_list_refresh_default_setting
from pprint import pprint
# list_command_table_format = "simple"
# tabulate table style used by every list printout in this module.
list_command_table_format = "grid"

log = LOGGER(__file__)  # module-level cloudmesh logger
def shell_command_list(arguments):
    """
    ::

        List available flavors, images, vms, projects and clouds

        Usage:
            list flavor [CLOUD|--all]
                        [--refresh]
                        [--format=FORMAT]
                        [--column=COLUMN]
            list image [CLOUD|--all]
                       [--refresh]
                       [--format=FORMAT]
                       [--column=COLUMN]
            list vm [CLOUD|--all]
                    [--group=<group>]
                    [--refresh]
                    [--format=FORMAT]
                    [--column=COLUMN]
                    [--detail]
            list project
            list cloud [--column=COLUMN]

        Arguments:

            CLOUD          the name of the cloud e.g. india

        Options:

            --all          list information of all active clouds

            --refresh      refresh data before list

            --group=<group>
                           give the group name in list vm

            --detail       for table print format, a brief version
                           is used as default, use this flag to print
                           detailed table

            --column=COLUMN
                           specify what information to display in
                           the columns of the list command. For
                           example, --column=active,label prints
                           the columns active and label. Available
                           columns are active, label, host,
                           type/version, type, heading, user,
                           credentials, defaults (all to display
                           all, email to display all except
                           credentials and defaults)

            --format=FORMAT
                           output format: table, json, csv

        Description:

            List clouds and projects information, if the CLOUD argument is not specified, the
            selected default cloud will be used. You can interactively set the default cloud with the command
            'cloud select'.

            list flavor
            : list the flavors

            list image
            : list the images

            list vm
            : list the vms

            list project
            : list the projects

            list cloud
            : same as cloud list

        See Also:

            man cloud
    """
    # NOTE: the docstring above is parsed by docopt (see main()); changing
    # its wording or layout changes the command-line interface.
    call = ListInfo(arguments)
    call.execute()
class ListInfo(object):
    """Implementation of the ``list`` shell command: prints flavors, images,
    vms, projects and clouds for the current cloudmesh user, backed by the
    mongo database managed through CloudManage.

    NOTE: this module is Python 2 code (``except E, e`` syntax,
    ``dict.iteritems``, unicode ``.encode("ascii")``)."""

    def __init__(self, arguments):
        """Store the parsed docopt arguments and load user configuration."""
        # NOTE(review): this configuration/setup block is duplicated
        # verbatim a few lines below — one of the two copies is redundant.
        self.cloudmanage = CloudManage()
        try:
            self.config = cm_config()
        except:
            Console.error("There is a problem with the configuration yaml files")

        self.username = self.config['cloudmesh']['profile']['username']
        self.arguments = arguments
        # pprint(self.arguments)
        self.cloudmanage = CloudManage()
        try:
            self.config = cm_config()
        except:
            Console.error("There is a problem with the configuration yaml files")

        self.username = self.config['cloudmesh']['profile']['username']
        # Whether 'list' should refresh from the cloud by default for this user.
        self.refresh_default_setting = get_command_list_refresh_default_setting(self.username)

    def _list_flavor(self):
        """Print flavor tables for the selected (or all active) cloud(s)."""
        self.cloudmanage._connect_to_mongo()
        clouds = self.get_working_cloud_name()
        if clouds:
            # [display label, mongo document key] pairs for the table columns.
            itemkeys = [
                ['id', 'id'],
                ['name', 'name'],
                ['vcpus', 'vcpus'],
                ['ram', 'ram'],
                ['disk', 'disk'],
                ['refresh time', 'cm_refresh']
            ]
            if self.refresh_default_setting or self.arguments['--refresh']:
                self.cloudmanage.mongo.activate(
                    cm_user_id=self.username, names=clouds)
                self.cloudmanage.mongo.refresh(
                    cm_user_id=self.username, names=clouds, types=['flavors'])

            # --format
            p_format = self.arguments['--format']

            # --column
            # available columns are: id, name, vcpus, ram, disk, refresh time,
            # and all
            if self.arguments['--column']:
                if self.arguments['--column'] != "all":
                    s_column = [x.strip() for x in
                                self.arguments['--column'].split(',')]
                    new_itemkeys = []
                    for item in itemkeys:
                        if item[0] in s_column:
                            new_itemkeys.append(item)
                    itemkeys = new_itemkeys

            for cloud in clouds:
                self.cloudmanage.print_cloud_flavors(username=self.username,
                                                     cloudname=cloud.encode(
                                                         "ascii"),
                                                     itemkeys=itemkeys,
                                                     refresh=False,
                                                     output=False,
                                                     print_format=p_format)
        else:
            return

    def _list_image(self):
        """Print image tables for the selected (or all active) cloud(s)."""
        self.cloudmanage._connect_to_mongo()
        clouds = self.get_working_cloud_name()
        if clouds:
            # Per-provider [label, key path...] column specs for image documents.
            itemkeys = {"openstack":
                        [
                            # [ "Metadata", "metadata"],
                            ["name", "name"],
                            ["status", "status"],
                            ["id", "id"],
                            ["type_id", "metadata", "instance_type_id"],
                            ["iname", "metadata", "instance_type_name"],
                            ["location", "metadata", "image_location"],
                            ["state", "metadata", "image_state"],
                            ["updated", "updated"],
                            # [ "minDisk" , "minDisk"],
                            ["memory_mb", "metadata",
                             'instance_type_memory_mb'],
                            ["fid", "metadata", "instance_type_flavorid"],
                            ["vcpus", "metadata", "instance_type_vcpus"],
                            # [ "user_id" , "metadata", "user_id"],
                            # [ "owner_id" , "metadata", "owner_id"],
                            # [ "gb" , "metadata", "instance_type_root_gb"],
                            # [ "arch", ""]
                        ],
                        "ec2":
                        [
                            # [ "Metadata", "metadata"],
                            ["state", "extra", "state"],
                            ["name", "name"],
                            ["id", "id"],
                            ["public", "extra", "is_public"],
                            ["ownerid", "extra", "owner_id"],
                            ["imagetype", "extra", "image_type"]
                        ],
                        "azure":
                        [
                            ["name", "label"],
                            ["category", "category"],
                            ["id", "id"],
                            ["size", "logical_size_in_gb"],
                            ["os", "os"]
                        ],
                        "aws":
                        [
                            ["state", "extra", "state"],
                            ["name", "name"],
                            ["id", "id"],
                            ["public", "extra", "ispublic"],
                            ["ownerid", "extra", "ownerid"],
                            ["imagetype", "extra", "imagetype"]
                        ]
                        }
            if self.refresh_default_setting or self.arguments['--refresh']:
                self.cloudmanage.mongo.activate(
                    cm_user_id=self.username, names=clouds)
                self.cloudmanage.mongo.refresh(
                    cm_user_id=self.username, names=clouds, types=['images'])

            p_format = self.arguments['--format']

            # --column
            # available columns are: id, name, vcpus, ram, disk, refresh time,
            # and all
            if self.arguments['--column']:
                if self.arguments['--column'] != "all":
                    s_column = [x.strip() for x in
                                self.arguments['--column'].split(',')]
                    new_itemkeys = {x: [] for x in itemkeys.keys()}
                    for cloud, items in itemkeys.iteritems():
                        for item in items:
                            if item[0] in s_column:
                                new_itemkeys[cloud].append(item)
                    itemkeys = new_itemkeys

            for cloud in clouds:
                self.cloudmanage.print_cloud_images(username=self.username,
                                                    cloudname=cloud.encode(
                                                        "ascii"),
                                                    itemkeys=itemkeys,
                                                    refresh=False, output=False,
                                                    print_format=p_format)
        else:
            return

    def _list_server(self):
        """Print vm tables for the selected (or all active) cloud(s)."""
        self.cloudmanage._connect_to_mongo()
        clouds = self.get_working_cloud_name()
        if clouds:
            # Detailed per-provider column specs (used with --detail).
            itemkeys = {"openstack":
                        [
                            ['name', 'name'],
                            ['status', 'status'],
                            ['addresses', 'addresses'],
                            ['id', 'id'],
                            ['flavor', 'flavor', 'id'],
                            ['image', 'image', 'id'],
                            ['user_id', 'cm_user_id'],
                            ['metadata', 'metadata'],
                            ['key_name', 'key_name'],
                            ['created', 'created'],
                            ['cloud', 'cm_cloud']
                        ],
                        "ec2":
                        [
                            ["name", "id"],
                            ["status", "extra", "status"],
                            ["addresses", "public_ips"],
                            ["flavor", "extra", "instance_type"],
                            ['id', 'id'],
                            ['image', 'extra', 'imageId'],
                            ["user_id", 'user_id'],
                            ["metadata", "metadata"],
                            ["key_name", "extra", "key_name"],
                            ["created", "extra", "launch_time"]
                        ],
                        "aws":
                        [
                            ["name", "name"],
                            ["status", "extra", "status"],
                            ["addresses", "public_ips"],
                            ["flavor", "extra", "instance_type"],
                            ['id', 'id'],
                            ['image', 'extra', 'image_id'],
                            ["user_id", "user_id"],
                            ["metadata", "metadata"],
                            ["key_name", "extra", "key_name"],
                            ["created", "extra", "launch_time"]
                        ],
                        "azure":
                        [
                            ['name', 'name'],
                            ['status', 'status'],
                            ['addresses', 'vip'],
                            ['flavor', 'flavor', 'id'],
                            ['id', 'id'],
                            ['image', 'image', 'id'],
                            ['user_id', 'user_id'],
                            ['metadata', 'metadata'],
                            ['key_name', 'key_name'],
                            ['created', 'created'],
                        ]
                        }
            # Brief column specs (the default table layout).
            itemkeys_short = {"openstack":
                              [
                                  ['name', 'name'],
                                  ['status', 'status'],
                                  ['addresses', 'addresses'],
                                  ['flavor', 'flavor', 'id'],
                                  ['image', 'image', 'id']
                              ],
                              "ec2":
                              [
                                  ["name", "id"],
                                  ["status", "extra", "status"],
                                  ["addresses", "public_ips"],
                                  ["flavor", "extra", "instance_type"],
                                  ['image', 'extra', 'imageId']
                              ],
                              "aws":
                              [
                                  ["name", "name"],
                                  ["status", "extra", "status"],
                                  ["addresses", "public_ips"],
                                  ["flavor", "extra", "instance_type"],
                                  ['image', 'extra', 'image_id']
                              ],
                              "azure":
                              [
                                  ['name', 'name'],
                                  ['status', 'status'],
                                  ['addresses', 'vip'],
                                  ['flavor', 'flavor', 'id'],
                                  ['image', 'image', 'id']
                              ]
                              }
            if self.refresh_default_setting or self.arguments['--refresh']:
                self.cloudmanage.mongo.activate(
                    cm_user_id=self.username, names=clouds)
                self.cloudmanage.mongo.refresh(
                    cm_user_id=self.username, names=clouds, types=['servers'])

            p_format = self.arguments['--format']

            # --column
            # available columns are: id, name, vcpus, ram, disk, refresh time,
            # and all
            if self.arguments['--column']:
                if self.arguments['--column'] != "all":
                    s_column = [x.strip() for x in
                                self.arguments['--column'].split(',')]
                    new_itemkeys = {x: [] for x in itemkeys.keys()}
                    for cloud, items in itemkeys.iteritems():
                        for item in items:
                            if item[0] in s_column:
                                new_itemkeys[cloud].append(item)
                    itemkeys = new_itemkeys
            else:
                if not self.arguments['--detail']:
                    itemkeys = itemkeys_short

            for cloud in clouds:
                self.cloudmanage.print_cloud_servers(username=self.username,
                                                     cloudname=cloud.encode(
                                                         "ascii"),
                                                     itemkeys=itemkeys,
                                                     refresh=False,
                                                     output=False,
                                                     print_format=p_format,
                                                     group=self.arguments['--group'])
        else:
            return

    def _list_project(self):
        """Print the selected project plus the user's active and completed
        project lists."""
        self.cloudmanage._connect_to_mongo()
        selected_project = None
        try:
            # NOTE(review): the lookup key appends "OIO" to the username,
            # which looks like a debugging leftover — confirm intent.
            selected_project = self.cloudmanage.mongo.db_defaults.find_one(
                {'cm_user_id': self.username + "OIO"})['project']
        except Exception, NoneType:
            Console.warning("could not find selected project in the database")
        # NOTE(review): this second handler catches the same Exception class
        # as the one above and is therefore unreachable.
        except Exception, e:
            Console.error("could not connect to the database")
            print(e)

        print("\n")
        print(tabulate([[selected_project]], ["selected project"], tablefmt=list_command_table_format))

        #
        # active projects
        #
        projects = {}
        for state in ["active", "completed"]:
            projects[state] = None
            try:
                projects[state] = self.cloudmanage.mongo.db_user.find_one(
                    {'cm_user_id': self.username})['projects'][state]
            except:
                Console.error(
                    "could not find objects or connect to the database containing the projects")

            to_print = []
            if projects[state] is None:
                to_print = [[None]]
            else:
                to_print = [[str(p)] for p in projects[state]]
            print("\n")
            print(tabulate(to_print, ["{0} projects".format(state)], tablefmt=list_command_table_format))

    def _list_cloud(self):
        """ same as the shell_command_cloud list"""
        arguments = dict(self.arguments)
        arguments["list"] = True
        shell_command_cloud(arguments)
        # The string literal below is dead code kept from an older
        # implementation (it is never executed).
        """
        self.cloudmanage._connect_to_mongo()
        active_clouds = []
        other_clouds = []
        activeclouds = self.cloudmanage.mongo.active_clouds(self.username)
        clouds = self.cloudmanage.get_clouds(self.username)
        clouds = clouds.sort([('cm_cloud', 1)])
        for cloud in clouds:
            name = cloud['cm_cloud']
            if name in activeclouds:
                active_clouds.append([str(name)])
            else:
                other_clouds.append([str(name)])
        if active_clouds == []: active_clouds = [None]
        if other_clouds == []: other_clouds = [None]
        print tabulate(active_clouds, ["active clouds"], tablefmt=list_command_table_format)
        print "\n"
        print tabulate(other_clouds, ["other clouds"], tablefmt=list_command_table_format)
        print "\n"
        """

    # --------------------------------------------------------------------------
    def get_working_cloud_name(self):
        '''
        Get the name(s) of the cloud(s) to work on: if CLOUD is not given,
        pick the selected cloud; with --all, return the list of active
        clouds. Returns False (after printing a message) when no usable
        cloud is found.
        '''
        self.cloudmanage._connect_to_mongo()
        activeclouds = None
        try:
            activeclouds = self.cloudmanage.mongo.active_clouds(self.username)
        except:
            pass

        if self.arguments['--all']:
            if activeclouds is None:
                print("no active cloud, please activate a cloud by 'cloud on [CLOUD]'")
                return False
            return activeclouds
        else:
            if self.arguments['CLOUD']:
                name = self.arguments['CLOUD']
            else:
                name = self.cloudmanage.get_selected_cloud(self.username)
            if self.cloudmanage.get_clouds(self.username, getone=True, cloudname=name) is None:
                Console.error(
                    "no cloud information of '{0}' in database".format(name))
                return False
            if name not in activeclouds:
                Console.warning(
                    "cloud '{0}' is not active, to activate a cloud: cloud on [CLOUD]".format(name))
                return False
            return [name]

    def execute(self):
        """Dispatch to the list sub-command chosen in the parsed arguments."""
        if self.arguments['vm']:
            self._list_server()
        elif self.arguments['flavor']:
            self._list_flavor()
        elif self.arguments['image']:
            self._list_image()
        elif self.arguments['project']:
            self._list_project()
        elif self.arguments['cloud']:
            self._list_cloud()
def main():
    """Standalone entry point: parse argv against the shell_command_list
    docstring with docopt, then dispatch."""
    arguments = docopt(shell_command_list.__doc__)
    shell_command_list(arguments)


if __name__ == '__main__':
    main()
|
from django.contrib.gis.db import models
class Venue(models.Model):
    """
    Model for a venue
    """
    # Display name of the venue.
    name = models.CharField(max_length=200)
    # Geographic location of the venue (GeoDjango point field).
    location = models.PointField()

    def __str__(self):
        return self.name
class Event(models.Model):
    """
    Model for an event held at a Venue on a specific date/time.
    """
    # Display name of the event.
    name = models.CharField(max_length=200)
    # When the event takes place.
    datetime = models.DateTimeField()
    # Fixed: on_delete made explicit. CASCADE was the implicit default
    # before Django 2.0 (where the argument became required), so behavior
    # is unchanged while the model now also works on modern Django.
    venue = models.ForeignKey(Venue, on_delete=models.CASCADE)

    def __str__(self):
        return "%s - %s" % (self.name, self.venue.name)
|
import os
import csv
from math import ceil, floor
import gffutils
# Exit status used when re-annotating a GFF file fails.
EXIT_GFF_REANNOTATION_ERROR = 3

# Support running both as an installed package and from a source checkout.
try:
    from Corekaburra.correct_gffs import annotate_refound_genes
except ModuleNotFoundError:
    from correct_gffs import annotate_refound_genes

try:
    from Corekaburra.exit_with_error import exit_with_error
except ModuleNotFoundError:
    from exit_with_error import exit_with_error
def add_gene_to_dict(main_dict, gene, pan_gene_name, genome):
    """
    Register a gene (locus_tag) under its pan-genome cluster name.

    :param main_dict: Dict of dicts — outer keys are genome names, inner
        keys are locus_tags mapping to pan-genome gene names.
    :param gene: The locus_tag from a specific genome; may be a ';'-joined
        pair, in which case BOTH parts are added as separate entries.
    :param pan_gene_name: Name of the pan-genome gene (cluster) the gene
        belongs to.
    :param genome: Name of the genome in question.
    :return: the updated main_dict.
    """
    # split(';') yields [gene] when there is no separator, so a single
    # loop covers both the pair and the single-gene case.
    for locus_tag in gene.split(';'):
        main_dict[genome][locus_tag] = pan_gene_name
    return main_dict
def check_fragmented_gene(fragment_info, input_gffs, tmp_folder_path, gene_data_dict, corrected_dir, logger):
    """
    Function that check for that placement of fragmented gene parts, to determine if they are neighbouring or have some genomic feature between them
    :param fragment_info: List of genes that are found to be fragmented, one composite of fragments for each index
    :param input_gffs: A list of file-paths to the gff files given as input
    :param tmp_folder_path: A file-path to the temporary folder of the Corekaburra run
    :param gene_data_dict: Dict of refound-gene info used to reannotate refound genes into gffs
    :param corrected_dir: Directory in which corrected (reannotated) gff files are written
    :param logger: Program logger
    :return: A List of booleans indicating if a fragments has nothing in between fragments (True) or not (False)
    """
    # Check if any refound genes are in fragments to be checked, if then reannotate the genes before checking:
    refound_fregments = [[i, gene_gff] for i, gene_gff in enumerate(fragment_info) if 'refound' in gene_gff[0]]

    if refound_fregments:
        for i, gene_gff in refound_fregments:
            gene, gff = gene_gff

            gff_name = None
            # First look for an already corrected version of the gff; match on the
            # basename with zero, one or two extensions stripped (e.g. .gff, .gff.gz).
            try:
                gff_name = [gff_name for gff_name in input_gffs
                            if f"{gff}_corrected" in [os.path.basename(gff_name),
                                                      os.path.basename(gff_name).rsplit('.', 1)[0],
                                                      os.path.basename(gff_name).rsplit('.', 1)[0].rsplit('.', 1)[0]]][0]
            except IndexError:
                # No corrected file yet; fall back to the raw input gff below.
                pass

            if gff_name is None:
                try:
                    gff_name = [gff_name for gff_name in input_gffs
                                if gff in [os.path.basename(gff_name),
                                           os.path.basename(gff_name).rsplit('.', 1)[0],
                                           os.path.basename(gff_name).rsplit('.', 1)[0].rsplit('.', 1)[0]]][0]
                except IndexError:
                    # Hard failure: the gff referenced by the presence/absence file
                    # is not among the inputs.
                    exit_with_error(EXIT_GFF_REANNOTATION_ERROR,
                                    f'A problem occurred when trying to find a file for reannotation, when passing the '
                                    f'gene_presence_absence_roary.csv! GFF: {gff}, Gene: {gene}')

                # Reannotate only when no corrected file existed yet.
                gff_name = annotate_refound_genes(gff_name, gene_data_dict, tmp_folder_path, corrected_dir, logger)

            fragment_info[i][1] = gff_name

    fragments_close = []
    for fragment in fragment_info:
        # split the two fragments
        fragment_pieces = fragment[0].split(';')

        # Get the name of the genome
        genome = fragment[1]

        # Get the gff and its path
        if '.gff' not in genome:
            try:
                gff_file = [file for file in input_gffs if f'{genome}.gff' in file][0]
                db_name = os.path.join(tmp_folder_path, f'{genome}_db')
            except IndexError:
                raise NotImplementedError(f'No gff match was found when searching fragments for genome: {genome}')
        else:
            gff_file = genome
            db_name = f"{os.path.basename(genome)}_db"
            db_name = os.path.join(tmp_folder_path, db_name)

        # Construct gff database to be searched (cached on disk between calls)
        if not os.path.isfile(db_name):
            gffutils.create_db(gff_file, db_name, force_gff=True, id_spec=['old_locus_tag', 'ID'])

        # Attach database
        gff_database = gffutils.FeatureDB(db_name)

        # Check that all fragments are on the same contig.
        first_fragment_contig = gff_database[fragment_pieces[0]][0]
        frag_same_contig = all([first_fragment_contig == gff_database[fragment][0] for fragment in fragment_pieces])
        if frag_same_contig:
            # Get all coordinates (start and end of every piece)
            frag_coors = []
            for frag in fragment_pieces:
                frag_coors.append(gff_database[frag][3])
                frag_coors.append(gff_database[frag][4])

            # Construct region to be searched for annotations between fragments:
            max_frag_coor = max(frag_coors)
            min_frag_coor = min(frag_coors)

            region = (first_fragment_contig, min_frag_coor, max_frag_coor)

            # Find all features that are completely within the region
            region_features = gff_database.region(region=region, completely_within=True)

            # Find if some pieces are refound and change old_locus_tag to ID
            refound_pieces = [[i, fragment_piece] for i, fragment_piece in enumerate(fragment_pieces) if 'refound' in fragment_piece]
            if refound_pieces:
                for i, piece in refound_pieces:
                    fragment_pieces[i] = gff_database[piece]['ID'][0]

            # find all genes that are not part of the fragmented gene
            region_locus_tags = set([feature[8]['locus_tag'][0] for feature in region_features])
            excess_genes = region_locus_tags.difference(fragment_pieces)

            # check the number of excess genes, if any then False to being core
            if len(excess_genes) > 0:
                fragments_close.append(False)
            else:
                fragments_close.append(True)
        else:
            # Pieces on different contigs can never be neighbours.
            fragments_close.append(False)

    return fragments_close
# TODO - find out what the non-closed file problem is here! Can be seen when running unit-tests.
def read_gene_presence_absence(pres_abs_file, core_gene_presence, low_freq_gene, source_program, input_gffs, tmp_folder_path, gene_data_dict, corrected_dir, logger):
    """
    Function that pass a Roary style gene presence/absence file.
    :param pres_abs_file: File path to the gene presence/absence file identified
    :param core_gene_presence: The ratio of genomes in which a gene must present, to be seen as a core gene
    :param low_freq_gene: The ratio of genomes in which a gene must not surpass, to be seen as a low-frequency gene
    :param source_program: The program from which the pan-genome was produced
    :param input_gffs: A list of file-paths to the gff files given as input
    :param tmp_folder_path: A file-path to the temporary folder of the Corekaburra run
    :param gene_data_dict: Dict of refound-gene info used to reannotate refound genes into gffs
    :param corrected_dir: Directory in which corrected (reannotated) gff files are written
    :param logger: Program logger
    :return: Directories of directories of core and low frequency genes, and a directory of pan genome clusters and their annotation.
    """
    # Open the presence/absense file to index gene into core, accessory, or low-frequency genes
    with open(pres_abs_file, 'r', newline='', ) as gene_presence_absence:
        # Read column header line
        gff_file_names = gene_presence_absence.readline()
        # Strip for whitespace
        gff_file_names = gff_file_names.strip()
        # split column names
        gff_file_names = gff_file_names.split(',')

        # Remove the quotes from Rorary input
        if source_program == 'Roary':
            gff_file_names = [filename.replace('"', '') for filename in gff_file_names]

        # Index gff filenames and column position in dict for better search
        # (columns 0-13 are Roary metadata; genome columns start at index 14).
        gff_file_dict = {}
        for i, gff_name in enumerate(gff_file_names[14:]):
            gff_file_dict[gff_name] = i

        # Read remaining lines and construct a nested dicts one dict for each genome and its core genes,
        # and a dict for low frequency genes found in less than set percent of isolates

        # Initialise reader object to read remaining lines
        reader = csv.reader(gene_presence_absence, delimiter=',')

        # Counters
        core_gene_number = 0
        low_freq_gene_number = 0
        acc_gene_number = 0

        # Determine number of isolates that represent core and low frequency genes
        core_gene_isolate_presence = floor(len(gff_file_dict.keys()) * core_gene_presence)
        low_freq_gene_isolate_presence = ceil(len(gff_file_dict.keys()) * low_freq_gene)

        logger.info(f"------------Opening the gene presence/absence file------------\n"
                    f"Core genes must be found in {core_gene_isolate_presence} or more genomes\n"
                    f"Low frequency genes must be found in less than {low_freq_gene_isolate_presence} genomes\n")

        # initialise dict of dicts to hold genes from each gffs and to be returned
        core_gene_dict = {item: {} for item in gff_file_names[14:]}
        low_freq_gene_dict = {item: {} for item in gff_file_names[14:]}
        acc_gene_dict = {item: {} for item in gff_file_names[14:]}

        # Read lines from file and determine if core, low frequency or 'regular' accessory and record annotations
        for line in reader:
            # Remove quotes if Roary
            if source_program == 'Roary':
                line = [element.replace('"', '') for element in line]

            # Get number of genes in line and average presence of genes in genomes
            # (line[3] = "No. isolates", line[4] = "No. sequences" in Roary output)
            gene_isolate_presence = int(line[3])
            no_seq_presence = int(line[4])

            # Check if core gene, if then add annotations to genomes
            # Check if gene is present in all genomes and no one gene is fragmented
            if core_gene_isolate_presence <= gene_isolate_presence == no_seq_presence:
                # Add gene cluster to genomes
                for genome in core_gene_dict.keys():
                    # Check if there is an annotation for the given genome
                    if len(line[14 + gff_file_dict[genome]]) > 0:
                        core_gene_dict[genome][line[14+gff_file_dict[genome]]] = line[0]
                core_gene_number += 1

            # Check if gene is present in all genomes, but more than one copy is present
            elif core_gene_isolate_presence <= gene_isolate_presence:
                # Identify annotations for genomes that are fragmented genes
                fragment_info = [[genes, gff] for genes, gff in zip(line[14:], gff_file_names[14:]) if ';' in genes]

                # Check that each annotation is neighboring the other annotation.
                fragments_close = check_fragmented_gene(fragment_info, input_gffs, tmp_folder_path, gene_data_dict,
                                                        corrected_dir, logger) # TODO - If a core gene is found to be made up of fragments not places close enough (With something in between) should this then not be subtracted from the core gene count? - How would this be handled if there is a gff that is not given as input?

                # Check if gene was found to be a core gene
                if all(fragments_close):
                    # Add the gene to the annotation dict
                    for genome in core_gene_dict:
                        # Get the annoations for a specific genome
                        genes_in_genome = line[14 + gff_file_dict[genome]]
                        # If there is an annotation add id
                        if len(genes_in_genome) > 0:
                            # Check if genome has fragments of genes,
                            # if then add them all to the annotation dict,
                            # if not then just ad the single annotation
                            add_gene_to_dict(core_gene_dict, genes_in_genome, line[0], genome)
                    core_gene_number += 1

                else:
                    # Check if low frequency, if then add else then add as normal accessory
                    if low_freq_gene_isolate_presence >= gene_isolate_presence == no_seq_presence:
                        for genome in low_freq_gene_dict.keys():
                            if len(line[14 + gff_file_dict[genome]]) > 0:
                                add_gene_to_dict(low_freq_gene_dict, line[14 + gff_file_dict[genome]], line[0], genome)
                        low_freq_gene_number += 1
                    else:
                        for genome in acc_gene_dict.keys():
                            if len(line[14 + gff_file_dict[genome]]) > 0:
                                add_gene_to_dict(acc_gene_dict, line[14 + gff_file_dict[genome]], line[0], genome)
                        acc_gene_number += 1

            # Check if accessory if then add annotation to genomes
            elif low_freq_gene_isolate_presence >= gene_isolate_presence == no_seq_presence:
                for genome in low_freq_gene_dict.keys():
                    if len(line[14+gff_file_dict[genome]]) > 0:
                        add_gene_to_dict(low_freq_gene_dict, line[14 + gff_file_dict[genome]], line[0], genome)
                low_freq_gene_number += 1

            # If not core or low frequency count as regular accessory
            else:
                for genome in acc_gene_dict.keys():
                    if len(line[14+gff_file_dict[genome]]) > 0:
                        add_gene_to_dict(acc_gene_dict, line[14 + gff_file_dict[genome]], line[0], genome)
                acc_gene_number += 1

    logger.info("A total of:\n"
                f"{core_gene_number} core gene clusters were identified\n"
                f"{low_freq_gene_number} low frequency gene clusters were identified\n"
                f"{acc_gene_number} intermediate accessory gene clusters were identified\n")

    # Remove gff databases built by check_fragmented_gene
    files_in_tmp = os.listdir(tmp_folder_path)
    gff_dbs = [file for file in files_in_tmp if '_db' in file]
    [os.remove(os.path.join(tmp_folder_path, db)) for db in gff_dbs]

    return core_gene_dict, low_freq_gene_dict, acc_gene_dict
# No CLI behaviour; this module is intended to be imported.
if __name__ == '__main__':
    pass
|
import pretty_midi
import IPython.display
import math
import matplotlib.pyplot as plt
def make_music_heterophonic(pitches=60, durs=0.25, pgm=1, is_drum=False, format='inbrowser', sr=16000):
    """Turn a list of a list of numbers into music.

    Each entry of `pitches`/`durs`/`pgm`/`is_drum` describes one voice;
    shorter lists are extended by repeating their last element so all
    voices line up. Parameters otherwise mirror `make_music`.
    """
    # Check and convert to list if needed. Copy caller-supplied lists so the
    # in-place `+=` extension below cannot mutate the caller's arguments
    # (the original extended the caller's own list objects).
    pitches = list(pitches) if isinstance(pitches, list) else [pitches]
    durs = list(durs) if isinstance(durs, list) else [durs]
    pgm = list(pgm) if isinstance(pgm, list) else [pgm]
    is_drum = list(is_drum) if isinstance(is_drum, list) else [is_drum]

    # extend short lists if size mismatch (in number of voices)
    num_voices = max(len(pitches), len(durs), len(pgm), len(is_drum))
    pitches += [pitches[-1]] * (num_voices - len(pitches))
    durs += [durs[-1]] * (num_voices - len(durs))
    pgm += [pgm[-1]] * (num_voices - len(pgm))
    is_drum += [is_drum[-1]] * (num_voices - len(is_drum))

    # make music for each voice and collect into a list of instruments
    ins = [make_music(pitches=p, durs=d, pgm=i, is_drum=x, format='MIDI').instruments[0]
           for p, d, i, x in zip(pitches, durs, pgm, is_drum)]

    # create a PrettyMIDI score and add all instruments
    score = pretty_midi.PrettyMIDI()
    score.instruments.extend(ins)

    # which format to render
    if format == 'MIDI':
        return score
    elif format == 'audio':
        # NOTE(review): renders at a fixed 16 kHz, ignoring `sr` — presumably
        # intentional (matches the default), but confirm.
        return score.fluidsynth(fs=16000)
    elif format == 'inbrowser':
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr)
    elif format == 'autoplay':
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr, autoplay=True)
    else:
        raise ValueError("So sorry but your `format` argument did not match one of the available options")
def test_make_music_heterophonic():
    """Smoke test: three parallel voices a major third apart, as MIDI."""
    base = [60 + 2 * i for i in range(8)]
    upper = [p + 4 for p in base]
    lower = [p - 4 for p in base]
    return make_music_heterophonic(pitches=[base, upper, lower], durs=0.25, pgm=[1, 13, 24], format='MIDI')
def make_music(pitches=60, durs=0.333, pgm=1, is_drum=False, format='inbrowser', sr=16000, resolution=220):
    """Turn lists of numbers into music.

    Converts pitch and duration values into MIDI and/or audio playback. Uses
    `pretty_midi` for MIDI representation handling, fluidsynth for resynthesis,
    and `IPython.display.Audio` for browser playback.

    Parameters
    ----------
    pitches : list or scalar
        List of pitches, or scalar if constant pitch. Floating point values are
        interpreted as microtonal pitch deviations. An entry may itself be a
        list (simultaneous notes / chord); `None` produces a rest.
    durs: list or scalar
        List of durations, or scalar if constant duration.
    pgm: number
        MIDI program number, in range ``[0, 127]``.
    is_drum : bool
        If True use percussion channel 10.
    format : string
        Which format to render sound to?
        - `'MIDI'` returns MIDI as a `pretty_midi` object
        - `'audio'` returns waveforms as a `numpy` nd.array
        - `'inbrowser'` returns `IPython.display.Audio` widget
        - `'autoplay'` returns `IPython.display.Audio` widget and plays it
    sr : int
        Sample rate for the browser playback widgets.
    resolution : int
        MIDI ticks per quarter note of the generated score.

    Returns
    -------
    synthesized: depends on the value of `format`.

    Notes
    -----
    If len(pitches) and len(durs) do not match, the smaller list is extended to
    match the length of the longer list by repeating the last value.
    """
    # check and convert to list if needed
    # NOTE(review): the `+=` below extends caller-supplied lists in place.
    pitches = pitches if isinstance(pitches, list) else [pitches]
    durs = durs if isinstance(durs, list) else [durs]

    # extend short lists if size mismatch
    max_length = max(len(pitches), len(durs))
    pitches += [pitches[-1]] * (max_length - len(pitches))
    durs += [durs[-1]] * (max_length - len(durs))

    # create a PrettyMIDI score
    score = pretty_midi.PrettyMIDI(resolution=resolution)

    # create a list of instruments one for each voice (for polypohonic pitch bend)
    num_voices = max([len(p) if isinstance(p, list) else 1 for p in pitches])
    # pgm is 1-based here; pretty_midi programs are 0-based.
    ins = [pretty_midi.Instrument(program=max(pgm-1, 0), is_drum=is_drum) for i in range(num_voices)]

    # iterate through music
    now_time = 0
    for pitch, dur in zip(pitches, durs):
        # rest if pitch is None
        if pitch is not None:
            # convert to list if needed
            pitch = pitch if isinstance(pitch, list) else [pitch]
            # loop through each voice of the list
            for voice_index, pitch_val in enumerate(pitch):
                # split into 12tet and microtones
                micros, twlvtet = math.modf(pitch_val)
                # create a new note
                note = pretty_midi.Note(velocity=100, pitch=int(twlvtet), start=now_time, end=now_time+dur)
                # and add it to the instrument
                ins[voice_index].notes.append(note)
                # if microtonal
                if micros != 0:
                    # create a new pitch bend
                    # note: 4096 is a semitone in standard MIDI +/-2 pitchbend range
                    micropitch = pretty_midi.PitchBend(pitch=int(round(micros*4096)), time=now_time)
                    # and add it to the instrument
                    ins[voice_index].pitch_bends.append(micropitch)
        # advance time (rests advance it too)
        now_time += dur

    # add instruments to the score
    score.instruments.extend(ins)

    # which format to render
    if format=='MIDI':
        return score
    elif format=='audio':
        return score.fluidsynth(fs=16000)
    elif format=='inbrowser':
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr)
    elif format=='autoplay':
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr, autoplay=True)
    else:
        raise ValueError("So sorry but your `format` argument did not match one of the available options")
def n_max(a):
    '''Return the max of a list that contains None'''
    return max(x for x in a if x is not None)
def n_min(a):
    '''Return the min of a list that contains None'''
    return min(x for x in a if x is not None)
def make_music_plot(pitches=60, durs=0.333, pgm=1, is_drum=False, format='autoplay', sr=16000, figsize=(9,3), cmap='jet', show=True):
    """Plot lists of numbers as music (same API as `make_music`).

    Draws one horizontal bar per note (x = time, y = pitch), colored by the
    pitch normalized over the whole sequence. `None` pitches are rests and
    only advance time. `pgm`, `is_drum`, `format` and `sr` are accepted for
    API symmetry with `make_music` but do not affect the plot.
    """
    # Check and convert to list if needed. Copy caller-supplied lists so the
    # in-place `+=` extension below cannot mutate the caller's arguments
    # (the original extended the caller's own list objects).
    pitches = list(pitches) if isinstance(pitches, list) else [pitches]
    durs = list(durs) if isinstance(durs, list) else [durs]

    # extend short lists if size mismatch
    max_length = max(len(pitches), len(durs))
    pitches += [pitches[-1]] * (max_length - len(pitches))
    durs += [durs[-1]] * (max_length - len(durs))

    # plot
    plt.figure(figsize=figsize)
    # NOTE(review): plt.cm.get_cmap is deprecated since matplotlib 3.7
    # (use matplotlib.colormaps[cmap]); kept for compatibility here.
    cm = plt.cm.get_cmap(name=cmap)
    # Hoist the loop-invariant pitch range out of the loop.
    lo, hi = n_min(pitches), n_max(pitches)
    pitch_range = hi - lo
    curr_time = 0
    for pitch, dur in zip(pitches, durs):
        if pitch is not None:
            # Normalize pitch to [0, 1] for the colormap; a degenerate
            # (constant-pitch) range maps to 1, as before.
            pitch_normed = float(pitch - lo) / pitch_range if pitch_range != 0 else 1
            plt.scatter([curr_time], [pitch], marker='|', c='white', s=25, zorder=3)
            plt.plot([curr_time, curr_time + dur], [pitch, pitch], lw=5, solid_capstyle='butt', c=cm(pitch_normed), alpha=0.75)
        curr_time += dur
    if show:
        plt.show()
|
# Generated by Django 3.0.4 on 2020-05-12 21:52
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the single-user field on `fps` with a many-to-many link.

    NOTE(review): RemoveField followed by AddField drops the existing
    column and its data — any prior user assignments are lost. Confirm
    this was intended (a data migration would be needed to preserve them).
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0002_auto_20200513_0257'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='fps',
            name='user',
        ),
        migrations.AddField(
            model_name='fps',
            name='user',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \
check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern
def get_gpu_ignored_patterns():
    """Return the pattern groups ignored during quantization on GPU devices."""
    # Pair every fused pattern with its corresponding check function.
    block_patterns = [(p, check_fused_scale_shift_patterns) for p in get_fused_scale_shift_patterns()]
    block_patterns += [(p, check_fused_op_const_patterns) for p in get_fused_op_const_pattern()]
    return {
        'blocks': block_patterns,
        'activations': [get_clamp_mult_const_pattern()],
        'inputs': [],
    }
|
"""Leetcode 1161. Maximum Level Sum of a Binary Tree
Medium
URL: https://leetcode.com/problems/maximum-level-sum-of-a-binary-tree/
Given the root of a binary tree, the level of its root is 1,
the level of its children is 2, and so on.
Return the smallest level X such that the sum of all the values of nodes
at level X is maximal.
Example 1:
Input: [1,7,0,7,-8,null,null]
1
/ \
7 0
/ \
7 -8
Output: 2
Explanation:
Level 1 sum = 1.
Level 2 sum = 7 + 0 = 7.
Level 3 sum = 7 + -8 = -1.
So we return the level with the maximum sum which is level 2.
Note:
- The number of nodes in the given tree is between 1 and 10^4.
- -10^5 <= node.val <= 10^5
"""
# Definition for a binary tree node.
class TreeNode(object):
    """A binary tree node: a value plus optional left/right children."""

    def __init__(self, val):
        self.val = val
        # Children start empty; callers attach subtrees directly.
        self.left = self.right = None
class SolutionLevelBFS(object):
    def maxLevelSum(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Level-order BFS; track the level with the maximal node-value sum.
        On ties the smallest (earliest) level wins, since we only update
        on a strictly greater sum.

        Time complexity: O(n).
        Space complexity: O(logn) for balanced tree; O(n) for single sided.
        """
        # deque gives O(1) pops/appends at both ends; the original used
        # list.insert(0, ...) which is O(n) per insertion.
        from collections import deque

        # Track current max sum and level id.
        max_sum = -float('inf')
        max_level = 0
        # Use queue for level BFS.
        queue = deque([root])
        cur_level = 0
        while queue:
            # Accumulate level sum over exactly the nodes of this level.
            cur_level += 1
            level_sum = 0
            for _ in range(len(queue)):
                current = queue.popleft()
                level_sum += current.val
                # Enqueue children for the next level's pass.
                if current.left:
                    queue.append(current.left)
                if current.right:
                    queue.append(current.right)
            # Compare level sum and max sum, update the latter if needed.
            if level_sum > max_sum:
                max_sum = level_sum
                max_level = cur_level
        return max_level
def main():
    """Build the example tree from the problem statement and print the answer."""
    # Input: [1,7,0,7,-8,null,null]
    #     1
    #    / \
    #   7   0
    #  / \
    # 7  -8
    # Output: 2
    root = TreeNode(1)
    root.left = TreeNode(7)
    root.right = TreeNode(0)
    root.left.left = TreeNode(7)
    root.left.right = TreeNode(-8)
    # Use the print() function: the original Python 2 `print` statement is a
    # SyntaxError on Python 3. (Also valid Python 2 for a single argument.)
    print(SolutionLevelBFS().maxLevelSum(root))
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
|
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.operators.python_operator import BranchPythonOperator
from datetime import datetime, timedelta
import pandas as pd
from sklearn import datasets
import os
import json
from evidently.model_profile import Profile
from evidently.profile_sections import DataDriftProfileSection
from evidently.dashboard import Dashboard
from evidently.tabs import DataDriftTab
# Default task arguments shared by every task in the DAG below.
default_args = {
    "start_date": datetime(2020, 1, 1),
    "owner": "airflow",
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}

# Output location for the generated drift dashboard HTML.
dir_path = 'reports'
file_path = 'data_drift_dashboard_by_airflow.html'
# Evaluate data drift with an Evidently Profile.
def _detect_dataset_drift(reference, production, column_mapping, confidence=0.95, threshold=0.5, get_ratio=False):
    """Evaluate data drift between a reference and a production frame.

    Returns True if Data Drift is detected, else returns False.
    If get_ratio is True, returns the ratio of drifted features instead.

    Per-feature drift is detected at the selected confidence level (default
    0.95); the dataset is considered drifted when the share of drifted
    features reaches the threshold (default 0.5).
    """
    profile = Profile(sections=[DataDriftProfileSection()])
    profile.calculate(reference, production, column_mapping=column_mapping)
    json_report = json.loads(profile.json())

    # Collect per-feature p-values for every mapped feature.
    numerical = column_mapping.get('numerical_features') or []
    categorical = column_mapping.get('categorical_features') or []
    p_values = [json_report['data_drift']['data']['metrics'][feature]['p_value']
                for feature in numerical + categorical]

    # A feature drifts when its p-value falls below (1 - confidence).
    drifted_share = sum(p < (1. - confidence) for p in p_values) / len(p_values)
    if get_ratio:
        return drifted_share
    return drifted_share >= threshold
def load_data_execute(**context):
    """Load the Boston housing data and share it downstream via XCom.

    NOTE(review): `sklearn.datasets.load_boston` was deprecated in
    scikit-learn 1.0 and removed in 1.2 — this task fails on newer
    scikit-learn versions; confirm the pinned dependency.
    """
    #print("load_data_execute ")
    data = datasets.load_boston()
    data_frame = pd.DataFrame(data.data, columns=data.feature_names)

    data_columns = {}
    # Features treated as numerical for the drift check (CHAS/RAD excluded).
    data_columns['numerical_features'] = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'TAX','PTRATIO', 'B', 'LSTAT']
    #data_columns['categorical_features'] = ['CHAS', 'RAD']

    context['ti'].xcom_push(key='data_frame', value=data_frame)
    context['ti'].xcom_push(key='data_columns', value=data_columns)
def drift_analysis_execute(**context):
    """Pull the dataset from XCom, run drift detection, push the boolean result."""
    ti = context.get("ti")
    frame = ti.xcom_pull(key='data_frame')
    columns = ti.xcom_pull(key='data_columns')
    # First 200 rows act as the reference window, the rest as production.
    drift_detected = _detect_dataset_drift(frame[:200], frame[200:], column_mapping=columns)
    ti.xcom_push(key='dataset_drift', value=drift_detected)
def detect_drift_execute(**context):
    """Gate for the ShortCircuitOperator: truthy return lets the run continue."""
    dataset_drift = context.get("ti").xcom_pull(key='dataset_drift')
    if dataset_drift:
        return 'create_dashboard'
    # Falsy return short-circuits the downstream dashboard task.
    return None
def create_dashboard_execute(**context):
    """Build the Evidently data-drift dashboard and save it as HTML."""
    print("create_dashboard_execute ")
    data = context.get("ti").xcom_pull(key='data_frame')
    print("data")

    data_drift_dashboard = Dashboard(tabs=[DataDriftTab()])
    # Same reference/production split as the drift-analysis task.
    data_drift_dashboard.calculate(data[:200], data[200:])

    # Best effort: a failure (e.g. the directory already exists) is only logged.
    try:
        os.mkdir(dir_path)
    except OSError:
        print ("Creation of the directory {} failed".format(dir_path))

    data_drift_dashboard.save(os.path.join(dir_path, file_path))
# Wire the pipeline: load data, analyse drift, short-circuit when no drift
# is detected, and only then build the dashboard.
with DAG(
    dag_id='evidently_conditional_drift_dashboard_generation',
    schedule_interval='@daily',
    default_args=default_args,
    catchup=False) as dag:

    load_data = PythonOperator(
        task_id="load_data",
        python_callable=load_data_execute,
        provide_context=True,
        op_kwargs={"parameter_variable":"parameter_value"} #not used now
    )

    drift_analysis = PythonOperator(
        task_id="drift_analysis",
        python_callable=drift_analysis_execute,
        provide_context=True,
    )

    # ShortCircuitOperator skips all downstream tasks when the callable
    # returns a falsy value (i.e. when no drift was detected).
    detect_drift = ShortCircuitOperator(
        task_id='detect_drift',
        python_callable=detect_drift_execute,
        provide_context=True,
        do_xcom_push=False,
    )

    create_dashboard = PythonOperator(
        task_id='create_dashboard',
        provide_context=True,
        python_callable=create_dashboard_execute
    )

    load_data >> drift_analysis >> detect_drift >> [create_dashboard]
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# RefineDet in PyTorch
# Written by Dongdong Wang
# Official and original Caffe implementation is at
# https://github.com/sfzhang15/RefineDet
# -------------------------------------------------------- |
import unittest
from source.code.MyDBCAN import MyDBCAN
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
class TestMyDBSCAN(unittest.TestCase):
    """Tests for MyDBCAN (work in progress; no assertions written yet)."""

    def setUp(self):
        # TODO: generate train/test data frames
        self.my_dbscan = MyDBCAN()

    def test_my_dbscan(self):
        # Pipeline: PCA down to 2 components, then the custom DBSCAN step.
        # NOTE(review): the pipeline is constructed but never fitted or
        # asserted on — this test currently only checks construction.
        dbscan_pipeline = Pipeline([
            ('reduce', PCA(n_components=2, random_state=42)),
            ('fit', self.my_dbscan)
        ])
        # TODO: generate train/test data frames
# TODO: generate train/test data frames
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
def sanitize(inputstr):
sanitized = inputstr
badstrings = [
';',
'$',
'&&',
'../',
'<',
'>',
'%3C',
'%3E',
'\'',
'--',
'1,2',
'\x00',
'`',
'(',
')',
'file://',
'input://'
]
for badstr in badstrings:
if badstr in sanitized:
sanitized = sanitized.replace(badstr, '')
return sanitized
|
#!/usr/bin/env python
import os
import math
import numpy as np
import cst.sord
# FIXME: prestress not correct
# Grid spacing, time step and derived mesh dimensions.
dx = 100.0
dt = dx / 12500.0
nx = 2
ny = int(16500.0 / dx + 21.5)
nz = int(12000.0 / dx + 120.5)
nt = int(8.0 / dt + 1.5)
# Dip factor for the 60-degree fault plane (sin 60).
alpha = math.sin(math.pi / 3.0)
# SORD parameter dictionary; the sxx/syy/szz entries read the pre-stress
# binaries written at the bottom of this script.
prm = {
    'affine': [
        [1.0, 0.0, 0.0],
        [0.0, alpha, 0.0],
        [0.0, 0.5, 1.0],
    ],
    'shape': [nx, ny, nz, nt],
    'delta': [dx, dx, dx, dt],
    'nproc3': [1, 1, 2],
    'bc1': ['+node', 'free', 'free'],
    'bc2': ['+node', 'pml', 'free'],
    'n1expand': [0, 0, 50],
    'n2expand': [0, 0, 50],
    'hourglass': [1.0, 2.0],
    'rho': [2700.0],
    'vp': [5716.0],
    'vs': [3300.0],
    'faultnormal': '+z',
    'co': [200000.0],
    'dc': [0.5],
    'mud': [0.1],
    'sxx': [([0, ':'], '=<', 'sxx.bin')],
    'syy': [([0, ':'], '=<', 'syy.bin')],
    'szz': [([0, ':'], '=<', 'szz.bin')],
}

# hypocenter
y = 12000.0 / dx
z = nz // 2 - 0.5
prm['hypocenter'] = hypo = [0.0, y, z]

# near-fault volume
k = int(15000.0 / dx + 0.5)
l0 = int(z - 3000.0 / dx + 0.5)
l1 = int(z + 3000.0 / dx + 0.5)
prm['gam'] = [0.2, ([[], [k], [l0, l1]], '==', 0.02)]

# fault parameters
k = int(15000.0 / dx) + 1
prm['mus'] = [10000.0, ([[], [k]], '=', 0.7)]

# nucleation
i = int(1500.0 / dx + 0.5)
k = int(hypo[1])
# NOTE(review): this reassignment discards the 'mus' setup (10000.0 / 0.7)
# made just above; if the nucleation patch is meant to be layered on top,
# this should presumably be '+=' — confirm against the reference setup.
prm['mus'] = [
    ([[], [k-i, k+i+1]], '=', 0.62),
    ([[], [k-i-1, k+i+2]], '=', 0.54),
]

# fault time histories: one output list per on-fault field, sampled at
# fixed along-strike stations.
for f in (
    'sux', 'suy', 'suz',
    'svx', 'svy', 'svz',
    'tsx', 'tsy', 'tsz', 'tnm',
):
    prm[f] = []
    for k in 0, 15, 30, 45, 75, 120:
        y = k * 100.0 / dx
        s = 'faultst%03ddp000-%s.bin' % (k, f)
        prm[f] += [([0.0, y, []], '.>', s)]

# body time histories
# NOTE(review): prm['u1']..'v3' are never initialised to [] before the
# '+=' below — as written this raises KeyError; an initialisation loop
# like the one above appears to be missing. Confirm against the original.
for k, l in [
    [0, -30],
    [0, -20],
    [0, -10],
    [0, 10],
    [0, 20],
    [0, 30],
    [3, -10],
    [3, -5],
    [3, 5],
    [3, 10],
]:
    y = k * 100.0 / dx / alpha
    z = l * 100.0 / dx + hypo[2]
    for f in 'u1', 'u2', 'u3', 'v1', 'v2', 'v3':
        s = 'body%03dst000dp%03d-%s.bin' % (l, k, f)
        # Zero-pad negative depths: '%03d' of -30 yields 'body-30...';
        # rewrite to 'body-030...'.
        s = s.replace('body-', 'body-0')
        prm[f] += [([0.0, y, z, []], '.>', s)]

# pre-stress: depth-dependent initial stress profiles written to the
# binaries referenced by the '=<' entries in prm above.
d = np.arange(ny) * alpha * dx
x = d * 9.8 * -1147.16
y = d * 9.8 * -1700.0
z = d * 9.8 * -594.32
k = int(13800.0 / dx + 1.5)
# Below this depth index the stress field is set equal to syy.
x[k:] = y[k:]
z[k:] = y[k:]

# run SORD
d = 'repo/TVP12-2D'
os.mkdir(d)
os.chdir(d)
x.astype('f').tofile('sxx.bin')
y.astype('f').tofile('syy.bin')
z.astype('f').tofile('szz.bin')
cst.sord.run(prm)
|
#! /usr/bin/env python
# $Id: test_traversals.py 4641 2006-06-28 16:27:55Z blais $
# Author: Martin Blais <blais@furius.ca>
# Copyright: This module has been placed in the public domain.
"""
Test module for traversals.
"""
import unittest
import DocutilsTestSupport # must be imported before docutils
from docutils import nodes, core, io, utils, writers
from docutils.writers.null import Writer as NullWriter
import docutils
stop_traversal_input = '''
==================
Train Travel
==================
Happily, happily going by train.
.. attention:: Attention, attention. This is a public annoucement.
You must get off the train now.
KaZoom! Train crashes.
- Told ya!!! Get off the train next time.
'''
class AttentiveVisitor(nodes.SparseNodeVisitor):
    """Visitor that stops the traversal at the first attention node."""

    def visit_attention(self, node):
        # Cleanly interrupt the traversal; walk()/walkabout() catch this.
        raise nodes.StopTraversal

    def visit_bullet_list(self, node):
        # The bullet list follows the attention node in the test input, so
        # reaching it means StopTraversal failed to stop the traversal.
        raise RuntimeError("It's too late for attention, "
                           "more discipline is needed!.")
class AttentiveWriter(writers.Writer):
    """Writer whose translate step runs the AttentiveVisitor over the tree."""

    def translate(self):
        self.visitor = visitor = AttentiveVisitor(self.document)

        # Test both kinds of traversals.
        self.document.walkabout(visitor)
        self.document.walk(visitor)
class StopTraversalTests(unittest.TestCase, docutils.SettingsSpec):
    """
    Test interrupting the visitor during traversal. In this test we stop it
    when we reach an attention node.
    """

    def test_stop_traversal(self):
        # Load some document tree in memory.
        doctree = docutils.core.publish_doctree(
            source=stop_traversal_input,
            reader_name='standalone',
            parser_name='restructuredtext',
            settings_spec=self)
        # `assert_` was deprecated long ago and removed in Python 3.12;
        # use the modern, more specific assertion.
        self.assertIsInstance(doctree, nodes.document)

        # Publishing with AttentiveWriter raises inside the visitor if the
        # StopTraversal mechanism fails (see AttentiveVisitor above).
        parts = docutils.core.publish_parts(
            reader_name='doctree', source_class=docutils.io.DocTreeInput,
            source=doctree, source_path='test',
            writer=AttentiveWriter())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
'''
the model for learning the initial guess
'''
import os
from argparse import Namespace
import torch
from models import base_model, resnet
from utils import utils
class InitialGuesserFactory(object):
    """Factory for constructing initial-guess models from an options object."""

    @staticmethod
    def get_initial_guesser(opt):
        """Build the initial guesser named by ``opt.guess_model``.

        :param opt: options namespace; must expose ``guess_model``
        :return: the constructed model, moved to the configured device
        :raises ValueError: if ``opt.guess_model`` is not a known model name
        """
        if opt.guess_model == 'init_guess':
            model = InitialGuesser(opt)
            model = utils.set_model_device(model)
        else:
            # Bug fix: the error previously reported `opt.loss_surface_name`,
            # an unrelated attribute that may not even exist on `opt`
            # (raising AttributeError and masking the real ValueError).
            raise ValueError('unknown initial guess model:',
                             opt.guess_model)
        return model
class InitialGuesser(base_model.BaseModel, torch.nn.Module):
    '''
    Model for learning the initial guess

    A ResNet-18 backbone regressing an 8-dimensional output from
    3-channel input.
    '''

    def __init__(self, opt):
        # opt must be stored before check_options/create_model run.
        self.opt = opt
        self.name = 'init_guess'
        self.check_options()
        super(InitialGuesser, self).__init__()
        self.create_model()

    def check_options(self):
        # Guard against constructing this class with mismatched options.
        if self.opt.guess_model != self.name:
            content_list = []
            content_list += ['You are not using the correct class for training or eval']
            utils.print_notification(content_list, 'ERROR')
            # Hard exit: this is a configuration error, not a recoverable state.
            exit(1)

    def create_model(self):
        # 8 regression outputs from 3 input channels — presumably the 8 DoF
        # of a homography/initial transform; TODO confirm.
        self.out_dim = 8
        self.input_features = 3
        resnet_config = self.create_resnet_config()
        self.feature_extractor = resnet.resnet18(resnet_config, pretrained=resnet_config.pretrained,
                                                 num_classes=self.out_dim, input_features=self.input_features)
        if (hasattr(self.opt, 'load_weights_upstream') and self.opt.load_weights_upstream):
            # Loading project weights and ImageNet weights are mutually exclusive.
            assert resnet_config.pretrained is False, 'pretrained weights or imagenet weights'
            self.load_pretrained_weights()

    def create_resnet_config(self):
        # Collect backbone settings from opt; direct options take precedence
        # over their *_upstream fallbacks.
        need_spectral_norm = False
        pretrained = False
        group_norm = 0
        if hasattr(self.opt, 'need_spectral_norm') and self.opt.need_spectral_norm:
            need_spectral_norm = self.opt.need_spectral_norm
        elif hasattr(self.opt, 'need_spectral_norm_upstream') and self.opt.need_spectral_norm_upstream:
            # NOTE(review): the condition checks `need_spectral_norm_upstream`
            # but the value read is `need_spectral_norm_error_model` — confirm
            # this is intentional and the attribute exists in this configuration.
            need_spectral_norm = self.opt.need_spectral_norm_error_model
        if hasattr(self.opt, 'group_norm'):
            group_norm = self.opt.group_norm
        elif hasattr(self.opt, 'group_norm_upstream'):
            group_norm = self.opt.group_norm_upstream
        if hasattr(self.opt, 'imagenet_pretrain') and self.opt.imagenet_pretrain:
            pretrained = True
        resnet_config = Namespace(need_spectral_norm=need_spectral_norm,
                                  pretrained=pretrained,
                                  group_norm=group_norm,
                                  )
        self.print_resnet_config(resnet_config)
        return resnet_config

    def forward(self, x):
        # Thin wrapper: run the backbone on the input batch.
        video = x
        y = self.feature_extractor(video)
        return y

    def load_pretrained_weights(self):
        '''load pretrained weights

        this function can load weights from another model.
        '''
        super().load_pretrained_weights()

    def _verify_checkpoint(self, check_options):
        # No checkpoint validation needed for this model.
        pass

    def _get_checkpoint_path(self):
        # Checkpoints live under <out_dir>/<load_weights_upstream>/.
        checkpoint_path = os.path.join(self.opt.out_dir, self.opt.load_weights_upstream, 'checkpoint.pth.tar')
        return checkpoint_path
|
import unittest
from eth_utils import to_canonical_address
from raiden_installer import load_settings
from raiden_installer.tokens import (
Erc20Token,
EthereumAmount,
RequiredAmounts,
SwapAmounts,
TokenAmount,
TokenError,
Wei,
)
class TokenAmountTestCase(unittest.TestCase):
    """Behaviour of EthereumAmount/TokenAmount: wei conversion, formatting,
    arithmetic, and the guards against mixing different currencies."""

    def setUp(self):
        self.single_eth = EthereumAmount(1)
        self.double_eth = EthereumAmount(2)
        self.single_rdn = TokenAmount(1, Erc20Token.find_by_ticker("RDN", "mainnet"))
        self.single_wiz = TokenAmount(1, Erc20Token.find_by_ticker("WIZ", "goerli"))

    def test_can_convert_to_wei(self):
        self.assertEqual(self.single_eth.as_wei, Wei(10 ** 18))

    def test_can_multiply_amounts(self):
        doubled = 2 * self.single_eth.as_wei
        self.assertEqual(doubled, Wei(2 * 10 ** 18))

    def test_can_get_token_ticker(self):
        self.assertEqual(self.single_rdn.ticker, "RDN")

    def test_can_get_formatted_amount(self):
        # (amount, expected label) pairs covering every denomination branch.
        cases = [
            (self.single_eth, "1 ETH"),
            (self.single_rdn, "1 RDN"),
            (EthereumAmount(0), "0 ETH"),
            (EthereumAmount(Wei(10 ** 12)), "1 TWEI"),
            (EthereumAmount(Wei(10 ** 9)), "1 GWEI"),
            (EthereumAmount(Wei(10 ** 6)), "1 MWEI"),
            (EthereumAmount("0.875"), "0.875 ETH"),
            (EthereumAmount(Wei(50_000)), "50000 WEI"),
        ]
        for amount, expected in cases:
            self.assertEqual(amount.formatted, expected)

    def test_addition(self):
        self.assertEqual((self.single_eth + self.double_eth).value, 3)

    def test_cannot_add_different_currencies(self):
        self.assertRaises(ValueError, lambda: self.single_rdn + self.single_wiz)

    def test_subtraction(self):
        self.assertEqual((self.double_eth - self.single_eth).value, 1)

    def test_cannot_subtract_different_currencies(self):
        self.assertRaises(ValueError, lambda: self.single_rdn - self.single_wiz)

    def test_equality(self):
        self.assertEqual(self.single_eth, EthereumAmount(1))

    def test_lt_operator(self):
        self.assertLess(self.single_eth, self.double_eth)

    def test_cannot_compare_different_currencies_with_lt_operator(self):
        self.assertRaises(ValueError, lambda: self.single_rdn < self.single_wiz)

    def test_le_operator(self):
        self.assertLessEqual(self.single_eth, EthereumAmount(1))

    def test_cannot_compare_different_currencies_with_le_operator(self):
        self.assertRaises(ValueError, lambda: self.single_rdn <= self.single_wiz)

    def test_gt_operator(self):
        self.assertGreater(self.double_eth, self.single_eth)

    def test_cannot_compare_different_currencies_with_gt_operator(self):
        self.assertRaises(ValueError, lambda: self.single_rdn > self.single_wiz)

    def test_ge_operator(self):
        self.assertGreaterEqual(self.single_eth, EthereumAmount(1))

    def test_cannot_compare_different_currencies_with_ge_operator(self):
        self.assertRaises(ValueError, lambda: self.single_rdn >= self.single_wiz)

    def test_can_get_address(self):
        mainnet_rdn = Erc20Token.find_by_ticker("RDN", "mainnet")
        self.assertEqual(self.single_rdn.address, mainnet_rdn.address)
class Erc20TokenTestCase(unittest.TestCase):
    """Erc20Token construction and per-network deployment lookup."""

    def test_cannot_initialize_token_without_address(self):
        self.assertRaises(TokenError, Erc20Token, "RDN", "REI")

    def test_cannot_get_token_on_network_without_deployment(self):
        self.assertRaises(TokenError, Erc20Token.find_by_ticker, "WIZ", "mainnet")

    def test_get_address(self):
        expected = to_canonical_address("0x95b2d84de40a0121061b105e6b54016a49621b44")
        self.assertEqual(Erc20Token.find_by_ticker("WIZ", "goerli").address, expected)
class InstallerAmountsTestCase(unittest.TestCase):
    """RequiredAmounts / SwapAmounts construction from the mainnet settings."""

    def setUp(self):
        self.cfg = load_settings("mainnet")
        self.service_token = Erc20Token.find_by_ticker(
            self.cfg.service_token.ticker, self.cfg.network
        )
        self.transfer_token = Erc20Token.find_by_ticker(
            self.cfg.transfer_token.ticker, self.cfg.network
        )

    def test_create_required_amounts(self):
        required = RequiredAmounts.from_settings(self.cfg)
        self.assertEqual(
            required.eth,
            EthereumAmount(Wei(self.cfg.ethereum_amount_required)),
        )
        self.assertEqual(
            required.eth_after_swap,
            EthereumAmount(Wei(self.cfg.ethereum_amount_required_after_swap)),
        )
        self.assertEqual(
            required.service_token,
            TokenAmount(Wei(self.cfg.service_token.amount_required), self.service_token),
        )
        self.assertEqual(
            required.transfer_token,
            TokenAmount(Wei(self.cfg.transfer_token.amount_required), self.transfer_token),
        )

    def test_create_swap_amounts(self):
        swap = SwapAmounts.from_settings(self.cfg)
        self.assertEqual(
            swap.service_token,
            TokenAmount(Wei(self.cfg.service_token.swap_amount), self.service_token),
        )
        self.assertEqual(
            swap.transfer_token,
            TokenAmount(Wei(self.cfg.transfer_token.swap_amount), self.transfer_token),
        )
|
# -----------------------------------------------------------
# Copyright (c) 2021. Danil Smirnov
# Write a program that reads the names of the two primary
# colors for mixing. If the user enters anything other than
# red, blue, or yellow, the program should display an error
# message. Otherwise, the program should print the name of
# the secondary color that will be the result.
# -----------------------------------------------------------
# Maps English color keys to their localized display names.
# NOTE(review): the values look mojibake-encoded (UTF-8 Cyrillic decoded with
# a wrong codepage) -- confirm the file's source encoding; the literals are
# left untouched here because the comparisons below depend on them verbatim.
all_colors: dict = {'red': 'ะบัะฐัะฝัะน', 'yellow': 'ะถะตะปััะน', 'blue': 'ัะธะฝะธะน',
                    'purple': 'ัะธะพะปะตัะพะฒัะน', 'green': 'ะทะตะปะตะฝัะน', 'orange': 'ะพัะฐะฝะถะตะฒัะน'}
class ColorMixer:
    """Mixes two primary colors and stores the resulting color name.

    Mixing rules: red+blue -> purple, yellow+blue -> green, red+yellow ->
    orange, same primary twice -> that primary; anything else is an error.
    Color names are the localized values from the module-level ``all_colors``.
    """

    # The three primary colors, in localized form.
    _PRIMARIES = ('red', 'yellow', 'blue')

    def __init__(self, color: str, color_: str):
        self.color: str = self.mix_colors(color, color_)

    def mix_colors(self, color: str, color_: str) -> str:
        """Return the localized name of the mix result, or an error string."""
        if self.is_purple(color, color_):
            return all_colors['purple']
        if self.is_green(color, color_):
            return all_colors['green']
        if self.is_orange(color, color_):
            return all_colors['orange']
        if self.is_same(color, color_):
            return color
        return 'ะพัะธะฑะบะฐ ัะฒะตัะฐ'

    def is_purple(self, color: str, color_: str) -> bool:
        # Order-independent red+blue check via set equality.
        return {color, color_} == {all_colors['red'], all_colors['blue']}

    def is_green(self, color: str, color_: str) -> bool:
        return {color, color_} == {all_colors['yellow'], all_colors['blue']}

    def is_orange(self, color: str, color_: str) -> bool:
        return {color, color_} == {all_colors['red'], all_colors['yellow']}

    def is_same(self, color: str, color_: str) -> bool:
        # Both inputs are the same *primary* color.
        return color == color_ and color in (
            all_colors['red'], all_colors['yellow'], all_colors['blue'])
# Entry point: read the two color names from stdin and print the mix result.
if __name__ == '__main__':
    color_mixer = ColorMixer(str(input()), str(input()))
    print(color_mixer.color)
|
# Generated by Django 2.1.13 on 2021-01-18 14:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ``trade.ResourceChange.order_id`` nullable."""

    dependencies = [
        ('trade', '0008_auto_20210118_2226'),
    ]

    operations = [
        migrations.AlterField(
            model_name='resourcechange',
            name='order_id',
            # null=True: a resource change may now exist without an order.
            field=models.IntegerField(null=True),
        ),
    ]
|
import sqlite3
from Client import Client
class DBManager:
    """SQLite data-access layer for the ``clients`` table.

    BUG FIX: the SQL constants were referenced through an undefined name
    ``Controller`` (and a non-existent ``REGISTER_CLIENT_SQL``), which raised
    NameError/AttributeError on every call; they now resolve via ``DBManager``
    and the existing ``INSERT_CLIENT_SQL``.
    """

    LOGIN_SQL = """
    SELECT id, username, balance, message
    FROM clients
    WHERE username=? AND password=?
    LIMIT 1 """
    CHANGE_PASS_SQL = """
    UPDATE clients
    SET password=?
    WHERE id=? """
    UPDATE_MSG_SQL = """
    UPDATE clients
    SET message=?
    WHERE id=? """
    INSERT_CLIENT_SQL = """
    INSERT INTO clients(username, password) VALUES(?, ?)
    """

    def __init__(self, conn):
        self.conn = conn
        self.curr = self.conn.cursor()

    def change_message(self, new_message, logged_user):
        """Persist a new profile message and mirror it onto the client object."""
        self.curr.execute(DBManager.UPDATE_MSG_SQL,
                          (new_message, logged_user.get_id()))
        self.conn.commit()
        logged_user.set_message(new_message)

    def change_pass(self, new_pass, logged_user):
        """Store a new password for the logged-in user."""
        self.curr.execute(DBManager.CHANGE_PASS_SQL,
                          (new_pass, logged_user.get_id()))
        self.conn.commit()

    def register(self, username, password):
        """Insert a new client row with the given credentials."""
        self.curr.execute(DBManager.INSERT_CLIENT_SQL,
                          (username, password))
        self.conn.commit()

    def login(self, username, password):
        """Return a Client on a credential match, otherwise False."""
        self.curr.execute(DBManager.LOGIN_SQL,
                          (username, password))
        user = self.curr.fetchone()
        if user:
            return Client(user[0], user[1], user[2], user[3])
        return False
|
# Estimated operation durations, presumably in seconds (units and provenance
# are not stated in this file -- confirm with the benchmark that produced them).
STRUCT_INIT_TIME = 4.6
ADD_FILE_TIME = 3.3
DEL_FILE_TIME = 1.5
UPD_FILE_TIME = 1.5
|
import operator
import argparse
import progressbar
import json
from spacy.en import English
# CLI: pick the dataset split and whether to dump the single modal answer
# ('top1000') or all annotator answers ('all').
parser = argparse.ArgumentParser()
parser.add_argument('-split', type=str, default='train',
                    help = 'Options are train, val, test, test-dev')
parser.add_argument('-answers', type=str, default='top1000',
                    help = 'Specify if you want the top 1000 frequently occuring answers or all of them')
args = parser.parse_args()
# spaCy pipeline, used below only to count tokens per question
# (legacy `spacy.en.English` API).
nlpProcessor = English()
def getModalAnswer(answers):
    """Return the most frequent answer string among the annotator answers.

    Improvements over the original: works on any number of answers instead of
    a hard-coded 10, drops the redundant initialize-then-increment double
    loop, and replaces Python-2-only `xrange`/`iteritems` with constructs
    that run under both Python 2 and 3.

    :param answers: iterable of dicts each carrying an 'answer' key
    :return: the answer string with the highest occurrence count
    """
    counts = {}
    for entry in answers:
        counts[entry['answer']] = counts.get(entry['answer'], 0) + 1
    # Ties are broken arbitrarily, matching the original's dict-order behavior.
    return max(counts.items(), key=operator.itemgetter(1))[0]
def getAllAnswer(answers):
    """Join every annotator answer into a single ';'-separated string.

    Generalized from the original hard-coded 10 answers to any number, and
    freed of the Python-2-only `xrange`.

    :param answers: iterable of dicts each carrying an 'answer' key
    :return: 'ans1;ans2;...' in input order
    """
    return ';'.join(entry['answer'] for entry in answers)
def main():
    """Dump VQA questions/answers/ids/image-ids/lengths to flat text files.

    Python 2 only (print statements, `xrange`). Selects annotation/question
    JSON files and output paths by `args.split`, then writes one line per
    question to each output file.

    NOTE(review): output files are never closed/flushed explicitly -- relies
    on interpreter exit; consider `with` blocks when modernizing.
    """
    if args.split == 'train':
        annFile = '/home/mayank/Datasets/VQA/annotations/mscoco_train2014_annotations.json'
        quesFile = '/home/mayank/Datasets/VQA/questions/OpenEnded_mscoco_train2014_questions.json'
        questions_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_train2014.txt', 'w')
        questions_id_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_id_train2014.txt', 'w')
        questions_lengths_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_lengths_train2014.txt', 'w')
        if args.answers == 'top1000':
            answers_file = open('/home/mayank/Datasets/VQA/preprocessed/answers_train2014_top1000.txt', 'w')
        elif args.answers == 'all':
            answers_file = open('/home/mayank/Datasets/VQA/preprocessed/answers_train2014_all.txt', 'w')
        coco_image_id = open('/home/mayank/Datasets/VQA/preprocessed/images_train2014.txt', 'w')
        data_split = 'training data'
    elif args.split == 'val':
        annFile = '/home/mayank/Datasets/VQA/annotations/mscoco_val2014_annotations.json'
        quesFile = '/home/mayank/Datasets/VQA/questions/OpenEnded_mscoco_val2014_questions.json'
        questions_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_val2014.txt', 'w')
        questions_id_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_id_val2014.txt', 'w')
        questions_lengths_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_lengths_val2014.txt', 'w')
        if args.answers == 'top1000':
            answers_file = open('/home/mayank/Datasets/VQA/preprocessed/answers_val2014_top1000.txt', 'w')
        elif args.answers == 'all':
            answers_file = open('/home/mayank/Datasets/VQA/preprocessed/answers_val2014_all.txt', 'w')
        coco_image_id = open('/home/mayank/Datasets/VQA/preprocessed/images_val2014.txt', 'w')
        data_split = 'validation data'
    elif args.split == 'test-dev':
        # Test splits ship without annotations, so no answers file is opened.
        quesFile = '/home/mayank/Datasets/VQA/questions/OpenEnded_mscoco_test-dev2015_questions.json'
        questions_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_test-dev2015.txt', 'w')
        questions_id_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_id_test-dev2015.txt', 'w')
        questions_lengths_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_lengths_test-dev2015.txt', 'w')
        coco_image_id = open('/home/mayank/Datasets/VQA/preprocessed/images_test-dev2015.txt', 'w')
        data_split = 'test-dev data'
    elif args.split == 'test':
        quesFile = '/home/mayank/Datasets/VQA/questions/OpenEnded_mscoco_test2015_questions.json'
        questions_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_test2015.txt', 'w')
        questions_id_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_id_test2015.txt', 'w')
        questions_lengths_file = open('/home/mayank/Datasets/VQA/preprocessed/questions_lengths_test2015.txt', 'w')
        coco_image_id = open('/home/mayank/Datasets/VQA/preprocessed/images_test2015.txt', 'w')
        # NOTE(review): label says 'test-dev data' for the 'test' split --
        # looks like a copy/paste slip; only affects the progress message.
        data_split = 'test-dev data'
    else:
        raise RuntimeError('Invalid split')
    questions = json.load(open(quesFile, 'r'))
    ques = questions['questions']
    # Annotations (ground-truth answers) exist only for train/val.
    if args.split == 'train' or args.split == 'val':
        qa = json.load(open(annFile, 'r'))
        qa = qa['annotations']
    pbar = progressbar.ProgressBar()
    print 'Dumping questions, answers, questionIDs, imageIDs, and question lengths to text files...'
    # qa[i] is assumed to be aligned with ques[i] -- same ordering in both
    # JSON files; TODO confirm against the VQA release notes.
    for i, q in pbar(zip(xrange(len(ques)), ques)):
        questions_file.write((q['question'] + '\n').encode('utf8'))
        # Question length = spaCy token count.
        questions_lengths_file.write((str(len(nlpProcessor(q['question'])))+ '\n').encode('utf8'))
        questions_id_file.write((str(q['question_id']) + '\n').encode('utf8'))
        coco_image_id.write((str(q['image_id']) + '\n').encode('utf8'))
        if args.split == 'train' or args.split == 'val':
            if args.answers == 'top1000':
                answers_file.write(getModalAnswer(qa[i]['answers']).encode('utf8'))
            elif args.answers == 'all':
                answers_file.write(getAllAnswer(qa[i]['answers']).encode('utf8'))
            answers_file.write('\n'.encode('utf8'))
    print 'completed dumping', data_split
# Script entry point.
if __name__ == "__main__":
    main()
|
from typing import Dict
from typing import Optional
from ...ffprobe import FFprobe
from .audio_parameters import AudioParameters
from .input_audio_stream import InputAudioStream
from .input_stream import InputStream
from .input_video_stream import InputVideoStream
from .video_parameters import VideoParameters
from .video_tools import check_video_params
class AudioImagePiped(InputStream):
    """The audio/image stream piped descriptor.

    Streams an audio file together with a still image served as a
    1-frame-per-second video track, both over fifo pipes.

    Attributes:
        ffmpeg_parameters (``str``):
            FFMpeg additional parameters
        raw_headers (``Dict[str, str]``):
            Headers of the http connection
        stream_audio (:obj:`~pytgcalls.types.InputAudioStream()`):
            Input Audio Stream Descriptor
        stream_video (:obj:`~pytgcalls.types.InputVideoStream()`):
            Input Video Stream Descriptor

    Parameters:
        audio_path (``str``):
            The audio file path
        image_path (``str``):
            The image file path
        audio_parameters (:obj:`~pytgcalls.types.AudioParameters()`, **optional**):
            The audio parameters of the stream; defaults to a fresh
            ``AudioParameters()``. Quality presets such as
            :obj:`~pytgcalls.types.HighQualityAudio()` also work.
        video_parameters (:obj:`~pytgcalls.types.VideoParameters()`, **optional**):
            The video parameters of the stream; defaults to a fresh
            ``VideoParameters()``. Quality presets such as
            :obj:`~pytgcalls.types.HighQualityVideo()` also work.
        headers (``Dict[str, str]``, **optional**):
            Headers of the http connection
        additional_ffmpeg_parameters (``str``, **optional**):
            FFMpeg additional parameters
    """

    def __init__(
        self,
        audio_path: str,
        image_path: str,
        audio_parameters: Optional[AudioParameters] = None,
        video_parameters: Optional[VideoParameters] = None,
        headers: Optional[Dict[str, str]] = None,
        additional_ffmpeg_parameters: str = '',
    ):
        # BUG FIX: the defaults used to be `AudioParameters()` /
        # `VideoParameters()` objects built once at import time; setting
        # `frame_rate` below then mutated the shared default instance for
        # every later call (mutable-default-argument pitfall). Build fresh
        # instances per call instead.
        if audio_parameters is None:
            audio_parameters = AudioParameters()
        if video_parameters is None:
            video_parameters = VideoParameters()
        self._image_path = image_path
        self._audio_path = audio_path
        self.ffmpeg_parameters = additional_ffmpeg_parameters
        self.raw_headers = headers
        # A still image only ever needs one frame per second.
        video_parameters.frame_rate = 1
        super().__init__(
            InputAudioStream(
                f'fifo://{audio_path}',
                audio_parameters,
            ),
            InputVideoStream(
                f'fifo://image:{image_path}',
                video_parameters,
            ),
        )

    @property
    def headers(self):
        # Render the stored header dict into FFmpeg's header syntax on demand.
        return FFprobe.ffmpeg_headers(self.raw_headers)

    async def check_pipe(self):
        """Probe both inputs and clamp the video size to the image's size."""
        dest_width, dest_height, header1 = await FFprobe.check_file(
            self._image_path,
            needed_audio=False,
            needed_video=True,
            headers=self.raw_headers,
        )
        header2 = await FFprobe.check_file(
            self._audio_path,
            needed_audio=True,
            needed_video=False,
            headers=self.raw_headers,
        )
        width, height = check_video_params(
            self.stream_video.parameters,
            dest_width,
            dest_height,
        )
        self.stream_video.parameters.width = width
        self.stream_video.parameters.height = height
        self.stream_video.header_enabled = header1
        self.stream_audio.header_enabled = header2
|
from abc import ABCMeta
from abc import abstractmethod
from ..np_utils import ops
import numpy as np
class Match(object):
    """Class to store results from the matcher.

    Interprets a 1-D int32 `match_results` array:
      match_results[i] >= 0  -> column i matched to row match_results[i]
      match_results[i] == -1 -> column i unmatched
      match_results[i] == -2 -> column i ignored

    BUG FIXES vs. the original TF port:
      * `match_results.shape.ndims` is TensorFlow API; numpy arrays expose
        `.ndim` (the old code raised AttributeError on any input).
      * `np.cast` is a lookup table, not a callable; `_reshape_and_cast`
        now uses `ndarray.astype`.
      * the dtype error message claimed int64 was accepted when only int32
        is checked.
    """

    def __init__(self, match_results, use_matmul_gather=False):
        """Store `match_results` after validating rank and dtype.

        Raises:
            ValueError: if `match_results` is not a rank-1 int32 array.
        """
        if match_results.ndim != 1:
            raise ValueError('match_results should have rank 1')
        if match_results.dtype != np.int32:
            raise ValueError('match_results should be an int32 tensor')
        self._match_results = match_results
        # TODO
        # if use_matmul_gather:
        #     self._gather_op = ops.matmul_gather_on_zeroth_axis

    @property
    def match_results(self):
        return self._match_results

    def matched_column_indices(self):
        """Indices of columns with a match (value > -1)."""
        return self._reshape_and_cast(np.where(np.greater(self._match_results, -1)))

    def matched_column_indicator(self):
        """Boolean mask of matched columns."""
        return np.greater_equal(self._match_results, 0)

    def num_matched_columns(self):
        return np.size(self.matched_column_indices())

    def unmatched_column_indices(self):
        """Indices of columns explicitly unmatched (value == -1)."""
        return self._reshape_and_cast(np.where(np.equal(self._match_results, -1)))

    def unmatched_column_indicator(self):
        return np.equal(self._match_results, -1)

    def num_unmatched_columns(self):
        return np.size(self.unmatched_column_indices())

    def ignored_column_indices(self):
        """Indices of ignored columns (value == -2)."""
        return self._reshape_and_cast(np.where(self.ignored_column_indicator()))

    def ignored_column_indicator(self):
        return np.equal(self._match_results, -2)

    def num_ignored_columns(self):
        """Returns the number of ignored columns."""
        return np.size(self.ignored_column_indices())

    def unmatched_or_ignored_column_indices(self):
        """Indices of columns that are either unmatched or ignored (< 0)."""
        return self._reshape_and_cast(np.where(np.greater(0, self._match_results)))

    def matched_row_indices(self):
        """Row index that each matched column maps to."""
        return self._reshape_and_cast(
            np.take(self._match_results, self.matched_column_indices(), axis=0))

    def _reshape_and_cast(self, t):
        # np.where returns a tuple of index arrays; flatten and cast to int32.
        return np.reshape(t, [-1]).astype(np.int32)

    def gather_based_on_match(self, input_tensor, unmatched_value,
                              ignored_value):
        """Gather rows of `input_tensor` per column, substituting
        `ignored_value` for ignored columns and `unmatched_value` for
        unmatched ones (hence the +2 index shift below)."""
        input_tensor = np.concatenate([np.stack([ignored_value, unmatched_value]),
                                       input_tensor], axis=0)
        gather_indices = np.maximum(self.match_results + 2, 0)
        gathered_tensor = np.take(input_tensor, gather_indices, axis=0)
        return gathered_tensor
class Matcher(metaclass=ABCMeta):
    """Abstract base class for matcher.

    BUG FIX: the class previously set ``__metaclass__ = ABCMeta``, the
    Python 2 spelling, which Python 3 silently ignores -- so ``_match`` was
    never actually enforced as abstract. Declaring the metaclass via the
    class keyword restores the intended contract.
    """

    def __init__(self, use_matmul_gather=False):
        """Constructs a Matcher.

        Args:
            use_matmul_gather: Force constructed match objects to use matrix
                multiplication based gather instead of standard np.gather.
                (Default: False).
        """
        self._use_matmul_gather = use_matmul_gather

    def match(self, similarity_matrix, scope=None, **params):
        """Run the concrete `_match` and wrap its result in a Match object.

        `scope` is accepted for API compatibility but unused here.
        """
        return Match(self._match(similarity_matrix, **params),
                     self._use_matmul_gather)

    @abstractmethod
    def _match(self, similarity_matrix, **params):
        """Method to be overridden by implementations.

        Args:
            similarity_matrix: Float tensor of shape [N, M] with pairwise
                similarity where higher value means more similar.
            **params: Additional keyword arguments for specific
                implementations of the Matcher.

        Returns:
            match_results: Integer tensor of shape [M]: match_results[i]>=0
                means that column i is matched to row match_results[i],
                match_results[i]=-1 means that the column is not matched.
                match_results[i]=-2 means that the column is ignored (usually
                this happens when there is a very weak match which one neither
                wants as positive nor negative example).
        """
        pass
|
import numpy as np
import logging
from timeit import default_timer as timer
from sklearn.mixture import GMM
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
class Model:
    """Base clustering model.

    Handles the shared workflow: fit the pre-processing chain, remember the
    feature counts, delegate estimator fitting to `_fit` (overridden by
    subclasses), and validate feature counts again at scoring time.
    """

    def __init__(self):
        self.__n_features = None             # raw feature count seen at train time
        self.__n_clusters = None             # cluster count chosen by _fit
        self.__preprocessors = []            # fitted pre-processing chain
        self._estimator = None               # fitted estimator (subclass-specific)
        self._n_features_transformed = None  # feature count after pre-processing

    def train(self, data, preprocessors=None, n_clusters=None, init=4):
        """Fit the pre-processors on `data`, then fit the estimator via `_fit`."""
        n_patterns, n_features = len(data), len(data[0])
        self.__n_features = n_features
        t_start = timer()
        logging.debug('Pre-processing %d patterns with %d features ...' % (n_patterns, n_features))
        chain = [StandardScaler()] if preprocessors is None else preprocessors
        for step in chain:
            data = step.fit_transform(data)
        self.__preprocessors = chain
        n_features = len(data[0])
        self._n_features_transformed = n_features
        logging.info('Finished pre-processing of %d patterns with %d features. %.3f sec' %
                     (n_patterns, n_features, timer() - t_start))
        self._estimator, self.__n_clusters = self._fit(data, n_clusters=n_clusters)

    def _fit(self, data, n_clusters=0):
        """Hook for subclasses; returns (estimator, n_clusters)."""
        return None, n_clusters

    def score(self, data):
        """Apply the fitted pre-processors to `data` and score it."""
        if len(data[0]) != self.__n_features:
            raise ValueError('The number of features [%d] in the data is different from that in the model [%d].' %
                             (len(data[0]), self.__n_features))
        for step in self.__preprocessors:
            data = step.transform(data)
        if len(data[0]) != self._n_features_transformed:
            raise ValueError(
                'The number of transformed features [%d] in the data is different from that in the model [%d].' %
                (len(data[0]), self._n_features_transformed))
        return self._score_transformed_data(data)

    def _score_transformed_data(self, data):
        """Default scoring: echo the first column of each record."""
        return [record[0] for record in data]
class GMMModel(Model):
    """Gaussian-mixture model with AIC-driven selection of the cluster count.

    Starting from n_samples, the candidate cluster count is halved each round;
    the fit with the lowest AIC is kept, and the search stops early once the
    AIC degrades sharply.
    """

    def __init__(self, min_prob=0.8):
        Model.__init__(self)
        # Minimum posterior probability for a pattern to receive a label.
        self.__min_prob = min_prob

    def _fit(self, samples, n_clusters=None):
        """Fit GMMs with decreasing component counts; keep the best by AIC.

        The `n_clusters` argument is accepted for interface compatibility but
        the count is derived from len(samples).

        Returns:
            (best_estimator, n_clusters): the AIC-optimal fit.
        Raises:
            ValueError: with fewer than 16 samples no fit is attempted.
        """
        t_start = timer()
        n_clusters = len(samples)
        best_estimator = None
        min_aic = None
        while n_clusters >= 16:
            # BUG FIX: integer halving. The original `/=` yields a float on
            # Python 3, which GMM(n_components=...) rejects.
            n_clusters //= 2
            estimator = self.gmm_fit(samples, n_clusters)
            aic = estimator.aic(samples)
            if min_aic is None:
                min_aic = aic
            if aic > min_aic and min(abs(aic), abs(min_aic)) < 0.5 * max(abs(min_aic), abs(aic)):
                # AIC got much worse than the best so far -- stop searching.
                break
            elif aic <= min_aic:
                best_estimator, min_aic = estimator, aic
        if best_estimator is None:
            # BUG FIX: with < 16 samples the loop never runs; previously this
            # crashed below with AttributeError on None.n_components.
            raise ValueError('too few samples (%d) to fit a GMM' % len(samples))
        n_clusters = best_estimator.n_components
        logging.info('Finally got a GMM model on %d patterns using %d features for %d clusters. %.3f sec. AIC = %g' %
                     (len(samples), self._n_features_transformed, n_clusters, timer() - t_start,
                      best_estimator.aic(samples)))
        return best_estimator, n_clusters

    def gmm_fit(self, samples, n_clusters):
        """Fit a single GMM with `n_clusters` components and log its AIC."""
        t_start = timer()
        n_features = len(samples[0])
        logging.debug('Running GMM on %d patterns using %d features for %d clusters ...' %
                      (len(samples), n_features, n_clusters))
        estimator = GMM(n_components=n_clusters)
        estimator.fit(samples)
        logging.info('Finished GMM on %d patterns using %d features for %d clusters. %.3f sec. AIC = %g' %
                     (len(samples), n_features, n_clusters, timer() - t_start,
                      estimator.aic(samples)))
        return estimator

    def _score_transformed_data(self, data):
        """Return (label, probability) per pattern, or None below min_prob."""
        labels = [None] * len(data)
        probs = self._estimator.predict_proba(data)
        for i, p in enumerate(probs):
            max_p = np.max(p)
            if max_p >= self.__min_prob:
                labels[i] = (np.where(p == max_p)[0][0], max_p)
        return labels
class KMeansModel(Model):
    """K-means clustering model built on sklearn's KMeans estimator."""

    def __init__(self):
        Model.__init__(self)
        self._centroids = None  # cluster centers from the last fit

    def centroids(self):
        """Return the cluster centers found by the last fit (or None)."""
        return self._centroids

    def _fit(self, samples, n_clusters=2, init=4):
        """Fit KMeans with `n_clusters` clusters and `init` restarts."""
        started = timer()
        feature_count = len(samples[0])
        logging.debug('Running KMeans on %d patterns using %d features for %d clusters ...' %
                      (len(samples), feature_count, n_clusters))
        estimator = KMeans(n_clusters=n_clusters, n_init=init)
        estimator.fit(samples)
        self._centroids = estimator.cluster_centers_
        logging.info('Finished KMeans on %d patterns using %d features for %d clusters. %.3f sec.' %
                     (len(samples), feature_count, n_clusters, timer() - started))
        return estimator, n_clusters

    def _score_transformed_data(self, data):
        """Label each record with the index of its nearest cluster."""
        return self._estimator.predict(data)
|
# -*- coding: utf-8 -*-
#
# clx.py - Ethernet/IP Client for Rockwell PLCs
#
#
# Copyright (c) 2014 Agostino Ruscito <ruscito@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pycomm.cip.cip_base import *
import logging
# Python < 2.7 lacks logging.NullHandler; fall back to a local shim so the
# library stays silent unless the application configures logging itself.
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
# Byte sizes related to PLC STRING data types -- presumably the supported
# string-type lengths; confirm against the Logix data-type definitions.
string_sizes = [82, 12, 16, 20, 40, 8]
class Driver(Base):
"""
This Ethernet/IP client is based on Rockwell specification. Please refer to the link below for details.
http://literature.rockwellautomation.com/idc/groups/literature/documents/pm/1756-pm020_-en-p.pdf
The following services have been implemented:
- Read Tag Service (0x4c)
- Read Tag Fragment Service (0x52)
- Write Tag Service (0x4d)
- Write Tag Fragment Service (0x53)
- Multiple Service Packet (0x0a)
The client has been successfully tested with the following PLCs:
- CompactLogix 5330ERM
- CompactLogix 5370
- ControlLogix 5572 and 1756-EN2T Module
"""
def __init__(self):
    super(Driver, self).__init__()
    # Scratch buffer reused by the tag-list/template parsing routines.
    # NOTE(review): initialised as a dict here, but `_parse_template` does
    # `self._buffer += tags_returned` (string/bytes concatenation) -- the
    # template-reading caller presumably resets it to an empty string first;
    # confirm before relying on the dict default.
    self._buffer = {}
    # True while a multi-packet "Read Template" exchange is in flight.
    self._get_template_in_progress = False
    self.__version__ = '0.2'
def get_last_tag_read(self):
    """ Return the last tag read by a multi request read
    (set by `_parse_multiple_request_read`).

    :return: A tuple (tag name, value, type)
    """
    return self._last_tag_read
def get_last_tag_write(self):
    """ Return the last tag write by a multi request write
    (set by `_parse_multiple_request_write`).

    :return: A tuple (tag name, 'GOOD') if the write was successful otherwise (tag name, 'BAD')
    """
    return self._last_tag_write
def _parse_instance_attribute_list(self, start_tag_ptr, status):
    """ Extract the tag list from the received reply and append it to
    `self._tag_list`.

    Each record in the payload is laid out as: 4-byte instance id,
    2-byte name length, the tag name bytes, then a 2-byte symbol type.

    :param start_tag_ptr: The point in the message string where the tag list begins
    :param status: The status of the received message
    """
    tags_returned = self._reply[start_tag_ptr:]
    tags_returned_length = len(tags_returned)
    idx = 0
    instance = 0
    count = 0
    try:
        while idx < tags_returned_length:
            instance = unpack_dint(tags_returned[idx:idx+4])
            idx += 4
            tag_length = unpack_uint(tags_returned[idx:idx+2])
            idx += 2
            tag_name = tags_returned[idx:idx+tag_length]
            idx += tag_length
            symbol_type = unpack_uint(tags_returned[idx:idx+2])
            idx += 2
            count += 1
            self._tag_list.append({'instance_id': instance,
                                   'tag_name': tag_name,
                                   'symbol_type': symbol_type})
    except Exception as e:
        raise DataError(e)
    if status == SUCCESS:
        # Complete: -1 signals the caller that no further request is needed.
        self._last_instance = -1
    elif status == 0x06:
        # Partial reply (presumably "more data available"): resume the next
        # request from the instance after the last one received.
        self._last_instance = instance + 1
    else:
        self._status = (1, 'unknown status during _parse_tag_list')
        self._last_instance = -1
def _parse_structure_makeup_attributes(self, start_tag_ptr, status):
    """ Parse a "Get Attributes" reply describing a structure's makeup into
    `self._buffer`.

    On success `self._buffer` maps: object_definition_size, structure_size,
    member_count, structure_handle. On any failure it carries an 'Error' key
    instead (and the method returns None).

    :param start_tag_ptr: The point in the message string where the attributes begin
    :param status: The status of the received message
    """
    self._buffer = {}
    if status != SUCCESS:
        self._buffer['Error'] = status
        return
    attribute = self._reply[start_tag_ptr:]
    idx = 4
    try:
        # Each attribute is prefixed by a 2-byte per-attribute status word.
        if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:
            idx += 2
            self._buffer['object_definition_size'] = unpack_dint(attribute[idx:idx + 4])
        else:
            self._buffer['Error'] = 'object_definition Error'
            return
        idx += 6
        if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:
            idx += 2
            self._buffer['structure_size'] = unpack_dint(attribute[idx:idx + 4])
        else:
            self._buffer['Error'] = 'structure Error'
            return
        idx += 6
        if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:
            idx += 2
            self._buffer['member_count'] = unpack_uint(attribute[idx:idx + 2])
        else:
            self._buffer['Error'] = 'member_count Error'
            return
        idx += 4
        if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:
            idx += 2
            self._buffer['structure_handle'] = unpack_uint(attribute[idx:idx + 2])
        else:
            self._buffer['Error'] = 'structure_handle Error'
            return
        return self._buffer
    except Exception as e:
        raise DataError(e)
def _parse_template(self, start_tag_ptr, status):
    """ Accumulate a "Read Template" reply fragment into `self._buffer`.

    NOTE(review): `self._buffer += tags_returned` concatenates, so the
    caller must have reset `_buffer` to an empty string/bytes before the
    first fragment (it is a dict after __init__) -- confirm the call site.

    :param start_tag_ptr: The point in the message string where the template data begins
    :param status: The status of the received message
    """
    tags_returned = self._reply[start_tag_ptr:]
    bytes_received = len(tags_returned)
    self._buffer += tags_returned
    if status == SUCCESS:
        # Whole template received: stop the multi-packet exchange.
        self._get_template_in_progress = False
    elif status == 0x06:
        # Partial reply: advance the offset for the next fragment request.
        self._byte_offset += bytes_received
    else:
        self._status = (1, 'unknown status {0} during _parse_template'.format(status))
        logger.warning(self._status)
        self._last_instance = -1
def _parse_fragment(self, start_ptr, status):
    """ Parse the fragment returned by a fragment service into
    `self._tag_list` (raw bytes when `self._output_raw`, else decoded
    (position, value) tuples).

    :param start_ptr: Where the fragment starts within the reply
    :param status: status field used to decide whether to keep reading or stop
    """
    try:
        # The fragment starts with a 2-byte data-type code.
        data_type = unpack_uint(self._reply[start_ptr:start_ptr+2])
        fragment_returned = self._reply[start_ptr+2:]
    except Exception as e:
        raise DataError(e)
    fragment_returned_length = len(fragment_returned)
    idx = 0
    while idx < fragment_returned_length:
        try:
            typ = I_DATA_TYPE[data_type]
            if self._output_raw:
                value = fragment_returned[idx:idx+DATA_FUNCTION_SIZE[typ]]
            else:
                value = UNPACK_DATA_FUNCTION[typ](fragment_returned[idx:idx+DATA_FUNCTION_SIZE[typ]])
            idx += DATA_FUNCTION_SIZE[typ]
        except Exception as e:
            raise DataError(e)
        if self._output_raw:
            self._tag_list += value
        else:
            self._tag_list.append((self._last_position, value))
            self._last_position += 1
    if status == SUCCESS:
        # Transfer complete: -1 tells the caller not to request more.
        self._byte_offset = -1
    elif status == 0x06:
        # Partial reply: continue from the next byte offset.
        self._byte_offset += fragment_returned_length
    else:
        self._status = (2, '{0}: {1}'.format(SERVICE_STATUS[status], get_extended_status(self._reply, 48)))
        logger.warning(self._status)
        self._byte_offset = -1
def _parse_multiple_request_read(self, tags):
    """ Parse the message received from a multi request read:

    For each tag parsed, the information extracted includes the tag name,
    the value read and the data type. That information is appended to the
    tag list as a tuple; failed reads yield (tag, None, None).

    :param tags: the tag names requested, in request order
    :return: the tag list
    """
    # 50: fixed offset of the service payload within the reply
    # (presumably past the encapsulation + CIP headers; confirm).
    offset = 50
    position = 50
    try:
        number_of_service_replies = unpack_uint(self._reply[offset:offset+2])
        tag_list = []
        for index in range(number_of_service_replies):
            # Each entry starts with a 2-byte offset into the payload.
            position += 2
            start = offset + unpack_uint(self._reply[position:position+2])
            general_status = unpack_usint(self._reply[start+2:start+3])
            if general_status == 0:
                data_type = unpack_uint(self._reply[start+4:start+6])
                value_begin = start + 6
                value_end = value_begin + DATA_FUNCTION_SIZE[I_DATA_TYPE[data_type]]
                value = self._reply[value_begin:value_end]
                self._last_tag_read = (tags[index], UNPACK_DATA_FUNCTION[I_DATA_TYPE[data_type]](value),
                                       I_DATA_TYPE[data_type])
            else:
                self._last_tag_read = (tags[index], None, None)
            tag_list.append(self._last_tag_read)
        return tag_list
    except Exception as e:
        raise DataError(e)
def _parse_multiple_request_write(self, tags):
    """ Parse the message received from a multi request write:

    For each tag parsed, the information extracted includes the tag name
    and the status of the write. That information is appended to the tag
    list as a tuple of the form (tag..., 'GOOD'|'BAD').

    :param tags: the tag tuples requested, in request order
    :return: the tag list
    """
    # Same fixed payload offset as _parse_multiple_request_read.
    offset = 50
    position = 50
    try:
        number_of_service_replies = unpack_uint(self._reply[offset:offset+2])
        tag_list = []
        for index in range(number_of_service_replies):
            position += 2
            start = offset + unpack_uint(self._reply[position:position+2])
            general_status = unpack_usint(self._reply[start+2:start+3])
            # general status 0 == success for this entry
            if general_status == 0:
                self._last_tag_write = (tags[index] + ('GOOD',))
            else:
                self._last_tag_write = (tags[index] + ('BAD',))
            tag_list.append(self._last_tag_write)
        return tag_list
    except Exception as e:
        raise DataError(e)
def _check_reply(self):
    """ check the replayed message for error

    Inspects ``self._reply`` and returns True when the encapsulation and
    CIP status bytes report success.  Side effects: updates
    ``self._status`` and ``self._more_packets_available``, and dispatches
    the specialised parsers for fragmented reads, instance-attribute
    lists, structure attributes and template reads.

    :return: True when the reply is usable, False on error
    :raise DataError: if the reply buffer cannot be parsed
    """
    self._more_packets_available = False
    try:
        if self._reply is None:
            self._status = (3, '%s without reply' % REPLAY_INFO[unpack_dint(self._message[:2])])
            return False
        # Get the type of command
        typ = unpack_uint(self._reply[:2])
        # Encapsulation status check
        if unpack_dint(self._reply[8:12]) != SUCCESS:
            self._status = (3, "{0} reply status:{1}".format(REPLAY_INFO[typ],
                            SERVICE_STATUS[unpack_dint(self._reply[8:12])]))
            return False
        # Command Specific Status check
        if typ == unpack_uint(ENCAPSULATION_COMMAND["send_rr_data"]):
            # unconnected message: the CIP general status byte is at offset 42
            status = unpack_usint(self._reply[42:43])
            if status != SUCCESS:
                self._status = (3, "send_rr_data reply:{0} - Extend status:{1}".format(
                    SERVICE_STATUS[status], get_extended_status(self._reply, 42)))
                return False
            else:
                return True
        elif typ == unpack_uint(ENCAPSULATION_COMMAND["send_unit_data"]):
            # connected message: reply service code at offset 46,
            # CIP general status byte at offset 48
            status = unpack_usint(self._reply[48:49])
            if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Read Tag Fragmented"]:
                self._parse_fragment(50, status)
                return True
            if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Get Instance Attributes List"]:
                self._parse_instance_attribute_list(50, status)
                return True
            if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Get Attributes"]:
                self._parse_structure_makeup_attributes(50, status)
                return True
            if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Read Template"] and \
                    self._get_template_in_progress:
                self._parse_template(50, status)
                return True
            if status == 0x06:
                # partial transfer: the caller must request the remainder
                self._status = (3, "Insufficient Packet Space")
                self._more_packets_available = True
            elif status != SUCCESS:
                self._status = (3, "send_unit_data reply:{0} - Extend status:{1}".format(
                    SERVICE_STATUS[status], get_extended_status(self._reply, 48)))
                logger.warning(self._status)
                return False
            else:
                return True
        # reached for 0x06 (more packets) and for unrecognised command types
        return True
    except Exception as e:
        raise DataError(e)
def read_tag(self, tag):
    """ read tag from a connected plc
    Possible combination can be passed to this method:
            - ('Counts') a single tag name
            - (['ControlWord']) a list with one tag or many
            - (['parts', 'ControlWord', 'Counts'])
    At the moment there is not a strong validation for the argument passed. The user should verify
    the correctness of the format passed.

    :param tag: a tag name, or a list of tag names for a multi-service read
    :return: None is returned in case of error otherwise the tag list is returned
    :raise DataError: on connection failures or invalid reply data
    """
    self.clear()
    multi_requests = False
    if isinstance(tag, list):
        multi_requests = True
    # lazily (re)open the connected session
    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (6, "Target did not connected. read_tag will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. read_tag will not be executed.")
    if multi_requests:
        rp_list = []
        for t in tag:
            rp = create_tag_rp(t, multi_requests=True)
            if rp is None:
                self._status = (6, "Cannot create tag {0} request packet. read_tag will not be executed.".format(tag))
                raise DataError("Cannot create tag {0} request packet. read_tag will not be executed.".format(tag))
            else:
                # service code + request path + element count (1) per tag
                rp_list.append(chr(TAG_SERVICES_REQUEST['Read Tag']) + rp + pack_uint(1))
        message_request = build_multiple_service(rp_list, Base._get_sequence())
    else:
        rp = create_tag_rp(tag)
        if rp is None:
            self._status = (6, "Cannot create tag {0} request packet. read_tag will not be executed.".format(tag))
            return None
        else:
            # Creating the Message Request Packet
            message_request = [
                pack_uint(Base._get_sequence()),
                chr(TAG_SERVICES_REQUEST['Read Tag']),  # the Request Service
                chr(len(rp) / 2),  # the Request Path Size length in word (Python 2 integer division)
                rp,  # the request path
                pack_uint(1)
            ]
    if self.send_unit_data(
            build_common_packet_format(
                DATA_ITEM['Connected'],
                ''.join(message_request),
                ADDRESS_ITEM['Connection Based'],
                addr_data=self._target_cid,
            )) is None:
        raise DataError("send_unit_data returned not valid data")
    if multi_requests:
        return self._parse_multiple_request_read(tag)
    else:
        # Get the data type, then decode the value that follows it
        if self._status[0] == SUCCESS:
            data_type = unpack_uint(self._reply[50:52])
            try:
                return UNPACK_DATA_FUNCTION[I_DATA_TYPE[data_type]](self._reply[52:]), I_DATA_TYPE[data_type]
            except Exception as e:
                raise DataError(e)
        else:
            return None
def read_array(self, tag, counts, raw=False):
    """ read array of atomic data type from a connected plc
    At the moment there is not a strong validation for the argument passed. The user should verify
    the correctness of the format passed.

    :param tag: the name of the tag to read
    :param counts: the number of element to read
    :param raw: the value should output as raw-value (hex)
    :return: None is returned in case of error otherwise the tag list is returned
    :raise DataError: on connection failures or invalid reply data
    """
    self.clear()
    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (7, "Target did not connected. read_tag will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. read_tag will not be executed.")
    self._byte_offset = 0
    self._last_position = 0
    self._output_raw = raw
    # raw mode accumulates a byte string, normal mode a list of values
    if self._output_raw:
        self._tag_list = ''
    else:
        self._tag_list = []
    # _byte_offset advances after each fragment; the fragment parser
    # (invoked through send_unit_data/_check_reply) sets it to -1 once
    # the whole array has been received -- presumably, confirm in
    # _parse_fragment.
    while self._byte_offset != -1:
        rp = create_tag_rp(tag)
        if rp is None:
            self._status = (7, "Cannot create tag {0} request packet. read_tag will not be executed.".format(tag))
            return None
        else:
            # Creating the Message Request Packet
            message_request = [
                pack_uint(Base._get_sequence()),
                chr(TAG_SERVICES_REQUEST["Read Tag Fragmented"]),  # the Request Service
                chr(len(rp) / 2),  # the Request Path Size length in word (Python 2 integer division)
                rp,  # the request path
                pack_uint(counts),
                pack_dint(self._byte_offset)
            ]
        if self.send_unit_data(
                build_common_packet_format(
                    DATA_ITEM['Connected'],
                    ''.join(message_request),
                    ADDRESS_ITEM['Connection Based'],
                    addr_data=self._target_cid,
                )) is None:
            raise DataError("send_unit_data returned not valid data")
    return self._tag_list
def write_tag(self, tag, value=None, typ=None):
    """ write tag/tags from a connected plc

    Possible combination can be passed to this method:
            - ('tag name', Value, data type)  as single parameters or inside a tuple
            - ([('tag name', Value, data type), ('tag name2', Value, data type)]) as array of tuples

    At the moment there is not a strong validation for the argument passed. The user should verify
    the correctness of the format passed.

    The type accepted are:
        - BOOL
        - SINT
        - INT
        - DINT
        - REAL
        - LINT
        - BYTE
        - WORD
        - DWORD
        - LWORD

    :param tag: tag name, or an array of tuple containing (tag name, value, data type)
    :param value: the value to write or none if tag is an array of tuple or a tuple
    :param typ: the type of the tag to write or none if tag is an array of tuple or a tuple
    :return: None is returned in case of error otherwise the tag list is returned
    :raise DataError: on connection failures or invalid reply data
    """
    self.clear()  # cleanup error string
    multi_requests = isinstance(tag, list)

    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (8, "Target did not connected. write_tag will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. write_tag will not be executed.")

    if multi_requests:
        rp_list = []
        tag_to_remove = []
        for idx, (name, value, typ) in enumerate(tag):
            # Create the request path to wrap the tag name
            rp = create_tag_rp(name, multi_requests=True)
            if rp is None:
                self._status = (8, "Cannot create tag{0} req. packet. write_tag will not be executed".format(tag))
                return None
            else:
                try:  # Trying to add the rp to the request path list
                    val = PACK_DATA_FUNCTION[typ](value)
                    rp_list.append(
                        chr(TAG_SERVICES_REQUEST['Write Tag'])
                        + rp
                        + pack_uint(S_DATA_TYPE[typ])
                        + pack_uint(1)
                        + val
                    )
                except (LookupError, struct.error) as e:
                    self._status = (8, "Tag:{0} type:{1} removed from write list. Error:{2}.".format(name, typ, e))
                    # BUG FIX: record the tag's true index in `tag`. The
                    # previous code counted successful packs only, so after
                    # the first failure every recorded index was wrong.
                    tag_to_remove.append(idx)

        # Remove the tags that could not be packed.  Deleting from the end
        # keeps the remaining recorded indexes valid (ascending deletion
        # would shift every later index by one).
        for position in reversed(tag_to_remove):
            del tag[position]

        # Create the message request
        message_request = build_multiple_service(rp_list, Base._get_sequence())
    else:
        if isinstance(tag, tuple):
            name, value, typ = tag
        else:
            name = tag
        rp = create_tag_rp(name)
        if rp is None:
            self._status = (8, "Cannot create tag {0} request packet. write_tag will not be executed.".format(tag))
            logger.warning(self._status)
            return None
        else:
            # Creating the Message Request Packet
            message_request = [
                pack_uint(Base._get_sequence()),
                chr(TAG_SERVICES_REQUEST["Write Tag"]),  # the Request Service
                chr(len(rp) // 2),  # the Request Path Size length in word
                rp,  # the request path
                pack_uint(S_DATA_TYPE[typ]),  # data type
                pack_uint(1),  # Add the number of tag to write
                PACK_DATA_FUNCTION[typ](value)
            ]

    ret_val = self.send_unit_data(
        build_common_packet_format(
            DATA_ITEM['Connected'],
            ''.join(message_request),
            ADDRESS_ITEM['Connection Based'],
            addr_data=self._target_cid,
        )
    )

    if multi_requests:
        return self._parse_multiple_request_write(tag)
    else:
        if ret_val is None:
            raise DataError("send_unit_data returned not valid data")
        return ret_val
def write_array(self, tag, values, data_type, raw=False):
    """ write array of atomic data type from a connected plc
    At the moment there is not a strong validation for the argument passed. The user should verify
    the correctness of the format passed.

    :param tag: the name of the tag to read
    :param data_type: the type of tag to write
    :param values: the array of values to write, if raw: the frame with bytes
    :param raw: indicates that the values are given as raw values (hex)
    :raise DataError: if values is not a list, or on connection/reply errors
    """
    self.clear()
    if not isinstance(values, list):
        self._status = (9, "A list of tags must be passed to write_array.")
        logger.warning(self._status)
        raise DataError("A list of tags must be passed to write_array.")
    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (9, "Target did not connected. write_array will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. write_array will not be executed.")
    array_of_values = ""
    byte_size = 0
    byte_offset = 0
    for i, value in enumerate(values):
        if raw:
            # NOTE(review): in raw mode byte_size is never incremented, so
            # the 450-byte fragmentation threshold below only fires on the
            # last element -- confirm this is intended.
            array_of_values += value
        else:
            array_of_values += PACK_DATA_FUNCTION[data_type](value)
            byte_size += DATA_FUNCTION_SIZE[data_type]
        # flush a fragment once ~450 bytes are buffered, or on the last element
        if byte_size >= 450 or i == len(values)-1:
            # create the message and send the fragment
            rp = create_tag_rp(tag)
            if rp is None:
                self._status = (9, "Cannot create tag {0} request packet. \
                    write_array will not be executed.".format(tag))
                return None
            else:
                # Creating the Message Request Packet
                message_request = [
                    pack_uint(Base._get_sequence()),
                    chr(TAG_SERVICES_REQUEST["Write Tag Fragmented"]),  # the Request Service
                    chr(len(rp) / 2),  # the Request Path Size length in word (Python 2 integer division)
                    rp,  # the request path
                    pack_uint(S_DATA_TYPE[data_type]),  # Data type to write
                    pack_uint(len(values)),  # Number of elements to write
                    pack_dint(byte_offset),
                    array_of_values  # Fragment of elements to write
                ]
                byte_offset += byte_size
            if self.send_unit_data(
                    build_common_packet_format(
                        DATA_ITEM['Connected'],
                        ''.join(message_request),
                        ADDRESS_ITEM['Connection Based'],
                        addr_data=self._target_cid,
                    )) is None:
                raise DataError("send_unit_data returned not valid data")
            # reset the buffer for the next fragment
            array_of_values = ""
            byte_size = 0
def _get_instance_attribute_list_service(self):
    """ Step 1: Finding user-created controller scope tags in a Logix5000 controller
    This service returns instance IDs for each created instance of the symbol class, along with a list
    of the attribute data associated with the requested attribute

    Loops until ``self._last_instance`` becomes -1 -- presumably set by
    the reply parser reached through send_unit_data (TODO confirm in
    _parse_instance_attribute_list) -- each time requesting the symbol
    instances that follow the last one received.

    :raise DataError: on connection or reply errors
    """
    try:
        if not self._target_is_connected:
            if not self.forward_open():
                self._status = (10, "Target did not connected. get_tag_list will not be executed.")
                logger.warning(self._status)
                raise DataError("Target did not connected. get_tag_list will not be executed.")
        self._last_instance = 0
        self._get_template_in_progress = True
        while self._last_instance != -1:
            # Creating the Message Request Packet
            message_request = [
                pack_uint(Base._get_sequence()),
                chr(TAG_SERVICES_REQUEST['Get Instance Attributes List']),  # STEP 1
                # the Request Path Size length in word
                chr(3),
                # Request Path ( 20 6B 25 00 Instance )
                CLASS_ID["8-bit"],  # Class id = 20 from spec 0x20
                CLASS_CODE["Symbol Object"],  # Logical segment: Symbolic Object 0x6B
                INSTANCE_ID["16-bit"],  # Instance Segment: 16 Bit instance 0x25
                '\x00',
                pack_uint(self._last_instance),  # The instance
                # Request Data
                pack_uint(2),  # Number of attributes to retrieve
                pack_uint(1),  # Attribute 1: Symbol name
                pack_uint(2)  # Attribute 2: Symbol type
            ]
            if self.send_unit_data(
                    build_common_packet_format(
                        DATA_ITEM['Connected'],
                        ''.join(message_request),
                        ADDRESS_ITEM['Connection Based'],
                        addr_data=self._target_cid,
                    )) is None:
                raise DataError("send_unit_data returned not valid data")
        self._get_template_in_progress = False
    except Exception as e:
        raise DataError(e)
def _get_structure_makeup(self, instance_id):
    """
    get the structure makeup for a specific structure

    Requests four Template Object attributes (definition size, structure
    size, member count, structure handle) for the template instance; the
    parsed result is accumulated in ``self._buffer`` by the reply
    handling triggered inside send_unit_data.

    :param instance_id: the template instance to query
    :return: ``self._buffer`` with the parsed attribute data
    :raise DataError: on connection or reply errors
    """
    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (10, "Target did not connected. get_tag_list will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. get_tag_list will not be executed.")
    message_request = [
        pack_uint(self._get_sequence()),
        chr(TAG_SERVICES_REQUEST['Get Attributes']),
        chr(3),  # Request Path ( 20 6B 25 00 Instance )
        CLASS_ID["8-bit"],  # Class id = 20 from spec 0x20
        CLASS_CODE["Template Object"],  # Logical segment: Template Object 0x6C
        INSTANCE_ID["16-bit"],  # Instance Segment: 16 Bit instance 0x25
        '\x00',
        pack_uint(instance_id),
        pack_uint(4),  # Number of attributes
        pack_uint(4),  # Template Object Definition Size UDINT
        pack_uint(5),  # Template Structure Size UDINT
        pack_uint(2),  # Template Member Count UINT
        pack_uint(1)  # Structure Handle We can use this to read and write UINT
    ]
    if self.send_unit_data(
            build_common_packet_format(DATA_ITEM['Connected'],
                                       ''.join(message_request), ADDRESS_ITEM['Connection Based'],
                                       addr_data=self._target_cid,)) is None:
        raise DataError("send_unit_data returned not valid data")
    return self._buffer
def _read_template(self, instance_id, object_definition_size):
    """ get a list of the tags in the plc

    Reads the raw template definition for the given template instance.
    The transfer is fragmented: ``self._byte_offset`` tracks progress and
    the reply handling fills ``self._buffer``.

    :param instance_id: the template instance to read
    :param object_definition_size: definition size in 32-bit words, used
        to compute the number of bytes to request
    :return: the raw template bytes accumulated in ``self._buffer``
    :raise DataError: on connection or reply errors
    """
    if not self._target_is_connected:
        if not self.forward_open():
            self._status = (10, "Target did not connected. get_tag_list will not be executed.")
            logger.warning(self._status)
            raise DataError("Target did not connected. get_tag_list will not be executed.")
    self._byte_offset = 0
    self._buffer = ""
    self._get_template_in_progress = True
    try:
        while self._get_template_in_progress:
            # Creating the Message Request Packet
            message_request = [
                pack_uint(self._get_sequence()),
                chr(TAG_SERVICES_REQUEST['Read Template']),
                chr(3),  # Request Path ( 20 6B 25 00 Instance )
                CLASS_ID["8-bit"],  # Class id = 20 from spec 0x20
                CLASS_CODE["Template Object"],  # Logical segment: Template Object 0x6C
                INSTANCE_ID["16-bit"],  # Instance Segment: 16 Bit instance 0x25
                '\x00',
                pack_uint(instance_id),
                pack_dint(self._byte_offset),  # Offset
                # bytes still to read: total definition bytes minus a
                # 23-byte header, minus what was already transferred
                pack_uint(((object_definition_size * 4)-23) - self._byte_offset)
            ]
            if not self.send_unit_data(
                    build_common_packet_format(DATA_ITEM['Connected'], ''.join(message_request),
                                               ADDRESS_ITEM['Connection Based'], addr_data=self._target_cid,)):
                raise DataError("send_unit_data returned not valid data")
        self._get_template_in_progress = False
        return self._buffer
    except Exception as e:
        raise DataError(e)
def _isolating_user_tag(self):
    """ Filter the raw symbol list down to user-created tags.

    Rebuilds ``self._tag_list`` keeping only user tags and normalising
    each entry into a dict (struct tags get template placeholders,
    atomic tags get their resolved data type).

    :raise DataError: if an entry cannot be processed
    """
    try:
        lst = self._tag_list
        self._tag_list = []
        for tag in lst:
            # names containing ':' or '__' are module/system scoped -- skip
            if tag['tag_name'].find(':') != -1 or tag['tag_name'].find('__') != -1:
                continue
            # bit 12 of the symbol type word flags entries that are
            # skipped here (reserved/system -- TODO confirm semantics)
            if tag['symbol_type'] & 0b0001000000000000:
                continue
            # bits 13-14: number of array dimensions
            dimension = (tag['symbol_type'] & 0b0110000000000000) >> 13
            if tag['symbol_type'] & 0b1000000000000000 :
                # bit 15 set: structured tag; bits 0-11 hold the template instance id
                template_instance_id = tag['symbol_type'] & 0b0000111111111111
                tag_type = 'struct'
                data_type = 'user-created'
                self._tag_list.append({'instance_id': tag['instance_id'],
                                       'template_instance_id': template_instance_id,
                                       'tag_name': tag['tag_name'],
                                       'dim': dimension,
                                       'tag_type': tag_type,
                                       'data_type': data_type,
                                       'template': {},
                                       'udt': {}})
            else:
                # atomic tag: bits 0-7 hold the data type code
                tag_type = 'atomic'
                datatype = tag['symbol_type'] & 0b0000000011111111
                data_type = I_DATA_TYPE[datatype]
                if datatype == 0xc1:
                    # BOOL (0xc1): bits 8-10 give the bit position
                    bit_position = (tag['symbol_type'] & 0b0000011100000000) >> 8
                    self._tag_list.append({'instance_id': tag['instance_id'],
                                           'tag_name': tag['tag_name'],
                                           'dim': dimension,
                                           'tag_type': tag_type,
                                           'data_type': data_type,
                                           'bit_position' : bit_position})
                else:
                    self._tag_list.append({'instance_id': tag['instance_id'],
                                           'tag_name': tag['tag_name'],
                                           'dim': dimension,
                                           'tag_type': tag_type,
                                           'data_type': data_type})
    except Exception as e:
        raise DataError(e)
def _parse_udt_raw(self, tag):
    """ Parse the raw template bytes of a structured tag.

    Fills ``tag['udt']`` with the structure name, its member names and a
    (array_size, data_type, offset) tuple per member.

    :param tag: a struct tag dict produced by _isolating_user_tag
    :raise DataError: if the template buffer cannot be parsed
    """
    try:
        buff = self._read_template(tag['template_instance_id'], tag['template']['object_definition_size'])
        member_count = tag['template']['member_count']
        # member names are NUL-separated at the end of the template blob
        names = buff.split('\00')
        lst = []
        tag['udt']['name'] = 'Not an user defined structure'
        for name in names:
            if len(name) > 1:
                if name.find(';') != -1:
                    # "<structure name>;<...>" record carries the UDT name
                    tag['udt']['name'] = name[:name.find(';')]
                elif name.find('ZZZZZZZZZZ') != -1:
                    # placeholder/padding entry -- skip
                    continue
                elif name.isalpha():
                    lst.append(name)
                else:
                    continue
        tag['udt']['internal_tags'] = lst
        type_list = []
        # each member record is 8 bytes: array size, data type, offset
        for i in xrange(member_count):
            # skip member 1
            if i != 0:
                array_size = unpack_uint(buff[:2])
                try:
                    data_type = I_DATA_TYPE[unpack_uint(buff[2:4])]
                except Exception:
                    data_type = "None"
                offset = unpack_dint(buff[4:8])
                type_list.append((array_size, data_type, offset))
            buff = buff[8:]
        tag['udt']['data_type'] = type_list
    except Exception as e:
        raise DataError(e)
def get_tag_list(self):
    """ Build and return the list of tags defined in the controller.

    Runs the discovery pipeline: fetch the symbol instances, keep only
    user tags, then resolve template/UDT details for structured tags.
    """
    self._tag_list = []
    # Step 1: retrieve instance id, name and symbol type for every symbol
    self._get_instance_attribute_list_service()
    # Step 2: drop system tags and normalise the remaining entries
    self._isolating_user_tag()
    # Step 3: resolve the template makeup for every structured tag
    struct_entries = [entry for entry in self._tag_list
                      if entry['tag_type'] == 'struct']
    for entry in struct_entries:
        entry['template'] = self._get_structure_makeup(entry['template_instance_id'])
    # Step 4: decode the raw template data into UDT details
    for entry in struct_entries:
        self._parse_udt_raw(entry)
    return self._tag_list
def write_string(self, tag, value, size=82):
    """
    Rockwell define different string size:
        STRING  STRING_12  STRING_16  STRING_20  STRING_40  STRING_8
    by default we assume size 82 (STRING)

    Writes ``value`` into ``tag`` by updating the .LEN member and the
    .DATA SINT array (zero padded up to ``size``).

    :param tag: the STRING tag name
    :param value: the Python string to write
    :param size: the declared capacity of the STRING type
    :raise DataError: if the size is unknown or the value does not fit
    """
    if size not in string_sizes:
        raise DataError("String size is incorrect")
    # BUG FIX: the old code indexed past the buffer (IndexError) when the
    # value was longer than the declared string size; fail explicitly.
    if len(value) > size:
        raise DataError("String too long for the given size")
    data_tag = ".".join((tag, "DATA"))
    len_tag = ".".join((tag, "LEN"))
    # character codes, zero padded to the declared capacity
    data_to_send = [ord(ch) for ch in value] + [0] * (size - len(value))
    self.write_tag(len_tag, len(value), 'DINT')
    self.write_array(data_tag, data_to_send, 'SINT')
def read_string(self, tag):
    """ Read a Rockwell STRING tag and return it as a Python string.

    Reads the .LEN member first, then that many SINT elements from the
    .DATA member and converts them to characters.
    """
    data_member = ".".join((tag, "DATA"))
    len_member = ".".join((tag, "LEN"))
    current_length = self.read_tag(len_member)
    raw_items = self.read_array(data_member, current_length[0])
    return ''.join(chr(item[1]) for item in raw_items)
|
# Imports
import tests.utils as utils
import pytest
import os
@pytest.fixture(scope='session')
def create_intermediate_file(request):
    # In case there is an intermediate file conversion needed (e.g. PSIM to XML)
    # The appropriate function for the source_file_format will be called
    source_file_format, path_and_file = request.param
    test_file_name, current_test_dir = path_and_file
    # BUG FIX: the path was built with a hard-coded '\\' separator, which
    # only works on Windows; os.path.join is portable.
    original_file_path = os.path.join(
        current_test_dir,
        test_file_name + utils.extensions_dict[source_file_format])
    # Runs the appropriate function on utils to get an intermediate file if it's the case
    return utils.intermediate_file_function_dict[source_file_format](original_file_path)
# Conversion of the intermediate file to .tse
@pytest.fixture(scope='session')
def convert_to_tse(create_intermediate_file, request):
    """Convert the intermediate file produced upstream into a .tse schematic."""
    fmt, _ = request.param
    _, intermediate_path = create_intermediate_file
    converted_path = utils.convert_to_tse(fmt, intermediate_path)
    return utils.rename_tse_file(converted_path, fmt)
# Load and compile the generated .tse file
@pytest.fixture(scope='session')
def load_and_compile(convert_to_tse, request):
    """Load the converted schematic and compile it (request.param: use VHIL)."""
    utils.load_and_compile(convert_to_tse, request.param)
|
# ------------------------------------------------------------------------------
# Copyright (c) 2019 Parallax Inc. -
# -
# Permission is hereby granted, free of charge, to any person obtaining -
# a copy of this software and associated documentation files (the -
# "Software"), to deal in the Software without restriction, including     -
# without limitation the rights to use, copy, modify, merge, publish, -
# distribute, sublicense, and/or sell copies of the Software, and to -
# permit persons to whom the Software is furnished to do so, subject -
# to the following conditions: -
# -
# The above copyright notice and this permission notice shall be -
# included in all copies or substantial portions of the Software. -
# -
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,         -
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF      -
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  -
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
# -
# -
# ------------------------------------------------------------------------------
"""
User REST API
The url_prefix is '/user'. All user endpoints are in the form
# of host:port/user/_service_
#
# Register a new user account
/user/register
server = request.headers.get('server')
email = request.form.get('email')
password = request.form.get('password')
password_confirm = request.form.get('password-confirm')
locale = request.form.get('locale')
screen_name = request.form.get('screenname')
# COPPA support
birth_month = request.form.get('bdmonth')
birth_year = request.form.get('bdyear')
parent_email = request.form.get('parent-email')
parent_email_source = request.form.get('parent-email-source')
# Retrieve details about an existing user account
api.add_resource(GetUserById, '/id/<int:id_user>')
api.add_resource(GetUserByEmail, '/email/<string:email>')
api.add_resource(GetUserByScreenname, '/screenname/<string:screen_name>')
# Update a user screen name
api.add_resource(DoInfoChange, '/info/<int:id_user>')
# Update the local defined in the user account
api.add_resource(DoLocaleChange, '/locale/<int:id_user>')
""" |
from flask import Flask

# WSGI application object, configured for development.
app = Flask(__name__)
app.config.from_object("common.config.DevelopmentConfig")

# Imported after `app` is created so the views module can import `app`
# from this package without a circular-import failure.
from ewi.api import views
|
# -*- coding: utf-8 -*-
import json
import os.path
import yaml
from chaoslib.exceptions import ActivityFailed
from chaoslib.types import Secrets
from kubernetes import client
from kubernetes.client.rest import ApiException
from logzero import logger
from chaosk8s import create_k8s_api_client, add_trigger_environment_variable
__all__ = [
"create_statefulset",
"scale_statefulset",
"remove_statefulset",
"trigger_rollout"
]
def create_statefulset(spec_path: str, ns: str = "default",
                       secrets: Secrets = None):
    """
    Create a statefulset described by the service config, which must be
    the path to the JSON or YAML representation of the statefulset.

    :param spec_path: path to a .json / .yml / .yaml manifest
    :param ns: namespace to create the statefulset in
    :param secrets: optional Kubernetes credentials
    :raises ActivityFailed: when the file extension is not recognised
    """
    api = create_k8s_api_client(secrets)

    with open(spec_path) as f:
        # only the extension matters; the basename was previously bound to
        # an unused local
        _, ext = os.path.splitext(spec_path)
        if ext == '.json':
            # parse straight from the file object instead of read()+loads
            statefulset = json.load(f)
        elif ext in ('.yml', '.yaml'):
            statefulset = yaml.safe_load(f)
        else:
            raise ActivityFailed(
                "cannot process {path}".format(path=spec_path))

    v1 = client.AppsV1Api(api)
    v1.create_namespaced_stateful_set(ns, body=statefulset)
def scale_statefulset(name: str, replicas: int, ns: str = "default",
                      secrets: Secrets = None):
    """
    Scale a stateful set up or down. The `name` is the name of the stateful
    set.
    """
    v1 = client.AppsV1Api(create_k8s_api_client(secrets))
    patch = {"spec": {"replicas": replicas}}
    try:
        v1.patch_namespaced_stateful_set(name, namespace=ns, body=patch)
    except ApiException as err:
        message = "failed to scale '{s}' to {r} replicas: {e}".format(
            s=name, r=replicas, e=str(err))
        raise ActivityFailed(message)
def remove_statefulset(name: str = None, ns: str = "default",
                       label_selector: str = None, secrets: Secrets = None):
    """
    Remove a statefulset by `name` in the namespace `ns`.

    The statefulset is removed by deleting it without
    a graceful period to trigger an abrupt termination.

    The selected resources are matched by the given `label_selector`.

    :param name: name of the statefulset (used as a field selector)
    :param ns: namespace to search in
    :param label_selector: optional additional label filter
    :param secrets: optional Kubernetes credentials
    """
    field_selector = "metadata.name={name}".format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)

    if label_selector:
        ret = v1.list_namespaced_stateful_set(
            ns, field_selector=field_selector,
            label_selector=label_selector)
    else:
        ret = v1.list_namespaced_stateful_set(
            ns, field_selector=field_selector)

    logger.debug("Found {d} statefulset(s) named '{n}' in ns '{s}'".format(
        d=len(ret.items), n=name, s=ns))

    body = client.V1DeleteOptions()
    for d in ret.items:
        # return value intentionally ignored (was bound to an unused local)
        v1.delete_namespaced_stateful_set(d.metadata.name, ns, body=body)
def trigger_rollout(name: str, ns: str = "default", secrets: Secrets = None):
    """
    Trigger a rolling update of the statefulset named `name` in `ns` by
    injecting a trigger environment variable into each container and
    replacing the statefulset spec.

    :param name: the statefulset to roll
    :param ns: namespace of the statefulset (BUG FIX: was annotated as
        ``ns: "default"`` with no default value, making `ns` a required
        argument with a nonsense annotation)
    :param secrets: optional Kubernetes credentials
    """
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    statefulset = v1.read_namespaced_stateful_set(name, ns)
    for container in statefulset.spec.template.spec.containers:
        add_trigger_environment_variable(container)
    v1.replace_namespaced_stateful_set(name, ns, statefulset)
|
# -*- coding: utf-8 -*-
import time
import datetime
from datetime import timedelta
import sys
import os
import re
import md5
from google.appengine.ext import ndb
import utils
from model.users import Users
from model.connections import Connections
from init import InitHandler
from model.devices import Devices
from oauth2 import OAuth2Handler
from model.oauth2_tokens import OAuth2Tokens
import logging
import jinja2
# Template environment used to render the HTML dialogs from the
# "dialogs" directory (see the `dialog` argument handling in post()).
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader("dialogs"),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class UserHandler(InitHandler):
def __init__(self, request, response):
    # Delegate to InitHandler so common request/session setup happens.
    InitHandler.__init__(self, request, response)
# GET USERS (MY INFO)
def get(self, **kwargs):
    """ HTTP GET: return a single user's profile.

    ``uid`` may be a numeric id or the literal 'me' for the currently
    authenticated user.  When called as 'me' with a ``refresh_token``
    argument the OAuth2 token is refreshed instead.
    """
    result = {
        'code': 400,
        'message': 'bad request'
    }
    # token refresh
    if kwargs.get('uid') == 'me' and self.arguments.get('refresh_token'):
        token = OAuth2Handler.grant('refresh', {'refresh_token': self.arguments.get('refresh_token')})
        result['code'] = 200
        result['message'] = 'OK'
        result['User'] = token.user.get().to_obj(mine = True)
        result['Token'] = token.to_obj()
        return self.createRes(200, result)
    try:
        # 'me' resolves through the session; anything else must be an int
        uid = self.get_user().get('uid') if kwargs.get('uid') == 'me' else int(kwargs.get('uid', 0))
    except ValueError, e:
        # non-numeric uid in the URL
        result['code'] = 400
        result['message'] = 'bad request'
        return self.createRes(400, result)
    except AttributeError, e1:
        # uid == 'me' but get_user() returned None (no session)
        result['code'] = 400
        result['message'] = 'bad request'
        return self.createRes(400, result)
    if kwargs.get('uid') == 'me' and not self.get_user():
        result['code'] = 401
        result['message'] = 'not logged in'
        return self.createRes(401, result)
    user = Users.get(id=int(uid))
    me = self.get_user().get('uid') if self.get_user() else None
    if type(user) == ndb.key.Key:
        # cannot find user
        result['code'] = 401
        result['message'] = 'invalid uid'
        return self.createRes(401, result)
    elif user.key.id() == me:
        # my own record: include private fields
        result['code'] = 200
        result['message'] = 'OK'
        result['User'] = user.to_obj(mine = True)
        return self.createRes(200, result)
    else:
        # someone else's record: public fields only
        result['code'] = 200
        result['message'] = 'OK'
        result['User'] = user.to_obj()
        return self.createRes(200, result)
# LIST USERS
def list(self, **kwargs):
    # Not implemented: listing users is intentionally a no-op for now.
    pass
# SET USERS (ADD/UPDATE)
def regist(self, **kwargs):
    """ Register a new account from email/password/nickname arguments.

    Responds 400 when arguments are missing or the email is taken,
    201 with the new user's data on success.
    """
    result = {
        'code': 400,
        'message': 'bad request'
    }
    # check arguments
    if not (self.arguments.get('email') and self.arguments.get('password') and self.arguments.get('nickname')):
        result['code'] = 400
        result['message'] = 'bad request'
        return self.createRes(400, result)
    else:
        user, created = Users.regist(self.arguments.get('email'), self.arguments)
        if not created:
            # the email already has an account
            result['code'] = 400
            result['message'] = 'already exists'
            return self.createRes(400, result)
        else:
            result['code'] = 201
            result['message'] = 'OK'
            result['User'] = user.to_obj(mine=True)
            return self.createRes(201, result)
def put(self, **kwargs):
    """ HTTP PUT: treated identically to POST. """
    # FIX: the stray debug `print "put"` wrote to stdout; use the
    # logging module (already imported at module level) instead.
    logging.debug('put')
    self.post(**kwargs)
def post(self, **kwargs):
    """ HTTP POST: update an existing user or register a new one.

    With a ``uid`` kwarg the authenticated user's own record is updated
    (nickname, picture, password, ...).  Without it, a new user is
    registered from email/password/nickname arguments.  On success the
    handler either redirects to ``returnTo`` or returns a JSON response;
    on failure it renders the requested dialog template or returns 401.
    """
    result = {
        'code': 400,
        'message': 'bad request'
    }
    # ---- update an existing user --------------------------------------
    if kwargs.get('uid'):
        if self.get_user() is None:
            result['code'] = 401
            result['message'] = 'unauthorized'
        else:
            uid = self.get_user().get('uid')
            # a user may only modify his own record
            if kwargs.get('uid') != 'me' and uid != int(kwargs.get('uid')):
                result['code'] = 400
                result['message'] = 'already exists'
                return self.createRes(400, result)
            reqInfo = self.arguments
            # get user info
            user = Users.get(id=uid)
            if type(user) == ndb.key.Key:
                result['code'] = 400
                result['message'] = 'bad request'
            else:
                # using API, user can modify nickname and picture
                available_list = ['nickname', 'picture', 'hanasee', 'language'] + [kwargs.get('attribute')]
                reqInfo = []
                for field in self.arguments:
                    if field in available_list:
                        if hasattr(self.arguments[field], 'FieldStorageClass'):
                            # uploaded file: persist it and store the URL
                            dtCreated = datetime.datetime.now()
                            filename = "%d" % (time.mktime(dtCreated.timetuple())*1e3 + dtCreated.microsecond/1e3)
                            image_url = self.create_file(self.arguments.get(field).value, filename, self.arguments.get(field).type)
                            setattr(user, field, image_url)
                        elif field == 'password':
                            # password change requires the old password to match
                            if user.password == md5.md5(self.arguments.get('old_password')).hexdigest():
                                setattr(user, field, md5.md5(self.arguments.get('password')).hexdigest())
                            else:
                                result['code'] = 401
                                result['message'] = 'invalid password'
                        else:
                            setattr(user, field, self.arguments.get(field))
                if result['code'] != 401:
                    user.put()
                    result['code'] = 200
                    result['message'] = 'OK'
                    result['User'] = user.to_obj(mine = True)
    # ---- register a new user ------------------------------------------
    else:
        arguments = self.arguments
        args_regist = ['email', 'password', 'nickname']
        # check parameter validation
        if len(set(arguments) & set(args_regist)) == len(args_regist):
            user = Users.find(self.request.get('email'))
            if user:
                result['code'] = 401
                result['message'] = 'already registered'
            else:
                # insert as a new user
                # NOTE(review): the password is stored as given here while
                # the update path above stores an md5 digest -- confirm.
                user = Users(auto_id=True)
                user.password = self.arguments.get('password')
                user.set(self.convertRequsetParameter(self.arguments, ['password']))
                # BUG FIX: this condition referenced an undefined name
                # `appName` (NameError when reached); the session dict is
                # keyed by the appName request parameter, used below.
                if ('deviceInfo' in self.session) and (self.request.get('appName') in self.session['deviceInfo']):
                    deviceInfo = self.session['deviceInfo'][self.request.get('appName')]
                    # save reg_id in user
                    setattr(user, re.sub('\.', '', self.request.get('appName')), deviceInfo['regId'])
                    user.put()
                    # save user in device
                    device = ndb.Key(Devices, "%s|%s" % (self.request.get('appName'), deviceInfo['deviceId'])).get()
                    setattr(device, 'user', user.key)
                    device.put()
                self.session['user'] = user.to_obj(mine=True)
                result['code'] = 200
                result['message'] = 'OK'
    # ---- respond -------------------------------------------------------
    if result['code'] == 200:
        if self.arguments.get('returnTo', None):
            return self.redirect(str(self.arguments.get('returnTo')))
        else:
            return self.createRes(200, result)
    else:
        if self.arguments.get('returnTo', None):
            options = {
                'returnTo': self.arguments.get('returnTo'),
                'message': result['message']
            }
            if self.get_user():
                options['uid'] = self.get_user().get('uid')
            template = JINJA_ENVIRONMENT.get_template(self.arguments.get('dialog'))
            return self.response.write(template.render(options))
        else:
            return self.createRes(401, result)
def post_backup(self, **kwargs):
    """POST handler (backup copy of the user-registration/modify endpoint).

    The live portion normalizes request-argument names into ``reqInfo`` and
    bulk-saves them via ``Users.set``.  NOTE(review): everything after the
    first ``return`` is unreachable dead code -- an abandoned half-port of a
    Node.js handler (the ``db.users.regist`` / ``res.render`` callbacks
    survive inside the triple-quoted strings below).
    """
    # Default response; only overwritten by the early return today.
    result = {
        'code': 400,
        'message': 'bad request'
    }
    reqInfo = {}
    # Flatten argument names: "key[0]"-style names collect their values into
    # a list under "key"; dotted names have the dots stripped; anything else
    # is copied through unchanged.
    # NOTE(review): '\[\d\]' matches only single-digit indices, and these
    # patterns are not raw strings (DeprecationWarning on modern Pythons).
    for item in self.arguments:
        if bool(re.search('\[\d\]', item)):
            if re.sub('\[\d\]', '', item) in reqInfo:
                reqInfo[re.sub('\[\d\]', '', item)].append(self.arguments.get(item))
            else:
                reqInfo[re.sub('\[\d\]', '', item)] = [self.arguments.get(item)]
        elif bool(re.search('\.', item)):
            reqInfo[re.sub('\.', '', item)] = self.arguments.get(item)
        else:
            reqInfo[item] = self.arguments.get(item)
    Users.set(reqInfo)
    return self.createRes(200, {'me':'O'})
    # ------------------------------------------------------------------
    # DEAD CODE from here to the end of the method (unreachable after the
    # return above).  Even if reached it would fail: `null`, `Math`,
    # `id.toString()` and `self.argum` are JavaScript leftovers that would
    # raise NameError / AttributeError in Python.  Kept verbatim pending a
    # decision to delete or finish the port.
    # ------------------------------------------------------------------
    redirect_uri = self.session['returnTo'] if self.session and self.session['returnTo'] else self.arguments.get('returnTo')
    redirect_uri = redirect_uri + '&from=regist' if redirect_uri else null
    del self.session['returnTo']
    if not kwargs.get('id'):
        # registration path: no id supplied
        if not self.arguments.get('email') or not self.arguments.get('password'):
            return self.createRes(400, {'message': 'missing parameter'})
        current = time.time()
        # JS remnant (Math.*); also shadows the builtin `id`
        id = (current * 100) + Math.floor(Math.random() * 100)
        key = {
            'id': id.toString(),
            'email': self.arguments.get('email')
        }
        if self.arguments.get('nickname'):
            key['nickname'] = self.arguments.get('nickname')
        reqInfo = {}
        for item in self.arguments:
            reqInfo[item] = self.argum.get(item);
        """
        db.users.regist(key, reqInfo, function(err, user) {
            if err == 'already exists':
                return res.render('regist', {'message': '์ด๋ฉ์ผ ๋๋ ๋๋ค์์ ์ฌ์ฉํ ์ ์์ต๋๋ค.'})
                return self.createRes(409, {'message': err})
            elif err:
                return res.render('regist', {'message': '์ ์ ์๋ ์๋ฌ๊ฐ ๋ฐ์ํ์ต๋๋ค. ๋ค์ ์๋ํด์ฃผ์ธ์.'})
                return self.createRes(500, {'message': err})
            elif not user:
                return res.render('regist', {'message': err})
                return self.createRes(500, {'message': 'unknown error'})
            del user.password
            self.logIn(user, function(err) {
                if self.session.deviceInfo:
                    for (appName in self.session.deviceInfo):
                        if self.session.deviceInfo[appName] !== user[appName]:
                            regId = {}
                            regId[appName] = self.session.deviceInfo[appName].regId
                            keyDevice = {'appName': appName, 'deviceId': self.session.deviceInfo[appName].deviceId}
                            db.users.update({'id': user.id}, regId)
                            db.devices.update(keyDevice, {'user_id': user.id})
                if redirect_uri:
                    return res.redirect(redirect_uri)
                else:
                    result['code'] = 200
                    result['message'] = 'OK'
                    result['User'] = user
                    return self.createRes(200, user)
            })
        })
        """
    else:
        # modify my userinfo
        if kwargs.get('id') == 'me' or kwargs.get('id') == self.get_user().get('id'):
            if not self.get_user():
                return self.createRes(401, {'message': 'not logged in'})
            key = {
                # NOTE(review): bare `id` (the builtin) used as dict key --
                # almost certainly meant the string 'id'
                id: self.get_user().get('id')
            }
            reqInfo = {}
            for item in self.arguments:
                reqInfo[item] = self.arguments.get(item);
            # cannot modify id
            del reqInfo['id']
            del reqInfo['email']
            del reqInfo['kakao']
            if kwargs.get('attribute') == 'connection':
                if not reqInfo.get('connectionProvider'):
                    return self.createRes(400, {'message': 'missing parameter'})
                unique = reqInfo.get('connectionProvider')[0:3]+self.get_user().get('id')[2:4]+str(time.time())
                # NOTE(review): digest result is discarded, so `connectionKey`
                # on the next line is an undefined name (NameError if reached)
                md5.md5(unique).hexdigest()
                reqInfo[reqInfo['connectionProvider']] = connectionKey
                del reqInfo['connectionProvider']
            """
            db.users.update(key, reqInfo, function(err, user) {
                del user['password']
                del user['kakao']
                del user['facebook']
                self.logIn(user, function(err) {
                    if kwargs.get('attribute') == 'connection':
                        user['connectionKey'] = connectionKey
                    return self.createRes(200, user)
                })
            })
            """
        elif self.get_user().get('id') != kwargs.get('id'):
            return self.createRes(401, {'message': 'cannot modify others'})
# DEL USERS
def delete(self, **kwargs):
    """DELETE handler: remove the logged-in user's own account.

    Deletes the user's connection records and the user entity itself, then
    clears the session.  Only the owner of ``uid`` may delete it; anyone
    else (or an anonymous caller) gets a 401.
    """
    unauthorized = {
        'code': 401,
        'message': 'unauthorized'
    }
    # Reject anonymous callers outright.
    viewer = self.get_user()
    if viewer is None:
        return self.createRes(401, unauthorized)
    # A user may delete only their own account.
    uid = int(kwargs.get('uid'))
    if uid != viewer.get('uid'):
        return self.createRes(401, unauthorized)
    # Remove dependent connection records first, then the user entity.
    target = Users.get(id=uid)
    for connection in Connections.find(target.key):
        connection.key.delete()
    target.key.delete()
    # Drop the (now dangling) login session.
    self.session.clear()
    return self.createRes(200, {
        'code': 200,
        'message': 'OK'
    })
def reset(self, **kwargs):
    """Password-reset entry point.

    NOTE(review): only the parameter-validation prologue is visible here;
    the body appears truncated -- confirm the remainder (token generation /
    mail dispatch, presumably) against the original file.
    """
    # Default response skeleton, mirroring the other handlers in this class.
    result = {
        'code': 400,
        'message': 'bad request'
    }
    # Both the account email and a post-reset redirect target are required.
    if not self.arguments.get('email') or not self.arguments.get('returnTo'):
        return self.createRes(400, {'message': 'invalid request'})
|
#! python
# --- imports ----------------------------------------------------------
from airtable import Airtable
import discord, fstrings, re, random, os
from datetime import datetime
# datetime object containing current date and time

# --- configuration (all secrets come from the environment) ------------
AIRTABLE_API_KEY = os.getenv('AIRTABLE_API_KEY') # stored in .env
AIRTABLE_BASE_KEY = os.getenv('AIRTABLE_BASE_KEY') # stored in .env
CAMPAIGN_NAME = os.getenv('CAMPAIGN_NAME')

# --- one Airtable handle per table in the campaign base ---------------
campaign_airtable = Airtable(AIRTABLE_BASE_KEY, 'Campaign')
party_airtable = Airtable(AIRTABLE_BASE_KEY, 'Parties')
characters_airtable = Airtable(AIRTABLE_BASE_KEY, 'Characters')
scenario_airtable = Airtable(AIRTABLE_BASE_KEY, 'Scenarios')
items_airtable = Airtable(AIRTABLE_BASE_KEY, 'Items') # items record lookup
abilities_airtable = Airtable(AIRTABLE_BASE_KEY, 'Character Abilities') # abilities record lookup
classes_airtable = Airtable(AIRTABLE_BASE_KEY, 'Character Classes') # class record lookup
storylines_airtable = Airtable(AIRTABLE_BASE_KEY, 'Storylines')
players_airtable = Airtable(AIRTABLE_BASE_KEY, 'Players')
achievements_airtable = Airtable(AIRTABLE_BASE_KEY, 'Achievements')
class Player:
    """A Discord user who owns characters in the campaign.

    Wraps the matching row of the Players table; character creation and
    activation write straight back to Airtable.
    """

    # xp totals a fresh character starts with, indexed by world prosperity
    character_levels = (0,45,95,150,210,275,345,420,500)
    prosperity_levels = (0,4,9,15,22,30,39,50,64)

    def __init__(self, author):
        self.name = author
        self.player_rec = players_airtable.match('discordUsername', author)

    def activate_character(self, ch_name):
        """Attach the named character to this player and flag it active."""
        record = characters_airtable.match('name', ch_name)
        payload = {'discordUsername': [self.player_rec['id']], 'isActive': True}
        characters_airtable.update(record['id'], payload)

    def create_character(self, ch_name, ch_class):
        """Create a new character of class ``ch_class`` named ``ch_name``.

        Starting xp and gold scale with current world prosperity.
        NOTE: campaign and party names are hard-coded here.
        """
        self.world = World(campaign_airtable.match('name', 'Camp Pain')['id'])
        self.party = Party(party_airtable.match('name', 'Wyld Stallyns')['id'])
        level = self.world.prosperity
        starting_xp = self.character_levels[level]
        starting_gold = (level + 1) * 15
        class_id = classes_airtable.match('name', ch_class)['id']
        characters_airtable.insert(
            {
                'name': ch_name, 'xp': starting_xp, 'gold': starting_gold,
                'checks': 0, 'class': [class_id], 'isActive': True,
                'owner': [self.player_rec['id']], 'discordUsername': [self.player_rec['id']],
                'campaign': [self.world.campaign_rec['id']], 'party': [self.party.party_rec['id']]
            })
        print(f"[Isaacbot Logger]--{datetime.now()}-- New Character {ch_name} {ch_class} ")
class World:
    """Campaign-wide state: prosperity, donations, global achievements.

    Wraps one record of the Campaign table; all mutators persist straight
    back to Airtable and log to stdout.
    """
    # attrs: campaign_rec, name, donations, pticks, prosperity, achievements

    # tick thresholds: reaching prosperity_levels[i] ticks means level i+1
    prosperity_levels = (0,4,9,15,22,30,39,50,64)
    # cumulative sanctuary-donation milestones
    donation_levels = (100,150,200,250,300,350,400,500,600,700,800,900,1000)

    def __init__(self, campaign_rec_id):
        # Use World(character.campaign[0])
        self.campaign_rec = campaign_airtable.get(campaign_rec_id)
        # campaign name is an env variable for the bot eg CAMPAIGN_NAME=Camp Pain
        self.name = self.campaign_rec['fields']['name']
        self.donations = self.campaign_rec['fields']['totalDonations']
        self.pticks = self.campaign_rec['fields']['prosperityTicks']
        self.prosperity = self.prosperity_calc(self.pticks)
        self.achievements = self.campaign_rec['fields']['achievements']

    def prosperity_calc(self, pticks):
        """Return the prosperity level (1-9) implied by a raw tick count.

        Fixed: the original indexed prosperity_levels[i+1] on every pass and
        raised IndexError once pticks reached the top threshold (64).
        """
        if pticks >= self.prosperity_levels[-1]:
            return len(self.prosperity_levels)  # capped at max level
        for i in range(len(self.prosperity_levels) - 1):
            if self.prosperity_levels[i] <= pticks < self.prosperity_levels[i + 1]:
                return i + 1
        return 1  # defensive: a negative tick count maps to level 1

    def gain_prosperity(self):
        """Add one tick; if that crosses a threshold, bump the level and
        unlock the items gated behind it."""
        self.gain_ptick()
        if self.pticks in self.prosperity_levels:
            self.prosperity += 1
            print(f"[Isaacbot Logger]--{datetime.now()}-- +1 Overall Prosperity....{self.name}, {self.prosperity}")
            self.unlock_prosperity(self.prosperity)

    def gain_ptick(self):
        """Persist a one-tick increase."""
        self.pticks += 1
        campaign_airtable.update(self.campaign_rec['id'], {'prosperityTicks':self.pticks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Gain prosperity....{self.name}, {self.pticks} ticks")

    def lose_ptick(self):
        """Persist a one-tick decrease."""
        self.pticks -= 1
        campaign_airtable.update(self.campaign_rec['id'], {'prosperityTicks':self.pticks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lose prosperity....{self.name}, {self.pticks} ticks")

    def unlock_prosperity(self, level_to_unlock):
        """Unlock every item gated behind ``level_to_unlock``."""
        items_to_unlock = items_airtable.search('prosperityRequirement', level_to_unlock)
        for item in items_to_unlock:
            items_airtable.update(item['id'], {'maxCount':item['fields']['realMax'], 'isUnlocked':True})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lvl{level_to_unlock} items unlocked")

    def donate(self):
        """Record a 10-gold sanctuary donation."""
        self.donations += 10
        campaign_airtable.update(self.campaign_rec['id'], {'totalDonations':self.donations})
        print(f"[Isaacbot Logger]--{datetime.now()}-- +10gp donated.....Total: {self.donations}")

    def calc_donations_needed(self):
        """Return the next donation milestone above the current total.

        Fixed: the original raised IndexError at >= 1000 donations and
        silently returned None below the first milestone; it now returns the
        first milestone not yet reached, or None once all are passed.
        """
        for milestone in self.donation_levels:
            if self.donations < milestone:
                self.next_donation_level = milestone
                return self.next_donation_level
        self.next_donation_level = None
        return None
class Scenario:
    """One scenario record: unlock state, completion state and descriptions.

    Can be used to unlock or complete a scenario.
    In future: get all available scenarios, add description, scenario info.
    """
    # attrs: scenario (the raw record), number, name, unlocked, description,
    # complete, outcome

    def __init__(self, scene_no):
        self.scenario = scenario_airtable.match('number', int(scene_no))
        self.number = int(scene_no)
        self.name = ""
        self.unlocked = None
        self.description = ""
        self.complete = None
        self.outcome = ""
        # Narrowed from bare `except`: a missing record / missing field shows
        # up as TypeError (unsubscriptable match result) or KeyError; any
        # other exception should propagate rather than be swallowed.
        try:
            self.unlocked = self.scenario['fields']['isUnlocked']
            self.name = self.scenario['fields']['name']
            try:
                self.description = self.scenario['fields']['description']
            except (KeyError, TypeError):
                pass  # description is optional
        except (KeyError, TypeError):
            self.unlocked = False
        try:
            if self.scenario['fields']['isComplete'] == True:
                self.complete = True
                try:
                    self.outcome = self.scenario['fields']['outcome']
                except (KeyError, TypeError):
                    pass  # outcome is optional
        except (KeyError, TypeError):
            self.complete = False
            self.outcome = ""

    def mark_unlocked(self, scene_name, scene_description=''):
        """Flag the scenario unlocked and record its name/description."""
        self.unlocked = self.scenario['fields']['isUnlocked'] = True
        self.name = scene_name
        self.description = scene_description
        scenario_airtable.update(self.scenario['id'], {'isUnlocked':True, 'name':self.name, 'description': self.description})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number} unlocked")

    def mark_complete(self):
        """Flag the scenario complete."""
        self.scenario['fields']['isComplete'] = True
        scenario_airtable.update(self.scenario['id'], {'isComplete':True})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number}: {self.name} complete")

    def update_description(self, description):
        """Overwrite the stored description."""
        self.description = description
        scenario_airtable.update(self.scenario['id'], {'description':description})
        print(f"[Isaacbot Logger]--{datetime.now()}--Scenario {self.number} description added -- '{description}'")

    def update_outcome(self, outcome):
        """Overwrite the stored outcome text."""
        self.outcome = outcome
        scenario_airtable.update(self.scenario['id'], {'outcome':self.outcome})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number} outcome added -- '{outcome}'")
class Party:
    """The adventuring party: members, reputation, and the shop discount.

    Wraps one record of the Parties table.
    """
    # attrs: party_rec, name, members, reputation, discount, achievements

    # reputation magnitudes at which the shop price modifier changes (+ or -)
    discount_levels = (0,3,7,11,15,19)

    def __init__(self, party_rec_id):
        self.party_rec = party_airtable.get(party_rec_id)
        self.name = self.party_rec['fields']['name']
        self.members = self.party_rec['fields']['characters']
        self.reputation = self.party_rec['fields']['reputation']
        self.discount = self.discount_calc(self.reputation)
        self.achievements = self.party_rec['fields']['achievements']

    def discount_calc(self, reputation):
        """Return the gold modifier for the buy action at ``reputation``.

        Negative discount = cheaper shop; positive = surcharge.
        Fixes over the original: it ignored its parameter (read
        self.reputation), raised IndexError at the top band, and returned 0
        for every negative reputation because abs() was applied to only one
        of the two bounds.
        """
        cap = self.discount_levels[-1]  # 19
        if reputation >= cap:
            return -5
        if reputation <= -cap:
            return 5
        magnitude = abs(reputation)
        discount = 0
        for j in range(len(self.discount_levels) - 1):
            if self.discount_levels[j] <= magnitude < self.discount_levels[j + 1]:
                discount = -j
                break
        # negative reputation mirrors the modifier as a surcharge
        return -discount if reputation < 0 else discount

    def gain_reputation(self):
        """+1 reputation; persists and refreshes the cached discount."""
        self.reputation += 1
        self.discount = self.discount_calc(self.reputation)
        # Fixed: reputation lives on the party record, so write to the
        # Parties table (the original sent the update to the Campaign table).
        party_airtable.update(self.party_rec['id'], {'reputation':self.reputation})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Gain Reputation....{self.name}, {self.reputation}")

    def lose_reputation(self):
        """-1 reputation; persists and refreshes the cached discount."""
        self.reputation -= 1
        self.discount = self.discount_calc(self.reputation)
        # Fixed: same wrong-table bug as gain_reputation.
        party_airtable.update(self.party_rec['id'], {'reputation':self.reputation})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lose Reputation....{self.name}, {self.reputation}")

    def gain_achiev(self):
        pass  # TODO: party achievements not implemented yet

    def lose_achiev(self):
        pass  # TODO: party achievements not implemented yet
class Character:
    """One playable character and its Airtable-backed state.

    Wraps the Characters-table row matched by the owner's Discord username;
    every mutator persists straight back to Airtable and logs to stdout.
    """
    # attrs: character_rec, party, campaign, name, charclass, xp, lvl, gold,
    # checks, ch, id, items, item_nums, abilities, abil_nums

    # cumulative xp required per level: character_levels[i] -> level i+1
    character_levels = (0,45,95,150,210,275,345,420,500)

    def __init__(self, author):
        self.character_rec = characters_airtable.match('discordUsername', author) #returns dict
        self.party = self.character_rec['fields']['party']  # linked record id
        self.campaign = self.character_rec['fields']['campaign']  # linked record id
        self.name = self.character_rec['fields']['name']
        self.charclass = classes_airtable.get(self.character_rec['fields']['class'][0])['fields']['name']
        self.xp = self.character_rec['fields']['xp']
        self.lvl = self.lvl_calc()
        self.gold = self.character_rec['fields']['gold']
        self.checks = self.character_rec['fields']['checks']
        self.ch = self.check_calc()
        self.id = self.character_rec['id']
        try:
            self.items = self.character_rec['fields']['items']
        except KeyError:
            # fresh characters have no 'items' field yet; create it remotely
            self.items = []
            characters_airtable.update(self.character_rec['id'], {'items':self.items})
        finally:
            self.item_nums = sorted(items_airtable.get(a)['fields']['number'] for a in self.items)
        try:
            self.abilities = self.character_rec['fields']['abilities']
        except KeyError:
            self.abilities = []
            characters_airtable.update(self.character_rec['id'], {'abilities':self.abilities})
        self.abil_nums = sorted(abilities_airtable.get(a)['fields']['number'] for a in self.abilities)

    def retire(self, quest=''):
        """Flag the character retired (and inactive), recording its quest."""
        characters_airtable.update(self.character_rec['id'], {'isActive': False, 'isRetired': True, 'quest': quest})

    def deactivate(self):
        """Detach the character from its Discord user and deactivate it."""
        characters_airtable.update(self.id, {'discordUsername': '', 'isActive': False})

    def gain_xp(self, xp_gained):
        """Add ``xp_gained`` to the running total; return True on level-up."""
        self.xp += xp_gained
        characters_airtable.update(self.character_rec['id'], {'xp':self.xp})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Gain {xp_gained}xp Total: {self.xp}xp")
        new_lvl = self.lvl_calc()
        if new_lvl > self.lvl:
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} leveled up to Lvl {new_lvl}")
            # Fixed: jump straight to the computed level; the original's
            # `self.lvl += 1` under-counted when several levels were gained.
            self.lvl = new_lvl
            return True
        else:
            return False

    def change_xp(self, new_xp):
        """Overwrite the xp total; return True if that raised the level."""
        self.xp = new_xp
        characters_airtable.update(self.character_rec['id'], {'xp':self.xp})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.xp}xp")
        new_lvl = self.lvl_calc()
        leveled_up = new_lvl > self.lvl
        if leveled_up:
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} leveled up to Lvl {new_lvl}")
        # Fixed: keep the cached level in sync (the original left it stale).
        self.lvl = new_lvl
        return leveled_up

    def lvl_calc(self):
        """Return the level (1-9) implied by self.xp."""
        if self.xp >= self.character_levels[-1]:
            return 9
        # Fixed: bounded loop; the original left `level` unbound (NameError)
        # for out-of-range xp values.
        level = 1
        for i in range(len(self.character_levels) - 1):
            if self.character_levels[i] <= self.xp < self.character_levels[i + 1]:
                level = i + 1
                break
        return level

    def gain_gold(self, gold_gained):
        """Add gold (pass a negative number for gold lost)."""
        self.gold += gold_gained
        characters_airtable.update(self.character_rec['id'], {'gold':self.gold})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} +{gold_gained}gp Total: {self.gold}gold")

    def change_gold(self, new_gold):
        """Overwrite the gold total."""
        self.gold = new_gold
        characters_airtable.update(self.character_rec['id'], {'gold':self.gold})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.gold}gold")

    def gain_checks(self, checks_gained):
        """Add battle-goal checks (pass a negative number to remove)."""
        self.checks += checks_gained
        characters_airtable.update(self.character_rec['id'], {'checks':self.checks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} +{checks_gained} checks Total: {self.checks}checks")
        self.ch = self.check_calc()

    def change_checks(self, new_checks):
        """Overwrite the check total."""
        self.checks = new_checks
        characters_airtable.update(self.character_rec['id'], {'checks':self.checks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.checks}checks")
        self.ch = self.check_calc()

    def check_calc(self):
        """Cache and return the singular/plural noun for self.checks."""
        if self.checks == 1:
            self.ch = 'check'
        else:
            self.ch = 'checks'
        return self.ch

    def level_up(self, abil_to_add):
        """Append new abilities; ``abil_to_add`` is a list of Airtable record
        ids, e.g. ['rec92398626']."""
        self.abilities = self.abilities + list(abil_to_add)
        characters_airtable.update(self.character_rec['id'], {'abilities':self.abilities})

    def item_transaction(self, action, item_num):
        """Gain, lose, or loot item ``item_num`` and persist the item list."""
        item = Item(item_num)
        if action == 'gain':
            self.items.append(item.item_rec['id'])
            self.item_nums = sorted((items_airtable.get(a)['fields']['number'] for a in self.items))
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} gain item {item.number}")
        elif action == 'lose':
            self.items.remove(item.item_rec['id'])
            self.item_nums = sorted((items_airtable.get(a)['fields']['number'] for a in self.items))
            # Fixed log typo: was "{self.name}e lose item".
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} lose item {item.number}")
        elif action == 'loot':
            self.items.append(item.item_rec['id'])
            self.item_nums = sorted((items_airtable.get(a)['fields']['number'] for a in self.items))
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} loot item {item.number}")
        characters_airtable.update(self.character_rec['id'], {'items': self.items})

    def abil_transaction(self, action, abil_num):
        """Gain or lose ability ``abil_num`` and persist the ability list."""
        abil = Ability(abil_num)
        if action == 'gain':
            self.abilities.append(abil.ability['id'])
            self.abil_nums = sorted((abilities_airtable.get(a)['fields']['number'] for a in self.abilities))
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} gain abil {abil.number}, {self.abil_nums}")
        elif action == 'lose':
            self.abilities.remove(abil.ability['id'])
            self.abil_nums = sorted((abilities_airtable.get(a)['fields']['number'] for a in self.abilities))
            # Fixed: log this character's name (was hard-coded "Ghostface");
            # abil_nums is already sorted, no need to re-sort.
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} remove abil {abil.number}, {self.abil_nums}")
        characters_airtable.update(self.character_rec['id'], {'abilities':self.abilities})
class Item:
    """One item record: unlock state, cost, and availability counts."""

    def __init__(self, item_num):
        self.item_rec = items_airtable.match('number', item_num)
        self.number = item_num
        self.level = self.item_rec['fields']['prosperityRequirement']
        # Narrowed from bare `except`: still-locked designs simply lack most
        # of these fields (KeyError); anything else should propagate.
        try:
            self.unlocked = self.item_rec['fields']['isUnlocked']
            self.number = self.item_rec['fields']['number']
            self.name = self.item_rec['fields']['name']
            self.cost = self.item_rec['fields']['cost']
            self.text = self.item_rec['fields']['description']
            self.numberAvailable = self.item_rec['fields']['numberAvailable']
            self.maxCount = self.item_rec['fields']['maxCount']
            self.realMax = self.item_rec['fields']['realMax']
            self.owners = self.item_rec['fields']['characterCount']
            self.num_name = f"{self.number}: {self.name}"
            self.description = self.item_rec['fields']['description']
        except KeyError:
            # locked design: no shop data yet, but realMax is always present
            self.unlocked = False
            self.numberAvailable = 0
            self.maxCount = 0
            self.realMax = self.item_rec['fields']['realMax']

    def unlock_design(self):
        """Unlock the design outright (all copies become available).

        Triggered via gain_prosperity or looting a design.
        """
        self.unlocked = True
        self.maxCount = self.realMax
        update = {'isUnlocked':True, 'maxCount':self.maxCount}
        items_airtable.update(self.item_rec['id'], update)
        print(f"[Isaacbot Logger]--{datetime.now()}-- Item Design {self.number} unlocked")

    def unlock_loot(self):
        """Unlock one looted copy (maxCount grows one at a time up to realMax)."""
        self.unlocked = True
        if self.maxCount < self.realMax:
            self.maxCount += 1
        items_airtable.update(self.item_rec['id'], {'isUnlocked':self.unlocked, 'maxCount':self.maxCount})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Item {self.number} looted.")
class Ability:
    """Read-only wrapper around one record of the Character Abilities table."""

    def __init__(self, abil_num):
        record = abilities_airtable.match('number', abil_num)
        fields = record['fields']
        self.ability = record
        self.number = abil_num
        self.lvl = fields['levelRequired']
        self.charclass = classes_airtable.get(fields['class'][0])['fields']['name']
        self.name = fields['name']
        self.num_name = f"Lvl {self.lvl} -- {self.name}"
print('done')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.