content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# Test for CVE-2018-xxxxx.
from mosq_test_helper import *
import signal
def write_config(filename, port, per_listener):
    """Write a minimal mosquitto broker config for this test.

    Args:
        filename: path of the .conf file to create; the password_file path is
            derived from it by swapping the .conf suffix for .pwfile.
        port: listener port number.
        per_listener: "true"/"false" string for per_listener_settings.
    """
    with open(filename, 'w') as f:
        f.write("per_listener_settings %s\n" % (per_listener))
        f.write("port %d\n" % (port))
        f.write("password_file %s\n" % (filename.replace('.conf', '.pwfile')))
        # Fix: terminate the final option with a newline like the other lines.
        f.write("allow_anonymous false\n")
def write_pwfile(filename, bad_line1, bad_line2):
    """Write a mosquitto password file containing the two valid test users.

    Args:
        filename: path of the .pwfile to create.
        bad_line1: optional extra line written before the valid entries.
        bad_line2: optional extra line written after the valid entries.
    """
    entries = []
    if bad_line1 is not None:
        entries.append(bad_line1)
    # Username test, password test
    entries.append('test:$6$njERlZMi/7DzNB9E$iiavfuXvUm8iyDZArTy7smTxh07GXXOrOsqxfW6gkOYVXHGk+W+i/8d3xDxrMwEPygEBhoA8A/gjQC0N2M4Lkw==')
    # Username empty, password 0 length
    entries.append('empty:$6$o+53eGXtmlfHeYrg$FY7X9DNQ4uU1j0NiPmGOOSU05ZSzhqNmNhXIof/0nLpVb1zDhcRHdaC72E3YryH7dtTiG/r6jH6C8J+30cZBgA==')
    if bad_line2 is not None:
        entries.append(bad_line2)
    with open(filename, 'w') as f:
        f.write('\n'.join(entries) + '\n')
def do_test(port, connack_rc, username, password):
    """Connect once with the given credentials and expect a matching CONNACK.

    Raises AssertionError when the broker does not answer with connack_rc.
    """
    keepalive = 60
    connect_packet = mosq_test.gen_connect("username-password-check", keepalive=keepalive, username=username, password=password)
    connack_packet = mosq_test.gen_connack(rc=connack_rc)
    succeeded = False
    try:
        sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
        succeeded = True
        sock.close()
    except mosq_test.TestError:
        pass
    finally:
        if not succeeded:
            raise AssertionError
def username_password_tests(port):
    """Start a broker and exercise every username/password combination.

    Only the valid pair ('test', 'test') may be accepted (CONNACK rc 0);
    every other combination must be refused with rc 5.
    """
    broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
    try:
        for username in ('test', 'empty', 'bad', '', None):
            for password in ('test', 'bad', '', None):
                expected_rc = 0 if username == 'test' and password == 'test' else 5
                do_test(port, connack_rc=expected_rc, username=username, password=password)
    except ValueError:
        pass
    finally:
        broker.terminate()
        broker.wait()
def all_tests(port):
    """Run the credential test matrix against a series of password files.

    Each case rewrites the password file — optionally wrapping the two valid
    entries with a leading and/or trailing extra line — and then re-runs the
    full username/password matrix to make sure the parser copes with it.
    """
    # (bad_line1, bad_line2) pairs inserted before/after the valid entries.
    cases = [
        # Valid file, single user
        (None, None),
        # Invalid file, first line blank
        ('', None),
        # Invalid file, last line blank
        (None, ''),
        # Invalid file, first and last line blank
        ('', ''),
        # Invalid file, first line 'comment'
        ('#comment', None),
        # Invalid file, last line 'comment'
        (None, '#comment'),
        # Invalid file, first and last line 'comment'
        ('#comment', '#comment'),
        # Invalid file, first line blank and last line 'comment'
        ('', '#comment'),
        # Invalid file, first line incomplete
        ('bad:', None),
        # Invalid file, first line incomplete, but with "password"
        ('bad:bad', None),
        # Invalid file, first line incomplete, partial password hash
        ('bad:$', None),
        ('bad:$6', None),
        ('bad:$6$', None),
        # Valid file, first line incomplete, has valid salt but no password hash
        ('bad:$6$njERlZMi/7DzNB9E', None),
        ('bad:$6$njERlZMi/7DzNB9E$', None),
        # Valid file, first line has invalid hash designator
        ('bad:$5$njERlZMi/7DzNB9E$iiavfuXvUm8iyDZArTy7smTxh07GXXOrOsqxfW6gkOYVXHGk+W+i/8d3xDxrMwEPygEBhoA8A/gjQC0N2M4Lkw==', None),
        # Invalid file, missing username but valid password hash
        (':$6$njERlZMi/7DzNB9E$iiavfuXvUm8iyDZArTy7smTxh07GXXOrOsqxfW6gkOYVXHGk+W+i/8d3xDxrMwEPygEBhoA8A/gjQC0N2M4Lkw==', None),
        # Valid file, valid username but password salt not base64
        ('bad:$6$njER{ZMi/7DzNB9E$iiavfuXvUm8iyDZArTy7smTxh07GXXOrOsqxfW6gkOYVXHGk+W+i/8d3xDxrMwEPygEBhoA8A/gjQC0N2M4Lkw==', None),
        # Valid file, valid username but password hash not base64
        ('bad:$6$njERlZMi/7DzNB9E$iiavfuXv{}8iyDZArTy7smTxh07GXXOrOsqxfW6gkOYVXHGk+W+i/8d3xDxrMwEPygEBhoA8A/gjQC0N2M4Lkw==', None),
    ]
    for bad_line1, bad_line2 in cases:
        write_pwfile(pw_file, bad_line1=bad_line1, bad_line2=bad_line2)
        username_password_tests(port)
# Script entry: run every password-file scenario with per-listener settings
# both disabled and enabled, then clean up the generated files.
port = mosq_test.get_port()
conf_file = os.path.basename(__file__).replace('.py', '.conf')
pw_file = os.path.basename(__file__).replace('.py', '.pwfile')
try:
    for per_listener in ('false', 'true'):
        write_config(conf_file, port, per_listener)
        all_tests(port)
finally:
    os.remove(conf_file)
    os.remove(pw_file)
sys.exit(0)
|
import os
import torch
import argparse
import TD3
import OurDDPG
import DDPG
import random
import numpy as np
import torch.nn.functional as F
from sac import SAC
from utils import make_env
from pytorch_sac.agent.sac import SACAgent as PytorchSAC
from pytorch_sac.agent.random_noise_sac import RandomNoiseSACAgent
from pytorch_sac.agent.smoothed_sac import SmoothedSACAgent
from pytorch_sac.agent.fr_sac import FuncRegSACAgent
from main import NETWORK_CLASSES
from create_qval_dataset import CustomReplayBuffer
from tqdm import trange
from main import eval_policy
# Run on GPU when available, otherwise CPU; training tensors are moved here.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def experiment(variant):
    """Distill a pretrained agent into freshly initialized actor/critic nets.

    Loads a replay buffer of (state, action, correct_action, q_value) tuples,
    regresses the policy's actor onto `correct_action` and `critic.Q1` onto
    `q_value`, evaluates the distilled policy, and saves the losses, the
    evaluation return, and the network weights next to the replay buffer.

    Args:
        variant: dict of hyperparameters (see the argparse setup in __main__).

    Raises:
        NotImplementedError: unknown network class or policy name.
        RuntimeError: when `load_model` is set (unsupported here).
    """
    print('CUDA status:', torch.cuda.is_available())
    env = make_env(variant['env'])
    # Set seeds. The stdlib `random` module is seeded too because
    # random.shuffle() is used for the train/val split below.
    variant['seed'] = int(variant['seed'])
    env.seed(int(variant['seed']))
    torch.manual_seed(int(variant['seed']))
    np.random.seed(int(variant['seed']))
    random.seed(int(variant['seed']))
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    kwargs = {"state_dim": state_dim, "action_dim": action_dim, "max_action": max_action,
              "discount": variant['discount'], "tau": variant['tau'],
              'network_class': NETWORK_CLASSES[variant['network_class']]}
    # Per-architecture keyword arguments for the value/policy networks.
    mlp_network_kwargs = dict(n_hidden=variant['n_hidden'],
                              hidden_dim=variant['hidden_dim'],
                              first_dim=variant['first_dim'])
    dropout_mlp_network_kwargs = dict(n_hidden=variant['n_hidden'],
                                      hidden_dim=variant['hidden_dim'],
                                      first_dim=variant['first_dim'],
                                      dropout_p=variant['dropout_p'])
    variable_init_mlp_network_kwargs = dict(n_hidden=variant['n_hidden'],
                                            hidden_dim=variant['hidden_dim'],
                                            first_dim=variant['first_dim'],
                                            sigma=variant['sigma'])
    fourier_network_kwargs = dict(n_hidden=variant['n_hidden'],
                                  hidden_dim=variant['hidden_dim'],
                                  fourier_dim=variant['fourier_dim'],
                                  sigma=variant['sigma'],
                                  concatenate_fourier=variant['concatenate_fourier'],
                                  train_B=variant['train_B'])
    siren_network_kwargs = dict(n_hidden=variant['n_hidden'],
                                hidden_dim=variant['hidden_dim'],
                                first_omega_0=variant['omega'],
                                hidden_omega_0=variant['omega'])
    if variant['network_class'] in {'MLP', 'D2RL', 'ConcatMLP', 'SpectralMLP'}:
        kwargs['network_kwargs'] = mlp_network_kwargs
    elif variant['network_class'] == 'DropoutMLP':
        kwargs['network_kwargs'] = dropout_mlp_network_kwargs
    elif variant['network_class'] == 'VariableInitMLP':
        kwargs['network_kwargs'] = variable_init_mlp_network_kwargs
    elif variant['network_class'] in {'FourierMLP', 'LogUniformFourierMLP'}:
        kwargs['network_kwargs'] = fourier_network_kwargs
    elif variant['network_class'] == 'Siren':
        kwargs['network_kwargs'] = siren_network_kwargs
    else:
        raise NotImplementedError
    # Initialize policy
    if variant['policy'] == "TD3":
        # Target policy smoothing is scaled wrt the action scale.
        # BUG FIX: the arithmetic used to live inside the dict-key string
        # (variant['policy_noise * max_action']), which raises KeyError.
        kwargs["policy_noise"] = variant['policy_noise'] * max_action
        kwargs["noise_clip"] = variant['noise_clip'] * max_action
        kwargs["policy_freq"] = variant['policy_freq']
        policy = TD3.TD3(**kwargs)
    elif variant['policy'] == "OurDDPG":
        policy = OurDDPG.DDPG(**kwargs)
    elif variant['policy'] == "DDPG":
        policy = DDPG.DDPG(**kwargs)
    elif variant['policy'] == "SAC":
        kwargs['lr'] = variant['lr']
        kwargs['alpha'] = variant['alpha']
        kwargs['automatic_entropy_tuning'] = variant['automatic_entropy_tuning']
        kwargs['weight_decay'] = variant['weight_decay']
        # left out dmc
        policy = SAC(**kwargs)
    elif 'PytorchSAC' in variant['policy']:
        kwargs['action_range'] = [float(env.action_space.low.min()), float(env.action_space.high.max())]
        kwargs['actor_lr'] = variant['lr']
        kwargs['critic_lr'] = variant['lr']
        kwargs['alpha_lr'] = variant['alpha_lr']
        kwargs['weight_decay'] = variant['weight_decay']
        kwargs['no_target'] = variant['no_target']
        kwargs['mlp_policy'] = variant['mlp_policy']
        kwargs['mlp_qf'] = variant['mlp_qf']
        # The pytorch_sac agents take action_range instead of max_action.
        del kwargs['max_action']
        if variant['policy'] == 'PytorchSAC':
            policy = PytorchSAC(**kwargs)
        elif variant['policy'] == 'RandomNoisePytorchSAC':
            kwargs['noise_dist'] = variant['noise_dist']
            kwargs['noise_scale'] = variant['noise_scale']
            policy = RandomNoiseSACAgent(**kwargs)
        elif variant['policy'] == 'SmoothedPytorchSAC':
            kwargs['n_critic_samples'] = variant['n_critic_samples']
            kwargs['noise_dist'] = variant['noise_dist']
            kwargs['noise_scale'] = variant['noise_scale']
            policy = SmoothedSACAgent(**kwargs)
        elif variant['policy'] == 'FuncRegPytorchSAC':
            kwargs['critic_target_update_frequency'] = variant['critic_freq']
            kwargs['fr_weight'] = variant['fr_weight']
            policy = FuncRegSACAgent(**kwargs)
    else:
        raise NotImplementedError
    if variant['load_model'] != "":
        # Distillation always starts from scratch; loading weights is
        # unsupported in this script.
        raise RuntimeError
    # Load the replay buffer generated by create_qval_dataset.
    replay_buffer = torch.load(os.path.join(variant['replay_buffer_folder'], 'generated_replay_buffer.pt'))
    policy_optimizer = torch.optim.Adam(policy.actor.parameters(), lr=variant['lr'])
    qf_optimizer = torch.optim.Adam(policy.critic.Q1.parameters(), lr=variant['lr'])
    # 90/10 train/val split over buffer indices, for both action and q_value.
    indices = np.arange(replay_buffer.max_size)
    random.shuffle(indices)
    split = int(0.9 * len(indices))
    train_indices = indices[:split]
    val_indices = indices[split:]

    def _as_dataset(idx):
        # Columns: state, taken action, teacher action, teacher Q-value.
        return torch.utils.data.TensorDataset(torch.tensor(replay_buffer.state[idx]).float(),
                                              torch.tensor(replay_buffer.action[idx]).float(),
                                              torch.tensor(replay_buffer.correct_action[idx]).float(),
                                              torch.tensor(replay_buffer.q_value[idx]).float())

    train_dataset = _as_dataset(train_indices)
    val_dataset = _as_dataset(val_indices)
    # Train the fresh networks on the dataset.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=variant['batch_size'], shuffle=True,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=variant['batch_size'], shuffle=True,
                                             pin_memory=True)
    train_q_losses = []
    train_policy_losses = []
    val_q_losses = []
    val_policy_losses = []
    for _ in trange(variant['n_train_epochs']):
        total_q_loss = 0
        total_policy_loss = 0
        for (state, action, correct_action, q) in train_loader:
            state = state.to(DEVICE)
            action = action.to(DEVICE)
            correct_action = correct_action.to(DEVICE)
            q = q.to(DEVICE)
            q_preds = policy.critic.Q1(torch.cat([state, action], dim=-1))
            policy_preds = policy.actor(state).mean
            q_loss = F.mse_loss(q_preds, q)
            policy_loss = F.mse_loss(policy_preds, correct_action)
            qf_optimizer.zero_grad()
            policy_optimizer.zero_grad()
            q_loss.backward()
            policy_loss.backward()
            qf_optimizer.step()
            policy_optimizer.step()
            total_q_loss += q_loss.item()
            total_policy_loss += policy_loss.item()
        # Get validation stats (no gradient updates).
        total_val_q_loss = 0
        total_val_policy_loss = 0
        with torch.no_grad():
            for (state, action, correct_action, q) in val_loader:
                state = state.to(DEVICE)
                action = action.to(DEVICE)
                correct_action = correct_action.to(DEVICE)
                q = q.to(DEVICE)
                q_preds = policy.critic.Q1(torch.cat([state, action], dim=-1))
                policy_preds = policy.actor(state).mean
                q_loss = F.mse_loss(q_preds, q)
                policy_loss = F.mse_loss(policy_preds, correct_action)
                total_val_q_loss += q_loss.item()
                total_val_policy_loss += policy_loss.item()
        train_q_losses.append(total_q_loss / len(train_loader))
        train_policy_losses.append(total_policy_loss / len(train_loader))
        val_q_losses.append(total_val_q_loss / len(val_loader))
        val_policy_losses.append(total_val_policy_loss / len(val_loader))
        print(f'train: qf loss: {train_q_losses[-1]:.4f}, policy loss: {train_policy_losses[-1]:.4f}')
        print(f'val: qf loss: {val_q_losses[-1]:.4f}, policy loss: {val_policy_losses[-1]:.4f}')
    # Evaluate the resulting distilled policy.
    eval_return = eval_policy(policy, variant['env'], variant['seed'], eval_episodes=variant['eval_episodes'])
    # Save losses, evaluation return, and the distilled network weights.
    to_save = dict(
        train_q_losses=train_q_losses,
        train_policy_losses=train_policy_losses,
        val_q_losses=val_q_losses,
        val_policy_losses=val_policy_losses,
        eval_return=eval_return,
        qf=policy.critic.Q1.state_dict(),
        policy=policy.actor.state_dict()
    )
    torch.save(to_save, os.path.join(variant['replay_buffer_folder'], f'{variant["network_class"]}_distillation.pt'))
if __name__ == '__main__':
    # Command-line interface; vars(args) is passed to experiment() as `variant`,
    # so every key experiment() reads must have a matching argument here.
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="TD3", type=str,
                        choices=['TD3', 'DDPG', 'OurDDPG', 'SAC',
                                 'PytorchSAC', 'RandomNoisePytorchSAC', 'SmoothedPytorchSAC', 'FuncRegPytorchSAC'])
    parser.add_argument("--env", default="HalfCheetah-v2")  # OpenAI gym environment name
    parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--max_timesteps", default=1e6, type=int)  # Max time steps to run environment
    parser.add_argument("--policy_noise", type=float, default=0.2)  # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", type=float, default=0.5)  # Range to clip target policy noise
    # Fix: experiment() reads variant['policy_freq'] for TD3 (the default
    # policy) but the argument was missing, crashing with KeyError.
    # Default 2 matches the TD3 paper/reference implementation — confirm.
    parser.add_argument("--policy_freq", default=2, type=int)  # Frequency of delayed policy updates
    parser.add_argument("--load_model", type=str,
                        default="")  # Model load file name, "" doesn't load, "default" uses file_name
    parser.add_argument("--batch_size", default=256, type=int)  # Batch size for both actor and critic
    parser.add_argument("--discount", type=float, default=0.99)  # Discount factor
    parser.add_argument("--tau", type=float, default=0.005)  # Target network update rate
    parser.add_argument("--lr", type=float, default=3E-4)  # Actor/critic learning rate
    parser.add_argument("--alpha_lr", type=float, default=1E-4)  # Entropy temperature learning rate
    parser.add_argument("--alpha", type=float, default=0.1)
    parser.add_argument("--automatic_entropy_tuning", action='store_true')
    parser.add_argument("--weight_decay", type=float, default=0.0)
    parser.add_argument("--no_target", action='store_true')
    parser.add_argument("--mlp_qf", action='store_true')
    parser.add_argument("--mlp_policy", action='store_true')
    # network kwargs
    parser.add_argument("--network_class", default="MLP",
                        choices=['MLP', 'FourierMLP', 'LogUniformFourierMLP', 'Siren', 'D2RL', 'VariableInitMLP',
                                 'ConcatMLP', 'DropoutMLP', 'SpectralMLP'])
    parser.add_argument("--n_hidden", default=1, type=int)
    parser.add_argument("--hidden_dim", default=256, type=int)
    parser.add_argument("--first_dim", default=0, type=int)
    parser.add_argument("--fourier_dim", default=256, type=int)
    parser.add_argument("--sigma", default=1.0, type=float)
    parser.add_argument("--omega", default=30.0, type=float)
    parser.add_argument("--concatenate_fourier", action='store_true')
    parser.add_argument("--train_B", action='store_true')
    # other exp/ablation flags
    parser.add_argument("--noise_dist", default="gaussian", type=str, choices=['gaussian', 'uniform'])
    parser.add_argument("--noise_scale", default=0.1, type=float)
    parser.add_argument("--n_critic_samples", default=10, type=int)
    parser.add_argument("--dropout_p", default=0.0, type=float)
    parser.add_argument("--fr_weight", default=0.25, type=float)
    # Fix: experiment() reads variant['critic_freq'] for FuncRegPytorchSAC
    # but the argument was missing. Default 2 is a guess — confirm.
    parser.add_argument("--critic_freq", default=2, type=int)  # Critic target update frequency
    # other
    parser.add_argument("--expID", default=9999, type=int)
    parser.add_argument("--test", '-t', action='store_true')
    parser.add_argument("--ec2", action='store_true')
    parser.add_argument("--local_docker", action='store_true')
    # added
    parser.add_argument("--replay_buffer_folder", type=str, default=None)
    parser.add_argument("--n_train_epochs", default=100, type=int)
    parser.add_argument("--eval_episodes", default=100, type=int)
    args = parser.parse_args()
    variant = vars(args)
    experiment(variant)
|
# Print each greeting on its own line.
cumprimentos = ['Oi', 'Olá', 'Bom dia']
print('\n'.join(cumprimentos))
|
#
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import json
from random import randint
import pytest
from ote_sdk.entities.id import ID
from ote_sdk.entities.label import Color, Domain, LabelEntity
from ote_sdk.entities.label_schema import (
LabelGraph,
LabelGroup,
LabelGroupType,
LabelSchemaEntity,
LabelTree,
)
from ote_sdk.serialization.datetime_mapper import DatetimeMapper
from ote_sdk.serialization.label_mapper import (
ColorMapper,
LabelGraphMapper,
LabelGroupMapper,
LabelMapper,
LabelSchemaMapper,
label_schema_to_bytes,
)
from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent
from ote_sdk.tests.constants.requirements import Requirements
from ote_sdk.utils.time_utils import now
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestColorMapper:
    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_color_serialization(self):
        """
        This test serializes Color and checks serialized representation.
        Then it compares deserialized Color with original one.
        """
        # Four random channels: red, green, blue, alpha.
        channels = [randint(0, 255) for _ in range(4)]  # nosec
        color = Color(*channels)
        serialized = ColorMapper.forward(color)
        expected = dict(zip(("red", "green", "blue", "alpha"), channels))
        assert serialized == expected
        # Round trip: backward must reconstruct an equal Color.
        assert ColorMapper.backward(serialized) == color
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestLabelEntityMapper:
    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_label_entity_serialization(self):
        """
        This test serializes LabelEntity and checks serialized representation.
        Then it compares deserialized LabelEntity with original one.
        """
        creation_date = now()
        red, green, blue, alpha = (randint(0, 255) for _ in range(4))  # nosec
        label = LabelEntity(
            name="my_label",
            domain=Domain.DETECTION,
            color=Color(red, green, blue, alpha),
            hotkey="ctrl+1",
            creation_date=creation_date,
            is_empty=False,
            id=ID("0000213"),
        )
        expected_forward = {
            "_id": "0000213",
            "name": "my_label",
            "color": {"red": red, "green": green, "blue": blue, "alpha": alpha},
            "hotkey": "ctrl+1",
            "domain": "DETECTION",
            "creation_date": DatetimeMapper.forward(creation_date),
            "is_empty": False,
        }
        serialized = LabelMapper.forward(label)
        assert serialized == expected_forward
        # Round trip: backward must reconstruct an equal LabelEntity.
        assert LabelMapper.backward(serialized) == label
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestLabelSchemaEntityMapper:
    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_flat_label_schema_serialization(self):
        """
        This test serializes flat LabelSchema and checks serialized representation.
        Then it compares deserialized LabelSchema with original one.
        """
        cur_date = now()
        names = ["cat", "dog", "mouse"]
        # Random colors; randint is acceptable here (not security-sensitive).
        colors = [
            Color(
                randint(0, 255),  # nosec
                randint(0, 255),  # nosec
                randint(0, 255),  # nosec
                randint(0, 255),  # nosec
            )  # nosec  # noqa
            for _ in range(3)
        ]
        labels = [
            LabelEntity(
                name=name,
                domain=Domain.CLASSIFICATION,
                creation_date=cur_date,
                id=ID(i),
                color=colors[i],
            )
            for i, name in enumerate(names)
        ]
        label_schema = LabelSchemaEntity.from_labels(labels)
        serialized = LabelSchemaMapper.forward(label_schema)
        # A flat schema serializes to an empty label tree plus a single
        # exclusive group containing every label.
        assert serialized == {
            "label_tree": {"type": "tree", "directed": True, "nodes": [], "edges": []},
            "label_groups": [
                {
                    "_id": label_schema.get_groups()[0].id,
                    "name": "from_label_list",
                    "label_ids": ["0", "1", "2"],
                    "relation_type": "EXCLUSIVE",
                }
            ],
            "all_labels": {
                "0": {
                    "_id": "0",
                    "name": "cat",
                    "color": ColorMapper.forward(colors[0]),
                    "hotkey": "",
                    "domain": "CLASSIFICATION",
                    "creation_date": DatetimeMapper.forward(cur_date),
                    "is_empty": False,
                },
                "1": {
                    "_id": "1",
                    "name": "dog",
                    "color": ColorMapper.forward(colors[1]),
                    "hotkey": "",
                    "domain": "CLASSIFICATION",
                    "creation_date": DatetimeMapper.forward(cur_date),
                    "is_empty": False,
                },
                "2": {
                    "_id": "2",
                    "name": "mouse",
                    "color": ColorMapper.forward(colors[2]),
                    "hotkey": "",
                    "domain": "CLASSIFICATION",
                    "creation_date": DatetimeMapper.forward(cur_date),
                    "is_empty": False,
                },
            },
        }
        # Round trip: backward must reconstruct an equal schema.
        deserialized = LabelSchemaMapper.backward(serialized)
        assert label_schema == deserialized
        # Checking value returned by "label_schema_to_bytes" function:
        # it must match the JSON encoding of the forward mapping.
        expected_label_schema_to_bytes = json.dumps(serialized, indent=4).encode()
        actual_label_schema_to_bytes = label_schema_to_bytes(label_schema)
        assert actual_label_schema_to_bytes == expected_label_schema_to_bytes
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestLabelGroupMapper:
    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_label_group_serialization(self):
        """
        This test serializes flat LabelGroup and checks serialized representation.
        Then it compares deserialized LabelGroup with original one.
        """
        labels = [
            LabelEntity(name=label_name, domain=Domain.CLASSIFICATION, id=ID(str(index)))
            for index, label_name in enumerate(["cat", "dog", "mouse"])
        ]
        label_group = LabelGroup(
            name="Test LabelGroup", labels=labels, group_type=LabelGroupType.EMPTY_LABEL
        )
        serialized = LabelGroupMapper.forward(label_group)
        assert serialized == {
            "_id": label_group.id,
            "name": "Test LabelGroup",
            "label_ids": ["0", "1", "2"],
            "relation_type": "EMPTY_LABEL",
        }
        # backward() resolves label ids through the all_labels lookup.
        label_lookup = {ID(str(index)): label for index, label in enumerate(labels)}
        deserialized = LabelGroupMapper.backward(
            instance=serialized, all_labels=label_lookup
        )
        assert deserialized == label_group
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestLabelGraphMapper:
    """Tests for LabelGraphMapper forward/backward on graphs and trees."""

    # Shared fixture labels forming a small hierarchy:
    # label_0 -> (label_0_1, label_0_2); label_0_1 -> (label_0_1_1, label_0_1_2);
    # label_0_2 -> label_0_2_1.
    label_0 = LabelEntity(name="label_0", domain=Domain.SEGMENTATION, id=ID("0"))
    label_0_1 = LabelEntity(name="label_0_1", domain=Domain.SEGMENTATION, id=ID("0_1"))
    label_0_2 = LabelEntity(name="label_0_2", domain=Domain.SEGMENTATION, id=ID("0_2"))
    label_0_1_1 = LabelEntity(
        name="label_0_1_1", domain=Domain.SEGMENTATION, id=ID("0_1_1")
    )
    label_0_1_2 = LabelEntity(
        name="label_0_1_2", domain=Domain.SEGMENTATION, id=ID("0_1_2")
    )
    label_0_2_1 = LabelEntity(
        name="label_0_2_1", domain=Domain.SEGMENTATION, id=ID("0_2_1")
    )

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_label_graph_forward(self):
        """
        <b>Description:</b>
        Check "LabelGraphMapper" class "forward" method
        <b>Input data:</b>
        "LabelGraph" and "LabelTree" objects
        <b>Expected results:</b>
        Test passes if dictionary returned by "forward" method is equal to expected
        <b>Steps</b>
        1. Check dictionary returned by "forward" method for "LabelGraph" object
        2. Check dictionary returned by "forward" method for "LabelTree" object
        """
        # Checking dictionary returned by "forward" for "LabelGraph"
        label_graph = LabelGraph(directed=False)
        label_graph.add_edges(
            [
                (self.label_0, self.label_0_1),
                (self.label_0, self.label_0_2),
                (self.label_0_1, self.label_0_1_1),
                (self.label_0_1, self.label_0_1_2),
                (self.label_0_1_1, self.label_0_1_2),
            ]
        )
        forward = LabelGraphMapper.forward(label_graph)
        # Nodes/edges are serialized as label id strings, in insertion order.
        assert forward == {
            "type": "graph",
            "directed": False,
            "nodes": ["0", "0_1", "0_2", "0_1_1", "0_1_2"],
            "edges": [
                ("0", "0_1"),
                ("0", "0_2"),
                ("0_1", "0_1_1"),
                ("0_1", "0_1_2"),
                ("0_1_1", "0_1_2"),
            ],
        }
        # Checking dictionary returned by "forward" for "LabelTree"
        label_tree = LabelTree()
        for parent, child in [
            (self.label_0, self.label_0_1),
            (self.label_0, self.label_0_2),
            (self.label_0_1, self.label_0_1_1),
            (self.label_0_1, self.label_0_1_2),
            (self.label_0_2, self.label_0_2_1),
        ]:
            label_tree.add_child(parent, child)
        forward = LabelGraphMapper.forward(label_tree)
        # Tree edges are serialized child-first: (child_id, parent_id).
        assert forward == {
            "type": "tree",
            "directed": True,
            "nodes": ["0_1", "0", "0_2", "0_1_1", "0_1_2", "0_2_1"],
            "edges": [
                ("0_1", "0"),
                ("0_2", "0"),
                ("0_1_1", "0_1"),
                ("0_1_2", "0_1"),
                ("0_2_1", "0_2"),
            ],
        }

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_label_graph_backward(self):
        """
        <b>Description:</b>
        Check "LabelGraphMapper" class "backward" method
        <b>Input data:</b>
        Dictionary object to deserialize, labels list
        <b>Expected results:</b>
        Test passes if "LabelGraph" or "LabelTree" object returned by "backward" method is equal to expected
        <b>Steps</b>
        1. Check dictionary returned by "backward" method for "LabelGraph" object
        2. Check dictionary returned by "backward" method for "LabelTree" object
        3. Check that "ValueError" exception is raised when unsupported type is specified as "type" key in dictionary
        object of "instance" parameter for "backward" method
        """
        # Checking dictionary returned by "backward" for "LabelGraph"
        forward = {
            "type": "graph",
            "directed": False,
            "nodes": ["0", "0_1", "0_2", "0_1_1"],
            "edges": [("0", "0_1"), ("0", "0_2"), ("0_1", "0_1_1"), ("0_1_1", "0_2")],
        }
        # all_labels lookup used to resolve serialized id strings back to labels.
        labels = {
            ID("0"): self.label_0,
            ID("0_1"): self.label_0_1,
            ID("0_2"): self.label_0_2,
            ID("0_1_1"): self.label_0_1_1,
        }
        expected_backward = LabelGraph(directed=False)
        expected_backward.add_edges(
            [
                (self.label_0, self.label_0_1),
                (self.label_0, self.label_0_2),
                (self.label_0_1, self.label_0_1_1),
                (self.label_0_1_1, self.label_0_2),
            ]
        )
        actual_backward = LabelGraphMapper.backward(instance=forward, all_labels=labels)
        assert actual_backward == expected_backward
        # Checking dictionary returned by "backward" for "LabelTree"
        forward = {
            "type": "tree",
            "directed": True,
            "nodes": ["0_1", "0", "0_2", "0_1_1", "0_2_1"],
            "edges": [("0_1", "0"), ("0_2", "0"), ("0_1_1", "0_1"), ("0_2_1", "0_2")],
        }
        labels = {
            ID("0"): self.label_0,
            ID("0_1"): self.label_0_1,
            ID("0_2"): self.label_0_2,
            ID("0_1_1"): self.label_0_1_1,
            ID("0_1_2"): self.label_0_1_2,
            ID("0_2_1"): self.label_0_2_1,
        }
        expected_backward = LabelTree()
        for parent, child in [
            (self.label_0, self.label_0_1),
            (self.label_0, self.label_0_2),
            (self.label_0_1, self.label_0_1_1),
            (self.label_0_2, self.label_0_2_1),
        ]:
            expected_backward.add_child(parent, child)
        actual_backward = LabelGraphMapper.backward(instance=forward, all_labels=labels)
        assert actual_backward == expected_backward
        # Checking "ValueError" exception raised when unsupported type specified as "type" in dictionary "instance" for
        # "backward"
        forward = {
            "type": "rectangle",
            "directed": True,
            "nodes": ["0_1", "0", "0_2", "0_1_1", "0_2_1"],
            "edges": [("0_1", "0"), ("0_2", "0"), ("0_1_1", "0_1"), ("0_2_1", "0_2")],
        }
        with pytest.raises(ValueError):
            LabelGraphMapper.backward(instance=forward, all_labels=labels)
|
# get_url_text.py
"""A script to download the html source at a URL as text file

The file will be placed in a folder.

Usage: get_url_text.py url foldername filename
"""
import os, sys, requests, bs4

print(f"{sys.argv=}")
# Robustness fix: exit with the usage string instead of an IndexError when
# arguments are missing.
if len(sys.argv) != 4:
    raise SystemExit(__doc__)
url = sys.argv[1]
foldername = sys.argv[2]
filename = sys.argv[3]
# Browser-like User-Agent: some sites reject the default python-requests agent.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
res = requests.get(url, headers=headers)
# Fix: bail out on ANY HTTP error status, not just 404.
if not res.ok:
    raise SystemExit(f'HTTP response status: {res.status_code}')  # Exit with message if URL is bad
soup = bs4.BeautifulSoup(res.text, 'html.parser')
text = soup.prettify()
# Fix: os.mkdir crashed when the folder already existed.
os.makedirs(foldername, exist_ok=True)
# Fix: the output path ignored the filename argument (argv[3] was unused).
# utf-8 is forced so characters outside the platform codepage (e.g. cp1252 on
# Windows) do not raise UnicodeEncodeError.
with open(os.path.join(foldername, filename), 'w', encoding='utf-8') as writer:
    writer.write(text)
|
#!/usr/bin/env python
"""
server.py
Copyright 2018. All Rights Reserved.
Created: April 13, 2017
Authors: Toki Migimatsu
"""
from __future__ import print_function, division
import threading
from multiprocessing import Process
from argparse import ArgumentParser
import json
import os
import shutil
import sys
# Directory containing the static web assets served over HTTP.
WEB_DIRECTORY = os.path.join(os.path.dirname(__file__), "web")
# Make the bundled python/ helpers importable regardless of the CWD.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "python")))
from RedisMonitor import RedisMonitor
from WebSocketServer import WebSocketServer
from HTTPRequestHandler import makeHTTPRequestHandler
# Python 2/3 compatibility for the HTTP server import.
if sys.version.startswith("3"):
    from http.server import HTTPServer
else:
    from BaseHTTPServer import HTTPServer
# Parsed command-line arguments (populated in __main__).
args = None
# Handle to the currently running app subprocess, if any (see start_app()).
app_thread = None
def start_app(app_name, app_target):
    """Terminate the currently running app process (if any), then spawn
    app_target in a new process and record it in the app_thread global."""
    global app_thread
    previous = app_thread
    if previous is not None:
        print("Killing %s" % previous.name)
        previous.terminate()
    print("Starting %s app" % app_name)
    replacement = Process(target=app_target)
    app_thread = replacement
    replacement.start()
def make_handle_get_request(redis_monitor):
    """Build the HTTP GET callback, closing over the Redis monitor.

    The returned handler serves static files from WEB_DIRECTORY, reports the
    websocket port, and resolves per-app resource paths through Redis.
    """
    def handle_get_request(request_handler, get_vars, **kwargs):
        """
        HTTPRequestHandler callback:
        Serve content inside WEB_DIRECTORY
        """
        path_tokens = [token for token in request_handler.path.split("/") if token]
        # Registered browser apps: page name -> process target.
        apps = {
            #"dh": dh_app,
        }
        # Default to index.html (also rejects any ".." path-traversal attempt)
        if not path_tokens or ".." in path_tokens:
            request_path = os.path.join(WEB_DIRECTORY, "index.html")
        elif path_tokens[0] == "get_websocket_port":
            request_handler.wfile.write(str(kwargs["ws_port"]).encode("utf-8"))
            return
        elif len(path_tokens) > 2 and path_tokens[0] == "resources":
            # Look up the app's registered resource directories in Redis and
            # serve the first one that contains the requested file.
            path_resources = redis_monitor.redis_db.smembers("webapp::resources::{}".format(path_tokens[1]))
            request_path = None
            if path_resources is not None:
                for path_resource in path_resources:
                    request_path = os.path.join(path_resource.decode("utf-8"), *path_tokens[2:])
                    print(request_path)
                    if os.path.isfile(request_path):
                        break
                    request_path = None
            if request_path is None:
                request_path = os.path.join(WEB_DIRECTORY, *path_tokens)
        else:
            # Requesting <app>.html (re)starts the matching app process.
            # Note: os.path.splitext always returns a 2-tuple, so the old
            # len(file_ext) == 2 check was redundant.
            app_name, extension = os.path.splitext(path_tokens[0])
            if extension == ".html" and app_name in apps:
                # Consistency fix: reuse start_app() instead of duplicating
                # its kill/spawn logic inline (same prints, same behavior).
                start_app(app_name, apps[app_name])
            request_path = os.path.join(WEB_DIRECTORY, *path_tokens)
        # Check if file exists
        if not os.path.isfile(request_path):
            print(request_path)
            request_handler.send_error(404, "File not found.")
            return
        # Otherwise send file directly
        with open(request_path, "rb") as f:
            shutil.copyfileobj(f, request_handler.wfile)
    return handle_get_request
def handle_post_request(request_handler, post_vars, **kwargs):
    """
    HTTPRequestHandler callback:
    Set POST variables as Redis keys.

    Routes on the first path token:
      /DEL -- delete every key named in `post_vars` from Redis.
      /SET -- JSON-decode each value (first element of the CGI value list)
              and store it: strings as-is, dicts re-serialized as JSON,
              anything else treated as rows and flattened to "a b; c d".

    kwargs must provide `redis_db` (a Redis client with set/delete).
    """
    path_tokens = [token for token in request_handler.path.split("/") if token]
    if not path_tokens or ".." in path_tokens:
        return
    if path_tokens[0] == "DEL":
        keys = list(post_vars.keys())
        if not keys:
            return
        # CGI parsing may hand us bytes keys; normalize to str for redis-py.
        if isinstance(keys[0], bytes):
            keys = [k.decode("utf-8") for k in keys]
        result = kwargs["redis_db"].delete(*keys)
        print("DEL {}: {}".format(" ".join(keys), result))
    elif path_tokens[0] == "SET":
        # Python 2 compatibility shim: `unicode` only exists there. The
        # original used a bare `except:` and recomputed this every iteration;
        # catch only NameError and hoist it out of the loop.
        try:
            string_types = (str, unicode)
        except NameError:
            string_types = (str,)
        for key, val_str in post_vars.items():
            raw = val_str[0]
            if isinstance(raw, bytes):
                raw = raw.decode("utf-8")
            val_json = json.loads(raw)
            if isinstance(val_json, string_types):
                val = val_json
            elif isinstance(val_json, dict):
                val = json.dumps(val_json)
            else:
                # Assume a list of rows; flatten to "a b; c d".
                val = "; ".join(" ".join(map(str, row)) for row in val_json)
            print("%s: %s" % (key, val))
            kwargs["redis_db"].set(key, val)
if __name__ == "__main__":
# Parse arguments
parser = ArgumentParser(description=(
"Monitor Redis keys in the browser."
))
parser.add_argument("-hp", "--http_port", help="HTTP Port (default: 8000)", default=8000, type=int)
parser.add_argument("-wp", "--ws_port", help="WebSocket port (default: 8001)", default=8001, type=int)
parser.add_argument("-rh", "--redis_host", help="Redis hostname (default: 127.0.0.1)", default="127.0.0.1")
parser.add_argument("-rp", "--redis_port", help="Redis port (default: 6379)", default=6379, type=int)
parser.add_argument("-ra", "--redis_pass", help="Redis password (default: '')", default='', type=str)
parser.add_argument("-rd", "--redis_db", help="Redis database number (default: 0)", default=0, type=int)
parser.add_argument("-r", "--refresh_rate", help="Redis refresh rate in seconds (default: 0.05)", default=0.05, type=float)
parser.add_argument("-kf", "--key_filter", help="Regex filter for Redis keys to monitor (default: \"\")", default="", type=str)
parser.add_argument("--realtime", action="store_true", help="Subscribe to realtime Redis SET pubsub notifications")
args = parser.parse_args()
# Create RedisMonitor, HTTPServer, and WebSocketServer
print("Starting up server...\n")
redis_monitor = RedisMonitor(host=args.redis_host, port=args.redis_port, password=args.redis_pass, db=args.redis_db,
refresh_rate=args.refresh_rate, key_filter=args.key_filter, realtime=args.realtime)
print("Connected to Redis database at %s:%d (db %d)" % (args.redis_host, args.redis_port, args.redis_db))
get_post_args = {"ws_port": args.ws_port, "redis_db": redis_monitor.redis_db}
http_server = HTTPServer(("", args.http_port),
makeHTTPRequestHandler(make_handle_get_request(redis_monitor), handle_post_request, get_post_args))
ws_server = WebSocketServer(port=args.ws_port)
# Start HTTPServer
http_server_process = Process(target=http_server.serve_forever)
http_server_process.start()
print("Started HTTP server on port %d" % (args.http_port))
# Start WebSocketServer
ws_server_thread = threading.Thread(target=ws_server.serve_forever, args=(redis_monitor.initialize_client,))
ws_server_thread.daemon = True
ws_server_thread.start()
print("Started WebSocket server on port %d\n" % (args.ws_port))
# Start RedisMonitor
print("Server ready. Listening for incoming connections.\n")
redis_monitor.run_forever(ws_server)
http_server_process.join()
|
"""Clean Code in Python - Chapter 3: General Traits of Good Code
"""
import unittest
from unittest.mock import Mock, patch
from exceptions_1 import DataTransport, Event
class FailsAfterNTimes:
    """Test double whose ``connect`` raises ``with_exception`` for the first
    ``n_times`` calls and then succeeds, returning itself."""

    def __init__(self, n_times: int, with_exception) -> None:
        self._remaining_failures = n_times
        self._exception = with_exception

    def connect(self):
        """Consume one failure from the budget; raise while any remain."""
        budget = self._remaining_failures - 1
        self._remaining_failures = budget
        if budget < 0:
            return self
        raise self._exception

    def send(self, data):
        """Echo the payload back unchanged."""
        return data
@patch("time.sleep", return_value=0)
class TestTransport(unittest.TestCase):
def test_connects_after_retries(self, sleep):
data_transport = DataTransport(
FailsAfterNTimes(2, with_exception=ConnectionError)
)
data_transport.send = Mock()
data_transport.deliver_event(Event("test"))
data_transport.send.assert_called_once_with("decoded test")
assert sleep.call_count == DataTransport.retry_n_times - 1, sleep.call_count
if __name__ == "__main__":
unittest.main()
|
def bubbleSort(arr):
    """Sort `arr` in place (ascending) with bubble sort and return it."""
    n = len(arr)
    for done in range(n):
        # After each pass the largest remaining item has bubbled to the end,
        # so the inner scan shrinks by one each time.
        for k in range(n - done - 1):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
    return arr
# Sample input for ad-hoc runs of the sorts below.
arr = [1, 13, 5, 2, 4, -1, 2, 2, 4]
# print(bubbleSort(arr))
def mergeSort(arr):
    """Return a new, ascending-sorted copy of `arr` via top-down merge sort.

    Fixes the original base case (`len(arr) == 1`), which recursed forever on
    an empty list: lists of length 0 or 1 are already sorted. The debug print
    of every recursion level has been removed.
    """
    if len(arr) <= 1:
        return arr[:]
    mid = len(arr) // 2
    left = mergeSort(arr[:mid])
    right = mergeSort(arr[mid:])
    return _merge_sorted_pair(left, right)

def _merge_sorted_pair(left, right):
    """Merge two sorted lists into one sorted list (stable two-pointer merge)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def merge(firstHalf, secondHalf):
    """Merge two sorted lists into a single sorted list.

    The original body was left unfinished (the bare `if firstHalf` line was a
    syntax error); this completes the standard stable two-pointer merge.
    """
    out = []
    fhIndex = 0
    shIndex = 0
    while fhIndex < len(firstHalf) and shIndex < len(secondHalf):
        if firstHalf[fhIndex] <= secondHalf[shIndex]:
            out.append(firstHalf[fhIndex])
            fhIndex += 1
        else:
            out.append(secondHalf[shIndex])
            shIndex += 1
    # One side is exhausted; the remainder of the other is already sorted.
    out.extend(firstHalf[fhIndex:])
    out.extend(secondHalf[shIndex:])
    return out
print(mergeSort(arr)) |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-25 17:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djangosige.apps.login.models
class Migration(migrations.Migration):
    """Initial migration for the login app: creates the Usuario profile model,
    linked one-to-one to the project's swappable AUTH_USER_MODEL."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Usuario',
            fields=[
                ('id', models.AutoField(auto_created=True,
                                        primary_key=True, serialize=False, verbose_name='ID')),
                # Profile photo; upload path is computed per-user by the app.
                ('user_foto', models.ImageField(blank=True, default='imagens/user.png',
                                                upload_to=djangosige.apps.login.models.user_directory_path)),
                ('user', models.OneToOneField(
                    on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import matplotlib.pyplot as plt
import cv2
import numpy as np
import csv
import tensorflow
import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Conv2D, ELU, Activation
from keras.layers import Lambda, Dropout, Cropping2D, SpatialDropout2D
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras import backend as K
from keras.utils import np_utils
def create_model():
    """Build the NVIDIA end-to-end steering CNN (Keras Sequential).

    Input is a 160x320x3 frame; output is a single steering value.
    """
    model = Sequential()
    # trim image to only see section with road
    model.add(Cropping2D(cropping=((50,20), (0,10)), input_shape=(160,320,3)))
    # Preprocess incoming data, centered around zero with small standard deviation
    model.add(Lambda(lambda x: (x / 255.0) - 0.5))
    # Nvidia model: three strided 5x5 convolutions...
    model.add(Conv2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2)))
    model.add(Conv2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2)))
    model.add(Conv2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2)))
    # NOTE(review): `dim_ordering` is the Keras 1 argument name (Keras 2 uses
    # `data_format`) -- confirm against the pinned Keras version.
    model.add(SpatialDropout2D(.5, dim_ordering='default'))
    # ...then two unstrided 3x3 convolutions.
    model.add(Conv2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1)))
    model.add(Conv2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1)))
    # Fully connected head with heavy dropout between layers.
    model.add(Flatten())
    model.add(Dense(1164))
    model.add(Dropout(.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(.5))
    # Single regression output (steering angle).
    model.add(Dense(1))
    return model
# Rebuild the architecture and load the trained weights from disk.
model = create_model()
model.load_weights('model.h5')
# Render the architecture diagram to a PNG.
from keras.utils.vis_utils import plot_model
plot_model(model, to_file='./pics/nvidia_model.png', show_shapes=True, show_layer_names=True)
# Load three sample frames (OpenCV reads BGR; convert to RGB for the model).
images = []
image = cv2.imread('./pics/center_2016_12_01_13_42_42_686.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
image = cv2.imread('./pics/right_2016_12_01_13_32_52_652.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
image = cv2.imread('./pics/center_2016_12_01_13_40_11_279.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
# Dump feature maps for each convolutional layer.
# NOTE(review): layer_id indices skip non-conv layers (cropping/lambda/
# dropout) -- confirm they still line up if the architecture changes.
from kerastoolbox.visu import plot_feature_map
x = plot_feature_map(model, X=images, layer_id=2, n_columns=3, n=256)
x.savefig('./pics/conv_1.png')
x = plot_feature_map(model, X=images, layer_id=3, n_columns=3, n=256)
x.savefig('./pics/conv_2.png')
x = plot_feature_map(model, X=images, layer_id=4, n_columns=3, n=256)
x.savefig('./pics/conv_3.png')
x = plot_feature_map(model, X=images, layer_id=6, n_columns=3, n=256)
x.savefig('./pics/conv_4.png')
x = plot_feature_map(model, X=images, layer_id=7, n_columns=3, n=256)
x.savefig('./pics/conv_5.png')
|
from common.stream_consumer.dummy_stream_consumer import DummyStreamConsumer
from common.stream_consumer.factory import InvalidStreamConsumerException
from common.stream_consumer.factory import StreamConsumerFactory
from common.stream_consumer.stream_consumer import StreamConsumer
# Public API of the stream_consumer package.
__all__ = [
    'InvalidStreamConsumerException',
    'StreamConsumer',
    'StreamConsumerFactory',
    'DummyStreamConsumer'
]
|
"""
A simple scrape-and-BSOUP parse on my own website.
Angles: compute, single-core threading.
Works: Python 3.
"""
from timeit import default_timer as timer
from concurrent.futures import ThreadPoolExecutor
"""
Make request to jackhales.com, print out response + page len
and use BeautifulSoup to compute into a class.
"""
def just_a_request(id):
    """Fetch jackhales.com, parse it with BeautifulSoup, print the page
    length for worker `id`, and return True.

    Imports are function-local in the original; kept that way here.
    """
    from requests import get
    from bs4 import BeautifulSoup
    page = get("https://jackhales.com")
    # Parsed soup is built for the "compute" angle but otherwise unused.
    soup = BeautifulSoup(page.text, "html.parser")
    print("finished id:", id, "with page len:", len(page.text))
    return True
if __name__ == "__main__":
start = timer()
with ThreadPoolExecutor(1000) as handler:
results = handler.map(just_a_request, range(1000))
end = timer()
if True: # set to false if you dont want extra compute
worked = len([True for t in results if t == True])
not_worked = len([False for t in results if t == None]) # t == None if excepts in-thread
print("out of all req,", worked, "worked and", not_worked, "didn't")
time_taken = (end - start)
print("seconded {}".format(time_taken))
|
"""
Compute the differences for each block
using the centroids
"""
# -- python --
import sys
import torch
import torchvision
import numpy as np
from einops import rearrange,repeat
from numba import jit,njit,prange
# -- clgen --
sys.path.append("/home/gauenk/Documents/experiments/cl_gen/lib/")
from pyutils import get_img_coords
# -- faiss --
# sys.path.append("/home/gauenk/Documents/faiss/contrib/")
from bp_search import create_mesh_from_ranges
from warp_utils import warp_burst_from_locs,warp_burst_from_pix
th_pad = torchvision.transforms.functional.pad
# ----------------------------------------------
#
# Compute Ave over Expanded Centroid
#
# ----------------------------------------------
def compute_ecentroid_ave(centroids,sizes):
    """Average expanded-centroid patches over non-empty clusters.

    Moves the tensors to NumPy, runs the numba kernel, and returns a
    float tensor of shape (f, s, h, w, ps, ps) on the input's device.
    """
    device = centroids.device
    f, tK, s, h, w, ps, _ = centroids.shape
    cents_np = centroids.cpu().numpy()
    sizes_np = sizes.cpu().numpy()
    out = np.zeros((f, s, h, w, ps, ps))
    compute_ecentroid_ave_numba(cents_np, sizes_np, out)
    return torch.FloatTensor(out).to(device)
@njit
def compute_ecentroid_ave_numba(centroids,sizes,ave):
    """Fill `ave` (f, s, h, w, ps, ps) in place with the per-cell average of
    non-empty cluster centroids.

    Each non-empty cluster (size > 0) contributes centroid * size / tK; the
    sum is then divided by the count of non-empty clusters, or set to F_MAX
    when every cluster at that cell is empty.
    """
    f,tK,s,h,w,ps,ps = centroids.shape
    F_MAX = 10000.  # sentinel for cells with no non-empty cluster
    # NOTE(review): prange only parallelizes under @njit(parallel=True);
    # as written these loops run serially.
    for hi in prange(h):
        for wi in prange(w):
            for si in prange(s):
                num_ci = 0
                for ci in range(tK):
                    size = sizes[ci][si][hi][wi]
                    if size == 0: continue  # skip empty clusters
                    num_ci += 1
                    for fi in range(f):
                        for pi in range(ps):
                            for pj in range(ps):
                                cent = centroids[fi][ci][si][hi][wi][pi][pj]*size/tK
                                ave[fi][si][hi][wi][pi][pj] += cent
                # Normalize by the number of contributing clusters.
                for fi in range(f):
                    for pi in range(ps):
                        for pj in range(ps):
                            val = ave[fi][si][hi][wi][pi][pj]
                            val = val / num_ci if num_ci > 0 else F_MAX
                            ave[fi][si][hi][wi][pi][pj] = val
# ----------------------------------------------
#
# Compute Ave over Standard Centroid
#
# ----------------------------------------------
def compute_centroid_ave(centroids,sizes):
    """Average the (f, tK, s, h, w) centroids over non-empty clusters.

    Runs the numba kernel on NumPy copies and returns a float tensor of
    shape (f, s, h, w) on the input's device. The dead `vals` allocation
    from the original has been removed.
    """
    # -- create output --
    device = centroids.device
    f,tK,s,h,w = centroids.shape
    # -- to numpy --
    centroids = centroids.cpu().numpy()
    sizes = sizes.cpu().numpy()
    ave = np.zeros((f,s,h,w))
    # -- numba --
    compute_centroid_ave_numba(centroids,sizes,ave)
    # -- to torch --
    ave = torch.FloatTensor(ave).to(device)
    return ave
@njit
def compute_centroid_ave_numba(centroids,sizes,ave):
    """Fill `ave` (f, s, h, w) in place with the per-cell average of
    non-empty cluster centroids; F_MAX marks cells with no contribution.

    Same weighting as the expanded variant minus the patch dimensions --
    note this version adds raw centroid values (no size/tK weighting).
    """
    f,tK,s,h,w = centroids.shape
    F_MAX = 10000.  # sentinel for cells with no non-empty cluster
    # NOTE(review): prange only parallelizes under @njit(parallel=True).
    for hi in prange(h):
        for wi in prange(w):
            for si in prange(s):
                num_ci = 0
                for ci in range(tK):
                    size = sizes[ci][si][hi][wi]
                    if size == 0: continue  # skip empty clusters
                    num_ci += 1
                    for fi in range(f):
                        cent = centroids[fi][ci][si][hi][wi]
                        ave[fi][si][hi][wi] += cent
                for fi in range(f):
                    val = ave[fi][si][hi][wi]
                    ave[fi][si][hi][wi] = val / num_ci if num_ci > 0 else F_MAX
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-31 05:13
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: a custom Subscriber user model (replacing the stock
    auth user) plus a NextOfKin record tied one-to-one to a user."""

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Subscriber',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Subscriber-specific profile fields below.
                ('gender', models.CharField(choices=[('MALE', 'MALE'), ('FEMALE', 'FEMALE')], max_length=10, null=True)),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('state_of_origin', models.CharField(max_length=30, null=True)),
                ('occupation', models.CharField(max_length=250, null=True)),
                ('address', models.TextField(default='Address')),
                ('local_government', models.CharField(max_length=30)),
                ('nationality', models.CharField(max_length=30)),
                ('image', models.ImageField(upload_to='', verbose_name='Captured image')),
                ('role', models.CharField(choices=[('SUBSCRIBER', 'SUBSCRIBER'), ('REGISTRATION AGENT', 'REGISTRATION AGENT'), ('MOBILE NETWORK OPERATOR', 'MOBILE NETWORK OPERATOR')], default='SUBSCRIBER', max_length=30)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'users',
                'verbose_name': 'user',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='NextOfKin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('gender', models.CharField(choices=[('MALE', 'MALE'), ('FEMALE', 'FEMALE')], max_length=10)),
                ('relationship', models.CharField(choices=[('FATHER', 'FATHER'), ('MOTHER', 'MOTHER'), ('BROTHER', 'BROTHER'), ('SISTER', 'SISTER'), ('UNCLE', 'UNCLE'), ('AUNT', 'AUNT')], max_length=10)),
                ('address', models.TextField(default='Address')),
                ('phone_number', models.CharField(max_length=11)),
                # One next-of-kin per user ("referrer" is the owning user).
                ('referrer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import venv
import logging
import json
from typing import Dict, Any, Optional
from tempfile import TemporaryDirectory
from subprocess import CalledProcessError, run
class TapCreateError(Exception):
    """Raised when setting up a tap fails (e.g. pip install errors)."""
    ...
class TapRuntimeError(Exception):
    """Error type reserved for tap run-time failures (not raised in this chunk)."""
    ...
class Tap:
    """Manage a Singer-style tap: a dedicated virtualenv, the pip install,
    and the config/state files used to invoke the tap executable."""

    def __init__(
        self,
        tap: str,
        tap_exec: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
        state: Optional[Dict[str, Any]] = None,
        config_path: Optional[str] = None,
        state_path: Optional[str] = None,
    ) -> None:
        self.tap_name = tap
        self.tap_exec = tap_exec or tap
        self.config = config
        self.state = state
        # All generated artifacts live under one temporary working dir.
        self.wdir = TemporaryDirectory()
        self.venv_path = f"{self.wdir.name}/{self.tap_name}"
        self.pip_path = f"{self.venv_path}/bin/pip"
        self.exec_path = f"{self.venv_path}/bin/"
        self.config_path = config_path or f"{self.wdir.name}/tap_config.json"
        self.state_path = state_path or f"{self.wdir.name}/tap_state.json"
        # Remember whether the caller supplied explicit paths.
        self.use_config_path = bool(config_path)
        self.use_state_path = bool(state_path)
        self._initialized = False

    def initialize(self) -> None:
        """Idempotently create the venv, install the tap, and write files."""
        if self._initialized:
            return
        self.create_venv()
        self.install_tap()
        self.create_config_state_files()
        self._initialized = True

    def create_venv(self) -> None:
        """Create the tap's private virtualenv (with pip available)."""
        logging.info(f"Creating virtualenv for {self.tap_name} at {self.venv_path}")
        venv.create(self.venv_path, with_pip=True)

    def install_tap(self, verbose: bool = False) -> None:
        """pip-install the tap into its venv; raise TapCreateError on failure."""
        logging.info(f"Installing {self.tap_name} in {self.venv_path}")
        install_cmd = f"{self.pip_path} install {self.tap_name}".split()
        completed = run(install_cmd, capture_output=True)
        try:
            completed.check_returncode()
        except CalledProcessError:
            for err_line in completed.stderr.splitlines():
                logging.error(err_line.decode("utf-8"))
            raise TapCreateError(f"Failed to create {self.tap_name}")
        if verbose:
            for out_line in completed.stdout.splitlines():
                logging.info(out_line.decode("utf-8"))

    def create_config_state_files(self) -> None:
        """Serialize config and state (when present) to their JSON files."""
        if self.config:
            with open(self.config_path, "w") as handle:
                handle.write(json.dumps(self.config))
        if self.state:
            with open(self.state_path, "w") as handle:
                handle.write(json.dumps(self.state))

    @property
    def run_cmd(self) -> str:
        """Command line for invoking the tap with its config/state flags."""
        parts = [f"{self.exec_path}{self.tap_exec} "]
        if self.config or self.use_config_path:
            parts.append(f"--config {self.config_path} ")
        if self.state or self.use_state_path:
            parts.append(f"--state {self.state_path} ")
        return "".join(parts)
|
from flask import Flask, request, jsonify
from string import ascii_uppercase
from random import choice
# Mock Data Store
# In-memory stand-in for a real database: license key -> metadata record.
LICENSE_KEY_STORE = {
    "PRIVATE_LICENSE_KEY": {
        "renewal": True,
        "discord": "DISCORD_ID",
        "expire": "2022-01-01 00:00 UTC",
        "plan": "Lifetime"
    },
    "PRIVATE_LICENSE_KEY_2": {
        "renewal": True,
        "discord": "DISCORD_ID_2",
        "expire": "2022-01-01 00:00 UTC",
        "plan": "Lifetime"
    },
    "PRIVATE_LICENSE_KEY_3": {
        "renewal": False,
        "discord": "DISCORD_ID_3",
        "expire": "2020-01-01 00:00 UTC",
        "plan": "$60/6 months"
    }
}
# One-time passwords keyed by discord id (populated by /gen-otp).
OTP_STORE = {}
PLANS = ["Lifetime", "$60/6 months"]
# API Key
# NOTE(review): hard-coded credential compared with plain equality; move to
# configuration/environment for anything beyond this mock.
API_KEY = "jJz23yFoMdm87XPCXXjv9E4H22vvYJ"
app = Flask(__name__)
@app.route('/verify', methods=['POST'])
def verify_endpoint():
    """
    Verify a given license, whether it's valid or not
    Input in JSON format :
    {
        "license": <LICENSE_STR>,
        "discord": <DISCORD_ID_STR>
    }
    :return: 200 OK
    {
        "require_renewal": <RENEWAL_BOOL>,
        "expire_datetime": <EXPIRE_yyyy-mm-dd hh:mm UTC>
    }
    :return 401 Unauthorized
    :return 404 Not Found
    """
    if request.headers.get("Authorization") != API_KEY:
        return jsonify({"error": "Invalid API Key"}), 401
    payload = request.json
    license_key = payload["license"]
    discord_id = payload["discord"]
    # Single lookup; missing key -> 404.
    record = LICENSE_KEY_STORE.get(license_key)
    if record is None:
        return jsonify({
            "error": "Key not found"
        }), 404
    # The license must belong to the requesting discord account.
    if record["discord"] != discord_id:
        return jsonify({
            "error": "Discord not found"
        }), 404
    return jsonify({
        "require_renewal": record["renewal"],
        "expire_datetime": record["expire"],
        "plan": record["plan"]
    }), 200
@app.route("/transfer", methods=["POST"])
def transfer_endpoint():
"""
Transfer ownership of a given license by
deactivating the old one and create a new one
with the same expiry
Input in JSON format :
{
"from_license": <FROM_LICENSE_STR>,
"from_discord": <FROM_DISCORD_ID_STR>,
"to_discord": <TO_DISCORD_ID_STR>
}
:return: 200 OK
{
"license": <NEW_LICENSE_STR>,
"discord": <TO_DISCORD_ID_STR>
}
:return 404 Not Found
"""
if request.headers.get("Authorization") != API_KEY:
return jsonify({"error": "Invalid API Key"}), 401
data = request.json
license = data["from_license"]
discord = data["from_discord"]
to_discord = data["to_discord"]
# Check if the license exists or not, return 404 if not found
if license not in LICENSE_KEY_STORE:
return jsonify({
"error": "Key not found"
}), 404
# License exists
# Check if the specified discord is the same, return 404 if not
if LICENSE_KEY_STORE[license]["discord"] != discord:
return jsonify({
"error": "Discord not found"
}), 404
# Generate new license made up from random chars
new_license = [choice(ascii_uppercase) for _ in range(10)]
new_license = ''.join(new_license)
# Copy the old license to new license
LICENSE_KEY_STORE[new_license] = LICENSE_KEY_STORE[license]
# Change to new discord
LICENSE_KEY_STORE[new_license]["discord"] = to_discord
# Delete the old license
del LICENSE_KEY_STORE[license]
return jsonify({
"discord": LICENSE_KEY_STORE[new_license]["discord"],
"license": new_license,
"plan": LICENSE_KEY_STORE[new_license]["plan"]
}), 200
@app.route("/plan", methods=["GET"])
def plan_endpoint():
"""
Return list of available license plan
Input: None
:return: 200 OK
{
"plans" : ["plan1", "plan2", ...]
}
"""
return jsonify({"plans": PLANS})
@app.route("/gen-otp", methods=["POST"])
def gen_otp_endpoint():
"""
Generate a one time password to verify ownership of discord account
Input in JSON format :
{
"discord": <DISCORD_ID_STR>
}
:return: 200 OK
{
"status": "ok"
}
:return 401 Unauthorized
"""
if request.headers.get("Authorization") != API_KEY:
return jsonify({"error": "Invalid API Key"}), 401
data = request.json
discord = data["discord"]
discords = [val["discord"] for key, val in LICENSE_KEY_STORE.items()]
print(discords)
if discord not in discords:
return jsonify({"error": "Invalid discord id"}), 401
# Generate new OTP
otp = [choice(ascii_uppercase) for _ in range(5)]
otp = ''.join(otp)
OTP_STORE[discord] = otp
# Change this into a function that sends OTP to the user's discord
print(otp)
return jsonify({"success": True})
@app.route("/verify-otp", methods=["POST"])
def verify_otp_endpoint():
"""
Verify a one time password pre-generated before
Input in JSON format :
{
"discord": <DISCORD_ID_STR>
"otp": <OTP_CODE_STRING>
}
:return: 200 OK
{
"is_valid": <BOOLEAN>
}
:return 401 Unauthorized
"""
if request.headers.get("Authorization") != API_KEY:
return jsonify({"error": "Invalid API Key"}), 401
data = request.json
discord = data["discord"]
otp = data["otp"]
if discord not in OTP_STORE:
return jsonify({"error": "Invalid discord id"}), 404
return jsonify({"is_valid": OTP_STORE[discord] == otp})
if __name__ == "__main__":
app.run()
|
import requests
import smtplib
import urllib
import geopy
import json
import numpy
import matplotlib.pyplot as plt
import smopy
import Config
def get_location(geocode, path='/home/popschool/projects/Coordonnée/lonlatonly.txt'):
    """Read "lon,lat" lines from `path`, append {'lat':..,'lon':..} dicts to
    `geocode`, and return it.

    Fixes the leaked file handle (now a `with` block) and parameterizes the
    previously hard-coded path (same default, so callers are unaffected).
    Both fields are stripped so trailing newlines never leak into values.
    """
    with open(path, 'r') as lonlat:
        for line in lonlat:
            (lon, lat) = line.split(',')
            coord = {'lat': lat.strip(), 'lon': lon.strip()}
            geocode.append(coord)
    return geocode
def get_weather(geocode, weathercode):
    """Fetch current weather from OpenWeatherMap for every coordinate in
    `geocode` and append one summary dict per coordinate to `weathercode`.

    BUGFIXES vs the original:
      * lat/lon were swapped in the URL (`lat=` received the longitude);
      * `units=metrics` is not a valid OWM value (`metric` is; unknown values
        silently fall back to Kelvin);
      * iterate the list directly instead of a manual index counter that
        shadowed the builtin `list`.
    """
    api = Config.apikey
    for coord in geocode:
        lat = coord['lat']
        lon = coord['lon']
        url = ('http://api.openweathermap.org/data/2.5/weather?lat=' + lat +
               '&lon=' + lon + '&units=metric&appid=' + api)
        weather_r = requests.get(url)
        weather_j = weather_r.json()
        meteo = {
            'name': weather_j["name"],
            'visibility': weather_j["visibility"],
            'country': weather_j["sys"]["country"],
            'timezone': weather_j["timezone"],
            'temp': weather_j["main"]["temp"],
            'temp_max': weather_j["main"]["temp_max"],
            'temp_min': weather_j["main"]["temp_min"],
            'humidity': weather_j["main"]["humidity"],
            'pressure': weather_j["main"]["pressure"],
            'feels_like': weather_j["main"]["feels_like"],
            'wind': weather_j["wind"],
        }
        weathercode.append(meteo)
def get_info_area (geocode, weathercode) :
    # Prompt for a zone index and dump its coordinates and weather.
    # NOTE(review): this is Python 2 code -- input() here eval()s the typed
    # text (prefer raw_input + int()), and an out-of-range index raises.
    var = (input("\nNuméro de zone : "))
    print ""
    print "Coordonnees : ", geocode[var]
    print ""
    print'Météo à ces coordonnées : \n\n', weathercode[var]
def get_area(geocode, coord):
    """Compute the bounding box of every coordinate in `geocode`, pad it by a
    10% margin per side, append it to `coord` as a dict, and return `coord`.

    Replaces the original's manual index counter with direct iteration
    (identical results; assumes `geocode` is non-empty, as before).
    """
    lat_min = lat_max = float(geocode[0]['lat'])
    lon_min = lon_max = float(geocode[0]['lon'])
    for loc in geocode:
        lat = float(loc['lat'])
        lon = float(loc['lon'])
        lat_min = min(lat_min, lat)
        lat_max = max(lat_max, lat)
        lon_min = min(lon_min, lon)
        lon_max = max(lon_max, lon)
    # Pad each side by 10% of the box dimensions.
    marge_lon = ((lon_max - lon_min) / 100) * 10
    marge_lat = ((lat_max - lat_min) / 100) * 10
    lat_min -= marge_lat
    lat_max += marge_lat
    lon_min -= marge_lon
    lon_max += marge_lon
    MinMax = {'lat_min': lat_min, 'lat_max': lat_max, 'lon_min': lon_min, 'lon_max': lon_max}
    coord.append(MinMax)
    return (coord)
def get_map (coord, geocode) :
    # Render an OpenStreetMap tile map covering the bounding box in coord[0]
    # and plot every geocode coordinate as a red dot.
    # NOTE(review): confirm smopy.Map's argument order -- smopy documents
    # (lat_min, lon_min, lat_max, lon_max), but longitudes are passed first.
    map = smopy.Map(coord[0]['lon_min'], coord[0]['lat_min'], coord[0]['lon_max'], coord[0]['lat_max'], z=15)
    print coord
    a = 0
    ax = map.show_mpl(figsize=(8,6))
    for loc in geocode :
        x,y = map.to_pixels(float(geocode[a]['lon']), float(geocode[a]['lat']))
        ax.plot(x,y, 'or', ms=10, mew=1)
        a += 1
    plt.show()
    return True
def main():
    # Load coordinates, fetch their weather, then let the user choose between
    # a text dump (1) and a map rendering (2).
    geocode=[]
    weathercode=[]
    coord = []
    get_location(geocode)
    get_weather(geocode, weathercode)
    print '\nAffichage des données météos --> 1'
    print 'Affichage de la map --> 2'
    # NOTE(review): Python 2 input() eval()s the typed text -- prefer
    # raw_input with an explicit int() conversion.
    var = (input("\nEntrez votre choix : "))
    if (var == 1):
        get_info_area(geocode, weathercode)
    if (var == 2):
        get_area(geocode, coord)
        get_map(coord, geocode)
    if (var != 1):
        if (var !=2):
            print 'Choix invalide'
# Run immediately on import/execution (no __main__ guard in the original).
main()
from discord.ext import commands
import json
import aiohttp
import asyncio
# Load API tokens once at import time; expects tokens.json in the CWD.
with open('tokens.json') as f:
    tokens = json.load(f)
class dbl(commands.Cog):
    """Cog that periodically posts the bot's guild count to discordbots.org
    (DBL) and reports each response to a logging channel."""

    def __init__(self, bot):
        self.bot = bot
        self.session = self.bot.session          # reuse the bot's HTTP session
        self.token = tokens["botlists"]["dbl"]   # DBL API token from tokens.json
        # Kick off the stats-posting loop as soon as the cog is constructed.
        self.bot.loop.create_task(self.on_ready())

    # def __unload(self):
    #     self.bot.loop.create_task(self.session.close())

    async def send(self):
        """POST the current server count to DBL and log the HTTP status both
        to stdout and to the hard-coded log channel."""
        dump = json.dumps({
            'server_count': len(self.bot.guilds)
            #'server_count': 235
        })
        head = {
            'authorization': self.token,
            'content-type' : 'application/json'
        }
        url = 'https://discordbots.org/api/bots/508268149561360404/stats'
        async with self.bot.session.post(url, data=dump, headers=head) as resp:
            print('returned {0.status} for {1} on dbl'.format(resp, dump))
            await self.bot.get_channel(581803242992697346).send("**DBL:** Returned {0.status} for {1}".format(resp, dump))

    async def on_ready(self):
        # Post stats every 30 minutes for the bot's lifetime.
        while True:
            await self.send()
            await asyncio.sleep(1800)
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(dbl(bot))
# Generated by Django 3.1.2 on 2020-11-20 17:24
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the todo app: creates the Todo model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('status', models.TextField(choices=[('incomplete', 'INCOMPLETE'), ('paused', 'PAUSED'), ('complete', 'COMPLETE')], default='incomplete', verbose_name='status')),
                # NOTE(review): BinaryField stores raw bytes -- for a
                # completion flag a BooleanField looks intended; changing it
                # now would require a follow-up migration, so left as-is.
                ('complete', models.BinaryField(verbose_name='complete')),
            ],
        ),
    ]
|
from typing import List
def parse(instr: str) -> List[str]:
    """Split `instr` into blank-line-separated blocks, joining each block's
    inner lines with single spaces.

    The whole input is stripped first, so leading/trailing newlines do not
    produce empty blocks. Return annotation tightened from `List` to
    `List[str]` (the elements are always strings).
    """
    return [block.replace("\n", " ") for block in instr.strip().split("\n\n")]
|
"""Caching utilities for the merge-counts command line tool."""
import glob
import os
import json
import tempfile
from pathlib import Path
from typing import Dict, Optional
from logzero import logger
from . import errors
CACHE_POINTER_LOCATION = Path.home() / ".mergecounts-cache"
class DNAnexusFileCache:
    """Holds cached DNAnexus metadata keyed by file id: properties,
    describe-call results, and counts."""

    def __init__(self):
        """Start with empty in-memory caches."""
        self.properties = dict()
        self.describes = dict()
        self.counts = dict()

    def load_from_filesystem(self):
        """Populate the properties and describes caches from the on-disk
        cache folder."""
        self.properties = load_cached_properties_from_filesystem()
        self.describes = load_cached_describes_from_filesystem()
##############################
# Cache folder manipulations #
##############################
def get_cache_folder() -> Optional[Path]:
    """Gets the top level cache folder if it exists in the file at CACHE_POINTER_LOCATION.
    If that file does not exist, merge-counts has not instantiated a cache folder, so None
    is returned.

    Returns:
        Optional[Path]: the cache folder directory or None if a cache has not been
        created by merge-counts. (Annotation fixed: the function returns a Path,
        not a str.)
    """
    if not os.path.exists(CACHE_POINTER_LOCATION):
        return None
    # The cache location is always the first line of the pointer file.
    # BUGFIX: the original leaked the file handle via open(...).readlines().
    with open(CACHE_POINTER_LOCATION, "r") as pointer:
        cache_loc = pointer.readline().strip()
    if not os.path.exists(cache_loc):
        errors.raise_error(
            f"Cache pointed to in {CACHE_POINTER_LOCATION} does not exist! {cache_loc}."
        )
    return Path(cache_loc)
def create_new_cache_folder() -> None:
    """Creates a new cache folder as a temporary dir (assumes that an existing cache
    instantiated by merge-counts does not exist and errors if it does).
    """
    cache_folder_loc = get_cache_folder()
    if cache_folder_loc:
        errors.raise_error(
            f"Refusing to overwrite existing cache: {cache_folder_loc}",
            suggest_report=False,
        )
    new_cache_loc = tempfile.mkdtemp()
    with open(CACHE_POINTER_LOCATION, "w") as cache_pointer:
        # BUGFIX(idiom): writelines(str) iterates the string character by
        # character and only worked by accident; write() states the intent.
        cache_pointer.write(new_cache_loc)
    logger.info(
        "Created new cache folder pointer at %s to %s.",
        CACHE_POINTER_LOCATION,
        new_cache_loc,
    )
def clean_cache() -> None:
    """Remove the merge-counts cache folder and its pointer file, silently
    succeeding when either does not exist.
    """
    import shutil  # local import: the module does not otherwise use shutil

    cache_folder_loc = get_cache_folder()
    if not cache_folder_loc or not os.path.exists(cache_folder_loc):
        logger.debug("No cache folder to delete.")
    else:
        logger.debug("Removing cache folder: %s.", cache_folder_loc)
        # BUGFIX: os.removedirs() raises OSError on a non-empty directory,
        # and this cache stores JSON files in subfolders -- rmtree removes
        # the whole tree.
        shutil.rmtree(cache_folder_loc)
    if os.path.exists(CACHE_POINTER_LOCATION):
        logger.debug("Removing cache folder pointer.")
        os.remove(CACHE_POINTER_LOCATION)
def get_cached_properties_folder(silently_create: bool = True) -> Path:
    """Return the cache subfolder holding DNAnexus properties, one JSON file
    per dxid (filename = dxid, contents = properties object).

    Arguments:
        silently_create (bool): create the subfolder if missing (default).
                                When False, a missing folder is an error.

    Returns:
        Path: path to the properties cache subfolder.
    """
    properties_folder = get_cache_folder() / "properties"
    if os.path.exists(properties_folder):
        return properties_folder
    if silently_create:
        os.makedirs(properties_folder)
    else:
        errors.raise_error(
            f"Properties subfolder in cache does not exist: {properties_folder}!"
        )
    return properties_folder
def get_cached_describes_folder(silently_create: bool = True) -> Path:
    """Returns the subfolder within the cache that contains all DNAnexus describe calls for
    each dxid. In this folder, the filename is the dxid and the contents of each file
    are the DNAnexus describe calls as JSON objects.

    Arguments:
        silently_create (bool): if the subfolder does not exist, create before returning.
                                Defaults to True. If False, this will error if the folder
                                doesn't exist.

    Returns:
        Path: path to the subfolder containing the cached DNAnexus describe files.
    """
    describes_folder = get_cache_folder() / "describes"
    if not os.path.exists(describes_folder):
        if silently_create:
            os.makedirs(describes_folder)
        else:
            # Fixed a copy-pasted message that referred to the "Properties"
            # subfolder: this is the describes subfolder.
            errors.raise_error(
                f"Describes subfolder in cache does not exist: {describes_folder}!"
            )
    return describes_folder
def cache_properties_on_filesystem(dxid: str, properties: Dict) -> None:
    """Persist the DNAnexus properties for one file in the properties cache.

    Args:
        dxid (str): DNAnexus id of the file in question.
        properties (Dict): DNAnexus properties as a dict.
    """
    target = get_cached_properties_folder() / dxid
    with open(target, "w") as handle:
        json.dump(properties, handle)
def cache_describes_on_filesystem(dxid: str, describe: Dict) -> None:
    """Persist one DNAnexus describe call in the describes cache.

    Args:
        dxid (str): DNAnexus id of the file in question.
        describe (Dict): DNAnexus describe call as a dict.
    """
    target = get_cached_describes_folder() / dxid
    with open(target, "w") as handle:
        json.dump(describe, handle)
def load_cached_properties_from_filesystem() -> Dict:
    """Loads the cached DNAnexus properties from the appropriate subfolder in the
    merge-counts cache.

    Returns:
        Dict: all cached properties where the key is the DNAnexus file id and the value
              is the DNAnexus properties as a dict.
    """
    result = dict()
    path = str(get_cached_properties_folder() / "*")
    for filename in glob.glob(path):
        basename = os.path.basename(filename)
        # Context manager: the original left every file handle open for the
        # GC to collect.
        with open(filename, "r") as handle:
            result[basename] = json.load(handle)
    logger.info("Loaded %d entries from the properties cache.", len(result))
    return result
def load_cached_describes_from_filesystem() -> Dict:
    """Loads the cached DNAnexus describe calls from the appropriate subfolder in the
    merge-counts cache.

    Returns:
        Dict: all cached describes where the key is the DNAnexus file id and the value
              is the DNAnexus describe call as a dict.
    """
    result = dict()
    for filename in glob.glob(str(get_cached_describes_folder() / "*")):
        basename = os.path.basename(filename)
        # Context manager: the original left every file handle open for the
        # GC to collect.
        with open(filename, "r") as handle:
            result[basename] = json.load(handle)
    logger.info("Loaded %d entries from the describes cache.", len(result))
    return result
|
class UnpopulatedPropertyError(Exception):
    """Exception type for an unpopulated property (see raise sites for context)."""

    pass
class UnknownGranteeTypeError(Exception):
    """Exception type for an unknown grantee type (see raise sites for context)."""

    pass
|
# Generated by Django 3.2.7 on 2021-09-12 18:47
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (3.2.7): drops the old `tag` field on Post
# and re-adds it as a nullable ForeignKey to posts.Tag. Data held in the old
# column is not migrated.
class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0002_auto_20210912_2131'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='post',
            name='tag',
        ),
        # Optional FK; deleting a Tag cascades to the referencing Post rows.
        migrations.AddField(
            model_name='post',
            name='tag',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.tag'),
        ),
    ]
|
from p2pserver import *
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
try:
from tkinter import messagebox
import tkinter as tk
except ImportError:
import Tkinter as tk
import tkMessageBox as messagebox
# Server object from p2pserver, used for credential checking at login.
s = Server()
# NOTE(review): `global` at module level is a no-op — these three lines only
# document which names the functions below rebind.
global client_socket
global BUFSIZ
global msg_list
failure_max = 3  # maximum allowed failed login attempts before exiting
def make_entry(parent, caption, width=None, **options):
    """Create a captioned Entry widget inside `parent` and return the entry."""
    tk.Label(parent, text=caption).pack(side=tk.TOP)
    field = tk.Entry(parent, **options)
    if width:
        field.config(width=width)
    field.pack(side=tk.TOP, padx=10, fill=tk.BOTH)
    return field
def enter(event):
    """<Return> binding for the password field: run the login check."""
    check_password()
#============================Send==============================
def send(event=None):  # event is passed by binders.
    """Handles sending of one message from the input field to the server."""
    global client_socket
    msg = my_msg.get()
    print("the message sent is ", msg)
    my_msg.set("")  # Clears input field.
    # bytes(str) without an encoding raises TypeError on Python 3; encode
    # explicitly with utf8 to match the decode on the receiving side.
    client_socket.send(msg.encode("utf8"))
    if msg == "{quit}":
        client_socket.close()
        root.quit()
#=========================Receive===============================
def receive():
    """Handles receiving of messages."""
    # Runs on a background thread: every message received on the socket is
    # appended to the chat Listbox until the socket dies.
    global BUFSIZ
    global msg_list
    global client_socket
    while True:
        try:
            msg = client_socket.recv(BUFSIZ).decode("utf8")
            msg_list.insert(tk.END, msg)
        except OSError:  # Possibly client has left the chat.
            break
#============================Check User and Password===========================
def check_password(failures=[]):
    """ Collect 1's for every failure and quit program in case of failure_max failures """
    # NOTE: the mutable default argument is deliberate — it persists across
    # calls and acts as the failed-attempt counter.
    #print(user.get(), password.get())
    auth = (user.get(), password.get())
    if s.check_login(auth):
        messagebox.showinfo("Successful", "Login Successful")
        # Hide the login window and open the chat window.
        root.withdraw()
        homewindow()
    else:
        messagebox.showerror("Error", "Incorrect Username or password")
        failures.append(1)
        if sum(failures) >= failure_max:
            messagebox.showwarning("Login_attempt", "Max login attempts reached")
            root.destroy()
            raise SystemExit('Unauthorized login attempt')
        else:
            root.title('Try again. Attempt %i/%i' % (sum(failures)+1, failure_max))
#================================Main Home Page===============================
def homewindow():
    """Build the chat window, prompt for the server address on stdin, connect,
    and start the receive thread plus the GUI main loop."""
    global msg_list
    # NOTE(review): this local `root` shadows the module-level login window;
    # send() and on_closing() still reference the *global* root — confirm
    # this is intended.
    root = tk.Tk()
    root.title("Chatter")
    messages_frame = tk.Frame(root)
    scrollbar = tk.Scrollbar(messages_frame)  # To navigate through past messages.
    # Following will contain the messages.
    msg_list = tk.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    msg_list.pack(side=tk.LEFT, fill=tk.BOTH)
    msg_list.pack()
    messages_frame.pack()
    entry_field = tk.Entry(root, textvariable=my_msg)
    entry_field.bind("<Return>", send)
    entry_field.pack()
    send_button = tk.Button(root, text="Send", command=send)
    send_button.pack()
    root.protocol("WM_DELETE_WINDOW", on_closing)
    # ----Now comes the sockets part----
    HOST = input('Enter host: ')
    PORT = input('Enter port: ')
    if not PORT:
        PORT = 33000  # default port when the user enters nothing
    else:
        PORT = int(PORT)
    global BUFSIZ
    BUFSIZ = 1024
    ADDR = (HOST, PORT)
    global client_socket
    client_socket = socket(AF_INET, SOCK_STREAM)
    client_socket.connect(ADDR)
    # Background thread that keeps filling msg_list with incoming messages.
    receive_thread = Thread(target=receive)
    receive_thread.start()
    #send_thread = Thread(target=send)
    #send_thread.start()
    root.mainloop()  # Starts GUI execution.
#=========================Closes Window========================
def on_closing(event=None):
    """This function is to be called when the window is closed."""
    # Send the {quit} sentinel; send() then closes the socket and quits the
    # GUI loop.
    my_msg.set("{quit}")
    send()
#=============================Login=============================
# Build the login window: username/password entries plus a Login button.
root = tk.Tk()
my_msg = tk.StringVar()  # For the messages to be sent.
name = my_msg.get()
my_msg.set(name)
#my_msg.set("Type your messages here.")
root.geometry('300x160')
root.title('Enter your information')
# frame for window margin
parent = tk.Frame(root, padx=10, pady=10)
parent.pack(fill=tk.BOTH, expand=True)
# entries with not shown text (the password field masks its input)
user = make_entry(parent, "User name:", 16)
password = make_entry(parent, "Password:", 16, show="*")
# button to attempt to login
b = tk.Button(parent, borderwidth=4, text="Login", width=20, pady=8, command=check_password)
b.pack(side=tk.BOTTOM)
password.bind('<Return>', enter)
user.focus_set()
parent.mainloop()
|
""""
date: 06-10-2020
author: Poulomi Chatterjee
""""
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
plt.rc("font", size=14)  # default font size for all plots below
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
# The second sns.set() overrides the first (whitegrid + color codes win).
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
data = pd.read_csv(r"banking.csv")
data = data.dropna()  # to remove null rows
# reduce number of categories: merge the three basic.* education levels
data['education'] = np.where(data['education'] == 'basic.9y', 'Basic', data['education'])
data['education'] = np.where(data['education'] == 'basic.6y', 'Basic', data['education'])
data['education'] = np.where(data['education'] == 'basic.4y', 'Basic', data['education'])
# create dummy variables for every categorical column and append them
cat_vars = ['job', 'marital', 'education', 'default', 'housing', 'loan',
            'contact', 'month', 'day_of_week', 'poutcome']
for var in cat_vars:
    # (removed a dead `cat_list='var'+'_'+var` assignment that was
    # immediately overwritten, and a duplicate redefinition of cat_vars)
    dummies = pd.get_dummies(data[var], prefix=var)
    data = data.join(dummies)
data_vars = data.columns.values.tolist()
# drop the raw categorical columns, keeping only numeric + dummy columns
to_keep = [i for i in data_vars if i not in cat_vars]
data_final = data[to_keep]  # final columns
# SMOTE: oversample the minority class on the training split only
X = data_final.loc[:, data_final.columns != 'y']
y = data_final.loc[:, data_final.columns == 'y']
from imblearn.over_sampling import SMOTE
# Renamed from `os`, which shadowed the stdlib os module.
smote = SMOTE(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
columns = X_train.columns
# NOTE(review): fit_sample was renamed fit_resample in imblearn >= 0.4 —
# confirm the installed version supports this call.
os_data_X, os_data_y = smote.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X, columns=columns)
os_data_y = pd.DataFrame(data=os_data_y, columns=['y'])
#Recursive model fitting to determine the best or worst performing feature
data_final_vars=data_final.columns.values.tolist()
y=['y']
X=[i for i in data_final_vars if i not in y]
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
# NOTE(review): positional `20` is n_features_to_select in older sklearn;
# recent versions require it as a keyword argument — confirm version.
rfe = RFE(logreg, 20)
rfe = rfe.fit(os_data_X, os_data_y.values.ravel())
# Hand-picked feature subset used for the final models below.
cols=['euribor3m', 'job_blue-collar', 'job_housemaid', 'marital_unknown', 'education_illiterate',
      'month_apr', 'month_aug', 'month_dec', 'month_jul', 'month_jun', 'month_mar',
      'month_may', 'month_nov', 'month_oct', "poutcome_failure", "poutcome_success"]
X=os_data_X[cols]
y=os_data_y['y']
#implementation of the model (statsmodels gives coefficient p-values)
import statsmodels.api as sm
logit_model=sm.Logit(y,X)
result=logit_model.fit()
#Logistic Regression model fitting
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
#predicting test results and finding accuracy
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
#Confusion matrix
from sklearn.metrics import confusion_matrix
# Store under a different name: rebinding `confusion_matrix` would shadow the
# imported function and break any later call to it.
cm = confusion_matrix(y_test, y_pred)
print(cm)
#ROC curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# AUC must be computed from scores/probabilities, not hard class predictions,
# otherwise it collapses to the accuracy-like value of a single threshold.
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
#end
|
# mmcls-style config: inherit everything from the ActNN RepVGG-A0 baseline
# and disable activation compression for this run.
_base_ = ['../actnn/repvggA0_b256x8_imagenet.py']
actnn = False
|
import numpy as np
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import eval_detection_coco
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCOSingleClass(unittest.TestCase):
    """eval_detection_coco behaviour for a single image with one class."""

    def setUp(self):
        # One image: two predicted boxes against a single ground-truth box.
        # NOTE(review): gt_labels carries two labels for one gt box —
        # confirm eval_detection_coco tolerates the extra entry.
        self.pred_bboxes = np.array([[[0, 0, 10, 10], [0, 0, 20, 20]]])
        self.pred_labels = np.array([[0, 0]])
        self.pred_scores = np.array([[0.8, 0.9]])
        self.gt_bboxes = np.array([[[0, 0, 10, 9]]])
        self.gt_labels = np.array([[0, 0]])

    def test_crowded(self):
        result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                     self.pred_scores,
                                     self.gt_bboxes, self.gt_labels,
                                     gt_crowdeds=[[True]])
        # When the only ground truth is crowded, nothing is evaluated.
        # In that case, all the results are nan.
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=all/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.50/area=all/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.75/area=all/max_dets=100']))

    def test_area_not_supplied(self):
        # Without gt_areas, the per-size breakdown keys must be absent.
        result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                     self.pred_scores,
                                     self.gt_bboxes, self.gt_labels)
        self.assertFalse(
            'map/iou=0.50:0.95/area=small/max_dets=100' in result)
        self.assertFalse(
            'map/iou=0.50:0.95/area=medium/max_dets=100' in result)
        self.assertFalse(
            'map/iou=0.50:0.95/area=large/max_dets=100' in result)

    def test_area_specified(self):
        # Area 2048 falls in the "medium" bucket; the other buckets are nan.
        result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                     self.pred_scores,
                                     self.gt_bboxes, self.gt_labels,
                                     gt_areas=[[2048]])
        self.assertFalse(
            np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
        self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCOSomeClassNonExistent(unittest.TestCase):
    """Behaviour when some class ids never occur in predictions or gt."""

    def setUp(self):
        # Labels 1 and 2 are used; label 0 never appears anywhere.
        # NOTE(review): gt_labels has two labels for a single gt box —
        # confirm this mismatch is tolerated by eval_detection_coco.
        self.pred_bboxes = np.array([[[0, 0, 10, 10], [0, 0, 20, 20]]])
        self.pred_labels = np.array([[1, 2]])
        self.pred_scores = np.array([[0.8, 0.9]])
        self.gt_bboxes = np.array([[[0, 0, 10, 9]]])
        self.gt_labels = np.array([[1, 2]])

    def test(self):
        result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
                                     self.pred_scores,
                                     self.gt_bboxes, self.gt_labels)
        # Per-class AP spans labels 0..2; the absent class 0 is nan and the
        # reported mean must be the nan-ignoring mean of the rest.
        self.assertEqual(
            result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (3,))
        self.assertTrue(
            np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
        self.assertEqual(
            np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
            result['map/iou=0.50:0.95/area=all/max_dets=100'])
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCO(unittest.TestCase):
    """Regression test of eval_detection_coco against precomputed fixtures."""

    @classmethod
    def setUpClass(cls):
        # Download the .npz fixtures once for the whole class; urlretrieve
        # returns (local_path, headers), hence the [0].
        base_url = 'https://chainercv-models.preferred.jp/tests'
        cls.dataset = np.load(request.urlretrieve(os.path.join(
            base_url, 'eval_detection_coco_dataset_2017_10_16.npz'))[0])
        cls.result = np.load(request.urlretrieve(os.path.join(
            base_url, 'eval_detection_coco_result_2017_10_16.npz'))[0])

    def test_eval_detection_coco(self):
        pred_bboxes = self.result['bboxes']
        pred_labels = self.result['labels']
        pred_scores = self.result['scores']

        gt_bboxes = self.dataset['bboxes']
        gt_labels = self.dataset['labels']
        gt_areas = self.dataset['areas']
        gt_crowdeds = self.dataset['crowdeds']

        result = eval_detection_coco(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

        # Golden mean values computed with a known-good implementation.
        expected = {
            'map/iou=0.50:0.95/area=all/max_dets=100': 0.5069852,
            'map/iou=0.50/area=all/max_dets=100': 0.69937725,
            'map/iou=0.75/area=all/max_dets=100': 0.57538619,
            'map/iou=0.50:0.95/area=small/max_dets=100': 0.58562572,
            'map/iou=0.50:0.95/area=medium/max_dets=100': 0.51939969,
            'map/iou=0.50:0.95/area=large/max_dets=100': 0.5013979,
            'mar/iou=0.50:0.95/area=all/max_dets=1': 0.38919373,
            'mar/iou=0.50:0.95/area=all/max_dets=10': 0.59606053,
            'mar/iou=0.50:0.95/area=all/max_dets=100': 0.59773394,
            'mar/iou=0.50:0.95/area=small/max_dets=100': 0.63981096,
            'mar/iou=0.50:0.95/area=medium/max_dets=100': 0.5664206,
            'mar/iou=0.50:0.95/area=large/max_dets=100': 0.5642906
        }

        # Classes that never occur must get nan per-class scores.
        non_existent_labels = np.setdiff1d(
            np.arange(max(result['existent_labels'])),
            result['existent_labels'])
        for key, item in expected.items():
            # Strip the leading 'm' ('map' -> 'ap', 'mar' -> 'ar') to get the
            # corresponding per-class array key.
            non_mean_key = key[1:]
            self.assertIsInstance(result[non_mean_key], np.ndarray)
            self.assertEqual(result[non_mean_key].shape, (80,))
            self.assertTrue(
                np.all(np.isnan(result[non_mean_key][non_existent_labels])))
            np.testing.assert_almost_equal(
                result[key], expected[key], decimal=5)
# Allow running this test module directly from the command line.
testing.run_module(__name__, __file__)
|
import flask
import base64
from portal.utils import get_vc3_client
from portal import app
from portal.decorators import authenticated
@app.route('/rest/virtual_cluster/<name>', methods=['GET'])
@authenticated
def virtual_cluster(name):
    """
    Get information for a specified cluster and return
    it in as json or jsonp

    :return: json or jsonp status of cluster
    """
    result = {}
    vc3_client = get_vc3_client()
    virtual_clusters = vc3_client.listRequests()
    nodesets = vc3_client.listNodesets()
    for vc in virtual_clusters:
        if vc.name == name:
            sanitized_obj = {'name': vc.name,
                             'state': vc.state,
                             'cluster': vc.cluster,
                             'statusraw': vc.statusraw,
                             'statusinfo': vc.statusinfo,
                             'displayname': vc.displayname,
                             'description': vc.description,
                             'statereason': vc.state_reason,
                             'action': vc.action,
                             'headnode': vc.headnode}
            # Truthiness (rather than `is not None`) also guards against an
            # empty statusinfo dict.
            if vc.statusinfo:
                # dict.keys() is not subscriptable on Python 3; take the
                # first key via an iterator (works on 2 and 3) and hoist the
                # repeated lookup.
                status = vc.statusinfo[next(iter(vc.statusinfo))]
                sanitized_obj['statusinfo_error'] = status['error']
                sanitized_obj['statusinfo_idle'] = status['idle']
                sanitized_obj['statusinfo_node_number'] = status['node_number']
                sanitized_obj['statusinfo_requested'] = status['requested']
                sanitized_obj['statusinfo_running'] = status['running']
            # Attach details of the headnode's nodeset, when one matches.
            for nodeset in nodesets:
                if nodeset.name == vc.headnode:
                    sanitized_obj['headnode_app_host'] = nodeset.app_host
                    sanitized_obj['headnode_app_type'] = nodeset.app_type
                    sanitized_obj['headnode_state'] = nodeset.state
                    sanitized_obj['headnode_state_reason'] = nodeset.state_reason
            return flask.jsonify(sanitized_obj)
    # Unknown cluster name: empty body with 404.
    return flask.jsonify(result), 404
@app.route('/rest/allocation/<name>', methods=['GET'])
@authenticated
def allocation(name):
    """
    Get information for a specified allocation and return
    it in as json or jsonp

    :return: json or jsonp status of allocation
    """
    result = {}
    vc3_client = get_vc3_client()
    allocations = vc3_client.listAllocations()
    for x in allocations:
        if x.name == name:
            sanitized_obj = {'name': x.name,
                             'state': x.state,
                             'action': x.action,
                             'owner': x.owner,
                             'displayname': x.displayname,
                             'description': x.description,
                             'statereason': x.state_reason,
                             'pubtoken': x.pubtoken}
            if x.pubtoken:
                # b64decode returns bytes on Python 3; decode before
                # stripping the trailing newline so jsonify receives a str.
                sanitized_obj['pubtoken'] = base64.b64decode(
                    x.pubtoken).decode('utf-8').rstrip('\n')
            return flask.jsonify(sanitized_obj)
    # Unknown allocation name: empty body with 404.
    return flask.jsonify(result), 404
|
# -*- coding: utf-8 -*-
"""
Island with single jungle cell, first herbivores only, later carnivores.
"""
__author__ = 'Hans Ekkehard Plesser, NMBU'
import textwrap
from src.biosim.biosim import BioSim
# 3x3 map: water ('W') border around a single habitable cell.
# NOTE(review): the module docstring says "jungle cell" while the map letter
# is 'L' — confirm which legend this BioSim version uses.
geogr = """WWW
WLW
WWW"""
geogr = textwrap.dedent(geogr)

# 50 herbivores placed in the single habitable cell (2, 2).
ini_herbs = [{'loc': (2, 2),
              'pop': [{'species': 'Herbivore',
                       'age': 5,
                       'weight': 20}
                      for _ in range(50)]}]
# 20 carnivores, introduced only after the herbivore-only phase below.
ini_carns = [{'loc': (2, 2),
              'pop': [{'species': 'Carnivore',
                       'age': 5,
                       'weight': 20}
                      for _ in range(20)]}]

# Three seeds: 50 years herbivores only, then add carnivores and continue
# for 251 more years, saving images for each run.
for seed in range(100, 103):
    sim = BioSim(geogr, ini_herbs, seed=seed,
                 img_dir='results', img_base=f'mono_hc_{seed:05d}', img_years=300)
    sim.simulate(50)
    sim.add_population(ini_carns)
    sim.simulate(251)
|
#t Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import sparseconvnet as scn
import time
import os, sys
import math
import numpy as np
import wandb
import argparse
# Command-line hyperparameters for this training run.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--category', default=1, type=int)
parser.add_argument('--lr', default=1e-1, type=float)
parser.add_argument('--lr_decay', default=4e-2, type=float)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
args = parser.parse_args()

# Run configuration; the lr decays exponentially per epoch in the loop below.
p = {}
p['n_epochs'] = 200
p['initial_lr'] = args.lr
p['lr_decay'] = args.lr_decay
p['weight_decay'] = args.weight_decay
p['momentum'] = args.momentum
p['check_point'] = False  # set True to resume from epoch.pth / model.pth
p['use_cuda'] = torch.cuda.is_available()

wandb.init()
wandb.config.update(args, allow_val_change=True)

# data.init(-1,24,24*8,16)
print(args)
data.init(int(args.category), 24, 24*8, 16)
dimension = 3
reps = 1  # Conv block repetition factor
m = 32  # Unet number of features
nPlanes = [m, 2*m, 3*m, 4*m, 5*m]  # UNet number of features per level
class Model(nn.Module):
    """Submanifold sparse 3D U-Net followed by a per-point linear classifier."""

    def __init__(self):
        nn.Module.__init__(self)
        # Input layer -> 1->m submanifold conv -> U-Net -> BN+ReLU -> dense output.
        self.sparseModel = scn.Sequential().add(
            scn.InputLayer(dimension, data.spatialSize, mode=3)).add(
            scn.SubmanifoldConvolution(dimension, 1, m, 3, False)).add(
            scn.UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2])).add(
            scn.BatchNormReLU(m)).add(
            scn.OutputLayer(dimension))
        self.linear = nn.Linear(m, data.nClassesTotal)

    def forward(self, x):
        # Returns per-point logits over all classes of all categories.
        x = self.sparseModel(x)
        x = self.linear(x)
        return x
model = Model()
wandb.watch(model)
trainIterator = data.train()
validIterator = data.valid()
criterion = nn.CrossEntropyLoss()
# Tensor type names used to move batches to GPU/CPU uniformly via .type().
dtype = 'torch.cuda.FloatTensor' if p['use_cuda'] else 'torch.FloatTensor'
dtypei = 'torch.cuda.LongTensor' if p['use_cuda'] else 'torch.LongTensor'
if p['use_cuda']:
    model.cuda()
    criterion.cuda()
optimizer = optim.SGD(model.parameters(),
                      lr=p['initial_lr'],
                      momentum=p['momentum'],
                      weight_decay=p['weight_decay'],
                      nesterov=True)
# Optionally resume from the last checkpoint written by the training loop.
if p['check_point'] and os.path.isfile('epoch.pth'):
    p['epoch'] = torch.load('epoch.pth') + 1
    print('Restarting at epoch ' +
          str(p['epoch']) +
          ' from model.pth ..')
    model.load_state_dict(torch.load('model.pth'))
else:
    p['epoch'] = 1
print(p)
print('#parameters', sum([x.nelement() for x in model.parameters()]))
def store(stats, batch, predictions, loss):
    """Accumulate per-CAD-model prediction scores and labels into `stats`.

    stats is nested as stats[category][model_file] = {'p': summed score
    array restricted to the model's class range, 'y': zero-based labels}.
    The batch is flat, so a running point offset slices out each model.
    """
    offset = 0
    preds = predictions.detach()
    labels = batch['y'].detach()
    for n_points, path, class_offset, n_classes in zip(
            batch['nPoints'], batch['xf'], batch['classOffset'], batch['nClasses']):
        # Last two path components identify the category and the model file.
        category, model_file = path.split('/')[-2:]
        entry = stats.setdefault(category, {}).setdefault(model_file, {'p': 0, 'y': 0})
        entry['p'] += preds[offset:offset + n_points,
                            class_offset:class_offset + n_classes].cpu().numpy()
        entry['y'] = labels[offset:offset + n_points].cpu().numpy() - class_offset
        offset += n_points
def inter(pred, gt, label):
    """Intersection count: points labelled `label` in both pred and gt."""
    assert pred.size == gt.size, 'Predictions incomplete!'
    pred_match = pred.astype('int') == label
    gt_match = gt.astype('int') == label
    return np.sum(pred_match & gt_match)
def union(pred, gt, label):
    """Union count: points labelled `label` in either pred or gt."""
    assert pred.size == gt.size, 'Predictions incomplete!'
    pred_match = pred.astype('int') == label
    gt_match = gt.astype('int') == label
    return np.sum(pred_match | gt_match)
def point_cloud_examples(x, y, predictions, splits):
    """Package inputs, gt labels, and predicted labels as wandb 3D objects.

    x[0] holds the batch's point features (first 3 columns are coordinates);
    `splits` gives points per CAD model so the flat batch can be re-split.
    Returns one wandb.Object3D list per view (input / gt / prediction).
    """
    input_data = x[0].numpy()
    points = input_data[:, :3]
    ## Create y label point cloud
    # Wrap for concat
    y_numpy = y.cpu().numpy()
    y_labels = np.expand_dims(y_numpy, axis=1)
    points_with_y_label = np.concatenate((points, y_labels), axis=1)
    ## Transform loss predictions into a point cloud with labels
    predictions_numpy = predictions.detach().cpu().numpy()
    predictions_highest = predictions_numpy.argmax(axis=1)
    predictions_labels = np.expand_dims(predictions_highest, axis=1)
    points_with_predictions_label = np.concatenate((points, predictions_labels), axis=1)
    # Split the flat batch back into one array per CAD model.
    idxs = np.cumsum(splits)
    xs = np.split(input_data, idxs)
    ys = np.split(points_with_y_label, idxs)
    ps = np.split(points_with_predictions_label, idxs)
    return {"x": [wandb.Object3D(x) for x in xs],
            "y": [wandb.Object3D(y) for y in ys],
            "predictions": [wandb.Object3D(p) for p in ps]}
def iou(stats):
    """Compute the model-count-weighted mean IoU from accumulated `stats`.

    Returns a dict with the weighted mean over categories ('iou'), the total
    number of models ('nmodels_sum'), and per-category IoUs ('iou_all').
    """
    # Epsilon smoothing: a part absent from both pred and gt scores 1, not 0/0.
    eps = sys.float_info.epsilon
    categories = sorted(stats.keys())
    ncategory = len(categories)
    iou_all = np.zeros(ncategory)
    nmodels = np.zeros(ncategory, dtype='int')
    for i, categ in enumerate(categories):
        nmodels[i] = len(stats[categ])
        pred = []
        gt = []
        for j in stats[categ].values():
            pred.append(j['p'].argmax(1))
            gt.append(j['y'])
        # number of parts in this category (labels assumed 0..npart-1)
        npart = np.max(np.concatenate(gt)) + 1
        iou_per_part = np.zeros((len(pred), npart))
        # loop over parts
        for j in range(npart):
            # loop over CAD models
            for k in range(len(pred)):
                p = pred[k]
                iou_per_part[k, j] = (inter(p, gt[k], j) + eps) / (union(p, gt[k], j) + eps)
        # average over CAD models and parts
        iou_all[i] = np.mean(iou_per_part)
    # weighted average over categories
    iou_weighted_ave = np.sum(iou_all * nmodels) / np.sum(nmodels)
    return {'iou': iou_weighted_ave, 'nmodels_sum': nmodels.sum(), 'iou_all': iou_all}
# Main training loop with periodic validation at selected epochs.
for epoch in range(p['epoch'], p['n_epochs'] + 1):
    model.train()
    stats = {}
    # Exponential per-epoch learning-rate decay.
    for param_group in optimizer.param_groups:
        param_group['lr'] = p['initial_lr'] * \
            math.exp((1 - epoch) * p['lr_decay'])
    scn.forward_pass_multiplyAdd_count = 0
    scn.forward_pass_hidden_states = 0
    start = time.time()
    for batch in trainIterator:
        optimizer.zero_grad()
        # Move features/labels/mask onto the configured device.
        batch['x'][1] = batch['x'][1].type(dtype)
        batch['y'] = batch['y'].type(dtypei)
        batch['mask'] = batch['mask'].type(dtype)
        predictions = model(batch['x'])
        # Compare predictions against labels and accumulate per-model stats.
        loss = criterion.forward(predictions, batch['y'])
        store(stats, batch, predictions, loss)
        loss.backward()
        optimizer.step()
    r = iou(stats)
    wandb.log({'train epoch': epoch, 'iou': r['iou'], 'iou_all': r['iou_all'], 'MegaMulAdd=': scn.forward_pass_multiplyAdd_count/r['nmodels_sum']/1e6, 'MegaHidden': scn.forward_pass_hidden_states/r['nmodels_sum']/1e6, 'time': time.time() - start})
    if p['check_point']:
        torch.save(epoch, 'epoch.pth')
        torch.save(model.state_dict(), 'model.pth')

    examples = None;
    # Validation at selected epochs only; three passes over the valid set.
    if epoch in [1, 10, 30, 100]:
        model.eval()
        stats = {}
        scn.forward_pass_multiplyAdd_count = 0
        scn.forward_pass_hidden_states = 0
        start = time.time()
        for rep in range(1, 1+3):
            for batch in validIterator:
                batch['x'][1] = batch['x'][1].type(dtype)
                batch['y'] = batch['y'].type(dtypei)
                batch['mask'] = batch['mask'].type(dtype)
                predictions = model(batch['x'])
                # Keep the last batch's point clouds for the wandb log below.
                examples = point_cloud_examples(batch['x'], batch['y'], predictions, batch['nPoints'])
                loss = criterion.forward(predictions, batch['y'])
                store(stats, batch, predictions, loss)
        r = iou(stats)
        log_data = {'train epoch': epoch,
                    'iou': r['iou'],
                    'iou_all': r['iou_all'],
                    'MegaMulAdd=': scn.forward_pass_multiplyAdd_count/r['nmodels_sum']/1e6,
                    'MegaHidden': scn.forward_pass_hidden_states/r['nmodels_sum']/1e6,
                    'time': time.time() - start}
        with_ex = {**examples, **log_data}
        print("wlog", with_ex)
        wandb.log(with_ex)
|
from __future__ import (
annotations,
)
import logging
from asyncio import (
TimeoutError,
)
from collections.abc import (
AsyncIterator,
Iterable,
)
from functools import (
partial,
)
from typing import (
Optional,
)
import aiopg
from aiopg import (
Connection,
Cursor,
)
from psycopg2 import (
IntegrityError,
OperationalError,
ProgrammingError,
)
from minos.common import (
CircuitBreakerMixin,
ConnectionException,
DatabaseClient,
IntegrityException,
ProgrammingException,
)
from .operations import (
AiopgDatabaseOperation,
)
logger = logging.getLogger(__name__)
class AiopgDatabaseClient(DatabaseClient, CircuitBreakerMixin):
    """Aiopg Database Client class."""

    # Lazily-created connection/cursor; both None until first use.
    _connection: Optional[Connection]
    _cursor: Optional[Cursor]

    def __init__(
        self,
        database: str,
        host: Optional[str] = None,
        port: Optional[int] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        circuit_breaker_exceptions: Iterable[type] = tuple(),
        connection_timeout: Optional[float] = None,
        cursor_timeout: Optional[float] = None,
        *args,
        **kwargs,
    ):
        # Connection failures always participate in the circuit breaker, in
        # addition to any caller-supplied exception types.
        super().__init__(
            *args,
            **kwargs,
            circuit_breaker_exceptions=(ConnectionException, *circuit_breaker_exceptions),
        )
        # Defaults mirror a stock local PostgreSQL setup.
        if host is None:
            host = "localhost"
        if port is None:
            port = 5432
        if user is None:
            user = "postgres"
        if password is None:
            password = ""
        if connection_timeout is None:
            connection_timeout = 1
        if cursor_timeout is None:
            cursor_timeout = 60

        self._database = database
        self._host = host
        self._port = port
        self._user = user
        self._password = password

        self._connection_timeout = connection_timeout
        self._cursor_timeout = cursor_timeout

        self._connection = None
        self._cursor = None

    async def _setup(self) -> None:
        # Lifecycle hook: open the connection when the client is set up.
        await super()._setup()
        await self.recreate()

    async def _destroy(self) -> None:
        # Lifecycle hook: close the connection on teardown.
        await super()._destroy()
        await self.close()

    async def recreate(self) -> None:
        """Recreate the database connection.

        :return: This method does not return anything.
        """
        await self.close()
        # Connect through the circuit breaker so repeated failures trip it.
        self._connection = await self.with_circuit_breaker(self._connect)
        logger.debug(f"Created {self.database!r} database connection identified by {id(self._connection)}!")

    async def _connect(self) -> Connection:
        # Translate driver-level connect errors into ConnectionException so
        # the circuit breaker and callers see a single exception type.
        try:
            return await aiopg.connect(
                timeout=self._connection_timeout,
                host=self.host,
                port=self.port,
                dbname=self.database,
                user=self.user,
                password=self.password,
            )
        except (OperationalError, TimeoutError) as exc:
            raise ConnectionException(f"There was not possible to connect to the database: {exc!r}")

    async def close(self) -> None:
        """Close database connection.

        :return: This method does not return anything.
        """
        if await self.is_connected():
            await self._connection.close()
        if self._connection is not None:
            logger.debug(f"Destroyed {self.database!r} database connection identified by {id(self._connection)}!")
            self._connection = None

    async def is_connected(self) -> bool:
        """Check if the client is connected.

        :return: ``True`` if it is connected or ``False`` otherwise.
        """
        if self._connection is None:
            return False
        try:
            # This operation connects to the database and raises an exception if something goes wrong.
            self._connection.isolation_level
        except OperationalError:
            return False
        return not self._connection.closed

    async def _reset(self, **kwargs) -> None:
        # Dropping the cursor is enough to reset between operations; the
        # connection itself stays alive.
        await self._destroy_cursor(**kwargs)

    # noinspection PyUnusedLocal
    async def _fetch_all(self) -> AsyncIterator[tuple]:
        # Stream rows from the cursor created by the last _execute() call.
        if self._cursor is None:
            raise ProgrammingException("An operation must be executed before fetching any value.")

        try:
            async for row in self._cursor:
                yield row
        except ProgrammingError as exc:
            raise ProgrammingException(str(exc))
        except OperationalError as exc:
            raise ConnectionException(f"There was not possible to connect to the database: {exc!r}")

    # noinspection PyUnusedLocal
    async def _execute(self, operation: AiopgDatabaseOperation) -> None:
        if not isinstance(operation, AiopgDatabaseOperation):
            raise ValueError(f"The operation must be a {AiopgDatabaseOperation!r} instance. Obtained: {operation!r}")
        # Execute through the circuit breaker, reconnecting lazily if needed.
        fn = partial(self._execute_cursor, operation=operation.query, parameters=operation.parameters)
        await self.with_circuit_breaker(fn)

    async def _execute_cursor(self, operation: str, parameters: dict):
        # Reconnect if the connection has gone away since the last call.
        if not await self.is_connected():
            await self.recreate()

        self._cursor = await self._connection.cursor(timeout=self._cursor_timeout)
        try:
            await self._cursor.execute(operation=operation, parameters=parameters)
        except OperationalError as exc:
            raise ConnectionException(f"There was not possible to connect to the database: {exc!r}")
        except IntegrityError as exc:
            raise IntegrityException(f"The requested operation raised a integrity error: {exc!r}")

    async def _destroy_cursor(self, **kwargs):
        # NOTE(review): the cursor's close() is called without await here —
        # confirm this matches the installed aiopg version's API.
        if self._cursor is not None:
            if not self._cursor.closed:
                self._cursor.close()
            self._cursor = None

    @property
    def cursor(self) -> Optional[Cursor]:
        """Get the cursor.

        :return: A ``Cursor`` instance.
        """
        return self._cursor

    @property
    def connection(self) -> Optional[Connection]:
        """Get the connection.

        :return: A ``Connection`` instance.
        """
        return self._connection

    @property
    def database(self) -> str:
        """Get the database's database.

        :return: A ``str`` value.
        """
        return self._database

    @property
    def host(self) -> str:
        """Get the database's host.

        :return: A ``str`` value.
        """
        return self._host

    @property
    def port(self) -> int:
        """Get the database's port.

        :return: An ``int`` value.
        """
        return self._port

    @property
    def user(self) -> str:
        """Get the database's user.

        :return: A ``str`` value.
        """
        return self._user

    @property
    def password(self) -> str:
        """Get the database's password.

        :return: A ``str`` value.
        """
        return self._password
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
import datetime
from django.template import Template, Context
raw_template = """
<p>Dear {{ person_name }},</p>
<p>Thanks for placing an order from {{ company }}. It's scheduled to
ship on {{ ship_date|date:"F j, Y" }}.</p>
{% if ordered_warranty %}
<p>Your warranty information will be included in the packaging.</p>
{% else %}
<p>You didn't order a warranty, so you're on your own when
the products inevitably stop working.</p>
{% endif %}
<p>Sincerely,<br />{{ company }}</p>"""
t = Template(raw_template)
c = Context({'person_name': 'houxiurong', 'company': 'Ngari Health',
'ship_date': datetime.date(2016, 11, 25),
'ordered_warranty': False})
t.render(c)
|
# 参考
# https://ikatakos.com/pot/programming/python/packages/numba
# >>> numba compile >>>
import sys
import numpy as np
def numba_compile(numba_config):
    """Compile or load the functions listed in *numba_config* ([func, signature] pairs).

    Three modes (AtCoder-style AOT workflow):
    - argv ends with "ONLINE_JUDGE": AOT-compile everything into ``my_module`` and exit;
    - POSIX (the judge at run time): import the precompiled names from ``my_module``
      and publish them into this module's globals;
    - otherwise (local dev): JIT-compile with ``cache=True``.
    """
    import os, sys
    if sys.argv[-1] == "ONLINE_JUDGE":
        from numba import njit
        from numba.pycc import CC
        cc = CC("my_module")
        for func, signature in numba_config:
            # also njit locally so the exported signatures are validated
            globals()[func.__name__] = njit(signature)(func)
            cc.export(func.__name__, signature)(func)
        cc.compile()
        exit()
    elif os.name == "posix":
        # pull compiled names into this frame's locals, then re-export as globals
        exec(f"from my_module import {','.join(func.__name__ for func, _ in numba_config)}")
        for func, _ in numba_config:
            globals()[func.__name__] = vars()[func.__name__]
    else:
        from numba import njit
        for func, signature in numba_config:
            globals()[func.__name__] = njit(signature, cache=True)(func)
        print("compiled!", file=sys.stderr)
def solve(In):
    # Solution entry point (template stub): consumes the int64 token stream in `In`.
    idx_In = np.array([-1], dtype=np.int64)  # read cursor, boxed so the closure can mutate it
    def read():
        # Return the next token from In.
        idx_In[0] += 1
        return In[idx_In[0]]
# AOT/JIT-compile the solver (see numba_compile above).
numba_compile([
    [solve, "void(i8[:])"],
])
def main():
    # Read all of stdin as a single int64 token array and hand it to the solver.
    In = np.array(sys.stdin.buffer.read().split(), dtype=np.int64)
    solve(In)
main()
# <<< numba compile <<<
# >>> binary indexed tree >>>
# use elements 1.. of an ndarray of length (number of elements + 1)
def bitify(arr):  # [bitify, "void(i8[:])"],
    """In place, turn a 1-indexed frequency array (length 2**k + 1) into a
    binary indexed tree usable with bit_sum/bit_add. O(n)."""
    last = len(arr) - 1
    for idx in range(1, last):
        # fold each node into its parent
        arr[idx + (idx & -idx)] += arr[idx]
def bit_sum(bit, i):  # [bit_sum, "i8(i8[:],i8)"],
    """Prefix sum over the half-open range (0, i] of a 1-indexed BIT."""
    total = 0
    while i > 0:
        total += bit[i]
        i &= i - 1  # drop the lowest set bit
    return total
def bit_add(bit, i, val):  # [bit_add, "void(i8[:],i8,i8)"],
    """Point update on a 1-indexed BIT: add *val* at position *i*."""
    size = len(bit)
    while i < size:
        bit[i] += val
        i += i & -i  # climb to the next responsible node
# <<< binary indexed tree <<<
def inversion_number(arr):  # [inversion_number, "i8(f8[:])"],
    """Number of inversions of *arr*, counted with a BIT in O(n log n).

    Relies on the sibling helpers bit_sum/bit_add defined in this file.
    """
    n = len(arr)
    ranks = np.argsort(arr) + 1  # 1-indexed rank sequence
    tree = np.zeros(n + 1, dtype=np.int64)
    # start from the maximum possible count and subtract the non-inversions
    inversions = n * (n - 1) >> 1
    for r in ranks:
        inversions -= bit_sum(tree, r)
        bit_add(tree, r, 1)
    return inversions
def pow_mod(base, exp):  # [numba_pow, "i8(i8,i8)"],
    """base**exp modulo the global `mod`.

    The exponent is first reduced mod (mod - 1), which assumes `mod` is prime
    (Fermat's little theorem); this also makes negative exponents (inverses) work.
    """
    exp %= mod - 1
    result = 1
    b = base
    while exp:
        if exp & 1:
            result = result * b % mod
        b = b * b % mod
        exp >>= 1
    return result
def comb_cunstruct(n):  # [comb_cunstruct, "Tuple((i8[:],i8[:]))(i8,)"],
    """Build factorial and inverse-factorial tables of length n+1 modulo the
    global `mod` (one modular inverse via sibling pow_mod, then peel down)."""
    fac = np.empty(n + 1, dtype=np.int64)
    facinv = np.empty(n + 1, dtype=np.int64)
    running = 1
    fac[0] = running
    for k in range(1, n + 1):
        running = running * k % mod
        fac[k] = running
    inv = pow_mod(running, -1)  # single inverse of n!
    for k in range(n, -1, -1):
        facinv[k] = inv
        inv = inv * k % mod
    return fac, facinv
def comb(n, r, fac, facinv):  # [comb, "i8(i8,i8,i8[:],i8[:])"],
    """nCr modulo the global `mod`, using precomputed factorial tables."""
    res = fac[n] * facinv[r] % mod
    res = res * facinv[n - r] % mod
    return res
def z_algo(S):  # [z_algo, "i8[:](i8[:])"],
    # Z-algorithm, O(n)
    # Z[i] := length of the longest common prefix of S and S[i:]
    # verified 1: https://atcoder.jp/contests/abc150/submissions/15829530
    # verified 2: https://atcoder.jp/contests/abc141/submissions/15855247
    i, j, n = 1, 0, len(S)
    Z = np.zeros(S.shape, dtype=np.int64)
    Z[0] = n
    while i < n:
        # extend the current match of S[i:] against the prefix
        while i+j < n and S[j] == S[i+j]:
            j += 1
        if j == 0:
            i += 1
            continue
        Z[i] = j
        # copy already-known Z values inside the matched window
        d = 1
        while i+d < n and d+Z[d] < j:
            Z[i+d] = Z[d]
            d += 1
        i += d
        j -= d
    return Z
def sort_edges(N, edges_):  # [sort_edges, "Tuple((i8[:],i8[:]))(i8,i8[:,:])"],
    """Build a CSR-style adjacency structure from an undirected edge list.

    N is the maximum vertex id. Returns (neighbors, lefts): the neighbors of
    vertex v are neighbors[lefts[v]:lefts[v+1]].
    """
    M = len(edges_)
    # duplicate every edge in both directions
    both = np.empty((M * 2, 2), dtype=np.int64)
    both[:M] = edges_
    both[M:] = edges_[:, ::-1]
    by_src = np.argsort(both[:, 0])  # could be O(N) with a counting sort
    neighbors = both[by_src, 1]
    counts = np.zeros(N + 1, dtype=np.int64)
    bc = np.bincount(edges_.ravel())  # minlength is unavailable under numba
    counts[:len(bc)] = bc
    ends = np.cumsum(counts)
    lefts = np.zeros(len(ends) + 1, dtype=np.int64)
    lefts[1:] = ends
    return neighbors, lefts
def eular_tour(edges, lefts, root):  # [eular_tour, "Tuple((i8[:],i8[:],i8[:],i8[:]))(i8[:],i8[:],i8)"],
    # Iterative Euler tour of a tree given in CSR form (edges/lefts as built by
    # sort_edges). The graph should be 1-indexed.
    # Returns (tour, firsts, lasts, parents): vertex v appears as v on entry and
    # as -v on exit.
    n = len(lefts)-1
    stack = [root]
    tour = [0] * 0  # typed empty list (kept this way for numba)
    firsts = np.full(n, -100, dtype=np.int64)  # -100 sentinel = "not visited yet"
    lasts = np.full(n, -100, dtype=np.int64)
    parents = np.full(n, -100, dtype=np.int64)
    while stack:
        v = stack.pop()
        if firsts[v] >= 0:
            # second pop of v: we are on the way back up
            lasts[v] = len(tour)
            tour.append(-v)  # change here to encode exit edges as something other than negatives
            continue
        p = parents[v]
        firsts[v] = len(tour)
        tour.append(v)
        stack.append(v)  # re-push so v is popped again after its children
        for u in edges[lefts[v]:lefts[v+1]]:
            if p != u:
                parents[u] = v
                stack.append(u)
    tour = np.array(tour, dtype=np.int64)
    return tour, firsts, lasts, parents
from functools import reduce
def rerooting(n, edges):  # [rerooting, "(i8,i8[:,:])"],
    # Rerooting technique (tree DP evaluated for every root).
    # ref 1: https://qiita.com/keymoon/items/2a52f1b0fb7ef67fb89e
    # ref 2: https://atcoder.jp/contests/abc160/submissions/15255726
    # verified: https://atcoder.jp/contests/abc160/submissions/15971370
    # >>> edit this part per problem >>>
    # carry any extra needed state through the arguments
    identity = (1, 0)
    def merge(a, b):
        # NOTE: reads globals mod/fac/facinv and the sibling comb()
        return a[0] * b[0] % mod * comb(a[1] + b[1], a[1], fac, facinv) % mod, a[1] + b[1]
    def add_node(value, idx):
        return value[0], value[1] + 1
    # <<< edit this part per problem <<<
    G = [[0]*0 for _ in range(n)]
    for i in range(n-1):
        a, b = edges[i]
        G[a].append(b)
        G[b].append(a)
    # step 1
    order = []  # DFS visiting (pre-)order
    stack = [0]
    while stack:
        v = stack.pop()
        order.append(v)
        for u in G[v]:
            stack.append(u)
            G[u].remove(v)  # orient the tree away from root 0
    # climb from the leaves
    dp_down = [identity] * n  # value of v together with its subtree
    for v in order[:0:-1]:
        dp_down[v] = add_node(reduce(
            merge, [dp_down[u] for u in G[v]], identity
        ), v)
    # step 2
    # descend from the root
    dp_up = [identity] * n  # value of the component above v (through its parent)
    for v in order:
        Gv = G[v]
        if len(Gv) == 0:
            continue
        cum = identity
        right = [identity]
        # suffix-merged child values, built right to left
        for u in Gv[:0:-1]:
            cum = merge(dp_down[u], cum)
            right.append(cum)
        right.reverse()
        cum = dp_up[v]
        for u, cum_r in zip(Gv, right):
            # everything except u's subtree: parent side + left prefix + right suffix
            dp_up[u] = add_node(merge(cum, cum_r), v)
            cum = merge(cum, dp_down[u])
    results = [identity] * 0
    for v, Gv in enumerate(G):
        results.append(add_node(
            reduce(merge, [dp_down[u] for u in Gv], dp_up[v]), v
        ))
    return np.array(results)
# segment tree: https://atcoder.jp/contests/abc158/submissions/16233600
# sqrt decomposition (lazy propagation): https://atcoder.jp/contests/abc177/submissions/16376895
# read a string as uint8: np.frombuffer(input(), dtype=np.uint8)
|
import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
    """Shell client that launches ``psql`` for the configured database."""
    executable_name = 'psql'

    def runshell(self):
        """Run psql with -U/-h/-p flags taken from the connection settings."""
        settings = self.connection.settings_dict
        args = [self.executable_name]
        user = settings['DATABASE_USER']
        host = settings['DATABASE_HOST']
        port = settings['DATABASE_PORT']
        if user:
            args.extend(["-U", user])
        if host:
            args.extend(["-h", host])
        if port:
            args.extend(["-p", str(port)])
        args.append(settings['DATABASE_NAME'])
        if os.name == 'nt':
            # Windows has no execvp-style process replacement; run via the shell.
            sys.exit(os.system(" ".join(args)))
        else:
            os.execvp(self.executable_name, args)
|
# Convert an integer to binary, hexadecimal or octal (base chosen by the user).
num = int(input('Digite um número inteiro: '))
conversao = int(input('Para qual base de conversão:\n[1] - BINÁRIO\n[2] - HEXADECIMAL\n[3] - OCTAL\nQual opção:'))
# format(num, 'b'/'x'/'o') is used instead of bin()/hex()/oct() with [2:]:
# slicing off two characters mangles negative numbers (bin(-5)[2:] == 'b101'),
# while format(-5, 'b') correctly yields '-101'.
if conversao == 1:
    # binary
    print('O número convertido para binário fica {}'.format(format(num, 'b')))
elif conversao == 2:
    # hexadecimal (typo "conivertido" fixed)
    print('O número convertido para hexadecimal é {}'.format(format(num, 'x')))
elif conversao == 3:
    # octal (typo "númerio" fixed)
    print('O número convertido para octal é {}'.format(format(num, 'o')))
else:
    print(f'\033[31m{conversao} NÃO É UMA OPÇÃO VÁLIDA\033[m')
import re
class ServiceResolver():
    """Expand user-supplied service specs into a concrete list of services.

    A spec may be a plain service name, a profile name, a '*' wildcard, or a
    '-name' exclusion that subtracts previously resolved services.

    Fix: the original used Python 2 ``print`` statements, which are syntax
    errors on Python 3; they are now ``print()`` calls (valid on both).
    """

    def __init__(self, application):
        self.application = application

    def resolve_services(self, service_name):
        """Resolve a single spec; returns a list of service names."""
        return self.resolve_services_from_array([service_name])

    def resolve_services_from_array(self, services):
        """Resolve a list of specs; '-name' entries subtract from the result."""
        services_to_start = []
        services_to_not_start = []
        for service_name in services:
            if service_name.startswith('-'):
                services_to_not_start += self.resolve_services(service_name[1:])
            elif '*' in service_name:
                services_to_start += self._all_matching(service_name)
            elif self.application.has_service(service_name):
                services_to_start += [service_name]
            elif self.application.is_profile(service_name):
                services_to_start += self._get_all_in_profile(service_name)
            else:
                print("The requested service %s does not exist" % service_name)
        for not_start in services_to_not_start:
            if not_start in services_to_start:
                services_to_start.remove(not_start)
        return services_to_start

    def _get_all_in_profile(self, profile_name):
        """Resolve every spec listed in a profile (wildcards and exclusions allowed)."""
        services = []
        services_to_not_start = []
        for service_name in self.application.services_for_profile(profile_name):
            if service_name.startswith('-'):
                services_to_not_start += self._all_matching(service_name[1:])
            elif '*' in service_name:
                services += self._all_matching(service_name)
            elif self.application.has_service(service_name):
                services.append(service_name)
            else:
                print("The requested service %s does not exist" % service_name)
        for not_start in services_to_not_start:
            if not_start in services:
                services.remove(not_start)
        return services

    def _all_matching(self, wildcard):
        """All known services whose name matches the '*' wildcard pattern."""
        services = []
        for service_name in self.application.services:
            if ServiceResolver._matches(service_name, wildcard):
                services.append(service_name)
        return services

    @staticmethod
    def _matches(service_name, wildcard):
        # NOTE(review): the pattern is anchored only at the start (re.match),
        # so 'web*' also matches 'website' — presumably intended for wildcards.
        regex = re.compile(wildcard.replace('*', '.*'))
        return re.match(regex, service_name)
from abc import abstractmethod
from typing import List, Union
from requests import Response
from .manga import Manga
from manga_py.libs.http import Http
class Abstract:
    """Interface every manga provider must implement.

    Abstract methods define the scraping contract (main page, chapter list,
    file list); the optional hooks/overloads raise ``NotImplementedError``
    until a concrete provider supplies them.
    """

    def __init__(self):
        super().__init__()

    @abstractmethod
    def get_main_page_url(self) -> str:
        """
        Returns the manga main page url.
        For example:
        http://example.org/manga/manga-name/chapter1.html
        ->
        http://example.org/manga/manga-name.html
        :return:
        """
        pass

    @abstractmethod
    def get_content(self) -> Response:  # mixed
        """
        Returns mixed data from the main page.
        Used in methods get_manga_name, get_chapters, get_cover, get_meta.
        Ideally, the main page is requested only once.
        (Use self.content to get data from the provider)
        Must correct the address of the main page if the user did not pass it correctly.
        (For example, instead of the address of the main page of the manga,
        the address of one of the chapters was given.
        Call self.url = 'http://example.org/manga/here' for this)
        :return:
        """
        pass

    @abstractmethod
    def get_manga_name(self) -> str:
        """
        Returns the 'user-friendly' name of the manga.
        Ideally, it is called only once. (Use self.manga_name to get data from the provider)
        :return:
        """
        pass

    @abstractmethod
    def get_chapters(self) -> list:
        """
        Returns the list of chapters.
        Ideally, it is called only once. (Use self.chapters to get data from the provider)
        The method is required to return a list of the form:
        [etree.Element, ...]
        or
        [('absolute_url', 'archive_name/folder_name'), ...]
        or
        [('absolute_url', ('0', '1', 2, 3), ...]  # chapter idx
        or
        [('absolute_url', 'archive_name/folder_name'), etree.Element, ...]
        or (for downloading archives, without images) NOT MIXED WITH PREV!
        [{'url': 'absolute_url', 'name': 'archive_name'}, {'url': 'absolute_url', 'name': 'archive_name'}]
        The latter is not recommended, but can be used.
        :return:
        """
        pass

    @abstractmethod
    def get_files(self) -> list:
        """
        The method is required to return a list of the form:
        [etree.Element, ...]
        or
        [('absolute_url', 'relative_file_name'), ...]
        or
        [('absolute_url', 'relative_file_name'), etree.Element, ...]
        The latter is not recommended, but can be used.
        :return:
        """
        pass

    @abstractmethod
    def get_chapter_name(self, chapter) -> str:
        """
        Returns the current name of the chapter.
        It is called at each iteration of the chapter list. (Use self.chapter to get RAW data from the provider)
        :param chapter: RAW chapter item as produced by get_chapters
        :return:
        :rtype str
        """
        pass

    def get_chapter_url(self) -> str:
        """
        Used to overload the standard behavior.
        Returns the current url of the chapter.
        It is called at each iteration of the chapter list. (Use self.chapter to get RAW data from the provider)
        :return:
        """
        pass

    def before_provider(self, args: dict) -> None:
        """
        The method will be called once, <b>before</b> any other methods in the provider.
        Will not be automatically called for API! The developer must do it himself.
        :param args: provider start-up arguments
        :return:
        """
        pass

    def after_provider(self) -> None:
        """
        The method will be called once, <b>after</b> any other methods in the provider.
        Will not be automatically called for API! The developer must do it himself.
        :return:
        """
        pass

    def get_cover(self) -> Union[str, list]:
        """
        Returns the cover of the manga, if possible.
        :return:
        :rtype str or str[] or None
        """
        raise NotImplementedError

    def get_meta(self) -> Manga:  # Todo
        """
        :return:
        :rtype Manga or None
        """
        raise NotImplementedError

    @staticmethod
    def search(title: str, http: Http) -> List[str]:
        """
        Returns the list of manga if search is possible on the site.
        :param title: str
        :param http: Http client used to perform the search request
        :return:
        """
        raise NotImplementedError
|
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from Controller.NetworkAnalysis.ImageScaler.IImageScaler import IImageScaler
import cv2
class ImageResize(IImageScaler):
    """IImageScaler backed by OpenCV: resize, then swap RGB -> BGR."""

    def scale(self, image, outputShape):
        """Resize *image* to *outputShape* and convert the channel order for cv2."""
        resized = cv2.resize(image, outputShape)
        return cv2.cvtColor(resized, cv2.COLOR_RGB2BGR)

    def __str__(self):
        banner = '################################\n'
        return (
            banner
            + 'ImageResize\n'
            + banner
            + 'resize FrameWork: cv2\n'
            + 'compression: INTER_LINEAR\n'
            + '\n' + banner
        )
#!/bin/python3
import logging
import os
import fcntl
import struct
import hexdump
from argparse import ArgumentParser
from datetime import datetime, timedelta
from scapy.all import *
from pyopenvpn import Client, Settings
class TunClient:
    """pyopenvpn callback that bridges VPN traffic through a local TUN device."""

    def __init__(self, args):
        self.host = args.host
        self.inited = False  # the TUN device is created lazily on the first callback

    def create(self):
        """Open /dev/net/tun, register a TUN interface and make the fd non-blocking."""
        TUNSETIFF = 0x400454ca
        IFF_TUN = 0x0001
        self.tfd = os.open("/dev/net/tun", os.O_RDWR)
        ifs = fcntl.ioctl(self.tfd, TUNSETIFF, struct.pack("16sH", b"t%d", IFF_TUN))
        self.tname = ifs[:16].strip(b"\x00").decode("utf8")
        print(self.tname)
        # FIX: O_NONBLOCK is a file *status* flag and must be set with
        # F_GETFL/F_SETFL. The original used F_GETFD/F_SETFD (descriptor
        # flags, i.e. close-on-exec), so the fd silently stayed blocking —
        # which the original's own debug check then complained about.
        flag = fcntl.fcntl(self.tfd, fcntl.F_GETFL)
        fcntl.fcntl(self.tfd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
        flag = fcntl.fcntl(self.tfd, fcntl.F_GETFL)
        if flag & os.O_NONBLOCK:
            print("xxxxxxxxx O_NONBLOCK!!")

    def config(self, ip, masklen):
        """Bring the interface up and assign ip/masklen (requires root)."""
        os.system("ip link set %s up" % (self.tname))
        os.system("ip link set %s mtu %i" % (self.tname, 1000))
        os.system("ip addr add %s/%i dev %s" % (ip, masklen, self.tname))

    def __call__(self, client):
        """Per-iteration hook: shuttle packets between the VPN and the TUN fd."""
        if self.inited is False:
            self.create()
            self.config(client.tunnel_ipv4, 24)
            self.inited = True
        if self.inited is True:
            incoming = client.recv_data(decode=False)
            if incoming is not None:
                hexdump(incoming)
                # prepend the 4-byte tun_pi header (flags=0, proto=0x80..)
                incoming = bytes([0, 0, 0, 0x80]) + bytes(incoming)
                os.write(self.tfd, incoming)
            try:
                data = os.read(self.tfd, 1500)
            except BlockingIOError:
                # fd is genuinely non-blocking now; nothing to forward this round
                return
            hexdump(data)
            if data is not None:
                # strip the tun_pi header before handing the packet to the VPN
                client.send_data(data[4:])
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)-5s:%(name)-8s: %(filename)s:%(lineno)d %(message)s")
    parser = ArgumentParser()
    parser.add_argument('config_file', help="OpenVPN configuration file")
    parser.add_argument('host', help="Remote host to ping")
    parser.add_argument('-i', dest='interval', default=1, metavar='interval', type=int)
    parser.add_argument('-W', dest='timeout', default=5, metavar='timeout', type=int)
    parser.add_argument('-c', dest='count', default=0, metavar='count', type=int)
    args = parser.parse_args()
    # NOTE(review): interval/timeout/count are parsed but only `host` is read by
    # TunClient — presumably leftovers from a ping-based example; confirm.
    c = Client(Settings.from_file(args.config_file), TunClient(args))
    c.run()
|
#!/usr/bin/env python
"""
provide mock objects in order to develop locally
"""
from PIL import Image, ImageTk, ImageFilter
import time
import tty
import sys
PADDING = 10
class Keyboard:
    """Raw-terminal keyboard reader that yields curses-style key names."""

    def __init__(self):
        # cbreak mode: deliver characters immediately, without waiting for Enter
        tty.setcbreak(sys.stdin)

    def getch_generator(self, debug=False, timeout=None):
        # NOTE(review): debug/timeout are accepted but unused here — presumably
        # kept to mirror the real (non-mock) keyboard interface; confirm.
        while True:
            c = sys.stdin.read(1)
            if ord(c) == 27:  # ESC: start of an ANSI escape sequence
                c2 = sys.stdin.read(1)
                if c2 == '[':  # CSI sequences: arrows and F5-F8
                    c3 = sys.stdin.read(1)
                    if c3 == 'D':
                        yield 'KEY_LEFT'
                    elif c3 == 'A':
                        yield 'KEY_UP'
                    elif c3 == 'C':
                        yield 'KEY_RIGHT'
                    elif c3 == 'B':
                        yield 'KEY_DOWN'
                    elif c3 == '1':  # '[1X~'-style function keys
                        c4 = sys.stdin.read(2)
                        if c4 == '5~':
                            yield 'KEY_F5'
                        elif c4 == '7~':
                            yield 'KEY_F6'
                        elif c4 == '8~':
                            yield 'KEY_F7'
                        elif c4 == '9~':
                            yield 'KEY_F8'
                        else:
                            print('unknown F key: ' + c4)
                            yield 'DUMMY'
                elif c2 == 'O':  # SS3 sequences: F1-F4 map to 'OP'..'OS'
                    c3 = sys.stdin.read(1)
                    yield f'KEY_F{ord(c3)-79}'
                else:
                    print(f'unknown control character {c2}')
                    yield 'AA'
            elif ord(c) == 127:
                yield 'KEY_BACKSPACE'
            elif ord(c) == 10:
                yield 'KEY_ENTER'
            else:
                yield c
class Sonos:
    """Mock Sonos controller returning canned data for local development."""

    def speakers(self):
        """Names of the available (fake) speakers."""
        return ['Schwarz', 'Weiss']

    def volume_play_as_string(self, selected_speaker, debug=False):
        """Fixed volume/playing summary string."""
        return "> 50%"

    def search(self, context, term, offset=0, max_items=7, debug=False):
        """Return canned (title, uri) pairs for the 'albums' or 'tracks' context."""
        if context == 'albums':
            hits = ['Appetite for Destruction', 'OK Computer', 'The Four Seasons',
                    'Music for a jilted generation']
        elif context == 'tracks':
            hits = ['Hamba hamba', 'Everybody', 'Take Five', 'Paranoid Android']
        else:
            hits = []
        return [(title, title) for title in hits]

    def play(self, speaker, uri):
        """Log the play request instead of actually playing."""
        print(f'play {uri} on {speaker}')
class Display:
    """Tkinter-backed stand-in for a 160x128 hardware display."""
    width = 160
    height = 128

    def __init__(self):
        import tkinter as tk
        from PIL import Image, ImageTk, ImageFile
        self._root = tk.Tk()
        self._canvas = tk.Canvas(
            self._root, width=self.width+2*PADDING, height=self.height+2*PADDING)
        self._imgArea = self._canvas.create_image(
            PADDING, PADDING, anchor=tk.NW)
        self._canvas.pack()
        self._canvas.configure(background='black')
        self._root.update()

    def __del__(self):
        self._root.quit()

    def draw(self, image):
        """Render a PIL image on the canvas.

        :param image: PIL.Image to display
        """
        img = ImageTk.PhotoImage(image)
        # FIX: keep a reference on self — Tk does not hold a strong reference
        # to the PhotoImage, so a purely local `img` can be garbage-collected
        # and the canvas then shows nothing.
        self._img = img
        self._canvas.itemconfig(self._imgArea, image=img)
        self._root.update()
        # image.show()
# image.show()
if __name__ == "__main__":
k = Keyboard()
for c in k.getch_generator():
print('>>' + c)
pass
|
# -*- coding: utf-8 -*-
"""
sphinx.environment.managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Manager components for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
if False:
# For type annotation
from typing import Any # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
class EnvironmentManager(object):
    """Base class for sphinx.environment managers."""

    name = None  # type: unicode
    env = None  # type: BuildEnvironment

    def __init__(self, env):
        # type: (BuildEnvironment) -> None
        self.env = env

    def attach(self, env):
        # type: (BuildEnvironment) -> None
        """Bind to *env*; named managers also expose themselves as an attribute on it."""
        self.env = env
        if self.name:
            setattr(env, self.name, self)

    def detach(self, env):
        # type: (BuildEnvironment) -> None
        """Drop the environment reference; named managers also remove their attribute."""
        self.env = None
        if self.name:
            delattr(env, self.name)

    def clear_doc(self, docname):
        # type: (unicode) -> None
        raise NotImplementedError

    def merge_other(self, docnames, other):
        # type: (List[unicode], Any) -> None
        raise NotImplementedError

    def process_doc(self, docname, doctree):
        # type: (unicode, nodes.Node) -> None
        raise NotImplementedError
|
import subprocess
import tarfile
import docker
import time
import os
from io import BytesIO
client = docker.from_env()  # Docker SDK client talking to the local daemon
containers = set()  # NOTE(review): never populated or read below — appears unused
def get_ssh_key(name, algo, email, password, id):
    """Ensure an SSH key pair named *name* exists under /keys; return (name, path).

    Generates the pair with ssh-keygen ('rsa' 4096-bit, otherwise ed25519)
    only when the public half is missing.
    """
    key_path = f'/keys/{name}'
    if not os.path.exists(key_path + '.pub'):
        print(f"\tGenerating {algo} SSH key for {name} ({id})")
        if algo == 'rsa':
            base_cmd = ['ssh-keygen', '-t', 'rsa', '-b', '4096']
        else:
            base_cmd = ['ssh-keygen', '-t', 'ed25519']
        subprocess.run(base_cmd + ['-f', key_path, '-C', email, '-N', password])
    return name, key_path
def send_file(cnt, file_name, file_path, dest):
    """Copy a local file into container *cnt* at *dest* via a one-entry tar stream."""
    size = os.stat(file_path).st_size
    with open(file_path, 'rb') as fp:
        stream = BytesIO()
        archive = tarfile.TarFile(fileobj=stream, mode='w')
        info = tarfile.TarInfo(name=file_name)
        info.mode = 0o400  # owner read-only, as expected for key material
        info.size = size
        info.mtime = time.time()
        archive.addfile(info, fp)
        archive.close()
        stream.seek(0)
        print(f"\tSending {file_name}")
        cnt.put_archive(dest, stream)
def parse_cnt(cnt):
    """Provision SSH material for a container based on its labels.

    Label value format: 'email:password:user:algo' (empty user -> root).
    ENABLE_SSH installs the *public* key as authorized_keys;
    GENERATE_KEY installs the *private* key into ~/.ssh.
    Either label also triggers a host-key install and an sshd start.
    """
    print(f"Detected new container {cnt.name} ({cnt.id})")
    needs_machine_fingerprint = False
    for name, value in cnt.labels.items():
        if name in ('ENABLE_SSH', 'GENERATE_KEY'):
            # Signal we need to send it
            needs_machine_fingerprint = True
            # Generate user SSH key
            email, password, user, algo = value.split(':')
            user = 'root' if not user else user
            home = '/root' if user == 'root' else '/home/' + user
            ssh_name = email.replace('@', '_at_')
            ssh_name, ssh_path = get_ssh_key(ssh_name, algo, email, password, cnt.id)
            print(f"\tSending {algo} SSH key to {cnt.name} ({cnt.id})")
            if name == 'ENABLE_SSH':
                send_file(cnt, ssh_name + '.pub', ssh_path + '.pub', home + '/.ssh/')
                cnt.exec_run(f'sudo chmod 600 {home}/.ssh/{ssh_name}.pub')
                cnt.exec_run(f'sudo chown {user}:{user} {home}/.ssh/{ssh_name}.pub')
                cnt.exec_run(f'/bin/bash -c "cat > {home}/.ssh/authorized_keys < {home}/.ssh/{ssh_name}.pub"')
            else:
                send_file(cnt, ssh_name, ssh_path, home + '/.ssh/')
                cnt.exec_run(f'sudo chmod 600 {home}/.ssh/{ssh_name}')
                cnt.exec_run(f'sudo chown {user}:{user} {home}/.ssh/{ssh_name}')
    # Generate machine identity / ssh fingerprint
    if needs_machine_fingerprint:
        # NOTE(review): email/password here are whatever the *last* matching
        # label iteration left behind — confirm that is intended.
        ssh_name, ssh_path = get_ssh_key(cnt.name + "_ssh_host_rsa_key", 'rsa', email, password, cnt.id)
        print(f"\tSending ssh_host_rsa_key key to {cnt.name} ({cnt.id})")
        send_file(cnt, 'ssh_host_rsa_key', ssh_path, '/etc/ssh/')
        send_file(cnt, 'ssh_host_rsa_key.pub', ssh_path + '.pub', '/etc/ssh/')
        # EXECUTE sshd
        cnt.exec_run('sudo /usr/sbin/sshd')
# Allow for some leeway
print("Starting")
time.sleep(1)  # give the daemon/compose a moment before the first scan
print("Scanning")
# Already running containers
for cnt in client.containers.list():
    parse_cnt(cnt)
try:
    # New containers: block on the daemon's event stream until Ctrl-C
    for event in client.events(decode=True):
        if event.get('Type') == 'container':
            # prefer the compose service name when available, else the raw id
            name = event.get("Actor", {}).get("Attributes", {}).get("com.docker.compose.service", event["id"])
            print(f'Got {event.get("status")} from {name}')
            if event.get('status') == 'start':
                parse_cnt(client.containers.get(event['id']))
except KeyboardInterrupt:
    pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-03-28 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the demo app: QuestionAnswer and Tasks tables plus
    the QuestionAnswer -> Tasks foreign key.

    Auto-generated by Django 1.11.23; applied migrations should not be hand-edited.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='QuestionAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('input_text', models.TextField(blank=True, null=True)),
                ('input_images', models.CharField(blank=True, max_length=10000, null=True)),
                ('answer_text', models.TextField(blank=True, null=True)),
                ('answer_images', models.CharField(blank=True, max_length=10000, null=True)),
                ('socket_id', models.CharField(blank=True, max_length=1000, null=True)),
            ],
            options={
                'db_table': 'questionanswer',
            },
        ),
        migrations.CreateModel(
            name='Tasks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('unique_id', models.PositiveIntegerField(unique=True)),
                ('name', models.CharField(blank=True, max_length=1000, null=True)),
                ('placeholder', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('num_of_images', models.PositiveIntegerField()),
            ],
            options={
                'db_table': 'tasks',
            },
        ),
        migrations.AddField(
            model_name='questionanswer',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Tasks'),
        ),
    ]
|
#!/usr/bin/env python3
"""Recipe for doing ASR with phoneme targets and joint seq2seq
and CTC loss on the TIMIT dataset following a knowledge distillation scheme as
reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint
CTC-Attention End-to-End Speech Recognition", Yan Gao et al.
To run this recipe, do the following:
> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT
Authors
* Yan Gao 2021
* Titouan Parcollet 2021
"""
import sys
import torch
import h5py
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
# Define training procedure
class ASR(sb.Brain):
    """Student ASR model trained with a joint CTC/attention loss plus knowledge
    distillation from pre-computed teacher outputs (see the file docstring).

    Teacher posteriors/WERs are read per step from self.train_dict /
    self.valid_dict / self.test_dict (h5py files keyed by step, teacher name
    and self.hparams.tea_keys — set up outside this class; confirm in the
    surrounding script).
    """

    def compute_forward(self, batch, stage):
        """Forward pass: features -> encoder -> CTC head + attentional decoder.

        Returns (p_ctc, p_seq, wav_lens) during TRAIN, plus decoded hypotheses
        for VALID (greedy search) and TEST (beam search).
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # double the batch with environment-corrupted copies
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                phns_bos = torch.cat([phns_bos, phns_bos])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats)
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        e_in = self.modules.emb(phns_bos)
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        if stage == sb.Stage.VALID:
            hyps, scores = self.hparams.greedy_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        elif stage == sb.Stage.TEST:
            hyps, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        return p_ctc, p_seq, wav_lens

    def def_tea_name(self):
        # define teacher variable names: "t0", "t1", ...
        tea_name = []
        for tea_num in range(self.hparams.num_tea):
            tea = "t{}".format(tea_num)
            tea_name.append(tea)
        return tea_name

    def re_format(self, data_dict):
        """Stack the stored per-teacher tensors for the current step.

        Returns a list of 4 tensors (one per key in self.hparams.tea_keys),
        each with the teacher index as dimension 0.
        """
        item_tea_list = [None, None, None, None]
        tea_name = self.def_tea_name()
        for tea_num in range(self.hparams.num_tea):
            for i in range(4):
                item_tea = data_dict[str(self.step)][tea_name[tea_num]][
                    self.hparams.tea_keys[i]
                ][()]
                # wer entries are scalars/arrays; the rest are numpy tensors
                if self.hparams.tea_keys[i].startswith("wer"):
                    item_tea = torch.tensor(item_tea)
                else:
                    item_tea = torch.from_numpy(item_tea)
                item_tea = item_tea.to(self.device)
                item_tea = torch.unsqueeze(item_tea, 0)
                if tea_num == 0:
                    item_tea_list[i] = item_tea
                else:
                    item_tea_list[i] = torch.cat(
                        [item_tea_list[i], item_tea], 0
                    )
        return item_tea_list

    def compute_objectives(self, predictions, batch, stage):
        """Combine normal supervised CTC/CE losses with distillation losses
        according to self.hparams.strategy ('average', 'best' or 'weighted')."""
        if stage == sb.Stage.TRAIN:
            p_ctc, p_seq, wav_lens = predictions
        else:
            p_ctc, p_seq, wav_lens, hyps = predictions
        ids = batch.id
        phns_eos, phn_lens_eos = batch.phn_encoded_eos
        phns, phn_lens = batch.phn_encoded
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            # targets were doubled in compute_forward; double them here too
            phns_eos = torch.cat([phns_eos, phns_eos], dim=0)
            phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0)
        # normal supervised training
        loss_ctc_nor = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens)
        loss_seq_nor = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos)
        # load teacher inference results
        data_dict = (
            self.train_dict
            if stage == sb.Stage.TRAIN
            else self.valid_dict
            if stage == sb.Stage.VALID
            else self.test_dict
        )
        item_tea_list = self.re_format(data_dict)
        p_ctc_tea, p_seq_tea, wer_ctc_tea, wer_tea = [
            item for item in item_tea_list
        ]
        # Strategy "average": average losses of teachers when doing distillation.
        # Strategy "best": choosing the best teacher based on WER.
        # Strategy "weighted": assigning weights to teachers based on WER.
        if self.hparams.strategy == "best":
            # tea_ce for kd
            wer_scores, indx = torch.min(wer_tea, dim=0)
            indx = list(indx.cpu().numpy())
            # select the best teacher for each sentence
            tea_seq2seq_pout = None
            for stn_indx, tea_indx in enumerate(indx):
                s2s_one = p_seq_tea[tea_indx][stn_indx]
                s2s_one = torch.unsqueeze(s2s_one, 0)
                if stn_indx == 0:
                    tea_seq2seq_pout = s2s_one
                else:
                    tea_seq2seq_pout = torch.cat([tea_seq2seq_pout, s2s_one], 0)
        apply_softmax = torch.nn.Softmax(dim=0)
        if (
            self.hparams.strategy == "best"
            or self.hparams.strategy == "weighted"
        ):
            # mean wer for ctc
            tea_wer_ctc_mean = wer_ctc_tea.mean(1)
            tea_acc_main = 100 - tea_wer_ctc_mean
            # normalise weights via Softmax function
            tea_acc_softmax = apply_softmax(tea_acc_main)
        if self.hparams.strategy == "weighted":
            # mean wer for ce
            tea_wer_mean = wer_tea.mean(1)
            tea_acc_ce_main = 100 - tea_wer_mean
            # normalise weights via Softmax function
            tea_acc_ce_softmax = apply_softmax(tea_acc_ce_main)
        # kd loss
        ctc_loss_list = None
        ce_loss_list = None
        for tea_num in range(self.hparams.num_tea):
            # ctc
            p_ctc_tea_one = p_ctc_tea[tea_num]
            # calculate CTC distillation loss of one teacher
            loss_ctc_one = self.hparams.ctc_cost_kd(
                p_ctc, p_ctc_tea_one, wav_lens, device=self.device
            )
            loss_ctc_one = torch.unsqueeze(loss_ctc_one, 0)
            if tea_num == 0:
                ctc_loss_list = loss_ctc_one
            else:
                ctc_loss_list = torch.cat([ctc_loss_list, loss_ctc_one])
            # ce
            p_seq_tea_one = p_seq_tea[tea_num]
            # calculate CE distillation loss of one teacher
            loss_seq_one = self.hparams.seq_cost_kd(
                p_seq, p_seq_tea_one, phn_lens_eos
            )
            loss_seq_one = torch.unsqueeze(loss_seq_one, 0)
            if tea_num == 0:
                ce_loss_list = loss_seq_one
            else:
                ce_loss_list = torch.cat([ce_loss_list, loss_seq_one])
        # kd loss
        if self.hparams.strategy == "average":
            # get average value of losses from all teachers (CTC and CE loss)
            ctc_loss_kd = ctc_loss_list.mean(0)
            seq2seq_loss_kd = ce_loss_list.mean(0)
        else:
            # assign weights to different teachers (CTC loss)
            ctc_loss_kd = (tea_acc_softmax * ctc_loss_list).sum(0)
            if self.hparams.strategy == "best":
                # only use the best teacher to compute CE loss
                seq2seq_loss_kd = self.hparams.seq_cost_kd(
                    p_seq, tea_seq2seq_pout, phn_lens_eos
                )
            if self.hparams.strategy == "weighted":
                # assign weights to different teachers (CE loss)
                seq2seq_loss_kd = (tea_acc_ce_softmax * ce_loss_list).sum(0)
        # total loss
        # combine normal supervised training
        loss_ctc = (
            self.hparams.temperature
            * self.hparams.temperature
            * self.hparams.alpha
            * ctc_loss_kd
            + (1 - self.hparams.alpha) * loss_ctc_nor
        )
        loss_seq = (
            self.hparams.temperature
            * self.hparams.temperature
            * self.hparams.alpha
            * seq2seq_loss_kd
            + (1 - self.hparams.alpha) * loss_seq_nor
        )
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        # Record losses for posterity
        if stage != sb.Stage.TRAIN:
            self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens)
            self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos)
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim,
            )
        return loss

    def fit_batch(self, batch):
        """One training step: forward, loss, backward, (gradient-checked) update."""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """One evaluation step: forward + loss, no parameter update."""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        # fresh metric trackers per stage; PER only matters outside TRAIN
        self.ctc_metrics = self.hparams.ctc_stats()
        self.seq_metrics = self.hparams.seq_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Log stats, anneal the LR on VALID, checkpoint on best PER, and
        write detailed stats files on TEST."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(per)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={
                    "loss": stage_loss,
                    "ctc_loss": self.ctc_metrics.summarize("average"),
                    "seq_loss": self.seq_metrics.summarize("average"),
                    "PER": per,
                },
            )
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"]
            )
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.wer_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nseq2seq loss stats:\n")
                self.seq_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print(
                    "CTC, seq2seq, and PER stats written to file",
                    self.hparams.wer_file,
                )
def data_io_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; needs the annotation paths, "data_folder",
        "sorting", the dataloader option dicts and the blank/bos/eos indices.

    Returns
    -------
    (train_data, valid_data, test_data, label_encoder)
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Valid/test are always duration-sorted for efficient batching.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Loads the signal lazily when the "sig" key is requested.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        # Yields successive stages so downstream keys are computed on demand.
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder (vocabulary comes from the training split only):
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    # Insert special tokens; a shared <eos-bos> token is used when the bos
    # and eos indices coincide.
    if (
        hparams["blank_index"] != hparams["bos_index"]
        or hparams["blank_index"] != hparams["eos_index"]
    ):
        label_encoder.insert_blank(index=hparams["blank_index"])
    if hparams["bos_index"] == hparams["eos_index"]:
        label_encoder.insert_bos_eos(
            bos_label="<eos-bos>",
            eos_label="<eos-bos>",
            bos_index=hparams["bos_index"],
        )
    else:
        label_encoder.insert_bos_eos(
            bos_label="<bos>",
            eos_label="<eos>",
            bos_index=hparams["bos_index"],
            eos_index=hparams["eos_index"],
        )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    return train_data, valid_data, test_data, label_encoder
def load_teachers(hparams):
    """
    Load results of inference of teacher models stored on disk.
    Note: Run experiment_save_teachers.py beforehand to generate .hdf5 files.

    Arguments
    ---------
    hparams : dict
        Needs "tea_infer_dir" (directory) and "batch_size" (part of the
        expected file name).

    Returns
    -------
    (train_dict, valid_dict, test_dict)
        h5py groups, one per split.

    NOTE(review): the h5py file handle is deliberately never closed here —
    the returned groups stay backed by it and closing would invalidate
    them. Confirm the handle living for the whole run is acceptable.
    """
    path = hparams["tea_infer_dir"] + "/tea_infer_{}batch.hdf5".format(
        hparams["batch_size"]
    )
    f = h5py.File(path, "r")
    train_dict = f["train"]
    valid_dict = f["valid"]
    test_dict = f["test"]
    return train_dict, valid_dict, test_dict
def st_load(hparams, asr_brain):
    """
    Load the pre-trained student model and remove its decoder layer.

    Arguments
    ---------
    hparams : dict
        Needs "pretrain_st_dir" pointing at the checkpoint directory.
    asr_brain : Brain
        Brain whose hparams.model receives the filtered weights.
    """
    print("loading pre-trained student model...")
    chpt_path = hparams["pretrain_st_dir"] + "/model.ckpt"
    weight_dict = torch.load(chpt_path)
    # Keep only keys prefixed "0" (the encoder); the remaining keys are the
    # decoder layer, which is dropped so it can be trained from scratch.
    # (Replaces the old collect-keys-then-delete loop with one comprehension.)
    weight_dict = {k: v for k, v in weight_dict.items() if k.startswith("0")}
    # strict=False: the model still owns decoder params with no checkpoint match.
    asr_brain.hparams.model.load_state_dict(weight_dict, strict=False)
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # multi-gpu (ddp) save data preparation: run once on the main process
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "splits": ["train", "dev", "test"],
            "save_folder": hparams["data_folder"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = data_io_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # load teacher models (precomputed inference results on disk)
    train_dict, valid_dict, test_dict = load_teachers(hparams)
    asr_brain.train_dict = train_dict
    asr_brain.valid_dict = valid_dict
    asr_brain.test_dict = test_dict
    if hparams["pretrain"]:
        # load pre-trained student model except last layer
        # (only on a fresh run; resumed runs restore from checkpoints)
        if hparams["epoch_counter"].current == 0:
            st_load(hparams, asr_brain)
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test with the best (lowest PER) checkpoint
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
|
import glob
import os

# Flattens each prepped query file: wrapped lines are joined into single
# lines (newline -> space), blank lines are kept as record separators.
path = 'search_queries/prepped/hw/*'
new_path = 'search_queries/prepped/hw_nice/'
paths = glob.glob(path)
print(paths)

# BUG FIX: the output directory was never created, so the first write
# below failed with FileNotFoundError on a fresh checkout.
os.makedirs(new_path, exist_ok=True)

for x in paths:
    out = ""
    with open(x, 'r') as f:
        lines = f.readlines()
    for line in lines:
        if line == "\n":
            # Keep blank lines as separators between queries.
            out += line
        else:
            # Join wrapped lines into one physical line.
            out += line.replace("\n", ' ')
    parts = x.split('/')
    print(parts)
    name = parts[-1]
    # Also normalize spaces in the file name itself.
    new_name = new_path + name.replace(' ', '_')
    with open(new_name, 'w') as f:
        f.write(out)
|
def number_of_carries(a, b):
|
import secrets
from urllib.parse import urlencode, urlparse
import pytest
import requests
from django.http.response import HttpResponseRedirect
from django.urls.base import reverse
from djangito_client.views import login_handler
@pytest.fixture
def mock_response(monkeypatch):
    """Stub requests.get so the view under test makes no real HTTP call."""

    class _StubResponse:
        # The code under test only reads the .text attribute.
        text = """{"url": "mock_response"}"""

    def _fake_get(*args, **kwargs):
        return _StubResponse()

    monkeypatch.setattr(requests, "get", _fake_get)
@pytest.mark.django_db
def test_with_requestfactory(rf, settings, monkeypatch, mock_response) -> None:
    """login_handler should redirect to the OIDC authorize URL (RequestFactory)."""
    # Pin the nonce so the expected URL is deterministic.
    monkeypatch.setattr(secrets, 'token_urlsafe', lambda: 'abcdefg')
    # inputs
    request = rf.get('', {'next': '/some_return_path'})  # note: path doesn't matter
    response = login_handler(request)
    # build the expected response manually
    redirect_uri = f"{request.scheme}://{request.get_host()}{reverse('callback_handler')}"
    oidc_url_as_dictionary = {
        'client_id': settings.DJANGITO_CLIENT_ID,
        'redirect_uri': redirect_uri,
        'scope': 'read',
        'response_type': 'code',
        'response_mode': 'query',
        'nonce': secrets.token_urlsafe(),
        'state': '/some_return_path',
    }
    oidc_url = f'{settings.DJANGITO_SERVER_URL}/o/authorize?{urlencode(oidc_url_as_dictionary)}'
    response0 = HttpResponseRedirect(redirect_to=oidc_url)
    # Compare full instance state, not just status/url.
    assert vars(response) == vars(response0)
@pytest.mark.django_db
def test_with_client(client, settings, monkeypatch, mock_response) -> None:
    """login_handler should redirect to the OIDC authorize URL (test Client)."""
    # Pin the nonce so the expected URL is deterministic.
    monkeypatch.setattr(secrets, 'token_urlsafe', lambda: 'abcdefg')
    # inputs
    response = client.get(reverse('login_handler'), {'next': '/some_return_path'})
    # build the expected response manually
    redirect_uri = f"http://testserver{reverse('callback_handler')}"
    oidc_url_as_dictionary = {
        'client_id': settings.DJANGITO_CLIENT_ID,
        'redirect_uri': redirect_uri,
        'scope': 'read',
        'response_type': 'code',
        'response_mode': 'query',
        'nonce': secrets.token_urlsafe(),
        'state': '/some_return_path',
    }
    oidc_url = f'{settings.DJANGITO_SERVER_URL}/o/authorize?{urlencode(oidc_url_as_dictionary)}'
    response0 = HttpResponseRedirect(redirect_to=oidc_url)
    # The Client wraps the response, so compare the relevant fields only.
    assert response.status_code == response0.status_code
    assert response.url == response0.url
|
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import escargot
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates the run to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily so pytest is only required when tests actually run.
        import pytest

        sys.exit(pytest.main(self.test_args))
# Runtime and test dependencies, one requirement per line.
with open('requirements.txt') as f:
    requires = f.read().strip().split('\n')
with open('requirements-dev.txt') as f:
    test_requires = requires + f.read().strip().split('\n')
with open('README.md') as f:
    long_description = f.read()

setup(
    name="escargot",
    version=escargot.__version__,
    author="Vayel",
    author_email="vincent.lefoulon@free.fr",
    packages=find_packages(),
    long_description=long_description,
    install_requires=requires,
    include_package_data=True,
    url="https://github.com/tartopum/Escargot",
    classifiers=[
        "Programming Language :: Python",
        "Natural Language :: French",
        "Programming Language :: Python :: 3.4",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    # BUG FIX: was misspelled "licence"; setuptools only understands
    # the "license" keyword, so the metadata was silently dropped.
    license="MIT",
    cmdclass={"test": PyTest},
    tests_require=test_requires,
)
|
from PyQt5 import QtCore, QtGui, QtWidgets
# Column-name -> column-index maps for the table models defined below
# (positions within the row lists handed to each model).
# NOTE(review): these positions do not match the CREATE TABLE column order
# in SCHEMA (e.g. sites is id, id_units, name, ... there) — presumably they
# mirror the SELECT order used when loading; confirm against those queries.
sitecol = {'id': 0, 'name': 1, 'x': 2, 'y': 3, 'desc': 4, 'id_units': 5}
datacol = {'id': 0, 'id_sites': 1, 'id_struct': 2, 'azi': 3, 'inc': 4, 'struct': 5, 'desc': 6, 'tags': 7}
structurecol = {'id': 0, 'structure': 1, 'planar': 2, 'desc': 3, 'scode': 4, 'gcode': 5}
unitcol = {'id': 0, 'name': 1, 'desc': 2}
tagcol = {'id': 0, 'name': 1, 'desc': 2, 'check': 3}
# SQLite DDL executed when (re)creating a database: pragmas plus the
# sites/structdata/structype/tagged/tags/units/attach/meta tables.
SCHEMA = '''pragma auto_vacuum=0;
pragma default_cache_size=2000;
pragma encoding='UTF-8';
pragma page_size=1024;
drop table if exists sites;
CREATE TABLE sites (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, id_units integer NOT NULL DEFAULT 0, name varchar(16) NOT NULL DEFAULT '', x_coord double DEFAULT NULL, y_coord double DEFAULT NULL, description text);
drop table if exists structdata;
CREATE TABLE structdata (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, id_sites integer NOT NULL DEFAULT 0, id_structype integer NOT NULL DEFAULT 0, azimuth double NOT NULL DEFAULT 0, inclination double NOT NULL DEFAULT 0, description text);
drop table if exists structype;
CREATE TABLE structype (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, pos integer NOT NULL DEFAULT 0, structure varchar(16) NOT NULL UNIQUE, description text, structcode integer DEFAULT NULL, groupcode integer DEFAULT NULL, planar integer DEFAULT 1);
drop table if exists tagged;
CREATE TABLE tagged (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, id_tags integer NOT NULL DEFAULT 0, id_structdata integer NOT NULL DEFAULT 0);
drop table if exists tags;
CREATE TABLE tags (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, pos integer NOT NULL DEFAULT 0, name varchar(16) NOT NULL UNIQUE, description text);
drop table if exists units;
CREATE TABLE units (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, pos integer NOT NULL DEFAULT 0, name varchar(60) NOT NULL UNIQUE, description text);
drop table if exists attach;
CREATE TABLE attach (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, id_structdata_planar integer NOT NULL DEFAULT '0', id_structdata_linear integer NOT NULL DEFAULT '0');
drop table if exists meta;
CREATE TABLE meta (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, name varchar(16) NOT NULL UNIQUE, value text);'''
# Seed rows: one default planar structure, one linear, one default unit.
DEFDATA = '''INSERT INTO structype VALUES (1, 1,'S', 'Default planar feature', 35, 13, 1);
INSERT INTO structype VALUES (2, 2, 'L', 'Default linear feature', 78, 13, 0);
INSERT INTO units VALUES (1, 1, 'Default', 'Default unit');'''
class SiteModel(QtCore.QAbstractTableModel):
    """Qt table model over the rows of the sites table.

    Rows are plain lists indexed by the module-level ``sitecol`` map; the
    row id is assumed to live at position 0 (see updateIndex).
    """
    # Here we define model to store sites table data
    def __init__(self, mlist, parent=None):
        super(SiteModel, self).__init__(parent)
        # Cache the passed data list as a class member.
        self._items = mlist
        # Create lookup dictionaries
        self.updateIndex()

    def updateIndex(self):
        """ Update lookup dictionaries for id and row. """
        self.id2row = {}
        self.row2id = {}
        for idx,row in enumerate(self._items):
            self.id2row[row[0]] = idx
            self.row2id[idx] = row[0]

    def rowCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of rows the model holds. """
        return len(self._items)

    def columnCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of columns the model holds. """
        return len(sitecol)

    def data(self, index, role = QtCore.Qt.DisplayRole):
        """ Depending on the index and role given, return data. If not
            returning data, return None (PySide equivalent of QT's
            "invalid QVariant").
        """
        if not index.isValid():
            return None
        if not 0 <= index.row() < len(self._items):
            return None
        if role == QtCore.Qt.DisplayRole:
            # The view is asking for the actual data, so, just return the item it's asking for.
            return self._items[index.row()][index.column()]
        elif role == QtCore.Qt.ToolTipRole:
            # The view is asking for tooltip data, so, we just return description.
            return self._items[index.row()][sitecol['desc']]
        else:
            # We don't care about anything else, so make sure to return None.
            return None

    def getRow(self, index):
        """ Returns model row. """
        return self._items[index.row()]

    def updateRow(self, index, datarow):
        """ Updates model row. """
        self._items[index.row()] = datarow
        self.dataChanged.emit(index, index)
        # self.emit(QtCore.SIGNAL('dataChanged(QModelIndex,QModelIndex)'), index, index)

    def appendRow(self, datarow):
        """ Append model row. """
        self.beginInsertRows(QtCore.QModelIndex(), len(self._items), len(self._items))
        self._items.append(datarow)
        self.endInsertRows()
        self.updateIndex()

    def removeRow(self, index):
        """ Remove model row. """
        self.beginRemoveRows(QtCore.QModelIndex(), index.row(), index.row())
        del self._items[index.row()]
        self.endRemoveRows()
        self.updateIndex()

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """ Set the headers to be displayed. """
        if role != QtCore.Qt.DisplayRole:
            return None
        if orientation == QtCore.Qt.Horizontal:
            if section == sitecol['name']:
                return 'Site'
            else:
                return None
        return None
class StructureModel(QtCore.QAbstractTableModel):
    """Qt table model over the rows of the structype table."""

    def __init__(self, mlist, parent=None):
        super(StructureModel, self).__init__(parent)
        # Keep a reference to the row list and build the id lookups.
        self._items = mlist
        self.updateIndex()

    def updateIndex(self):
        """ Update lookup dictionaries for id and row. """
        self.id2row = {row[0]: pos for pos, row in enumerate(self._items)}
        self.row2id = {pos: row[0] for pos, row in enumerate(self._items)}

    def rowCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of rows the model holds. """
        return len(self._items)

    def columnCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of columns the model holds. """
        return len(structurecol)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """ Return display data for a valid index, otherwise None. """
        if not index.isValid():
            return None
        rownum = index.row()
        if rownum < 0 or rownum >= len(self._items):
            return None
        if role != QtCore.Qt.DisplayRole:
            # Only the display role is supported by this model.
            return None
        return self._items[rownum][index.column()]

    def getRow(self, index):
        """ Returns model row. """
        return self._items[index.row()]

    def updateRow(self, index, datarow):
        """ Updates model row. """
        self._items[index.row()] = datarow
        self.dataChanged.emit(index, index)

    def appendRow(self, datarow, index=None, offset=0):
        """ Insert datarow at index.row()+offset, or append when index is None. """
        pos = len(self._items) if index is None else index.row() + offset
        self.beginInsertRows(QtCore.QModelIndex(), pos, pos)
        self._items.insert(pos, datarow)
        self.endInsertRows()
        self.updateIndex()

    def removeRow(self, index):
        """ Remove model row. """
        pos = index.row()
        self.beginRemoveRows(QtCore.QModelIndex(), pos, pos)
        del self._items[pos]
        self.endRemoveRows()
        self.updateIndex()

    def isplanar(self, row):
        """ True when the structure in the given row is planar. """
        return self._items[row][structurecol['planar']] == 1
class UnitModel(QtCore.QAbstractTableModel):
    """Qt table model over the rows of the units table."""

    def __init__(self, mlist, parent=None):
        super(UnitModel, self).__init__(parent)
        # Keep a reference to the row list and build the id lookups.
        self._items = mlist
        self.updateIndex()

    def updateIndex(self):
        """ Update lookup dictionaries for id and row. """
        self.id2row = {row[0]: pos for pos, row in enumerate(self._items)}
        self.row2id = {pos: row[0] for pos, row in enumerate(self._items)}

    def rowCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of rows the model holds. """
        return len(self._items)

    def columnCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of columns the model holds. """
        return len(unitcol)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """ Return display data for a valid index, otherwise None. """
        if not index.isValid():
            return None
        rownum = index.row()
        if rownum < 0 or rownum >= len(self._items):
            return None
        if role != QtCore.Qt.DisplayRole:
            # Only the display role is supported by this model.
            return None
        return self._items[rownum][index.column()]

    def getRow(self, index):
        """ Returns model row. """
        return self._items[index.row()]

    def updateRow(self, index, datarow):
        """ Updates model row. """
        self._items[index.row()] = datarow
        self.dataChanged.emit(index, index)

    def appendRow(self, datarow, index=None, offset=0):
        """ Insert datarow at index.row()+offset, or append when index is None. """
        pos = len(self._items) if index is None else index.row() + offset
        self.beginInsertRows(QtCore.QModelIndex(), pos, pos)
        self._items.insert(pos, datarow)
        self.endInsertRows()
        self.updateIndex()

    def removeRow(self, index):
        """ Remove model row. """
        pos = index.row()
        self.beginRemoveRows(QtCore.QModelIndex(), pos, pos)
        del self._items[pos]
        self.endRemoveRows()
        self.updateIndex()
class TagModel(QtCore.QAbstractTableModel):
    """Qt table model over the rows of the tags table.

    The ``check`` column is user-checkable; checked tags render bold and
    display the tag name.
    """

    def __init__(self, mlist, parent=None):
        super(TagModel, self).__init__(parent)
        # Cache the passed data list as a class member.
        self._items = mlist
        # Create lookup dictionaries
        self.updateIndex()

    def updateIndex(self):
        """ Update lookup dictionaries for id and row. """
        self.id2row = {}
        self.row2id = {}
        for idx, row in enumerate(self._items):
            self.id2row[row[0]] = idx
            self.row2id[idx] = row[0]

    def rowCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of rows the model holds. """
        return len(self._items)

    def columnCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of columns the model holds. """
        return len(tagcol)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """ Return data for the display, check-state and font roles. """
        if not index.isValid():
            return None
        if not 0 <= index.row() < len(self._items):
            return None
        if role == QtCore.Qt.CheckStateRole and index.column() == tagcol['check']:
            # Actual check state of the checkable column.
            return self._items[index.row()][index.column()]
        elif role == QtCore.Qt.FontRole and index.column() == tagcol['check']:
            # Checked tags are rendered bold.
            font = QtGui.QFont()
            font.setBold(
                self._items[index.row()][index.column()] == QtCore.Qt.Checked
            )
            return font
        elif role == QtCore.Qt.DisplayRole:
            if index.column() == tagcol['check']:
                # The check column displays the tag name.
                return self._items[index.row()][tagcol['name']]
            return self._items[index.row()][index.column()]
        else:
            return None

    def flags(self, index):
        """ All items enabled/selectable; the check column is checkable. """
        if not index.isValid():
            # BUG FIX: previously returned None, which is not a valid
            # Qt.ItemFlags value and breaks PyQt's return conversion.
            return QtCore.Qt.NoItemFlags
        if index.column() == tagcol['check']:
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

    def setData(self, index, value, role):
        """ Store a check-state change; return True only when handled. """
        if index.isValid() and role == QtCore.Qt.CheckStateRole:
            if index.column() == tagcol['check']:
                self._items[index.row()][index.column()] = value
                self.dataChanged.emit(index, index)
                return True
        # BUG FIX: Qt expects a bool; previously this fell through
        # returning None for unhandled roles/columns.
        return False

    def getChecked(self):
        """ Return the ids of all currently checked tags. """
        return [row[tagcol['id']] for row in self._items if row[tagcol['check']] == QtCore.Qt.Checked]

    def cleanState(self):
        """ Uncheck every tag. """
        for row in self._items:
            row[tagcol['check']] = QtCore.Qt.Unchecked

    def setState(self, ids):
        """ Check exactly the tags whose id is in ids; uncheck the rest. """
        for row in self._items:
            if row[tagcol['id']] in ids:
                row[tagcol['check']] = QtCore.Qt.Checked
            else:
                row[tagcol['check']] = QtCore.Qt.Unchecked

    def getRow(self, index):
        """ Returns model row. """
        return self._items[index.row()]

    def updateRow(self, index, datarow):
        """ Updates model row. """
        self._items[index.row()] = datarow
        self.dataChanged.emit(index, index)

    def appendRow(self, datarow, index=None, offset=0):
        """ Insert datarow at index.row()+offset, or append when index is None. """
        if index is None:
            pos = len(self._items)
        else:
            pos = index.row() + offset
        self.beginInsertRows(QtCore.QModelIndex(), pos, pos)
        self._items.insert(pos, datarow)
        self.endInsertRows()
        self.updateIndex()

    def removeRow(self, index):
        """ Remove model row. """
        self.beginRemoveRows(QtCore.QModelIndex(), index.row(), index.row())
        del self._items[index.row()]
        self.endRemoveRows()
        self.updateIndex()
class DataModel(QtCore.QAbstractTableModel):
    """Read-only Qt table model over the rows of the structdata table.

    Rows are plain lists indexed by the module-level ``datacol`` map.
    Unlike the other models here it exposes no update/append/remove API.
    """
    # Here we define model to store data table
    def __init__(self, mlist, parent=None):
        super(DataModel, self).__init__(parent)
        # Cache the passed data list as a class member.
        self._items = mlist
        # Create lookup dictionaries
        self.updateIndex()

    def updateIndex(self):
        """ Update lookup dictionaries for id and row. """
        self.id2row = {}
        self.row2id = {}
        for idx,row in enumerate(self._items):
            self.id2row[row[0]] = idx
            self.row2id[idx] = row[0]

    def rowCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of rows the model holds. """
        return len(self._items)

    def columnCount(self, index=QtCore.QModelIndex()):
        """ Returns the number of columns the model holds. """
        return len(datacol)

    def data(self, index, role = QtCore.Qt.DisplayRole):
        """ Depending on the index and role given, return data. If not
            returning data, return None (PySide equivalent of QT's
            "invalid QVariant").
        """
        if not index.isValid():
            return None
        if not 0 <= index.row() < len(self._items):
            return None
        if role == QtCore.Qt.DisplayRole:
            # The view is asking for the actual data, so, just return the item it's asking for.
            return self._items[index.row()][index.column()]
        elif role == QtCore.Qt.ToolTipRole:
            # The view is asking for tooltip data, so, we just return description.
            return self._items[index.row()][datacol['desc']]
        else:
            # We don't care about anything else, so make sure to return None.
            return None

    def getRow(self, index):
        """ Returns model row. """
        return self._items[index.row()]

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """ Set the headers to be displayed. """
        if role != QtCore.Qt.DisplayRole:
            return None
        if orientation == QtCore.Qt.Horizontal:
            if section == datacol['azi']:
                return "Azimuth"
            elif section == datacol['inc']:
                return "Inclination"
            elif section == datacol['struct']:
                return "Structure"
            elif section == datacol['tags']:
                return "Tags"
            else:
                return None
        return None
|
from django.db import models
import uuid
from django.contrib.auth.models import User
from django.core.validators import int_list_validator
from django.urls import reverse
# from django.dispatch import receiver
# from django.db.models.signals import pre_save
# Choices are (stored value, human-readable label) pairs.
# BUG FIX: these were defined with {...} (sets). Set iteration order is
# unstable across processes, which churns Django migrations and scrambles
# the order of options in forms; tuples keep a deterministic order.
CONTENT_TYPE = (
    ('text/plain', 'Plain Text'),
    ('text/markdown', 'Markdown'),
    ('image/png;base64', 'Image/png'),
    ('image/jpeg;base64', 'Image/jpeg'),
    ('application/base64', 'Application'),
)
VISIBILITY_CHOICES = (
    ("PUBLIC", "public"),
    ("PRIVATE_TO_AUTHOR", "private to author"),
    ("PRIVATE_TO_FRIENDS", "private to friends"),
)
class CitrusAuthor(models.Model):
    """An author profile tied one-to-one to a Django auth User."""
    type = models.CharField(max_length=100, default="Author")
    id = models.CharField(max_length=50,primary_key=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    host = models.CharField(max_length=200, default="http://localhost:8000/")
    # BUG FIX: the old default was f"{str(user)}", which stringified the
    # *field object* at class-definition time (e.g. "<...OneToOneField>"),
    # never the related User. Default to blank; populate the display name
    # from the related user at creation/save time instead.
    displayName = models.CharField(max_length=300, default="")
    github = models.CharField(max_length=300, default="", null=True)
    url = models.CharField(max_length=300, default="http://localhost:8000/", null=True)
    profile_picture = models.ImageField(null=True, blank=True, upload_to="images/")
"""
each post has a unqiue
title, id, author, description, content, categories
common_mark = markdown
posts have different types: public, shared to friends, private to author, private to friends
"""
class Post(models.Model):
    """A post authored by a CitrusAuthor.

    Has a title, id, author, description, content and categories; content
    may be markdown. Visibility is controlled by ``visibility``,
    ``unlisted`` and ``shared_with``.
    """
    type = models.CharField(max_length=50, default='post')
    # title of a post
    title = models.CharField(max_length=200)
    # id of the post
    id = models.CharField(max_length=50, primary_key=True)
    # where did you get this post from?
    source = models.CharField(max_length=300)
    # where is it actually from
    origin = models.CharField(max_length=300)
    # a brief description of the post
    description = models.CharField(max_length=300, null=True, blank=True)
    contentType = models.CharField(max_length=20, default='text/plain', choices=CONTENT_TYPE)
    content = models.TextField()
    author = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
    # parse this and return as list for GET request
    categories = models.CharField(max_length=400, null=True, blank=True)
    # total number of comments for this post
    count = models.IntegerField(null=True, blank=True)
    # page size
    size = models.IntegerField(null=True, blank=True)
    # the first page of comments
    comments = models.CharField(max_length=300, null=True, blank=True)
    # set once when the post is created
    published = models.DateTimeField(auto_now_add=True)
    # if visibility option is not provided the default will be public
    visibility = models.CharField(max_length=50, choices=VISIBILITY_CHOICES, default="PUBLIC")
    # unlisted means it is public if you know the post name -- use this for images, it's so images don't show up in timelines
    unlisted = models.BooleanField(default=False)
    # if private to author or private to friends is true add usernames to shared_with
    shared_with = models.CharField(max_length=600, null=True, blank=True)
"""
a comment will belong to an author and also be associated with one post
"""
class Comment(models.Model):
    """A comment; belongs to one author and is attached to one post."""
    author = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment = models.CharField(max_length=400)
    # set once when the comment is created
    published = models.DateTimeField(auto_now_add=True)
    id = models.CharField(max_length=500, primary_key=True)
# https://stackoverflow.com/questions/1429293/storing-an-integer-array-in-a-django-database
class Friend(models.Model):
    """Friends of an author, stored as a delimited id list in one text field."""
    uuid = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
    # comma-separated list of ids (validated by int_list_validator)
    friends_uuid = models.TextField(validators=[int_list_validator])
class Follower(models.Model):
    """Followers of an author, stored as a delimited id list in one text field."""
    uuid = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
    # comma-separated list of ids (validated by int_list_validator)
    followers_uuid = models.TextField(validators=[int_list_validator])
#class Following(models.Model):
#uuid = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
#following_uuid = models.TextField(validators=[int_list_validator])
class Node(models.Model):
    """A remote node this server federates with, keyed by its URL."""
    # add a node with URL
    host = models.URLField(primary_key=True)
    # for Basic Auth TODO later
    node_username = models.CharField(max_length=100)
    node_password = models.CharField(max_length=100)
    host_username = models.CharField(max_length=100)
    host_password = models.CharField(max_length=100)
    # NOTE(review): passwords are stored in plain CharFields — confirm
    # whether these credentials should be stored hashed/encrypted.
    public_posts = models.CharField(max_length=100)
    author_link = models.CharField(max_length=100)
class Inbox(models.Model):
    """Per-author inbox; items are kept serialized in a single text field."""
    author = models.ForeignKey(CitrusAuthor, on_delete=models.CASCADE)
    items = models.TextField()
class Like(models.Model):
    """A like, referencing a post and/or a comment by id."""
    # NOTE(review): default "1" looks like a placeholder author id — confirm.
    author = models.CharField(max_length=50, default="1")
    post_id = models.CharField(max_length=50, blank=True, null=True)
    comment_id = models.CharField(max_length=50, blank=True, null=True)
from django.apps import AppConfig
class PreumsConfig(AppConfig):
    """Django application configuration for the ``preums`` app."""
    name = 'preums'
|
import io
import requests
import sys
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import Count, Value, Q
from django.http import HttpResponse, HttpResponseNotAllowed, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.clickjacking import xframe_options_exempt
from middleware.login_required import login_exempt
from survey.models import *
from survey.forms import *
import survey.helpers as helpers
MISSING_THANKYOU_MESSAGE = 'Thank you! Your response was submitted successfully.'
##
## /survey/api/campaigns/responses/ <campaign=___, since=___>
##
@login_exempt
def api_responses(request):
    '''
    Return all responses for the given campaign, since the given date.
    "campaign" and "since" are required GET parameters; "since" is a unix
    timestamp, and only responses strictly after it are returned.
    '''
    try:
        campaign = Campaign.objects.get(uid=request.GET['campaign'])
        sinceDate = timezone.make_aware(
            datetime.fromtimestamp(int(request.GET['since']) + 1)
        )
    # BUG FIX: was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; keep the broad-but-sane Exception catch for
    # missing/invalid params or an unknown campaign.
    except Exception:
        response = JsonResponse({'responses': []}, status=404)
        response["Access-Control-Allow-Origin"] = "*"
        return response
    # (Removed an unused local that aliased campaign.response_campaign.)
    response = JsonResponse({
        'responses': list(
            campaign.response_campaign.filter(
                created_at__gt=sinceDate
            ).values_list('raw_data', flat=True)
        )
    }, status=200)
    response["Access-Control-Allow-Origin"] = "*"
    return response
##
## /survey/api/user/add/ <POST DATA>
##
@user_passes_test(helpers.hasAdminAccess)
def api_user_add(request):
    '''
    Creates a user with the passed email and name. Basic.
    Returns the user object ID and user's name (for optional display).
    '''
    email = request.POST.get('email')
    # NOTE: This ensures all usernames/emails to be lowercase. Prevents mismatch
    # for users with mix-case emails.
    # (Removed the dead initial httpCode = 404 — it was always overwritten
    # by one of the branches below.)
    try:
        user = helpers.createNewUser(email)
        httpCode = 200
        responseData = {
            'id': user.id,
            'username': user.username,
            'fullName': user.profile.full_name
        }
    except Exception as ex:
        httpCode = 500
        responseData = {
            'results': {
                'message': repr(ex)
            }
        }
    return JsonResponse(responseData, status=httpCode)
##
## /survey/api/adminaccess/<POST>
##
@user_passes_test(helpers.hasAdminAccess)
def api_adminaccess(request):
    '''
    Admin access api, adds/removes user via email to admin group.
    '''
    email = request.POST.get('email')
    action = request.POST.get('action')
    adminGroup, created = Group.objects.get_or_create(name='admins')
    httpCode = 404
    # BUG FIX: user must be pre-initialized — when User.objects.get raised,
    # "user" was unbound and the "if user:" below raised NameError (a 500)
    # instead of returning the intended 404 payload.
    user = None
    # If user no existy, throw back default 404.
    try:
        user = User.objects.get(username=email)
    except Exception as ex:
        responseData = {
            'results': {
                'message': repr(ex)
            }
        }
    if user:
        if action == "add":
            user.groups.add(adminGroup)
            httpCode = 200
            responseData = {
                'results': {
                    'id': user.id,
                    'name': user.profile.full_name,
                    'username': user.username
                }
            }
        elif action == "remove":
            adminGroup.user_set.remove(user)
            httpCode = 200
            responseData = {
                'results': {
                    'message': 'User removed successfully'
                }
            }
        else:
            httpCode = 400
            responseData = {
                'results': {
                    'message': 'You forgot to tell me what to do; add or remove the user.'
                }
            }
    return JsonResponse(responseData, status=httpCode)
##
## /survey/api/submit/<POST>
##
@xframe_options_exempt
@login_exempt
@csrf_exempt
def api_submit_response(request):
    '''
    Surveys post to this URL. Store the response and return message to display.
    Returns 400 with the campaign-invalid message or the storage error text,
    otherwise 200 with the (templated) thank-you message.
    '''
    # Resolve the campaign from the posted uid; bail out early if invalid.
    try:
        campaign = Campaign.objects.get(uid=request.POST.get('cuid'))
    except:
        return JsonResponse({'results': {'message': 'The survey campaign is invalid.'}}, status=400)
    try:
        # Store the raw data, then set user survey submit timestamp/flag.
        response = campaign.storeResponse(request.session['uuid'], request)
        campaign.setUserStatus(request.session['uuid'], 'submitted')
        response.sendSlackNotification()
    except Exception as ex:
        # First attempt failed -- presumably because no per-user campaign info
        # record exists yet; create one and retry the same sequence.
        # TODO(review): confirm the failure really is "missing user info".
        try:
            userInfo, created = campaign.getCreateUserInfo(request)
            response = campaign.storeResponse(request.session['uuid'], request)
            campaign.setUserStatus(request.session['uuid'], 'submitted')
            response.sendSlackNotification()
            # NOTE(review): sendToLux() only runs on this retry path, not on
            # the first-attempt success path above -- confirm that is intended.
            if campaign.survey.survey_type == 'feedback':
                response.sendToLux()
        except Exception as ex:
            cuid = campaign.uid if campaign else 'none'
            uuid = request.session['uuid'] if request.session['uuid'] else 'none'
            print(f'Error: api_submit_response failed - CUID:{cuid}:, UID :{uuid}:, error: - {ex}')
            return JsonResponse({'results': {'message': f'{ex}'}}, status=400)
    # Build the thank-you message, substituting the project's display name
    # (or the campaign's custom name) into the {projectname} placeholder.
    try:
        projectNameToUse = campaign.project.getDisplayName()
        if campaign.custom_project_name:
            projectNameToUse = campaign.custom_project_name
        thankyou = campaign.survey_thankyou.message
        thankyou = thankyou.replace('{projectname}', projectNameToUse)
    except:
        thankyou = MISSING_THANKYOU_MESSAGE
    response = JsonResponse({'message': thankyou}, status=200)
    # Surveys are embedded cross-origin, so allow any origin on the reply.
    response["Access-Control-Allow-Origin"] = "*"
    return response
##
## /survey/api/deleteresponse/
##
@user_passes_test(helpers.hasAdminAccess)
def api_delete_response(request):
    '''
    Delete a single response and recalculate derived stats
    (admin center response-list delete link posts here).
    '''
    try:
        Response.objects.get(id=request.POST.get('response')).deleteResponseAndRecalc()
    except Exception as ex:
        return JsonResponse({'results': {'message': f'{ex}'}}, status=400)
    return JsonResponse({'results': {'message': 'Success.'}}, status=200)
##
## /survey/api/deletecampaignresponses/
##
@user_passes_test(helpers.hasAdminAccess)
def api_delete_campaign_responses(request):
    '''
    Delete every response of a campaign and reset its derived state
    (admin center campaign list "delete all responses" posts here).
    '''
    try:
        target = Campaign.objects.get(id=request.POST.get('campaign'))
        target.deleteResponsesAndReset()
        target.customPreSave()
        target.save()
    except Exception as ex:
        return JsonResponse({'results': {'message': f'{ex}'}}, status=400)
    return JsonResponse({'results': {'message': 'Success.'}}, status=200)
##
## /survey/api/deletetakenflags/
##
@user_passes_test(helpers.hasAdminAccess)
def api_delete_taken_flags(request):
    '''
    Clear ONLY the per-user campaignUserInfos ("shown"/"taken" flags) of a
    campaign, leaving its responses intact, so everyone can take the survey
    again.
    '''
    try:
        target = Campaign.objects.get(id=request.POST.get('campaign'))
        target.resetUserStatusFlags()
        target.customPreSave()
        target.save()
    except Exception as ex:
        return JsonResponse({'results': {'message': f'{ex}'}}, status=400)
    return JsonResponse({'results': {'message': 'Success.'}}, status=200)
##
## /survey/api/campaign/toggleenabled/
##
@user_passes_test(helpers.hasAdminAccess)
def api_campaign_toggle_enabled(request):
    '''
    Admin center campaign list, toggle a campaign on/off.
    '''
    try:
        campaign = Campaign.objects.get(id=request.POST.get('campaign'))
        # Idiomatic boolean flip (was: True if not campaign.enabled else False).
        campaign.enabled = not campaign.enabled
        campaign.customPreSave()
        campaign.save()
    except Exception as ex:
        return JsonResponse({'results': {'message': f'{ex}'}}, status=400)
    return JsonResponse({'results': {'message': 'Success.'}}, status=200)
##
## /survey/takelater/
##
@login_exempt
@csrf_exempt
def api_campaign_take_later(request):
    '''
    When the elect to take it later, set their status so on page load we know
    to show a little reminder icon.
    If setting fails because it can't find it, create user status then set it.
    Always answers 200 'Success.' -- errors are only logged.
    '''
    try:
        campaign = Campaign.objects.get(uid=request.POST.get('cuid'))
        campaign.setUserStatus(request.session['uuid'], 'take_later')
    except:
        # Either the campaign lookup or the status update failed; create the
        # per-user info record, then set both flags.
        # NOTE(review): if the campaign lookup itself failed, 'campaign' is
        # unbound here and the retry raises NameError (caught below).
        try:
            userInfo, created = campaign.getCreateUserInfo(request)
            campaign.setUserStatus(request.session['uuid'], 'intercept_shown')
            campaign.setUserStatus(request.session['uuid'], 'take_later')
        except Exception as ex:
            cuid = campaign.uid if campaign else 'none'
            uuid = request.session['uuid'] if request.session['uuid'] else 'none'
            print(f'Error: api_campaign_take_later failed - CUID:{cuid}:, UID :{uuid}: - {ex}')
    response = JsonResponse({'results': {'message': 'Success.'}}, status=200)
    response["Access-Control-Allow-Origin"] = "*"
    # Clear the per-campaign cookie so the survey can be re-offered later.
    try:
        response.delete_cookie(campaign.uid)
    except:
        pass
    return response
##
## /survey/api/removetakelater/
##
@login_exempt
@csrf_exempt
def api_campaign_remove_take_later(request):
    '''
    When they click on the reminder icon remove take_later flag.
    Always answers 200 'Success.' -- errors are only logged.
    '''
    try:
        campaign = Campaign.objects.get(uid=request.POST.get('cuid'))
    except:
        campaign = None
    # NOTE(review): raises KeyError if the session lacks a 'uuid' key; the
    # conditional only guards against a falsy value, not a missing key.
    uuid = request.session['uuid'] if request.session['uuid'] else 'none'
    if campaign:
        try:
            campaign.setUserStatus(uuid, 'remove_take_later')
        except Exception as ex:
            # No per-user record yet: create it so at least the shown flag exists.
            try:
                userInfo, created = campaign.getCreateUserInfo(request)
                campaign.setUserStatus(uuid, 'intercept_shown')
            except Exception as ex:
                print(f'Error: api_campaign_remove_take_later failed - CUID:{campaign.uid}:, UID :{uuid}: - {ex}')
    else:
        print(f"Error: api_campaign_remove_take_later failed, no campaign found - CUID:{request.POST.get('cuid')}:, UID :{request.session['uuid']}:")
    response = JsonResponse({'results': {'message': 'Success.'}}, status=200)
    # Echo the caller's origin (with credentials allowed) so the embedded
    # widget can read the reply cross-origin; fall back to '*' when absent.
    try:
        reqDomain = request.META['HTTP_ORIGIN']
    except:
        reqDomain = '*'
    response['Access-Control-Allow-Origin'] = reqDomain
    response['Access-Control-Allow-Credentials'] = 'true'
    return response
##
## /survey/api/setactivestates/
##
@login_exempt
def api_set_active_states(request):
    '''
    Daily cron endpoint: enable/disable campaigns that carry start/end dates.
    '''
    # All the work happens in the model-level batch helper.
    Campaign.setActiveStateAllCampaigns()
    return JsonResponse({'results': 'Success.'}, status=200)
##
## /survey/takelater/<id>/
##
@login_exempt
@csrf_exempt
def api_campaign_email_link(request):
    '''
    When they elect to take later and email them a link.
    If we can't get a campaign user status, create one.
    Always answers 200 'Success.' -- errors are only logged.
    '''
    try:
        campaign = Campaign.objects.get(uid=request.POST.get('cuid','None'))
        campaign.setUserStatus(request.session['uuid'], 'email_link')
        email = request.POST.get('email')
        # Send the email off-thread so the HTTP response is not delayed.
        runInBackground(campaign.emailLink, {'email':email})
    except Exception as ex:
        # Status update failed (likely no per-user record): create it and retry.
        # NOTE(review): if the campaign lookup failed, 'campaign' is unbound
        # here and this retry raises NameError, caught and logged below.
        try:
            userInfo, created = campaign.getCreateUserInfo(request)
            campaign.setUserStatus(request.session['uuid'], 'email_link')
            email = request.POST.get('email')
            runInBackground(campaign.emailLink, {'email':email})
        except Exception as ex:
            print(f'Error: email link api_campaign_email_link failed - {ex}')
    response = JsonResponse({'results': {'message': 'Success.'}}, status=200)
    response["Access-Control-Allow-Origin"] = "*"
    return response
##
## /survey/api/campaigns/
##
@login_exempt
def api_campaigns(request):
    '''
    List of active campaigns. This is for downstream systems to pull and
    then loop through to get responses for a given campaign.
    Best effort: any query failure yields an empty list rather than a 500.
    '''
    try:
        campaigns = list(Campaign.objects.filter(active=True).values('uid', 'key'))
    except Exception:
        # ROBUSTNESS: narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt.
        campaigns = []
    return JsonResponse({'campaigns': campaigns}, status=200)
##
## /survey/removecampaignuserinfo/
##
def api_remove_campaign_user_info(request):
    '''
    Admin debug helper: drop the caller's per-campaign user info so the
    "force survey" action in the debug box can re-trigger the survey.
    Always answers 200 'Success.' -- errors are only printed.
    '''
    try:
        target = Campaign.objects.get(uid=request.POST.get('campaign'))
        target.removeUserInfo(request.session['uuid'])
    except Exception as ex:
        print(f'{ex}')
    response = JsonResponse({'results': {'message': 'Success.'}}, status=200)
    response["Access-Control-Allow-Origin"] = "*"
    return response
##
## /survey/api/defaultthankyou/
##
def api_get_default_thankyou(request):
    '''
    Admin helper: when a campaign survey is chosen, look up the default
    thank-you record for that survey's type. Falls back to
    {'thankyouId': False} on any failure (unknown type, missing default...).
    '''
    data = {'thankyouId': False}
    try:
        survey = Survey.objects.get(id=request.GET.get('surveyid'))
        # Map each known survey type to its default-record lookup.
        lookups = {
            'vote': {'vote_default': True},
            'feedback': {'feedback_default': True},
        }
        criteria = lookups.get(survey.survey_type)
        if criteria:
            data = {'thankyouId': SurveyThankyou.objects.get(**criteria).id}
    except Exception:
        # Best effort by design: any error simply returns the False payload.
        pass
    return JsonResponse(data, status=200)
|
### build a class for the 2D-Ising Model system
### import libraries
import numpy as np
import Hamiltonian
from Hamiltonian import Hamiltonian as Hamil
class Configuration:
    """A configuration of Ising spins with four-body interaction terms."""

    def __init__(self, spins, L, J, K, T):
        # Lattice size and coupling constants.
        self.size = L
        self.J = J
        self.K = K
        # Inverse temperature beta = 1/T.
        self.beta = 1. / T
        self.spins = spins
        # Derived observables, computed once at construction time.
        self.energy = self._get_energy()
        self.magnetization = self._get_magnetization()

    def _get_energy(self):
        """Return the total energy via the Hamiltonian helper."""
        return Hamil(self.J, self.K, self.spins)

    def _get_magnetization(self):
        """Return the total magnetization (sum over all spins)."""
        return np.sum(self.spins)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from qdarkstyle import qt_bindings, qt_abstractions, information, __version__
import qdarkstyle
from example import example
import argparse
import sys
from os.path import abspath, dirname
sys.path.insert(0, abspath(dirname(abspath(__file__)) + '/..'))
def print_list_md(info):
    """Print each entry of *info* as a markdown-style bullet, one per line."""
    for entry in info:
        print(' - ' + entry)
def main():
    """Execute QDarkStyle example.

    Parses the CLI flags, always prints the help text, then prints the
    requested environment/bindings/abstractions/version information.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-i', '--information', action='store_true',
                        help="Show information about environment (important for bug report)")
    parser.add_argument('-b', '--bindings', action='store_true',
                        help="Show available bindings for Qt")
    parser.add_argument('-a', '--abstractions', action='store_true',
                        help="Show available abstraction layers for Qt bindings")
    # parser.add_argument('-e', '--example', action='store_true',
    #                     help="Show qdarkstyle example")
    parser.add_argument('-v', '--version', action='store_true',
                        help="Show qdarkstyle version")
    parser.add_argument('--all', action='store_true',
                        help="Show all information options at once")
    # parsing arguments from command line
    args = parser.parse_args()
    # NOTE(review): help is printed unconditionally, even when flags are given.
    parser.print_help()
    if args.information or args.all:
        info = information()
        print('\nInformation about your current environment setup:')
        print_list_md(info)
    if args.bindings or args.all:
        info = qt_bindings()
        print('\nQt bindings available:')
        print_list_md(info)
    if args.abstractions or args.all:
        info = qt_abstractions()
        print('\nQt abstraction layers available:')
        print_list_md(info)
    if args.version:
        # NOTE: --all intentionally does not imply --version here.
        info = __version__
        print('\nVersion: %s' % info)
    # if args.example:
    #     example.main()


if __name__ == "__main__":
    # main() returns None, so the process exit status is always 0.
    sys.exit(main())
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.timeline import importer
from telemetry.timeline import trace_data as trace_data_module
class TraceBufferOverflowException(Exception):
    """Raised when a traced process reports that its trace buffer overflowed."""
class TabIdImporter(importer.TimelineImporter):
    """Maps tab ids to renderer threads once all other importers have run.

    Raises TraceBufferOverflowException if any traced process dropped events,
    since the tab-id markers this importer relies on may then be missing.
    """

    def __init__(self, model, trace_data):
        # Needs to run after all other importers so overflow events have been
        # created on the model.
        super(TabIdImporter, self).__init__(
            model,
            trace_data,
            import_order=999)
        self._trace_data = trace_data

    @staticmethod
    def GetSupportedPart():
        # Declares which part of the trace data this importer consumes.
        return trace_data_module.TAB_ID_PART

    def ImportEvents(self):
        # No per-event work; everything happens in FinalizeImport().
        pass

    def FinalizeImport(self):
        self._CheckTraceBufferOverflow()
        self._CreateTabIdsToThreadsMap()

    def _CheckTraceBufferOverflow(self):
        # Since _CreateTabIdsToThreadsMap() relies on markers output on timeline
        # tracing data, it may not work in case we have trace events dropped due to
        # trace buffer overflow.
        for process in self._model.GetAllProcesses():
            if process.trace_buffer_did_overflow:
                raise TraceBufferOverflowException(
                    'Trace buffer of process with pid=%d overflowed at timestamp %d. '
                    'Raw trace data:\n%s' %
                    (process.pid, process.trace_buffer_overflow_event.start,
                     repr(self._trace_data)))

    def _CreateTabIdsToThreadsMap(self):
        # Collect every tab id recorded in the TAB_ID trace part.
        tab_id_events = []
        for tab_ids in self._trace_data.GetTracesFor(trace_data_module.TAB_ID_PART):
            tab_id_events.extend(tab_ids)
        for tab_id in tab_id_events:
            try:
                timeline_markers = self._model.FindTimelineMarkers(tab_id)
            # If timeline_markers with name equals |tab_id| can't be found, it's
            # non-fatal.
            except Exception:
                logging.warning('Cannot find timeline marker for tab with id=%s' %
                                tab_id)
                continue
            # Exactly one marker per tab id, starting and ending on the same
            # (renderer) thread.
            assert len(timeline_markers) == 1
            assert timeline_markers[0].start_thread == timeline_markers[0].end_thread
            self._model.AddMappingFromTabIdToRendererThread(
                tab_id, timeline_markers[0].start_thread)
|
# coding=utf-8
from datetime import date, datetime
from django.utils.translation import ugettext_lazy as _
from django.core.management import BaseCommand
from django.conf import settings
from support.models import Issue
DEFAULT_NOTE = _("Generated automatically on {}\n".format(date.today()))
class Command(BaseCommand):
    help = """Cierra incidencias de facturación de tipo gestión de cobranzas, automáticamente."""

    # This is left blank if it's necessary to add some arguments
    # def add_arguments(self, parser):
    #     # parser.add_argument('payment_type', type=str)

    def handle(self, *args, **options):
        """Auto-close billing (category 'I') issues whose contact has paid all invoices."""
        # TODO: Generate a queryset to look for debtors, or a method to check for it while iterating through all
        # The first part would be faster probably
        issues = Issue.objects.filter(category="I").exclude(
            status__slug__in=settings.ISSUE_STATUS_FINISHED_LIST
        )
        if getattr(settings, 'ISSUE_STATUS_AUTO_CLOSE_SLUGS', None):
            issues = issues.filter(status__slug__in=settings.ISSUE_STATUS_AUTO_CLOSE_SLUGS)
        if getattr(settings, 'ISSUE_SUBCATEGORY_AUTO_CLOSE_SLUGS', None):
            # NOTE(review): this filters on status__slug with the SUBCATEGORY
            # setting -- looks like a copy/paste slip; confirm whether it should
            # filter on the sub-category relation instead.
            issues = issues.filter(status__slug__in=settings.ISSUE_SUBCATEGORY_AUTO_CLOSE_SLUGS)
        print(_("Started process"))
        for issue in issues.iterator():
            try:
                contact = issue.contact
                if contact.is_debtor() is False:  # No open issues with category I
                    print(
                        _(
                            "Closing issue {} for contact {}. All their invoices are paid".format(
                                issue.id, contact.id
                            )
                        )
                    )
                    msg = u"Incidencia cerrada automáticamente por pago de facturas el {}".format(datetime.now())
                    issue.mark_solved(msg)
            except Exception as e:
                # BUG FIX: Exception.message does not exist on Python 3; format the
                # exception itself. Also use issue.contact_id so this line cannot
                # raise NameError when issue.contact itself failed above.
                print("Error issue {}, contact {}: {}".format(issue.id, issue.contact_id, e))
        print(_("Ended process"))
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import operator
def calculateShannonEntropy(dataset):
    """Compute the Shannon entropy of the class labels of *dataset*.

    Each instance is a list whose last element is the class label,
    e.g. [[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'Y']].
    """
    total = len(dataset)
    # Tally each label, e.g. {'Y': 1, 'N': 1}.
    label_counts = {}
    for row in dataset:
        label_counts[row[-1]] = label_counts.get(row[-1], 0) + 1
    # H = -sum(p * log2(p)) over the label probabilities p.
    entropy = 0.0
    for count in label_counts.values():
        p = count / float(total)
        entropy -= p * math.log(p, 2)
    return entropy
def testCalculateShannonEntropy():
    """Smoke-test calculateShannonEntropy on two tiny datasets."""
    # First sample: 50/50 labels -> 1.0; second: a single label -> 0.0.
    for sample in ([[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'Y']],
                   [[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'N']]):
        print("The shannon entropy is: {}".format(calculateShannonEntropy(sample)))
def split_dataset(dataset, feature, value):
    """Return the rows whose column *feature* equals *value*, with that column removed."""
    subset = []
    for row in dataset:
        if row[feature] == value:
            # Drop the matched column so it is not reused further down the tree.
            subset.append(row[:feature] + row[feature + 1:])
    return subset
def choose_best_feature_to_split(dataset):
    """Pick the feature index with the highest information gain ratio (C4.5 style).

    Returns -1 when no feature yields a positive gain ratio.
    """
    # The last column is the label; everything before it is a feature.
    feature_count = len(dataset[0]) - 1
    base_entropy = calculateShannonEntropy(dataset)
    best_ratio, best_feature = 0.0, -1
    for feature in range(feature_count):
        values = set(row[feature] for row in dataset)
        split_entropy = 0.0
        intrinsic_value = 0.0
        for value in values:
            subset = split_dataset(dataset, feature, value)
            probability = len(subset) / float(len(dataset))
            split_entropy += probability * calculateShannonEntropy(subset)
            intrinsic_value -= probability * math.log(probability, 2)
        if intrinsic_value == 0:
            # Every row shares one value for this feature; gain ratio undefined.
            continue
        gain_ratio = (base_entropy - split_entropy) / intrinsic_value
        if gain_ratio > best_ratio:
            best_ratio, best_feature = gain_ratio, feature
    return best_feature
def create_decision_tree(dataset, header_names):
    """Recursively build a decision tree as nested dicts.

    dataset: rows whose last element is the label,
        e.g. [[0, 0, 0, 0, 'N'], [0, 0, 0, 1, 'N'], [1, 0, 0, 0, 'Y']]
    header_names: feature names, consumed (mutated) as features are used.
    Returns either a label (leaf) or {feature_name: {value: subtree}}.
    """
    labels = [instance[-1] for instance in dataset]
    if labels.count(labels[0]) == len(labels):
        # Return if all the values are the same
        return labels[0]
    # Only the label column remains: majority vote.
    if len(dataset[0]) == 1:
        label_count_map = {}
        for label in labels:
            if label not in label_count_map:
                label_count_map[label] = 0
            label_count_map[label] += 1
        # BUG FIX: dict.iteritems() and the 'reversed' keyword were Python 2
        # leftovers; Python 3 needs items() and sorted(..., reverse=True).
        sorted_label_count_map = sorted(
            label_count_map.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_label_count_map[0][0]
    best_feature_id = choose_best_feature_to_split(dataset)
    header_name = header_names[best_feature_id]
    decision_tree = {header_name: {}}
    # TODO: don't modify the input parameter
    del (header_names[best_feature_id])
    all_feature_values = [instance[best_feature_id] for instance in dataset]
    unique_feature_values = set(all_feature_values)
    for value in unique_feature_values:
        # Recurse on each branch with a copy of the remaining headers.
        sub_header_names = header_names[:]
        sub_dataset = split_dataset(dataset, best_feature_id, value)
        decision_tree[header_name][value] = create_decision_tree(
            sub_dataset, sub_header_names)
    return decision_tree
def predict(decision_tree, header_names, test_dataset):
    """Walk *decision_tree* and return the predicted label for *test_dataset*.

    decision_tree: nested dicts, e.g.
        {'outlook': {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}}
    header_names: feature names aligned with test_dataset's columns.
    Returns None when the instance's feature value has no branch in the tree
    (previously this raised UnboundLocalError).
    """
    # Example: "outlook"
    root_key = list(decision_tree.keys())[0]
    # Example: {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}
    sub_decision_tree = decision_tree[root_key]
    feature_index = header_names.index(root_key)
    predict_label = None  # BUG FIX: was unbound when no branch matched.
    for key in sub_decision_tree.keys():
        if test_dataset[feature_index] == key:
            if isinstance(sub_decision_tree[key], dict):
                # Internal node: keep descending.
                predict_label = predict(sub_decision_tree[key], header_names,
                                        test_dataset)
            else:
                # Leaf node: this is the label.
                predict_label = sub_decision_tree[key]
    return predict_label
def main():
    """Train a decision tree on the toy weather dataset, then classify one instance."""
    columns = ['outlook', 'temperature', 'humidity', 'windy']
    training_rows = [[0, 0, 0, 0, 'N'], [0, 0, 0, 1, 'N'], [1, 0, 0, 0, 'Y'],
                     [2, 1, 0, 0, 'Y'], [2, 2, 1, 0, 'Y'], [2, 2, 1, 1, 'N'],
                     [1, 2, 1, 1, 'Y']]
    # create_decision_tree mutates the header list, so hand it a copy.
    tree = create_decision_tree(training_rows, columns[:])
    print("Train and get decision tree: {}".format(tree))
    prediction = predict(tree, columns, [2, 1, 0, 0])
    print("Predict decision tree and get result: {}".format(prediction))


if __name__ == "__main__":
    main()
|
import unittest
import pandas as pd
from runpandarun.dataset import Dataset, RESAMPLE_METHODS, RESAMPLE_INTERVALS
from runpandarun.store import Datastore
from runpandarun.storage import Storage
class Test(unittest.TestCase):
    """Integration tests for Datastore/Dataset against the bundled example config."""

    def setUp(self):
        # Fresh store from the example config for every test.
        self.store = Datastore('./example/config.yml')

    def test_init(self):
        """The configured data root shows up in reprs and the backend path."""
        store = self.store
        self.assertIn('datastore-testdata', repr(store))
        self.assertIn('datastore-testdata', repr(store._storage))
        self.assertIn('datastore-testdata', store._storage.backend.get_base_path())

    def test_store(self):
        """update() advances the completion timestamp each time."""
        store = self.store
        self.assertIsInstance(store._storage, Storage)
        # updating
        store.update()
        self.assertIsNotNone(store.last_update)
        self.assertIsNotNone(store.last_complete_update)
        last_complete_update = store.last_complete_update
        store.update()
        self.assertGreater(store.last_complete_update, last_complete_update)

    def test_store_datasets(self):
        """Datasets are listed, iterable, and addressable as attributes."""
        store = self.store
        self.assertIsInstance(store.datasets, list)
        self.assertIsInstance([d for d in store], list)
        self.assertEqual(4, len(store.datasets))
        dataset = store.datasets[0]
        self.assertEqual(getattr(store, 'my_dataset'), dataset)
        dataset = store.datasets[1]
        self.assertEqual(getattr(store, 'a_local_csv'), dataset)

    def test_datasets(self):
        dataset = self.store.datasets[0]
        self.assertIsInstance(dataset, Dataset)

    def test_df(self):
        df = self.store.datasets[0].get_df()
        self.assertIsInstance(df, pd.DataFrame)
        self.assertEqual('id', df.index.name)

    def test_json(self):
        """JSON-backed dataset builds a date-indexed DataFrame."""
        ds = self.store.a_local_json
        self.assertTrue(ds.config.dt_index)
        df = ds.get_df()
        self.assertIsInstance(df, pd.DataFrame)
        self.assertEqual('date', df.index.name)

    def test_dtindex(self):
        df = self.store.a_local_csv.get_df()
        self.assertIsInstance(df.index, pd.DatetimeIndex)

    def test_resampling(self):
        """Every resample interval exposes every resample method as a callable."""
        ds = self.store.a_local_csv
        for interval in RESAMPLE_INTERVALS.keys():
            resample = getattr(ds, interval, None)
            self.assertIsNotNone(resample)
            for method in RESAMPLE_METHODS.keys():
                func = getattr(resample, method, None)
                self.assertIsNotNone(func)
                self.assertTrue(callable(func))
                # Spot-check actual output for the yearly interval only.
                if interval == 'yearly':
                    df = func()
                    self.assertIsInstance(df, pd.DataFrame)
                    self.assertEqual(len(df), len(df.index.year.unique()))
                    if method == 'count':
                        self.assertEqual(df.shape[1], 1)
                        self.assertEqual(list(df.columns), ['count'])

    def test_combine_long(self):
        """Long combine = row-wise concat of datasets sharing columns."""
        df1 = self.store.a_local_csv.get_df()
        df2 = self.store.a_local_json.get_df()
        combined = self.store.combined
        self.assertSetEqual(set(combined.columns), set(df1.columns))
        self.assertEqual(len(df1) + len(df2), len(combined))
        self.assertTrue(combined.equals(pd.concat([df1, df2]).sort_index()))

    def test_combine_wide(self):
        """Wide combine = column-wise concat with dataset-name prefixes."""
        # add a dummy (copied) dataset
        config = """
        storage:
          filesystem:
            data_root: datastore-testdata/test_combine
        combine:
          - a_local_csv
          - same_but_different
        datasets:
          a_local_csv:
            csv_local: ./example/testdata.csv
            columns:
              - value: amount
              - state
              - date
            dt_index: date
          same_but_different:
            csv_local: ./example/testdata.csv
            columns:
              - amount
              - location: state
              - date
            dt_index: date
        """
        store = Datastore(config)
        store.update()
        df1 = store.a_local_csv.get_df()
        df1 = df1.rename(columns={c: f'a_local_csv.{c}' for c in df1.columns})
        df2 = store.same_but_different.get_df()
        df2 = df2.rename(columns={c: f'same_but_different.{c}' for c in df2.columns})
        combined = store.combined
        self.assertEqual(len(df1), len(combined))
        self.assertTrue(combined.equals(pd.concat([df1, df2], axis=1)))

    def test_incremental(self):
        """Disabled: incremental append semantics changed upstream."""
        # FIXME TODO
        # create a proper incremental scenario,
        # this test breaks with the new handling of not storing identical files
        return
        config = """
        storage:
          filesystem:
            data_root: datastore-testdata/test_incremental
        datasets:
          my_dataset:
            csv_url: https://docs.google.com/spreadsheets/d/e/2PACX-1vRhzhiVJr0XPcMANnb9_F7bcE6h-C5826MGJs034AocLpyo4uy0y97LIG2ns8F1heCrSTsyEkL1XwDK/pub?output=csv # noqa
            columns:
              - id: identifier
              - value
              - date
            incremental: true
            ops: false # disable drop_duplicates to simulate updated data
        """
        store = Datastore(config)
        ds = store.my_dataset
        self.assertTrue(ds.config.incremental)
        items = len(ds.get_df())
        ds = ds.update()
        self.assertGreater(len(ds.get_df()), items)
        self.assertEqual(len(ds.get_df()), items*2)
        config = store.config.to_dict()
        del config['datasets']['my_dataset']['ops']  # enable default ops with drop_duplicates
        store = Datastore(config)
        ds = store.my_dataset
        self.assertTrue(ds.config.incremental)
        items = len(ds.get_df())
        ds = ds.update()
        self.assertEqual(len(ds.get_df()), items)

    def test_ops(self):
        """Configured ops chains are applied; unsafe eval in ops raises."""
        config = """
        storage:
          filesystem:
            data_root: datastore-testdata/test_incremental
        datasets:
          my_dataset:
            csv_local: ./example/testdata.csv
            dt_index: date
        """
        store = Datastore(config)
        ds = store.datasets[0]
        self.assertIsInstance(ds.config.ops, list)  # base ops
        config = store.config.to_dict()
        config['datasets']['my_dataset']['ops'] = [
            {'sort_values': {'ascending': False, 'by': 'state'}},
            {'fillna': {'value': ''}},
            {'applymap': {'func': 'lambda x: x.lower() if isinstance(x, str) else x'}}
        ]
        store = Datastore(config)
        ds = store.datasets[0]
        df = ds.get_df()
        self.assertTrue(all(df['state'].map(lambda x: x.islower())))
        # unsafe eval raise
        config['datasets']['my_dataset']['ops'] = [
            {'applymap': {'func': "__import__('os').system('rm -rf /tmp/still-dont-be-too-risky-in-this-test')"}}
        ]
        store = Datastore(config)
        ds = store.datasets[0]
        self.assertRaises(NameError, ds.get_df)

    def test_json_dtype(self):
        store = self.store
        df = store.a_local_json.get_df()
        self.assertTrue(df['value'].dtype.name == 'object')

    def test_columns_map(self):
        ds = self.store.a_local_json
        df = ds.get_df()
        self.assertTrue(all(df['state'].map(lambda x: x.isupper())))
|
from flask import Flask
from flask import request
from threading import Thread
import json
import sys
from pprint import pprint
from handlers import *
import os
import requests
import ast
# Canned reply body for liveness PING checks.
PING_RESPONSE = {"id": 1}
# Addresses of peer datanodes that updates are propagated to.
DATANODE_ADDRESSES = set()
# Namenode URL; filled in from argv in the __main__ block.
NAMENODE = ''
app = Flask(__name__)
class DataNodePropagate(Thread):
    """Background thread that forwards one update payload to a single peer datanode."""

    def __init__(self, url, data):
        Thread.__init__(self)
        self.url = url
        self.data = data

    def run(self):
        """Send the payload, retrying up to three times on failure."""
        tries = 0
        while tries < 3:
            try:
                resp = requests.get(self.url, json=self.data)
                # verdict 0 means the peer accepted the update.
                if resp.json()['verdict'] == 0:
                    break
            except Exception:
                # ROBUSTNESS: narrowed from a bare except, which also swallowed
                # SystemExit/KeyboardInterrupt during shutdown.
                pass
            tries += 1
def request_datanodes_info():
    """Ask the namenode for the datanode list and memorize the returned addresses."""
    reply = requests.get(NAMENODE, json={'command': 'dn_list', 'size': 1000})
    add_nodes(reply.json())
def propagate(request):
    """Fan the incoming update out to every known peer datanode.

    Requests already tagged 'no-prop' are not re-propagated, which prevents
    infinite forwarding loops between peers.
    """
    data = request.get_json()
    if 'no-prop' in data:
        return
    # Tag the payload so the receiving peers do not forward it again.
    data['no-prop'] = 'yes'
    for node in DATANODE_ADDRESSES:
        worker = DataNodePropagate(node, data)
        worker.start()
def add_nodes(json_data):
    """Memorize the datanode addresses listed under 'arguments' in *json_data*."""
    if 'arguments' not in json_data:
        return {'verdict': 1, 'message': 'No arguments are provided.'}
    for address in json_data['arguments']:
        DATANODE_ADDRESSES.add(address)
    return {'verdict': 0, 'message': 'New datanodes were memorized.'}
def remove_nodes(json_data):
    """Forget the datanode addresses listed under 'arguments' in *json_data*."""
    if 'arguments' not in json_data:
        return {'verdict': 1, 'message': 'No arguments are provided.'}
    for address in json_data['arguments']:
        DATANODE_ADDRESSES.remove(address)
    return {'verdict': 0, 'message': 'Old datanodes were forgotten.'}
@app.route('/', methods=['GET', 'POST'])
def get_request():
    """Single entry point for all datanode commands.

    Dispatches on the 'command' field of the JSON body; file-mutating
    commands are additionally propagated to the peer datanodes.
    """
    json_data = request.get_json()
    print(json_data)
    try:
        cmd = json_data['command']
    except KeyError:
        return json.dumps({'verdict': 1, 'message': 'The request does not contain command field.'})
    print('json_data-----------', json_data)
    # IDIOM: the long elif chain is replaced with a dispatch table mapping
    # each file command to (handler, should_propagate); the dead trailing
    # 'pass' after the else branch was removed.
    handlers = {
        'write_file': (write_file, True),
        'init': (init, True),
        'create_file': (create_file, True),
        'delete_file': (delete_file_or_dir, True),
        'copy_file': (copy_file, True),
        'move_file': (move_file, True),
        'read_file': (read_file, False),
        'delete_dir': (delete_file_or_dir, True),
        'read_dir': (read_dir, False),
        'make_dir': (make_dir, True),
        'repl': (replicate, False),
    }
    if cmd == 'PING':
        result = PING_RESPONSE
    elif cmd == 'NEW_NODE':
        result = add_nodes(request.get_json())
    elif cmd == "DELETE_NODE":
        result = remove_nodes(request.get_json())
    elif cmd in handlers:
        handler, should_propagate = handlers[cmd]
        result = handler(app, request)
        if should_propagate:
            propagate(request)
    else:
        result = {'verdict': 1, 'message': 'The command is invalid.'}
    return result
if __name__ == "__main__":
    # Expected invocation: python <script> <namenode_url> <storage_root>
    if len(sys.argv) == 3:
        NAMENODE = sys.argv[1]
        root = sys.argv[2]
    else:
        raise ValueError('The arguments provided are incorrect.')
    app.config['root'] = root
    # Learn about the peer datanodes before serving any requests.
    request_datanodes_info()
    app.run(debug=True, host='0.0.0.0', port=8000)
|
import glob
import os
import sys
import requests
from PIL import Image
from configparser import ConfigParser
os.chdir(os.path.dirname(sys.argv[0]))  # Set the working directory to the folder containing this script.
# try:  # Would let us report an error from anywhere in the program without quitting it.
# Fetch the example pattern file directly online, so the appearance can be updated without touching this file.
gist_url = 'https://gist.githubusercontent.com/AiroPi/54e11509fac8db37eb2be1ae18b2ef38/raw/pattern.ini'
pattern_file = requests.get(gist_url)
config = ConfigParser()  # Open the .ini file that lists all the shortcuts we want to create.
config.optionxform = str
config.read('@Resources/Config.ini')
for path in glob.glob('@Resources/*/*'):  # Walk every folder under @Resources to reach all the images.
    if os.path.isdir(path):
        continue  # A directory is not an image :)
    filename, file_ext = os.path.splitext(path)  # Grab the base name and the extension (png, ico...)
    if file_ext.upper() != '.ICO':  # Convert non-icons to .ico so they are lighter and do not break the skin.
        print('Conversion de '+filename+file_ext+' en .ico')
        img = Image.open(path)
        icon_sizes = [(64,64)]
        if not os.path.isdir(os.path.split(path)[0]+'\\non_ico'):  # Create a non_ico/ folder in the resource dir if missing.
            os.mkdir(os.path.split(path)[0]+'\\non_ico')
        img = img.convert('RGBA')  # Convert to RGBA to keep transparency.
        img.save(filename+'.ico', sizes=icon_sizes)  # Save as a small .ico.
        os.replace(path, '{p[0]}\\non_ico\\{p[1]}'.format(p=os.path.split(path)))  # Move the original image into non_ico/.
print('')
def create_section_config():
    """Return a fresh ConfigParser seeded from pattern.ini.

    Prefers a local pattern.ini file; otherwise falls back to the copy
    downloaded from the gist at module load time.
    """
    parser = ConfigParser()
    parser.optionxform = str  # preserve option-name case
    if os.path.isfile('pattern.ini'):
        parser.read('pattern.ini')
    else:
        parser.read_string(pattern_file.text)
    return parser
print("Nous allons créer votre widget pour ces catégories :", ", ".join(config.sections()))
for section in [config[section_name] for section_name in config.sections()]:  # We go through the different sections (example: uPlay, Steam…)
    if section.name == "Default":
        continue
    print(f"En cours pour {section.name}...")
    section_config = create_section_config()
    direction = section.pop('direction', 'right')  # Will decide if the widget will be oriented to the right or left (default to right)
    print(direction)
    print(direction == 'right')
    # Icon slots are numbered left-to-right or right-to-left depending on direction.
    if direction == 'right':
        list_index_positions = list(range(len(section.items())))
    else:
        list_index_positions = list(reversed(range(len(section.items()))))
    sections_to_append = []
    app_number = 1
    # Map each lowercased icon file name (without extension) to its Rainmeter path.
    app_icons = {os.path.splitext(os.path.basename(path))[0].lower(): path.replace('@Resources', '#@#')
                 for path in glob.glob(rf"@Resources\{section.name}\*")}
    print(app_icons)
    section_config['MeterAppsShape']['Shape'] = section_config['MeterAppsShape']['Shape'].format(len(section))
    section_config['MeterActiveOverShape']['Shape'] = section_config['MeterActiveOverShape']['Shape'].format(list_index_positions[0])
    for app_name, app_path in section.items():  # We go through all images regarding the current section (in the SECTION_NAME file in @Resources)
        # Start each meter from the template sections of the pattern config.
        MeterAppShapeIcon = dict(section_config['MeterAppShapeIcon'])
        MeterAppIcon = dict(section_config['MeterAppIcon'])
        MeterAppText = dict(section_config['MeterAppText'])
        MeterAppIcon['LeftMouseUpAction'] = app_path
        if app_name == 'category':
            # The special 'category' entry becomes the always-visible leading icon.
            MeterAppIcon['ImageName'] = app_icons.get(section.name.lower(), 'introuvable')
            MeterAppText['Text'] = section.name
            MeterAppShapeIcon['Shape'] = MeterAppShapeIcon['Shape'].format(list_index_positions[0])
            MeterAppIcon['X'] = MeterAppIcon['X'].format(list_index_positions[0])
            MeterAppIcon['Container'] = MeterAppIcon['Container'].format(0)
            MeterAppText['FontColor'] = MeterAppText['FontColor'].format(255)
            MeterAppIcon.pop('Group')
            MeterAppIcon.pop('Hidden')
            MeterAppIcon.pop('ImageAlpha')
            MeterAppText.pop('Group')
            sections_to_append.insert(0, (MeterAppShapeIcon, MeterAppIcon, MeterAppText))
            print(f"....raccourcis pour l'application-catégorie {section.name} configuré")
            continue
        MeterAppText['Text'] = app_name
        MeterAppIcon['ImageName'] = app_icons.get(app_name.lower(), 'introuvable')
        MeterAppShapeIcon['Shape'] = MeterAppShapeIcon['Shape'].format(list_index_positions[app_number])
        MeterAppIcon['X'] = MeterAppIcon['X'].format(list_index_positions[app_number])
        MeterAppIcon['Container'] = MeterAppIcon['Container'].format(app_number)
        MeterAppText['FontColor'] = MeterAppText['FontColor'].format('#Alpha#')
        sections_to_append.append((MeterAppShapeIcon, MeterAppIcon, MeterAppText))
        print(f"....raccourcis pour l'application {app_name} configuré")
        app_number += 1
    # Replace the template meters with the generated, numbered ones.
    del section_config['MeterAppShapeIcon']
    del section_config['MeterAppIcon']
    del section_config['MeterAppText']
    for app_index, app_meters in enumerate(sections_to_append):
        section_config[f'MeterAppShapeIcon{app_index}'] = app_meters[0]
        section_config[f'MeterAppIcon{app_index}'] = app_meters[1]
        section_config[f'MeterAppText{app_index}'] = app_meters[2]
    # Write the generated skin into its own SECTION_NAME/SECTION_NAME.ini file.
    if section.name not in [name for name in os.listdir(".") if os.path.isdir(name)]:
        os.mkdir(section.name)
    with open(section.name + '/' + section.name + '.ini', 'w') as configfile:
        section_config.write(configfile)
    print(f"Section {section.name} créée avec succès !\n")
input('Appuyez sur un touche pour fermer...')
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Uses a context manager so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Read the long description up front so the file handle is closed promptly
# (the original passed an open() result straight into setup(), leaking it).
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='bitoolbox',
    version='0.5.2',
    author='Thiago MadPin',
    author_email='madpin@gmail.com',
    description="A toolbox for BI purpouses",
    packages=[
        'bitoolbox',
        'bitoolbox.dbtools',
        'bitoolbox.datetools',
        'bitoolbox.pogtools',
        'bitoolbox.omnitools',
        'bitoolbox.pdutils',
        'bitoolbox.birsttools',
        'bitoolbox.platformtools',
        'bitoolbox.logtools',
        'bitoolbox.tracksaletools',
        'bitoolbox.webtools',
        'bitoolbox.geotools',
    ],
    long_description=long_description,
    install_requires=[
        # dependencies are currently unpinned/disabled:
        # 'pandas',
        # 'configparser>=3.3.0.post2',
        # 'pandas>=0.17.0',
        # 'PyYAML>=3.11'
    ])
|
from __future__ import division
import os
import sys
import random
# Make a locally-installed scikit-learn 0.11 egg importable when present
# (site-specific path hack; a no-op on machines without that egg).
if not '/home/blakeb/.local/lib/python2.7/scikit_learn-0.11-py2.7-linux-x86_64.egg' in sys.path:
    if os.path.exists('/home/blakeb/.local/lib/python2.7/scikit_learn-0.11-py2.7-linux-x86_64.egg'):
        sys.path.append('/home/blakeb/.local/lib/python2.7/scikit_learn-0.11-py2.7-linux-x86_64.egg')
import sklearn as sk
from sklearn.svm import SVC
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
import utils as ut
import numpy as np
import multiprocessing
import features as fe
# Worker count for the tree ensembles below (they default to NCORES-1 jobs).
NCORES = multiprocessing.cpu_count()
def fit_and_test(scored, clf, norm=True):
    """
    scored: [arr_train, arr_test]

    Fit clf on the training array and return the classified test array;
    returns [] when the training data does not contain both classes.
    """
    arr_train, arr_test = scored
    # a classifier cannot be trained without at least one pos and one neg
    if not exist_pos_neg(arr_train):
        return []
    scaler = fit_clf(arr_train, clf, norm=norm)
    return classify(clf, arr_test, scaler)
def exist_pos_neg(arr):
    """Return True iff arr['hit'] contains at least one truthy and at least
    one falsy label, i.e. both classes are present.

    Replaces the manual two-flag loop with the builtin any() idiom;
    returns False for an empty label column, like the original.
    """
    hits = arr['hit']
    found_true = any(bool(h) for h in hits)
    found_false = any(not h for h in hits)
    return found_true and found_false
def fit_clf(arr, clfbase, norm=True):
    """Fit clfbase on the features/labels of structured array arr.

    Returns the fitted scaler (None when norm is False); the classifier
    itself is fitted in place.  NOTE: Python 2 module (print statements).
    """
    if norm:
        arr = fe.retype_arr(arr) # change f2 to f4 to prevent overflow
    X,y = arr_feats(arr), arr['hit']
    scaler = None
    if norm:
        X, scaler = normalize(X)
    print "Training classifier: %s examples, %s features" % (len(X), len(X[0]))
    clfbase.fit(X,y) # skip this if just normalizing
    return scaler
def normalize(X):
    """Standardize features to zero mean / unit variance; return (X, fitted scaler).

    NOTE(review): relies on sklearn.preprocessing being reachable as
    sk.preprocessing after a bare `import sklearn as sk` - confirm with
    the sklearn version in use.
    """
    print "Fitting and scaling training features."
    scaler = sk.preprocessing.StandardScaler().fit(X)
    X = scaler.transform(X)
    return X, scaler
def classify(clf, arr, scaler=None, do_sort=True):
    """
    If the clf was trained without at least a pos and a neg, this will fail.

    Returns (id1, id2, prob, hit) tuples, optionally shuffled (to break
    ties randomly) and then sorted by descending probability.
    """
    X = arr_feats(arr)
    if scaler:
        print "Scaling features before prediction."
        X = scaler.transform(X)
    print "Predicting: %s examples, %s features" % (len(arr), len(X[0]))
    # probability of the positive class for each example
    probs = (x[1] for x in clf.predict_proba(X))
    # NOTE: Python 2 zip() returns a list, which shuffle/sort below require.
    tested = zip(arr['id1'], arr['id2'], probs, arr['hit'])
    if do_sort:
        random.shuffle(tested)
        tested.sort(key=lambda x:x[2],reverse=True)
    return tested
def arr_feats(arr):
    """Extract the feature columns (every field past the first three,
    which are id1/id2/hit) as a list of per-row value lists."""
    feature_names = list(arr.dtype.names[3:])
    return [list(row) for row in arr[feature_names]]
def tree(n_estimators=200, n_jobs=NCORES-1, bootstrap=True, **kwargs):
    """Build an ExtraTreesClassifier (bagged extremely-randomized trees)
    with bootstrap sampling enabled by default."""
    options = dict(kwargs, n_estimators=n_estimators, n_jobs=n_jobs,
                   bootstrap=bootstrap)
    return ExtraTreesClassifier(**options)
def tree_feats(**kwargs):
    """Tree ensemble configured to compute feature importances
    (see feature_selection)."""
    return tree(compute_importances=True, **kwargs)
def svm(kernel='linear', cache_size=4000, **kwargs):
    """Build an SVC with probability estimates enabled (required for
    classify(), which calls predict_proba)."""
    return SVC(probability=True, cache_size=cache_size, kernel=kernel, **kwargs)
def linear(dual=False, **kwargs):
    """Build a LinearSVC; dual=False suits the n_samples > n_features case."""
    options = dict(kwargs, dual=dual)
    return LinearSVC(**options)
def feature_selection(arr, clf, printn=10, do_plot=False):
    """
    clf: ml.tree(compute_importances=True) or ml.linear()

    Fits clf on arr, prints the printn highest-weighted features and
    returns (feature_names, weights) sorted by descending importance.
    """
    names = arr.dtype.names[3:]
    fit_clf(arr, clf, norm=True)
    # linear models expose coef_, tree ensembles expose feature_importances_
    importances = (clf.coef_[0] if hasattr(clf, 'coef_') else
        clf.feature_importances_)
    indices = np.argsort(importances)[::-1]
    ranked = [(names[index], importances[index]) for index in indices]
    print "Dislaying top %s features:" % printn
    for i,(name,imp) in enumerate(ranked[:printn]):
        print "%d. %s (%f)" % (i + 1, name, imp)
    # Plot the feature importances of the trees and of the forest
    # NOTE(review): 'forest' and 'indnums' are undefined in this scope -
    # calling with do_plot=True would raise NameError.
    if do_plot:
        import pylab as pl
        pl.figure()
        pl.title("Feature importances")
        for tree in forest.estimators_:
            pl.plot(indnums, tree.feature_importances_[indices], "r")
        pl.plot(indnums, importances[indices], "b")
        pl.show()
    feats, weights = zip(*ranked)
    return list(feats), list(weights)
if __name__ == '__main__':
    if len(sys.argv) < 4:
        sys.exit("usage: python ml.py train_test feats_f clf_type \
donorm kwarg1_val1-kwarg2-val2")
    ttf = sys.argv[1]
    # train/test array pair saved with numpy
    tt = np.load(ttf)
    feats = ut.loadpy(sys.argv[2])
    k = sys.argv[3]
    do_norm = sys.argv[4]
    kvs = sys.argv[5]
    # parse "key_val-key_val" into a kwargs dict for the classifier factory
    kwargs = dict([tuple(kv.split('_')) for kv in kvs.split('-')]) \
        if kvs else {}
    clf = tree(**kwargs) if k=='tree' else svm(kernel=k, **kwargs)
    # evaluate using the top-n ranked features for several values of n
    ts = [('%s features, %s kernel, norm: %s, %s' %(n,k,do_norm, kvs),
           fit_and_test([fe.keep_cols(t, ut.i0(feats[:n])) for t in tt],
                        clf, norm=do_norm))
          for n in 20,30,40,50]
    ut.savepy(ts, 'ts_%s_%s_%s_%s' %(k,do_norm,kvs,ttf))
|
from polars.datatypes import *
|
import pyautogui
from time import sleep
def __write__(key):
    """Type the given key sequence (list of key names) via pyautogui."""
    pyautogui.typewrite(key)
def __hotkey__(key1,key2):
    """Press the key1+key2 hotkey combination via pyautogui."""
    pyautogui.hotkey(key1,key2)
def __click__(x1,y1,click):
    """Click the given mouse button ('left'/'right') at screen position (x1, y1)."""
    pyautogui.click(x=x1,y=y1,button=click)
# Click at (355, 1060), wait for the UI to react, type "rew", then press Escape.
__click__(355, 1060, 'left')
sleep(4)
__write__(['r', 'e', 'w'])
sleep(1)
__write__(['esc'])
|
# Print every even number between 1 and 50, space-separated.
for even in range(2, 51, 2):
    print(even, end=" ")
print("funcionou!")
"""
Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node with value 3, the linked list should become 1 -> 2 -> 4 after calling your function.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        # Without access to the predecessor we cannot unlink `node` itself.
        # Instead, copy the successor's value into `node` and splice the
        # successor out of the list - O(1), no traversal required.
        successor = node.next
        node.val = successor.val
        node.next = successor.next
# Send a single UDP datagram to a hard-coded LAN host (Python 2 script).
import socket

UDP_IP = "192.168.1.6"
UDP_PORT = 8080
MESSAGE = "Hello, World!"

print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE

# SOCK_DGRAM = UDP; connectionless, a single sendto() fires the packet.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
"""
Django settings for posthog project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# isort: skip_file
import os
from typing import Dict, List
# :TRICKY: Imported before anything else to support overloads
from posthog.settings.overrides import *
from posthog.settings.base_variables import *
from posthog.settings.access import *
from posthog.settings.async_migrations import *
from posthog.settings.celery import *
from posthog.settings.data_stores import *
from posthog.settings.dynamic_settings import *
from posthog.settings.ee import EE_AVAILABLE
from posthog.settings.feature_flags import *
from posthog.settings.logging import *
from posthog.settings.sentry import *
from posthog.settings.shell_plus import *
from posthog.settings.service_requirements import *
from posthog.settings.statsd import *
from posthog.settings.web import *
from posthog.settings.utils import get_from_env, str_to_bool
# Cohort calculation: use precalculated ClickHouse cohorts everywhere except tests.
USE_PRECALCULATED_CH_COHORT_PEOPLE = not TEST
CALCULATE_X_COHORTS_PARALLEL = get_from_env("CALCULATE_X_COHORTS_PARALLEL", 2, type_cast=int)

# Instance configuration preferences
# https://posthog.com/docs/self-host/configure/environment-variables
SELF_CAPTURE = get_from_env("SELF_CAPTURE", DEBUG, type_cast=str_to_bool)
debug_queries = get_from_env("DEBUG_QUERIES", False, type_cast=str_to_bool)
disable_paid_fs = get_from_env("DISABLE_PAID_FEATURE_SHOWCASING", False, type_cast=str_to_bool)
INSTANCE_PREFERENCES = {
    "debug_queries": debug_queries,
    "disable_paid_fs": disable_paid_fs,
}

# Trailing slashes are stripped so URLs can be concatenated safely.
SITE_URL: str = os.getenv("SITE_URL", "http://localhost:8000").rstrip("/")

if DEBUG:
    JS_URL = os.getenv("JS_URL", "http://localhost:8234").rstrip("/")
else:
    JS_URL = os.getenv("JS_URL", "")

DISABLE_MMDB = get_from_env(
    "DISABLE_MMDB", TEST, type_cast=str_to_bool
)  # plugin server setting disabling GeoIP feature
PLUGINS_PREINSTALLED_URLS: List[str] = (
    os.getenv("PLUGINS_PREINSTALLED_URLS", "https://github.com/PostHog/posthog-plugin-geoip").split(",")
    if not DISABLE_MMDB
    else []
)
PLUGINS_CELERY_QUEUE = os.getenv("PLUGINS_CELERY_QUEUE", "posthog-plugins")
PLUGINS_RELOAD_PUBSUB_CHANNEL = os.getenv("PLUGINS_RELOAD_PUBSUB_CHANNEL", "reload-plugins")
PLUGINS_ALERT_CHANNEL = "plugins-alert"

# Tokens used when installing plugins, for example to get the latest commit SHA or to download private repositories.
# Used mainly to get around API limits and only if no ?private_token=TOKEN found in the plugin URL.
GITLAB_TOKEN = os.getenv("GITLAB_TOKEN", None)
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN", None)
NPM_TOKEN = os.getenv("NPM_TOKEN", None)

ACTION_EVENT_MAPPING_INTERVAL_SECONDS = get_from_env("ACTION_EVENT_MAPPING_INTERVAL_SECONDS", 300, type_cast=int)
ASYNC_EVENT_PROPERTY_USAGE = get_from_env("ASYNC_EVENT_PROPERTY_USAGE", True, type_cast=str_to_bool)
# NOTE(review): the setting name drops the ASYNC_ prefix used by its env var.
EVENT_PROPERTY_USAGE_INTERVAL_SECONDS = get_from_env(
    "ASYNC_EVENT_PROPERTY_USAGE_INTERVAL_SECONDS", 86400, type_cast=int
)
UPDATE_CACHED_DASHBOARD_ITEMS_INTERVAL_SECONDS = get_from_env(
    "UPDATE_CACHED_DASHBOARD_ITEMS_INTERVAL_SECONDS", 90, type_cast=int
)

# Whether to capture internal metrics
CAPTURE_INTERNAL_METRICS = get_from_env("CAPTURE_INTERNAL_METRICS", False, type_cast=str_to_bool)

HOOK_EVENTS: Dict[str, str] = {}

# Support creating multiple organizations in a single instance. Requires a premium license.
MULTI_ORG_ENABLED = get_from_env("MULTI_ORG_ENABLED", False, type_cast=str_to_bool)

# Overriden by posthog-cloud
MULTI_TENANCY = False

# Whether this is a managed demo environment
DEMO = get_from_env("DEMO", False, type_cast=str_to_bool)

CACHED_RESULTS_TTL = 7 * 24 * 60 * 60  # how long to keep cached results for
TEMP_CACHE_RESULTS_TTL = 24 * 60 * 60  # how long to keep non dashboard cached results for
SESSION_RECORDING_TTL = 30  # how long to keep session recording cache. Relatively short because cached result is used throughout the duration a session recording loads.

AUTO_LOGIN = get_from_env("AUTO_LOGIN", False, type_cast=str_to_bool)

# Keep in sync with plugin-server
EVENTS_DEAD_LETTER_QUEUE_STATSD_METRIC = "events_added_to_dead_letter_queue"

# Extend and override these settings with EE's ones
if "ee.apps.EnterpriseConfig" in INSTALLED_APPS:
    from ee.settings import *  # noqa: F401, F403

# Lastly, cloud settings override and modify all
from posthog.settings.cloud import *

# TODO: Temporary
EMAIL_REPORTS_ENABLED: bool = get_from_env("EMAIL_REPORTS_ENABLED", False, type_cast=str_to_bool)
|
#!/usr/bin/env python
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Tag filter module used by testrunner
# This provides tag based filtering function for test case set.
"""Tag Filter Module"""
import unittest
TAG_PREFIX = "tag__"


def tag(*args, **kwargs):
    """tag decorator that adds attributes to classes or functions.

    Positional names become boolean tags (``tag__<name> = True``);
    keyword arguments become valued tags (``tag__<key> = <value>``).
    """
    def wrap_obj(obj):
        """wrap function: attach the collected tags to obj and return it."""
        for name in args:
            setattr(obj, TAG_PREFIX + name, True)
        # .items() instead of .iteritems(): behaves identically on Python 2
        # and also works on Python 3, where iteritems() no longer exists.
        for name, value in kwargs.items():
            setattr(obj, TAG_PREFIX + name, value)
        return obj
    return wrap_obj
def hastag(obj, key):
    """Check if obj has a tag.

    For TestCase instances the bound test method is checked first, then
    the instance/class itself; any other object is checked directly.
    """
    attr = TAG_PREFIX + key
    if isinstance(obj, unittest.TestCase):
        method = getattr(obj, obj._testMethodName)
        return hasattr(method, attr) or hasattr(obj, attr)
    return hasattr(obj, attr)
def gettag(obj, key, default=None):
    """Get a tag value from obj, falling back to default.

    TestCase instances resolve through the bound test method first,
    then the instance/class; other objects are read directly.
    """
    attr = TAG_PREFIX + key
    if isinstance(obj, unittest.TestCase):
        method = getattr(obj, obj._testMethodName)
        return getattr(method, attr, getattr(obj, attr, default))
    return getattr(obj, attr, default)
def getvar(obj):
    """Build the variable namespace used to evaluate tag expressions against obj.

    Lookup order: names set directly in the dict, then tags on obj, then
    builtins (via eval); unknown names resolve to False so expressions
    never raise NameError.
    """
    class VarDict(dict):
        """wrapper of var dict"""
        def __getitem__(self, key):
            # expression may be set a var in this dict
            if key in self:
                return super(VarDict, self).__getitem__(key)
            if hastag(obj, key):
                return gettag(obj, key)
            # maybe some build-in object (e.g. True/False/len)
            # NOTE: eval of a bare name; tag expressions must be trusted.
            try:
                return eval(key, {}, {})
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer silently swallowed
                return False
    return VarDict()
def checktags(testcase, tagexp):
    """eval tag expression and return the result.

    NOTE: uses eval(); *tagexp* must come from a trusted source.
    """
    return eval(tagexp, None, getvar(testcase))
def filter_tagexp(testsuite, tagexp):
    """Recursively filter a test suite by a tag expression.

    An empty expression is a no-op; otherwise only cases for which the
    expression evaluates truthy are kept, preserving suite nesting.
    """
    if not tagexp:
        return testsuite
    kept = []
    for item in testsuite:
        if isinstance(item, unittest.BaseTestSuite):
            kept.append(filter_tagexp(item, tagexp))
        elif checktags(item, tagexp):
            kept.append(item)
    return testsuite.__class__(kept)
def _testset(testsuite):
"""iterate tc in testsuite"""
for each in testsuite:
if unittest.suite._isnotsuite(each):
yield each
else:
for each2 in _testset(each):
yield each2
class TagInformations(object):
    """Get all tags informations of test suite"""

    def __init__(self, tests):
        self.tests = tests

    def _testset(self):
        """Yield every individual test case held in self.tests."""
        for each in _testset(self.tests):
            yield each

    def count(self):
        """test cases count"""
        ret = 0
        for test in self._testset():
            ret += test.countTestCases()
        return ret

    def group_by(self, key):
        """group by key, return key/val dict; untagged cases go under "others"."""
        ret = {}
        for test in self._testset():
            if hastag(test, key):
                ret.setdefault(gettag(test, key), []).append(test)
                continue
            ret.setdefault("others", []).append(test)
        for k in ret:
            ret[k] = self.__class__(ret[k])
        return ret

    def get_sum(self, *args, **kwargs):
        """get summary report.

        Call with exactly one positional tag name (presence check) or one
        key=value pair (equality check).
        """
        assert len(args) + len(kwargs) == 1
        lst = []
        for test in self._testset():
            if args:
                if hastag(test, args[0]):
                    lst.append(test)
            elif kwargs:
                # list(...) keeps this working on Python 3, where
                # dict.items() is a view and does not support indexing.
                k, v = list(kwargs.items())[0]
                if gettag(test, k) == v:
                    lst.append(test)
        return self.__class__(lst)
|
# -*- coding: utf-8 -*-
import torch
from supar.utils.common import MIN
from supar.utils.fn import pad
def kmeans(x, k, max_it=32):
    r"""
    KMeans algorithm for clustering the sentences by length.

    Args:
        x (list[int]):
            The list of sentence lengths.
        k (int):
            The number of clusters.
            This is an approximate value. The final number of clusters can be less or equal to `k`.
        max_it (int):
            Maximum number of iterations.
            If centroids does not converge after several iterations, the algorithm will be early stopped.

    Returns:
        list[float], list[list[int]]:
            The first list contains average lengths of sentences in each cluster.
            The second is the list of clusters holding indices of data points.

    Examples:
        >>> x = torch.randint(10,20,(10,)).tolist()
        >>> x
        [15, 10, 17, 11, 18, 13, 17, 19, 18, 14]
        >>> centroids, clusters = kmeans(x, 3)
        >>> centroids
        [10.5, 14.0, 17.799999237060547]
        >>> clusters
        [[1, 3], [0, 5, 9], [2, 4, 6, 7, 8]]
    """

    # the number of clusters must not be greater than the number of datapoints
    x, k = torch.tensor(x, dtype=torch.float), min(len(x), k)
    # collect unique datapoints
    d = x.unique()
    # initialize k centroids randomly
    c = d[torch.randperm(len(d))[:k]]
    # assign each datapoint to the cluster with the closest centroid
    # dists: distance to the nearest centroid; y: index of that centroid
    dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)

    for _ in range(max_it):
        # if an empty cluster is encountered,
        # choose the farthest datapoint from the biggest cluster and move that the empty one
        mask = torch.arange(k).unsqueeze(-1).eq(y)
        none = torch.where(~mask.any(-1))[0].tolist()
        while len(none) > 0:
            for i in none:
                # the biggest cluster
                b = torch.where(mask[mask.sum(-1).argmax()])[0]
                # the datapoint farthest from the centroid of cluster b
                f = dists[b].argmax()
                # update the assigned cluster of f
                y[b[f]] = i
                # re-calculate the mask
                mask = torch.arange(k).unsqueeze(-1).eq(y)
            none = torch.where(~mask.any(-1))[0].tolist()
        # update the centroids
        c, old = (x * mask).sum(-1) / mask.sum(-1), c
        # re-assign all datapoints to clusters
        dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)
        # stop iteration early if the centroids converge
        if c.equal(old):
            break
    # assign all datapoints to the new-generated clusters
    # the empty ones are discarded
    assigned = y.unique().tolist()
    # get the centroids of the assigned clusters
    centroids = c[assigned].tolist()
    # map all values of datapoints to buckets
    clusters = [torch.where(y.eq(i))[0].tolist() for i in assigned]

    return centroids, clusters
def tarjan(sequence):
    r"""
    Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.

    Args:
        sequence (list):
            List of head indices.

    Yields:
        A list of indices making up a SCC. All self-loops are ignored.

    Examples:
        >>> next(tarjan([2, 5, 0, 3, 1]))  # (1 -> 5 -> 2 -> 1) is a cycle
        [2, 5, 1]
    """

    # prepend a dummy root so node i lives at index i
    heads = [-1] + sequence
    n = len(heads)
    # discovery time of each node (-1 = unvisited)
    order = [-1] * n
    # smallest discovery time reachable from each node
    lowlink = [-1] * n
    visit_stack, on_stack = [], [False] * n

    def strongconnect(node, clock):
        order[node] = lowlink[node] = clock[0]
        clock[0] += 1
        visit_stack.append(node)
        on_stack[node] = True
        # visit every dependent whose head is the current node
        for child, head in enumerate(heads):
            if head != node:
                continue
            if order[child] == -1:
                yield from strongconnect(child, clock)
                lowlink[node] = min(lowlink[node], lowlink[child])
            elif on_stack[child]:
                lowlink[node] = min(lowlink[node], order[child])
        # node is the root of an SCC: pop the entire component off the stack
        if lowlink[node] == order[node]:
            component = [visit_stack.pop()]
            while component[-1] != node:
                on_stack[component[-1]] = False
                component.append(visit_stack.pop())
            on_stack[node] = False
            # single-node components are self-loops and are skipped
            if len(component) > 1:
                yield component

    clock = [0]
    for node in range(n):
        if order[node] == -1:
            yield from strongconnect(node, clock)
def chuliu_edmonds(s):
    r"""
    ChuLiu/Edmonds algorithm for non-projective decoding :cite:`mcdonald-etal-2005-non`.

    Some code is borrowed from `tdozat's implementation`_.
    Descriptions of notations and formulas can be found in :cite:`mcdonald-etal-2005-non`.

    Notes:
        The algorithm does not guarantee to parse a single-root tree.

    Args:
        s (~torch.Tensor): ``[seq_len, seq_len]``.
            Scores of all dependent-head pairs.

    Returns:
        ~torch.Tensor:
            A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.

    .. _tdozat's implementation:
        https://github.com/tdozat/Parser-v3
    """

    # NOTE: mutates ``s`` in place (root row and diagonal are masked with MIN)
    s[0, 1:] = MIN
    # prevent self-loops
    s.diagonal()[1:].fill_(MIN)
    # select heads with highest scores
    tree = s.argmax(-1)
    # return the cycle finded by tarjan algorithm lazily
    cycle = next(tarjan(tree.tolist()[1:]), None)
    # if the tree has no cycles, then it is a MST
    if not cycle:
        return tree

    # indices of cycle in the original tree
    cycle = torch.tensor(cycle)
    # indices of noncycle in the original tree
    noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
    noncycle = torch.where(noncycle.gt(0))[0]

    def contract(s):
        # heads of cycle in original tree
        cycle_heads = tree[cycle]
        # scores of cycle in original tree
        s_cycle = s[cycle, cycle_heads]

        # calculate the scores of cycle's potential dependents
        # s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
        s_dep = s[noncycle][:, cycle]
        # find the best cycle head for each noncycle dependent
        deps = s_dep.argmax(1)
        # calculate the scores of cycle's potential heads
        # s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
        # a(v) is the predecessor of v in cycle; s(cycle) = sum(s(a(v)->v))
        s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
        # find the best noncycle head for each cycle dependent
        heads = s_head.argmax(0)

        # the cycle is collapsed into a single extra node appended at index -1
        contracted = torch.cat((noncycle, torch.tensor([-1])))
        # calculate the scores of contracted graph
        s = s[contracted][:, contracted]
        # set the contracted graph scores of cycle's potential dependents
        s[:-1, -1] = s_dep[range(len(deps)), deps]
        # set the contracted graph scores of cycle's potential heads
        s[-1, :-1] = s_head[heads, range(len(heads))]

        return s, heads, deps

    # keep track of the endpoints of the edges into and out of cycle for reconstruction later
    s, heads, deps = contract(s)

    # y is the contracted tree (solved recursively on the smaller graph)
    y = chuliu_edmonds(s)
    # exclude head of cycle from y
    y, cycle_head = y[:-1], y[-1]

    # fix the subtree with no heads coming from the cycle
    # len(y) denotes heads coming from the cycle
    subtree = y < len(y)
    # add the nodes to the new tree
    tree[noncycle[subtree]] = noncycle[y[subtree]]
    # fix the subtree with heads coming from the cycle
    subtree = ~subtree
    # add the nodes to the tree
    tree[noncycle[subtree]] = cycle[deps[subtree]]
    # fix the root of the cycle
    cycle_root = heads[cycle_head]
    # break the cycle and add the root of the cycle to the tree
    tree[cycle[cycle_root]] = noncycle[cycle_head]

    return tree
def mst(scores, mask, multiroot=False):
    r"""
    MST algorithm for decoding non-projective trees.
    This is a wrapper for ChuLiu/Edmonds algorithm.

    The algorithm first runs ChuLiu/Edmonds to parse a tree and then have a check of multi-roots,
    If ``multiroot=True`` and there indeed exist multi-roots, the algorithm seeks to find
    best single-root trees by iterating all possible single-root trees parsed by ChuLiu/Edmonds.
    Otherwise the resulting trees are directly taken as the final outputs.

    Args:
        scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
            Scores of all dependent-head pairs.
        mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
            The mask to avoid parsing over padding tokens.
            The first column serving as pseudo words for roots should be ``False``.
        multiroot (bool):
            Ensures to parse a single-root tree If ``False``.

    Returns:
        ~torch.Tensor:
            A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.

    Examples:
        >>> scores = torch.tensor([[[-11.9436, -13.1464,  -6.4789, -13.8917],
                                    [-60.6957, -60.2866, -48.6457, -63.8125],
                                    [-38.1747, -49.9296, -45.2733, -49.5571],
                                    [-19.7504, -23.9066,  -9.9139, -16.2088]]])
        >>> scores[:, 0, 1:] = MIN
        >>> scores.diagonal(0, 1, 2)[1:].fill_(MIN)
        >>> mask = torch.tensor([[False,  True,  True,  True]])
        >>> mst(scores, mask)
        tensor([[0, 2, 0, 2]])
    """

    batch_size, seq_len, _ = scores.shape
    # decode on CPU, one sentence at a time
    scores = scores.cpu().unbind()

    preds = []
    for i, length in enumerate(mask.sum(1).tolist()):
        s = scores[i][:length+1, :length+1]
        tree = chuliu_edmonds(s)
        # positions attached to the pseudo root
        roots = torch.where(tree[1:].eq(0))[0] + 1
        if not multiroot and len(roots) > 1:
            # try each candidate root in isolation and keep the best-scoring tree
            s_root = s[:, 0]
            s_best = MIN
            s = s.index_fill(1, torch.tensor(0), MIN)
            for root in roots:
                s[:, 0] = MIN
                s[root, 0] = s_root[root]
                t = chuliu_edmonds(s)
                s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
                if s_tree > s_best:
                    s_best, tree = s_tree, t
        preds.append(tree)

    return pad(preds, total_length=seq_len).to(mask.device)
|
# Interval class which converts a [start, end] pair into an object form
class Interval:
    """Closed interval with mutable start/end endpoints."""

    def __init__(self, interval_=(0, 0)):
        # A tuple default avoids the shared-mutable-default-argument
        # pitfall of the original `interval_=[0, 0]`; callers may still
        # pass a list or any two-element indexable.
        self.start = interval_[0]
        self.end = interval_[1]

    def __repr__(self):
        return '[{}, {}]'.format(self.start, self.end)
class Solution:
    def merge(self, intervals):
        """Merge overlapping intervals; returns a list of Interval objects.

        Sorts by start, then sweeps with a stack, extending the top
        interval whenever the next one overlaps it.
        """
        intervals = [Interval(i) for i in intervals]
        intervals.sort(key=lambda x: x.start)
        # NOTE: Python 2 print statement - leftover debug output.
        print intervals
        stack = []
        for interval in intervals:
            if not stack:
                stack.append(interval)
            else:
                if interval.start <= stack[-1].end:
                    # overlap: extend the interval currently on top of the stack
                    stack[-1].end = max(stack[-1].end, interval.end)
                else:
                    stack.append(interval)
        return stack
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Busana Apparel Group. All rights reserved.
#
# This product and it's source code is protected by patents, copyright laws and
# international copyright treaties, as well as other intellectual property
# laws and treaties. The product is licensed, not sold.
#
# The source code and sample programs in this package or parts hereof
# as well as the documentation shall not be copied, modified or redistributed
# without permission, explicit or implied, of the author.
#
# This module is part of Centric PLM Integration Bridge and is released under
# the Apache-2.0 License: https://www.apache.org/licenses/LICENSE-2.0
import asyncio
import logging
import time
from common.prochandler import CommandProcessor
from common.msgobject import mq_event
from utils.krclient import KRWebClient
from common import consts
from solr.core import SolrConnection
class HREmpUpdateSearchDB(CommandProcessor):
    """Command processor that refreshes an employee's record in the Solr search DB.

    Pulls the searchable employee info from the Kraken REST service and
    replaces the corresponding document in the Solr employee core.
    """

    def __init__(self):
        super(HREmpUpdateSearchDB, self).__init__()
        self._module = 'TASM@EMPSEARCHDB'
        self._props = None
        self._rest_service = None
        self._solr_connection = None

    def do_configure(self):
        """Read module configuration and build the REST and Solr clients."""
        prop = self.get_module_configuration()
        # BUG FIX: the original assigned the `dict` *type* (not an instance)
        # when the module key was missing, which later raised TypeError on
        # self._props['SOLR_URL']; use an empty dict instead.
        self._props = {} if self._module not in prop else prop[self._module]
        config = self.get_configuration()
        base_url = config[consts.KRAKEN_REST_BASE_URL] if consts.KRAKEN_REST_BASE_URL in config else None
        username = config[consts.KRAKEN_REST_USERNAME] if consts.KRAKEN_REST_USERNAME in config else None
        password = config[consts.KRAKEN_REST_PASSWORD] if consts.KRAKEN_REST_PASSWORD in config else None
        if not base_url or not username or not password:
            # incomplete credentials: leave both clients unset
            return
        try:
            self._rest_service = KRWebClient(host_url=base_url, parent=self.get_parent())
            self._rest_service.set_user(username, password)
            self._solr_connection = SolrConnection("{0}/{1}".format(self._props['SOLR_URL'],
                                                                    self._props['SOLR_EMP_NAMESPACE']))
        except Exception as ex:
            logging.exception(ex)

    def get_employee_info(self, cono, emid):
        """Fetch searchable info for employee *emid* of company *cono*.

        Returns the decoded JSON payload, or an empty dict on a non-200 status.
        """
        module = self._rest_service.create_module(self._props['BASE_MODULE'])
        command = module.create_command('getEmpSearchableInfo')
        response = command.get(cono=cono, emid=emid)
        status_code = response.status_code
        try:
            output = response.json() if status_code == 200 else dict()
        finally:
            # always release the HTTP response, even if JSON decoding fails
            response.close()
        return output

    def delete_search_db_record(self, info):
        """Remove the existing Solr document for this employee, if connected."""
        if not self._solr_connection:
            return
        self._solr_connection.delete(id=info['id'])
        self._solr_connection.commit()

    def update_search_db_record(self, info):
        """Add/replace the Solr document for this employee, if connected."""
        if not self._solr_connection:
            return
        self._solr_connection.add(**info)
        self._solr_connection.commit()

    @mq_event
    def update_emp_search_db(self, cono=None, emid=None):
        """Event handler: re-index a single employee in the search DB."""
        if (not cono) or (not emid):
            logging.error("Not enough parameter to process update_emp_search_db")
            return
        logging.info("Processing Update Search for EMID: {0}".format(emid))
        try:
            info = self.get_employee_info(cono, emid)
            if 'id' not in info:
                return
            self.delete_search_db_record(info)
            self.update_search_db_record(info)
        except Exception as ex:
            logging.exception(ex)
        finally:
            # BUG FIX: guard against a missing connection (do_configure may
            # have bailed out early), which previously raised AttributeError
            # on None.close().
            if self._solr_connection:
                self._solr_connection.close()
            logging.info("End Update Search for EMID: {0}".format(emid))

    @mq_event
    async def async_update_emp_search_db(self, cono=None, emid=None):
        """Async demo/placeholder handler."""
        logging.info("Hello world")
        await asyncio.sleep(10)
        logging.info("Done")

    @mq_event
    def sync_update_emp_search_db(self, cono=None, emid=None):
        """Sync demo/placeholder handler."""
        logging.info("Hello world")
        time.sleep(10)
        logging.info("Done")
|
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
class DataSet:
    """Train/test dataset wrapper with optional scaling and dimensionality reduction.

    Keeps the original matrices so every transformation can be undone
    with restore().
    """

    DIM_REDUCTION_METHODS = ["pca", "select_k_best"]

    def __init__(self, name, X, Y, X_test, Y_test):
        self.name = name
        # pristine copies used by restore() and the reducers below
        self.origin_X, self.origin_Y, self.origin_X_test, self.origin_Y_test \
            = (X, Y, X_test, Y_test)
        # working copies mutated by regularize()/reduce_dim()
        self.X, self.Y, self.X_test, self.Y_test = (X, Y, X_test, Y_test)
        self._dr = False  # dimensionality reduction applied?
        self._r = False   # regularization (scaling) applied?

    def regularize(self):
        """Scale features to unit variance; with_mean=False keeps sparse input sparse."""
        if self._r:
            return
        ds = self
        scaler = StandardScaler(with_mean=False).fit(ds.X)
        self.X = scaler.transform(ds.X)
        self.X_test = scaler.transform(ds.X_test)
        self._r = True

    def reduce_dim(self, method):
        """Apply one of DIM_REDUCTION_METHODS; unknown names leave the data untouched."""
        if self._dr:
            return
        if method == "pca":
            self.apply_pca(n_c=10)
        elif method == "select_k_best":
            self.apply_select_k_best()
        else:
            print(method, "method not found. Dataset is not changed")
        if method in DataSet.DIM_REDUCTION_METHODS:
            self._dr = True

    def restore(self):
        """Reset the working matrices and transformation flags to the original data."""
        self.X, self.Y, self.X_test, self.Y_test = \
            self.origin_X, self.origin_Y, \
            self.origin_X_test, self.origin_Y_test
        self._r = False
        self._dr = False

    def apply_pca(self, n_c=None):
        """Project the ORIGINAL features onto n_c principal components.

        NOTE(review): works on origin_X (densified), ignoring any prior
        regularize() - confirm that is intended.
        """
        args = {}
        if n_c is None:
            if self.X.shape[0] < self.X[0].shape[0]:
                # will reduce dimentions to [m, m]
                # where m=min(n_samples, n_features)
                args = {
                    "n_components": "mle",
                    "svd_solver": 'full'
                }
        else:
            args = {
                "n_components": n_c
            }
        pca = PCA(**args)
        # .toarray(): PCA requires dense input; origin_X is presumably a
        # sparse tf-idf matrix (see load_news_group_ds) - verify at call sites
        self.X = pca.fit_transform(self.origin_X.toarray(), self.origin_Y)
        self.X_test = pca.transform(self.origin_X_test.toarray())

    def apply_select_k_best(self, k=10):
        """Keep the k features scoring highest on the chi-squared test vs the labels."""
        ch2 = SelectKBest(chi2, k=k)
        self.X = ch2.fit_transform(self.origin_X, self.origin_Y)
        self.X_test = ch2.transform(self.origin_X_test)
def load_news_group_ds(n_cats):
    """Load the 20-newsgroups text dataset (first n_cats of 5 categories)
    as a tf-idf vectorized DataSet."""
    cats = [
        'comp.graphics',
        'rec.autos',
        'talk.politics.guns',
        'sci.med',
        'rec.sport.baseball',
    ][:n_cats]
    # strip metadata that would make classification unrealistically easy
    remove = ('headers', 'footers', 'quotes')
    newsgroups_train = fetch_20newsgroups(subset='train',
                                          categories=cats,
                                          shuffle=True,
                                          random_state=42,
                                          remove=remove)
    newsgroups_test = fetch_20newsgroups(subset='test',
                                         categories=cats,
                                         shuffle=True,
                                         random_state=42,
                                         remove=remove)
    # ---------
    # Taken from sk-learn website.
    # Print loaded data size .
    def size_mb(docs):
        """Total UTF-8 encoded size of the documents in megabytes."""
        return sum(len(s.encode('utf-8')) for s in docs) / 1e6

    data_train_size_mb = size_mb(newsgroups_train.data)
    data_test_size_mb = size_mb(newsgroups_test.data)
    print("%d documents - %0.3fMB (training set)" % (
        len(newsgroups_train.data), data_train_size_mb))
    print("%d documents - %0.3fMB (test set)" % (
        len(newsgroups_test.data), data_test_size_mb))
    print("%d categories" % n_cats)
    print()
    # ---------
    # tf-idf features (sparse matrices); fit on train, transform test only
    vectorizor = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X = vectorizor.fit_transform(newsgroups_train.data)
    Y = newsgroups_train.target
    X_test = vectorizor.transform(newsgroups_test.data)
    Y_test = newsgroups_test.target
    return DataSet("d20newsgroups", X, Y, X_test, Y_test)
|
import os
import logging
from datetime import datetime, timedelta
from flask import Flask, request, abort, jsonify , render_template
import requests
def temp_token():
    """Generate a random 48-character hex token for webhook verification.

    Uses the ``secrets`` module, the recommended source of cryptographic
    randomness for tokens (equivalent output format to the original
    os.urandom + hexlify, 24 random bytes -> 48 hex chars).
    """
    import secrets
    return secrets.token_hex(24)
# Root-logger setup: DEBUG level, timestamped format, echoed to the console.
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)

# Shared secret used to verify webhook GET requests; may be None (see __main__).
WEBHOOK_VERIFY_TOKEN = os.getenv('WEBHOOK_VERIFY_TOKEN')
CLIENT_AUTH_TIMEOUT = 24  # in Hours
app = Flask(__name__)
def re_ftime(dtime):
    """Reformat an ISO-like timestamp '<date>T<time>.<frac>' to '<date> <time>'."""
    parts = dtime.split('T')
    # drop fractional seconds (and anything after the first '.')
    clock = parts[1].split('.')[0]
    return "%s %s" % (parts[0], clock)
def alert_line(data, token):
    """Forward a firing Alertmanager payload to the LINE Notify API.

    NOTE(review): the loop overwrites severity/summary/desc/stime on each
    iteration, so only the LAST alert in the batch is reported - confirm
    whether one message per alert was intended.
    """
    severity = ''
    summary = ''
    desc = ''
    stime = ''
    if data['status'] == "firing":
        for alert in data['alerts']:
            severity = alert['labels']['severity']
            summary = alert['annotations']['summary']
            desc = alert['annotations']['description']
            stime = re_ftime(alert['startsAt'])
        url = "https://notify-api.line.me/api/notify"
        message = "Problem!!\nSeverity: " + severity + "\nTime: " + stime + "\nSummary: " + summary + "\nDescription: " + desc
        # LINE Notify expects the token in the Authorization header
        header = {'Authorization': token}
        payload = {'message': str(message)}
        r = requests.post(url, headers=header, data=payload)
        print(r.content)
    else:
        # non-firing (e.g. resolved) payloads are deliberately ignored
        pass
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
    """Webhook endpoint: GET verifies the shared token, POST relays an alert to LINE."""
    rootLogger.info(request.remote_addr + "on method : " + request.method)
    if request.method == 'GET':
        # token handshake: the caller must present the configured verify_token
        verify_token = request.args.get('verify_token')
        if verify_token == WEBHOOK_VERIFY_TOKEN:
            return jsonify({'status': 'success'}), 200
        else:
            return jsonify({'status': 'bad token'}), 401
    elif request.method == 'POST':
        client = request.remote_addr  # NOTE(review): assigned but unused
        # per-request LINE Notify token supplied by the caller
        line_token = request.headers['AUTHORIZATION']
        alert_line(request.json, line_token)
        return jsonify({'status': 'success'}), 200
    else:
        # unreachable with methods=['GET', 'POST']; kept as a safety net
        abort(400)
@app.route('/get_token_webhook', methods=['GET'])
def get_token_webhook():
    """Render a page exposing the configured webhook verify token."""
    return render_template('token.html', get_token=WEBHOOK_VERIFY_TOKEN)
if __name__ == '__main__':
    # Fall back to a random one-off token when none is configured.
    if WEBHOOK_VERIFY_TOKEN is None:
        print('WEBHOOK_VERIFY_TOKEN has not been set in the environment.\nGenerating random token...')
        token = temp_token()
        print('Token: %s' % token)
        WEBHOOK_VERIFY_TOKEN = token
    app.run(host='0.0.0.0')  # default allow on port 5000.
# -*- coding: utf-8 -*-
from pyspark.sql import SparkSession
def basic_datasource_example(spark):
    """Demonstrate the generic Spark SQL data source load/save APIs.

    Covers parquet/json/csv/orc loading, partitioning, bucketing, and
    querying files directly with SQL.  (Comments translated to English.)
    """
    # Generic load/save functions.
    df = spark.read.load("users.parquet")
    df.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
    # Partitioning can be used with save and saveAsTable in the dataset API.
    df.write.partitionBy("favorite_color").format("parquet").save("namesPartByColor.parquet")
    # Partitioning and bucketing can be combined on a single table.
    df = spark.read.parquet("users.parquet")
    (df
        .write
        .partitionBy("favorite_color")
        .bucketBy(42, "name")
        .saveAsTable("people_partitioned_bucketed"))
    # Manually specify the source format; a dataframe loaded from one source
    # type can be written out as another.
    df = spark.read.load("people.json", format="json")
    df.select("name", "age").write.save("namesAndAges.parquet", format="parquet")
    # Manually specify CSV load options.
    # Bug fix: the CSV field-separator option is named "sep", not "seq" --
    # the original typo silently fell back to the default comma separator.
    df = spark.read.load("people.csv", format="csv", sep=":", inferSchema="True", header="true")
    # Manually specify ORC options, e.g. bloom filter and dictionary encoding.
    df = spark.read.orc("users.orc")
    (df.write.format("orc")
        .option("orc.bloom.filter.columns", "favorite_color")
        .option("orc.dictionary.key.threshold", "1.0")
        .save("users_with_options.orc"))
    # For file-based sources, bucketing and sorting/partitioning of the
    # output can be applied to persistent tables.
    df.write.bucketBy(42, "name").sortBy("age").saveAsTable("people_bucketed")
    # Instead of loading a file into a dataframe first, SQL can query the
    # file directly.
    df = spark.sql("SELECT * FROM parquet.`users.parquet`")
if __name__ == '__main__':
    # Build (or reuse) a SparkSession, run the demo, then release resources.
    spark = SparkSession.builder.appName("python spark sql data source demo").getOrCreate()
    basic_datasource_example(spark)
    spark.stop()
|
# -*- coding: utf-8 -*-
# Apache Software License 2.0
#
# Copyright (c) 2018, Christophe Duong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CLI module for Airflow related commands.
"""
import logging
from os.path import expanduser
from os.path import join
import click
from aiscalator import __version__
from aiscalator.airflow import command
from aiscalator.core.config import AiscalatorConfig
# Top-level click command group; subcommands below register themselves with
# @airflow.command().  (The docstring doubles as the CLI help text.)
@click.group()
@click.version_option(version=__version__)
def airflow():
    """Author workflow DAGs and run tasks on schedule."""
@airflow.command()
@click.version_option(version=__version__)
@click.option('-d', '--config-home', 'config_home',
              default=join(expanduser("~"), '.aiscalator'),
              help="Redefine the location of the application home directory.")
@click.option('--append/--replace', 'append', default=False)
@click.argument('workspace', nargs=-1, required=True,
                type=click.Path())
def setup(config_home, append, workspace):
    """Setup interactively the Airflow home folder and configurations."""
    # Delegate to the airflow command module and print its result.
    result = command.airflow_setup(AiscalatorConfig(),
                                   config_home, workspace,
                                   append=append)
    click.echo(result)
@airflow.command()
@click.version_option(version=__version__)
def update():
    """
    Checks and tries to update the current docker image
    to run airflow to a newer version.
    Initiates a docker pull of the latest images we are depending on
    and build the next aiscalator images from there.
    Before replacing the version tags in the Dockerfile, we make sure
    to do a maximum in the background while still having a working
    image in the meantime.
    """
    # Placeholder: declared so the subcommand shows up in --help.
    # TODO to implement
    logging.error("Not implemented yet")
@airflow.command()
@click.version_option(version=__version__)
def start():
    """Start docker images to bring airflow services up."""
    # Bring the services up, then report the endpoints to the user.
    status = command.airflow_up(AiscalatorConfig())
    click.echo(status)
    click.echo("""
    Airflow: http://localhost:8080
    Flower: http://localhost:5555
    """)
@airflow.command()
@click.version_option(version=__version__)
def stop():
    """Stop docker images to bring airflow services down."""
    # Delegate teardown to the command module and print its result.
    status = command.airflow_down(AiscalatorConfig())
    click.echo(status)
@airflow.command()
@click.option("-s", "--service", default="webserver",
              help='Run subcommand in docker service (default webserver)',
              metavar='<service>')
@click.argument('subcommand', nargs=-1, required=True)
@click.version_option(version=__version__)
def run(service, subcommand):
    """Run sub-command in a running docker service."""
    # NOTE(review): with required=True click guarantees a non-empty tuple,
    # so this branch looks unreachable via the CLI -- confirm before removal.
    if not subcommand:
        subcommand = None
    click.echo(command.airflow_cmd(AiscalatorConfig(),
                                   service=service, cmd=subcommand))
# TODO CLI to scale celery workers
# docker-compose -f docker-compose-CeleryExecutor.yml scale worker=5
@airflow.command()
@click.option('--name', prompt='What is the name of your dag?',
              help="Name of the new dag to create",
              metavar='<DAG>')
@click.option('-f', '--format', 'output_format',
              help="format of the configuration file (default is hocon)",
              type=click.Choice(['json', 'hocon']),
              default='hocon')
@click.argument('path', type=click.Path())
@click.version_option(version=__version__)
def new(name, output_format, path):
    """Create a new DAG job"""
    # Placeholder implementation: log the arguments until this is built.
    # TODO to implement
    logging.error("Not implemented yet %s %s %s",
                  name, output_format, path)
@airflow.command()
@click.argument('conf', type=click.Path(exists=True))
@click.argument('notebook', nargs=-1)
@click.version_option(version=__version__)
def edit(conf, notebook):
    """Edit DAG job"""
    # Guard clause: at most one notebook name may be given.
    if len(notebook) >= 2:
        raise click.BadArgumentUsage("Expecting one or less notebook names")
    selection = notebook[0] if notebook else None
    app_config = AiscalatorConfig(config=conf,
                                  dag_selection=selection)
    click.echo(command.airflow_edit(app_config))
@airflow.command()
@click.argument('conf', type=click.Path(exists=True))
@click.argument('notebook', nargs=-1)
@click.version_option(version=__version__)
def push(conf, notebook):
    """Push a job into the DAGS folder to schedule in Airflow."""
    # Push each named notebook, or the whole config when none are given.
    selections = list(notebook) if notebook else [None]
    for selection in selections:
        if selection is None:
            app_config = AiscalatorConfig(config=conf)
        else:
            app_config = AiscalatorConfig(config=conf,
                                          dag_selection=selection)
        click.echo(command.airflow_push(app_config))
|
# Read an integer and print its multiplication table from 1 to 10.
value = int(input('Digite um Valor:'))
for factor in range(1, 11):
    print(f'{value} x {factor} = {value * factor}')
|
# Dummy header so Brython type-checking emits no warnings.
# preprocessor.py does not embed this file into the html.
from typing import Callable
class Touch:
    """Stub of the browser Touch object (a single touch point).

    All values are dummies; real coordinates come from Brython at runtime.
    """
    @property
    def clientX(self)->int:
        return 0
    @property
    def clientY(self)->int:
        return 0
    @property
    def identifier(self)->int:
        return 0
# Argument passed to event handlers registered with addEventListener.
class DOMEvent:
    """Stub of the browser DOMEvent object, used only for type checking."""
    @property
    def keyCode(self)->int:
        return 0
    @property
    def offsetX(self)->int:
        return 0
    @property
    def offsetY(self)->int:
        return 0
    @property
    def buttons(self)->int:
        return 0
    @property
    def touches(self)->list[Touch]:
        return []
    def preventDefault(self):
        pass
    def stopPropagation(self):
        pass
class TextMetrics:
    """Stub of the canvas TextMetrics object returned by measureText()."""
    def __init__(self):
        # Dummy dimensions of the measured text.
        self.width = 0
        self.height = 0
class HtmlImage:
    """Stub of an HTML <img> element."""
    def __init__(self):
        # Intrinsic size of the (dummy) image.
        self.naturalWidth = 0
        self.naturalHeight = 0
    # NOTE(review): returning an Element from __setitem__ mirrors the Element
    # stub below; looks copy-pasted -- confirm this is intentional.
    def __setitem__(self, name:str, value:str)->"Element":
        return Element()
class CanvasRenderingContext:
    """Stub of the 2D canvas rendering context."""
    def __init__(self):
        # Annotation-only declarations: these attributes do not exist until
        # assigned by the caller (they model settable browser properties).
        self.fillStyle:str
        self.strokeStyle:str
        self.font:str
        self.textBaseline:str
    def fillRect(self,x:int|float,y:int|float,w:int|float,h:int|float):
        pass
    def fillText(self,text:str,x:int|float,y:int|float):
        pass
    def strokeRect(self,x:int|float,y:int|float,w:int|float,h:int|float):
        pass
    def drawImage(self,image:HtmlImage, sx:int|float,sy:int|float,sw:int|float,sh:int|float,px:int|float,py:int|float,dw:int|float,dh:int|float):
        pass
    def measureText(self,text:str) -> TextMetrics:
        return TextMetrics()
class ImageCreator:
    """Stub factory mirroring Brython's window.Image constructor."""
    def new(self)->HtmlImage:
        return HtmlImage()
class IntervalHandle:
    """Opaque stub for the handle returned by window.setInterval."""
    pass
class window:
    """Stub of the browser window object (timers and image construction)."""
    Image = ImageCreator()
    @staticmethod
    def setInterval(callback: Callable[[],None], t:int|float)->IntervalHandle:
        return IntervalHandle()
    @staticmethod
    def clearInterval(handle:IntervalHandle):
        pass
class Element:
    """Stub of a DOM element; also covers audio and canvas nodes."""
    def __init__(self):
        # for audio
        self.currentTime = 0
        self.volume = 0
        # for canvas
        self.width = 0
        self.height = 0
    def createElement(self, name:str)->"Element":
        return Element()
    def addEventListener(self, eventName:str, callback: Callable[[DOMEvent],None]):
        pass
    def removeEventListener(self, eventName:str, callback: Callable[[DOMEvent],None]):
        pass
    # Item access models attribute get/set on the underlying DOM node.
    def __getitem__(self, name:str)->"Element":
        return Element()
    def __setitem__(self, name:str, value:str)->"Element":
        return Element()
    # for audio
    def play(self):
        pass
    def stop(self):
        pass
    def pause(self):
        pass
    # for canvas
    def getContext(self, name:str)->CanvasRenderingContext:
        return CanvasRenderingContext()
# Module-level stand-in for the browser's global `document` object.
document = Element()
class dialog:
    """Placeholder for the Brython widgets dialog module."""
    pass
class widgets:
    """Stub container exposing a dialog attribute like browser.widgets."""
    def __init__(self):
        self.dialog = dialog()
|
import datetime
import random
# The single blank line creates visual separation
# so that I know which imports are built into the standard library (datetime, random)
# and which are my own custom ones.
from questions import Add, Multiply
class Quiz:
    """Creates and runs quiz with 10 questions. Run .take_quiz() to start after init"""
    def __init__(self, num_of_questions=10):
        """Generate random Add/Multiply questions with operands from 1 to 10.

        num_of_questions -- intended number of questions (int), default 10.
        NOTE(review): the user is still prompted interactively and the
        prompted value wins; kept to preserve existing behavior.
        TODO: honor the argument when provided.
        """
        # Bug fix: questions/answers used to be *class* attributes, so every
        # Quiz instance shared (and kept appending to) the same lists --
        # retaking a quiz accumulated questions from earlier runs.
        self.questions = []  # all generated question objects
        self.answers = []    # (is_correct, elapsed) tuples, one per question
        question_types = (Add, Multiply)
        num_of_questions = self.ask_number_of_questions()
        # Stats recorded by summary() for later inspection.
        self.final_total_time = None
        self.final_total_correct = None
        self.final_num_of_questions = None
        for _ in range(num_of_questions):
            num1 = random.randint(1, 10)
            num2 = random.randint(1, 10)
            # random.choice picks a question class which is then instantiated
            # with the two random operands.
            question = random.choice(question_types)(num1, num2)
            self.questions.append(question)
    def ask_number_of_questions(self):
        """Prompt until the user enters a valid whole number; return it."""
        while True:
            try:
                self.num_of_questions = int(input('How many questions do you want?'))
            except ValueError:
                print('Enter a valid whole number')
                continue
            break  # valid number given
        return self.num_of_questions
    @staticmethod
    def _time_passed(start, end):
        """Returns elapsed time between start and end"""
        return end - start
    @staticmethod
    def _format_time(datetime_obj):
        """Formats and returns times as strings ('HH:MM:SS AM/PM')"""
        return datetime_obj.strftime('%I:%M:%S %p')
    @staticmethod
    def _format_time_elapsed(time_delta):
        """Format a timedelta as M:SS.

        (Note: timedelta.min is the minimum representable value, not minutes.)
        """
        seconds_passed = time_delta.seconds
        minutes_passed = 0
        if seconds_passed >= 60:
            minutes_passed = int(seconds_passed / 60)
            seconds_passed %= 60
        if seconds_passed < 10:  # leading zero so 1:5 reads as 1:05
            seconds_passed = '0' + str(seconds_passed)
        return '{}:{}'.format(minutes_passed, seconds_passed)
    def ask(self, question):
        """Ask one question; return (is_correct, elapsed_timedelta)."""
        start_time = datetime.datetime.now()  # gets the start time
        answer = None
        # Bug fix: the original looped on `answer != int`, comparing a value
        # with a *type* (always True); a plain loop with break is what was
        # meant.  Also merged the duplicated correct/incorrect branches.
        while True:
            try:
                answer = int(input(question.text))  # capture the answer
                break  # valid integer entered
            except ValueError:
                print('Invalid entry. Enter only integers.')
        end_time = datetime.datetime.now()  # log the end time
        answer_time = self._time_passed(start_time, end_time)
        return answer == question.answer, answer_time
    def total_correct(self):
        """Tallies number of correct questions. Returns number of correct questions
        and total number of question asked.
        """
        score_counter = 0
        num_of_questions = len(self.questions)
        for result, _ in self.answers:
            if result:
                score_counter += 1
        return score_counter, num_of_questions
    def summary(self, total_time):
        """Prints how many you got right and total # of questions,
        as well as the total time for quiz.
        """
        # gets and prints how many you got right and total # of questions: 5/10
        total_right, num_of_questions = self.total_correct()
        print('Score: {0}/{1}'.format(total_right, num_of_questions))
        # log if perfect score
        if total_right == num_of_questions and num_of_questions != 0:
            print('Perfect score!')
        # format and print the total time for quiz.
        total_time = self._format_time_elapsed(total_time)
        print('Total Quiz Time: {0}'.format(total_time))
        # record stats for later inspection
        self.final_total_time = total_time
        self.final_total_correct = total_right
        self.final_num_of_questions = num_of_questions
    def take_quiz(self):
        """Method that asks the quiz questions"""
        # quiz start
        quiz_start = datetime.datetime.now()
        quiz_start_log = self._format_time(quiz_start)
        print('Quiz starts at: {0}'.format(quiz_start_log))  # log start time
        # ask all of the questions
        for question in self.questions:
            answer = self.ask(question)
            is_correct, time_to_answer = answer
            time_to_answer = self._format_time_elapsed(time_to_answer)
            # feedback: correct or not
            if is_correct:
                print('Correct. \nTime: {0}\n'.format(time_to_answer))
            else:
                print('Nope. \nTime: {0}\n'.format(time_to_answer))
            self.answers.append(answer)  # add to self.answers list
        # quiz end
        quiz_end = datetime.datetime.now()
        quiz_end_log = self._format_time(quiz_end)
        print('Quiz ended at: {0}\n'.format(quiz_end_log))  # log end time
        # total quiz time
        total_quiz_time = self._time_passed(quiz_start, quiz_end)
        # show a summary
        self.summary(total_quiz_time)
        return '\nThanks for playing'
|
# -*- coding: utf-8 -*-
"""
Create Date: 20-03-2019
Author: Marc Enthoven
"""
# Imports of modules
import numpy as np
import random
class bsn:
    """Generate and validate Dutch BSN (citizen service) numbers using the
    'elfproef' (eleven test)."""
    def __init__(self):
        pass
    def validate_bsn(self, bsn_number):
        """Return True when bsn_number passes the elfproef.

        Each digit is multiplied by its positional weight (9..2 for a
        9-digit number) except the last digit, which is weighted -1; the
        number is valid when the weighted sum is divisible by 11.
        """
        digits = np.array([int(ch) for ch in str(bsn_number)])
        weights = np.array(list(range(len(str(bsn_number)), 0, -1)))
        weights[-1:] = weights[-1:] * (-1)  # last digit counts negatively
        weighted_sum = np.sum(np.multiply(weights, digits))
        # Idiom fix: return the boolean directly instead of if/else True/False.
        return bool(weighted_sum % 11 == 0)
    def generate_bsn(self, bsn_numbers_count):
        """Generate bsn_numbers_count unique valid BSNs and append them to
        generate_<count>_bsn.txt in the working directory."""
        valid_bsn = []
        chunks = 100  # candidates drawn per batch
        limit = bsn_numbers_count
        # NOTE(review): the range starts at 8 digits; official BSNs are
        # 9 digits (zero-padded) -- confirm 8-digit output is acceptable.
        while len(valid_bsn) < limit:
            # Draw a batch of candidates and keep the ones passing the test.
            candidates = random.sample(range(10000000, 999999999), chunks)
            passed = list(map(self.validate_bsn, candidates))
            hits = [num for num, ok in zip(candidates, passed) if ok]
            # Keep only numbers not produced by an earlier batch.
            new_hits = set(num for num in hits if num not in valid_bsn)
            valid_bsn.extend(new_hits)
            if len(valid_bsn) >= limit:
                output_list = valid_bsn[:limit]
                # Idiom fix: context manager guarantees the file is closed
                # even if a write fails (original leaked on error).
                with open('generate_' + str(bsn_numbers_count) + '_bsn.txt', 'a') as output_txt:
                    for item in output_list:
                        output_txt.write(str(item) + '\r\n') # remove \r in linux environment
                print(str(bsn_numbers_count) + ' bsn have been generated and written in your working directory.')
|
"""
pals.py - Common functions for launching applications with PALS
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=fixme
import base64
import errno
import fcntl
import json
import resource
import signal
import socket
import ssl
import stat
import sys
import threading
import time
import os
import uuid
import click
import websocket
from six.moves import urllib
from cray.echo import echo, LOG_INFO, LOG_WARN, LOG_DEBUG, LOG_RAW
from cray.errors import BadResponseError
from cray.rest import request
from cray.utils import get_hostname, open_atomic
from cray import atp, mpir
SIGNAL_RECEIVED = 0 # Last signal number received
PING_INTERVAL = 20 # WebSocket ping interval
MPIR_ATTACH_INTERVAL = 1 # Check for MPIR attach variable
def split_mpmd_args(args):
    """ Split a list of arguments by the MPMD separator : """
    commands = []
    start = 0
    # Each ":" token closes the current command and opens the next one.
    for pos, token in enumerate(args):
        if token == ":":
            commands.append(args[start:pos])
            start = pos + 1
    commands.append(args[start:])
    return commands
def make_ws_url(route, url=None):
    """ Make a websocket URL (using wss scheme). Based on make_url in rest.py

    route -- path to append to the base URL
    url   -- base URL; defaults to the configured API hostname
    """
    # If no URL given, use the configured hostname
    if not url:
        url = get_hostname()
    # Split into components
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
    # Override scheme with secure WebSocket protocol
    scheme = "wss"
    # If URL didn't start with a scheme, set netloc to the first part of the path
    if not netloc and path:
        netloc, _, path = path.partition("/")
    # Append route to path
    # NOTE(review): urljoin replaces the last path segment unless the base
    # path ends with '/' -- confirm callers always pass bare hostnames.
    path = urllib.parse.urljoin(path, route)
    # Join everything back together
    return urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
def get_ws_headers():
    """ Get a list of HTTP headers to send in WebSocket request """
    auth = click.get_current_context().obj["auth"]
    # Without a session access token there is nothing to add.
    if not (auth and auth.session and auth.session.access_token):
        return []
    return ["Authorization: Bearer " + auth.session.access_token]
def find_executable(executable):
    """ Get a path to an executable file """
    # Explicit paths (containing a slash) and PATH-less environments are
    # returned untouched.
    if "/" in executable or "PATH" not in os.environ:
        return executable
    # Scan each PATH entry for an executable regular file with that name.
    for dirname in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(dirname, executable)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    # Nothing found anywhere on PATH
    return None
def parse_hostfile(hostfile):
    """ Parse a host list from a host file """
    # Strip whitespace, then keep non-empty lines that aren't comments.
    stripped = (line.strip() for line in hostfile)
    return [entry for entry in stripped if entry and not entry.startswith("#")]
def signal_handler(signum, _):
    # pylint: disable=global-statement
    """ Signal handler that stores the signal value in a global """
    # Only records the number; forward_signals() picks it up via the wakeup
    # pipe written by signal.set_wakeup_fd (see setup_signals).
    global SIGNAL_RECEIVED
    SIGNAL_RECEIVED = signum
def setup_signals():
    # pylint: disable=c-extension-no-member
    """ Set up signal handlers and return a signal wakeup read fd """
    # Create a pipe that we can use to determine when we've got a signal
    sig_read, sig_write = os.pipe()
    # Make the write end non-blocking (required for set_wakeup_fd)
    flags = fcntl.fcntl(sig_write, fcntl.F_GETFL)
    fcntl.fcntl(sig_write, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    signal.set_wakeup_fd(sig_write)
    # Set up signal handlers: each of these signals is recorded by
    # signal_handler() and forwarded to the application by forward_signals().
    for signum in [
            signal.SIGHUP,
            signal.SIGINT,
            signal.SIGQUIT,
            signal.SIGABRT,
            signal.SIGALRM,
            signal.SIGTERM,
            signal.SIGUSR1,
            signal.SIGUSR2,
    ]:
        signal.signal(signum, signal_handler)
    # Ignore SIGTTIN so we don't stop in the background
    signal.signal(signal.SIGTTIN, signal.SIG_IGN)
    return sig_read
def get_rpc(method, rpcid=None, **params):
    """ Create a JSONRPC request """
    # Build the full envelope, then prune the optional members when absent
    # (insertion order is preserved, so the serialized form is unchanged).
    envelope = {"jsonrpc": "2.0", "method": method, "params": params, "id": rpcid}
    if not params:
        del envelope["params"]
    if not rpcid:
        del envelope["id"]
    return json.dumps(envelope)
def send_rpc(websock, method, reqid=None, **params):
    """ Send an RPC over the socket """
    payload = get_rpc(method, reqid, **params)
    echo("Sending RPC %s" % payload, level=LOG_RAW)
    websock.send(payload)
def forward_stdin(websock, stdin=sys.stdin):
    """ Read stdin content and write to application """
    # Runs as a daemon thread (see spawn_threads); exits silently when the
    # websocket closes.  UTF-8 content is sent as text, anything else falls
    # back to base64.
    try:
        while True:
            try:
                # Wait for content on stdin
                content = os.read(stdin.fileno(), 4096)
                # Empty read signifies EOF
                if not content:
                    send_rpc(websock, "stdin", eof=True)
                    break
                # Attempt to decode UTF-8
                content = content.decode("utf-8")
                send_rpc(websock, "stdin", content=content, encoding="UTF-8")
            except OSError:
                # I/O error, send EOF
                send_rpc(websock, "stdin", eof=True)
                break
            except UnicodeError:
                # Fall back on base64
                content = base64.b64encode(content).decode("utf-8")
                send_rpc(websock, "stdin", content=content, encoding="base64")
    except websocket.WebSocketException:
        pass
def forward_signals(websock, sig_pipe):
    """ Forward signals to the application """
    # sig_pipe is the read end from setup_signals(); each wakeup byte means
    # a signal arrived and its number was stored in SIGNAL_RECEIVED.
    try:
        while True:
            # Block until a signal arrives
            os.read(sig_pipe, 4096)
            # Send it to the app
            send_rpc(websock, "signal", str(uuid.uuid4()), signum=SIGNAL_RECEIVED)
    except (OSError, websocket.WebSocketException):
        pass
def send_pings(websock):
    """ Avoid connection drops by sending periodic pings """
    # Runs as a daemon thread; terminates quietly once the socket closes.
    try:
        while True:
            time.sleep(PING_INTERVAL)
            echo("Sending keepalive ping", level=LOG_RAW)
            websock.ping()
    except websocket.WebSocketException:
        pass
def spawn_threads(websock):
    """ Spawn threads to handle stdin, signals, and pings """
    sig_read = setup_signals()
    # Each worker runs as a daemon so it never blocks interpreter exit.
    workers = [
        (forward_stdin, (websock,)),
        (forward_signals, (websock, sig_read)),
        (send_pings, (websock,)),
    ]
    for target, args in workers:
        worker = threading.Thread(target=target, args=args)
        worker.daemon = True
        worker.start()
def monitor_mpir(ctx, apid):
    """ Wait on MPIR variable to fill in proctable """
    # Polls every MPIR_ATTACH_INTERVAL seconds until a debugger attaches,
    # then populates the MPIR proctable once and exits.
    while True:
        # Look for MPIR debug flag
        if mpir.get_MPIR_being_debugged():
            # If proctable is not already filled, use procinfo to fill
            if not mpir.MPIR_proctable_filled():
                # Make request to procinfo endpoint
                with ctx:
                    resp = request("GET", "apis/pals/v1/apps/" + apid + "/procinfo")
                    procinfo = resp.json()
                # Extract MPIR information: one (host, executable, pid)
                # triple per rank.
                proctable_elems = []
                for rank in range(len(procinfo["cmdidxs"])):
                    hostname = procinfo["nodes"][procinfo["placement"][rank]]
                    executable = procinfo["executables"][procinfo["cmdidxs"][rank]]
                    pid = procinfo["pids"][rank]
                    proctable_elems.append((hostname, executable, pid))
                # Call C library to set C MPIR variables
                mpir.fill_MPIR_proctable(proctable_elems)
                mpir.call_MPIR_Breakpoint()
            # Now that MPIR proctable is set, exit the monitoring thread
            return
        time.sleep(MPIR_ATTACH_INTERVAL)
def spawn_mpir_thread(ctx, apid):
    """ Create MPIR watcher thread """
    # Daemon thread so it never blocks process exit.
    watcher = threading.Thread(target=monitor_mpir, args=(ctx, apid))
    watcher.daemon = True
    watcher.start()
def get_exit_code(status):
    """ Translate an exit status (as returned by wait) into an exit code """
    # Signal deaths map to the conventional 128+signum shell encoding.
    if os.WIFSIGNALED(status):
        return 128 + os.WTERMSIG(status)
    if os.WIFEXITED(status):
        return os.WEXITSTATUS(status)
    # Neither exited nor signaled: report a generic failure code.
    return 255
def print_output(params, a_file, label):
    """ Print output from a stdout/stderr RPC to the given file """
    content = params.get("content")
    if not content:
        return
    if params.get("encoding") == "base64":
        # Binary-safe path: decode base64 and emit the raw bytes.
        click.echo(base64.b64decode(content), nl=False, file=a_file)
        return
    if label and "host" in params and "rankid" in params:
        # Prefix every line with "<host> <rank>:" when labelling is on.
        host = params["host"]
        rankid = int(params["rankid"])
        for line in content.splitlines():
            click.echo("%s %d: %s" % (host, rankid, line), file=a_file)
        return
    # Otherwise, print without processing
    click.echo(content, nl=False, file=a_file)
def log_rank_exit(rankid, host, status):
    """ Log a rank exit """
    # rankid -1 denotes the shepherd process rather than an app rank.
    rank = "shepherd" if rankid == -1 else "rank %d" % rankid
    extra = ""
    level = LOG_INFO
    if os.WIFEXITED(status):
        action = "exited with code"
        code = os.WEXITSTATUS(status)
        if code == 0:
            # Clean exits are only shown at the most verbose level.
            level = LOG_RAW
    elif os.WIFSIGNALED(status):
        action = "died from signal"
        code = os.WTERMSIG(status)
        if os.WCOREDUMP(status):
            extra = " and dumped core"
    else:
        action = "invalid status"
        code = status
    echo("%s: %s %s %d%s" % (host, rank, action, code, extra), level=level)
def write_procinfo_file(result, procinfo_file):
    """ Dump the procinfo result to the given file """
    # Best-effort: failures are logged as warnings, never raised, since the
    # procinfo file is auxiliary output.
    try:
        with open_atomic(procinfo_file) as procinfo_fp:
            json.dump(result, procinfo_fp)
    except (IOError, OSError) as err:
        echo("Couldn't write %s: %s" % (procinfo_file, str(err)), level=LOG_WARN)
def get_executables(req, transfer):
    """ Get the set of local paths to binaries """
    executables = set()
    for cmd in req["cmds"]:
        path = find_executable(cmd["argv"][0])
        if not path:
            continue
        executables.add(path)
        # When transferring, compute nodes see the file by base name only,
        # so rewrite argv[0] accordingly (mutates req in place).
        if transfer:
            cmd["argv"][0] = os.path.basename(path)
    return executables
def get_resource_limits(limitnames):
    # pylint: disable=c-extension-no-member
    """ Given a list of resource names, fetch and format their limits """
    limits = {}
    for name in limitnames:
        try:
            soft, hard = resource.getrlimit(getattr(resource, "RLIMIT_" + name))
        except (AttributeError, resource.error):
            # Unknown resource names are silently skipped.
            continue
        limits[name] = "%d %d" % (soft, hard)
    return limits
def connect_websock(apid):
    # pylint: disable=no-member
    """ Connect to the application websocket """
    # Returns a connected, thread-safe websocket to the app's stdio stream;
    # connection failures surface as ClickException for a clean CLI error.
    try:
        url = make_ws_url("apis/pals/v1/apps/%s/stdio" % apid)
        headers = get_ws_headers()
        # TODO: enable SSL verification
        # NOTE(review): CERT_NONE disables server certificate checks.
        sslopt = {"cert_reqs": ssl.CERT_NONE}
        echo("Connecting to %s" % url, level=LOG_DEBUG)
        return websocket.create_connection(
            url, header=headers, sslopt=sslopt, enable_multithread=True
        )
    except (websocket.WebSocketException, socket.error) as err:
        raise click.ClickException("Connection error: %s" % str(err))
class PALSApp(object):
    """ Class representing a running PALS application """
    def __init__(self):
        """ Initialize this application """
        # apid is assigned by the server at launch time
        self.apid = ""
        # Exit codes reported by ranks; returned from run()
        self.exit_codes = set()
        # Pre-generated JSONRPC ids so responses can be matched in handle_rpc
        self.stream_rpcid = str(uuid.uuid4())
        self.start_rpcid = str(uuid.uuid4())
        self.procinfo_rpcid = str(uuid.uuid4())
        self.complete = False
        self.started = False
    def launch(self, launchreq, transfer=False, label=False, procinfo_file=None):
        """ Launch this application, transfer binaries, and run """
        executables = get_executables(launchreq, transfer)
        # Launch ATP frontend if enabled
        # returns list of environment variables to set for job
        (atp_frontend_handle, atp_envlist) = atp.launch_atp_frontend(executables)
        if atp_envlist:
            launchreq["environment"] += atp_envlist
        # Set custom fanout if requested
        if "PALS_FANOUT" in os.environ:
            launchreq["fanout"] = int(os.environ["PALS_FANOUT"])
        # Set RPC timeout if requested
        if "PALS_RPC_TIMEOUT" in os.environ:
            launchreq["rpc_timeout"] = int(os.environ["PALS_RPC_TIMEOUT"])
        try:
            # Send launch request
            resp = request("POST", "apis/pals/v1/apps", json=launchreq)
            self.apid = resp.json().get("apid")
            mpir.set_current_apid(self.apid)
            echo("Launched application %s" % self.apid, level=LOG_INFO)
            # Send newly-launched apid to ATP frontend to monitor
            if atp_frontend_handle:
                atp.send_launched_apid(atp_frontend_handle, self.apid)
            # Transfer executables
            if transfer:
                for executable in executables:
                    self.transfer(executable)
            return self.run(label, procinfo_file)
        finally:
            # Terminate frontend on error and rethrow exception
            if atp_frontend_handle:
                atp.terminate_frontend(atp_frontend_handle)
            # Delete application from the PALS database
            if self.apid:
                try:
                    request("DELETE", "apis/pals/v1/apps/" + self.apid)
                except BadResponseError:
                    # Ignore 404 errors
                    pass
    def transfer(self, executable):
        """ Transfer a file to application compute nodes """
        try:
            # Preserve the file's permission bits on the remote side
            mode = stat.S_IMODE(os.stat(executable).st_mode)
            params = {"mode": "0%o" % mode, "name": os.path.basename(executable)}
            headers = {"Content-Type": "application/octet-stream"}
            with open(executable, "rb") as a_execfile:
                resp = request(
                    "POST",
                    "apis/pals/v1/apps/%s/files" % self.apid,
                    params=params,
                    headers=headers,
                    data=a_execfile,
                )
            path = resp.json().get("path")
            echo("Transferred executable to %s" % path, level=LOG_DEBUG)
        except (OSError, IOError) as err:
            raise click.ClickException("Couldn't transfer binary: %s" % str(err))
    def handle_rpc(self, websock, rpc, label=False, procinfo_file=None):
        """ Handle a received RPC. Return True if complete. """
        # Parse the RPC
        method = rpc.get("method")
        params = rpc.get("params", {})
        errmsg = rpc.get("error", {}).get("message")
        rpcid = rpc.get("id")
        result = rpc.get("result")
        # Handle stdout notification
        if method == "stdout":
            print_output(params, sys.stdout, label)
        # Handle stderr notification
        elif method == "stderr":
            print_output(params, sys.stderr, label)
        # Handle exit notification
        elif method == "exit":
            rankid = int(params.get("rankid", -1))
            host = params.get("host", "unknown")
            status = int(params.get("status", 0))
            self.exit_codes.add(get_exit_code(status))
            log_rank_exit(rankid, host, status)
        # Handle complete notification
        elif method == "complete":
            self.complete = True
        # Handle unknown RPC method
        elif method:
            echo("Received unknown %s RPC" % method, level=LOG_WARN)
        # Handle error responses
        elif errmsg:
            raise click.ClickException(errmsg)
        # Handle stream response
        elif rpcid == self.stream_rpcid:
            # Streaming established; kick off the app if not already started
            if not self.started:
                send_rpc(websock, "start", self.start_rpcid)
        # Handle start response
        elif rpcid == self.start_rpcid:
            self.started = True
            if procinfo_file:
                # Add delay before procinfo if requested
                # This is needed until the startup barrier works correctly
                if "PALS_PROCINFO_DELAY" in os.environ:
                    time.sleep(float(os.environ["PALS_PROCINFO_DELAY"]))
                send_rpc(websock, "procinfo", self.procinfo_rpcid)
        # Handle procinfo response
        elif rpcid == self.procinfo_rpcid and procinfo_file:
            write_procinfo_file(result, procinfo_file)
    def run(self, label=False, procinfo_file=None):
        """ Run this application """
        # Reconnecting receive loop: any dropped connection is re-established
        # until the app reports completion.
        connected = False
        spawn_mpir_thread(click.get_current_context(), self.apid)
        while not self.complete:
            try:
                if not connected:
                    # Connect to stdio websocket endpoint
                    websock = connect_websock(self.apid)
                    connected = True
                    # Spawn threads to handle signals, stdin, and pings
                    spawn_threads(websock)
                    # Send the stream RPC to start things off
                    send_rpc(websock, "stream", self.stream_rpcid)
                # Read an RPC off the socket
                rpc = json.loads(websock.recv())
                echo("Received RPC %s" % rpc, level=LOG_RAW)
                # Handle the RPC
                self.handle_rpc(websock, rpc, label, procinfo_file)
            except websocket.WebSocketException as err:
                echo(
                    "Lost application connection (%s), reconnecting" % str(err),
                    level=LOG_WARN,
                )
                websock.close()
                connected = False
            except socket.error as err:  # pylint: disable=no-member
                # EINTR just means a signal arrived; anything else reconnects
                if err.errno != errno.EINTR:
                    echo(
                        "Lost application connection (%s), reconnecting" % str(err),
                        level=LOG_WARN,
                    )
                    websock.close()
                    connected = False
            except ValueError as err:
                echo(
                    "Error decoding application message: %s" % str(err), level=LOG_WARN
                )
        # Clean up after ourselves
        websock.close()
        mpir.free_MPIR_proctable()
        return self.exit_codes
|
import serial
"""
# VE.Direct parser inspired by https://github.com/karioja/vedirect/blob/master/vedirect.py
"""
class Vedirect:
    """Parser for the Victron VE.Direct text protocol.

    Feed raw serial bytes to :meth:`input` one at a time; when a complete,
    checksum-valid frame has been seen, the accumulated field dict
    (field name -> value, both strings) is returned.
    """

    # The error code of the device (relevant when the device is in the fault state).
    #
    # Error 19 can be ignored, this condition regularly occurs during start-up or shutdown of the MPPT charger.
    # Since version 1.15 this error will no longer be reported.
    #
    # Error 21 can be ignored for 5 minutes, this condition regularly occurs during start-up or shutdown
    # of the MPPT charger. Since version 1.16 this warning will no longer be reported when it is not persistent.
    #
    VICTRON_ERROR = {
        '0': 'No error',
        '2': 'Battery voltage too high',
        '17': 'Charger temperature too high',
        '18': 'Charger over current',
        '19': 'Charger current reversed',
        '20': 'Bulk time limit exceeded',
        '21': 'Current sensor issue',
        '26': 'Terminals overheated',
        '28': 'Converter issue',  # (dual converter models only)
        '33': 'Input voltage too high (solar panel)',
        '34': 'Input current too high (solar panel)',
        '38': 'Input shutdown (excessive battery voltage)',
        '39': 'Input shutdown (due to current flow during off mode)',
        '65': 'Lost communication with one of devices',
        '66': 'Synchronised charging device configuration issue',
        '67': 'BMS connection lost',
        '68': 'Network misconfigured',
        '116': 'Factory calibration data lost',
        '117': 'Invalid/incompatible firmware',
        '119': 'User settings invalid'
    }

    # The state of operation
    VICTRON_CS = {
        '0': 'Off',
        '2': 'Fault',
        '3': 'Bulk',
        '4': 'Absorption',
        '5': 'Float',
        '7': 'Equalize (manual)',
        '245': 'Starting-up',
        '247': 'Auto equalize / Recondition',
        '252': 'External control'
    }

    # The possible values for the tracker operation
    VICTRON_MTTP = {
        '0': 'Off',
        '1': 'Limited',
        '2': 'Active'
    }

    # Off reason, this field described why a unit is switched off.
    #
    # Available on SmartSolar mppt chargers since firmware version v1.44 (VE.Direct models)
    # and v1.03 (SmartSolar VE.Can models)
    # FIXME: This might not work as a dictionary
    # NOTE(review): the register looks like a bit mask (hex flag values), so
    # multiple reasons may be set at once; a plain value lookup would then
    # miss combined values -- confirm against device output.
    VICTRON_OFF_REASON = {
        "0x00000001": "No input power",
        "0x00000002": "Switched off (power switch)",
        "0x00000004": "Switched off (device mode register)",
        "0x00000008": "Remote input",
        "0x00000010": "Protection active",
        "0x00000020": "Paygo",
        "0x00000040": "BMS",
        "0x00000080": "Engine shutdown detection",
        "0x00000100": "Analysing input voltage"
    }

    def __init__(self, port='/dev/ttyAMA0', timeout=5):
        """
        Initialise serial component of the Victron parser. Default value is the standard serial port on Raspberry pi
        :param port: serial device to open (opened at 19200 baud, the VE.Direct rate)
        :param timeout: pyserial read timeout in seconds
        """
        self.ser = serial.Serial(port, 19200, timeout=timeout)
        # Frame delimiters of the VE.Direct text protocol.
        self.header1 = b'\r'
        self.header2 = b'\n'
        self.delimiter = b'\t'
        self.hexmarker = b':'
        # Accumulators for the field currently being parsed.
        self.key = bytearray()
        self.value = bytearray()
        # Running sum of all frame bytes; a valid frame sums to 0 mod 256.
        self.bytes_sum = 0
        self.state = self.wait_header
        # Completed fields of the frame currently being assembled.
        self.dict = {}

    # Parser state constants (NOTE: 'hex' shadows the builtin inside the class).
    hex, wait_header, in_key, in_value, in_checksum = range(5)

    def input(self, byte):
        """Feed one byte (a length-1 bytes object) into the state machine.

        :return: the completed frame dict when a checksum-valid frame ends,
            otherwise None.
        """
        # A ':' anywhere outside the checksum byte starts a HEX-protocol
        # message, which is skipped until the next newline.
        if byte == self.hexmarker and self.state != self.in_checksum:
            self.state = self.hex

        if self.state == self.wait_header:
            self.bytes_sum += ord(byte)
            if byte == self.header1:
                self.state = self.wait_header
            elif byte == self.header2:
                self.state = self.in_key
            return None
        elif self.state == self.in_key:
            self.bytes_sum += ord(byte)
            if byte == self.delimiter:
                # The 'Checksum' field's value byte closes the frame.
                if self.key.decode() == 'Checksum':
                    self.state = self.in_checksum
                else:
                    self.state = self.in_value
            else:
                self.key += byte
            return None
        elif self.state == self.in_value:
            self.bytes_sum += ord(byte)
            if byte == self.header1:
                # End of line: commit the finished key/value pair.
                self.state = self.wait_header
                self.dict[self.key.decode()] = self.value.decode()
                self.key = bytearray()
                self.value = bytearray()
            else:
                self.value += byte
            return None
        elif self.state == self.in_checksum:
            self.bytes_sum += ord(byte)
            self.key = bytearray()
            self.value = bytearray()
            self.state = self.wait_header
            # All bytes of a valid frame (including the checksum byte)
            # sum to a multiple of 256.
            if self.bytes_sum % 256 == 0:
                self.bytes_sum = 0
                return self.dict
            else:
                print('Malformed packet')
                print('----------------')
                for k, v in self.dict.items():
                    print("{} {}".format(k, v))
                self.bytes_sum = 0
        elif self.state == self.hex:
            # HEX-protocol bytes are not checksummed; skip to end of line.
            self.bytes_sum = 0
            if byte == self.header2:
                self.state = self.wait_header
        else:
            # Unknown state: programming error.
            raise AssertionError()

    def read_data_single(self):
        """Block until one complete frame has been read and return it.

        NOTE(review): on a serial read timeout ser.read(1) returns b'' and
        ord(b'') will raise TypeError -- confirm whether callers rely on a
        timeout ever occurring here.
        """
        while True:
            byte = self.ser.read(1)
            packet = self.input(byte)
            if packet is not None:
                return packet

    def read_data_callback(self, callback):
        """Read frames forever, invoking callback(frame_dict) for each one.

        Stops when the serial read times out (returns an empty byte string).
        """
        while True:
            byte = self.ser.read(1)
            if byte:
                packet = self.input(byte)
                if packet is not None:
                    callback(packet)
            else:
                break
import requests
from bs4 import BeautifulSoup
from preprocessing.enums import Modifiers
def get_modifier_list_from_eu4_wiki(with_default_value=False):
    """Scrape the EU4 wiki modifier-list page into a list of dicts.

    Each dict carries 'm_type' (the Modifiers enum member name for its table)
    plus one key per table column. When with_default_value is true, a
    'default_value' float is derived from the 'example' column (falling back
    to 1.0 when it cannot be parsed).

    :raises ConnectionError: when the wiki page cannot be fetched.
    """
    req = requests.get('https://eu4.paradoxwikis.com/Modifier_list')
    if not req.ok:
        raise ConnectionError

    soup = BeautifulSoup(req.text, 'html.parser')
    modifier_tables = soup.find_all(
        'table',
        {'class': ['wikitable', 'sortable', 'jquery-tablesorter']}
    )[:len(Modifiers)]

    modifier_list = []
    for idx, modifier_table in enumerate(modifier_tables):
        trs = modifier_table.find_all('tr')
        tr_th, tr_tds = trs[0], trs[1:]
        # Header row gives the column names, normalized to snake_case keys.
        modifier_names = [th.text.strip().lower().replace(' ', '_')
                          for th in tr_th.find_all('th')]
        for tr_td in tr_tds:
            # BeautifulSoup's .text already decodes HTML entities, so no
            # extra un-escaping is needed (the old '<'/'>' replaces were no-ops).
            modifier_values = [td.text.strip() for td in tr_td.find_all('td')]
            modifier = {'m_type': Modifiers(idx).name}
            modifier.update(zip(modifier_names, modifier_values))
            if with_default_value:
                try:
                    modifier['default_value'] = float(modifier['example'].split()[-1])
                except (KeyError, IndexError, ValueError):
                    # Missing or non-numeric example column: use a neutral default.
                    modifier['default_value'] = 1.0
            modifier_list.append(modifier)
    return modifier_list
def get_country_list_from_eu4_wiki(with_img_url=False):
    """Scrape the EU4 wiki country table into a list of dicts.

    Each entry holds 'country_name', 'country_tag' and 'img_name'; when
    with_img_url is true the absolute flag-image URL is added as 'img_src'.

    :raises ConnectionError: when the wiki page cannot be fetched.
    """
    response = requests.get('https://eu4.paradoxwikis.com/Countries')
    if not response.ok:
        raise ConnectionError

    soup = BeautifulSoup(response.text, 'html.parser')
    country_table = soup.find_all(
        'table',
        {'class': ['wikitable', 'sortable', 'jquery-tablesorter']}
    )[0]

    image_src_base = 'https://eu4.paradoxwikis.com'
    country_list = []
    # Skip the header row; each remaining row is one country.
    for row in country_table.find_all('tr')[1:]:
        cells = row.find_all('td')
        _, info_cell, tag_cell, _, _ = cells
        img_src = image_src_base + info_cell.a.img.attrs['src']
        country = {
            'country_name': info_cell.find_all('a')[1].text.strip(),
            'country_tag': tag_cell.text.strip(),
            'img_name': img_src.split('/')[-1],
        }
        if with_img_url:
            country['img_src'] = img_src
        country_list.append(country)
    return country_list
def code_test():
    """Ad-hoc manual smoke test; hits the live wiki, so it needs network."""
    from pprint import pprint as prt
    # prt(get_modifier_list_from_eu4_wiki())
    # prt(get_modifier_list_from_eu4_wiki(with_default_value=True))
    prt(get_country_list_from_eu4_wiki())
    prt(get_country_list_from_eu4_wiki(with_img_url=True))
if __name__ == '__main__':
code_test()
|
from django.conf.urls import url
from calaccess_website import views, sitemaps
from django.views.generic.base import RedirectView
# URL routing for the CAL-ACCESS website.
# Fix: literal dots in the machine-readable file patterns (robots.txt and the
# *-sitemap.xml routes) are now escaped -- an unescaped '.' in a regex matches
# ANY character, so e.g. 'robotsXtxt' previously matched the robots route.
urlpatterns = [
    # The homepage
    url(
        r'^$',
        RedirectView.as_view(url='https://www.californiacivicdata.org/'),
        name="home",
    ),
    #
    # Downloads
    #
    # Version archive views
    url(
        r'^downloads/$',
        views.VersionArchiveIndex.as_view(),
        name="version_archive_index",
    ),
    url(
        r'^downloads/(?P<year>[0-9]{4})/$',
        views.VersionYearArchiveList.as_view(),
        name="version_archive_year"
    ),
    url(
        r'^downloads/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/$',
        views.VersionMonthArchiveList.as_view(),
        name="version_archive_month"
    ),
    url(
        r'^downloads/latest/$',
        views.LatestVersion.as_view(),
        name='version_latest'
    ),
    url(
        r'^downloads/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<time>[0-9]{6})/$',
        views.VersionDetail.as_view(),
        name="version_detail"
    ),
    #
    # Documentation
    #
    # Index
    url(
        r'^documentation/$',
        views.DocumentationIndex.as_view(),
        name='docs_index'
    ),
    # CAL-ACCESS file views
    url(
        r'^documentation/raw-files/$',
        views.CalAccessFileList.as_view(),
        name='calaccess_file_list'
    ),
    url(
        r'^documentation/raw-files/(?P<slug>[-\w]+)/$',
        views.CalAccessFileDetail.as_view(),
        name='calaccess_file_detail',
    ),
    url(
        r'^documentation/raw-files/(?P<slug>[-\w]+)/downloads/$',
        views.CalAccessFileDownloadsList.as_view(),
        name='calaccess_file_downloads_list',
    ),
    # CCDC file views
    url(
        r'^documentation/processed-files/$',
        views.CcdcFileList.as_view(),
        name='ccdc_file_list'
    ),
    url(
        r'^documentation/processed-files/(?P<slug>[-\w]+)/$',
        views.CcdcFileDetail.as_view(),
        name='ccdc_file_detail',
    ),
    url(
        r'^documentation/processed-files/(?P<slug>[-\w]+)/downloads/$',
        views.CcdcFileDownloadsList.as_view(),
        name='ccdc_file_downloads_list',
    ),
    # Form views
    url(
        r'^documentation/calaccess-forms/$',
        views.FormList.as_view(),
        name='form_list'
    ),
    url(
        r'^documentation/calaccess-forms/(?P<id>\w+)/$',
        views.FormDetail.as_view(),
        name='form_detail',
    ),
    # Official documentation
    url(
        r'^documentation/calaccess-official-documentation/$',
        views.OfficialDocumentation.as_view(),
        name='official_documentation'
    ),
    # Frequently asked questions
    url(
        r'^documentation/frequently-asked-questions/$',
        views.FAQ.as_view(),
        name='faq'
    ),
    #
    # Redirects
    #
    url(
        r'^redirect/latest/processed/(?P<slug>[-\w\_\.]+)/$',
        views.redirect_latest_processed,
        name='redirect_latest_processed'
    ),
    url(
        r'^redirect/latest/raw/(?P<slug>[-\w\_\.]+)/$',
        views.redirect_latest_raw,
        name='redirect_latest_raw'
    ),
    #
    # Machine-readable stuff
    #
    url(
        r'^robots\.txt$',
        views.CalAccessRobotsTxt.as_view(),
        name='robots_txt'
    ),
    url(
        r'^raw-file-sitemap\.xml$',
        sitemaps.CalAccessFileSitemap.as_view(),
        name='calaccess_file_sitemap'
    ),
    url(
        r'^raw-file-downloads-sitemap\.xml$',
        sitemaps.CalAccessFileDownloadsSitemap.as_view(),
        name='calaccess_file_downloads_sitemap'
    ),
    url(
        r'^processed-file-sitemap\.xml$',
        sitemaps.CcdcFileSitemap.as_view(),
        name='ccdc_file_sitemap'
    ),
    url(
        r'^processed-file-downloads-sitemap\.xml$',
        sitemaps.CcdcFileDownloadsSitemap.as_view(),
        name='ccdc_file_downloads_sitemap'
    ),
    url(
        r'^form-sitemap\.xml$',
        sitemaps.FormSitemap.as_view(),
        name='form_sitemap'
    ),
    url(
        r'^downloads-sitemap\.xml$',
        sitemaps.VersionSitemap.as_view(),
        name='version_sitemap'
    ),
    url(
        r'^downloads-year-sitemap\.xml$',
        sitemaps.VersionYearSitemap.as_view(),
        name='version_archive_year_sitemap'
    ),
    url(
        r'^downloads-month-sitemap\.xml$',
        sitemaps.VersionMonthSitemap.as_view(),
        name='version_archive_month_sitemap'
    ),
    url(
        r'^other-sitemap\.xml$',
        sitemaps.OtherSitemap.as_view(),
        name='other_sitemap'
    ),
]
|
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
    """
    Returns the already-created default construction environment.

    Accepts (and ignores) the same arguments as DefaultEnvironment() so it
    can transparently replace it once the environment exists.
    """
    global _default_env
    return _default_env
def DefaultEnvironment(*args, **kw):
    """
    Initial public entry point for creating the default construction
    Environment.

    After creating the environment, we overwrite our name
    (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
    which more efficiently returns the initialized default construction
    environment without checking for its existence.

    (This function still exists with its _default_check because someone
    else (*cough* Script/__init__.py *cough*) may keep a reference
    to this function.  So we can't use the fully functional idiom of
    having the name originally be a something that *only* creates the
    construction environment and then overwrites the name.)
    """
    global _default_env
    if not _default_env:
        import SCons.Util
        _default_env = SCons.Environment.Environment(*args, **kw)
        # Prefer content (MD5) signatures when the hash is available;
        # fall back to timestamps otherwise (e.g. FIPS-restricted builds).
        if SCons.Util.md5:
            _default_env.Decider('MD5')
        else:
            _default_env.Decider('timestamp-match')
        # Rebind the public name so later calls skip the creation check.
        global DefaultEnvironment
        DefaultEnvironment = _fetch_DefaultEnvironment
        _default_env._CacheDir_path = None
    return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
    """Emitter that marks every target node as a static (non-shared) object."""
    for node in target:
        node.attributes.shared = None
    return (target, source)
def SharedObjectEmitter(target, source, env):
    """Emitter that tags every target node as a shared object."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def SharedFlagChecker(source, target, env):
    """Pre-link check: every object going into a shared target must itself
    have been built shared, unless the platform declares static and shared
    objects interchangeable via $STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME.

    Raises SCons.Errors.UserError on the first static object found.
    """
    same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
    if same == '0' or same == '' or same == 'False':
        for src in source:
            # Nodes without the emitter-set attribute count as static.
            try:
                shared = src.attributes.shared
            except AttributeError:
                shared = None
            if not shared:
                raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
# Action wrapper used by shared-library builders to run SharedFlagChecker.
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)

# Some people were using these variable name before we made
# SourceFileScanner part of the public interface.  Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner

# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go.  Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()

# Actions for common languages.  Each pairs a command variable with its
# optional display-string variable (shown instead of the raw command).
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
DAction = SCons.Action.Action("$DCOM", "$DCOMSTR")
ShDAction = SCons.Action.Action("$SHDCOM", "$SHDCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")

# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
    """Return a quoted, printable form of a path or list of paths."""
    # If dest is a list, we need to manually call str() on each element
    if not SCons.Util.is_List(dest):
        return '"' + str(dest) + '"'
    quoted = ['"' + str(element) + '"' for element in dest]
    return '[' + ', '.join(quoted) + ']'
def chmod_func(dest, mode):
    """Apply *mode* to each destination path (Chmod action implementation)."""
    SCons.Node.FS.invalidate_node_memos(dest)
    targets = dest if SCons.Util.is_List(dest) else [dest]
    for target in targets:
        os.chmod(str(target), mode)
def chmod_strfunc(dest, mode):
    """Display string for a Chmod action, with the mode shown in octal."""
    return 'Chmod({0}, 0{1:o})'.format(get_paths_str(dest), mode)

Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src, symlinks=True):
    """
    If symlinks (is true), then a symbolic link will be
    shallow copied and recreated as a symbolic link; otherwise, copying
    a symbolic link will be equivalent to copying the symbolic link's
    final target regardless of symbolic link depth.

    Returns 0 (list-into-directory case) or the return value of the
    underlying os/shutil call.
    """
    dest = str(dest)
    src = str(src)

    SCons.Node.FS.invalidate_node_memos(dest)
    if SCons.Util.is_List(src) and os.path.isdir(dest):
        # Copy a list of files into an existing directory.
        for file in src:
            shutil.copy2(file, dest)
        return 0
    elif os.path.islink(src):
        if symlinks:
            # Recreate the link itself rather than following it.
            return os.symlink(os.readlink(src), dest)
        else:
            # Follow the chain and copy the final target instead.
            return copy_func(dest, os.path.realpath(src))
    elif os.path.isfile(src):
        return shutil.copy2(src, dest)
    else:
        # src is a directory: copy the whole tree.
        return shutil.copytree(src, dest, symlinks)

Copy = ActionFactory(
    copy_func,
    lambda dest, src, symlinks=True: 'Copy("%s", "%s")' % (dest, src)
)
def delete_func(dest, must_exist=0):
    """Remove each destination file or directory tree.

    Missing entries are silently skipped unless must_exist is true, in
    which case os.unlink raises for them.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    entries = dest if SCons.Util.is_List(dest) else [dest]
    for entry in entries:
        path = str(entry)
        # os.path.exists returns False for broken links that exist
        if not (os.path.exists(path) or os.path.islink(path)) and not must_exist:
            continue
        # os.path.isdir returns True when path is a link to a dir, so
        # only recurse into real directories; links get unlinked.
        if os.path.isdir(path) and not os.path.islink(path):
            shutil.rmtree(path, 1)
        else:
            os.unlink(path)
def delete_strfunc(dest, must_exist=0):
    """Display string for a Delete action."""
    return 'Delete(%s)' % get_paths_str(dest)

Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
    """Create each destination directory tree (like mkdir -p); an already
    existing directory is not an error."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        try:
            os.makedirs(str(entry))
        except os.error, e:
            p = str(entry)
            # 183 is ERROR_ALREADY_EXISTS on win32, which makedirs can
            # surface instead of EEXIST.
            if (e.args[0] == errno.EEXIST or
                    (sys.platform=='win32' and e.args[0]==183)) \
                    and os.path.isdir(str(entry)):
                pass            # not an error if already exists
            else:
                raise

Mkdir = ActionFactory(mkdir_func,
                      lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
    """Move *src* to *dest*, invalidating cached node info for both paths."""
    for path in (dest, src):
        SCons.Node.FS.invalidate_node_memos(path)
    shutil.move(src, dest)

Move = ActionFactory(move_func,
                     lambda dest, src: 'Move("%s", "%s")' % (dest, src),
                     convert=str)
def touch_func(dest):
    """Touch each destination file: update its mtime to now, creating the
    file first when it does not exist (atime is preserved for existing
    files, set to mtime for new ones)."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for file in dest:
        file = str(file)
        mtime = int(time.time())
        if os.path.exists(file):
            atime = os.path.getatime(file)
        else:
            # Create an empty file and close it immediately; the original
            # open(file, 'w') leaked the file handle.
            with open(file, 'w'):
                pass
            atime = mtime
        os.utime(file, (atime, mtime))

Touch = ActionFactory(touch_func,
                      lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.

    NOTE: the parameter deliberately shadows the builtin 'list'; renaming
    it would break $-expansion strings that call _concat by keyword.
    """
    if not list:
        return list

    l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    if l is not None:
        list = l

    return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
    """
    Creates a new list from 'list' by concatenating the 'prefix' and
    'suffix' arguments onto each element of the list.  A trailing space
    on 'prefix' or leading space on 'suffix' will cause them to be put
    into separate list elements rather than being concatenated.
    """
    result = []

    # ensure that prefix and suffix are strings
    prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
    suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))

    for x in list:
        # File nodes are passed through untouched so path manipulation
        # downstream still sees real nodes, not strings.
        if isinstance(x, SCons.Node.FS.File):
            result.append(x)
            continue
        x = str(x)
        if x:
            if prefix:
                if prefix[-1] == ' ':
                    # Trailing space: emit the prefix as its own element.
                    result.append(prefix[:-1])
                elif x[:len(prefix)] != prefix:
                    # Don't double-apply a prefix the element already has.
                    x = prefix + x

            result.append(x)

            if suffix:
                if suffix[0] == ' ':
                    # Leading space: emit the suffix as its own element.
                    result.append(suffix[1:])
                elif x[-len(suffix):] != suffix:
                    # Attach the suffix to the element just appended.
                    result[-1] = result[-1]+suffix

    return result
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    This is a wrapper around _concat()/_concat_ixes() that checks for
    the existence of prefixes or suffixes on list items and strips them
    where it finds them.  This is used by tools (like the GNU linker)
    that need to turn something like 'libfoo.a' into '-lfoo'.
    """
    if not itms:
        return itms

    if not callable(c):
        env_c = env['_concat']
        if env_c != _concat and callable(env_c):
            # There's a custom _concat() method in the construction
            # environment, and we've allowed people to set that in
            # the past (see test/custom-concat.py), so preserve the
            # backwards compatibility.
            c = env_c
        else:
            c = _concat_ixes

    # Expand any construction variables inside the strip patterns first.
    stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
    stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))

    stripped = []
    for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
        # File nodes pass through unmodified.
        if isinstance(l, SCons.Node.FS.File):
            stripped.append(l)
            continue

        if not SCons.Util.is_String(l):
            l = str(l)

        for stripprefix in stripprefixes:
            lsp = len(stripprefix)
            if l[:lsp] == stripprefix:
                l = l[lsp:]
                # Do not strip more than one prefix
                break

        for stripsuffix in stripsuffixes:
            lss = len(stripsuffix)
            if l[-lss:] == stripsuffix:
                l = l[:-lss]
                # Do not strip more than one suffix
                break

        stripped.append(l)

    return c(prefix, stripped, suffix, env)
def processDefines(defs):
    """process defines, resolving strings, lists, dictionaries, into a list of
    strings

    Each entry of the result is either 'NAME' or 'NAME=VALUE'.
    (Python 2 code: uses dict.iteritems below.)
    """
    if SCons.Util.is_List(defs):
        l = []
        for d in defs:
            if d is None:
                continue
            elif SCons.Util.is_List(d) or isinstance(d, tuple):
                # (name,) or (name, value) pairs.
                if len(d) >= 2:
                    l.append(str(d[0]) + '=' + str(d[1]))
                else:
                    l.append(str(d[0]))
            elif SCons.Util.is_Dict(d):
                for macro,value in d.iteritems():
                    if value is not None:
                        l.append(str(macro) + '=' + str(value))
                    else:
                        l.append(str(macro))
            elif SCons.Util.is_String(d):
                l.append(str(d))
            else:
                raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = []
        for k,v in sorted(defs.items()):
            if v is None:
                l.append(str(k))
            else:
                l.append(str(k) + '=' + str(v))
    else:
        # A bare string (or other scalar) is used as-is.
        l = [str(defs)]
    return l
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
    """A wrapper around _concat_ixes that turns a list or string
    into a list of C preprocessor command-line definitions
    (e.g. '-DNAME=VALUE' entries).
    """
    return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
    """A callable stand-in for command generators that should do nothing.

    Calling an instance simply hands back whatever it was constructed
    with, regardless of target/source/env.

    Example usage:
    env["DO_NOTHING"] = NullCmdGenerator
    env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
    """

    def __init__(self, cmd):
        # The value to echo back from every call.
        self.cmd = cmd

    def __call__(self, target, source, env, for_signature=None):
        return self.cmd
class Variable_Method_Caller(object):
    """A class for finding a construction variable on the stack and
    calling one of its methods.

    We use this to support "construction variables" in our string
    eval()s that actually stand in for methods--specifically, use
    of "RDirs" in call to _concat that should actually execute the
    "TARGET.RDirs" method.  (We used to support this by creating a little
    "build dictionary" that mapped RDirs to the method, but this got in
    the way of Memoizing construction environments, because we had to
    create new environment objects to hold the variables.)
    """
    def __init__(self, variable, method):
        # Name of the local variable to find on the call stack, and the
        # method name to invoke on whatever value it holds.
        self.variable = variable
        self.method = method

    def __call__(self, *args, **kw):
        # Force an exception purely to get a traceback object, from which
        # we can reach the caller's stack frames.
        try: 1//0
        except ZeroDivisionError:
            # Don't start iterating with the current stack-frame to
            # prevent creating reference cycles (f_back is safe).
            frame = sys.exc_info()[2].tb_frame.f_back
        variable = self.variable
        # Walk up the stack until a frame defines the variable with a
        # truthy value, then delegate to its method.
        while frame:
            if variable in frame.f_locals:
                v = frame.f_locals[variable]
                if v:
                    method = getattr(v, self.method)
                    return method(*args, **kw)
            frame = frame.f_back
        return None
# Baseline construction variables seeded into every new Environment.
ConstructionEnvironment = {
    'BUILDERS'      : {},
    'SCANNERS'      : [],
    'CONFIGUREDIR'  : '#/.sconf_temp',
    'CONFIGURELOG'  : '#/config.log',
    'CPPSUFFIXES'   : SCons.Tool.CSuffixes,
    'DSUFFIXES'     : SCons.Tool.DSuffixes,
    'ENV'           : {},
    'IDLSUFFIXES'   : SCons.Tool.IDLSuffixes,
#    'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
    '_concat'       : _concat,
    '_defines'      : _defines,
    '_stripixes'    : _stripixes,
    '_LIBFLAGS'     : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
    '_LIBDIRFLAGS'  : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPINCFLAGS'  : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPDEFFLAGS'  : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
    'TEMPFILE'      : NullCmdGenerator,
    'Dir'           : Variable_Method_Caller('TARGET', 'Dir'),
    'Dirs'          : Variable_Method_Caller('TARGET', 'Dirs'),
    'File'          : Variable_Method_Caller('TARGET', 'File'),
    'RDirs'         : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
"""Loading a report from JSON
This manual marshalling/de-marshalling is not awesome.
"""
import json
import logging
import uuid as U
from pbcommand.models.report import (Report, Plot, PlotGroup, Attribute,
Table, Column, ReportSpec, PlotlyPlot)
from pbcommand.schemas import validate_report, validate_report_spec
log = logging.getLogger(__name__)
__all__ = ["load_report_from_json", "load_report_from", "load_report_spec_from_json"]
def _to_id(s):
    """Return the last dot-separated component of an identifier string."""
    return s.split('.')[-1] if '.' in s else s
def _to_plot(d):
    """Build a Plot or PlotlyPlot instance from its report dict.

    'id' and 'image' are required; everything else is optional.
    Raises ValueError for an unknown 'plotType'.
    """
    plot_id = _to_id(d['id'])
    image = d['image']
    caption = d.get('caption')
    thumbnail = d.get('thumbnail')
    title = d.get('title')
    plot_type = d.get("plotType", Plot.PLOT_TYPE)
    if plot_type == Plot.PLOT_TYPE:
        return Plot(plot_id, image, caption=caption, thumbnail=thumbnail,
                    title=title)
    if plot_type == PlotlyPlot.PLOT_TYPE:
        return PlotlyPlot(plot_id, image, caption=caption, thumbnail=thumbnail,
                          title=title,
                          plotly_version=d.get("plotlyVersion"))
    raise ValueError("Unrecognized plotType '{t}'".format(t=plot_type))
def _to_plot_group(d):
    """Build a PlotGroup from its report dict; 'plots' defaults to empty."""
    group_id = _to_id(d['id'])
    plots = [_to_plot(pd) for pd in d.get('plots', [])]
    # title appears to be optional in the schema, like legend/thumbnail.
    return PlotGroup(group_id,
                     title=d.get('title'),
                     legend=d.get('legend'),
                     plots=plots,
                     thumbnail=d.get('thumbnail'))
def _to_attribute(d):
    """Build an Attribute from its report dict; 'value' is required."""
    return Attribute(_to_id(d['id']), d['value'], name=d.get('name'))
def _to_column(d):
    """Build a Column from its report dict; header and values are optional."""
    return Column(_to_id(d['id']),
                  header=d.get('header'),
                  values=d.get('values', []))
def _to_table(d):
    """Build a Table from its report dict.

    All columns must have the same number of values (rectangular table);
    a table with no columns is trivially consistent.

    :raises ValueError: if the columns are ragged.  (The original used a
        bare ``assert``, which rejected legal zero-column tables and is
        silently stripped under ``python -O``.)
    """
    table_id = _to_id(d['id'])
    title = d.get('title', None)
    columns = [_to_column(column_d) for column_d in d.get('columns', [])]

    nvalues = {len(c.values) for c in columns}
    if len(nvalues) > 1:
        raise ValueError(
            "Table '{i}' has columns of unequal lengths {n}".format(
                i=table_id, n=sorted(nvalues)))
    return Table(table_id, title=title, columns=columns)
def dict_to_report(dct):
    """Convert a raw report dict into a Report instance.

    Prefer the public `load_report_from` wrapper over calling this directly.
    """
    # Use `load_report_from` instead.
    # FIXME. Add support for different version schemas in a cleaner, more
    # concrete manner.
    report_id = dct['id']

    # Make this optional for now
    report_uuid = dct.get('uuid', str(U.uuid4()))

    tags = dct.get('tags', [])

    # Make sure the UUID is well formed (raises ValueError otherwise)
    _ = U.UUID(report_uuid)

    # Legacy Reports > 0.3.9 will not have the title key
    title = dct.get('title', "Report {i}".format(i=report_id))

    plot_groups = []
    if 'plotGroups' in dct:
        pg = dct['plotGroups']
        if pg:
            plot_groups = [_to_plot_group(d) for d in pg]

    attributes = []
    for r_attr in dct.get('attributes', []):
        attr = _to_attribute(r_attr)
        attributes.append(attr)

    tables = []
    for table_d in dct.get('tables', []):
        t = _to_table(table_d)
        tables.append(t)

    report = Report(report_id,
                    title=title,
                    plotgroups=plot_groups,
                    tables=tables,
                    attributes=attributes,
                    dataset_uuids=dct.get('dataset_uuids', ()),
                    uuid=report_uuid, tags=tags)
    return report
def __load_json_or_dict(processor_func):
    """Wrap *processor_func* so it accepts either a dict or a JSON file path."""
    def wrapper(json_path_or_dict):
        if isinstance(json_path_or_dict, dict):
            d = json_path_or_dict
        else:
            with open(json_path_or_dict, 'r') as f:
                d = json.load(f)
        return processor_func(d)
    return wrapper
def load_report_from(json_path_or_dict):
    """
    Load a Report from a raw dict or path to JSON file

    :param json_path_or_dict:
    :type json_path_or_dict: dict | str
    :return: Report instance
    """
    loader = __load_json_or_dict(dict_to_report)
    return loader(json_path_or_dict)
def load_report_from_json(json_file):
    """Convert a report json file to Report instance.

    Thin backwards-compatibility alias; prefer `load_report_from`.
    """
    return load_report_from(json_file)
def _to_report(nfiles, attribute_id, report_id):
    """Wrap a single numeric attribute in a minimal Report."""
    # this should have version of the bax/bas files, chemistry
    return Report(report_id, attributes=[Attribute(attribute_id, nfiles)])
def fofn_to_report(nfofns):
    """Build the standard FOFN-count report from a file count."""
    return _to_report(nfofns, "nfofns", "fofn_report")
def load_report_spec_from_json(json_file, validate=True):
    """Load (and by default schema-validate) a ReportSpec from a JSON file."""
    with open(json_file, 'r') as f:
        spec_d = json.load(f)
    if validate:
        validate_report_spec(spec_d)
    return ReportSpec.from_dict(spec_d)
|
import re
from collections import defaultdict
PATTERN = re.compile(r'#(?P<id>\d+) @ (?P<x>\d+),(?P<y>\d+): (?P<w>\d+)x(?P<h>\d+)')
def get_cells(x_s, y_s, w, h):
    """Yield every (x, y) cell covered by a w-by-h claim anchored at (x_s, y_s)."""
    for dx in range(w):
        for dy in range(h):
            yield (x_s + dx, y_s + dy)
def build_cells(data):
    """Map each fabric cell (x, y) to the tuple of claim ids covering it."""
    cells = defaultdict(tuple)
    for line in data:
        claim_id, x_s, y_s, w, h = map(int, PATTERN.match(line).groups())
        for cell in get_cells(x_s, y_s, w, h):
            cells[cell] += (claim_id,)
    return cells
def run_slice(data):
    """Part 1: count the cells claimed by more than one claim."""
    overlapping = 0
    for ids in build_cells(data).values():
        if len(ids) > 1:
            overlapping += 1
    return overlapping
def run_slice2(data):
    """Part 2: return the id of the only claim that overlaps no other."""
    cells = build_cells(data)
    for line in data:
        claim_id, x_s, y_s, w, h = map(int, PATTERN.match(line).groups())
        alone = all(len(cells[cell]) == 1
                    for cell in get_cells(x_s, y_s, w, h))
        if alone:
            return claim_id
if __name__ == '__main__':
    # Read the puzzle input once and print the answers to both parts.
    with open('./data/day3.txt') as f:
        data = f.readlines()
    print(run_slice(data))
    print(run_slice2(data))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# python version 2.7.6
from snakebite.client import Client
import glob
'''
使用snakebite可以读取hdfs中数据的信息
目前没有发现往HDFS中写入数据的方法
url:https://github.com/spotify/snakebite
'''
def display():
    """List the HDFS entries under /data/gz on namenode 'study'."""
    client = Client("study", 9000, use_trash=False)
    for x in client.ls(['/data/gz']):
        print x
def copy():
    """Copy /data/gz from HDFS to the local /root/data/ directory."""
    client = Client("study", 9000, use_trash=False)
    # CRC verification disabled for speed.
    client.copyToLocal(["/data/gz"],"/root/data/",check_crc=False)
def count():
    """Print the HDFS count summary (dirs/files/bytes) for /data/gz."""
    client = Client("study", 9000, use_trash=False)
    count = client.count(["/data/gz"])
    for i in count:
        print i
def getDf():
    """Print the filesystem usage summary (df) of the HDFS cluster."""
    client = Client("study", 9000, use_trash=False)
    info = client.df()
    print info
def delete():
    """Delete /data/gz from HDFS (non-recursive, bypasses trash)."""
    client = Client("study", 9000, use_trash=False)
    client.delete(["/data/gz"], recurse=False)
def test():
    """Local-only check: print every .fastq file in the sample directory."""
    for filename in glob.glob("/Users/mac/Documents/data/fastq/*.fastq"):
        print filename
if __name__ == '__main__':
copy()
|
# Manual smoke test for a local Tornado websocket server: send one message
# and echo the reply.
import websocket

# Log the websocket handshake and frames to stdout for debugging.
websocket.enableTrace(True)
ws = websocket.create_connection("ws://127.0.0.1:8888/websocket")
ws.send('Hello, Tornado')
result = ws.recv()
print(result)
ws.close()
# import socket
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# host = 'localhost'
# port = 8888
# s.connect((host , port))
# message = "GET / HTTP/1.1\r\n\r\n"
# s.sendall(message)
# chunk = s.recv(4096)
# data = []
# data.append(chunk)
# ''.join(data)
|
from setuptools import setup

# Package metadata collected in one mapping so it is easy to inspect or reuse.
PACKAGE_INFO = dict(
    name='flask_microservices_builder',
    version='0.0.1',
    packages=['flask_microservices_builder'],
    url='https://github.com/invana/flask-microservice-builder',
    license='MIT License',
    author='Ravi Raja Merugu',
    author_email='ravi@invanalabs.ai',
    description='Python module that lets you aggregate isolated Flask APIs apps, into '
                'a single application, with release notes, documentation in one place.',
)

setup(**PACKAGE_INFO)
|
# ------------------------------------
# CODE BOOLA 2015 PYTHON WORKSHOP
# Mike Wu, Jonathan Chang, Kevin Tan
# Puzzle Challenges Number 6
# ------------------------------------
# Let's go back to the basics.
# ------------------------------------
# INSTRUCTIONS:
# Write a function that takes a single
# input to calculate its factorial.
# Remember that the factorial of 0
# is 1! And any factorial of a negative
# number should return None.
# EXAMPLE:
# factorial(1) => 1
# factorial(10) => 3628800
# factorial(-1) => None
# factorial(0) => 1
# factorial(4) => 24
# HINT:
# The easiest way is to use a for loop!
def factorial(n):
    """Return n! for a non-negative integer n, or None when n is negative.

    factorial(0) == 1 by definition.

    (The original was an unimplemented stub; implemented per the puzzle
    instructions above, using the suggested for loop.)
    """
    if n < 0:
        return None
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
|
budget = float(input())
season = input()

# Destination tier and the fraction of the budget spent, keyed by season.
if budget <= 100:
    destination = "Bulgaria"
    spend_ratio = {"winter": 0.70, "summer": 0.30}
elif budget <= 1000:
    destination = "Balkans"
    spend_ratio = {"winter": 0.80, "summer": 0.40}
else:
    destination = "Europe"
    spend_ratio = {"winter": 0.90, "summer": 0.90}

# Nothing is printed for an unrecognised season (same as the original branching).
if season in spend_ratio:
    money = budget * spend_ratio[season]
    # Winter stays are always in a hotel; in Europe even summer is a hotel stay.
    place = "Hotel" if season == "winter" or destination == "Europe" else "Camp"
    print(f"Somewhere in {destination}")
    print(f"{place} - {money:.2f}")
# pip install newspaper3k  (PyPI package name; imported as `newspaper`)
from newspaper import Article
# pip install beautifulsoup4
from bs4 import BeautifulSoup
import urllib.request
import urllib.parse

# Naver News front-page URL
url = "https://news.naver.com"
# Fetch the front page
response=urllib.request.urlopen(url)
soup=BeautifulSoup(response,'html.parser')
# Select the headline-news anchor tags from the front page
results = soup.select(".hdline_article_tit a")
# Collect the headline article URLs
urls = list()
for result in results:
    # hrefs are site-relative, so prefix the base URL
    urls.append(url+result.attrs["href"])
for url in urls :
    # Download and parse each article with Korean language handling
    lang = Article(url, language = 'ko')
    lang.download()
    lang.parse()
    # Article title
    print(lang.title+"\n")
    # First 150 characters of the article body
    print(lang.text[:150]+"\n")
    print("다음 뉴스\n")
# References (Korean blog posts this script was based on):
# "Bangbang"'s blog: [Python] 20171117 crawling - fetching Naver News headlines
# https://m.blog.naver.com/PostView.nhn?blogId=kbw0104&logNo=221142104174&proxyReferer=https%3A%2F%2Fwww.google.com%2F
# "EXCELSIOR"'s blog: crawling news articles with the Newspaper module
# https://excelsior-cjh.tistory.com/92
from atlas.modules.commands.base import BaseCommand
from atlas.modules.resource_creator.creators import AutoGenerator
class Generate(BaseCommand):
    """Command that auto-generates resources from the project's Swagger file."""

    help = "Auto generate resources from Swagger file and update Res Mapping and Swagger File"

    def handle(self, **options):
        """Parse the Swagger file and write the updated resource mapping."""
        generator = AutoGenerator()
        generator.parse()
        generator.update()
|
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import os.path
if 'TOOLBOX_PATH' in os.environ:
sys.path.append(os.path.join(os.environ['TOOLBOX_PATH'], 'python'))
else:
raise EnvironmentError(f"BART TOOLBOX_PATH not set. Please set TOOLBOX_PATH environment variable")
import cfl
import skimage.metrics as metrics
import pandas as pd
import matplotlib
matplotlib.use('Agg')
# Experiment grid: undersampling factors, reconstruction methods, patients.
undersampling_factors_string = ["2", "3", "4"]
recon_methods = "ENLIVE DNLINV FastMRI_Unet".split()
patient_directories = [f'P{idx + 1}' for idx in range(10)]

# Filename prefix (or fixed name) under which each method stores its result.
method_filenames_key = {
    'DNLINV': 'rsos_multicoil_estimate',
    'DIP': 'rsos_multicoil_estimate',
    'ENLIVE': 'r_mm_',
    'SAKE': 'r_sake_',
    'ESPIRiT': 'r_mm_abs_',
    'FastMRI_Unet': 'r_mm_',
}

# Metric arrays indexed as [undersampling factor, method, patient].
psnr_results = np.empty([len(undersampling_factors_string), len(recon_methods), len(patient_directories)])
ssim_results = np.empty([len(undersampling_factors_string), len(recon_methods), len(patient_directories)])

for idx_p, pdir in enumerate(patient_directories):
    # Load the fully-sampled reference image (magnitude only).
    ref_img_path = os.path.join(pdir, 'data', 'knee-fully-sampled-reference')
    ref_img = cfl.readcfl(ref_img_path)
    ref_img = np.abs(ref_img.squeeze())
    # The unpack also asserts the squeezed reference is 2-D.
    height, width = ref_img.shape
    for idx_us, us in enumerate(undersampling_factors_string):
        # Prefer the 'full_pat_*' sampling mask when it exists, else 'pat_*'.
        mask = cfl.readcfl(os.path.join(pdir, 'data', f'full_pat_{us}')) \
            if os.path.exists(os.path.join(pdir, 'data', f'full_pat_{us}.cfl')) \
            else cfl.readcfl(os.path.join(pdir, 'data', f'pat_{us}'))
        mask = np.abs(mask.squeeze())
        mask /= mask.max()
        for idx_method, method in enumerate(recon_methods):
            # Each method stores its reconstruction under a different layout.
            if method == 'DNLINV':
                input_img_path = os.path.join(pdir, f'reco_{method}', f'dnlinv_{us}', method_filenames_key[method])
            elif method == 'DIP':
                input_img_path = os.path.join(pdir, f'reco_{method}', f'dip_{us}', method_filenames_key[method])
            else:
                input_img_path = os.path.join(pdir, f'reco_{method}', f'{method_filenames_key[method]}{us}')
            img = cfl.readcfl(input_img_path)
            img = np.abs(img.squeeze())
            # Use the reference maximum as the data range for both metrics.
            # (Removed dead code: unused ndims_ref, and the mse/nrmse values
            # that only fed an unused `results` dict.)
            psnr = metrics.peak_signal_noise_ratio(ref_img, img, data_range=ref_img.max())
            ssim = metrics.structural_similarity(ref_img, img, multichannel=False, data_range=ref_img.max())
            psnr_results[idx_us, idx_method, idx_p] = psnr
            ssim_results[idx_us, idx_method, idx_p] = ssim

# Mean and standard deviation over patients, stacked along a new last axis.
summary_psnr_results = np.stack([np.mean(psnr_results, axis=-1), np.std(psnr_results, axis=-1)], axis=-1)
summary_ssim_results = np.stack([np.mean(ssim_results, axis=-1), np.std(ssim_results, axis=-1)], axis=-1)

fig_size = (2, 4)
text_data = ""
for idx_us, us in enumerate(undersampling_factors_string):
    # PSNR bar chart with std-dev error bars, one figure per undersampling factor.
    plt.figure(figsize=fig_size)
    plt.title(f'AF = {us.replace("-", ".")}')
    plt.bar(range(len(recon_methods)), summary_psnr_results[idx_us, ..., 0],
            fc="None", edgecolor="k")
    plt.xticks(range(len(recon_methods)), recon_methods)
    plt.ylabel('Peak signal-to-noise ratio (dB)')
    plt.errorbar(x=range(len(recon_methods)), y=summary_psnr_results[idx_us, ..., 0],
                 yerr=summary_psnr_results[idx_us, ..., 1], fmt='None', capsize=5, ecolor='k')
    plt.ylim([0, 40])
    plt.tight_layout()
    plt.savefig(f'summary_psnr_{us}.png', dpi=300)
    plt.close()

    # SSIM bar chart with std-dev error bars.
    plt.figure(figsize=fig_size)
    plt.title(f'AF = {us.replace("-", ".")}')
    plt.bar(range(len(recon_methods)), summary_ssim_results[idx_us, ..., 0],
            fc="None", edgecolor="k")
    plt.xticks(range(len(recon_methods)), recon_methods)
    plt.ylabel('Structural similarity index (a.u.)')
    plt.errorbar(x=range(len(recon_methods)), y=summary_ssim_results[idx_us, ..., 0],
                 yerr=summary_ssim_results[idx_us, ..., 1], fmt='None', capsize=5, ecolor='k')
    plt.ylim([0, 1])
    plt.tight_layout()
    plt.savefig(f'summary_ssim_{us}.png', dpi=300)
    plt.close()

    # Append the numeric summary for this undersampling factor to the report.
    text_data = text_data + f'AF = {us.replace("-", ".")} \n'
    text_data = text_data + 'Peak signal-to-noise ratio (dB) \n'
    for idx_method, method in enumerate(recon_methods):
        text_data = text_data + f'{method} = {summary_psnr_results[idx_us, idx_method, 0]:.2f} +/- {summary_psnr_results[idx_us, idx_method, 1]:.2f} \n'
    text_data = text_data + 'Structural similarity index (a.u.) \n'
    for idx_method, method in enumerate(recon_methods):
        text_data = text_data + f'{method} = {summary_ssim_results[idx_us, idx_method, 0]:.4f} +/- {summary_ssim_results[idx_us, idx_method, 1]:.4f} \n'

print(text_data)
with open('summary_results.txt', 'w') as f:
    f.write(text_data)
|
# -*- coding: utf-8 -*-
import os
from datetime import timedelta
from enum import IntEnum
from peewee import *
from playhouse.hybrid import hybrid_property
from typing import List
from botlistbot import helpers
from botlistbot import settings
from botlistbot import util
from botlistbot.models.basemodel import BaseModel, EnumField
from botlistbot.models.category import Category
from botlistbot.models.country import Country
from botlistbot.models.revision import Revision
from botlistbot.models.user import User
class Bot(BaseModel):
    """Peewee model describing a Telegram bot entry in the BotList."""

    class DisabledReason(IntEnum):
        # In prioritized order, cannot go from banned to offline
        banned = 10
        offline = 20

        @classmethod
        def to_str(cls, value):
            """Return a human-readable phrase for *value* (used in user messages)."""
            if value == cls.banned:
                return "is banned"
            elif value == cls.offline:
                return "has been offline for too long"

    # --- database columns ---------------------------------------------------
    id = PrimaryKeyField()
    revision = IntegerField()
    category = ForeignKeyField(Category, null=True)
    name = CharField(null=True)
    username = CharField(unique=True)
    description = TextField(null=True)
    date_added = DateField()
    country = ForeignKeyField(Country, null=True)
    inlinequeries = BooleanField(default=False)
    official = BooleanField(default=False)
    extra = CharField(null=True)
    spam = BooleanField(default=False)
    bot_info_version = CharField(null=True)
    restriction_reason = CharField(null=True)
    last_ping = DateTimeField(null=True)
    last_response = DateTimeField(null=True)
    disabled = BooleanField(default=False)
    disabled_reason = EnumField(DisabledReason, null=True)
    userbot = BooleanField(default=False)
    botbuilder = BooleanField(default=False)
    chat_id = IntegerField(null=True)
    approved = BooleanField(default=True)
    submitted_by = ForeignKeyField(User, null=True, related_name='submitted_by')
    approved_by = ForeignKeyField(User, null=True, related_name='approved_by')

    @hybrid_property
    def offline(self) -> bool:
        # A bot counts as offline when the latest ping has not been answered.
        if not self.last_ping:
            return False
        return self.last_response != self.last_ping

    @hybrid_property
    def online(self) -> bool:
        return not self.offline

    @property
    def offline_for(self) -> timedelta:
        # NOTE(review): returns None when the bot is online, despite the
        # timedelta annotation -- callers appear to check `offline` first.
        if not self.last_response:
            # Never responded: treat as offline "forever" (two years).
            return timedelta(days=365 * 2)
        return self.last_ping - self.last_response if self.offline else None

    @staticmethod
    def select_approved():
        """Approved, enabled bots that are part of the current revision."""
        return Bot.select().where(
            Bot.approved == True,
            Bot.revision <= Revision.get_instance().nr,
            Bot.disabled == False
        )

    @staticmethod
    def select_unapproved():
        """Bots still waiting for moderation (and not disabled)."""
        return Bot.select().where(Bot.approved == False, Bot.disabled == False)

    @staticmethod
    def select_pending_update():
        """Approved bots whose changes belong to the upcoming revision."""
        return Bot.select().where(
            Bot.approved == True,
            Bot.revision == Revision.get_instance().next,
            Bot.disabled == False
        )

    @property
    def serialize(self):
        """Plain-dict representation of this bot for the API/export layer."""
        return {
            'id': self.id,
            'category_id': self.category.id,
            # 'name': self.name,
            'username': self.username,
            'description': self.description,
            'date_added': self.date_added,
            'inlinequeries': self.inlinequeries,
            'official': self.official,
            'extra_text': self.extra,
            'offline': self.offline,
            'spam': self.spam,
            'botlist_url': helpers.botlist_url_for_category(self.category),
        }

    def disable(self, reason: DisabledReason):
        """Disable this bot for *reason*.

        Returns True if the value changed, False if it was already set.
        Raises ValueError when trying to move to a lower-priority reason
        (e.g. from banned back to offline; lower enum value = higher priority).
        """
        if self.disabled:
            if self.disabled_reason == reason:
                return False  # if value unchanged
            if reason.value > self.disabled_reason:
                raise ValueError("Invalid reason, cannot go from {} to {}.".format(
                    self.disabled_reason.name,
                    reason.name
                ))
        self.disabled = True
        self.disabled_reason = reason
        return True  # if value changed

    def enable(self):
        """Re-enable this bot; returns True if the value changed."""
        if not self.disabled:
            return False  # if value unchanged
        self.disabled = False
        self.disabled_reason = None
        return True  # if value changed

    @hybrid_property
    def is_new(self):
        # today = datetime.date.today()
        # delta = datetime.timedelta(days=settings.BOT_CONSIDERED_NEW)
        # result = today - self.date_added <= delta
        # return result
        # "New" means added within the last BOT_CONSIDERED_NEW revisions.
        return self.revision >= Revision.get_instance().nr - settings.BOT_CONSIDERED_NEW + 1

    def __str__(self):
        # Markdown-escaped one-line representation.
        return util.escape_markdown(self.str_no_md).encode('utf-8').decode('utf-8')

    @property
    def detail_text(self):
        """Markdown detail view: username, name, description and keywords."""
        from botlistbot.models import Keyword
        keywords = Keyword.select().where(Keyword.entity == self)
        txt = '{}'.format(self.__str__())
        txt += '\n_{}_'.format(util.escape_markdown(self.name)) if self.name else ''
        txt += '\n\n{}'.format(self.description) if self.description else ''
        txt += util.escape_markdown(
            '\n\nKeywords: {}'.format(
                ', '.join([str(k) for k in keywords])
            ) if keywords else ''
        )
        return txt

    @property
    def str_no_md(self):
        """One-line plain (non-escaped) representation with status emoji."""
        return ('💤 ' if self.offline else '') + \
               ('🚮 ' if self.spam else '') + \
               ('🆕 ' if self.is_new else '') + \
               self.username + \
               (' ' if any([self.inlinequeries, self.official, self.country]) else '') + \
               ('🔎' if self.inlinequeries else '') + \
               ('🔹' if self.official else '') + \
               (self.country.emoji if self.country else '') + \
               (' ' + self.extra if self.extra else '')

    @staticmethod
    def by_username(username: str, include_disabled=False):
        """Case-insensitive lookup by username; raises Bot.DoesNotExist."""
        if include_disabled:
            result = Bot.select().where(
                fn.lower(Bot.username) == username.lower()
            )
        else:
            result = Bot.select().where(
                fn.lower(Bot.username) == username.lower(),
                Bot.disabled == False
            )
        if len(result) > 0:
            return result[0]
        else:
            raise Bot.DoesNotExist()

    @staticmethod
    def explorable_bots():
        """Bots eligible for 'explore': described, approved, current, online."""
        results = Bot.select().where(
            ~(Bot.description.is_null()),
            (Bot.approved == True),
            (Bot.revision <= Revision.get_instance().nr),
            (Bot.offline == False),
            (Bot.disabled == False)
        )
        return list(results)

    @staticmethod
    def many_by_usernames(names: List):
        """Case-insensitive bulk lookup; raises Bot.DoesNotExist when nothing matches."""
        results = Bot.select().where(
            (fn.lower(Bot.username) << [n.lower() for n in names]) &
            (Bot.revision <= Revision.get_instance().nr) &
            (Bot.approved == True) &
            (Bot.disabled == False)
        )
        if results:
            return results
        raise Bot.DoesNotExist

    @staticmethod
    def of_category_without_new(category):
        """All current, approved bots of *category*, ordered by username."""
        return Bot.select().where(
            (Bot.category == category),
            (Bot.approved == True),
            (Bot.revision <= Revision.get_instance().nr),
            (Bot.disabled == False)
        ).order_by(fn.Lower(Bot.username))

    @staticmethod
    def select_official_bots():
        return Bot.select().where(Bot.approved == True, Bot.official == True,
                                  Bot.disabled == False)

    @staticmethod
    def select_new_bots():
        return Bot.select().where(
            Bot.is_new == True,
            Bot.revision < Revision.get_instance().next,
            Bot.approved == True,
            Bot.disabled == False
        )

    @staticmethod
    def get_official_bots_markdown():
        # One indented line per official bot, using __str__ (markdown-escaped).
        return '\n'.join([' {}'.format(str(b)) for b in Bot.select_official_bots()])

    @staticmethod
    def get_new_bots_markdown():
        return '\n'.join([' {}'.format(str(b)) for b in Bot.select_new_bots()])

    @staticmethod
    def get_pending_update_bots_markdown():
        return '\n'.join([' {}'.format(str(b)) for b in Bot.select_pending_update()])

    @property
    def keywords(self):
        """Keyword rows attached to this bot."""
        from botlistbot.models.keywordmodel import Keyword
        return Keyword.select().where(Keyword.entity == self)

    @property
    def thumbnail_file(self):
        """Path of this bot's thumbnail image (username without the leading '@')."""
        path = os.path.join(settings.BOT_THUMBNAIL_DIR, self.username[1:].lower() + '.jpg')
        return path
|
import streamlit as st
# Analysis packages
import pandas as pd
import numpy as np
import pickle
import joblib
import sklearn
from sklearn.preprocessing import StandardScaler
# Geo Pkgs
import folium
import geopandas as gpd
import json
# __________main page_________________
st.title("toddler-town")
st.subheader("estimating population access to daycare in cities")
st.text("toddler-town estimates the number of children under 6 years of age without a daycare spot")


def _render_city_choropleth(tracts_geojson, features_csv, map_center):
    """Score one city's census tracts and render them as a folium choropleth.

    tracts_geojson -- path to the census-tract polygon GeoJSON file
    features_csv   -- path to the per-tract feature table (must contain FIPS)
    map_center     -- [lat, lon] used to centre the folium map

    (Extracted from two near-identical copy-pasted branches for Austin and
    Houston; only the file paths and map centre differed.)
    """
    # polygon data
    polygons = gpd.read_file(tracts_geojson, driver='geoJSON')
    polygons = polygons[['geometry', 'FIPS']]
    polygons['FIPS'] = polygons['FIPS'].astype(int)
    # loading city data
    city = pd.read_csv(features_csv)
    city_analysis = city.drop(columns=['FIPS'])
    # NOTE(review): fitting the scaler on inference data assumes the model was
    # trained against identically-scaled features -- confirm with the training pipeline.
    sc = StandardScaler()
    city_analysis_scaled = sc.fit_transform(city_analysis)
    # loading the model and scoring every tract
    loaded_model = joblib.load('finalized_model_lasso.sav')
    city['desert_score'] = loaded_model.predict(city_analysis_scaled)
    # mapping the choropleth
    m = folium.Map(location=map_center, zoom_start=10)
    folium.Choropleth(
        geo_data=polygons,
        name='choropleth',
        data=city,
        columns=['FIPS', 'desert_score'],
        key_on='feature.properties.FIPS',
        fill_color='YlOrRd',
        fill_opacity=0.5,
        line_opacity=0.2,
        legend_name='toddler-town score',
        highlight=True
    ).add_to(m)
    st.markdown(m._repr_html_(), unsafe_allow_html=True)


if st.button("austin"):
    _render_city_choropleth("Austin_Census_Tracts_9Gg.json", "austin_66_features.csv", [30.2672, -97.7431])
if st.button("houston"):
    _render_city_choropleth("Census_Tracts_Houston1_MgV.json", "houston_66_features.csv", [29.7604, -95.3698])
|
from django.apps import AppConfig
class TreasurySSConfig(AppConfig):
    """Django app configuration for the treasurySS app."""

    name = "treasurySS"
    verbose_name = "treasury segments"
|
from kissom.storeManager import StoreManager
from kissom.appExceptions import (
KissomException,
TableNameDoesNotExistException,
TableNameNotDefinedException,
ObjectNotProvidedException,
PrimaryKeyNotProvidedException,
ObjectAttributeValueException,
)
from kissom.utils.conditions import getConditions, getConditionGroup
|
from django.shortcuts import get_object_or_404, render

from account.models import Account
def earl_list_view(request):
    """Render the browse page listing all accounts, split into users and fict/hist earls."""
    accounts = Account.objects.all()
    return render(
        request,
        "earls/earllist.html",
        {
            "earl_list": accounts,
            "users": accounts.filter(is_user=True),
            "ficthist": accounts.filter(is_user=False),
            "active_page": "browse",
        },
    )
def earl_public_page(request, pk):
    """Render the public profile page for a single Account.

    Uses get_object_or_404 so a missing pk yields an HTTP 404 instead of an
    unhandled Account.DoesNotExist (HTTP 500), as the bare .get() did.
    """
    earl = get_object_or_404(Account, id=pk)
    return render(request, 'earls/public_page.html', {'earl': earl, "active_page": "browse"})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.