import datetime
player_position = {
'1B': ['el primera base', 'el inicialista'],
'2B': ['el segunda base', 'el camarero'],
'SS': ['el campo corto'],
'3B': ['el tercera base', 'el antesalista'],
'C' : ['el receptor', 'el catcher'],
'LF': ['el jardinero izquierdo', 'el left fielder'],
'CF': ['el jardinero central', 'el center fielder'],
'RF': ['el jardinero derecho', 'el right fielder'],
'P' : ['el lanzador', 'el pitcher'],
'DH': ['el bateador designado', 'el designado'],
'PH': ['el bateador emergente', 'el emergente'],
'PR': ['el corredor emergente', 'el corredor sustituto']
}
ordinal = {
1: ['primer', 'primera',],
2: ['segundo', 'segunda'],
3: ['tercer', 'tercera'],
4: ['cuarto', 'cuarta'],
5: ['quinto', 'quinta'],
6: ['sexto', 'sexta'],
7: ['séptimo', 'séptima'],
8: ['octavo', 'octava'],
9: ['noveno', 'novena'],
10: ['décimo', 'décima'],
11: ['onceno', 'oncena'],
12: ['duodécimo', 'duodécima'],
13: ['décimo tercer', 'décimo tercera'],
14: ['décimo cuarto', 'décimo cuarta'],
15: ['décimo quinto', 'décimo quinta']
}
cardinal = {
2: "dos",
3: "tres",
4: "cuatro",
5: "cinco",
6: "seis",
7: "siete",
8: "ocho",
9: "nueve"
}
direction = {
'1B': ['primera base', 'la inicial'],
'2B': ['segunda base', 'la intermedia'],
'SS': ['el campo corto'],
'3B': ['tercera base', 'la antesala'],
'LF': ['el jardín izquierdo', 'la pradera izquierda', 'el left field'],
'CF': ['el jardín central', 'la pradera central', 'el center field'],
'RF': ['el jardín derecho', 'la pradera derecha', 'el right field'],
'C': ['el receptor', 'el catcher'],
'P': ['el pitcher', 'el lanzador']
}
month = {
1: 'enero',
2: 'febrero',
3: 'marzo',
4: 'abril',
5: 'mayo',
6: 'junio',
7: 'julio',
8: 'agosto',
9: 'septiembre',
10: 'octubre',
11: 'noviembre',
12: 'diciembre'
}
def get_yesterday_date():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
a = ['ro de ', ' de ']
i = 1
if yesterday.day == 1:
i = 0
return str(yesterday.day) + a[i] + month[yesterday.month]
def fill_template(template, d):
    """Replace every '{{ key }}' slot in the template with d[key]."""
    cont = 0
    slots = []
    current = ''
    found = False
    for i in template:
        if i == '{':
            found = True
            cont += 1
        if found:
            current += i
        if i == '}':
            cont -= 1
            if cont == 0:
                slots.append(current)
                current = ''
                found = False
    for s in slots:
        x = s.replace('{{ ', '').replace(' }}', '')
        x = d[x]
        template = template.replace(s, x)
    return template
def number(n):
if 2 <= n <= 9:
return cardinal[n]
return str(n)
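# A brief usage sketch: the template string and slot values below are
# hypothetical, made up only to show how fill_template(), number() and
# get_yesterday_date() fit together.
if __name__ == '__main__':
    datos = {
        'posicion': player_position['SS'][0],
        'entrada': ordinal[7][1],
        'carreras': number(3),
        'fecha': get_yesterday_date(),
    }
    frase = fill_template(
        'Hoy, {{ fecha }}, {{ posicion }} impulsó {{ carreras }} carreras en la {{ entrada }} entrada.',
        datos)
    print(frase)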
|
# Script to train a gperc model on time series data from NSE Stock dataset for TATAMOTORS
# Dataset preparation from https://github.com/ElisonSherton/TimeSeriesForecastingNN/blob/main/curateData.py
from nsepy import get_history
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from tqdm import trange
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from gperc.utils import set_seed
from gperc import PerceiverConfig, Perceiver
from gperc.models import build_position_encoding
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
# --------------- Preparing the dataset --------------- #
def get_dataset(symbol,start_date,end_date):
df = get_history(symbol=symbol,start=start_date,end=end_date)
df['Date']=df.index
return df
def prepare_data(df,price_column,date_column,num_steps=5):
for i in range(num_steps):
df[f"lag_{i + 1}"] = df[price_column].shift(periods = (i + 1))
new_df = df[[date_column, price_column] + [f"lag_{x + 1}" for x in range(num_steps)]]
new_df = new_df.iloc[num_steps:-1, :]
inputs,outputs = [],[]
for record in new_df.itertuples():
input = record[-num_steps:][::-1]
output = record[-(num_steps+1)]
inputs.append(input)
outputs.append(output)
size = len(inputs)
inputs = np.array(inputs)
outputs = np.array(outputs)
transformation = MinMaxScaler(feature_range=(-1, 1))
inputs = transformation.fit_transform(inputs)
outputs = transformation.fit_transform(outputs.reshape(-1, 1))
trainX = inputs[:int(0.8 * size)]
trainY = outputs[:int(0.8 * size)]
    testX = inputs[int(0.8 * size):]
    testY = outputs[int(0.8 * size):]
return trainX,trainY,testX,testY
# ----------------------------------------------------- #
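# A toy illustration (hypothetical prices) of the windowing performed by
# prepare_data(): each row's inputs are the previous num_steps closing prices
# and its target is the current close, both scaled to [-1, 1].
import pandas as pd

_toy = pd.DataFrame({"Close": [10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]})
_toy["Date"] = pd.date_range("2020-01-01", periods=len(_toy))
_tX, _tY, _vX, _vY = prepare_data(_toy.copy(), "Close", "Date", num_steps=2)
print("toy lag windows:", _tX.shape, "toy targets:", _tY.shape)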
class StockDataset(Dataset):
def __init__(self,inputs,outputs):
self.inputs = inputs
self.outputs = outputs
def __len__(self):
return len(self.inputs)
def __getitem__(self,i):
input = torch.Tensor(self.inputs[i])
output = torch.Tensor(self.outputs[i])
return (input,output)
start_date = datetime.strptime('02/02/18', '%m/%d/%y')
end_date = datetime.strptime('02/09/20', '%m/%d/%y')
dataset = get_dataset(symbol="TATAMOTORS",start_date=start_date,end_date=end_date)
trainX,trainY,testX,testY = prepare_data(dataset,'Close','Date')
train_ds = StockDataset(trainX,trainY)
test_ds = StockDataset(testX,testY)
# Defining the configs
config = PerceiverConfig(
input_len = len(train_ds[0][0]),
input_dim = 1,
latent_dim = 1,
num_heads=1,
output_len = 1,
output_dim = 1,
decoder_cross_attention=True,
decoder_projection=False,
)
# Instantiating model with configs
class StockDataPrediction(torch.nn.Module):
def __init__(self,config):
super().__init__()
self.config = config
self.emb = build_position_encoding("trainable", config, config.input_len, config.input_dim)
self.perceiver = Perceiver(config)
def num_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(self, x):
pos_emb = torch.cat([self.emb[None, ...] for _ in range(x.shape[0])], dim=0)
out = x + pos_emb
return self.perceiver(out)
# Defining the dataloaders
batch_size = 32
dl_train = DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=True)
dl_test = DataLoader(test_ds, batch_size=batch_size, shuffle=False, drop_last=False)
set_seed(config.seed)
model = StockDataPrediction(config)
print("model parameters:", model.num_parameters())
iter_dl_train = iter(dl_train)
pbar = trange(10000)
optim = Adam(model.parameters(), lr=0.001)
loss_func = torch.nn.MSELoss()
all_loss = []
# train!
for i in pbar:
try:
x, y = next(iter_dl_train)
except StopIteration:
iter_dl_train = iter(dl_train)
x, y = next(iter_dl_train)
optim.zero_grad()
_y = model(torch.unsqueeze(x,2))
loss = loss_func(_y, torch.unsqueeze(y,2))
all_loss.append(loss.item())
pbar.set_description(f"loss: {np.mean(all_loss[-50:]):.4f}")
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
if (i + 1) % 500 == 0:
model.eval()
with torch.no_grad():
_all_loss = []
for x, y in dl_test:
_y = model(torch.unsqueeze(x,2))
loss = loss_func(_y, torch.unsqueeze(y,2))
_all_loss.append(loss.item())
print(f"Test Loss: {sum(_all_loss)}")
model.train()
# plotting training and testing loss
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.plot(savgol_filter(all_loss, window_length=51, polyorder=3))
plt.title("Training Loss")
plt.subplot(1, 2, 2)
plt.plot(_all_loss)
plt.title("Testing Loss")
plt.show()
|
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# Random uppercase letter
def upperChar():
    # chr() returns the letter for the given ASCII code (65-90 are 'A'-'Z')
    return chr(random.randint(65, 90))
# Random color
def randomColor():
    return (random.randint(50, 255), random.randint(50, 255), random.randint(50, 255))
width = 200
height = 50
fontSize = 45
image = Image.new('RGB', (width, height), (255, 255, 255))
# Create a Font object from a TrueType font:
# parameters: font=None, size=10, index=0, encoding='', layout_engine=None
font = ImageFont.truetype('Futura LT Bold.ttf', fontSize)
# font = ImageFont.truetype('arial.ttf', fontSize)
# Create a Draw object:
draw = ImageDraw.Draw(image)
# Fill every pixel with white:
for x in range(width):
for y in range(height):
draw.point((x, y), fill=(255,255,255))
# Draw the text:
for t in range(4):
xy = (50 * t + 10, -5)
draw.text(xy, upperChar(), font=font, fill=randomColor())
draw.line([(0,0), (50,30)],'#223388',3)
draw.line([(70,25), (170,0)],'#223388',2)
# The last argument is the outline width
# The two points define the bounding rectangle; the pie slice is centred on it,
# with 0 degrees pointing right (3 o'clock) and angles increasing clockwise
draw.pieslice([(100,5), (140,40)],0, 130, '#223388',1)
# Blur (optional):
# image = image.filter(ImageFilter.BLUR)
image.save('code.jpg', 'jpeg')
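# If the generated characters need to be verified later (e.g. kept in a web
# session), a small variation is to collect them while drawing. This is only
# a sketch that reuses the helpers and globals above; the font file is the
# same assumption as before.
def make_captcha(text_len=4):
    img = Image.new('RGB', (width, height), (255, 255, 255))
    d = ImageDraw.Draw(img)
    chars = [upperChar() for _ in range(text_len)]
    for t, ch in enumerate(chars):
        d.text((50 * t + 10, -5), ch, font=font, fill=randomColor())
    # a light blur makes automated recognition slightly harder
    return ''.join(chars), img.filter(ImageFilter.BLUR)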
|
# -*- coding: utf-8 -*-
# file: train_atepc.py
# time: 2021/5/21 0021
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
########################################################################################################################
# ATEPC training_tutorials script #
########################################################################################################################
from pyabsa.functional import ATEPCModelList
from pyabsa.functional import Trainer, ATEPCTrainer
from pyabsa.functional import ABSADatasetList
from pyabsa.functional import ATEPCConfigManager
chinese_sets = ABSADatasetList.Chinese
atepc_config_english = ATEPCConfigManager.get_atepc_config_english()
atepc_config_english.num_epoch = 10
atepc_config_english.evaluate_begin = 4
atepc_config_english.log_step = 100
atepc_config_english.model = ATEPCModelList.LCF_ATEPC
aspect_extractor = ATEPCTrainer(config=atepc_config_english,  # set config=None to use the default config
                                dataset=chinese_sets,  # file or dir; dataset file(s) are detected automatically
                                save_checkpoint=True,  # set model_path_to_save=None to avoid saving the model
                                auto_device=True  # automatically choose CUDA or CPU
                                )
|
from scipy.special import softmax
import numpy as np
"""
RNN and LSTM
https://colab.research.google.com/github/mrm8488/shared_colab_notebooks/blob/master/basic_self_attention_.ipynb
Attentions
https://www.youtube.com/watch?v=S27pHKBEp30
Position Embeddings
https://www.youtube.com/watch?v=dichIcUZfOw
@ symbol
https://www.python.org/dev/peps/pep-0465/#semantics
"""
x = np.array([
[1, 0, 1, 0], # Input 1
[0, 2, 0, 2], # Input 2
[1, 1, 1, 1], # Input 3
[1, 2, 1, 2], # Input 4
[2, 2, 2, 2], # Input 5
])
seql, emb = x.shape
w_query = np.array([
[1, 0, 1, 1, 0, 1],
[1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1],
[0, 1, 1, 0, 1, 1]
])
w_key = np.array([
[0, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 0]
])
w_value = np.array([
[0, 2, 0, 0, 2, 0],
[0, 3, 0, 0, 3, 0],
[1, 0, 3, 1, 0, 3],
[1, 1, 0, 1, 1, 0]
])
key = []
query = []
value = []
# Generate Query, Key, and Value
for i in range(len(x)):
    # Output dim: 1x4 @ 4x6 = 1x6 (later reshaped into 2 heads of size 3)
query_i = x[i] @ w_query
key_i = x[i] @ w_key
value_i = x[i] @ w_value
query.append(query_i)
key.append(key_i)
value.append(value_i)
# print(query)
# print(key)
# print(value)
# exit()
heads = 2
head_dim = 3
# Convert list into numpy array
query = np.stack(query).reshape((seql, heads, head_dim))
key = np.stack(key).reshape((seql, heads, head_dim))
value = np.stack(value).reshape((seql, heads, head_dim))
query = np.transpose(query, (1, 0, 2))
key = np.transpose(key, (1, 0, 2))
value = np.transpose(value, (1, 0, 2))
# Transpose key again to get relevance score per head
key = np.transpose(key, (0, 2, 1))
# Generate the relevance score
relevance = query @ key
# Apply softmax to get probability scores of relevance
relevance_scores = softmax(relevance, axis=-1)
print(relevance_scores.round(decimals=2))
out = relevance_scores @ value
print(out)
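# A short follow-up sketch: merging the per-head outputs back into a single
# (seq_len, heads * head_dim) matrix, which is what a multi-head attention
# layer would feed into its output projection.
merged = np.transpose(out, (1, 0, 2)).reshape(seql, heads * head_dim)
print(merged.shape)  # (5, 6)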
|
#!/usr/bin/env python3
import argparse, importlib, inputs
from datetime import datetime
def import_solver(day, part):
try:
return importlib.import_module(f'solvers.advent_{day.zfill(2)}_{part}')
except ImportError as e:
raise Exception(f'Cannot find solver for day {day} part {part}. Reason: {str(e)}')
def print_table(lines):
    width = max(map(len, lines))
separator = ''.ljust(width, '-') + '\n'
print(separator.join([ line + '\n' for line in lines ]), end='')
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument('day')
argp.add_argument('--part', '-p', default='1')
argp.add_argument('--submit', '-s', action='store_true')
args = argp.parse_args()
try:
solver = import_solver(args.day, args.part)
input = inputs.get_input(args.day)
time_start = datetime.now()
output = solver.solve(input)
time_end = datetime.now()
print_table([
str(output),
f'solved in {str(time_end - time_start)}'
])
if args.submit:
inputs.submit_answer(args.day, args.part, output)
print('⭐️')
except Exception as e:
print(f'{type(e).__name__}: {str(e)}')
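# For reference, a hypothetical solver module (e.g. solvers/advent_01_1.py)
# only needs to expose a solve(input) function; a sketch of that interface,
# not an actual puzzle solution:
#
#     def solve(input):
#         numbers = [int(line) for line in input.splitlines() if line]
#         return sum(numbers)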
|
# -*- encoding: utf-8 -*-
import random
# This file contains the definitions of the Snake and Field classes,
# together with a few helper constants and functions.
# The playing field consists of a grid of squares (blocks).
WIDTH = 50   # field width (number of blocks)
HEIGHT = 30  # field height (number of blocks)
BLOCK = 20   # size of one block in screen pixels
# Helper functions
def brick(canvas, x, y):
"""Ustvari graficni element, ki predstavlja opeko (na robu polja)."""
return canvas.create_rectangle(x*BLOCK, y*BLOCK, (x+1)*BLOCK, (y+1)*BLOCK,
fill='brown', width=2)
def mouse(canvas, x, y):
"""Ustvari graficni element, ki predstavlja misko."""
return canvas.create_oval(x*BLOCK+2, y*BLOCK+2, (x+1)*BLOCK-2, (y+1)*BLOCK-2,
fill='gray')
# Classes
class Snake():
"""Razred, ki predstavlja kaco.
Vse kace v igrici so podrazredi tega razreda. Objekt razreda Snake
ima naslednje atribute:
field -- objekt razreda Field, v katerem je kaca
(dx, dy) -- smerni vektor premikanja, eden od (-1,0), (1,0), (0,-1), (0,1)
grow -- za koliko clenkov mora kaca zrasti
color_head -- barva glave
color_tail -- barva repa
coords -- seznam koordinat clenkov kace (glava je coords[0])
cells -- seznam graficnih elementov, ki predstavljajo kaco
"""
def __init__(self, field, color_head, color_tail, x, y, dx, dy):
self.field = field
self.dx = dx
self.dy = dy
self.grow = 0
self.color_head = color_head
self.color_tail = color_tail
self.coords = []
self.cells = []
# the tail
for k in range(2, 0, -1):
self.add_cell(x - k * self.dx, y - k * self.dy)
self.add_cell(x, y) # the head
def add_cell(self, x, y):
"""Dodaj kaci novo celico."""
cell = self.field.canvas.create_oval(
x*BLOCK, y*BLOCK, (x+1)*BLOCK, (y+1)*BLOCK, fill=self.color_head)
if len(self.cells) > 0:
self.field.canvas.itemconfigure(
self.cells[0], fill=self.color_tail)
self.coords.insert(0, (x, y))
self.cells.insert(0, cell)
def turn_left(self):
"""Obrni kaco v levo."""
(self.dx, self.dy) = (-self.dy, self.dx)
def turn_right(self):
"""Obrni kaco v desno."""
(self.dx, self.dy) = (self.dy, -self.dx)
def move(self):
"""Premakni kaco v smer, v katero je obrnjena.
Ce je na polju, kamor se premaknemo, miska, jo pojemo.
Ce je polje zasedeno z drugo kaco ali opeko, se ne zgodi nic."""
(x, y) = self.coords[0]
x += self.dx
y += self.dy
if self.field.is_mouse(x, y):
self.grow = 1
self.field.remove_mouse(x, y)
if self.field.is_empty(x, y):
if self.grow > 0:
self.grow -= 1
self.add_cell(x, y)
else:
# Reuse the last one
self.coords.pop()
self.coords.insert(0, (x, y))
self.field.canvas.itemconfigure(
self.cells[0], fill=self.color_tail)
cell = self.cells.pop()
self.field.canvas.coords(
cell, x*BLOCK, y*BLOCK, (x+1)*BLOCK, (y+1)*BLOCK)
self.field.canvas.itemconfigure(cell, fill=self.color_head)
self.cells.insert(0, cell)
def turn(self):
"""Po potrebi obrni kaco.
Ta funkcija ne dela nicesar in jo je treba redefinirati v podrazredu,
ki predstavlja kaco, glej prilozene primere."""
pass
class Field():
"""Igralno polje, po katerem se gibljejo kace.
Atributi:
width -- sirina polja
height -- visina polja
snakes -- seznam kac, ki so v polju
mice -- slovar, ki slika koordinate misk v id-je pripadajocih graficnih objektov
"""
def __init__(self, canvas, width, height):
self.width = width
self.height = height
self.canvas = canvas
self.snakes = []
self.mice = {}
self.bricks = []
# The bricks
for i in range(width):
self.bricks.append(brick(canvas, i, 0))
self.bricks.append(brick(canvas, i, height-1))
for j in range(1, height-1):
self.bricks.append(brick(canvas, 0, j))
self.bricks.append(brick(canvas, width-1, j))
def add_snake(self, s):
"""Dodaj novo kaco v polje."""
s.id = len(self.snakes)
self.snakes.append(s)
def is_mouse(self, x, y):
"""Ali je na lokaciji (x,y) miska?"""
return (0 < x < self.width-1 and
0 < y < self.height-1 and
(x, y) in self.mice)
def is_empty(self, x, y):
"""Ali je polje (x,y) prazno?"""
if (0 < x < self.width-1 and
0 < y < self.height-1 and
(x, y) not in self.mice):
for s in self.snakes:
if (x, y) in s.coords:
return False
return True
else:
return False
def find_empty(self):
"""Nakljucno izberi prazno polje, poskusi najvec petkrat."""
for i in range(5):
x = random.randint(1, self.width-2)
y = random.randint(1, self.height-2)
if self.is_empty(x, y):
return (x, y)
return (None, None)
def new_mouse(self):
"""Dodaj misko na nakljucno izbrano polje."""
(x, y) = self.find_empty()
if x and y:
self.mice[(x, y)] = mouse(self.canvas, x, y)
def remove_mouse(self, x, y):
"""Odstrani misko na lokaciji (x,y)."""
m = self.mice.get((x, y))
if m:
self.canvas.delete(m)
del self.mice[(x, y)]
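# A minimal example subclass, as referenced by Snake.turn() above: a snake
# that occasionally turns in a random direction. This is only an illustrative
# sketch; the script that creates the canvas, instantiates snakes and drives
# the game loop is assumed to live elsewhere.
class RandomSnake(Snake):
    """A snake that randomly turns left or right from time to time."""
    def turn(self):
        r = random.random()
        if r < 0.1:
            self.turn_left()
        elif r < 0.2:
            self.turn_right()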
|
from django.urls import path
from users.views import profile
app_name = "users"
urlpatterns = [
# pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
path("profile", profile, name="profile"),
]
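# For context, a sketch of how a project-level URLconf might mount this app;
# the "config/urls.py" module path below is hypothetical.
#
#     # config/urls.py
#     from django.urls import include, path
#
#     urlpatterns = [
#         path("users/", include("users.urls", namespace="users")),
#     ]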
|
from __future__ import division
from __future__ import print_function
import os
import pickle
import random
import logging
import torch
import torch.nn as nn
import torch.optim as optim
torch.backends.cudnn.enabled = False
import torch.utils.data as data
from Models.Encoder import Encoder
from Models.EncoderTxtCtx import EncoderTxtCtx
from Models.PointerDecoder import PointerDecoder
from dataset import Dataset, collate_fn
from utils import *
from trainer import Trainer
from config import parse_args
try:
from apex import amp
APEX_AVAILABLE = True
except ModuleNotFoundError:
APEX_AVAILABLE = False
def main(args):
global APEX_AVAILABLE
# global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
# file logger
if not os.path.exists(args.save):
os.mkdir(args.save)
dfh = logging.FileHandler(os.path.join(args.save, args.expname)+'_DEBUG.log', mode='w')
dfh.setLevel(logging.DEBUG)
dfh.setFormatter(formatter)
logger.addHandler(dfh)
fh = logging.FileHandler(os.path.join(args.save, args.expname)+'.log', mode='w')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
# console logger
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if args.debug:
args.input_dim = 8
args.hidden_dim = 16
args.batch_size = 5
args.num_workers = 0
args.lr = 0.05
args.num_of_layers = 1
args.dropout = 0
args.early_stopping = 10
args.max_seq_len = 13
# args.disable_prog_bar = True
args.shuffle_path = False
args.data = "data_50_new/"
logger.debug("Running on toy configuration")
# argument validation
args.cuda = args.cuda and torch.cuda.is_available()
    if args.cuda:
        logger.debug("CUDA is available")
    if APEX_AVAILABLE and args.cuda:
        logger.debug("APEX is available")
    else:
        APEX_AVAILABLE = False
device = torch.device("cuda:0" if args.cuda else "cpu")
logger.debug(args)
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.enabled = True  # note: has been observed to cause bugs in Tensor.topk with NaN inputs
if not os.path.exists(args.save):
os.makedirs(args.save)
path_vocab = torch.load(os.path.join(args.data, 'path.pth'))
logger.debug('==> path vocabulary size: {} '.format(path_vocab.size()))
src_tgt_vocab = torch.load(os.path.join(args.data, 'src_tgt.pth'))
logger.debug('==> src_tgt vocabulary size: {} '.format(src_tgt_vocab.size()))
position_vocab = torch.load(os.path.join(args.data, 'position.pth'))
logger.debug('==> position vocabulary size: {} '.format(position_vocab.size()))
if args.context_mode == 'txt':
ctx_vocab = torch.load(os.path.join(args.data, 'ctx.pth'))
logger.debug('==> context vocabulary size: {} '.format(ctx_vocab.size()))
train_dir = os.path.join(args.data, 'train/')
dev_dir = os.path.join(args.data, 'dev/')
test_dir = os.path.join(args.data, 'test/')
train_params = {'batch_size': args.batch_size,
'shuffle': True,
'num_workers': args.num_workers,
'collate_fn': collate_fn,
'pin_memory': args.pin_memory}
test_params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': 0,
'collate_fn': collate_fn,
'pin_memory': args.pin_memory}
train_set = Dataset(train_dir, device, args)
train_generator = data.DataLoader(train_set, **train_params)
logger.debug('==> Size of train data: {} '.format(len(train_set)))
dev_set = Dataset(dev_dir, device, args)
dev_generator = data.DataLoader(dev_set, **test_params)
logger.debug('==> Size of dev data: {} '.format(len(dev_set)))
test_set = Dataset(test_dir, device, args)
test_generator = data.DataLoader(test_set, **test_params)
logger.debug('==> Size of test data: {} '.format(len(test_set)))
if args.context_mode == 'txt':
encoder = EncoderTxtCtx(
path_vocab_size=path_vocab.size(),
src_tgt_vocab_size=src_tgt_vocab.size(),
position_vocab_size=position_vocab.size(),
ctx_vocab_size=ctx_vocab.size(),
in_dim=args.input_dim,
h_dim=args.hidden_dim,
num_layers=args.num_of_layers,
dropout=args.dropout,
device=device,
)
else:
encoder = Encoder(
path_vocab_size=path_vocab.size(),
src_tgt_vocab_size=src_tgt_vocab.size(),
position_vocab_size=position_vocab.size(),
in_dim=args.input_dim,
h_dim=args.hidden_dim,
num_layers=args.num_of_layers,
dropout=args.dropout,
device=device,
ctx_mode=args.context_mode
)
decoder = PointerDecoder(
in_dim=args.hidden_dim,
h_dim=args.hidden_dim,
num_of_layers=args.num_of_layers,
device=device,
dropout=args.dropout,
use_attention=args.attention
)
logger.debug('==> Total trainable parameters: {} '.format((count_parameters(encoder) + count_parameters(decoder))))
encoder.to(device)
decoder.to(device)
parameters = filter(lambda p: p.requires_grad, list(encoder.parameters()) + list(decoder.parameters()))
if args.optim == 'adam':
optimizer = optim.Adam(parameters, lr=args.lr, weight_decay=args.wd)
elif args.optim == 'adagrad':
optimizer = optim.Adagrad(parameters, lr=args.lr, weight_decay=args.wd)
elif args.optim == 'sgd':
optimizer = optim.SGD(parameters, lr=args.lr, weight_decay=args.wd, momentum=args.momentum)
criterion = nn.CrossEntropyLoss(ignore_index=Constants.PAD)
criterion.to(device)
if APEX_AVAILABLE:
[encoder, decoder], optimizer = amp.initialize(
[encoder, decoder], optimizer, opt_level="O2",
keep_batchnorm_fp32=True, loss_scale="dynamic"
)
# create trainer object for training and testing
trainer = Trainer(args, encoder, decoder, optimizer, criterion, device, args.disable_prog_bar)
best = -float('inf')
counter = 0
epoch = 0
if args.load_checkpoint != '' and os.path.exists(args.load_checkpoint):
logger.debug('==> Loading checkpoint "{}" from disk'.format(args.load_checkpoint))
checkpoint = torch.load(args.load_checkpoint, map_location=torch.device(device))
trainer.encoder.load_state_dict(checkpoint['encoder'])
trainer.decoder.load_state_dict(checkpoint['decoder'])
if APEX_AVAILABLE:
amp.load_state_dict(checkpoint['amp'])
best = checkpoint['dev_acc']
epoch = checkpoint['epoch'] + 1
trainer.epoch = checkpoint['epoch'] + 1
if args.inference:
logger.debug('==> Running inference on test set')
test_loss, res_test = trainer.test(test_generator)
test_precision, test_recall, test_f1, test_acc = res_test['precision'], res_test['recall'], res_test['f1'], \
res_test['acc']
logger.info(
'==> Epoch {}, Test\t\tLoss: {:0.3f}\tAcc: {:0.3f}'.format(
epoch + 1, test_loss, test_acc))
mapping = dict()
for i, (project_name, idx) in enumerate(res_test['ids']):
if project_name not in mapping:
mapping[project_name] = list()
mapping[project_name].append((idx, res_test['predicted_ops'][i]))
for k in mapping.keys():
_, mapping[k] = zip(*sorted(mapping[k], key=lambda t: t[0]))
with open(os.path.join(args.save, args.expname + "_test_results.pickle"), "wb") as f:
pickle.dump(mapping, f, protocol=pickle.HIGHEST_PROTOCOL)
return
while True:
train_loss, res_train = trainer.train(train_generator)
train_precision, train_recall, train_f1, train_acc = res_train['precision'], res_train['recall'], res_train['f1'], res_train['acc']
logger.info('==> Epoch {}, Train\t\tLoss: {:0.3f}\tAcc: {:0.3f}'.format(
epoch + 1, train_loss, train_acc))
dev_loss, res_dev = trainer.test(dev_generator)
dev_precision, dev_recall, dev_f1, dev_acc = res_dev['precision'], res_dev['recall'], res_dev['f1'], res_dev['acc']
logger.info('==> Epoch {}, Dev\t\tLoss: {:0.3f}\tAcc: {:0.3f}'.format(
epoch + 1, dev_loss, dev_acc))
if best < dev_acc:
best = dev_acc
checkpoint = {
'encoder': trainer.encoder.state_dict(),
'decoder': trainer.decoder.state_dict(),
'optim': trainer.optimizer,
'dev_acc': dev_acc,
'args': args, 'epoch': epoch,
'amp': amp.state_dict() if APEX_AVAILABLE else None
}
logger.debug('==> New optimum found, checkpointing everything now...')
torch.save(checkpoint, '%s.pt' % os.path.join(args.save, args.expname))
mapping = dict()
for i, (project_name, idx) in enumerate(res_dev['ids']):
if project_name not in mapping:
mapping[project_name] = list()
mapping[project_name].append((idx, res_dev['predicted_ops'][i]))
for k in mapping.keys():
_, mapping[k] = zip(*sorted(mapping[k], key=lambda t: t[0]))
with open(os.path.join(args.save, args.expname + "_dev_results.pickle"), "wb") as f:
pickle.dump(mapping, f, protocol=pickle.HIGHEST_PROTOCOL)
counter = 0
else:
counter += 1
logger.debug('')
if counter >= args.early_stopping:
logger.debug('==> {} epochs have been passed without Acc improvement, running inference on test:'.format(counter))
checkpoint = torch.load('%s.pt' % os.path.join(args.save, args.expname))
            trainer.encoder.load_state_dict(checkpoint['encoder'])
            trainer.decoder.load_state_dict(checkpoint['decoder'])
            test_loss, res_test = trainer.test(test_generator)
            test_acc = res_test['acc']
            checkpoint['test_accuracy'] = test_acc
            torch.save(checkpoint, '%s.pt' % os.path.join(args.save, args.expname))
            logger.info('==> Epoch {}, Test\t\tLoss: {:0.3f}\tAcc: {:0.3f}'.format(
                checkpoint['epoch'] + 1, test_loss, test_acc))
return
epoch += 1
if __name__ == "__main__":
global args
args = parse_args()
main(args)
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ceilometer_contexts as contexts
from test_utils import CharmTestCase
TO_PATCH = [
'config',
'relation_get',
'relation_ids',
'related_units',
]
class CeilometerContextsTest(CharmTestCase):
def setUp(self):
super(CeilometerContextsTest, self).setUp(contexts, TO_PATCH)
self.config.side_effect = self.test_config.get
self.relation_get.side_effect = self.test_relation.get
def tearDown(self):
super(CeilometerContextsTest, self).tearDown()
def test_ceilometer_service_context(self):
self.relation_ids.return_value = ['ceilometer-service:0']
self.related_units.return_value = ['ceilometer/0']
data = {
'debug': True,
'verbose': False,
'rabbitmq_host': 'foo',
'rabbitmq_hosts': 'foo,bar',
'rabbitmq_user': 'bar',
'rabbitmq_password': 'baz',
'rabbitmq_virtual_host': 'openstack',
'rabbit_ssl_ca': None,
'rabbit_ssl_port': None,
'api_version': 3,
'auth_protocol': 'http',
'auth_host': 'keystone',
'auth_port': '80',
'service_protocol': 'http',
'service_host': 'keystone',
'service_port': '80',
'signing_dir': '/var/lib/ceilometer',
'admin_domain_name': 'admin_domain',
'admin_tenant_name': 'admin',
'admin_user': 'admin',
'admin_password': 'password',
'metering_secret': 'secret',
'polling_batch_size': 50,
}
self.test_relation.set(data)
self.assertEqual(contexts.CeilometerServiceContext()(), data)
def test_ceilometer_service_context_not_related(self):
self.relation_ids.return_value = []
self.assertEqual(contexts.CeilometerServiceContext()(), {})
def test_ceilometer_context(self):
self.assertEqual(contexts.CeilometerAgentContext()(), {
'polling_interval': 300,
'enable_all_pollsters': False,
})
def test_ceilometer_context_enable_all_pollsters(self):
self.test_config.set('enable-all-pollsters', True)
self.assertEqual(contexts.CeilometerAgentContext()(), {
'polling_interval': 300,
'enable_all_pollsters': True,
})
def test_ceilometer_context_polling_interval(self):
self.test_config.set('polling-interval', 600)
self.assertEqual(contexts.CeilometerAgentContext()(), {
'polling_interval': 600,
'enable_all_pollsters': False,
})
|
import os
import shutil
import xml.etree.ElementTree as ET
def copy_sources_from_proj(root_path, csproj_name):
csproj_path = os.path.join(root_path, csproj_name)
tree = ET.parse(csproj_path)
for e in tree.getroot().iter("{http://schemas.microsoft.com/developer/msbuild/2003}Compile"):
if 'Include' in e.attrib:
file = e.attrib['Include']
            print(file)
            file_parts = os.path.split(file)
            if len(file_parts[0]) > 0:
                try:
                    os.makedirs(file_parts[0])
                except OSError:
                    # directory already exists
                    pass
            shutil.copy(os.path.join(root_path, file), file)
copy_sources_from_proj(
r'..\..\..\Newtonsoft.Json\Src\Newtonsoft.Json.Tests',
r'Newtonsoft.Json.Tests.Unity3D.csproj')
|
""" KNN Prediction View"""
__docformat__ = "numpy"
import logging
from typing import Union, Optional, List
import pandas as pd
from matplotlib import pyplot as plt
from gamestonk_terminal.common.prediction_techniques import knn_model
from gamestonk_terminal.common.prediction_techniques.pred_helper import (
plot_data_predictions,
print_pretty_prediction,
)
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
# pylint:disable=too-many-arguments
@log_start_end(log=logger)
def display_k_nearest_neighbors(
ticker: str,
data: Union[pd.DataFrame, pd.Series],
n_neighbors: int,
n_input_days: int,
n_predict_days: int,
test_size: float,
end_date: str = "",
no_shuffle: bool = True,
time_res: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display predictions using knn
Parameters
----------
ticker : str
        Stock ticker symbol
data : Union[pd.DataFrame, pd.Series]
Data to use for ML
n_neighbors : int
Number of neighbors for knn
n_input_days : int
Length of input sequences
n_predict_days : int
Number of days to predict
test_size : float
Fraction of data for testing
end_date : str, optional
End date for backtesting, by default ""
    no_shuffle : bool, optional
        Flag to avoid shuffling the data randomly, by default True
time_res : str
Resolution for data, allowing for predicting outside of standard market days
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
(
forecast_data_df,
preds,
y_valid,
y_dates_valid,
scaler,
) = knn_model.get_knn_model_data(
data, n_input_days, n_predict_days, n_neighbors, test_size, end_date, no_shuffle
)
if forecast_data_df.empty:
console.print("Issue performing data prep and prediction")
return
if time_res:
forecast_data_df.index = pd.date_range(
data.index[-1], periods=n_predict_days + 1, freq=time_res
)[1:]
print_pretty_prediction(forecast_data_df[0], data.values[-1])
plot_data_predictions(
data=data,
preds=preds,
y_valid=y_valid,
y_dates_valid=y_dates_valid,
scaler=scaler,
title=f"KNN Model with {n_neighbors} Neighbors on {ticker}",
forecast_data=forecast_data_df,
n_loops=1,
time_str=time_res,
external_axes=external_axes,
)
console.print("")
|
import numpy as np
import pickle
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from mvn.utils.op import translate_quaternion_to_euler
angle_names = [
'knee_angle_r',
'hip_flexion_r',
'hip_adduction_r',
'hip_rotation_r',
'hip_flexion_l',
'hip_adduction_l',
'hip_rotation_l',
'knee_angle_l',
'elbow_flexion_r',
'shoulder_flex_r',
'shoulder_add_r',
'shoulder_rot_r',
'shoulder_flex_l',
'shoulder_add_l',
'shoulder_rot_l',
'elbow_flexion_l'
]
angle_names_overview = [
'knee_angle',
'hip_flexion',
'hip_adduction',
'hip_rotation',
'shoulder_flex',
'shoulder_add',
'shoulder_rot',
'elbow_flexion'
]
angle_pairs = [
(0, 7), (1, 4), (2, 5), (3, 6),
(9, 12), (10, 13), (11, 14), (8, 15)
]
def main(gt_angles_trajs, pred_angles_trajs):
if pred_angles_trajs.shape[1] == 32:
eulers = []
for a in pred_angles_trajs:
eulers.append(translate_quaternion_to_euler(list(a)))
pred_angles_trajs = np.array(eulers)
print('<Angle>: <MSE> | <MAE>')
maes = []
mses = []
for idx, an in enumerate(angle_names):
mae = mean_absolute_error(gt_angles_trajs[:, idx], pred_angles_trajs[:, idx])
mse = mean_squared_error(gt_angles_trajs[:, idx], pred_angles_trajs[:, idx])
maes.append(mae)
mses.append(mse)
        print(f'{an}: {mse:.3f} rad^2 | {mae:.3f} rad ({np.rad2deg(mae):.3f} deg)')
print('-------------------')
print('Overview')
for idx, p in enumerate(angle_pairs):
avg_mae = (maes[p[0]] + maes[p[1]]) / 2
avg_mse = (mses[p[0]] + mses[p[1]]) / 2
        print(f'{angle_names_overview[idx]}: {avg_mse:.3f} rad^2 | {avg_mae:.3f} rad ({np.rad2deg(avg_mae):.3f} deg)')
print('-------------------')
print('Average:')
mae = mean_absolute_error(gt_angles_trajs, pred_angles_trajs)
mse = mean_squared_error(gt_angles_trajs, pred_angles_trajs)
r2 = r2_score(gt_angles_trajs, pred_angles_trajs)
print(f'MAE: {mae} rad ({np.rad2deg(mae):.3f} deg)')
    print(f'MSE: {mse} rad^2')
print(f'R2: {r2}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Save predictions for easier analysis pipeline')
parser.add_argument('--records', '-r', required=True, type=str, help='prediction records file')
args = parser.parse_args()
labels = np.load('../baseline-angles/roofing-multiview-v2.npy', allow_pickle=True).item()
test_subjects = ['S08', 'S10']
test_subjects = list(labels['subject_names'].index(x) for x in test_subjects)
indices = []
mask = np.isin(labels['table']['subject_idx'], test_subjects, assume_unique=True)
indices.append(np.nonzero(mask)[0][::1])
labels['table'] = labels['table'][np.concatenate(indices)]
angles_gt = np.deg2rad(labels['table']['angles'][:, :16])
with open(args.records, 'rb') as infile:
data = pickle.load(infile)
angles_pred = data['angles']
main(angles_gt, angles_pred)
|
from pydantic import BaseModel
class UserIp(BaseModel):
address: str
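# A tiny usage sketch (the address is from a documentation range); pydantic
# validates the field type on construction.
#
#     ip = UserIp(address="203.0.113.7")
#     print(ip.address)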
|
from app.common import settings
class FlaskConfiguration:
ENV = settings.app_env
DEBUG = ENV == "development"
API_TITLE = settings.app_name
API_VERSION = settings.app_version
MONGODB_SETTINGS = {
"host": settings.mongodb_uri,
}
OPENAPI_VERSION = settings.openapi_version
OPENAPI_URL_PREFIX = settings.openapi_url_prefix
OPENAPI_SWAGGER_UI_PATH = settings.openapi_swagger_ui_path
OPENAPI_SWAGGER_UI_URL = settings.openapi_swagger_ui_url
OPENAPI_REDOC_PATH = settings.openapi_redoc_path
OPENAPI_REDOC_URL = settings.openapi_redoc_url
API_SPEC_OPTIONS = {
"info": {"title": API_TITLE, "description": settings.app_description},
}
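# A minimal sketch of how this class is typically applied to an app instance
# (the factory below is hypothetical; the project's real app setup may differ):
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.config.from_object(FlaskConfiguration)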
|
"""
* Create a program to convert a variable of one type to another.
* Create a variable named x and assign a string value '5' to it.
* Create another variable y and assign '10' to it.
* Convert the value stored in x and y to integers and multiply them.
* Print the result.
"""
x = '5'
y = '10'
x = int(x)
y = int(y)
product = x * y
print(product) # Output: 50
|
"""
smorest_sfs.modules.email_templates.schemas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Schemas for the email template module
"""
from marshmallow import Schema, fields
from smorest_sfs.extensions.marshal import SQLAlchemyAutoSchema
from smorest_sfs.extensions.marshal.bases import BaseMsgSchema, BasePageSchema
from . import models
class EmailTemplateSchema(SQLAlchemyAutoSchema):
"""
    Serialization schema for email templates
"""
id_ = fields.Int(data_key="id", dump_only=True)
class Meta:
model = models.EmailTemplate
class EmailTemplatePageSchema(BasePageSchema):
"""电子邮件模板的分页"""
data = fields.List(fields.Nested(EmailTemplateSchema))
class EmailTemplateItemSchema(BaseMsgSchema):
"""电子邮件模板的单项"""
data = fields.Nested(EmailTemplateSchema)
class EmailTemplateOptsSchema(Schema):
"""电子邮件模板的选项"""
id_ = fields.Int(data_key="id")
class Meta:
fields = ("id_", "name")
class EmailTemplateListSchema(BaseMsgSchema):
"""电子邮件模板的选项列表"""
data = fields.List(fields.Nested(EmailTemplateOptsSchema))
|
import os
import getpass
import socket
import subprocess
from urllib.parse import urlunparse, urlparse
from tornado import web, gen, httpclient, process, ioloop
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
from nbserverproxy.handlers import LocalProxyHandler
def detectR():
'''Detect R's version, R_HOME, and various other directories that rsession
requires.
Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp'''
cmd = ['R', '--slave', '--vanilla', '-e',
'cat(paste(R.home("home"),R.home("share"),R.home("include"),R.home("doc"),getRversion(),sep=":"))']
    p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
raise Exception('Error detecting R')
R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \
p.stdout.decode().split(':')
return {
'R_DOC_DIR': R_DOC_DIR,
'R_HOME': R_HOME,
'R_INCLUDE_DIR': R_INCLUDE_DIR,
'R_SHARE_DIR': R_SHARE_DIR,
'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME,
'RSTUDIO_DEFAULT_R_VERSION': version,
}
class AddSlashHandler(IPythonHandler):
"""Handler for adding trailing slash to URLs that need them"""
@web.authenticated
def get(self, *args):
src = urlparse(self.request.uri)
dest = src._replace(path=src.path + '/')
self.redirect(urlunparse(dest))
class RSessionProxyHandler(LocalProxyHandler):
'''Manage an RStudio rsession instance.'''
# R and RStudio environment variables required by rsession.
env = {
'RSTUDIO_LIMIT_RPC_CLIENT_UID':'998',
'RSTUDIO_MINIMUM_USER_ID':'500',
}
# rsession command. Augmented with user-identity and www-port.
cmd = [
'rsession',
'--standalone=1',
'--program-mode=server',
'--log-stderr=1',
'--session-timeout-minutes=0',
]
def initialize(self, state):
self.state = state
@property
def port(self):
"""
Allocate a random empty port for use by rstudio
"""
if 'port' not in self.state:
sock = socket.socket()
sock.bind(('', 0))
self.state['port'] = sock.getsockname()[1]
sock.close()
return self.state['port']
def rsession_uri(self):
return '{}proxy/{}/'.format(self.base_url, self.port)
@gen.coroutine
def is_running(self):
'''Check if our proxied process is still running.'''
if 'proc' not in self.state:
return False
# Check if the process is still around
proc = self.state['proc']
        if proc.proc.poll() is not None:
            self.log.info('rsession process has exited.')
return False
client = httpclient.AsyncHTTPClient()
req = httpclient.HTTPRequest('http://localhost:{}'.format(self.port))
try:
yield client.fetch(req)
self.log.debug('Got positive response from rstudio server')
        except Exception:
self.log.debug('Got negative response from rstudio server')
return False
return True
@gen.coroutine
def start_process(self):
"""
Start the rstudio process
"""
self.state['starting'] = True
try:
cmd = self.cmd + [
'--user-identity=' + getpass.getuser(),
'--www-port=' + str(self.port)
]
server_env = os.environ.copy()
# Seed RStudio's R and RSTUDIO env variables
server_env.update(self.env)
try:
r_vars = detectR()
server_env.update(r_vars)
            except Exception:
raise web.HTTPError(reason='could not detect R', status_code=500)
@gen.coroutine
def exit_callback(code):
"""
Callback when the rsessionproxy dies
"""
self.log.info('rsession process died with code {}'.format(code))
del self.state['proc']
if code != 0:
yield self.start_process()
# Runs rsession in background
proc = process.Subprocess(cmd, env=server_env)
self.log.info('Starting rsession process...')
self.state['proc'] = proc
proc.set_exit_callback(exit_callback)
for i in range(5):
if (yield self.is_running()):
self.log.info('rsession startup complete')
break
# Simple exponential backoff
                wait_time = min(1.4 ** i, 5)
self.log.debug('Waiting {} before checking if rstudio is up'.format(wait_time))
yield gen.sleep(wait_time)
else:
                raise web.HTTPError(500, 'could not start rsession in time')
finally:
self.state['starting'] = False
@gen.coroutine
@web.authenticated
def proxy(self, port, path):
if not path.startswith('/'):
path = '/' + path
# if we're in 'starting' let's wait a while
for i in range(5):
if not self.state.get('starting', False):
break
# Simple exponential backoff
            wait_time = min(1.4 ** i, 5)
self.log.debug('Waiting {} before checking if rstudio is up'.format(wait_time))
yield gen.sleep(wait_time)
else:
            raise web.HTTPError(500, 'could not start rsession in time')
# FIXME: try to not start multiple processes at a time with some locking here
if 'proc' not in self.state:
self.log.info('No existing process rsession process found')
yield self.start_process()
return (yield super().proxy(self.port, path))
def get(self, path):
return self.proxy(self.port, path)
def post(self, path):
return self.proxy(self.port, path)
def put(self, path):
return self.proxy(self.port, path)
def delete(self, path):
return self.proxy(self.port, path)
def head(self, path):
return self.proxy(self.port, path)
def patch(self, path):
return self.proxy(self.port, path)
def options(self, path):
return self.proxy(self.port, path)
def setup_handlers(web_app):
web_app.add_handlers('.*', [
(ujoin(web_app.settings['base_url'], 'rstudio/(.*)'), RSessionProxyHandler, dict(state={})),
(ujoin(web_app.settings['base_url'], 'rstudio'), AddSlashHandler)
])
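# For completeness, a sketch of the usual classic-notebook entry point that
# would register these handlers when the module is loaded as a server
# extension (the module name used in the config below is an assumption):
#
#     def load_jupyter_server_extension(nbapp):
#         setup_handlers(nbapp.web_app)
#
#     # in jupyter_notebook_config.py:
#     # c.NotebookApp.nbserver_extensions = {'nbrsessionproxy': True}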
# vim: set et ts=4 sw=4:
|
from pathlib import Path
from demisto_sdk.commands.common.handlers import YAML_Handler
from TestSuite.integration import Integration
from TestSuite.test_tools import suite_join_path
yaml = YAML_Handler()
class Script(Integration):
    # This subclass currently just forwards to Integration.__init__.
def __init__(self, tmpdir: Path, name, repo, create_unified=False):
super().__init__(tmpdir, name, repo, create_unified)
def create_default_script(self, name: str = 'sample_script'):
"""Creates a new script with basic data.
Args:
name: The name and ID of the new script, default is "sample_script".
"""
default_script_dir = 'assets/default_script'
with open(suite_join_path(default_script_dir, 'sample_script.py')) as code_file:
code = str(code_file.read())
with open(suite_join_path(default_script_dir, 'sample_script.yml')) as yml_file:
yml = yaml.load(yml_file)
yml['name'] = yml['commonfields']['id'] = name
with open(suite_join_path(default_script_dir, 'sample_script_image.png'), 'rb') as image_file:
image = image_file.read()
with open(suite_join_path(default_script_dir, 'CHANGELOG.md')) as changelog_file:
changelog = str(changelog_file.read())
with open(suite_join_path(default_script_dir, 'sample_script_description.md')) as description_file:
description = str(description_file.read())
self.build(
code=code,
yml=yml,
image=image,
changelog=changelog,
description=description
)
|
#coding=utf-8
"""
Code based on Wang GUOJUN.
Licensed under MIT License [see LICENSE].
"""
import sys
sys.path.append("..")
import os
import time
import numpy as np
import torch
import rospy
from ros_numpy import point_cloud2
from sensor_msgs.msg import PointCloud2, PointField
from numpy.lib.recfunctions import structured_to_unstructured
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
from pyquaternion import Quaternion
from visualization_msgs.msg import Marker,MarkerArray
import argparse
from torch2trt import TRTModule
from params.configs import cfg,cfg_from_yaml_file
from core import (build_target_assigner,build_anchor_generator,
build_voxel_generator,build_box_coder)
from libs.ops import box_np_ops
from data.preprocess import voxel_padding
from models import build_network
class SecondModel:
def __init__(self,
trt_dir,
weights_file,
config_path,
max_voxel_num = 12000,
tensorrt = True,
anchors_area= 0.01):
self.trt_dir = trt_dir
self.config_path = config_path
self.anchors_area = anchors_area
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#load config
config = cfg_from_yaml_file(config_path,cfg)
self.config = config
self.model_cfg = config.MODEL
voxel_cfg = config.VOXEL_GENERATOR
classes_cfg= config.TARGET_ASSIGNER.ANCHOR_GENERATORS
#build generators
self.anchor_generator = build_anchor_generator(classes_cfg[0])
self.voxel_generator = build_voxel_generator(voxel_cfg)
# build parameter
self.voxel_size=self.voxel_generator.voxel_size
self.grid_size=self.voxel_generator.grid_size
self.pc_range =self.voxel_generator.point_cloud_range
self.max_voxel_num=config.TRAIN_INPUT_READER.MAX_NUMBER_OF_VOXELS
out_size_factor = self.model_cfg.BACKBONE.layer_strides[0] //self.model_cfg.BACKBONE.upsample_strides[0]
feature_map_size = self.grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
self.anchors = self.anchor_generator.generate(feature_map_size).reshape((1, -1, 7))
self.anchors_bv = box_np_ops.rbbox2d_to_near_bbox(self.anchors[0,:][:,[0, 1, 3, 4, 6]])
# buld network
net = self.build_network().to(self.device)
#load checkpoint
state_dict=torch.load(weights_file)
net.load_state_dict(state_dict,strict = False)
#use tensorrt
if tensorrt:
pfn_trt = TRTModule()
pfn_trt.load_state_dict(torch.load(os.path.join(trt_dir,'pfn.trt')))
rpn_trt = TRTModule()
rpn_trt.load_state_dict(torch.load(os.path.join(trt_dir,'backbone.trt')))
net.pfn = pfn_trt
net.rpn = rpn_trt
self.net=net.eval()
def build_network(self):
######################
# BUILD TARGET ASSIGNER
######################
bv_range = self.voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = build_box_coder(self.config.BOX_CODER)
target_assigner_cfg = self.config.TARGET_ASSIGNER
target_assigner = build_target_assigner(target_assigner_cfg,
bv_range, box_coder)
######################
# BUILD NET
######################
self.model_cfg.XAVIER = True
net = build_network(self.model_cfg,
self.voxel_generator,
target_assigner)
return net
def predict(self,pointclouds):
t0 = time.time()
ret = self.voxel_generator.generate(pointclouds, max_voxels=self.max_voxel_num)
voxels = ret[0]
coords = ret[1]
num_points = ret[2]
voxels, num_points, coords, voxel_mask = voxel_padding(voxels, num_points,
coords, max_voxel_num=self.max_voxel_num)
example = {
"anchors": self.anchors,
"voxels": voxels,
"num_points": num_points,
"coordinates": coords,
'voxel_mask': voxel_mask,
"metadata": [{"image_idx": '000000'}]}
#build anchors mask
if self.anchors_area >= 0:
            # Determine, for each grid-map cell, whether it contains any (non-empty) pillars
dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
coords, tuple(self.grid_size[::-1][1:]))
dense_voxel_map = dense_voxel_map.cumsum(0)
dense_voxel_map = dense_voxel_map.cumsum(1)
            # Count how many non-empty pillars each BEV anchor covers
            anchors_area = box_np_ops.fused_get_anchors_area(
                dense_voxel_map, self.anchors_bv, self.voxel_size, self.pc_range, self.grid_size)
            anchors_mask = anchors_area >= self.anchors_area
if anchors_mask.sum() < 1:
                anchors_mask = np.zeros(anchors_area.shape[0], dtype=bool)
print("anchors_mask is zero")
example['anchors_mask'] = anchors_mask.reshape(-1,1)
# turn torch list
example_list = example_to_tensorlist_with_batch(example, self.config, device = self.device)
#inference
with torch.no_grad():
b1 = time.time()
boxes,scores = self.net(example_list)[0][:2]
spf = time.time()-b1
try:
boxes=boxes.detach().cpu().numpy()
scores=scores.detach().cpu().numpy()
except:
pass
print("current frame process time is {:.3f}ms".format((time.time()-t0)*1000))
print('second/frame:{:3f}ms'.format(spf*1000))
return boxes,scores
class SecondROS:
def __init__(self,
trt_dir,
weights_file,
config_path,
is_tensorrt=True,
anchors_area =0.1):
rospy.init_node("second_ros")
print("TensorRT Engine: {}.".format(is_tensorrt))
# Subscriber
self.model = SecondModel(trt_dir=trt_dir,
weights_file = weights_file,
config_path = config_path,
tensorrt = is_tensorrt,
anchors_area = anchors_area)
print("Waiting for ROS topic: /raw_cloud")
self.sub_lidar = rospy.Subscriber("/raw_cloud",PointCloud2,self.lidar_callback,queue_size = 1)
# Publisher
self.pub_bbox = rospy.Publisher("/boxes", BoundingBoxArray, queue_size=1)
self.pub_text=rospy.Publisher("/scores",MarkerArray,queue_size=0)
self.pub_cloud = rospy.Publisher("/cloud_filtered", PointCloud2, queue_size=0)
rospy.spin()
def lidar_callback(self,msg):
pc_arr=point_cloud2.pointcloud2_to_array(msg)
pc_arr = structured_to_unstructured(pc_arr)
#print(pc_arr.shape)
pc_arr=pc_arr.reshape(-1,4)
lidar_boxes,lidar_scores = self.model.predict(pc_arr)
#print(lidar_boxes)
# points.dtype=[('x', np.float32),('y', np.float32),('z', np.float32),('intensity', np.float32)]
# cloud_msg=point_cloud2.array_to_pointcloud2(points,rospy.Time.now(),"rslidar")
if lidar_boxes is not None:
num_detects = len(lidar_boxes) #if len(lidar_boxes)<=10 else 10
arr_bbox = BoundingBoxArray()
arr_score=MarkerArray()
for i in range(num_detects):
bbox = BoundingBox()
bbox.header.frame_id = msg.header.frame_id
bbox.header.stamp = rospy.Time.now()
bbox.pose.position.x = float(lidar_boxes[i][0])
bbox.pose.position.y = float(lidar_boxes[i][1])
#bbox.pose.position.z = float(lidar_boxes[i][2])
bbox.pose.position.z = float(lidar_boxes[i][2]) + float(lidar_boxes[i][5]) / 2
bbox.dimensions.x = float(lidar_boxes[i][3]) # width
bbox.dimensions.y = float(lidar_boxes[i][4]) # length
bbox.dimensions.z = float(lidar_boxes[i][5]) # height
q = Quaternion(axis=(0, 0, 1), radians=float(-lidar_boxes[i][6]))
bbox.pose.orientation.x = q.x
bbox.pose.orientation.y = q.y
bbox.pose.orientation.z = q.z
bbox.pose.orientation.w = q.w
arr_bbox.boxes.append(bbox)
marker = Marker()
marker.header.frame_id =msg.header.frame_id
marker.header.stamp = rospy.Time.now()
marker.ns = "basic_shapes"
marker.id = i
marker.type = Marker.TEXT_VIEW_FACING
marker.action = Marker.ADD
marker.lifetime=rospy.Duration(0.15)
marker.scale.x = 4
marker.scale.y = 4
marker.scale.z = 4
                # Marker color and opacity
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 1
marker.color.a = 1
marker.pose.position.x=float(lidar_boxes[i][0])
marker.pose.position.y = float(lidar_boxes[i][1])
marker.pose.position.z = float(lidar_boxes[i][2]) + float(lidar_boxes[i][5]) / 2
marker.text=str(np.around(lidar_scores[i],2))
arr_score.markers.append(marker)
arr_bbox.header.frame_id = msg.header.frame_id
arr_bbox.header.stamp = rospy.Time.now()
print("Number of detections: {}".format(num_detects))
self.pub_bbox.publish(arr_bbox)
self.pub_text.publish(arr_score)
# self.pub_cloud.publish(cloud_msg)
# conver torch functions
def get_paddings_indicator_np(actual_num, max_num, axis=0):
"""Create boolean mask by actually number of a padded tensor.
Args:
actual_num ([type]): [description]
max_num ([type]): [description]
Returns:
[type]: [description]
"""
actual_num = np.expand_dims(actual_num, axis + 1)
# tiled_actual_num: [N, M, 1]
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
    max_num = np.arange(max_num, dtype=np.int64).reshape(max_num_shape)
# tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
# tiled_max_num: [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
paddings_indicator = actual_num.astype(np.int32) > max_num
# paddings_indicator shape: [batch_size, max_num]
return paddings_indicator
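# A quick standalone sanity check of get_paddings_indicator_np (a sketch, not
# part of the node's runtime path): three pillars holding 3, 4 and 2 points,
# each padded to 5 slots, should give a mask marking only the real points.
def _demo_padding_indicator():
    actual = np.array([3, 4, 2])
    mask = get_paddings_indicator_np(actual, 5, axis=0)
    print(mask.astype(np.int32))
    # expected:
    # [[1 1 1 0 0]
    #  [1 1 1 1 0]
    #  [1 1 0 0 0]]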
def example_to_tensorlist_with_batch(example, config,device=None,float_type=torch.float32):
example_list = [None] * 13
pillar_x = example['voxels'][:, :, 0][np.newaxis,np.newaxis,:,:] # (1,K,T)
pillar_y = example['voxels'][:, :, 1][np.newaxis,np.newaxis,:,:]
pillar_z = example['voxels'][:, :, 2][np.newaxis,np.newaxis,:,:]
pillar_i = example['voxels'][:, :, 3][np.newaxis,np.newaxis,:,:]
num_points_per_pillar = example['num_points'][np.newaxis,:] # (N,K,)
#print(num_points_per_pillar.shape)
coors = example['coordinates'] [np.newaxis,:] # (N,K,3)
anchors = example['anchors'] # (B,num_anchors,7)
image_ids = [int(elem['image_idx']) for elem in example['metadata']]
image_ids = np.array(image_ids, dtype=np.int32)
voxel_mask = example['voxel_mask'][np.newaxis,:] # (N,K)
# ################################################################
# Find distance of x, y, z from pillar center
coors_x = example['coordinates'][:, 2][np.newaxis,:] # (N,K)
coors_y = example['coordinates'][:, 1][np.newaxis,:]
pc_range = cfg.MODEL.POST_PROCESSING.post_center_limit_range
    x_sub = coors_x[:, np.newaxis, :, np.newaxis] * cfg.VOXEL_GENERATOR.VOXEL_SIZE[0] + pc_range[0]  # x coordinates of pillar centers (N,1,K,1)
y_sub = coors_y[:, np.newaxis, :, np.newaxis] * cfg.VOXEL_GENERATOR.VOXEL_SIZE[1] +pc_range[1]
# print("before repeat x_sub nan is ",torch.nonzero(torch.isnan(x_sub)).shape)
# print("before repeat y_sub nan is ", torch.nonzero(torch.isnan(y_sub)).shape)
x_sub_shaped = x_sub.repeat(pillar_x.shape[3], -1)
y_sub_shaped = y_sub.repeat(pillar_x.shape[3], -1) # (N,1,K,T)
# print("after repeat x_sub nan is ", torch.nonzero(torch.isnan(x_sub_shaped)).shape)
# print("after repeat y_sub nan is ", torch.nonzero(torch.isnan(y_sub_shaped)).shape)
num_points_for_a_pillar = pillar_x.shape[3] # (T)
mask = get_paddings_indicator_np(num_points_per_pillar, num_points_for_a_pillar, axis=0) # (N,T,K)
mask = mask.transpose(0, 2, 1) # (N,K,T)
mask = mask[:, np.newaxis, :, :] # (N,1,K,T)
mask = mask.astype(pillar_x.dtype)
example_list[0] = torch.tensor(pillar_x, dtype=float_type, device=device)
example_list[1] = torch.tensor(pillar_y, dtype=float_type, device=device)
example_list[2] = torch.tensor(pillar_z, dtype=float_type, device=device)
example_list[3] = torch.tensor(pillar_i, dtype=float_type, device=device)
example_list[4] = torch.tensor(num_points_per_pillar, dtype=float_type, device=device)
example_list[5] = torch.tensor(x_sub_shaped, dtype=float_type, device=device)
example_list[6] = torch.tensor(y_sub_shaped, dtype=float_type, device=device)
example_list[7] = torch.tensor(mask, dtype=float_type, device=device)
example_list[8] = torch.tensor(example['coordinates'], dtype=torch.int32, device=device)
example_list[9] = torch.tensor(voxel_mask, dtype=torch.bool, device=device)
example_list[10] = torch.tensor(anchors, dtype=float_type, device=device)
example_list[11] = torch.tensor(image_ids, dtype=torch.int32, device=device)
if 'anchors_mask' in example.keys():
example_list[12]=torch.tensor(example['anchors_mask'], dtype=torch.bool, device=device)
#print(example_list[12])
else:
example_list[12]=None
return example_list
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument("--weights_file", type=str, default="/home/hova/Lidardet/params/weights/pointpillars/PointPillars.tckpt")
parse.add_argument("--config_path",type=str,default="/home/hova/Lidardet/params/configs/pointpillars_kitti_car_xy16.yaml")
parse.add_argument("--trt_dir",type=str,default="/home/hova/Lidardet/params/TensorRT/pointpillar_0827")
parse.add_argument("--anchors_area", type=int, default=0.001)
args=parse.parse_args()
second_ros=SecondROS(trt_dir=args.trt_dir,
weights_file = args.weights_file,
config_path=args.config_path,
is_tensorrt=True,
anchors_area=args.anchors_area)
|
import os, sys
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
import simpy
from collections import deque
from msg import Msg, InfoType
from priority_dict import *
from debug_utils import *
class RRQueue(): # Round Robin
def __init__(self, max_qlen):
self.max_qlen = max_qlen
self.cid_q_m = {}
self.next_cid_to_pop_q = deque()
self.num_dropped = 0
def len(self):
return sum(len(q) for _, q in self.cid_q_m.items())
def reg(self, cid):
if cid not in self.cid_q_m:
self.cid_q_m[cid] = deque()
self.next_cid_to_pop_q.append(cid)
log(DEBUG, "reged", cid=cid)
def unreg(self, cid):
if cid in self.cid_q_m:
self.cid_q_m.pop(cid)
self.next_cid_to_pop_q.remove(cid)
log(DEBUG, "unreged", cid=cid)
def push(self, msg):
if msg.payload.cid not in self.cid_q_m:
self.reg(msg.payload.cid)
r = True
q = self.cid_q_m[msg.payload.cid]
if len(q) == self.max_qlen:
# msg_popped = q.popleft()
# log(DEBUG, "Was full, popped the oldest req", msg_popped=msg_popped)
log(DEBUG, "Was full, dropped", msg=msg)
self.num_dropped += 1
r = False
else:
q.append(msg)
log(DEBUG, "pushed", msg=msg)
return r
def pop(self):
for _ in range(len(self.cid_q_m)):
q = self.cid_q_m[self.next_cid_to_pop_q[0]]
self.next_cid_to_pop_q.rotate(-1)
if len(q) > 0:
return q.popleft()
return None
class WQueue(): # Worker
def __init__(self, wid_s, w_token_s, max_qlen):
self.w_token_s = w_token_s
self.max_qlen = max_qlen
self.wid_qlen_heap_m = priority_dict()
for wid in wid_s:
self.wid_qlen_heap_m[wid] = 0
for _ in range(self.max_qlen):
self.w_token_s.put(1)
log(DEBUG, "WQueue constructed", wid_s=wid_s)
def update(self, new_wid_s):
log(DEBUG, "started", new_wid_s=new_wid_s)
## Drop wid's that got removed
for wid in [i for i in self.wid_qlen_heap_m]:
if wid not in new_wid_s:
qlen = self.wid_qlen_heap_m[wid]
log(DEBUG, "dropping", wid=wid, qlen=qlen)
for _ in range(self.max_qlen - qlen):
yield self.w_token_s.get()
self.wid_qlen_heap_m.pop(wid)
## Add new wid's
for wid in new_wid_s:
if wid not in self.wid_qlen_heap_m:
log(DEBUG, "adding", wid=wid)
self.wid_qlen_heap_m[wid] = 0
for _ in range(self.max_qlen):
self.w_token_s.put(1)
log(DEBUG, "done")
def inc_qlen(self, wid):
log(DEBUG, "started", wid=wid)
self.wid_qlen_heap_m[wid] += 1
check(self.wid_qlen_heap_m[wid] <= self.max_qlen, "Q-len cannot be greater than max_qlen= {}".format(self.max_qlen))
log(DEBUG, "done", wid=wid, qlen=self.wid_qlen_heap_m[wid])
def dec_qlen(self, wid):
log(DEBUG, "started", wid=wid)
try:
self.wid_qlen_heap_m[wid] -= 1
except KeyError:
log(DEBUG, "tried on non-existent key", wid=wid)
return
check(self.wid_qlen_heap_m[wid] >= 0, "Q-len cannot be negative")
self.w_token_s.put(1)
log(DEBUG, "done", wid=wid, qlen=self.wid_qlen_heap_m[wid])
def pop(self):
wid = self.wid_qlen_heap_m.smallest()
qlen = self.wid_qlen_heap_m[wid]
if qlen >= self.max_qlen:
log(WARNING, "Attempted to return a full worker", qlen=qlen)
return None
return wid
class Master():
def __init__(self, _id, env, w_l):
self._id = _id
self.env = env
self.id_w_m = {}
for w in w_l:
w.reg_master(self)
self.id_w_m[w._id] = w
self.msg_token_s = simpy.Store(env)
self.msg_q = RRQueue(max_qlen=5)
self.w_token_s = simpy.Store(env)
self.w_q = WQueue([w._id for w in w_l], self.w_token_s, max_qlen=30)
self.epoch_num_req_l = []
self.act = env.process(self.run())
def __repr__(self):
return "Master(id= {})".format(self._id)
def set_out(self, out):
for _, w in self.id_w_m.items():
w.out = out
def record_load(self):
total_w_qlen = sum(qlen for _, qlen in self.w_q.wid_qlen_heap_m.items())
total_num_req = total_w_qlen + self.msg_q.len()
self.epoch_num_req_l.append((self.env.now, total_num_req))
slog(DEBUG, self.env, self, "done", total_num_req=total_num_req)
def put(self, msg):
slog(DEBUG, self.env, self, "recved", msg=msg)
self.record_load()
p = msg.payload
if p.is_req():
p.epoch_arrived_cluster = self.env.now
if self.msg_q.push(msg):
self.msg_token_s.put(1)
elif p.is_info():
if p.typ == InfoType.client_disconn:
# TODO: uncomment the following
# self.msg_q.unreg(msg.src_id)
pass
elif p.typ == InfoType.worker_req_completion:
self.w_q.dec_qlen(msg.src_id)
else:
slog(ERROR, self.env, self, "Unexpected payload type", payload=p)
def run(self):
while True:
slog(DEBUG, self.env, self, "Waiting for msg")
yield self.msg_token_s.get()
msg = self.msg_q.pop()
check(msg is not None, "Msg must have arrived")
slog(DEBUG, self.env, self, "Waiting for worker")
yield self.w_token_s.get()
slog(DEBUG, self.env, self, "", w_token_s_len=len(self.w_token_s.items))
wid = self.w_q.pop()
check(wid is not None, "There should have been an available worker")
## Send to worker
msg.src_id = self._id
msg.dst_id = wid
self.id_w_m[wid].put(msg)
if msg.payload.is_req():
slog(DEBUG, self.env, self, "Will inc_qlen", wid=wid)
self.w_q.inc_qlen(wid)
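# --- Minimal hedged usage sketch (illustrative only, not part of the simulator): exercising
# --- RRQueue's round-robin order with stand-in messages that expose the payload.cid
# --- attribute relied on by push()/pop(). The _Msg/_Payload names are hypothetical.
# from collections import namedtuple
# _Payload = namedtuple('_Payload', ['cid'])
# _Msg = namedtuple('_Msg', ['payload'])
# rrq = RRQueue(max_qlen=2)
# for cid in ['a', 'a', 'b', 'b', 'c']:
#     rrq.push(_Msg(_Payload(cid)))
# # pops alternate across client ids: a, b, c, a, b
# print([rrq.pop().payload.cid for _ in range(5)])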
|
import requests
import io
import base64
"""
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Brain:
def __init__(self, **kwargs):
"""Init and store the user's credentials for future calls. If no credentials are supplied the login function must be called .
:param url:
:param signed_username: use the passed apiToken to authenticate
:param user_id: optional instead of apiToken, must be passed with token
:param token: optional instead of apiToken, must be passed with user_id
:param authenticate: only valid with apiToken. Force a call to the server to authenticate the passed credentials.
"""
self.url = kwargs.get('url', 'https://brain.deepgram.com')
self._signedUsername = None
if any(i in ['user_id', 'token', 'signed_username'] for i in kwargs):
self.login(**kwargs)
def _checkReturn(self, response):
if response.status_code == 200:
value = response.json()
return value
raise Exception('Call failed: {}'.format(response.status_code))
@property
def signedUsername(self):
if self._signedUsername is None:
raise Exception('Signed username not set. Either set it or login with a user_id/token first')
return self._signedUsername
@signedUsername.setter
def signedUsername(self, signedUsername):
self._signedUsername = signedUsername
def login(self, **kwargs):
"""Logs the current user into the server with the passed in credentials. If successful the apiToken will be changed to match the passed in credentials.
:param apiToken: use the passed apiToken to authenticate
:param user_id: optional instead of apiToken, must be passed with token
:param token: optional instead of apiToken, must be passed with user_id
:param authenticate: only valid with apiToken. Force a call to the server to authenticate the passed credentials.
:return:
"""
if 'signed_username' in kwargs:
apiToken = kwargs['signed_username']
if kwargs.get('authenticate', False):
self._checkReturn(requests.get("{}/users?signed_username={}".format(self.url, apiToken)))
self.signedUsername = apiToken
else:
auth = (kwargs['user_id'], kwargs['token'])
self.signedUsername = self._checkReturn(requests.get("{}/users/login".format(self.url), auth=auth))[
'signed_username']
@property
def user(self):
return self._checkReturn(requests.get("{}/users?signed_username={}".format(self.url, self.signedUsername)))
@property
def assets(self):
returnValue = requests.get("{}/assets?signed_username={}&done=false".format(self.url, self.signedUsername))
return self._checkReturn(returnValue)['results']
def asset(self, assetId, times=False):
if times == True:
returnValue = requests.get("{}/assets/{}?times=true&signed_username={}".format(self.url, assetId, self.signedUsername))
return self._checkReturn(returnValue)
returnValue = requests.get("{}/assets/{}?signed_username={}".format(self.url, assetId, self.signedUsername))
return self._checkReturn(returnValue)
def updateAsset(self, assetId, transcript=None, metadata=None):
body = {}
if transcript is not None:
body['transcript'] = transcript
if metadata is not None:
body['metadata'] = metadata
return self._checkReturn(
requests.put("{}/assets/{}?signed_username={}".format(self.url, assetId, self.signedUsername), json=body))
def createAssetFromURL(self, url, is_async=False, metadata=None, callback=None):
"""Uses the passed URL to load data. If is_async is False a json with the result is returned, otherwise a json with an asset_id is returned.
:param url:
:param metadata: arbitrary additional description information for the asset
:param is_async: sent to the API as the 'async' config flag
:param callback: Callback URL
:return:
"""
audio = {'uri': url}
config = {'async': is_async}
if callback is not None:
config['callback'] = callback
if metadata is not None:
body = {'audio': audio, 'config': config, 'metadata': metadata}
else:
body = {'audio': audio, 'config': config}
return self._checkReturn(
requests.post("{}/speech:recognize?signed_username={}".format(self.url, self.signedUsername), json=body))
def transcribeFromURL(self, url):
return self.createAssetFromURL(url, is_async=False)['transcript']
def uploadAsset(self, data, is_async=False, metadata=None, callback=None):
"""Takes an array of bytes or a BufferedReader and uploads it. If is_async is False a json with the result is returned, otherwise a json with an asset_id is returned.
:param data: array of bytes or BufferedReader
:param metadata: arbitrary additional description information for the asset
:param is_async: sent to the API as the 'async' config flag
:param callback: Callback URL
:return:
"""
# TODO: a hasattr(data, 'read') check would be more flexible here
if isinstance(data, io.BufferedReader):
data = data.read()
assert isinstance(data, bytes)
data = base64.b64encode(data)
audio = {'content': data.decode("utf-8")}
config = {'async': is_async}
if callback is not None:
config['callback'] = callback
if metadata is not None:
body = {'audio': audio, 'config': config, 'metadata': metadata}
else:
body = {'audio': audio, 'config': config}
return self._checkReturn(
requests.post("{}/speech:recognize?signed_username={}".format(self.url, self.signedUsername), json=body))
def transcribe(self, data):
return self.uploadAsset(data, is_async=False)['transcript']
def deleteAsset(self, assetId):
return self._checkReturn(
requests.delete("{}/assets/{}?signed_username={}".format(self.url, assetId, self.signedUsername)))
def searchAssets(self, query, assetIds, npp=None, page=None, limit=None):
"""
:param query:
:param assetIds: list of asset Ids
:param npp: number per page or None (default) for all results
:param page: page number to start results from or None (default) for 0
:param limit: max results or None (default) for no limit
:return:
"""
body = {"query":query, 'asset_ids':assetIds}
if npp is not None:
body['npp'] = npp
if page is not None:
body['p'] = page
if limit is not None:
body['limit'] = limit
return self._checkReturn(
requests.post("{}/assets/search?signed_username={}".format(self.url, self.signedUsername), json=body))
|
# =============== DEFINE BLUEPRINTS ==============
import logging
from functools import wraps
import sys
# =============== DEFINE LOGSTREAM ==============
class LogStream():
def __init__(self, MAX_RECORDS ):
self.logs = []
self.max_records = MAX_RECORDS
self.lines = 0
def write(self, str):
print(str[:-1])
if (self.lines >= self.max_records ):
self.logs.append(str)
self.logs.pop(0)
else:
self.logs.append(str)
self.lines +=1
def flush(self):
pass
def __str__(self):
return "".join(self.logs)
def _getStream(self):
return "".join(self.logs)
# =============== DEFINE APPLICATION LOGGER CLASS ==============
class ApplicationLogger():
def __init__(self,logname):
self.logname = logname
self._LOGGER = None
self._activated = False
self.debugs = 0
self.infos = 0
self.warnings = 0
self.errors = 0
self.criticals = 0
self._logginglevels = {
'CRITICAL' : logging.CRITICAL,
'ERROR' : logging.ERROR,
'WARNING' : logging.WARNING,
'INFO' : logging.INFO,
'DEBUG' : logging.DEBUG
}
def init_app(self,APP):
if APP.config["LOGGING_SERVICE"]:
self.stream = LogStream(APP.config["MAX_LOG_RECORDS"])
logging.basicConfig(
stream=self.stream,
level=self._logginglevels[APP.config["LOGGING_LEVELS"]],
format=APP.config["LOGGER_FORMAT"]
)
self._LOGGER = logging.getLogger(self.logname)
self._activated = True
self.info(f"LoggerServes '{self.logname}' is activated!!")
return True
else:
print("Logging service is not activated")
return True
def _isActive(f):
@wraps(f)
def inner1(inst,*args, **kwargs):
if inst._activated:
return f(inst,*args, **kwargs)
else:
#print("Logger is not activated so the method is not processed")
return
return inner1
def isLoggerActive(self):
return self._activated
@_isActive
def getStream(self):
return self.stream._getStream()
@_isActive
def debug(self, str):
self.debugs +=1
self._LOGGER.debug(str)
@_isActive
def info(self, str):
self.infos +=1
self._LOGGER.info(str)
@_isActive
def warning(self, str):
self.warnings +=1
self._LOGGER.warning(str)
@_isActive
def error(self, str):
self.errors += 1
self._LOGGER.error(str)
@_isActive
def critical(self, str):
self.criticals += 1
self._LOGGER.critical(str)
def getFunctionName(self):
return sys._getframe(1).f_code.co_name
# =============== EXECUTE TEST CODE ===============
if __name__ == "__main__":
pass
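# --- Minimal hedged sketch (illustrative only): driving ApplicationLogger with a stand-in
# --- app object whose config dict carries the keys read by init_app(). _DummyApp is hypothetical.
# class _DummyApp:
#     config = {
#         "LOGGING_SERVICE": True,
#         "MAX_LOG_RECORDS": 100,
#         "LOGGING_LEVELS": "DEBUG",
#         "LOGGER_FORMAT": "%(asctime)s %(levelname)s %(message)s",
#     }
# applog = ApplicationLogger("demo")
# applog.init_app(_DummyApp())
# applog.debug("debug message")
# applog.warning("warning message")
# print(applog.getStream())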
|
import os
import time
import numpy as np
import torch
from utils import prep_batch, epoch_time
def train_step(
model,
source,
src_len,
targets,
task,
criterion,
optimizer,
clip,
teacher_forcing=None,
):
# source = [src_len, bsz]
# targets = [trg_len, bsz]
# src_len = [bsz]
if task == "translation":
output = model(source, src_len, targets, teacher_forcing)
elif task == "tagging":
output = model(source, src_len)
# output = [seq_len, bsz, output_dim] (trg_len for translation, src_len for tagging)
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
# output = [(trg len - 1) * bsz, output dim]
targets = targets[1:].view(-1)
# targets = [(trg len - 1) * bsz]
loss = criterion(output, targets)
# delete variables to free up memory
del output, targets
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
# get the value of the loss
loss = loss.item()
return loss
def train_model(
model,
iterator,
task,
optimizer,
criterion,
clip,
device,
epoch,
start_time,
save_path,
dropout,
pad_indices,
num_batches,
teacher_forcing=None,
checkpoint=None,
repr_layer=None,
):
model.train()
epoch_loss = 0
batch_loss = []
if task == "tagging":
# save 10 times throughout training
save_loss = np.linspace(0, num_batches, num=10, dtype=int)
elif task == "translation":
# save 100 times throughout training
save_loss = np.linspace(0, num_batches, num=100, dtype=int)
try:
for i, batch in enumerate(iterator):
source, targets, src_len = prep_batch(batch, device, pad_indices)
optimizer.zero_grad()
loss = train_step(
model,
source,
src_len,
targets,
task,
criterion,
optimizer,
clip,
teacher_forcing,
)
epoch_loss += loss
if i in save_loss:
batch_loss.append(loss)
end_time = time.time()
batch_mins, batch_secs = epoch_time(start_time, end_time)
print(
f"epoch {epoch} batch: {i} | Train loss: {loss:.3f} | Time: {batch_mins}m {batch_secs}s"
)
start_time = end_time
# optionally checkpoint
if checkpoint is not None:
if i % checkpoint == 0:
adam, sparse_adam = optimizer.return_optimizers()
torch.save(
{
"epoch": epoch,
"model_state_dict": model.state_dict(),
"adam_state_dict": adam.state_dict(),
"sparse_adam_state_dict": sparse_adam.state_dict(),
"loss": loss,
"dropout": dropout,
"repr_layer": repr_layer,
},
os.path.join(save_path, f"checkpoint_{epoch}_{i}.pt"),
)
print(
f"Checkpoint saved at epoch {epoch} batch {i}. Train loss is {loss:.3f}"
)
# on CUDA OOM, stop the epoch early and return the partial results; re-raise anything else
except RuntimeError as e:
if "out of memory" in str(e):
print(f"| WARNING: ran out of memory, stopping epoch at batch number {i:,}")
else:
raise
return epoch_loss / num_batches, batch_loss
|
use_on_heroku = True
use_mockDB = True
base_url = ''
if use_on_heroku:
base_url = 'https://bring-my-food.herokuapp.com/'
else:
base_url = 'http://127.0.0.1:5000/'
|
import time
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, date2num
import numpy as np
import subprocess as sp
import sys
import os
# Takes south_local_signal.txt and south_global_signal.txt
# and adds microseconds, UTC time, as well as trigger type,
# in addition to the VEM signals for anode and dynode
secsInWeek = 604800
secsInDay = 86400
gpsEpoch = (1980, 1, 6, 0, 0, 0) # (year, month, day, hh, mm, ss)
def UTCFromGps(gpsWeek, SOW, leapSecs=18):
"""converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024)
"""
secFract = SOW % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple) - time.timezone #mktime is localtime, correct for UTC
tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
t = t0 + tdiff
(year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
#use gmtime since localtime does not allow switching off the daylight savings correction
return (year, month, day, hh, mm, ss + secFract)
date_list = ['20161107',
'20161108',
'20161109',
'20161110',
'20161111',
'20161112',
'20161113',
'20161114',
'20161115',
'20161116',
'20161117',
'20161118',
'20161119',
'20161120',
'20161121',
'20161122',
'20161123',
'20161124',
'20161125',
'20161126',
'20161127',
'20161128',
'20161129',
'20161130',
'20161201',
'20161202',
'20161203',
'20161204',
'20161205',
'20161206',
'20161207',
'20161208',
'20161209',
'20161210',
'20161211',
'20161212',
'20161213',
'20161214',
'20161215',
'20161216',
'20161217',
'20161218',
'20161219',
'20161220',
'20161221',
'20161222',
'20161223',
'20161224',
'20161225',
'20161226',
'20161227',
'20161228',
'20161229',
'20161230',
'20161231',
'20170101',
'20170102',
'20170103',
'20170104',
'20170105',
'20170106',
'20170107',
'20170108',
'20170109',
'20170110',
'20170111',
'20170112',
'20170114',
'20170117',
'20170118',
'20170119',
'20170120',
'20170121',
'20170122',
'20170123',
'20170127',
'20170128',
'20170129',
'20170130',
'20170131',
'20170201',
'20170202',
'20170203',
'20170204',
'20170205',
'20170206',
'20170207',
'20170208',
'20170209',
'20170210',
'20170211',
'20170212',
'20170213',
'20170214',
'20170215',
'20170216',
'20170217',
'20170218',
'20170219',
'20170220',
'20170221',
'20170222',
'20170223',
'20170224',
'20170225',
'20170226',
'20170227',
'20170228',
'20170301',
'20170302',
'20170303',
'20170304',
'20170305',
'20170306',
'20170307',
'20170308',
'20170309',
'20170310',
'20170311',
'20170312',
'20170313',
'20170314',
'20170315',
'20170316',
'20170317',
'20170318',
'20170319',
'20170320',
'20170321',
'20170322',
'20170323',]
for date in date_list:
fdate = date
yr = int(fdate[:4])
mo = int(fdate[4:6])
dy = int(fdate[6:])
# Array columns: GPS sec, A1, A2, A3, D1, D2, D3
local_sig = np.loadtxt('/home/augta/web_monitor/south_local_signal.txt',dtype='S500')
local_coi = np.loadtxt('/home/augta/data/coincidence/%i_%02d_%02d.CTAL.gz' %(yr,mo,dy)
,usecols=(1,),dtype='S100')
sp.call(['cp','/home/augta/data/south/t2/%i_%02d_%02d.T2.gz' %(yr,mo,dy),'.'])
sp.call(['gunzip','%i_%02d_%02d.T2.gz' %(yr,mo,dy)])
file_name = "%i_%02d_%02d.T2" %(yr,mo,dy)
with open(file_name,'r') as f:
all_data = f.read()
sp.call(['rm',file_name])
new_slf = '/home/augta/web_monitor/south_local_signal_extra.txt'
if local_coi.size > 0:
if local_coi.size == 1:
tmp = str(local_coi)
local_coi = []
local_coi.append(tmp)
for i in local_coi:
# Find where GPS second is
try:
adi = all_data.index(i.split('.')[0])
#Time stamp not in file, edit manually
except:
print i
print "Previous second: %i" %gps_int
continue
# Get string blob with T2 list
blob = all_data[adi:adi+1000]
our_second = blob.split('--\n')[0]
micro = i.split('.')[1]
mi = our_second.index('%s' %str(int(micro)))
ttype = our_second[mi-2]
# Compute UTC time
gps_sec_str = i.split('.')[0]
gps_int = int(gps_sec_str)
week = gps_int / secsInWeek
sow = gps_int - week*secsInWeek
utc = UTCFromGps(week,sow)
utc_str = "%i-%02d-%02d-%02d:%02d:%02d" %utc
utc_str = utc_str + '.%06d' %int(micro)
# Find matching local signal data
for j in local_sig:
if gps_sec_str in j[0]:
vems = j[1:]
# Now we have everything we need to write to a file
with open(new_slf,'a') as f:
out_str = '%s %s %s' %(i,utc_str,ttype)
out_str += ' %s'*12 %tuple(vems)
out_str += '\n'
f.write(out_str)
global_sig = np.loadtxt('/home/augta/web_monitor/south_global_signal.txt',dtype='S500')
global_coi = np.loadtxt('/home/augta/data/coincidence/%i_%02d_%02d.CTAG.gz' %(yr,mo,dy),
usecols=(6,),dtype='S100',comments=None)
new_sgf = '/home/augta/web_monitor/south_global_signal_extra.txt'
print global_coi.size
if global_coi.size > 0:
if global_coi.size == 1:
tmp = str(global_coi)
global_coi = []
global_coi.append(tmp)
for i in global_coi:
# Find where GPS second is
try:
adi = all_data.index(i.split('.')[0])
#Time stamp not in file, edit manually
except:
print i
print "Previous second: %i" %gps_int
continue
# Get string blob with T2 list
blob = all_data[adi:adi+1000]
our_second = blob.split('--\n')[0]
micro = i.split('.')[1]
mi = our_second.index('%s' %str(int(micro)))
ttype = our_second[mi-2]
# Compute UTC time
gps_sec_str = i.split('.')[0]
gps_int = int(gps_sec_str)
week = gps_int / secsInWeek
sow = gps_int - week*secsInWeek
utc = UTCFromGps(week,sow)
utc_str = "%i-%02d-%02d-%02d:%02d:%02d" %utc
utc_str = utc_str + '.%06d' %int(micro)
# Find matching local signal data
for j in global_sig:
if gps_sec_str in j[0]:
vems = j[1:]
# Now we have everything we need to write to a file
with open(new_sgf,'a') as f:
out_str = '%s %s %s' %(i,utc_str,ttype)
out_str += ' %s'*12 %tuple(vems)
out_str += '\n'
f.write(out_str)
|
import unittest
from dicom_factory.factory import DicomFactory
class TestFactory(unittest.TestCase):
def test_create_factory_with_custom_data_size_works_properly(self):
data_size = (100, 100)
factory_args = {'Rows': data_size[0], 'Columns': data_size[1]}
dicom = DicomFactory.build(factory_args)
self.assertEqual(data_size, dicom.pixel_array.shape)
def test_create_factory_with_custom_series_adds_series_description(self):
expected_series = 'leg'
factory_args = {'SeriesDescription': expected_series}
dicom = DicomFactory.build(factory_args)
self.assertEqual(expected_series, dicom.SeriesDescription)
def test_create_factory_with_unsupported_arguments_raises_value_error(self):
with self.assertRaises(ValueError):
factory_args = {'FakeArg': 123}
DicomFactory.build(factory_args)
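# --- Hedged note (illustrative only): the tests above can be run with the standard
# --- unittest runner, e.g. by adding:
# if __name__ == '__main__':
#     unittest.main()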
|
import requests
from bs4 import BeautifulSoup
import smtplib
import json
import re
from tld import get_tld
##############################################
class scrapit():
def __init__(self, query, price, email, soup):
self.query = query
self.price = price
self.email = email
self.soup = soup
def send_mail(self,product_body):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login('your email', 'your emails password')
subject = 'price fell down\n'
msg = "Subject: "+subject+"\n "+self.query+"\n"+product_body
server.sendmail(
'your email',
self.email,
msg.encode('utf-8')
)
print('HEY EMAIL HAS BEEN SENT')
server.quit()
def search_for_myntra(self):
script_contents = self.soup.find_all('script')
product_script = script_contents[1]
json_product = product_script.string.replace(" ","").replace("\n","").replace("\t","")
product_json = json.loads(json_product)
item_price = int(product_json['offers']['price'])
brand_name = product_json['brand']['name']
product_name = product_json['name'][len(brand_name):]
product_name = " ".join(re.findall('[A-Z][^A-Z]*',product_name))
product_dict = {
'price': item_price,
'brand_name': brand_name,
'product_name': product_name
}
product_body = 'price : '+str(item_price)+'\nbrand_name : '+brand_name+'\nproduct_name : '+ product_name
self.check_prize(item_price, product_body)
def search_for_amazone(self):
product_div = self.soup.find('div',{'id':'titleSection'}).h1.get_text()
product_name = product_div.strip()
find_div = self.soup.find('div',{'id':'price'})
tr = find_div.table.find_all('tr')[1]
td = tr.find_all('td')[1]
span_block = td.find('span')
price_in_string = str(span_block.encode('utf-8'))
slice_price = price_in_string[116:-11].split(',')
s = [str(i) for i in slice_price]
item_price = int("".join(s))
product_body = 'price : '+str(item_price)+'\nproduct_name : '+ str(product_name)
self.check_prize(item_price, product_body)
def search_for_flipkart(self):
product_name = self.soup.find('h1',{'class':'_9E25nV'}).get_text()
product_div = self.soup.find('div',{'class':'_1uv9Cb'}).find('div').get_text()
product_price = str(product_div.encode('utf-8'))
product_price = product_price[14:-1].split(',')
s = [str(i) for i in product_price]
item_price = int("".join(s))
product_body = 'price : '+str(item_price)+'\nproduct_name : '+ str(product_name)
self.check_prize(item_price, product_body)
def check_prize(self,item_price,product_body):
if(item_price < self.price):
self.send_mail(product_body)
else:
print('no price update')
url = input('enter the url of product you want to track price for: ')
price = int(input('enter the price at which you want to be notified, i.e. when the product price is equal to or less than it: '))
email = input('enter your email: ')
url_ext = get_tld(url, as_object = True)
domain = url_ext.domain
headers = {
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
req = requests.get(url,headers = headers)
soup = BeautifulSoup(req.content , 'html.parser')
res = scrapit(url, price, email,soup)
if domain == 'myntra':
res.search_for_myntra()
elif domain == 'amazon':
res.search_for_amazone()
elif domain == 'flipkart':
res.search_for_flipkart()
else:
print('not in our range sir')
|
# -*- coding: utf-8 -*-
#! \file ./tests/test_support/test_cmd/test_runtime.py
#! \author Jiří Kučera, <sanczes@gmail.com>
#! \stamp 2016-04-07 18:58:51 (UTC+01:00, DST+01:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Command processor's runtime module tests.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import unittest
from doit.support.cmd.errors import \
CommandError
from doit.support.cmd.runtime import \
isderived, \
Location, \
Evaluable, \
BaseIterator, \
FiniteIterator, \
Iterable, \
Pair, \
List, \
HashMap, \
UserType, \
ExceptionClass, \
Traceback, \
Procedure
from doit.support.cmd.eval import \
CommandProcessor
class PseudoCommand(object):
__slots__ = [ 'name', 'location', '__isfunc' ]
def __init__(self, name, location, isfunc = True):
self.name = name
self.location = location
self.__isfunc = isfunc
#-def
def __str__(self):
return self.name
#-def
def isfunc(self):
return self.__isfunc
#-def
#-class
class PseudoContext(object):
__slots__ = [ 'cmd' ]
def __init__(self, name, location, isfunc = True):
self.cmd = PseudoCommand(name, location, isfunc)
#-def
#-class
class TestLocationCase(unittest.TestCase):
def test_methods(self):
loc0 = Location()
loc1 = Location("A", 1, 2)
self.assertIsNone(loc0.file())
self.assertEqual(loc0.line(), -1)
self.assertEqual(loc0.column(), -1)
self.assertEqual(loc0, (None, -1, -1))
x, y, z = loc0
self.assertEqual((x, y, z), (None, -1, -1))
self.assertEqual(str(loc0), "(internal)")
self.assertEqual(loc1.file(), "A")
self.assertEqual(loc1.line(), 1)
self.assertEqual(loc1.column(), 2)
self.assertEqual(loc1, ("A", 1, 2))
x, y, z = loc1
self.assertEqual((x, y, z), ("A", 1, 2))
self.assertEqual(str(loc1), 'at ["A":1:2]')
#-def
#-class
class TestEvaluableCase(unittest.TestCase):
def test_equality(self):
x = Evaluable()
y = Evaluable()
z = Evaluable()
z.set_location("g", 2, 5)
z.properties['a'] = "xyz"
zz = Evaluable()
zz.set_location("g", 2, 5)
zz.properties['b'] = "xyz"
zzz = Evaluable()
zzz.set_location("g", 2, 5)
zzz.properties['a'] = "xyz"
self.assertEqual(x, y)
self.assertEqual(x, x)
self.assertNotEqual(x, 1)
self.assertNotEqual(x, z)
self.assertNotEqual(zz, z)
self.assertEqual(zzz, z)
#-def
#-class
class TestIteratorCase(unittest.TestCase):
def test_BaseIterator(self):
i = BaseIterator()
i.reset()
self.assertIs(i.next(), i)
#-def
def test_FiniteIterator(self):
fi0 = FiniteIterator(())
fi1 = FiniteIterator("abc")
fi0.reset()
self.assertEqual(fi0.next(), fi0)
self.assertEqual(fi0.next(), fi0)
fi0.reset()
self.assertEqual(fi0.next(), fi0)
self.assertEqual(fi0.next(), fi0)
fi1.reset()
self.assertEqual(fi1.next(), "a")
self.assertEqual(fi1.next(), "b")
fi1.reset()
self.assertEqual(fi1.next(), "a")
self.assertEqual(fi1.next(), "b")
self.assertEqual(fi1.next(), "c")
self.assertEqual(fi1.next(), fi1)
self.assertEqual(fi1.next(), fi1)
fi1.reset()
self.assertEqual(fi1.next(), "a")
#-def
#-class
class TestIterableCase(unittest.TestCase):
def test_Iterable(self):
self.assertIsInstance(Iterable().iterator(), BaseIterator)
#-def
def test_Pair(self):
p = Pair(1, 2)
q = Pair(*p)
i = q.iterator()
self.assertEqual(p, (1, 2))
self.assertEqual(q, (1, 2))
self.assertEqual(i.next(), 1)
self.assertEqual(i.next(), 2)
self.assertIs(i.next(), i)
#-def
def test_List(self):
l = List((1, 2, 3))
m = List(l)
i = m.iterator()
self.assertEqual(l, [1, 2, 3])
self.assertEqual(m, [1, 2, 3])
self.assertEqual(i.next(), 1)
self.assertEqual(i.next(), 2)
self.assertEqual(i.next(), 3)
self.assertIs(i.next(), i)
#-def
def test_HashMap(self):
d = {'a': '1', 1: 'b', "xy": 0.25}
c = (lambda x: {1: 0, 'a': 1, "xy": 2}.get(x, -1))
h = HashMap(d)
hh = HashMap(h)
i = hh.iterator()
k = list(hh.keys())
k.sort(key=c)
l = []
self.assertEqual(h, d)
self.assertEqual(hh, d)
x = i.next()
self.assertTrue(x in k and x not in l)
l.append(x)
x = i.next()
self.assertTrue(x in k and x not in l)
l.append(x)
x = i.next()
self.assertTrue(x in k and x not in l)
l.append(x)
self.assertIs(i.next(), i)
l.sort(key=c)
self.assertEqual(l, k)
self.assertEqual(l, [1, 'a', "xy"])
#-def
#-class
class TestUserTypeCase(unittest.TestCase):
def test_equality(self):
a = UserType().set_location("a", 1, 2)
b = UserType().set_location("b", 1, 2)
c = UserType().set_location("a", 1, 2)
self.assertNotEqual(a, 1)
self.assertEqual(a, a)
self.assertNotEqual(a, b)
self.assertEqual(a, c)
#-def
def test_UserType(self):
p = CommandProcessor()
self.assertTrue(UserType().to_bool(p))
with self.assertRaises(CommandError):
UserType().to_int(p)
with self.assertRaises(CommandError):
UserType().to_float(p)
with self.assertRaises(CommandError):
UserType().to_str(p)
with self.assertRaises(CommandError):
UserType().to_pair(p)
with self.assertRaises(CommandError):
UserType().to_list(p)
with self.assertRaises(CommandError):
UserType().to_hash(p)
with self.assertRaises(CommandError):
UserType().do_visit(p, (lambda x: x), 0, 1)
#-def
#-class
class TestExceptionClassCase(unittest.TestCase):
def setUp(self):
self.e0 = ExceptionClass('BaseException', '::BaseException', None)
self.e01 = ExceptionClass('Exception', '::Exception', self.e0)
self.e02 = ExceptionClass('SystemError', '::SystemError', self.e0)
self.e011 = ExceptionClass('NameError', '::NameError', self.e01)
self.e012 = ExceptionClass('TypeError', '::TypeError', self.e01)
#-def
def test_members(self):
self.assertEqual(self.e0.qname, '::BaseException')
#-def
def test_getters(self):
self.assertIsNone(self.e0.base())
self.assertIsInstance(self.e01.base(), ExceptionClass)
self.assertEqual(str(self.e01), 'Exception')
self.assertIs(self.e011.base(), self.e01)
#-def
def test_isderived(self):
self.assertTrue(isderived(self.e0, self.e0))
self.assertTrue(isderived(self.e01, self.e0))
self.assertTrue(isderived(self.e02, self.e0))
self.assertTrue(isderived(self.e011, self.e0))
self.assertTrue(isderived(self.e012, self.e0))
self.assertTrue(isderived(self.e01, self.e01))
self.assertTrue(isderived(self.e011, self.e01))
self.assertTrue(isderived(self.e012, self.e01))
self.assertTrue(isderived(self.e011, self.e011))
self.assertTrue(isderived(self.e012, self.e012))
self.assertTrue(isderived(self.e02, self.e02))
self.assertFalse(isderived(self.e0, self.e01))
self.assertFalse(isderived(self.e0, self.e02))
self.assertFalse(isderived(self.e0, self.e011))
self.assertFalse(isderived(self.e0, self.e012))
self.assertFalse(isderived(self.e01, self.e011))
self.assertFalse(isderived(self.e01, self.e012))
self.assertFalse(isderived(self.e01, self.e02))
self.assertFalse(isderived(self.e02, self.e01))
self.assertFalse(isderived(self.e02, self.e011))
self.assertFalse(isderived(self.e02, self.e012))
self.assertFalse(isderived(self.e011, self.e02))
self.assertFalse(isderived(self.e012, self.e02))
self.assertFalse(isderived(self.e011, self.e012))
self.assertFalse(isderived(self.e012, self.e011))
self.assertFalse(isderived(self.e011, 2))
self.assertFalse(isderived((), self.e012))
#-def
#-class
class TestTracebackCase(unittest.TestCase):
def test_traceback_methods(self):
testdata = [(
[],
Location(),
"In <main>:\n" \
">"
), (
[],
Location("foo.g", 1, 1),
"In <main>:\n" \
"> At [\"foo.g\":1:1]:"
), (
[ "cmd0" ],
Location(),
"In cmd0:\n" \
">"
), (
[ "cmd1" ],
Location("foo.g", 2, 3),
"In cmd1:\n" \
"> At [\"foo.g\":2:3]:"
), (
[ "A", "B", "C" ],
Location(),
"In A\n" \
"| from B\n" \
"| from C:\n" \
">"
), (
[ "f", "g", "h" ],
Location("foo.g", 4, 7),
"In f\n" \
"| from g\n" \
"| from h:\n" \
"> At [\"foo.g\":4:7]:"
)]
for i, l, r in testdata:
stack = []
for cn in i:
stack.append(PseudoContext(cn, Location()))
stack.append(PseudoContext("<cmd>", l, False))
tb = Traceback(stack)
self.assertEqual(str(tb), r)
#-def
#-class
class TestProcedureTemplateCase(unittest.TestCase):
def test_Procedure(self):
name, qname, bvars, params, vararg, body, outer = \
"proc", "::proc", ['x'], ['y', 'z'], True, [], [[]]
proc = Procedure(name, qname, bvars, params, vararg, body, outer)
_name, _qname, _bvars, _params, _vararg, _body, _outer = proc
self.assertEqual(_name, name)
self.assertEqual(_qname, qname)
self.assertEqual(_bvars, bvars)
self.assertEqual(_params, params)
self.assertEqual(_vararg, vararg)
self.assertEqual(_body, body)
self.assertEqual(_outer, outer)
#-def
#-class
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestLocationCase))
suite.addTest(unittest.makeSuite(TestEvaluableCase))
suite.addTest(unittest.makeSuite(TestIteratorCase))
suite.addTest(unittest.makeSuite(TestIterableCase))
suite.addTest(unittest.makeSuite(TestUserTypeCase))
suite.addTest(unittest.makeSuite(TestExceptionClassCase))
suite.addTest(unittest.makeSuite(TestTracebackCase))
suite.addTest(unittest.makeSuite(TestProcedureTemplateCase))
return suite
#-def
|
from django.contrib import admin
from posts.models import Auther, Content
class ContentInline(admin.TabularInline):
model = Content
extra = 1
class AutherAdmin(admin.ModelAdmin):
list_display = ['name','topic','pub_date']
list_filter = ['pub_date']
search_fields = ['topic']
inlines = [ContentInline]
class Media:
js = ('js/admin/place.js',)
admin.site.register(Auther, AutherAdmin)
admin.site.register(Content)
|
# Chap06/stack_classification_ui.py
import sys
import json
import pickle
from argparse import ArgumentParser
def get_parser():
parser = ArgumentParser()
parser.add_argument('--model')
return parser
def exit():
print("Goodbye.")
sys.exit()
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
with open(args.model, 'rb') as f:
model = pickle.load(f)
classifier = model['classifier']
vectorizer = model['vectorizer']
mlb = model['mlb']
while True:
print("Type your question, or type \"exit\" to quit.")
user_input = input('> ')
if user_input == 'exit':
exit()
else:
X = vectorizer.transform([user_input])
print("Question: {}".format(user_input))
prediction = classifier.predict(X)
labels = mlb.inverse_transform(prediction)[0]
labels = ', '.join(labels)
if labels:
print("Predicted labels: {}".format(labels))
else:
print("No label available for this question")
|
from hypothesis import given
import hypothesis.strategies as st
from volga.fields import Int, Bool, Float, Null, Str
from volga.json import deserialize
from volga.exceptions import ParsingError
import json
import pytest # type: ignore
@given(st.integers())
def test_deserialize_int(x: int):
assert deserialize(str(x), Int) == x
@given(st.booleans())
def test_deserialize_bool(x: bool):
assert deserialize(json.dumps(x), Bool) == x
@given(st.floats(allow_infinity=False, allow_nan=False))
def test_deserialize_float(x: float):
assert deserialize(json.dumps(x), Float) == x
# TODO failing for the edge case: x = '"'
# @given(st.text())
def test_deserialize_str():
assert deserialize(json.dumps("hello"), Str) == "hello"
# TODO fix the way we check for equality
@given(st.none())
def test_deserialize_none(x: None):
assert deserialize(json.dumps(x), Null) == None
def test_parsing_error_int():
with pytest.raises(ParsingError): # type: ignore
deserialize("Not an int", Int)
def test_parsing_error_bool():
with pytest.raises(ParsingError): # type: ignore
deserialize("Not a bool", Bool)
def test_parsing_error_float():
with pytest.raises(ParsingError): # type: ignore
deserialize("Not a float", Float)
def test_parsing_error_str():
with pytest.raises(ParsingError): # type: ignore
deserialize(0, Str) # type: ignore
|
from google.protobuf.descriptor import FieldDescriptor
from protobuf_serialization.constants import VALUE_TYPES
from protobuf_serialization.deserialization.utils import proto_timestamp_to_datetime
def protobuf_to_dict(proto, dict_cls=dict, omit_keys_if_null=False, fields=None):
if fields:
fields = set(fields)
result = dict_cls()
fields_by_name = proto.DESCRIPTOR.fields_by_name
for name, field in fields_by_name.items():
if fields and name not in fields:
continue
if field.type == FieldDescriptor.TYPE_MESSAGE:
if _is_map_field(field):
# Map types have to be handled differently, HasField won't work.
# For now treat as dict, which handles string->string ok
value = dict(getattr(proto, name))
elif field.label == FieldDescriptor.LABEL_REPEATED:
# Run this after the map check, since map fields also have LABEL_REPEATED..
raw_values = getattr(proto, name)
value = [
convert_message_type(
raw_value, field.message_type.full_name,
extra_args=dict(omit_keys_if_null=omit_keys_if_null, dict_cls=dict_cls)
) for raw_value in raw_values
]
elif proto.HasField(name):
# This only works for singular (non-repeated), non-map types
raw_value = getattr(proto, name)
value = convert_message_type(
raw_value, field.message_type.full_name,
extra_args=dict(omit_keys_if_null=omit_keys_if_null, dict_cls=dict_cls)
)
else:
value = None
else:
value = getattr(proto, name)
if not (omit_keys_if_null and value is None):
result[name] = value
return result
def _is_map_field(field):
"""
Detection logic borrowed from https://github.com/kaporzhu/protobuf-to-dict/blob/master/protobuf_to_dict/convertor.py
"""
return (
field.type == FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry
)
def convert_message_type(value, type_name, extra_args=None):
extra_args = extra_args or {}
# Assume value is not None
if type_name == 'google.protobuf.Timestamp':
return proto_timestamp_to_datetime(value)
elif type_name in VALUE_TYPES:
return value.value
else:
return protobuf_to_dict(value, **extra_args)
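# --- Minimal hedged sketch (illustrative only): flattening a well-known protobuf message.
# --- Timestamp has only scalar fields, so protobuf_to_dict returns them directly.
# from google.protobuf.timestamp_pb2 import Timestamp
# ts = Timestamp(seconds=1600000000, nanos=500)
# protobuf_to_dict(ts)  # -> {'seconds': 1600000000, 'nanos': 500}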
|
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue # pylint: disable=import-error
import traceback
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
import dnnlib.tflib as tflib
import h5py
#----------------------------------------------------------------------------
def error(msg):
print('Error: ' + msg)
exit(1)
#----------------------------------------------------------------------------
class TFRecordExporter:
def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
self.tfrecord_dir = tfrecord_dir
self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
self.expected_images = expected_images
self.cur_images = 0
self.shape = None
self.resolution_log2 = None
self.tfr_writers = []
self.print_progress = print_progress
self.progress_interval = progress_interval
if self.print_progress:
print('Creating dataset "%s"' % tfrecord_dir)
if not os.path.isdir(self.tfrecord_dir):
os.makedirs(self.tfrecord_dir)
assert os.path.isdir(self.tfrecord_dir)
def close(self):
if self.print_progress:
print('%-40s\r' % 'Flushing data...', end='', flush=True)
for tfr_writer in self.tfr_writers:
tfr_writer.close()
self.tfr_writers = []
if self.print_progress:
print('%-40s\r' % '', end='', flush=True)
print('Added %d images.' % self.cur_images)
def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
order = np.arange(self.expected_images)
np.random.RandomState(123).shuffle(order)
return order
def add_image_complex(self, img):
print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
if self.shape is None:
self.shape = img.shape
self.resolution_log2 = int(np.log2(self.shape[1]))
assert self.shape[0] in [1, 3]
assert self.shape[1] == self.shape[2]
assert self.shape[1] == 2**self.resolution_log2
tfr_opt = tf.compat.v1.io.TFRecordOptions(tf.compat.v1.io.TFRecordCompressionType.NONE)
for lod in range(self.resolution_log2 - 1):
tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
self.tfr_writers.append(tf.compat.v1.io.TFRecordWriter(tfr_file, tfr_opt))
assert img.shape == self.shape
for lod, tfr_writer in enumerate(self.tfr_writers):
if lod:
img = img.astype(np.float32)
img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
ex = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=img.shape)),
'data': tf.train.Feature(float_list=tf.train.FloatList(value=img.flatten()))}))
tfr_writer.write(ex.SerializeToString())
self.cur_images += 1
def add_image_magnitude(self, img):
print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
if self.shape is None:
self.shape = img.shape
self.resolution_log2 = int(np.log2(self.shape[1]))
assert self.shape[0] in [1, 3]
assert self.shape[1] == self.shape[2]
assert self.shape[1] == 2**self.resolution_log2
tfr_opt = tf.compat.v1.python_io.TFRecordOptions(tf.compat.v1.python_io.TFRecordCompressionType.NONE)
for lod in range(self.resolution_log2 - 1):
tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
self.tfr_writers.append(tf.compat.v1.python_io.TFRecordWriter(tfr_file, tfr_opt))
assert img.shape == self.shape
for lod, tfr_writer in enumerate(self.tfr_writers):
if lod:
img = img.astype(np.float32)
img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
quant = np.rint(img*255).clip(0, 255).astype(np.uint8)
ex = tf.compat.v1.train.Example(features=tf.compat.v1.train.Features(feature={
'shape': tf.compat.v1.train.Feature(int64_list=tf.compat.v1.train.Int64List(value=quant.shape)),
'data': tf.compat.v1.train.Feature(bytes_list=tf.compat.v1.train.BytesList(value=[quant.tostring()]))}))
tfr_writer.write(ex.SerializeToString())
self.cur_images += 1
def add_labels(self, labels):
if self.print_progress:
print('%-40s\r' % 'Saving labels...', end='', flush=True)
assert labels.shape[0] == self.cur_images
with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
np.save(f, labels.astype(np.float32))
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#----------------------------------------------------------------------------
def create_from_hdf5_complex(tfrecord_dir, hdf5_filename1, hdf5_filename2, hdf5_filename3, h5_key, label_index = None, shuffle = 0):
hdf5_file1 = h5py.File(hdf5_filename1, 'r')
hdf5_data1 = hdf5_file1[h5_key]
hdf5_file2 = h5py.File(hdf5_filename2, 'r')
hdf5_data2 = hdf5_file2[h5_key]
hdf5_file3 = h5py.File(hdf5_filename3, 'r')
hdf5_data3 = hdf5_file3[h5_key]
file_names = [hdf5_data1,hdf5_data2,hdf5_data3]
n = file_names[0].shape[0] + file_names[1].shape[0] + file_names[2].shape[0]
with TFRecordExporter(tfrecord_dir, n) as tfr:
for file_index in range(3):
order = np.arange(file_names[file_index].shape[0])
if shuffle:
np.random.shuffle(order)
for idx in range(file_names[file_index].shape[0]):
hdf5_data = file_names[file_index]
temp_slice = hdf5_data[order[idx]]
real = np.transpose(temp_slice['real'])
imag = np.transpose(temp_slice['imag'])
ones = np.ones([512,512])
w = int((512 - real.shape[0]) /2)
h = int((512 - real.shape[1]) /2)
abs_image_max = np.max(np.abs(real + 1j * imag))
real = real / abs_image_max
imag = imag / abs_image_max
real = np.pad(real, ((w,w),(h,h)), mode='constant', constant_values=0)
imag = np.pad(imag, ((w,w),(h,h)), mode='constant', constant_values=0)
tfr.add_image_complex(np.stack([real,imag,ones],axis=0))
if label_index is not None:
onehot = np.zeros((n, 3), dtype=np.float32)
onehot[:,label_index] = 1
tfr.add_labels(onehot)
def create_from_hdf5_magnitude(tfrecord_dir, hdf5_filename, h5_key, label_index = None, shuffle = 0):
with h5py.File(hdf5_filename, 'r') as hdf5_file:
hdf5_data = hdf5_file[h5_key]
with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
order = np.arange(hdf5_data.shape[0])
if shuffle:
np.random.shuffle(order)
for idx in range(order.size):
temp_slice = hdf5_data[order[idx],:,:]
w = int((512 - temp_slice.shape[0]) /2)
h = int((512 - temp_slice.shape[1]) /2)
temp_slice = np.pad(temp_slice, ((w,w),(h,h)), mode='constant', constant_values=0)
temp = np.expand_dims(np.transpose(temp_slice), axis=0)
tfr.add_image_magnitude(temp)
if label_index is not None:
onehot = np.zeros((hdf5_data.shape[0], 3), dtype=np.float32)
onehot[:,label_index] = 1
tfr.add_labels(onehot)
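# --- Minimal hedged sketch (illustrative only): exporting one HDF5 dataset of magnitude
# --- images to TFRecords. The output directory, file path and dataset key are placeholders.
# create_from_hdf5_magnitude('datasets/mri-magnitude', 'data/train_magnitude.h5',
#                            h5_key='images', label_index=0, shuffle=1)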
|
import os
import glob
import json
default_N_assign_more_sounds = 10
PATH_TO_FSL10K = "/static/FSL10K"
PATH_TO_AC_ANALYSIS = os.path.join(PATH_TO_FSL10K, 'ac_analysis/')
PATH_TO_METADATA = os.path.join(PATH_TO_FSL10K, 'fs_analysis/')
def compile_annotated_sounds(annotations_path):
annotated_sounds = {}
annotation_files = glob.glob(annotations_path + '/*/*.json', recursive=True)
for an_file in annotation_files:
an = json.load(open(an_file,'rb'))
#remove the path, then the extension and then the "sound-" string
sound_id = os.path.basename(os.path.splitext(an_file)[0]).replace('sound-','')
if sound_id in annotated_sounds:
annotated_sounds[sound_id] = { "genres" : list(set(an["genres"] + annotated_sounds[sound_id]["genres"])),
"num_annotations" : annotated_sounds[sound_id]["num_annotations"] + 1 }
else:
annotated_sounds[sound_id] = { "genres" : an["genres"],
"num_annotations" : 1 }
return annotated_sounds
def collect_assigned_sounds():
assigned_sounds = []
#for key in sound_id_user.keys():
user_path = os.path.join(PATH_TO_FSL10K, 'annotators/')
for user_file in os.listdir(user_path):
if user_file.endswith(".json"):
assigned_user_sounds = json.load(open(os.path.join(user_path,user_file), 'rb'))
for sound in assigned_user_sounds:
assigned_sounds.append(sound)
return assigned_sounds
def collect_authors(sounds_annotated,metadata):
author_sounds = {}
for sound in sounds_annotated:
author = metadata[sound]["username"]
if author in author_sounds.keys():
author_sounds[author] = author_sounds[author] + 1
else:
author_sounds[author] = 1
return author_sounds
def collect_packs(sounds_annotated,metadata):
pack_sounds = {}
for sound in sounds_annotated:
pack = metadata[sound]["pack_name"]
if pack is not None:
if pack in pack_sounds.keys():
pack_sounds[pack] = pack_sounds[pack] + 1
else:
pack_sounds[pack] = 1
return pack_sounds
def collect_genres(sounds_annotated):
genre_sounds = {}
for sound in sounds_annotated:
genres = sounds_annotated[sound]["genres"]
for genre in genres:
if genre in genre_sounds.keys():
genre_sounds[genre] = genre_sounds[genre] + 1
else:
genre_sounds[genre] = 1
return genre_sounds
def genre_importance(sound_genres, genre_sounds):
importance = 0
genre_sounds_sorted = sorted(genre_sounds.items(), key=lambda x: x[1])
less_annotated_sounds = [genre_sounds_sorted[0][0], genre_sounds_sorted[1][0]]
for genre in sound_genres:
if genre in less_annotated_sounds:
importance = importance + 1
return importance
def discard_packs(all_sound_ids, metadata):
# Packs listed here are excluded from assignment; the list is currently empty.
packs_to_discard = []
# Build a new list rather than mutating all_sound_ids while iterating over it.
return [sound for sound in all_sound_ids if metadata[sound]["pack"] not in packs_to_discard]
def select_relevant_sounds(annotations_path, metadata, genre_metadata, all_sound_ids, N=default_N_assign_more_sounds):
sounds_annotated = compile_annotated_sounds(annotations_path)
#These weights are used to create an irrelevance metric for each loop
#based on the existing annotated loops
#If a loop has been annotated already, multiply the number of times it has been
#annotated by this weight. A large value (500) makes sure that we first assign sounds
#which haven't been annotated yet
anno_weight = 500
#The number of times the author has been annotated should lightly influence the overall
#irrelevance score. We chose a lower value which allows for more important metrics
#such as the number of times annotated to predominate
auth_weight=1
#Same applies to the number of times a pack has been annotated
pack_weight=1
#The genre importance forces the algorithm to fetch sounds from the less annotated
#genres. As gen_importance is proportional to the importance of the loop to be annotated
#we select a negative weight, to make the loops "less irrelevant"
genre_weight=-10
authors_sounds = collect_authors(sounds_annotated,metadata)
pack_sounds = collect_packs(sounds_annotated,metadata)
genre_sounds = collect_genres(sounds_annotated)
sounds_to_rate = discard_packs(all_sound_ids,metadata)
sound_irrelevance_list = []
assigned_sounds = collect_assigned_sounds()
for sound in sounds_to_rate:
num_annotated = 0
if sounds_annotated.get(sound) != None:
num_annotated = sounds_annotated[sound]["num_annotations"]
num_author = 0
if authors_sounds.get(metadata[sound]["username"]) != None:
num_author = authors_sounds[metadata[sound]["username"]]
num_pack = 0
if pack_sounds.get(metadata[sound]["pack"]) != None:
num_pack = pack_sounds[metadata[sound]["pack"]]
num_assigned = assigned_sounds.count(sound)
gen_importance = genre_importance(genre_metadata.get(sound,[]), genre_sounds)
irrelevance = (num_annotated+num_assigned)*anno_weight + num_author*auth_weight + num_pack*pack_weight + gen_importance*genre_weight
ac_analysis_filename = metadata[sound]["preview_url"]
base_name = ac_analysis_filename[ac_analysis_filename.rfind("/"):ac_analysis_filename.find("-hq")]
ac_analysis_filename = base_name + "_analysis.json"
if os.path.exists(PATH_TO_AC_ANALYSIS + ac_analysis_filename):
sound_irrelevance_list.append((sound,irrelevance))
sound_irrelevance_sorted = sorted(sound_irrelevance_list, key=lambda x: x[1])
sound_irrelevance_ids = [lis[0] for lis in sound_irrelevance_sorted]
return sound_irrelevance_ids[0:N]
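# --- Minimal hedged sketch (illustrative only): picking the next batch of sounds to assign.
# --- The annotations path and the metadata/genre_metadata dicts are placeholders for the
# --- real inputs loaded elsewhere in the application.
# next_ids = select_relevant_sounds('/static/FSL10K/annotations', metadata, genre_metadata,
#                                   list(metadata.keys()), N=default_N_assign_more_sounds)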
|
import tensorflow as tf
import horovod.keras as hvd
import model
import keras
def run():
# Horovod: initialize Horovod.
hvd.init()
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
data = model.dataset()
complete_model = model.model()
loss = model.loss()
opt = model.opt()
opt = hvd.DistributedOptimizer(opt)
complete_model.compile(loss=loss,
optimizer=opt,
metrics=["accuracy"]
)
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
]
if hvd.rank() == 0:
callbacks.append(keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))
if not model.EPOCHS:
model.EPOCHS = 64
if not model.BATCH_SIZE:
model.BATCH_SIZE = None
if not model.SHUFFLE:
model.SHUFFLE = True
if not model.CLASS_WEIGHT:
model.CLASS_WEIGHT = None
if not model.SAMPLE_WEIGHT:
model.SAMPLE_WEIGHT = None
if not model.INITIAL_EPOCH:
model.INITIAL_EPOCH = 0
if not model.STEP_PER_EPOCH:
model.STEP_PER_EPOCH = None
if not model.MAX_QUEUE_SIZE:
model.MAX_QUEUE_SIZE = 10
complete_model.fit(data, epochs=model.EPOCHS, verbose=0, batch_size=model.BATCH_SIZE,
shuffle=model.SHUFFLE, class_weight=model.CLASS_WEIGHT,
sample_weight=model.SAMPLE_WEIGHT, initial_epoch=model.INITIAL_EPOCH,
steps_per_epoch=model.STEP_PER_EPOCH, max_queue_size=model.MAX_QUEUE_SIZE,
callbacks=callbacks
)
# Horovod: save the final model only on rank 0 to avoid concurrent writes to the same file.
if hvd.rank() == 0:
complete_model.save("./model.h5")
run()
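# --- Hedged launch note (illustrative only): this script is normally started with the
# --- horovodrun CLI, one process per GPU; the script filename below is a placeholder.
# #   horovodrun -np 4 python train_hvd.py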
|
# Copyright (c) 2019 Martin Olejar
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
import pytest
from mboot.properties import Version, BoolValue, EnumValue, IntValue, VersionValue, ReservedRegionsValue, \
AvailableCommandsValue, AvailablePeripheralsValue, ExternalMemoryAttributesValue, \
DeviceUidValue, FlashReadMargin, IrqNotifierPinValue, PfrKeystoreUpdateOpt, \
parse_property_value, PropertyTag
def test_version_class():
version = Version('K0.1.2')
assert version.mark == 'K'
assert version.major == 0
assert version.minor == 1
assert version.fixation == 2
assert version.to_str() == 'K0.1.2'
assert version.to_str(no_mark=True) == '0.1.2'
assert version.to_int() == 0x4B000102
assert version.to_int(no_mark=True) == 0x00000102
assert version > Version(major=0, minor=1, fixation=1)
assert version >= Version('0.1.1')
assert version < Version('0.2.1')
assert version <= Version('0.2.1')
assert version != Version(0x00000102)
assert version == Version(0x4B000102)
assert str(version)
assert repr(version)
with pytest.raises(TypeError):
_ = Version(0.5)
def test_none_value():
value = parse_property_value(1000, [0])
assert value is None
def test_bool_value():
value = parse_property_value(PropertyTag.VERIFY_WRITES, [0])
assert isinstance(value, BoolValue)
assert value.tag == PropertyTag.VERIFY_WRITES
assert value.name == PropertyTag[PropertyTag.VERIFY_WRITES]
assert value.desc == PropertyTag.desc(PropertyTag.VERIFY_WRITES)
assert not value
assert value.to_int() == 0
assert value.to_str() == 'OFF'
def test_enum_value():
value = parse_property_value(PropertyTag.FLASH_READ_MARGIN, [0])
assert isinstance(value, EnumValue)
assert value.tag == PropertyTag.FLASH_READ_MARGIN
assert value.name == PropertyTag[PropertyTag.FLASH_READ_MARGIN]
assert value.desc == PropertyTag.desc(PropertyTag.FLASH_READ_MARGIN)
assert value.value == 0
assert value.to_int() == 0
assert value.to_str() == 'NORMAL'
def test_int_value():
value = parse_property_value(PropertyTag.FLASH_SIZE, [1024])
assert isinstance(value, IntValue)
assert value.tag == PropertyTag.FLASH_SIZE
assert value.name == PropertyTag[PropertyTag.FLASH_SIZE]
assert value.desc == PropertyTag.desc(PropertyTag.FLASH_SIZE)
assert value.value == 1024
assert value.to_str() == '1.0 kiB'
def test_version_value():
value = parse_property_value(PropertyTag.CURRENT_VERSION, [0x4B000102])
assert isinstance(value, VersionValue)
assert value.tag == PropertyTag.CURRENT_VERSION
assert value.name == PropertyTag[PropertyTag.CURRENT_VERSION]
assert value.desc == PropertyTag.desc(PropertyTag.CURRENT_VERSION)
assert value.value == Version(0x4B000102)
assert value.to_int() == 0x4B000102
assert value.to_str() == 'K0.1.2'
def test_device_uid_value():
value = parse_property_value(PropertyTag.UNIQUE_DEVICE_IDENT, [0x4B000102, 0x4B000102])
assert isinstance(value, DeviceUidValue)
assert value.tag == PropertyTag.UNIQUE_DEVICE_IDENT
assert value.name == PropertyTag[PropertyTag.UNIQUE_DEVICE_IDENT]
assert value.desc == PropertyTag.desc(PropertyTag.UNIQUE_DEVICE_IDENT)
assert value.value == 0x4B0001024B000102
assert value.to_str() == '4B0001024B000102'
def test_reserved_regions_value():
pass
|
current_n = '1113222113'
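# Look-and-say puzzle: repeatedly "read off" runs of equal digits in current_n,
# apply the transformation 40 times, and print the length of the final string.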
for n in xrange(40):
current_count = 0
next_n = ''
prev_d = current_n[0]
for i,d in enumerate(current_n):
if d == prev_d:
current_count += 1
if i == len(current_n)-1:
next_n += str(current_count) + prev_d
else:
next_n += str(current_count) + prev_d
current_count = 1
prev_d = d
if i == len(current_n)-1:
next_n += str(current_count) + prev_d
current_n = next_n
print len(current_n)
|
class NotepadqqMessageError(RuntimeError):
"""An error from Notepadqq"""
class ErrorCode:
NONE = 0
INVALID_REQUEST = 1
INVALID_ARGUMENT_NUMBER = 2
INVALID_ARGUMENT_TYPE = 3
OBJECT_DEALLOCATED = 4
OBJECT_NOT_FOUND = 5
METHOD_NOT_FOUND = 6
def __init__(self, error_code, error_string):
self._error_code = error_code
self._error_string = error_string
super(NotepadqqMessageError, self).__init__(self.description)
@property
def error_code(self):
return self._error_code
@property
def error_string(self):
return self._error_string
@property
def description(self):
if self._error_code == self.ErrorCode.NONE: descr = "None"
elif self._error_code == self.ErrorCode.INVALID_REQUEST: descr = "Invalid request"
elif self._error_code == self.ErrorCode.INVALID_ARGUMENT_NUMBER: descr = "Invalid argument number"
elif self._error_code == self.ErrorCode.INVALID_ARGUMENT_TYPE: descr = "Invalid argument type"
elif self._error_code == self.ErrorCode.OBJECT_DEALLOCATED: descr = "Object deallocated"
elif self._error_code == self.ErrorCode.OBJECT_NOT_FOUND: descr = "Object not found"
elif self._error_code == self.ErrorCode.METHOD_NOT_FOUND: descr = "Method not found"
else: descr = "Unknown error"
if self._error_string is not None and self._error_string != "":
descr += ': ' + self._error_string
return descr
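# --- Minimal hedged sketch (illustrative only): constructing and inspecting the error.
# err = NotepadqqMessageError(NotepadqqMessageError.ErrorCode.METHOD_NOT_FOUND, "runCommand")
# print(err.description)  # -> "Method not found: runCommand"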
|
"""
This file defines Petra expressions.
"""
import re
from abc import ABC, abstractmethod
from llvmlite import ir
from typing import Optional
from .codegen import CodegenContext
from .validate import ValidateError
from .symbol import Symbol
from .type import Type
from .typecheck import TypeContext, TypeCheckError
class Expr(ABC):
"""
An expression. Expressions have a type and can be evaluated.
"""
@abstractmethod
def get_type(self) -> Type:
"""
Returns the type of the expression.
This function can only be called after typecheck() has been called on self.
"""
pass
@abstractmethod
def validate(self) -> None:
"""
Validate the expression.
This performs structural checks on the validity of an expression (e.g. an
        arithmetic expression has a valid operator). It does not perform semantic checks.
"""
pass
@abstractmethod
def typecheck(self, ctx: TypeContext) -> None:
"""Type check the expression.
This performs semantic checks on the validity of an expression (e.g. the right and
left hand sides of an arithmetic expression are of compatible types). After
calling this method it is possible to call get_type() to determine the type of the
expression.
"""
pass
@abstractmethod
def codegen(self, builder: ir.IRBuilder, ctx: CodegenContext) -> ir.Value:
"""Code generate the expression.
Produces LLVM code to evaluate the expression.
"""
pass
class Var(Expr):
"""
A Petra variable. Variables must be introduced in a Declare statement before
use.
"""
def __init__(self, symbol: Symbol):
self.symbol = symbol
self.validate()
def get_type(self) -> Type:
return self.symbol.get_type()
def validate(self) -> None:
pass
def typecheck(self, ctx: TypeContext) -> None:
if self.symbol not in ctx.variables:
raise TypeCheckError("Variable '%s' not defined" % self.symbol)
def codegen(self, builder: ir.IRBuilder, ctx: CodegenContext) -> ir.Value:
        return builder.load(ctx.vars[self.symbol])
|
"""Test for single coil files from fastmri."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import fastmri
class FastMRITest(testing.DatasetBuilderTestCase):
DATASET_CLASS = fastmri.FastMRI
SPLITS = {
"train": 1,
"test": 1
}
DL_EXTRACT_RESULT = ['extracted/singlecoil_train.tar.gz',
'extracted/singlecoil_test_v2.tar.gz']
BUILDER_CONFIG_NAMES_TO_TEST = ["singlecoil"]
if __name__ == "__main__":
testing.test_main()
|
from os import getenv
from kombu import Queue, Exchange
REDIS_HOST = getenv('REDIS_HOST')
REDIS_PORT = getenv('REDIS_PORT')
"""
=========
Notes:
=========
CPU Bound task
celery -A <task> worker -l info -n <name of task> -c 4 -Ofair -Q <queue name> --without-gossip --without-mingle --without-heartbeat
I/O task
celery -A <task> worker -l info -n <name of task> -Ofair -Q <queue name> -P eventlet -c 1000 --without-gossip --without-mingle --without-heartbeat
"""
broker_url = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
result_backend = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
result_exchange = 'default'
task_track_started = True
task_ignore_result = False # allows flower to check results
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['application/json', 'json', 'yaml']
timezone = 'UTC'
worker_send_task_events = True
task_time_limit = 2400 # task will be killed after 40 minutes
task_soft_time_limit = 1800 # task will raise SoftTimeLimitExceeded after 30 minutes
task_acks_late = True # task messages will be acknowledged after the task has been executed, not just before (the default behavior).
worker_prefetch_multiplier = 10 # each worker prefetches 10 tasks from the queue at a time, which can improve throughput
# exchanges
# default_exchange = Exchange('default', type='direct')
# media_exchange = Exchange('media', type='direct')
# queue
task_default_queue = 'default'
task_default_routing_key = 'default'
task_queue_max_priority = 10
task_default_priority = 5
# broker_transport_options = {'queue_order_strategy': 'priority',} # used for redis priority; may not be ideal
# task_queues = (
# Queue('default', routing_key='default'), # Queue('default', default_exchange, routing_key='task.#'),
# Queue('alpha_tasks', routing_key='alpha.#', queue_arguments={'x-max-priority': 10}),
# Queue('omega_tasks', routing_key='omega.#', queue_arguments={'x-max-priority': 1}),
# )
# https://docs.celeryproject.org/en/stable/userguide/routing.html
# task_routes = {
# 'alpha.*': {
# 'queue': 'alpha_tasks',
# 'routing_key': 'alpha',
# },
# 'omega.*': {
# 'queue': 'omega_tasks',
# 'routing_key': 'omega',
# },
#}
# task_default_exchange_type = 'default'
task_default_exchange_type = 'direct'
|
#!/usr/bin/env python
import sys
import simplejson as json
import eiii_crawler_server as c
import psycopg2
import psycopg2.extras
psycopg2.extras.register_uuid()
import uuid
"""
Use this script to reconstruct the crawler_result from the file dumped by the crawler.
Supply the file with the crawler stats as well as the site_result_uid for the site
result you want to update.
An additional, third parameter can be supplied to force overwriting of preexisting
crawler result.
Example:
% eiii-crawler/eiii_crawler/reconstruct_crawl_results.py \
/home/tingtun/.eiii/crawler/stats/a3282a48-08eb-4595-a5fc-ea2670de3ad3.json \
'4b237224-039e-47a5-99a2-cb2dd12634d9'
UPDATE 1
commit
"""
def make_url_graph(crawl_graph):
return c.make_directed_graph(c.fix_url_graph(crawl_graph))
if __name__ == "__main__":
crawler_stats = sys.argv[1]
site_result_uid = uuid.UUID(sys.argv[2])
force = len(sys.argv) == 4
conn=psycopg2.connect("dbname=eiii")
cur=conn.cursor()
# sanity check
cur.execute("""SELECT site_result_uid
FROM site_results
WHERE site_result_uid=%s
AND crawler_result IS NULL""",
(site_result_uid,))
    if cur.rowcount == 0 and not force:
        print "that site result already has a crawl result."
        exit(1)
stats_dict = json.loads(open(crawler_stats).read())
url_graph = make_url_graph(stats_dict['url_graph'])
crawler_result = json.dumps({'stats': stats_dict, 'result': url_graph})
# print crawler_result
cur.execute("""UPDATE site_results
SET crawler_result=%s
WHERE site_result_uid=%s""",
(crawler_result,site_result_uid))
print cur.statusmessage
# sanity check, again
if cur.statusmessage == 'UPDATE 1':
print 'commit'
conn.commit()
else:
print 'rollback'
conn.rollback()
cur.close()
conn.close()
|
import cv2
import numpy as np
img = cv2.imread('./images/blue_carpet.png')
img_gaussian = cv2.GaussianBlur(img, (13,13), 0)
img_bilateral = cv2.bilateralFilter(img, 13, 70, 50)
cv2.imshow('Input', img)
cv2.imshow('Gaussian filter', img_gaussian)
cv2.imshow('Bilateral filter', img_bilateral)
cv2.waitKey()
|
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import transaction
from gearbox.command import Command
from paste.deploy import loadapp
from webtest import TestApp
from tracim.lib.exception import CommandAbortedError
class BaseCommand(Command):
""" Setup ap at take_action call """
auto_setup_app = True
def run(self, parsed_args):
try:
super().run(parsed_args)
except CommandAbortedError as exc:
if parsed_args.raise_error:
raise
print(exc)
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
"--raise",
            help='Raise CommandAbortedError errors instead of printing their message',
dest='raise_error',
action='store_true',
)
return parser
class AppContextCommand(BaseCommand):
"""
    Command that initializes the app context at the beginning of the take_action method.
"""
def __init__(self, *args, **kwargs):
super(AppContextCommand, self).__init__(*args, **kwargs)
@staticmethod
def _get_initialized_app_context(parsed_args):
"""
:param parsed_args: parsed args (eg. from take_action)
:return: (wsgi_app, test_app)
"""
config_file = parsed_args.config_file
config_name = 'config:%s' % config_file
here_dir = os.getcwd()
# Load locals and populate with objects for use in shell
sys.path.insert(0, here_dir)
# Load the wsgi app first so that everything is initialized right
wsgi_app = loadapp(config_name, relative_to=here_dir, global_conf={
'disable_daemons': 'true',
})
test_app = TestApp(wsgi_app)
# Make available the tg.request and other global variables
tresponse = test_app.get('/_test_vars')
return wsgi_app, test_app
def take_action(self, parsed_args):
super(AppContextCommand, self).take_action(parsed_args)
if self.auto_setup_app:
self._get_initialized_app_context(parsed_args)
def get_parser(self, prog_name):
parser = super(AppContextCommand, self).get_parser(prog_name)
parser.add_argument("-c", "--config",
help='application config file to read (default: development.ini)',
dest='config_file', default="development.ini")
return parser
def run(self, parsed_args):
super().run(parsed_args)
transaction.commit()
class Extender(argparse.Action):
"""
Copied class from http://stackoverflow.com/a/12461237/801924
"""
def __call__(self, parser, namespace, values, option_strings=None):
        # Need None here in case `argparse.SUPPRESS` was supplied for `dest`
dest = getattr(namespace, self.dest, None)
# print dest,self.default,values,option_strings
if not hasattr(dest, 'extend') or dest == self.default:
dest = []
setattr(namespace, self.dest, dest)
# if default isn't set to None, this method might be called
        # with the default as `values` for other arguments which
# share this destination.
parser.set_defaults(**{self.dest: None})
try:
dest.extend(values)
except ValueError:
dest.append(values)
|
from .course import course_service
from .section import section_service
|
def i_love_python():
"""
    He charmed me; now I can't write in any other language :)
"""
return "I love Python!"
def test_function():
assert i_love_python() == "I love Python!"
|
from fastapi import FastAPI,Depends,status,Response,HTTPException
from . import schemas,models
from .database import engine,SessionLocal
from sqlalchemy.orm import Session
from typing import List
app = FastAPI()
models.Base.metadata.create_all(bind=engine)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.post('/blog/', status_code=status.HTTP_201_CREATED)
def create(request:schemas.Blog,db: Session=Depends(get_db)):
new_blog = models.Blog(title=request.title,body=request.body)
db.add(new_blog)
db.commit()
db.refresh(new_blog)
return new_blog
# Getting all blogs from the database
@app.get('/blog',status_code=200, response_model=List[schemas.ShowBlog])
def all_blogs(db: Session=Depends(get_db)):
blogs = db.query(models.Blog).all()
return blogs
# Getting a particular blog by passing its id through the endpoint
@app.get('/blog/{id}',status_code=200,response_model=schemas.ShowBlog)
def show_one(id,response:Response,db: Session=Depends(get_db)):
blog = db.query(models.Blog).filter(models.Blog.id == id).first()
if not blog:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f'The blog with id {id} does not exist')
# response.status_code=status.HTTP_404_NOT_FOUND
# return {'detail':f'The blog with id {id} does not exist'}
return blog
# Deleting a blog
@app.delete('/blog/{id}',status_code=status.HTTP_204_NO_CONTENT)
def delete_blog(id,db: Session=Depends(get_db)):
blog = db.query(models.Blog).filter(models.Blog.id == id)
if not blog.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"The blog with ID {id} does not exist")
blog.delete(synchronize_session = False)
db.commit()
return 'done'
#updating blog contents
@app.put('/blog/{id}',status_code=status.HTTP_202_ACCEPTED)
def update(id,request:schemas.Blog, db: Session = Depends(get_db)):
blog = db.query(models.Blog).filter(models.Blog.id == id)
if not blog.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"The Blog with ID {id} doesn't exist")
blog.update(request.dict())
db.commit()
    return {'detail':f'A Blog with ID : {id}, was updated successfully'}
#deleting all
@app.delete('/blog/',status_code=status.HTTP_204_NO_CONTENT)
def delete_all(db: Session=Depends(get_db)):
    db.query(models.Blog).delete(synchronize_session=False)
db.commit()
return 'done'
## for the user
@app.post('/user/')
def create_user(request: schemas.User,db:Session = Depends(get_db)):
new_user = models.User(name=request.name,email=request.email,password=request.password)
db.add(new_user)
db.commit()
db.refresh(new_user)
return request
|
# Generated by Django 3.0.7 on 2020-08-22 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('staff', '0008_auto_20200822_2133'),
]
operations = [
migrations.AlterField(
model_name='staff',
name='mname',
field=models.CharField(blank=True, max_length=20, verbose_name='Middle Name'),
),
]
|
from rest_framework import serializers
from .models import Articles, Rating, Likes, Comments, Favorites
from django.db.models import Avg
from authors.apps.authentication.serializers import UserSerializer
from authors.apps.profiles.serializers import ProfileSerializer
import readtime
from drf_yasg.utils import swagger_serializer_method
class ArticlesSerializer(serializers.ModelSerializer):
rating_count = serializers.SerializerMethodField(read_only=True, default=0)
avg_rating = serializers.SerializerMethodField(read_only=True, default=0)
author = serializers.SerializerMethodField(read_only=True)
favorited = serializers.SerializerMethodField(
read_only=True, default=False)
favoritesCount = serializers.SerializerMethodField(
read_only=True, default=0)
read_time = serializers.SerializerMethodField()
class Meta:
model = Articles
page_size = serializers.IntegerField()
favorited_by = serializers.CharField()
fields = [
'id',
'title',
'description',
'body',
'image_url',
'author',
'created_at',
'updated_at',
'slug',
'avg_rating',
'rating_count',
'tags',
'favorited',
'favoritesCount',
'read_time',
'flag'
]
read_only_fields = ["id", "author", "slug", "created_at", "avg_rating",
"rating_count"]
def create(self, validated_data):
return Articles.objects.create(**validated_data)
def get_rating_count(self, obj=object):
qs = Rating.objects.filter(article_id=obj.id).count()
return qs
def get_avg_rating(self, obj=object):
qs = Rating.objects.filter(article_id=obj.id).aggregate(Avg('rating'))
if qs['rating__avg'] is None:
return 0
return qs['rating__avg']
def get_author(self, obj):
return {
"username": obj.author.username,
"bio": obj.author.bio,
"image": obj.author.image
}
def get_favoritesCount(self, obj):
count = Favorites.objects.filter(article_id=obj.id).count()
return count
def get_favorited(self, obj):
count = Favorites.objects.filter(article_id=obj.id).count()
if count > 0:
return True
else:
return False
def get_read_time(self, obj):
read_time = str(readtime.of_text(obj.body))
return read_time
class RatingSerializer(serializers.ModelSerializer):
class Meta:
model = Rating
fields = [
'id',
'article_id',
'user_id',
'rating'
]
read_only_fields = ["id", "author", "created_at"]
def create(self, validated_data):
return Rating.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.article_id = validated_data.get(
'article_id', instance.article_id)
instance.user_id = validated_data.get('user_id', instance.user_id)
instance.rating = validated_data.get('rating', instance.rating)
instance.save()
return instance
class LikesSerializer(serializers.ModelSerializer):
like = serializers.IntegerField()
id = serializers.IntegerField(read_only=True)
class Meta:
model = Likes
fields = ('id', 'article', 'user', 'like', 'created_at')
read_only_fields = ("id", "article", "created_at", "user")
def create(self, validated_data):
        return Likes.objects.create(**validated_data)
class TagsSerializer(serializers.ModelSerializer):
class Meta:
model = Articles
page_size = serializers.IntegerField()
fields = [
'id',
'tags'
]
class FavoritesSerializer(serializers.ModelSerializer):
""" Define favourite model serializer"""
article = ArticlesSerializer(many=False, read_only=True, required=False)
class Meta:
model = Favorites
fields = ('id', 'article', 'user', 'created_at')
|
# coding: utf-8
__version__ = '0.0.1'
default_app_config = 'myarticles.apps.AppConfig'
|
import pytest
from web3.utils.abi import (
merge_args_and_kwargs,
)
FUNCTION_ABI = {
"constant": False
,"inputs": [
{"name":"a", "type":"int256"},
{"name":"b", "type":"int256"},
{"name":"c", "type":"int256"},
{"name":"d", "type":"int256"},
],
"name": "testFn",
"outputs": [],
"type":"function",
}
NO_INPUTS_FUNCTION_ABI = {
"constant": False
,"inputs": [],
"name": "testFn",
"outputs": [],
"type":"function",
}
@pytest.mark.parametrize(
'args,kwargs,expected_args',
(
((1, 4, 2, 3), {}, (1, 4, 2, 3)),
((1, 4, 2), {'d': 3}, (1, 4, 2, 3)),
((1, 4), {'d': 3, 'c': 2}, (1, 4, 2, 3)),
((1,), {'d': 3, 'b': 4, 'c': 2}, (1, 4, 2, 3)),
(tuple(), {'d': 3, 'b': 4, 'a': 1, 'c': 2}, (1, 4, 2, 3)),
),
)
def test_merging_of_args_and_kwargs(args, kwargs, expected_args):
actual_args = merge_args_and_kwargs(FUNCTION_ABI, args, kwargs)
assert actual_args == expected_args
def test_merging_of_args_and_kwargs_with_no_inputs():
actual = merge_args_and_kwargs(NO_INPUTS_FUNCTION_ABI, tuple(), {})
assert actual == tuple()
|
"""
Terminal
"""
import src.terminal.utils as terminal_utils
__MESSAGE_ESCAPE = 'До встречи!'  # "See you!"
__MESSAGE_PRESS_ANY_KEY = 'Нажмите клавишу ВВОД, чтобы продолжить...'  # "Press ENTER to continue..."
__MENU_POINT_EXIT = 'Выход'  # "Exit"
def __stop(terminal):
terminal.to_terminal()
terminal.to_terminal(__MESSAGE_ESCAPE)
def run(actions=None, utils=None, stop_handler=None):
if actions is None:
actions = ()
if utils is None:
utils = terminal_utils
if stop_handler is None:
stop_handler = __stop
running = True
menu_actions = ((__MENU_POINT_EXIT, stop_handler),) + actions
counter = 0
while utils.check_is_running(running, counter, menu_actions):
utils.print_menu(menu_actions)
action = utils.select_action(menu_actions, counter)
_, handler = action
handler(utils)
counter += 1
if handler == stop_handler:
running = False
continue
utils.from_terminal(f'\n{__MESSAGE_PRESS_ANY_KEY}\n')
if __name__ == '__main__':
run()
|
from django.contrib import admin
from .models import Category
class CategoryAdmin(admin.ModelAdmin):
pass
admin.site.register(Category, CategoryAdmin)
|
"""
Author Joshua I. Meza Magaña
Date 16/08/2021
Version 1.0.0
A bot which helps with team organization.
"""
import discord
from discord.ext import commands
from dotenv import load_dotenv
import os
from Controller.keepAlive import *
from Model.model import *
from Controller.Commands.commandHelp import *
from Controller.Commands.commandClean import *
from Controller.Commands.commandLogs import *
from Controller.Commands.commandNotebook import *
from Controller.Commands.commandUser import *
from Controller.Commands.commandIntroduction import *
# Variables
model = Model()
# Bot specs
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix=model.getPrefix(), intents=intents)
client.remove_command('help')
# Events
@client.event
async def on_ready():
print(f'We have logged in as {client.user}')
@client.event
async def on_message(message):
if message.author != client.user:
await client.process_commands(message)
@client.event
async def on_guild_join(guild):
model.addGuild(guild.id)
@client.event
async def on_guild_remove(guild):
model.removeGuild(guild.id)
@client.event
async def on_command_error(ctx, error):
embed = discord.Embed
errColor = model.getErrorColor()
if isinstance(error, commands.CommandNotFound):
embed = discord.Embed(
title='Command Error',
description='I don\'t recognize your command.',
colour=errColor
)
elif isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
title='Cooldown Error',
description='This command is on cooldown, try again later.',
colour=errColor
)
elif isinstance(error, commands.MissingPermissions):
embed = discord.Embed(
title='Permissions Error',
description='You don\'t have enough permissions to execute that command.',
colour=errColor
)
elif isinstance(error, commands.MissingRequiredArgument) or isinstance(error, commands.BadArgument) or isinstance(error, commands.TooManyArguments):
embed = discord.Embed(
title='Arguments Error',
description='There was a problem with your given parameters.',
colour=errColor
)
elif isinstance(error, commands.DisabledCommand):
embed = discord.Embed(
title='Status Error',
description='You tried to use a disabled command.',
colour=errColor
)
elif isinstance(error, commands.MissingRole):
embed = discord.Embed(
title='Role Error',
description='You don\'t have a required role to execute that command.',
colour=errColor
)
else:
embed = discord.Embed(
title='Undefined Error',
            description='Something unexpected happened...',
colour=errColor
)
print(error)
embed.set_footer(text=f'Use the command {model.getPrefix()}help for more information.')
await ctx.send(embed=embed, delete_after=30.0)
# Activate web host
keep_alive()
# Adding commands
client.add_cog(Help(client, model))
client.add_cog(Clean(client, model))
client.add_cog(Logs(client, model))
client.add_cog(Notebook(client, model))
client.add_cog(User(client, model))
client.add_cog(Introduction(client, model))
# Activate bot
load_dotenv()
client.run(os.getenv('TOKEN'))
|
from rllab.sampler.utils import rollout
from rllab.sampler.stateful_pool import singleton_pool
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc import tensor_utils
import numpy as np
def _worker_init(G, id):
import os
os.environ['THEANO_FLAGS'] = 'device=cpu'
G.worker_id = id
def initialize(n_parallel):
singleton_pool.initialize(n_parallel)
singleton_pool.run_each(
_worker_init, [(id,) for id in range(singleton_pool.n_parallel)])
def _worker_populate_task(G, env, policy, dynamics):
G.env = env
G.policy = policy
G.dynamics = dynamics
def populate_task(env, policy, dynamics):
logger.log("Populating workers...")
singleton_pool.run_each(
_worker_populate_task,
[(env, policy, dynamics)] * singleton_pool.n_parallel
)
logger.log("Populated")
def _worker_set_seed(_, seed):
ext.set_seed(seed)
def set_seed(seed):
singleton_pool.run_each(
_worker_set_seed,
[(seed + i,) for i in range(singleton_pool.n_parallel)]
)
def _worker_set_policy_params(G, params):
G.policy.set_param_values(params)
def _worker_set_dynamics_params(G, params):
G.dynamics.set_param_values(params)
def _worker_collect_one_path(G, max_path_length, itr, normalize_reward,
reward_mean, reward_std, kl_batch_size, n_itr_update, use_replay_pool,
obs_mean, obs_std, act_mean, act_std, second_order_update):
# Path rollout.
path = rollout(G.env, G.policy, max_path_length)
# Computing intrinsic rewards.
# ----------------------------
# Save original reward.
path['rewards_orig'] = np.array(path['rewards'])
if itr > 0:
# Iterate over all paths and compute intrinsic reward by updating the
# model on each observation, calculating the KL divergence of the new
# params to the old ones, and undoing this operation.
obs = (path['observations'] - obs_mean) / (obs_std + 1e-8)
act = (path['actions'] - act_mean) / (act_std + 1e-8)
rew = path['rewards']
# inputs = (o,a), target = o'
obs_nxt = np.vstack([obs[1:]])
_inputs = np.hstack([obs[:-1], act[:-1]])
_targets = obs_nxt
# KL vector assumes same shape as reward.
kl = np.zeros(rew.shape)
for j in range(int(np.ceil(obs.shape[0] / float(kl_batch_size)))):
# Save old params for every update.
G.dynamics.save_old_params()
start = j * kl_batch_size
end = np.minimum(
(j + 1) * kl_batch_size, obs.shape[0] - 1)
if second_order_update:
# We do a line search over the best step sizes using
# step_size * invH * grad
# best_loss_value = np.inf
for step_size in [0.01]:
G.dynamics.save_old_params()
loss_value = G.dynamics.train_update_fn(
_inputs[start:end], _targets[start:end], step_size)
kl_div = np.clip(loss_value, 0, 1000)
# If using replay pool, undo updates.
if use_replay_pool:
G.dynamics.reset_to_old_params()
else:
# Update model weights based on current minibatch.
for _ in range(n_itr_update):
G.dynamics.train_update_fn(
_inputs[start:end], _targets[start:end])
# Calculate current minibatch KL.
kl_div = np.clip(
float(G.dynamics.f_kl_div_closed_form()), 0, 1000)
for k in range(start, end):
kl[k] = kl_div
# If using replay pool, undo updates.
if use_replay_pool:
G.dynamics.reset_to_old_params()
# Last element in KL vector needs to be replaced by second last one
# because the actual last observation has no next observation.
kl[-1] = kl[-2]
# Stuff it in path
path['KL'] = kl
# ----------------------------
return path, len(path["rewards"])
def sample_paths(
policy_params,
dynamics_params,
max_samples,
max_path_length=np.inf,
itr=None,
normalize_reward=None,
reward_mean=None,
reward_std=None,
kl_batch_size=None,
n_itr_update=None,
use_replay_pool=None,
obs_mean=None,
obs_std=None,
act_mean=None,
act_std=None,
second_order_update=None
):
"""
:param policy_params: parameters for the policy. This will be updated on each worker process
:param max_samples: desired maximum number of samples to be collected. The actual number of collected samples
might be greater since all trajectories will be rolled out either until termination or until max_path_length is
reached
:param max_path_length: horizon / maximum length of a single trajectory
:return: a list of collected paths
"""
singleton_pool.run_each(
_worker_set_policy_params,
[(policy_params,)] * singleton_pool.n_parallel
)
# Set dynamics params.
# --------------------
singleton_pool.run_each(
_worker_set_dynamics_params,
[(dynamics_params,)] * singleton_pool.n_parallel
)
# --------------------
return singleton_pool.run_collect(
_worker_collect_one_path,
threshold=max_samples,
args=(max_path_length, itr, normalize_reward, reward_mean,
reward_std, kl_batch_size, n_itr_update, use_replay_pool, obs_mean, obs_std, act_mean, act_std, second_order_update),
show_prog_bar=True
)
def truncate_paths(paths, max_samples):
"""
Truncate the list of paths so that the total number of samples is exactly equal to max_samples. This is done by
    removing extra paths at the end of the list, and making the last path shorter if necessary
:param paths: a list of paths
:param max_samples: the absolute maximum number of samples
:return: a list of paths, truncated so that the number of samples adds up to max-samples
"""
# chop samples collected by extra paths
# make a copy
paths = list(paths)
total_n_samples = sum(len(path["rewards"]) for path in paths)
while len(paths) > 0 and total_n_samples - len(paths[-1]["rewards"]) >= max_samples:
total_n_samples -= len(paths.pop(-1)["rewards"])
if len(paths) > 0:
last_path = paths.pop(-1)
truncated_last_path = dict()
truncated_len = len(
last_path["rewards"]) - (total_n_samples - max_samples)
        for k, v in last_path.items():
if k in ["observations", "actions", "rewards"]:
truncated_last_path[k] = tensor_utils.truncate_tensor_list(
v, truncated_len)
elif k in ["env_infos", "agent_infos"]:
truncated_last_path[k] = tensor_utils.truncate_tensor_dict(
v, truncated_len)
else:
raise NotImplementedError
paths.append(truncated_last_path)
return paths
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='Home'),
path('merchants/', views.vendors_list, name='Vendors'),
path('merchants/<int:pk>/<str:pk_test>/', views.vendors_view, name='VendorsView'),
path('about/', views.about_us, name='AboutUs'),
path('contact/', views.Contact, name='Contact'),
path('shop/', views.shop, name='Shop'),
path('search/', views.search, name='Search'),
path('search_auto/', views.search_auto, name='search_auto'),
path('shop/<str:pk_test>/', views.ProductView, name='ProductView'),
path('category/<int:id>/<slug:slug>/', views.CategoryView, name='CategoryView'),
path('shop/products/<int:pk_test>/<str:pk_live>/', views.ProductView, name='ProductView'),
]
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def pyunit_model_params():
pros = h2o.import_file(tests.locate("smalldata/prostate/prostate.csv"))
m = h2o.kmeans(pros,k=4)
print m.params
print m.full_parameters
if __name__ == "__main__":
tests.run_test(sys.argv, pyunit_model_params)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
class CommandFilter(object):
"""Command filter only checking that the 1st argument matches exec_path"""
def __init__(self, exec_path, run_as, *args):
self.exec_path = exec_path
self.run_as = run_as
self.args = args
def match(self, userargs):
"""Only check that the first argument (command) matches exec_path"""
if (os.path.basename(self.exec_path) == userargs[0]):
return True
return False
def get_command(self, userargs):
"""Returns command to execute (with sudo -u if run_as != root)."""
if (self.run_as != 'root'):
# Used to run commands at lesser privileges
return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:]
return [self.exec_path] + userargs[1:]
def get_environment(self, userargs):
"""Returns specific environment to set, None if none"""
return None
class RegExpFilter(CommandFilter):
"""Command filter doing regexp matching for every argument"""
def match(self, userargs):
# Early skip if command or number of args don't match
if (len(self.args) != len(userargs)):
# DENY: argument numbers don't match
return False
# Compare each arg (anchoring pattern explicitly at end of string)
for (pattern, arg) in zip(self.args, userargs):
try:
if not re.match(pattern + '$', arg):
break
except re.error:
# DENY: Badly-formed filter
return False
else:
# ALLOW: All arguments matched
return True
# DENY: Some arguments did not match
return False
class DnsmasqFilter(CommandFilter):
"""Specific filter for the dnsmasq call (which includes env)"""
def match(self, userargs):
if (userargs[0].startswith("FLAGFILE=") and
userargs[1].startswith("NETWORK_ID=") and
userargs[2] == "dnsmasq"):
return True
return False
def get_command(self, userargs):
return [self.exec_path] + userargs[3:]
def get_environment(self, userargs):
env = os.environ.copy()
env['FLAGFILE'] = userargs[0].split('=')[-1]
env['NETWORK_ID'] = userargs[1].split('=')[-1]
return env
class KillFilter(CommandFilter):
"""Specific filter for the kill calls.
1st argument is the user to run /bin/kill under
2nd argument is the location of the affected executable
Subsequent arguments list the accepted signals (if any)
This filter relies on /proc to accurately determine affected
executable, so it will only work on procfs-capable systems (not OSX).
"""
def __init__(self, *args):
super(KillFilter, self).__init__("/bin/kill", *args)
def match(self, userargs):
if userargs[0] != "kill":
return False
args = list(userargs)
if len(args) == 3:
# A specific signal is requested
signal = args.pop(1)
if signal not in self.args[1:]:
# Requested signal not in accepted list
return False
else:
if len(args) != 2:
# Incorrect number of arguments
return False
if len(self.args) > 1:
# No signal requested, but filter requires specific signal
return False
try:
command = os.readlink("/proc/%d/exe" % int(args[1]))
# NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
# the end if an executable is updated or deleted
if command.endswith(" (deleted)"):
command = command[:command.rindex(" ")]
if command != self.args[0]:
# Affected executable does not match
return False
except (ValueError, OSError):
# Incorrect PID
return False
return True
class ReadFileFilter(CommandFilter):
"""Specific filter for the utils.read_file_as_root call"""
def __init__(self, file_path, *args):
self.file_path = file_path
super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
def match(self, userargs):
if userargs[0] != 'cat':
return False
if userargs[1] != self.file_path:
return False
if len(userargs) != 2:
return False
return True
|
# -*- coding: utf-8 -*-
"""Backend room logic."""
from time import sleep
from random import random
from sqlalchemy import or_, and_, func, text, select, update
from ..database.table import Room, get_engine, get_session
def update_period():
"""Heart beat period"""
return 3
def take_room(session, worker_id):
"""Try to take a room"""
    # Query 1 or 0 rooms with an outdated timestamp
find_query = select(
[Room.id]
).where(
or_(Room.worker == worker_id,
Room.updated == None, # noqa
Room.updated < (
func.now() -
text("interval '{} seconds'".format(update_period()))
)),
).order_by(
Room.id
).limit(
1
).with_for_update()
# Update room with our name
update_query = update(
Room
).values({
Room.updated: func.now(),
Room.worker: worker_id,
}).where(
Room.id == find_query.as_scalar()
)
update_proxy = session.execute(update_query)
session.commit()
if update_proxy.rowcount == 0:
return None
# Select back what we found
result_query = select(
[Room.id, Room.title]
).where(
Room.worker == worker_id
).limit(
1
)
result = session.execute(result_query).fetchone()
return result
def keep_room(session, worker_id, room_id):
"""Try to keep a room"""
# Update room current timestamp
query = update(
Room
).values({
Room.updated: func.now(),
}).where(
and_(Room.worker == worker_id,
Room.id == room_id)
)
proxy = session.execute(query)
session.commit()
return proxy.rowcount == 1
def take_room_block(session, worker_id):
"""Blocks until a room is available"""
while True:
room = take_room(session, worker_id)
if room:
return room
sleep(update_period() + 1 + random())
def keep_room_block(stop, worker_id, room_id):
"""Block and keep the room until we don't have a room"""
session = get_session(get_engine())
while not stop() and keep_room(session, worker_id, room_id):
sleep(update_period())
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
from django.test import TestCase
from api.apikey import APIKey
from api.redis import save_key, exists, get_apikeys, flush_all
from config.settings.base import get_config
from api.apikeyhandler import ApiKeyHandler
class RedisTestCase(TestCase):
def test_redis(self):
# create data
username = get_config("TEST_USERNAME")
api_key = APIKey()
# save the APIKey
save_key(username, api_key)
# check if the APIKey exists
self.assertIsNotNone(exists(api_key.access_key, api_key.secret_key_clear))
handler = ApiKeyHandler()
self.assertIsNotNone(handler.validate(api_key.access_key, api_key.secret_key_clear))
# get all the keys of the test user
keys = get_apikeys(username)
# check that the key is there
self.assertTrue(api_key.access_key in keys)
flush_all()
def test_not_exists(self):
        # Check if the APIKey exists
self.assertFalse(exists("1234", "4321"))
|
class Class:
def __init__(self, name):
self.name = name
self.fields = []
def __str__(self):
lines = ['class %s:' % self.name]
if not self.fields:
lines.append(' pass')
else:
lines.append(' def __init__(self):')
for f in self.fields:
lines.append(' %s' % f)
return '\n'.join(lines)
|
from Cimpl import *
file = choose_file()
original_image = load_image(file)
def grayscale(image: Image) -> Image:
"""Return a grayscale copy of image.
>>> image = load_image(choose_file())
>>> gray_image = grayscale(image)
>>> show(gray_image)
"""
new_image = copy(image)
for x, y, (r, g, b) in image:
# Use the pixel's brightness as the value of RGB components for the
        # shade of gray. This means that the pixel's original colour and the
# corresponding gray shade will have approximately the same brightness.
brightness = (r + g + b) // 3
# or, brightness = (r + g + b) / 3
# create_color will convert an argument of type float to an int
gray = create_color(brightness, brightness, brightness)
set_color(new_image, x, y, gray)
return new_image
def sepia(image):
new_image = copy(image)
for pixel in image:
x, y, (r, g, b) = pixel
if r < 63:
r = r * 1.1
b = b * 0.9
elif r <= 191:
r = r * 1.15
b = b * 0.85
else:
r = r * 1.08
b = b * 0.93
new_colour = create_color(r, g, b)
set_color(new_image, x, y, new_colour)
return new_image
show(sepia(grayscale(original_image)))
# save(sepia(grayscale(original_image)))
|
import unittest
from numpy.testing import assert_array_almost_equal
import torch
import numpy as np
from siren.siren import Sine, SIREN
class SineTestCase(unittest.TestCase):
"""Class to test the Sine activation function"""
def test_sine(self):
dummy = torch.FloatTensor([np.pi, np.pi / 2])
sine = Sine(w0=1)
out = sine(dummy)
target = torch.FloatTensor([0, 1])
assert_array_almost_equal(target, out, decimal=6)
class SIRENTestCase(unittest.TestCase):
"""Class to test the SIREN model"""
def test_siren(self):
in_features = 10
layers = [64, 128]
out_features = 5
dummy = torch.ones(in_features)
model = SIREN(layers, in_features, out_features)
out = model(dummy)
self.assertEqual(out.shape, (5,))
if __name__ == "__main__":
unittest.main()
|
from __future__ import print_function
import sys
import copy
import random
import numpy as np
from collections import defaultdict
import os
def data_partition(fname, FLAGS):
print("inside utility function")
usernum = 1
itemnum = 1
User = defaultdict(list)
user_train = {}
user_valid = {}
user_test = {}
# assume user/item index starting from 1
f = open(fname, 'r')
for line in f:
u, i = line.rstrip().split(' ')
u = int(u)
i = int(i)
usernum = max(u, usernum)
itemnum = max(i, itemnum)
User[u].append(i)
path = FLAGS.data_path_kgat + FLAGS.dataset_name
for user in User:
nfeedback = len(User[user])
if nfeedback < 3:
user_train[user] = User[user]
user_valid[user] = []
user_test[user] = []
else:
user_train[user] = User[user][:-2]
user_valid[user] = []
user_valid[user].append(User[user][-2])
user_test[user] = []
user_test[user].append(User[user][-1])
#print " ".join(map(str, user_train[user]))
with open(path + "/"+ "train.txt", "a") as train_file:
train_file.write(str(user) + " " +" ".join([str(x) for x in user_train[user]]) + "\n")
with open(path + "/"+ "test.txt", "a") as test_file:
test_file.write(str(user) + " " +" ".join([str(x) for x in user_test[user]]) + "\n")
return [user_train, user_valid, user_test, usernum, itemnum]
|
import requests
import re
class Content():
def __init__(self, Requests, log):
self.Requests = Requests
self.log = log
self.gameContent = {}
def get_content(self):
content = self.Requests.fetch("custom", f"https://shared.{self.Requests.region}.a.pvp.net/content-service/v3/content", "get")
self.gameContent = content
self.log(f"content: {self.gameContent}")
return content
def get_latest_season_id(self):
for season in self.gameContent["Seasons"]:
if season["IsActive"]:
self.log(f"[Content] Retrieved season id: {season['ID']}")
return season["ID"]
def get_current_season(self):
return self.get_current_act() + " - " + self.get_current_episode()
def get_current_act(self):
act = ""
for season in self.gameContent["Seasons"]:
if season["IsActive"] and season["Type"] == "act":
act = season["Name"].capitalize()
break
return act
def get_current_episode(self):
episode = ""
for season in self.gameContent["Seasons"]:
if season["IsActive"] and season["Type"] == "episode":
episode = season["Name"].capitalize()
break
return episode
def get_all_maps(self):
rMaps = requests.get("https://valorant-api.com/v1/maps").json()
map_dict = {}
for map in rMaps["data"]:
url = map["mapUrl"]
name = map["displayName"]
map_dict.update({url: name})
self.log(f"[Content] Retrieved maps: {map_dict}")
return map_dict
def get_all_modes(self):
rModes = requests.get("https://valorant-api.com/v1/gamemodes").json()
mode_dict = {}
for mode in rModes["data"]:
path = mode["assetPath"].replace("ShooterGame/Content/", "/Game/").split("_", 1)[0]
name = mode["displayName"].capitalize()
mode_dict.update({path: name})
self.log(f"[Content] Retrieved modes: {mode_dict}")
return mode_dict
def get_all_ranks(self, episode):
rRanks = requests.get("https://valorant-api.com/v1/competitivetiers").json()
rank_arr = []
for data in rRanks["data"]:
episode_value = re.sub(r"\s+", "", episode)
obj = episode_value + "_CompetitiveTierDataTable"
if obj == data["assetObjectName"]:
for tier in data["tiers"]:
rank_arr.append(tier["tierName"].capitalize())
self.log(f"[Content] Retrieved ranks: {rank_arr}")
return rank_arr
def get_all_agents(self):
rAgents = requests.get("https://valorant-api.com/v1/agents?isPlayableCharacter=true").json()
agent_dict = {}
for agent in rAgents["data"]:
agent_dict.update({agent['uuid'].lower(): agent['displayName']})
self.log(f"[Content] Retrieved agents: {agent_dict}")
return agent_dict
def get_all_icons(self, episode):
rRanks = requests.get("https://valorant-api.com/v1/competitivetiers").json()
rank_dict = {}
for data in rRanks["data"]:
episode_value = re.sub(r"\s+", "", episode)
obj = episode_value + "_CompetitiveTierDataTable"
if obj == data["assetObjectName"]:
for tier in data["tiers"]:
rank_dict.update({tier["tierName"].capitalize(): tier["largeIcon"]})
self.log(f"[Content] Retrieved ranks (icons): {rank_dict}")
rAgents = requests.get("https://valorant-api.com/v1/agents?isPlayableCharacter=true").json()
agent_dict = {}
for agent in rAgents["data"]:
agent_dict.update({agent["displayName"]: agent["displayIcon"]})
self.log(f"[Content] Retrieved agents (icons): {agent_dict}")
icons_dict = {}
icons_dict.update({"Ranks": rank_dict, "Agents": agent_dict})
self.log(f"[Content] Retrieved all icons: {icons_dict}")
return icons_dict
|
# Shiny dataset but enable
import os
import glob
import numpy as np
import torch
from PIL import Image
from .shiny import ShinyDataset, center_poses, normalize, get_spiral, average_poses
from .ray_utils import get_ray_directions_blender, get_rays, ndc_rays_blender
class ShinyFewDataset(ShinyDataset):
def __init__(self, *args, **kwargs):
super().__init__(args[0][0], split=args[1]['split'], downsample=args[1]['downsample'], is_stack=args[1]['is_stack'], ndc_ray=args[1]['ndc_ray'], max_t=args[1]['max_t'], hold_every=args[1]['hold_every'])
def get_image_ids(self):
i_test = np.arange(0, self.poses.shape[0], self.hold_every) # [np.argmin(dists)]
img_list = i_test if self.split != 'train' else list(set(np.arange(len(self.poses))) - set(i_test))
if self.split == 'train':
img_list = img_list[::self.pick_train_every]
print("NUM_IMAGE: {} / {} images".format(self.split, len(img_list)))
return img_list
def read_meta(self):
poses_bounds = np.load(os.path.join(self.root_dir, 'poses_bounds.npy')) # (N_images, 17)
self.image_paths = sorted(glob.glob(os.path.join(self.root_dir, 'images/*')))
# load full resolution image then resize
if self.split in ['train', 'test']:
assert len(poses_bounds) == len(self.image_paths), \
'Mismatch between number of images and number of poses! Please rerun COLMAP!'
#poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5)
        poses = poses_bounds[:, :-2].reshape(-1, 3, 4) # (N_images, 3, 4)
self.near_fars = poses_bounds[:, -2:] # (N_images, 2)
intrinsic_arr = np.load(os.path.join(self.root_dir, 'hwf_cxcy.npy'))
hwf = poses[:, :, -1]
# Step 1: rescale focal length according to training resolution
#H, W, self.focal = poses[0, :, -1] # original intrinsics, same for all images
H, W ,self.focal = intrinsic_arr[:3,0]
self.img_wh = np.array([int(W / self.downsample), int(H / self.downsample)])
self.focal = [self.focal * self.img_wh[0] / W, self.focal * self.img_wh[1] / H]
# Step 2: correct poses
# Original poses has rotation in form "down right back", change to "right up back"
# See https://github.com/bmild/nerf/issues/34
poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
# (N_images, 3, 4) exclude H, W, focal
self.poses, self.pose_avg = center_poses(poses, self.blender2opencv)
#pdb.set_trace()
# Step 3: correct scale so that the nearest depth is at a little more than 1.0
# See https://github.com/bmild/nerf/issues/34
near_original = self.near_fars.min()
if hasattr(self, 'override_near'):
near_original = self.override_near
scale_factor = near_original * 0.75 # 0.75 is the default parameter
# the nearest depth is at 1/0.75=1.33
self.near_fars /= scale_factor
self.poses[..., 3] /= scale_factor
# build rendering path
N_views, N_rots = 120, 2
tt = self.poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T
up = normalize(self.poses[:, :3, 1].sum(0))
rads = np.percentile(np.abs(tt), 90, 0)
self.render_path = get_spiral(self.poses, self.near_fars, rads_scale=0.1, N_views=N_views)
# distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1)
# val_idx = np.argmin(distances_from_center) # choose val image as the closest to
# center image
# ray directions for all pixels, same for all images (same H, W, focal)
W, H = self.img_wh
self.directions = get_ray_directions_blender(H, W, self.focal) # (H, W, 3)
average_pose = average_poses(self.poses)
dists = np.sum(np.square(average_pose[:3, 3] - self.poses[:, :3, 3]), -1)
img_list = self.get_image_ids()
# use first N_images-1 to train, the LAST is val
self.all_rays = []
self.all_rgbs = []
for i in img_list:
image_path = self.image_paths[i]
c2w = torch.FloatTensor(self.poses[i])
img = Image.open(image_path).convert('RGB')
if self.downsample != 1.0:
img = img.resize(self.img_wh, Image.LANCZOS)
img = self.transform(img) # (3, h, w)
img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB
self.all_rgbs += [img]
rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
if self.ndc_ray:
rays_o, rays_d = ndc_rays_blender(H, W, self.focal[0], 1.0, rays_o, rays_d)
# viewdir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6)
if not self.is_stack:
self.all_rays = torch.cat(self.all_rays, 0) # (len(self.meta['frames])*h*w, 3)
self.all_rgbs = torch.cat(self.all_rgbs, 0) # (len(self.meta['frames])*h*w,3)
else:
self.all_rays = torch.stack(self.all_rays, 0) # (len(self.meta['frames]),h,w, 3)
self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(-1,*self.img_wh[::-1], 3) # (len(self.meta['frames]),h,w,3)
class ShinyFew1(ShinyFewDataset):
def __init__(self, *args, **kwargs):
        self.override_near = 40
self.pick_train_every = 1
super().__init__(args,kwargs)
class ShinyFew5(ShinyFewDataset):
def __init__(self, *args, **kwargs):
        self.override_near = 40
self.pick_train_every = 5
super().__init__(args,kwargs)
class ShinyFew10(ShinyFewDataset):
def __init__(self, *args, **kwargs):
        self.override_near = 40
self.pick_train_every = 10
super().__init__(args,kwargs)
class ShinyFew15(ShinyFewDataset):
def __init__(self, *args, **kwargs):
        self.override_near = 40
self.pick_train_every = 15
super().__init__(args,kwargs)
class ShinyFew20(ShinyFewDataset):
def __init__(self, *args, **kwargs):
        self.override_near = 40
self.pick_train_every = 20
super().__init__(args,kwargs)
class ShinyFern400(ShinyFewDataset):
def __init__(self, *args, **kwargs):
self.pick_train_every = 20
super().__init__(args,kwargs)
self.near_far = [0.0, 1.0]
self.scene_bbox = torch.tensor([[-1.5, -1.67, -1.0], [1.5, 1.67, 1.0]])
self.center = torch.mean(self.scene_bbox, dim=0).float().view(1, 1, 3)
self.invradius = 1.0 / (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
def get_image_ids(self):
TOTAL_FILE = 420
TOTAL_ORIGINAL_FILE = 20
i_test = np.arange(0, TOTAL_ORIGINAL_FILE, self.hold_every) # [np.argmin(dists)]
img_list = i_test if self.split != 'train' else list(set(np.arange(TOTAL_FILE)) - set(i_test))
print("NUM_IMAGE: {} / {} images".format(self.split, len(img_list)))
return img_list
|
from allauth.account.models import EmailAddress
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
#
# class MySocialAccountAdapter(DefaultSocialAccountAdapter):
# def authentication_error(self, request, provider_id, error, exception, extra_context):
# print(provider_id)
# print(error.__str__())
# print(exception.__str__())
# print(extra_context)
#
# def pre_social_login(self, request, sociallogin):
# """
# Invoked just after a user successfully authenticates via a
# social provider, but before the login is actually processed
# (and before the pre_social_login signal is emitted).
#
# We're trying to solve different use cases:
# - social account already exists, just go on
# - social account has no email or email is unknown, just go on
# - social account's email exists, link social account to existing user
# """
# # Ignore existing social accounts, just do this stuff for new ones
# if sociallogin.is_existing:
# return
#
# # some social logins don't have an email address, e.g. facebook accounts
# # with mobile numbers only, but allauth takes care of this case so just
# # ignore it
# if not sociallogin.email_addresses:
# return
#
# if 'email' not in sociallogin.account.extra_data:
# return
#
# # find the first verified email that we get from this sociallogin
# verified_email = None
# for email in sociallogin.email_addresses:
# if email.verified:
# verified_email = email
# break
#
# # no verified emails found, nothing more to do
# if not verified_email:
# return
#
# # check if given email address already exists as a verified email on
# # an existing user's account
# try:
# existing_email = EmailAddress.objects.get(email__iexact=email.email, verified=True)
# except EmailAddress.DoesNotExist:
# # easy fast fix
# raise ImmediateHttpResponse(redirect('/accounts/login'))
# return
#
# # if it does, bounce back to the login page
# # account = User.objects.get(email=email).socialaccount_set.first()
# # messages.error(request, "A "
# # + account.provider.capitalize() + " account already exists associated to " +
# # email_address.email + ". Log in with that instead, and connect your " +
# # sociallogin.account.provider.capitalize() +
# # " account through your profile page to link them together.")
# # raise ImmediateHttpResponse(redirect('/accounts/login'))
#
# # if it does, connect this new social login to the existing user
# # I can do this, because when user is created in standard way, i'am using email verification -> then i know,
# # they cannot bypass email address.
# sociallogin.connect(request, existing_email.user)
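# Below is a minimal, active sketch of the same linking idea, kept separate
# from the commented-out adapter above. It is an illustration only (the class
# name is hypothetical and it is not wired up in settings): it connects a new
# social login to an existing local account that already verified the same
# email address.
class LinkByVerifiedEmailAdapter(DefaultSocialAccountAdapter):
    def pre_social_login(self, request, sociallogin):
        # Existing social accounts need no linking.
        if sociallogin.is_existing:
            return
        # Use the first verified email address reported by the provider.
        verified_email = next(
            (e for e in sociallogin.email_addresses if e.verified), None)
        if verified_email is None:
            return
        # Look for a local user who already verified that address.
        try:
            existing = EmailAddress.objects.get(
                email__iexact=verified_email.email, verified=True)
        except EmailAddress.DoesNotExist:
            return
        # Attach the social account to that user.
        sociallogin.connect(request, existing.user)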
|
from slacker.utility import parse_line
from prompt_toolkit.completion import WordCompleter
class Completer(WordCompleter):
def __init__(self, words, meta_dict, registrar):
self.__registrar = registrar
self.__completers = {}
super(Completer, self).__init__(words, meta_dict=meta_dict, ignore_case=True)
def get_completions(self, document, complete_event):
# If first command has been entered then use completer for command, if any, otherwise don't
# try to complete further.
text = document.text_before_cursor.lstrip()
if ' ' in text:
(cmd, args) = parse_line(text)
completer = None
if cmd in self.__completers:
completer = self.__completers[cmd]
else:
completer = self.__registrar.get_completer(cmd)
if not completer:
return []
self.__completers[cmd] = completer
return completer.get_completions(document, complete_event)
# Fallback to the normal completer.
return super(Completer, self).get_completions(document, complete_event)
|
# movie/forms.py
# Django modules
from django.forms import ModelForm, Textarea
# Locals
from movie.models import Review
# Form: ReviewForm
# We need to inherit from ModelForm.
class ReviewForm(ModelForm):
'''
Similar as what we did with the
UserCreationForm, we set some Bootstrap
classes for our form fields.
'''
def __init__(self, *args, **kwargs):
        super(ReviewForm, self).__init__(*args, **kwargs)
self.fields['text'].widget.attrs.update( {'class': 'form-control'})
self.fields['watchAgain'].widget.attrs.update({'class': 'form-check-input'})
'''
We then specify which model the form is for
and the fields we want in the form. In our
case, our review form will need just the text
and watchAgain fields. If you recall, in our
Review model:
class Review(models.Model):
text = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
user =
models.ForeignKey(User,on_delete=models.CASCADE)
movie =
models.ForeignKey(Movie,on_delete=models.CASCADE)
watchAgain = models.BooleanField()
date is auto-populated, user and movie are
already provided. Thus, we need only user to
input the text and watchAgain fields in the
form.
'''
class Meta:
model = Review
fields = ['text','watchAgain']
'''
We have a labels object where we can create
custom labels for each of our fields. For e.g.,
    we want to display 'Watch Again' instead
of watchAgain (our users are not
programmers!).
'''
labels = {'watchAgain': ('Watch Again')}
'''
By default, a CharField is displayed as an
input text. We override this default field (with
the use of widgets) to have a Textarea for our
text field.
'''
widgets = {'text': Textarea(attrs={'rows': 4}),}
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='django-translate-po-files',
version='0.0.7',
scripts=['django-translate-po'],
author="Bart Machielsen",
author_email="bartmachielsen@gmail.com",
description="Automatically translate all Django PO files in the correct languages using google translate.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bartmachielsen/django-translate-po-files",
packages=setuptools.find_packages(),
install_requires=[
"polib",
"requests"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
#
# Copyright 2020 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fractions import Fraction
from uuid import UUID
from ...cogenums import CogFrameFormat
from ...grain import VIDEOGRAIN
from ..abc import FixedRatePatternGenerator
__all__ = ["VideoPatternGenerator"]
class VideoPatternGenerator (FixedRatePatternGenerator[VIDEOGRAIN]):
def __init__(self, src_id, flow_id, width, height,
rate=Fraction(25, 1),
cog_frame_format=CogFrameFormat.U8_444):
super().__init__(rate)
self._src_id = src_id
self._flow_id = flow_id
self._width = width
self._height = height
self._cog_frame_format = cog_frame_format
@property
def src_id(self) -> UUID:
return self._src_id
@property
def flow_id(self) -> UUID:
return self._flow_id
@property
def width(self) -> int:
return self._width
@property
def height(self) -> int:
return self._height
@property
def cog_frame_format(self) -> CogFrameFormat:
return self._cog_frame_format
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 23:43:32 2018
@author : gaurav gahukar
: caffeine110
AIM         : (p) To implement the Udacity mini-project (Naive Bayes)
: (s) Email Classification to predict Author of the Email
            : --- Inspired by the story of J. K. Rowling ---
The Sunday (UK) Times recently revealed that J.K. Rowling wrote the
detective novel The Cuckoo's Calling under the pen name Robert Galbraith.
After running classification algorithms on the text, it was confirmed that
J. K. Rowling wrote that book.
"""
"""
This is the code to accompany the Lesson 3 (decision tree) mini-project.
Use a Decision Tree to identify emails from the Enron corpus by author:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#importing sklearn
from sklearn import tree
#initialising dtc classifier
dtc_classifier = tree.DecisionTreeClassifier()
#fitting data to the classifier
dtc_classifier.fit(features_train, labels_train)
#prediction of author
author_pred = dtc_classifier.predict(features_test)
#importing accuracy score
from sklearn.metrics import accuracy_score
#printing the accuracy scores
print(accuracy_score(labels_test, author_pred))
print(dtc_classifier.score(features_test, labels_test))
|
# Use __missing__ to give a dict subclass a default value when a key is absent, instead of raising KeyError
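# A minimal sketch of that idea (the class and values below are illustrative,
# not taken from any particular project): subclass dict and define __missing__
# so that looking up an absent key returns (and caches) a default value
# instead of raising KeyError.
class DefaultingDict(dict):
    def __missing__(self, key):
        # Called by dict.__getitem__ only when the key is not present.
        value = 'N/A'
        self[key] = value  # cache the default so later lookups succeed normally
        return value

scores = DefaultingDict(alice=3)
print(scores['alice'])  # 3
print(scores['bob'])    # 'N/A' instead of a KeyError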
|
from flowws import try_to_import
AutoencoderVisualizer = try_to_import(
'.AutoencoderVisualizer', 'AutoencoderVisualizer', __name__
)
BondDenoisingVisualizer = try_to_import(
'.BondDenoisingVisualizer', 'BondDenoisingVisualizer', __name__
)
ClassifierPlotter = try_to_import('.ClassifierPlotter', 'ClassifierPlotter', __name__)
EmbeddingDistance = try_to_import('.EmbeddingDistance', 'EmbeddingDistance', __name__)
EmbeddingDistanceTrajectory = try_to_import(
'.EmbeddingDistanceTrajectory', 'EmbeddingDistanceTrajectory', __name__
)
EmbeddingPlotter = try_to_import('.EmbeddingPlotter', 'EmbeddingPlotter', __name__)
EvaluateEmbedding = try_to_import('.EvaluateEmbedding', 'EvaluateEmbedding', __name__)
NoisyBondVisualizer = try_to_import(
'.NoisyBondVisualizer', 'NoisyBondVisualizer', __name__
)
PCAEmbedding = try_to_import('.PCAEmbedding', 'PCAEmbedding', __name__)
RegressorPlotter = try_to_import('.RegressorPlotter', 'RegressorPlotter', __name__)
ShiftIdentificationVisualizer = try_to_import(
'.ShiftIdentificationVisualizer', 'ShiftIdentificationVisualizer', __name__
)
UMAPEmbedding = try_to_import('.UMAPEmbedding', 'UMAPEmbedding', __name__)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import hashlib
import requests
import time
import sys
from abc import abstractmethod
reload(sys)
sys.setdefaultencoding('utf8')
# debug key and secret key
SECRET_KEY = 'ij1a4iltc4p2ml29swvkgjoanxyron5m'
APP_KEY = '575e809b67e58eb219000d78'
if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), '__test__')):
PRODUCTION_MODE = "false"
else:
PRODUCTION_MODE = "true"
BODYTPL = {
"ticker": True,
"title": True,
"text": True,
"icon": True,
"largeIcon": True,
"img": True,
"sound": True,
"builder_id": True,
"play_vibrate": True,
"play_lights": True,
"play_sound": True,
"after_open": True,
"url": True,
"activity": True,
"custom": True,
}
POLICYTPL = {
"start_time": True,
"expire_time": True,
"max_send_num": True,
}
def timestamp():
    return int(time.time()*1000)
def unserialize(data):
    return json.dumps(data, ensure_ascii=False, encoding='utf8', sort_keys=True).encode('utf-8')
def sign(url, data, key):
    return hashlib.md5('%s%s%s%s' % ('POST', url, unserialize(data), key)).hexdigest()
def check(obj, src):
for key in obj:
if src.get(key):
pass
else:
raise Exception("Key %s is not in (%s)." % (key, ','.join(src.keys())))
class UmengPush(object):
"""
appkey:应用唯一标识。
友盟消息推送服务提供的appkey
和友盟统计分析平台使用的同一套appkey。
app_master_secret:服务器秘钥,
用于服务器端调用API,
请求时对发送内容做签名验证。
production_mode: 正式/测试模式。
测试模式下,广播/组播只会将消息发给测试设备。
测试设备需要到web上添加。
Android测试设备属于正式设备的一个子集。
thirdparty_id: 开发者自定义消息标识ID,
开发者可以为同一批发送的多条消息。
"""
def __init__(self, url, appkey, appsecret, thirdparty=None):
self.url = url
self.appkey = appkey
self.appsecret = appsecret
self.thirdparty = thirdparty
    def cast(self, content,
             cast_type='unicast',
             token='',
             intent='',
             activity='com.adesk.picasso.view.HomeActivity',
             display_type='notification',
             **kwargs):
        # `content` is the notification body supplied by the caller
        message = content
        if 'title' not in message:
            message['title'] = '安卓壁纸'
        if 'ticker' not in message:
            message['ticker'] = message['text']
        if display_type == 'message':
            message['custom'] = self.touchuan_message(message['title'], message['text'],
                                                      message['ticker'], action=intent)
            message['after_open'] = 'go_custom'
        if 'activity' not in message:
            message['activity'] = activity
        data = {
            'appkey': self.appkey,
            'device_tokens': token,
            'payload': {
                "aps": {
                    "alert": "我们都是好孩子"
                },
            },
            'timestamp': timestamp(),
            'type': cast_type,
            'production_mode': PRODUCTION_MODE,
            'description': message['title'],
        }
        data.update(kwargs)
        if 'after_open' not in message:
            message['after_open'] = 'go_activity'
        # use a different local name so the module-level sign() helper is not shadowed
        signature = sign(self.url, data, self.appsecret)
        body = unserialize(data)
        result = requests.post('%s?sign=%s' % (self.url, signature), data=body)
        print result.content
    def send(self, payload, policy={}):
        check(payload['body'], BODYTPL)
        check(policy, POLICYTPL)
        # assumption: the validated payload is the request body to sign and send
        data = payload
        signature = sign(self.url, data, self.appsecret)
        body = unserialize(data)
        result = requests.post('%s?sign=%s' % (self.url, signature), data=body)
        return result.content
@abstractmethod
def directedcast(self, device_token, body, display_type='notification', extra={}, policy={}, description=""):
"""
定向播: 向指定的设备发送消息,
包括单播(unicast) or 列播(listcast)
向若干个device_token或者若干个alias发消息。
"""
pass
@abstractmethod
def broadcast(self, body, display_type='notification', extra={}, policy={}, description=""):
"""
广播(broadcast,属于task): 向安装该App的所有设备发送消息。
"""
pass
@abstractmethod
def filecast(self, content, token, intent='', **kwargs):
"""
文件播(filecast,属于task):开发者将批量的device_token
或者alias存放到文件, 通过文件ID进行消息发送。
"""
pass
@abstractmethod
def groupcast(self, content, token, intent='', **kwargs):
"""
组播(groupcast,属于task): 向满足特定条件的设备集合发送消息,
例如: "特定版本"、"特定地域"等。
友盟消息推送所支持的维度筛选
和友盟统计分析所提供的数据展示维度是一致的,
后台数据也是打通的
"""
pass
@abstractmethod
def customizedcast(self, content, token, intent='', **kwargs):
"""
自定义播(customizedcast,属于task): 开发者通过自有的alias进行推送,
可以针对单个或者一批alias进行推送,
也可以将alias存放到文件进行发送。
"""
pass
class UmengIosPush(UmengPush):
    def unicast(self, content, device_token, display_type='', intent='', **kwargs):
        self.cast(content, 'unicast', device_token, intent=intent, **kwargs)
    def broadcast(self, content, intent='', **kwargs):
        self.cast(content, 'broadcast', intent=intent, **kwargs)
    def listcast(self, content, token, intent='', **kwargs):
        self.cast(content, 'listcast', token, intent=intent, **kwargs)
    def filecast(self, content, token, intent='', **kwargs):
        self.cast(content, 'filecast', token, intent=intent, **kwargs)
    def groupcast(self, content, token, intent='', **kwargs):
        self.cast(content, 'groupcast', token, intent=intent, **kwargs)
    def customizedcast(self, content, token, intent='', **kwargs):
        self.cast(content, 'customizedcast', token, intent=intent, **kwargs)
def touchuan_message(self, title='', content='', ticker='', action=None, voice=True, delete=True):
msg = {"title":"安卓壁纸", "content":"", "voice":voice, "vibrate":voice, "delete":delete, "action":None}
if title:
msg['title'] = title
if ticker:
msg['ticker'] = ticker
if content:
msg['content'] = content
if action:
msg['action'] = action
return unserialize(msg)
def umen_push(_id):
print 'abc'
if __name__ == '__main__':
intent = 'androidesk://subject/558269d369401b611a4c86bb'
token = 'add1d6d1a09a738d6c4d5c56f2de2c2bb1375a768d9ad03e52bc2408a56cb5fc'
    # NOTE: the real Umeng push endpoint is not given in this file; the URL
    # below is a placeholder that must be replaced before running.
    tui = UmengIosPush(url='<umeng-push-api-url>', appkey=APP_KEY, appsecret=SECRET_KEY)
    tui.broadcast({'text': 'broadcast'})
# tui.broadcast_message({'text': 'broadcast'}, intent=intent)
# tui.unicast_message({'text': 'test'}, token=token, intent=intent)
# tui.unicast({'title': 'test', 'text': 'test', 'ticker': 'test'}, token=token, intent=intent)
|
from config_parser import parseConfig
from db_lib import getColNames, getBrokerIp, isUp
import json
import logging
import os
from mqtt_socket import MqttSocket
import paho.mqtt.publish as publish
def printLogFolder():
log_folder = parseConfig()['log_folder']
print(os.path.expanduser(log_folder))
def initLogger():
log_folder = os.path.expanduser(parseConfig()['log_folder'])
log_file = '%s/node.log' % log_folder
    os.makedirs(log_folder, exist_ok=True)
logging.basicConfig(
filename=log_file,
level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# Note: This function only works when the broker and nodes are up
# @param serials: a list of integers representing the serial numbers to get the
# statuses of.
# @return: a dictionary mapping key = serial to value = a dict
# the inner dict is of the form {'status' : str}
def getNodeStatus(serials):
initLogger()
# TODO: Error checking. what if broker not up?
sock = MqttSocket()
result = dict()
for s in serials:
logging.info('Webapp requested status of node %d' % s)
if not isUp(s):
result[s] = {'status' : '-1'}
logging.info('Node %d is dead, not requesting' % s)
continue
listenTopic = '%d/status_response' % s
sock.setListen(listenTopic)
# Publish empty message to trigger response from node
requestTopic = '%d/status_request' % s
logging.info('Node %d is up, sending status request' % s)
status = sock.getResponse(requestTopic, '{}')
        result[s] = {'status': '-1'} if status is None else status
sock.cleanup()
logging.info('Status request result: %s' % str(result))
return result
# @param serial: the serial number (int) of the device to set the status of
# @param status: the status, as a string.
def setNodeStatus(serial, status):
payload = {'status': status}
publish.single(topic='%d/status_change_request' % serial,
hostname=getBrokerIp(),
payload=json.dumps(payload))
# @param vals: a dict mapping col name to col value. All columns must be present
# except for interaction_id. See README.md for columns.
def addInteraction(vals):
# Validate input
neededColNames = set(getColNames('interactions'))
neededColNames.remove('interaction_id')
givenColNames = set(vals.keys())
if len(neededColNames.difference(givenColNames)) != 0:
raise Exception('Error: addInteraction called with incorrect columns: '
+ str(vals.keys()))
payload = {'type': 'add',
'table': 'interactions',
'vals': vals}
publish.single(topic='webapp/updates',
hostname=getBrokerIp(),
payload=json.dumps(payload))
# @param interaction_id: the interaction to delete
def deleteInteraction(interaction_id):
payload = {'type': 'delete',
'table': 'interactions',
'interaction_id': interaction_id}
publish.single(topic='webapp/updates',
hostname=getBrokerIp(),
payload=json.dumps(payload))
# @param interaction_id: the interaction to delete
# @param vals: a dict mapping col name to col value. All columns must be present
# except for interaction_id. See README.md for columns.
def updateInteraction(interaction_id, vals):
# Validate input
neededColNames = set(getColNames('interactions'))
neededColNames.remove('interaction_id')
givenColNames = set(vals.keys())
if len(neededColNames.difference(givenColNames)) != 0:
raise Exception('Error: updateInteraction called with incorrect columns: '
+ str(vals.keys()))
payload = {'type': 'update',
'table': 'interactions',
'interaction_id': interaction_id,
'vals': vals}
publish.single(topic='webapp/updates',
hostname=getBrokerIp(),
payload=json.dumps(payload))
# @param vals: a dict containing only 'display_name' and 'description'
def updateNode(serial, vals):
if len(vals) != 2 or 'display_name' not in vals or 'description' not in vals:
raise Exception('Error: updateNode called with incorrect columns: '
+ str(vals.keys()))
payload = {'type': 'update',
'table': 'node_data',
'serial': serial,
'vals': vals}
publish.single(topic='webapp/updates',
hostname=getBrokerIp(),
payload=json.dumps(payload))
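# Illustrative usage sketch (requires a running MQTT broker and nodes; the
# serial numbers and status value below are made up):
#
#   statuses = getNodeStatus([1, 2])      # {1: {'status': ...}, 2: {...}}
#   setNodeStatus(1, 'active')
#   deleteInteraction(42)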
|
class FormElementLacksAction(RuntimeError):
"""
The form element lacks an "action" attribute
"""
class FormElementLacksMethod(RuntimeError):
"""
The form element lacks a "method" attribute
"""
class FormNotFound(RuntimeError):
"""
The specified HTML form was not found on the page
"""
|
"""
This package is designed to wrap data for the data profiler.
"""
|
# Inheritance and private methods
class ContaBancaria(object):
def __init__(self, *args, **kwargs):
self.saldo = 0
def depositar(self, valor):
self.saldo += valor
self.consultar_saldo()
def sacar(self, valor):
self.saldo -= valor
self.consultar_saldo()
def consultar_saldo(self):
print(self.saldo)
class ContaPoupanca(ContaBancaria):
    def _consulta_rentabilidade(self):  # This is a private method!
return 1.6
def rentabilidade(self):
rentabilidade = self._consulta_rentabilidade()
if rentabilidade < 0.5:
print("Consulte o seu Gerente")
else:
print(rentabilidade)
conta_poupanca = ContaPoupanca()
conta_poupanca.rentabilidade()
conta_poupanca.depositar(100)
conta_poupanca.sacar(100)
|
# coding=utf-8
import json
from datetime import datetime
from api.snippets import dthandler, delete_item_in_list, lookahead
from common.settings import MONGO_LIMIT_REPORT
def pretty_data(cursor_db, data_set_name, total, per_page, page_n,
response_format='json', keys=None, search_data=None):
return preparation_response(cursor_db, data_set_name, total, per_page, page_n, response_format, search_data, keys)
def get_contract_id(contract):
if contract.get('regNum', False):
return {'regNum': contract.get('regNum', False)}
elif contract.get('id', False):
return {'id': contract.get('id', False)}
else:
return None
def get_report(data_cursor):
"""
    To demonstrate the scale of the examined market segment, the following are reported in tabular form:
    avg_price - average contract value,
    max_price - largest contract value,
    min_price - smallest contract value,
    sum_price - total value of all contracts,
    total_contracts - number of contracts in the given period,
    maximum yearly contract volume,
    minimum yearly contract volume,
    amount_suppliers - number of suppliers,
    amount_customers - number of customers in the given period,
    share of the sector's contracts in the total volume of government contracts for the economic sphere, and the dynamics of that share,
    regions_contracts_stat - top 10 regions by volume of contracts concluded in the segment,
    top 10 municipalities by volume of contracts concluded in the segment,
    period_contracts - sum and number of contracts per month, and their share by volume and count over all months of the given interval, to reveal seasonality of procurement in the sector,
    period_economic_sectors - sum and number of contracts in the sector by season of the year, plus the sector's share of contracts by volume and count per season,
    top_contracts - top 30 largest contracts in the sample,
    top_customers - top 25 customers in the sample,
    top_suppliers - top 25 suppliers in the sample,
    local_suppliers - share (by volume) of local suppliers (customer and supplier registered in the same region), by year.
"""
response = {
'avg_price': 0,
'total_contracts': data_cursor.count(),
'top_contracts': [],
'period_contracts': {},
'period_economic_sectors': {},
'local_suppliers': {
'bad_suppliers': 0
}
}
custom_contrac_index = 0
regions_contracts_stat = dict()
stat_suppliers = dict()
stat_customers = dict()
for contract, position in lookahead(data_cursor):
custom_contrac_index += 1
response['avg_price'] += contract.get('price', 0)
if position == 1:
response['max_price'] = contract.get('price', 0)
elif position == -1:
response['min_price'] = contract.get('price', 0)
if len(response['top_contracts']) < 30:
response['top_contracts'].append(
{
'price': contract.get('price', 0),
'id': get_contract_id(contract),
'name': contract.get('name', '')
}
)
customer_name = u'{}_{}'.format(contract.get('customer', {}).get('inn'), contract.get('customer', {}).get('kpp'))
if not stat_customers.get(customer_name):
stat_customers[customer_name] = {
'fullName': contract.get('customer', {}).get('fullName'),
'price': 0,
'inn': contract.get('customer', {}).get('inn'),
'kpp': contract.get('customer', {}).get('kpp'),
'regNum': contract.get('customer', {}).get('regNum')
}
stat_customers[customer_name]['price'] += contract.get('price', 0)
if contract.get('regionCode', False):
regions_contracts_stat[contract['regionCode']] = regions_contracts_stat.get(contract['regionCode'], 0) + contract.get('price', 0)
for supplier in contract.get('suppliers', {}):
supplier_name = u'{}_{}'.format(supplier.get('inn'), supplier.get('kpp'))
if not stat_suppliers.get(supplier_name):
stat_suppliers[supplier_name] = {
'organizationName': supplier.get('organizationName'),
'price': 0,
'inn': supplier.get('inn'),
'kpp': supplier.get('kpp'),
}
stat_suppliers[supplier_name]['price'] += contract.get('price', 0)
try:
if customer_name[:2] == supplier_name[:2]:
date = datetime.strptime(contract.get('publishDate', '')[:19], "%Y-%m-%dT%H:%M:%S")
if not date.year in response['local_suppliers']:
response['local_suppliers'][date.year] = {}
response['local_suppliers'][date.year][date.month] = response['local_suppliers'][date.year].get(date.month, 0) + 1
except (NameError, ValueError):
response['local_suppliers']['bad_suppliers'] += 1
if contract.get('publishDate'):
try:
date = datetime.strptime(contract.get('publishDate')[:19], "%Y-%m-%dT%H:%M:%S")
if not response['period_contracts'].get(date.year):
response['period_contracts'][date.year] = {}
response['period_contracts'][date.year][date.month] = \
response['period_contracts'][date.year].get(date.month, 0) + contract.get('price', 0)
if contract.get('economic_sectors'):
try:
for sector in contract.get('economic_sectors'):
name = sector['name'] if 'name' in sector else sector['title']
if not name in response['period_economic_sectors']:
response['period_economic_sectors'][name] = {}
if not response['period_economic_sectors'][name].get(date.year):
response['period_economic_sectors'][name][date.year] = {}
if not response['period_economic_sectors'][name][date.year].get(date.month):
response['period_economic_sectors'][name][date.year][date.month] = {'price': 0, 'amount': 0}
response['period_economic_sectors'][name][date.year][date.month]['price'] += contract.get('price', 0)
response['period_economic_sectors'][name][date.year][date.month]['amount'] += 1
except KeyError as e:
pass
except ValueError:
pass
response['regions_contracts_stat'] = sorted(regions_contracts_stat.items(), key=lambda x: x[1], reverse=True)[:10]
    response['top_customers'] = sorted(stat_customers.items(), key=lambda x: x[1]['price'], reverse=True)[:25]
response['amount_customers'] = len(stat_customers)
    response['top_suppliers'] = sorted(stat_suppliers.items(), key=lambda x: x[1]['price'], reverse=True)[:25]
response['amount_suppliers'] = len(stat_suppliers)
response['sum_price'] = response['avg_price']
response['avg_price'] /= custom_contrac_index
response['limit'] = MONGO_LIMIT_REPORT
return format_data(response, 'report')
def preparation_response(data_cursor, data_set_name, total, per_page, page_n,
response_format, search_data=None, keys=None):
"""
    Makes the JSON data from MongoDB usable for the REST API response format
"""
d = {data_set_name: {'total': total, 'perpage': per_page, 'page': page_n + 1, 'data': []}}
if not search_data:
for doc in data_cursor:
try:
doc["mongo_id"] = str(doc.pop(u"_id"))
except:
pass
d[data_set_name]['data'].append(doc)
else:
doc_list = list()
ordered_by_relevance = list()
for doc in data_cursor:
try:
doc["mongo_id"] = str(doc.pop(u"_id"))
except:
pass
doc[u"searchRank"] = search_data[doc["mongo_id"]]
doc_list.append(doc)
if doc and not keys: keys = doc.keys()
ordered_by_relevance = sorted(doc_list, key=lambda doc: doc[u"searchRank"])
d[data_set_name]['data'].extend(ordered_by_relevance)
return format_data(d, data_set_name, response_format=response_format, keys=keys)
def format_data(data, data_set_name, response_format='json', keys=None, params = {}):
"""
    Returns the data in the requested format, CSV or JSON
"""
if not response_format: response_format = ''
if u'csv' in response_format:
response = u''
if keys:
delete_item_in_list(keys, 'mongo_id')
delete_item_in_list(keys, 'searchRank')
response += u'total:{total};perpage:{perpage};page:{page}\n'.format(**data.get(data_set_name, {}))
response += u"{}\n".format(u";".join(keys))
for item in data.get(data_set_name, {}).get('data', []):
response += u"{} \n".format(u';'.join([unicode(item.get(key, u'')) for key in keys]))
return response
else:
raise StandardError
elif u'xls' in response_format:
pass
elif response_format == 'json':
s = json.dumps(data, indent=4, default=dthandler)
value = u'\n'.join([l.rstrip() for l in s.splitlines()])
return value
else: # by default - return JSON data
s = json.dumps(data, indent=4, default=dthandler)
value = u'\n'.join([l.rstrip() for l in s.splitlines()])
return value
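# Illustrative call (the MongoDB cursor and the totals come from a query made
# outside this module; the names below are made up):
#
#   body = pretty_data(cursor, 'contracts', total=cursor.count(),
#                      per_page=50, page_n=0, response_format='json')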
|
from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from scraper.settings import SQLALCHEMY_DATABASE_URL
engine = create_engine(
SQLALCHEMY_DATABASE_URL,
pool_pre_ping=True
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
meta = MetaData()
def create_schema():
meta.create_all(engine)
def create_session() -> SessionLocal:
db = SessionLocal()
return db
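# Illustrative usage sketch: open a short-lived session per unit of work and
# always close it (create_schema() creates any tables registered on this
# module's MetaData object).
#
#   create_schema()
#   db = create_session()
#   try:
#       ...           # query / add objects through db
#       db.commit()
#   finally:
#       db.close()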
|
'''
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
Note:
Note that in some languages, such as Java, there is no unsigned integer type. In this case, the input will be given as a signed integer type. It should not affect your implementation, as the integer's internal binary representation is the same, whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3, the input represents the signed integer -3.
'''
class Solution:
def hammingWeight(self, n: int) -> int:
return bin(n)[2:].count('1')
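# Quick illustrative check (not part of the original snippet):
# 11 == 0b1011 has three set bits.
assert Solution().hammingWeight(11) == 3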
|
#%%
# -*- coding: utf-8 -*-
"""
DBSCAN on its own, with nothing else... °-°
"""
import LibraryTT.txt2array as conversion
import numpy as np
from numpy import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from mpl_toolkits.mplot3d import Axes3D
#import open3d as o3d
#%%
conversion.bytxt()
# %matplotlib inline
D = conversion.txt2array()
D = np.delete(D,0,axis=0)
DD = np.copy(D)  # Make a copy of the data so the originals are not affected
Epsilon = 40 #35 #30
MinPts = 20 #40 #75 #78
# result = DBSCAN(DD,Epsilon,MinPts)
chch = conversion.RObjetos(DD,Epsilon,MinPts)
TN = conversion.usar(chch)
# print(chch)
# Plot a single data point
#%%
#conversion.imprimir3D(D)
#%%
conversion.imprimirObjetos(DD,TN,chch,0,0)
#%%
conversion.imprimirObjetos(DD,TN,chch,2,0)
#%% Plot including the noise points
conversion.imprimirObjetos(DD,TN,chch,1,0)
# %%
|
# You are given an array of non-negative integers that represents a
# two-dimensional elevation map where each element is unit-width wall and the
# integer is the height. Suppose it will rain and all spots between two walls
# get filled up.
def contain_water(height):
max_left, max_right = 0, 0
left, right = 0, len(height) - 1
ans = 0
while left < right:
if height[left] < height[right]:
if height[left] > max_left:
max_left = height[left]
else:
ans += max_left - height[left]
left += 1
else:
if height[right] > max_right:
max_right = height[right]
else:
ans += max_right - height[right]
right -= 1
return ans
if __name__ == '__main__':
for height in [[0,1,0,2,1,0,1,3,2,1,2,1], [2, 1, 2], [3, 0, 1, 3, 0, 5]]:
print('{}: {}'.format(height, contain_water(height)))
|
def main(request, response):
if "full" in request.GET:
return request.url
else:
return request.request_path
|
# -*- coding: utf-8 -*-
from django.contrib.syndication.views import Feed
from django.shortcuts import reverse
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
from .models import Article
class DBlogRssFeed(Feed):
""" rss 订阅 , 仅显示文章摘要"""
feed_type = Rss201rev2Feed
title = 'xiaowu‘s django_blog'
description = 'xiaowu的博客文章订阅'
link = '/'
# rss显示内容
def items(self):
return Article.objects.all()
    # feed item title
def item_title(self, item):
return item.title
    # feed item summary
def item_description(self, item):
return item.digest
    # feed item link
def item_link(self, item):
return reverse('blog:detail', args=(item.pk,))
class DBlogAtomFeed(DBlogRssFeed):
""" Atom 子类"""
feed_type = Atom1Feed # 修改类型为 Atom1Feed
subtitle = DBlogRssFeed.description
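# Wiring the feeds into a URLconf (illustrative; the actual urls module and
# route names in this project may differ):
#
#   from django.urls import path
#   from .feeds import DBlogRssFeed, DBlogAtomFeed
#
#   urlpatterns = [
#       path('feed/rss/', DBlogRssFeed(), name='rss'),
#       path('feed/atom/', DBlogAtomFeed(), name='atom'),
#   ]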
|
import random
class Player:
"""
Plays in games for their team
Has skill/overall
Has a salary
"""
    def __init__(self, name, skill, salary):
        self.name = name
        self.salary = salary
        # skill/overall rating of the player
        self.skill = skill
    def expected_salary(self):
        # renamed from salary() so the method does not clash with the
        # salary attribute set in __init__
        return 50000 + self.skill * 100
def generate_player():
    # a plain list of names, so random.choice picks a single player name
    player_names = [
        "Lebron James", "Anthony Davis", "Giannis Antetokounmpo", "Danny Green",
        "Stephen Curry", "Klay Thompson", "Kevin Durant", "Demarcus Cousins",
        "Damian Lillard", "Kyrie Irving", "Ben Simmons", "Joel Embiid",
        "Derrick Rose", "Bradley Beal", "John Wall", "D'Angelo Russell",
        "Victor Oladipo", "CJ Mccollum", "Lonzo Ball", "Kemba Walker",
        "Kawhi Leonard", "Kyle Lowry", "James Harden", "Chris Paul",
        "Russel Westbrook", "Paul George", "Jimmy Butler", "Marc Gasol",
        "Devin Booker", "De'Aaron Fox", "Al Horford", "Blake Griffin",
        "Andre Iguodala", "Serge Ibaka", "Jayson Tatum", "Donovan Mitchell",
        "Trae Young", "Kyle Kuzma", "Gordon Hayward", "Karl Anthony Towns",
    ]
    player_name = random.choice(player_names)
    # generate overall (60-99) and the matching salary
    skill = 60 + random.randint(0, 39)
    return Player(player_name, skill, 50000 + skill * 100)
|
from PyQt5.QtWidgets import QApplication
import sys
from ui import Login
#self.close()
def main():
app = QApplication(sys.argv)
#ex = AppWindow()
ex = Login()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
import unittest
from papertrack.core import Configuration, Field
class TestConfiguration(unittest.TestCase):
def test__empty_configuration_created(self):
config = Configuration()
self.assertIsInstance(config, Configuration)
def test_given_empty_configuration_returns_default_state_list(self):
config = Configuration()
self.assertListEqual(config.get_document_states(), ["READY", "READING", "DONE"])
def test_given_document_states_returns_document_states(self):
config = Configuration({
"states": {
"A": {
"B": "Transition to B"
},
"B": {
"A": "go back to A"
}
}
})
self.assertListEqual(config.get_document_states(), ["A", "B"])
def test_given_document_states_returns_document_state_transitions(self):
config = Configuration({
"states": {
"A": {
"B": "Transition to B"
},
"B": {
"A": "go back to A"
}
}
})
self.assertListEqual(config.get_document_state_transitions("A"), [("B", "Transition to B")])
    def test_given_empty_configuration_returns_state_transitions_for_default(self):
config = Configuration()
self.assertListEqual(config.get_document_state_transitions("READY"), [
("READING", "Start reading")
])
self.assertListEqual(config.get_document_state_transitions("READING"), [
("DONE", "Finish reading"),
("READY", "Bring back to to-read state")
])
self.assertRaises(ValueError, lambda: config.get_document_state_transitions("AAA"))
def test_given_empty_configuration_default_document_state_is_ready(self):
config = Configuration()
self.assertEqual(config.get_default_document_state(), "READY")
def test_given_custom_configuration_default_document_state_is_obtained(self):
config = Configuration({
"states": {
"A": {},
"B": {}
},
"default_state": "A"
})
self.assertEqual(config.get_default_document_state(), "A")
    def test_given_custom_configuration_and_nonexistent_default_state_throws_value_error(self):
config = Configuration({
"states": {
"A": {},
"B": {}
},
"default_state": "C"
})
self.assertRaises(ValueError, lambda: config.get_default_document_state())
def test_given_empty_configuration_default_storage_location_is_given(self):
config = Configuration()
self.assertIsInstance(config.get_storage_location(), str)
def test_given_custom_configuration_gives_correct_storage_location(self):
config = Configuration({
"storage_location": "/my/storage/"
})
self.assertEqual(config.get_storage_location(), "/my/storage/")
def test_get_fields(self):
config = Configuration({
"fields": {
"A": {
"categories": ["A1", "A2"]
},
"B": {
"categories": ["B1", "B2"],
"default": "B2"
}
}
})
self.assertListEqual(config.get_fields(), [
Field(name="A", categories = ["A1", "A2"], default_category="A1"),
Field(name="B", categories = ["B1", "B2"], default_category="B2"),
])
|
#!/usr/bin/env python
import os
import sys
from glob import glob
input_dir = sys.argv[1]
os.chdir(input_dir)
print 'Please wait - gathering filelist...'
tiffs = glob('*.tiff')
counter = 0
filename = tiffs[0]
# create new variable which trims the trailing 11 characters (sequence number and extension).
head_good_filename = filename[:-11]
print 'Current filename without extension and number sequence is: %s' % head_good_filename
new_head = raw_input('what do you want to change this to?\n')
for i in tiffs:
tail = i[-11:]
filename_fix = new_head + tail
print filename_fix
os.rename(i, filename_fix)
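# Optional safety tweak (illustrative, not in the original script): preview the
# renames before applying them, e.g.
#   for i in tiffs:
#       print '%s -> %s' % (i, new_head + i[-11:])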
|
""" Test file for Hello World Module"""
import pytest
from src.hello_world import main_method
def test_main_method(capfd):
"""Test to validate that the main method is called."""
main_method()
out, err = capfd.readouterr()
assert out == "Hello World!\n"
|
#! python3
from pytube import YouTube
print("Please, paste the video's URL (web's address):")
try:
# Get the video url
ytVideoUrl = YouTube(input())
print(ytVideoUrl.title)
# Check if the video is the right one
print('\nIs this the video you want to download? \nYes or No?')
answer = input()
if answer.lower() == 'yes' or answer.lower() == 'y':
print('Your download will begin in a moment...')
ytDownload = ytVideoUrl.streams.filter(progressive=True, file_extension='mp4')
ytDownload.get_highest_resolution().download()
        print('Video downloaded successfully! Check it out.')
else:
print('I am sorry to hear it. Check your URL and try again.')
except Exception as error:
print(error)
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.db.models import Count
from django.urls import reverse
from autoimagefield.fields import AutoImageField
from common_utils.models import TimestampModelMixin
from hitcount.models import HitCountField
from rich_editor.fields import RichTextOriginalField, RichTextFilteredField
DESKTOP_DESCRIPTION_MAX_LENGTH = 10000
class DesktopManager(models.Manager):
def annotated_favorite(self):
return (self.get_queryset()
.annotate(favorited_count=Count('favorited')))
class Desktop(TimestampModelMixin, models.Model):
objects = DesktopManager()
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name='autor',
related_name='my_desktops',
on_delete=models.CASCADE
)
title = models.CharField(
verbose_name='názov',
max_length=255
)
image = AutoImageField(
verbose_name='desktop',
upload_to='desktops',
resize_source=dict(size=(8192, 8192))
)
original_text = RichTextOriginalField(
verbose_name='popis',
filtered_field='filtered_text',
property_name='text',
max_length=DESKTOP_DESCRIPTION_MAX_LENGTH
)
filtered_text = RichTextFilteredField(
)
favorited = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='FavoriteDesktop'
)
comments_header = GenericRelation('comments.RootHeader')
comments = GenericRelation('comments.Comment')
hit = HitCountField()
def get_absolute_url(self):
return reverse('desktops:detail', args=(self.pk,))
def get_update_url(self):
return reverse('desktops:update', args=(self.pk,))
def __str__(self):
return self.title
class Meta:
verbose_name = 'desktop'
verbose_name_plural = 'desktopy'
class FavoriteDesktop(TimestampModelMixin, models.Model):
desktop = models.ForeignKey(
Desktop,
verbose_name='desktop',
on_delete=models.CASCADE
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name='používateľ',
on_delete=models.CASCADE
)
def __str__(self):
return str(self.pk)
class Meta:
unique_together = ('desktop', 'user',)
|
import sys
from PIL import Image
# Paths of raw vs scatter
composite = '/Users/kolbt/Desktop/ALL_FIGURES/supplemental_info/all_phase_planes/SI_plane_figure.png'
rawPth = '/Users/kolbt/Desktop/ALL_FIGURES/supplemental_info/all_phase_planes/sim_overlay/'
sctPth = '/Users/kolbt/Desktop/ALL_FIGURES/supplemental_info/all_phase_planes/mips_algorithm/'
# Get PeB plane
pb = int(sys.argv[1])
file = 'HS_peB_' + str(pb) + '.png'
# All images are 6400 x 4800
width = int(sys.argv[2])
height = int(sys.argv[3])
# Read in the raw file
img1 = Image.open(rawPth + file)
img1 = img1.resize((width,height))
# Read in the scatter file
img2 = Image.open(sctPth + file)
img2 = img2.resize((width,height))
# Open the composite
comp = Image.open(composite)
if pb % 20 == 0:
x1 = 0
else:
x1 = 2 * width
y1 = int(pb / 20) * height
x2 = x1 + width
y2 = y1
# Paste the resized panels into the composite file
comp.paste(img1,(x1,y1))
comp.paste(img2,(x2,y2))
comp.save(composite)
|
import unittest
from io import StringIO
from logging import StreamHandler, getLogger
from pyskeleton.logging import configure_logger, DEBUG_LVL, WARNING_LVL
from tests.utils import (
LoggingTester,
DEBUG_MSG,
INFO_MSG,
WARNING_MSG,
ERROR_MSG,
CRITICAL_MSG,
)
class LoggingTest(LoggingTester):
def test_default_logging(self):
with StringIO() as stream:
logger = getLogger("LoggingTest.test_default_logging")
configure_logger(logger, handler=StreamHandler(stream))
self.assert_empty_log(stream)
logger.debug(DEBUG_MSG)
self.assert_empty_log(stream)
logger.info(INFO_MSG)
self.assert_log(INFO_MSG, stream)
logger.warning(WARNING_MSG)
self.assert_log(WARNING_MSG, stream)
logger.error(ERROR_MSG)
self.assert_log(ERROR_MSG, stream)
logger.critical(CRITICAL_MSG)
self.assert_log(CRITICAL_MSG, stream)
def test_debug_logging(self):
with StringIO() as stream:
logger = getLogger("LoggingTest.test_debug_logging")
configure_logger(logger, level=DEBUG_LVL, handler=StreamHandler(stream))
self.assert_empty_log(stream)
logger.debug(DEBUG_MSG)
self.assert_log(DEBUG_MSG, stream)
logger.info(INFO_MSG)
self.assert_log(INFO_MSG, stream)
logger.warning(WARNING_MSG)
self.assert_log(WARNING_MSG, stream)
logger.error(ERROR_MSG)
self.assert_log(ERROR_MSG, stream)
logger.critical(CRITICAL_MSG)
self.assert_log(CRITICAL_MSG, stream)
def test_warning_logging(self):
with StringIO() as stream:
logger = getLogger("LoggingTest.test_warning_logging")
configure_logger(logger, level=WARNING_LVL, handler=StreamHandler(stream))
self.assert_empty_log(stream)
logger.debug(DEBUG_MSG)
self.assert_empty_log(stream)
logger.info(INFO_MSG)
self.assert_empty_log(stream)
logger.warning(WARNING_MSG)
self.assert_log(WARNING_MSG, stream)
logger.error(ERROR_MSG)
self.assert_log(ERROR_MSG, stream)
logger.critical(CRITICAL_MSG)
self.assert_log(CRITICAL_MSG, stream)
if __name__ == "__main__":
unittest.main()
|
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
Sequence error counts workflow
"""
import os.path
import sys
# add this path to pull in utils in same directory:
scriptDir=os.path.abspath(os.path.dirname(__file__))
sys.path.append(scriptDir)
# add pyflow path:
pyflowDir=os.path.join(scriptDir,"pyflow")
sys.path.append(os.path.abspath(pyflowDir))
from configBuildTimeInfo import workflowVersion
from pyflow import WorkflowRunner
from sharedWorkflow import getMkdirCmd, getRmdirCmd, getDepthFromAlignments
from strelkaSharedWorkflow import SharedPathInfo, \
StrelkaSharedCallWorkflow, StrelkaSharedWorkflow
from workflowUtil import ensureDir, preJoin, getNextGenomeSegment
__version__ = workflowVersion
def strelkaGermlineRunDepthFromAlignments(self,taskPrefix="getChromDepth",dependencies=None):
bamList=[]
if len(self.params.bamList) :
bamList.append(self.params.bamList[0])
else :
return set()
outputPath=self.paths.getChromDepth()
return getDepthFromAlignments(self, bamList, outputPath, taskPrefix, dependencies)
class TempSegmentFiles :
def __init__(self) :
self.counts = []
self.observedIndelBed = []
def callGenomeSegment(self, gseg, segFiles, taskPrefix="", dependencies=None) :
segStr = str(gseg.id)
segCmd = [ self.params.getCountsBin ]
segCmd.extend(["--region", gseg.chromLabel + ":" + str(gseg.beginPos) + "-" + str(gseg.endPos)])
segCmd.extend(["--ref", self.params.referenceFasta ])
segCmd.extend(["--max-indel-size", self.params.maxIndelSize])
segFiles.counts.append(self.paths.getTmpSegmentErrorCountsPath(segStr))
segCmd.extend(["--counts-file", segFiles.counts[-1]])
for bamPath in self.params.bamList :
segCmd.extend(["--align-file",bamPath])
if self.params.isHighDepthFilter :
segCmd.extend(["--chrom-depth-file", self.paths.getChromDepth()])
def addListCmdOption(optList,arg) :
if optList is None : return
for val in optList :
segCmd.extend([arg, val])
addListCmdOption(self.params.indelCandidatesList, '--candidate-indel-input-vcf')
addListCmdOption(self.params.forcedGTList, '--force-output-vcf')
addListCmdOption(self.params.excludedRegions,"--excluded-regions-bed-file")
if self.params.knownVariants is not None :
segCmd.extend(["--known-variants-vcf-file",self.params.knownVariants])
if self.params.isReportObservedIndels :
tmpObservedIndelBedPath = self.paths.getTmpObservedIndelBedPath(segStr)
segFiles.observedIndelBed.append(tmpObservedIndelBedPath + ".gz")
segCmd.extend(['--observation-bed-file', tmpObservedIndelBedPath])
if self.params.extraCountsArguments is not None :
for arg in self.params.extraCountsArguments.strip().split() :
segCmd.append(arg)
nextStepWait = set()
setTaskLabel=preJoin(taskPrefix,"countGenomeSegment_"+gseg.id)
self.addTask(setTaskLabel,segCmd,dependencies=dependencies,memMb=self.params.callMemMb)
nextStepWait.add(setTaskLabel)
if self.params.isReportObservedIndels :
compressTask=preJoin(taskPrefix,"compressSegmentOutput_"+gseg.id)
compressCmd = "\"%s\" \"%s\"" % (self.params.bgzipBin, tmpObservedIndelBedPath)
self.addTask(compressTask, compressCmd, dependencies=setTaskLabel, isForceLocal=True)
nextStepWait.add(compressTask)
return nextStepWait
def mergeSequenceErrorCounts(self, taskPrefix, dependencies, runStatsLogPaths) :
runMergeLabel=preJoin(taskPrefix,"mergeCounts")
runMergeCmd=[self.params.mergeCountsBin]
for statsFile in runStatsLogPaths :
runMergeCmd.extend(["--counts-file",statsFile])
runMergeCmd.extend(["--output-file", self.paths.getErrorCountsOutputPath()])
return self.addTask(runMergeLabel, runMergeCmd, dependencies=dependencies, isForceLocal=True)
def callGenome(self,taskPrefix="",dependencies=None):
"""
run counter on all genome segments
"""
tmpSegmentDir=self.paths.getTmpSegmentDir()
dirTask=self.addTask(preJoin(taskPrefix,"makeTmpDir"), getMkdirCmd() + [tmpSegmentDir], dependencies=dependencies, isForceLocal=True)
segmentTasks = set()
segFiles = TempSegmentFiles()
for gseg in getNextGenomeSegment(self.params) :
segmentTasks |= callGenomeSegment(self, gseg, segFiles, dependencies=dirTask)
if len(segmentTasks) == 0 :
raise Exception("No genome regions to analyze. Possible target region parse error.")
# create a checkpoint for all segments:
completeSegmentsTask = self.addTask(preJoin(taskPrefix,"completedAllGenomeSegments"),dependencies=segmentTasks)
finishTasks = set()
# merge segment stats:
finishTasks.add(mergeSequenceErrorCounts(self,taskPrefix,completeSegmentsTask, segFiles.counts))
if self.params.isReportObservedIndels :
finishTasks.add(self.concatIndexBed(taskPrefix, completeSegmentsTask, segFiles.observedIndelBed,
self.paths.getObservedIndelBedPath(), "observedIndels"))
if not self.params.isRetainTempFiles :
rmTmpCmd = getRmdirCmd() + [tmpSegmentDir]
rmTask=self.addTask(preJoin(taskPrefix,"rmTmpDir"),rmTmpCmd,dependencies=finishTasks, isForceLocal=True)
nextStepWait = finishTasks
return nextStepWait
class CallWorkflow(StrelkaSharedCallWorkflow) :
"""
A separate call workflow is setup so that we can delay the workflow execution until
the ref count file exists
"""
def __init__(self,params,paths) :
super(CallWorkflow,self).__init__(params)
self.paths = paths
def workflow(self) :
callGenome(self)
class PathInfo(SharedPathInfo):
"""
object to centralize shared workflow path names
"""
def __init__(self, params) :
super(PathInfo,self).__init__(params)
def getTmpSegmentErrorCountsPath(self, segStr) :
return os.path.join( self.getTmpSegmentDir(), "strelkaErrorCounts.%s.bin" % (segStr))
def getTmpObservedIndelBedPath(self, segStr) :
return os.path.join( self.getTmpSegmentDir(), "strelkaObservedIndel.%s.bed" % segStr)
def getErrorCountsOutputPath(self) :
return os.path.join( self.params.variantsDir, "strelkaErrorCounts.bin")
def getObservedIndelBedPath(self) :
return os.path.join( self.params.debugDir, "strelkaObservedIndel.bed.gz")
class SequenceErrorCountsWorkflow(StrelkaSharedWorkflow) :
"""
Sequence error counts workflow
"""
def __init__(self, params) :
global PathInfo
super(SequenceErrorCountsWorkflow,self).__init__(params, PathInfo)
# if debugging output is going to be produced, add a results/debug dir
if self.params.isReportObservedIndels:
self.params.debugDir=os.path.join(self.params.resultsDir,"debug")
ensureDir(self.params.debugDir)
# format bam lists:
if self.params.bamList is None : self.params.bamList = []
def getSuccessMessage(self) :
"Message to be included in email for successful runs"
msg = "Strelka sequence error counts workflow successfully completed.\n\n"
msg += "\tworkflow version: %s\n" % (__version__)
return msg
def workflow(self) :
self.flowLog("Initiating Strelka sequence error counts workflow version: %s" % (__version__))
self.setCallMemMb()
callPreReqs = set()
if self.params.isHighDepthFilter :
callPreReqs |= strelkaGermlineRunDepthFromAlignments(self)
self.addWorkflowTask("CallGenome", CallWorkflow(self.params, self.paths), dependencies=callPreReqs)
|
#!/usr/bin/env python
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
def project_config(context) -> dict: # noqa pylint: disable=unused-argument
return {
'DEBUG': settings.DEBUG,
'ENVIRONMENT_NAME': _(settings.ENVIRONMENT_NAME),
'ENVIRONMENT_COLOR': settings.ENVIRONMENT_COLOR,
}
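# To expose these values to templates, register the processor in
# settings.TEMPLATES (illustrative; the dotted path depends on where this
# module lives in the project):
#
#   TEMPLATES = [{
#       ...,
#       'OPTIONS': {
#           'context_processors': [
#               ...,
#               'myproject.context_processors.project_config',
#           ],
#       },
#   }]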
|
""" Trainer class decorators with the implementation of common features, such as common optimizers. """
__author__ = ["Francisco Clavero"]
__email__ = ["fcoclavero32@gmail.com"]
__status__ = "Prototype"
from typing import Callable
from adabound import AdaBound
from torch.nn import Module
from torch.optim import SGD
from torch.optim import Adam
from torch.optim import AdamW
from torch.optim import RMSprop
class OptimizerMixin:
"""Base class for optimizers."""
def __init__(self, *args, learning_rate=0.01, **kwargs):
"""
:param args: arguments for additional mixins
:type: Tuple
:param learning_rate: learning rate for optimizers
:type: float
:param kwargs: keyword arguments for additional mixins
:type: Dict
"""
self.learning_rate = learning_rate
super().__init__(*args, **kwargs)
class GANOptimizerMixin:
"""Base class for the optimizers of a GAN Trainer. These Trainers require two optimizers, one for the generator and
another for the discriminator. This Mixin is meant to be used with a normal OptimizerMixin.
Args:
Returns:
"""
_optimizer: Callable
discriminator: Module
generator: Module
@property
def optimizer(self):
""":override: return the optimizers for the two adversarial models.
Args:
Returns:
the optimizers for the generator and discriminator model modules.
"""
return self._optimizer(self.generator.parameters()), self._optimizer(self.discriminator.parameters())
class AdaBoundOptimizerMixin(OptimizerMixin):
"""Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s `optimizer` property with an
[AdaBound](https://github.com/Luolc/AdaBound) optimizer.
Args:
Returns:
"""
def __init__(
self,
*args,
betas=(0.9, 0.999),
final_learning_rate=0.1,
gamma=1e-3,
epsilon=1e-08,
weight_decay=0,
amsbound=False,
**kwargs
):
"""
        :param args: arguments for additional mixins
        :type: Tuple
        :param betas: coefficients used for computing running averages of gradient and its square
        :type: Tuple[float, float]
:param final_learning_rate: final (SGD) learning rate.
:type: float
:param gamma: convergence speed of the bound functions.
:type: float
:param epsilon: term added to the denominator to improve numerical stability
:type: float
:param weight_decay: weight decay for L2 penalty
:type: float
:param amsbound: whether to use the AMSGrad variant of this algorithm from the paper [Adaptive Gradient Methods
with Dynamic Bound of Learning Rate]( https://openreview.net/forum?id=Bkg3g2R9FX)
:type: bool
:param kwargs: keyword arguments for additional mixins
:type: Dict
"""
self.betas = betas
self.final_learning_rate = final_learning_rate
self.gamma = gamma
self.epsilon = epsilon
self.weight_decay = weight_decay
self.amsbound = amsbound
super().__init__(*args, **kwargs)
def _optimizer(self, parameters):
"""
Args:
parameters:
Returns:
"""
return AdaBound(
parameters,
lr=self.learning_rate,
betas=self.betas,
final_lr=self.final_learning_rate,
gamma=self.gamma,
eps=self.epsilon,
weight_decay=self.weight_decay,
amsbound=self.amsbound,
)
class AdamOptimizerMixin(OptimizerMixin):
"""Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s `optimizer` property with an
[Adam](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam) optimizer.
Args:
Returns:
"""
def __init__(self, *args, betas=(0.9, 0.999), epsilon=1e-08, weight_decay=0, amsgrad=False, **kwargs):
"""
:param args: arguments for additional mixins
:type: Tuple
:param betas: coefficients used for computing running averages of gradient and its square
:type: Tuple[float, float]
:param epsilon: term added to the denominator to improve numerical stability
:type: float
:param weight_decay: weight decay for L2 penalty
:type: float
:param amsgrad: whether to use the AMSGrad variant of this algorithm from the paper [On the Convergence of
Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)
:type: bool
:param kwargs: keyword arguments for additional mixins
:type: Dict
"""
self.betas = betas
self.epsilon = epsilon
self.weight_decay = weight_decay
self.amsgrad = amsgrad
super().__init__(*args, **kwargs)
def _optimizer(self, parameters):
"""
Args:
parameters:
Returns:
"""
return Adam(
parameters,
lr=self.learning_rate,
betas=self.betas,
eps=self.epsilon,
weight_decay=self.weight_decay,
amsgrad=self.amsgrad,
)
class AdamWOptimizerMixin(AdamOptimizerMixin):
"""Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s `optimizer` property with an
[AdamW](https://pytorch.org/docs/stable/optim.html#torch.optim.AdamW) optimizer.
Args:
Returns:
"""
def _optimizer(self, parameters):
"""
Args:
parameters:
Returns:
"""
return AdamW(
parameters,
lr=self.learning_rate,
betas=self.betas,
eps=self.epsilon,
weight_decay=self.weight_decay,
amsgrad=self.amsgrad,
)
class RMSpropOptimizerMixin(OptimizerMixin):
"""Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s `optimizer` property with an
[RMSprop](https://pytorch.org/docs/stable/optim.html#torch.optim.RMSprop) optimizer.
Args:
Returns:
"""
def __init__(self, *args, alpha=0.99, epsilon=1e-08, weight_decay=0, momentum=0, centered=False, **kwargs):
"""
:param args: arguments for additional mixins
:type: Tuple
:param alpha: the smoothing constant.
:type: float
:param epsilon: term added to the denominator to improve numerical stability
:type: float
:param weight_decay: weight decay for L2 penalty
:type: float
:param momentum: momentum factor.
:type: float
:param centered: whether to compute the centered RMSProp (gradient normalized by an estimation of its variance).
:type: bool
:param kwargs: keyword arguments for additional mixins
:type: Dict
"""
self.alpha = alpha
self.epsilon = epsilon
self.weight_decay = weight_decay
self.momentum = momentum
self.centered = centered
super().__init__(*args, **kwargs)
def _optimizer(self, parameters):
"""
Args:
parameters:
Returns:
"""
return RMSprop(
parameters,
lr=self.learning_rate,
alpha=self.alpha,
eps=self.epsilon,
weight_decay=self.weight_decay,
momentum=self.momentum,
centered=self.centered,
)
class SGDOptimizerMixin(OptimizerMixin):
"""Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s `optimizer` property with a
[Stochastic Gradient Descent (SGD)](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD) optimizer.
Args:
Returns:
"""
def __init__(self, *args, momentum=0.8, weight_decay=0.0, dampening=0.0, nesterov=False, **kwargs):
"""
:param args: arguments for additional mixins.
:type: Tuple
:param momentum: momentum factor.
:type: float
:param weight_decay: decay for L2 penalty.
:type: float
:param dampening: momentum dampening.
:type: float
:param nesterov: enable Nesterov momentum.
:type: float
:param kwargs: keyword arguments for additional mixins.
:type: Dict
"""
self.momentum = momentum
self.weight_decay = weight_decay
self.dampening = dampening
self.nesterov = nesterov
super().__init__(*args, **kwargs)
def _optimizer(self, parameters):
"""
Args:
parameters:
Returns:
"""
return SGD(
parameters,
lr=self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay,
dampening=self.dampening,
nesterov=self.nesterov,
)
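# Illustrative composition sketch (AbstractTrainer and its interface are
# assumed from the surrounding project and are not defined here): a concrete
# trainer selects its optimizer by mixing in one of the classes above, and the
# mixin's _optimizer callable builds the optimizer from the model parameters.
#
#   class MyTrainer(SGDOptimizerMixin, AbstractTrainer):
#       @property
#       def optimizer(self):
#           return self._optimizer(self.model.parameters())
#
#   trainer = MyTrainer(learning_rate=0.01, momentum=0.9)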
|
# Read an integer M and print the factor pair (i, M // i) with the smallest
# factor i > 1; if M is prime, the loop's else-branch prints (1, M).
# e.g. M = 12 prints "2 6", M = 13 prints "1 13".
M = int(input())
for i in range(2, int(M ** 0.5) + 1):
if M % i == 0:
print(i, M // i)
break
else:
print(1, M)
|