content stringlengths 5 1.05M |
|---|
# coding: UTF-8
# Loads configuration from a `.env` file located next to this module and
# exposes the values as module-level constants. Each constant is None when
# the corresponding environment variable is unset.
import os
from os.path import join, dirname
from dotenv import load_dotenv

dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)

# Twitter API credentials
API_KEY = os.environ.get("API_KEY")
API_SECRET_KEY = os.environ.get("API_SECRET_KEY")
ACCESS_TOKEN = os.environ.get("ACCESS_TOKEN")
ACCESS_TOKEN_SECRET = os.environ.get("ACCESS_TOKEN_SECRET")
CALLBACK_URL = os.environ.get("CALLBACK_URL")
# NOTE(review): presumably tokens for a second, specific bot account -- confirm
HONDA_ACCESS_TOKEN = os.environ.get("HONDA_ACCESS_TOKEN")
HONDA_ACCESS_TOKEN_SECRET = os.environ.get("HONDA_ACCESS_TOKEN_SECRET")

# MySQL connection settings
MYSQL_ROOT_PASSWORD = os.environ.get("MYSQL_ROOT_PASSWORD")
MYSQL_HOST = os.environ.get("MYSQL_HOST")
MYSQL_DATABASE = os.environ.get("MYSQL_DATABASE")
MYSQL_USER = os.environ.get("MYSQL_USER")
MYSQL_PASSWORD = os.environ.get("MYSQL_PASSWORD")
# Public API of this package: the optimizer interface plus the quadratic
# test functions, all re-exported from the private _base module.
__all__ = ['Optimizer', 'OptimizationFunction',
           'Quadratic', 'quad1', 'quad2', 'quad3', 'quad4', 'quad5']
from ._base import Optimizer, OptimizationFunction, Quadratic, quad1, quad2, quad3, quad4, quad5
|
import os, random, sys, io
import numpy as np
from keras.models import Model
from keras.layers import Input, GRU, Dense, Embedding, Dropout
from keras.models import load_model
from keras.optimizers import Adam
"""
a simple seq2seq model prepared as a baseline model for DSTC7
https://github.com/DSTC-MSR-NLP/DSTC7-End-to-End-Conversation-Modeling
following Keras tutorial:
https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html
NOTE:
* word-level, GRU-based
* no attention mechanism
* no beam search. greedy decoding, UNK disabled
CONTACT:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
# Special tokens that must exist in the vocabulary file (Dataset.__init__
# raises KeyError if any is missing).
SOS_token = '_SOS_'  # start-of-sequence
EOS_token = '_EOS_'  # end-of-sequence
UNK_token = '_UNK_'  # unknown word
def set_random_seed(seed=912):
    """Seed both Python's and NumPy's PRNGs so runs are reproducible."""
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
def makedirs(fld):
    """Create directory `fld` (and parents) if it does not already exist.

    Fixed: the old exists()-then-makedirs() sequence had a race -- another
    process creating the directory between the two calls raised
    FileExistsError. `exist_ok=True` makes the operation atomic and idempotent.
    """
    os.makedirs(fld, exist_ok=True)
class Dataset:
    """
    Loads a vocabulary plus tokenized source/target sequence-pair files and
    serves them in vectorized chunks for seq2seq training.

    assumptions of the data files
    * SOS and EOS are top 2 tokens
    * dictionary ordered by frequency
    """

    def __init__(self,
            path_source, path_target, path_vocab,
            max_seq_len=32,
            test_split=0.2,  # fraction of pairs held out as validation data
            read_txt=True,   # set False to skip reading the pair files (e.g. interact mode)
            ):
        # load token dictionary; index 0 is reserved for padding (empty token)
        self.index2token = {0: ''}
        self.token2index = {'': 0}
        self.max_seq_len = max_seq_len
        with io.open(path_vocab, encoding="utf-8") as f:
            lines = f.readlines()
        for i, line in enumerate(lines):
            token = line.strip('\n').strip()
            if len(token) == 0:
                # stop at the first blank line; everything below is ignored
                break
            self.index2token[i + 1] = token
            self.token2index[token] = i + 1
        # special-token ids; raises KeyError if the vocab lacks any of them
        self.SOS = self.token2index[SOS_token]
        self.EOS = self.token2index[EOS_token]
        self.UNK = self.token2index[UNK_token]
        self.num_tokens = len(self.token2index) - 1  # not including 0-th (padding)
        print('num_tokens: %i'%self.num_tokens)
        if read_txt:
            self.read_txt(path_source, path_target, test_split)

    def read_txt(self, path_source, path_target, test_split):
        """Read the token-id files, truncate, pair, shuffle and split."""
        print('loading data from txt files...')
        # load source-target pairs, tokenized (one space-separated id line each)
        seqs = dict()
        for k, path in [('source', path_source), ('target', path_target)]:
            seqs[k] = []
            with io.open(path, encoding="utf-8") as f:
                lines = f.readlines()
            for line in lines:
                seq = []
                for c in line.strip('\n').strip().split(' '):
                    i = int(c)
                    if i <= self.num_tokens:  # drop out-of-vocabulary ("unknown") ids
                        seq.append(i)
                # keep at most the LAST (max_seq_len - 2) tokens, leaving room
                # for the SOS/EOS tokens added on the decoder side
                seqs[k].append(seq[-min(self.max_seq_len - 2, len(seq)):])
        self.pairs = list(zip(seqs['source'], seqs['target']))
        # train-test split; shuffle is in place, so the split depends on the
        # global numpy seed (see set_random_seed)
        np.random.shuffle(self.pairs)
        self.n_train = int(len(self.pairs) * (1. - test_split))
        self.i_sample_range = {
            'train': (0, self.n_train),
            'test': (self.n_train, len(self.pairs)),
            }
        self.i_sample = dict()
        self.reset()

    def reset(self):
        # rewind every task's cursor to the start of its sample range
        for task in self.i_sample_range:
            self.i_sample[task] = self.i_sample_range[task][0]

    def all_loaded(self, task):
        # True once the cursor reached the end of the task's range
        return self.i_sample[task] == self.i_sample_range[task][1]

    def load_data(self, task, max_num_sample_loaded=None):
        """Vectorize the next chunk of pairs for `task` ('train' or 'test').

        Returns (encoder_input_data, decoder_input_data, decoder_target_data,
        source_texts, target_texts). Decoder inputs start with SOS; targets are
        the same tokens shifted one step left and one-hot encoded
        (teacher forcing).
        """
        i_sample = self.i_sample[task]
        if max_num_sample_loaded is None:
            max_num_sample_loaded = self.i_sample_range[task][1] - i_sample
        i_sample_next = min(i_sample + max_num_sample_loaded, self.i_sample_range[task][1])
        num_samples = i_sample_next - i_sample
        self.i_sample[task] = i_sample_next
        print('building %s data from %i to %i'%(task, i_sample, i_sample_next))
        encoder_input_data = np.zeros((num_samples, self.max_seq_len))
        decoder_input_data = np.zeros((num_samples, self.max_seq_len))
        decoder_target_data = np.zeros((num_samples, self.max_seq_len, self.num_tokens + 1))  # +1 as mask_zero
        source_texts = []
        target_texts = []
        for i in range(num_samples):
            seq_source, seq_target = self.pairs[i_sample + i]
            if not bool(seq_target) or not bool(seq_source):
                # NOTE(review): skipping leaves all-zero rows in the arrays but
                # appends nothing to the *_texts lists, so rows and texts can
                # misalign for batches containing empty pairs -- confirm intended
                continue
            if seq_target[-1] != self.EOS:
                seq_target.append(self.EOS)
            source_texts.append(' '.join([self.index2token[j] for j in seq_source]))
            target_texts.append(' '.join([self.index2token[j] for j in seq_target]))
            for t, token_index in enumerate(seq_source):
                encoder_input_data[i, t] = token_index
            # decoder input begins with SOS; target is shifted one step left
            decoder_input_data[i, 0] = self.SOS
            for t, token_index in enumerate(seq_target):
                decoder_input_data[i, t + 1] = token_index
                decoder_target_data[i, t, token_index] = 1.
        return encoder_input_data, decoder_input_data, decoder_target_data, source_texts, target_texts
class Seq2Seq:
    """Word-level stacked-GRU encoder-decoder (no attention, greedy decoding)."""

    def __init__(self,
            dataset, model_dir,
            token_embed_dim, rnn_units, encoder_depth, decoder_depth, dropout_rate=0.2):
        self.token_embed_dim = token_embed_dim
        self.rnn_units = rnn_units
        self.encoder_depth = encoder_depth
        self.decoder_depth = decoder_depth
        self.dropout_rate = dropout_rate
        self.dataset = dataset
        makedirs(model_dir)
        self.model_dir = model_dir

    def load_models(self):
        # rebuild the training graph, restore its weights, then derive the
        # inference-time encoder/decoder models from the same layers
        self.build_model_train()
        self.model_train.load_weights(os.path.join(self.model_dir, 'model.h5'))
        self.build_model_test()

    def _stacked_rnn(self, rnns, inputs, initial_states=None):
        """Chain GRU layers; return (outputs of the last layer, list of each
        layer's final state)."""
        if initial_states is None:
            initial_states = [None] * len(rnns)
        outputs, state = rnns[0](inputs, initial_state=initial_states[0])
        states = [state]
        for i in range(1, len(rnns)):
            outputs, state = rnns[i](outputs, initial_state=initial_states[i])
            states.append(state)
        return outputs, states

    def build_model_train(self):
        """Build the teacher-forcing training model as self.model_train."""
        # build layers (shared embedding between encoder and decoder)
        embeding = Embedding(
            self.dataset.num_tokens + 1,  # +1 as mask_zero
            self.token_embed_dim, mask_zero=True,
            name='embeding')
        encoder_inputs = Input(shape=(None,), name='encoder_inputs')
        encoder_rnns = []
        for i in range(self.encoder_depth):
            encoder_rnns.append(GRU(
                self.rnn_units,
                return_state=True,
                return_sequences=True,
                name='encoder_rnn_%i'%i,
                ))
        decoder_inputs = Input(shape=(None,), name='decoder_inputs')
        decoder_rnns = []
        for i in range(self.decoder_depth):
            decoder_rnns.append(GRU(
                self.rnn_units,
                return_state=True,
                return_sequences=True,
                name='decoder_rnn_%i'%i,
                ))
        decoder_softmax = Dense(
            self.dataset.num_tokens + 1,  # +1 as mask_zero
            activation='softmax', name='decoder_softmax')
        # set connections: teacher forcing -- every decoder layer is initialized
        # with the encoder's last-layer final state
        encoder_outputs, encoder_states = self._stacked_rnn(
            encoder_rnns, embeding(encoder_inputs))
        decoder_outputs, decoder_states = self._stacked_rnn(
            decoder_rnns, embeding(decoder_inputs), [encoder_states[-1]] * self.decoder_depth)
        decoder_outputs = Dropout(self.dropout_rate)(decoder_outputs)
        decoder_outputs = decoder_softmax(decoder_outputs)
        self.model_train = Model(
            [encoder_inputs, decoder_inputs],  # [input sentences, ground-truth target sentences]
            decoder_outputs)  # shifted ground-truth sentences

    def build_model_test(self):
        """Build the inference models sharing the trained layers:
        model_infer_encoder (source ids -> final state) and
        model_infer_decoder (prev token + states -> token distribution + states).
        """
        # reuse the trained layers by name
        names = ['embeding', 'decoder_softmax']
        for i in range(self.encoder_depth):
            names.append('encoder_rnn_%i'%i)
        for i in range(self.decoder_depth):
            names.append('decoder_rnn_%i'%i)
        reused = dict()
        for name in names:
            reused[name] = self.model_train.get_layer(name)
        encoder_inputs = Input(shape=(None,), name='encoder_inputs')
        decoder_inputs = Input(shape=(None,), name='decoder_inputs')
        decoder_inital_states = []
        for i in range(self.decoder_depth):
            decoder_inital_states.append(Input(shape=(self.rnn_units,), name="decoder_inital_state_%i"%i))
        # set connections: autoregressive (one step at a time)
        encoder_outputs, encoder_states = self._stacked_rnn(
            [reused['encoder_rnn_%i'%i] for i in range(self.encoder_depth)],
            reused['embeding'](encoder_inputs))
        self.model_infer_encoder = Model(encoder_inputs, encoder_states[-1])
        decoder_outputs, decoder_states = self._stacked_rnn(
            [reused['decoder_rnn_%i'%i] for i in range(self.decoder_depth)],
            reused['embeding'](decoder_inputs),
            decoder_inital_states)
        # NOTE(review): Dropout layers are inactive during predict(), so this
        # is presumably a no-op at inference -- confirm for this Keras version
        decoder_outputs = Dropout(self.dropout_rate)(decoder_outputs)
        decoder_outputs = reused['decoder_softmax'](decoder_outputs)
        self.model_infer_decoder = Model(
            [decoder_inputs] + decoder_inital_states,
            [decoder_outputs] + decoder_states)

    def save_model(self, name):
        # saves weights only (not the architecture); restored via load_models()
        path = os.path.join(self.model_dir, name)
        self.model_train.save_weights(path)
        print('saved to: '+path)

    def train(self,
            batch_size, epochs,
            batch_per_load=10,
            lr=0.001):
        """Train epoch by epoch, streaming batch_size * batch_per_load samples
        per load; checkpoints after every epoch and again at the end."""
        self.model_train.compile(optimizer=Adam(lr=lr), loss='categorical_crossentropy')
        max_load = np.ceil(self.dataset.n_train/batch_size/batch_per_load)
        for epoch in range(epochs):
            load = 0
            self.dataset.reset()
            while not self.dataset.all_loaded('train'):
                load += 1
                print('\n***** Epoch %i/%i - load %.2f perc *****'%(epoch + 1, epochs, 100*load/max_load))
                encoder_input_data, decoder_input_data, decoder_target_data, _, _ = self.dataset.load_data('train', batch_size * batch_per_load)
                self.model_train.fit(
                    [encoder_input_data, decoder_input_data],
                    decoder_target_data,
                    batch_size=batch_size,)
            self.save_model('model_epoch%i.h5'%(epoch + 1))
        self.save_model('model.h5')

    def evaluate(self, samples_per_load=640):
        """Print the sample-weighted average loss over the held-out test split."""
        # compile() is required before evaluate(); the lr value is irrelevant here
        self.model_train.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy')
        self.dataset.reset()
        sum_loss = 0.
        sum_n = 0
        while not self.dataset.all_loaded('test'):
            encoder_input_data, decoder_input_data, decoder_target_data, _, _ = self.dataset.load_data('test', samples_per_load)
            print('evaluating')
            loss = self.model_train.evaluate(
                x=[encoder_input_data, decoder_input_data],
                y=decoder_target_data,
                verbose=0)
            n = encoder_input_data.shape[0]
            sum_loss += loss * n
            sum_n += n
            # running average, updated after each load
            print('avg loss: %.2f'%(sum_loss/sum_n))
        print('done')

    def _infer(self, source_seq_int):
        """Greedy-decode a reply for one encoded source sequence (2-D int array);
        UNK is suppressed, decoding stops at EOS or after max_seq_len steps."""
        state = self.model_infer_encoder.predict(source_seq_int)
        prev_word = np.atleast_2d([self.dataset.SOS])
        states = [state] * self.decoder_depth
        decoded_sentence = ''
        t = 0
        while True:
            out = self.model_infer_decoder.predict([prev_word] + states)
            tokens_proba = out[0].ravel()
            tokens_proba[self.dataset.UNK] = 0  # UNK disabled
            tokens_proba = tokens_proba/sum(tokens_proba)
            states = out[1:]
            # greedy decoding: always pick the most probable token
            sampled_token_index = np.argmax(tokens_proba)
            sampled_token = self.dataset.index2token[sampled_token_index]
            decoded_sentence += sampled_token+' '
            t += 1
            if sampled_token_index == self.dataset.EOS or t > self.dataset.max_seq_len:
                break
            prev_word = np.atleast_2d([sampled_token_index])
        return decoded_sentence

    def dialog(self, input_text):
        """Tokenize raw text (unknown words map to UNK) and return the reply."""
        source_seq_int = []
        for token in input_text.strip().strip('\n').split(' '):
            source_seq_int.append(self.dataset.token2index.get(token, self.dataset.UNK))
        return self._infer(np.atleast_2d(source_seq_int))

    def interact(self):
        # simple console REPL; an empty input line exits
        while True:
            print('----- please input -----')
            input_text = input()
            if not bool(input_text):
                break
            print(self.dialog(input_text))
def main(mode):
    """Build the dataset and model, then run the requested mode
    (one of: train, continue, eval, interact)."""
    # hyper-parameters
    token_embed_dim = 100
    rnn_units = 512
    encoder_depth = 2
    decoder_depth = 2
    dropout_rate = 0.5
    learning_rate = 1e-3
    max_seq_len = 32
    batch_size = 100
    epochs = 10

    # data files live under the 'official' folder
    data_fld = 'official'
    path_source = os.path.join(data_fld, 'source_num.txt')
    path_target = os.path.join(data_fld, 'target_num.txt')
    path_vocab = os.path.join(data_fld, 'dict.txt')

    # interact mode needs only the vocabulary, not the pair files
    dataset = Dataset(path_source, path_target, path_vocab,
                      max_seq_len=max_seq_len, read_txt=(mode != 'interact'))
    s2s = Seq2Seq(dataset, 'model',
                  token_embed_dim, rnn_units, encoder_depth, decoder_depth, dropout_rate)

    # fresh model for 'train'; otherwise restore the saved weights
    if mode == 'train':
        s2s.build_model_train()
    else:
        s2s.load_models()

    if mode in ('train', 'continue'):
        s2s.train(batch_size, epochs, lr=learning_rate)
    elif mode == 'eval':
        s2s.build_model_test()
        s2s.evaluate()
    elif mode == 'interact':
        s2s.interact()
if __name__ == '__main__':
    # seed PRNGs first so the dataset shuffle/split is reproducible
    set_random_seed()
    mode = sys.argv[1]  # one of [train, continue, eval, interact]
    main(mode)
|
import numpy as np
from spacetime.potential import Potential
###################################################################
class Schwarzschild(Potential):
    """Effective potential of a test particle in Schwarzschild spacetime
    (geometric units; l is the specific angular momentum)."""

    def __init__(self, theta=np.pi/2, l=3.8, r_range=(2, 18), num=10000,
                 cont_without_eq=False, verbose=True):
        super().__init__(r_range=r_range, num=num,
                         cont_without_eq=cont_without_eq, verbose=verbose)
        self.theta = theta
        self.l = l

    def compute_w(self):
        """Return w(r) = 0.5 * ln of the ratio below, evaluated on self.r."""
        sin_sq = np.sin(self.theta) ** 2
        numerator = self.r ** 2 * (self.r - 2) * sin_sq
        denominator = self.r ** 3 * sin_sq - self.l ** 2 * (self.r - 2)
        return 0.5 * np.log(numerator / denominator)
###################################################################
class SchwarzschildDeSitter(Potential):
    """Effective potential in Schwarzschild-de Sitter spacetime
    (lamda is the cosmological-constant parameter)."""

    def __init__(self, theta=np.pi/2, l=3.8, lamda=0.00004,
                 r_range=(2, 18), num=10000, cont_without_eq=False,
                 verbose=True):
        super().__init__(r_range=r_range, num=num,
                         cont_without_eq=cont_without_eq, verbose=verbose)
        self.theta = theta
        self.l = l
        self.lamda = lamda

    def compute_w(self):
        """Return w(r) = 0.5 * ln of the ratio below, with the de Sitter
        lapse alpha(r) = 1 - 2/r - lamda * r**2."""
        alpha = 1 - 2 / self.r - self.lamda * self.r ** 2
        sin_sq = np.sin(self.theta) ** 2
        numerator = alpha * self.r ** 2 * sin_sq
        denominator = self.r ** 2 * sin_sq - alpha * self.l ** 2
        return 0.5 * np.log(numerator / denominator)
|
{
'def': '"A protein transport process that contributes to protein import into the nucleus, and that results in the vectorial transfer of a cargo-carrier protein complex through the nuclear pore complex from the cytoplasmic side to the nucleoplasmic side of the nuclear envelope." [GOC:curators, ISBN:0198506732, PMID:14570049, PMID:9126736]',
'id': 'GO:0000060',
'is_a': ['GO:0006886'],
'name': 'protein import into nucleus, translocation',
'namespace': 'biological_process',
'relationship': ['part_of GO:0006606'],
'synonym': [ '"protein import into cell nucleus, translocation" EXACT []' ]
}
{
'def': '"A protein complex disassembly process that contributes to protein import into the nucleus, and that results in the dissociation of the cargo protein and the carrier (such as an importin alpha/beta heterodimer) from each other and from the nuclear pore complex." [GOC:mah, PMID:14570049, PMID:9126736, PMID:9687515]',
'id': 'GO:0000061',
'is_a': ['GO:0043624'],
'name': 'protein import into nucleus, substrate release',
'namespace': 'biological_process',
'relationship': ['part_of GO:0006606'],
}
|
#! /usr/bin/env python
"""Closure demo: nth_power keeps `n` alive even after its factory is deleted.

Fixed: the original used Python 2 print statements, which are syntax errors
under Python 3; converted to print() calls with identical output.
"""


def generate_power_func(n):
    """Return a closure that computes x**n for the captured n."""
    print("id(n): %X" % id(n))
    print("entry value of n:", n)

    def nth_power(x):
        print("the value of n:", n)
        print("the value of x:", x)
        print("the value of (power operator) x**n:", x**n)
        return x**n

    print("id(nth_power): %X" % id(nth_power))
    # nth_power is called a closure
    return nth_power


raised_to_4 = generate_power_func(4)
# print __closure__ of raised_to_4: the cells holding the captured `n`
print("__closure__: ", raised_to_4.__closure__)
# print repr of raised_to_4
print("Resulting of repr: ", repr(raised_to_4))
# delete generate_power_func -- the closure still works afterwards
del generate_power_func
# magic of closure in python
raised_to_4(3)
print("__closure__: ", raised_to_4.__closure__)
|
""" Game fix for Tomb Raider I
"""
# pylint: disable=C0103
from protonfixes import util
def main():
    """ Enable Glide emulation in dosbox config """
    glide_conf = {'glide': {'glide': 'emu'}}
    util.create_dosbox_conf('glide_fix.conf', glide_conf)
    # point dosbox at the generated config file
    for extra_arg in ('-conf', 'glide_fix.conf'):
        util.append_argument(extra_arg)
|
import re
from .base import (
ANNOTATIONS_REGEX,
WikipediaScraperBase,
)
from ..filters.film import WikipediaFilmFilter
class WikipediaFilmScraper(WikipediaScraperBase):
    """Scraper for Wikipedia film pages: adds plot, genre and year fields to
    the base scraper's payload."""

    filter_class = WikipediaFilmFilter
    disambiguation_id = "Film_and_television"
    disambiguation_keyword = "film"

    def make_data(self, url, soup):
        """Extend the base data dict with 'plot', 'genre' and 'year'."""
        data = super().make_data(url, soup)
        data["plot"] = self.filter_plot(soup)
        data.update(self.filter_genre_and_year(data["description"]))
        return data

    def filter_genre_and_year(self, description):
        """Extract a 4-digit year and genre phrase from the description's
        first sentence (e.g. '... is a 2010 science fiction film ...').

        Returns {'year': int or '', 'genre': str}; both fall back to ''
        when the pattern does not match.
        """
        year = ""
        genre = ""
        try:
            first_sentence = description.split(".")[0]
            try:
                # group(1): first 4-digit run; group(2): text between it and 'film'
                pattern = r"^.+(\d{4})(.+)film.+$"
                found = re.search(pattern, first_sentence)
                year = found.group(1).strip()
                genre = found.group(2).strip().lower()
            except AttributeError:
                # re.search returned None -- no match in this sentence
                pass
        except IndexError:
            # NOTE(review): str.split always returns at least one element, so
            # [0] cannot raise IndexError -- this guard looks unreachable;
            # confirm before removing
            pass
        # Try to convert the year to an integer.
        try:
            year = int(year)
        except ValueError:
            pass
        return {
            "year": year,
            "genre": genre,
        }

    def filter_plot(self, soup):
        """Join the paragraphs following the page's 'Plot' heading into one
        string; returns '' when the page has no Plot section."""
        try:
            # Collect the text data from the paragraph elements.
            paragraphs = []
            # Find the plot element on the page.
            el = soup.findAll(id="Plot")[0]
            # Find its parent, since its in an H2 container.
            parent = el.parent
            if parent.name == "h2":
                # Then get the paragraphs that follow it.
                sib = parent.findNextSibling()
                while sib.name == "p":
                    paragraph = sib.getText().strip()
                    # remove footnote annotations like [1]
                    paragraph = re.sub(
                        ANNOTATIONS_REGEX,
                        "",
                        paragraph
                    )
                    paragraphs.append(paragraph)
                    # get the next sibling to check if its another paragraph.
                    sib = sib.findNextSibling()
            # Combine the paragraphs text together to form the plot.
            plot = " ".join(paragraphs)
            return plot
        except IndexError:
            # no element with id="Plot" on the page
            return ""
|
#
import numpy as np
import tensorflow as tf
def read_irsi_softmax():
    """Load the iris softmax CSV and split it into train/test sets by row
    ranges (the original Korean comments were mojibake; translated here).

    NOTE(review): the path is machine-specific and mixes backslashes with a
    forward slash; it only works because none of the backslash pairs form a
    recognized escape sequence -- confirm/replace for portability.
    """
    iris=np.loadtxt('D:\Python\DlgsPark\data\multivariable data/iris_softmax.csv', delimiter=',')
    print(iris.shape)  # (150, 8)
    # 1. split into train_set / test_set
    # method 1) vstack: first 40 rows of each of the three classes for training
    train_set=np.vstack((iris[:40], iris[50:90], iris[100:140]))
    print(train_set)
    print(train_set.shape)  # (120, 8)
    # NOTE(review): only 5 rows of the third class are held out (140:145),
    # giving 10 + 10 + 5 = 25 test rows -- confirm 140:150 was not intended
    test_set=np.vstack((iris[40:50], iris[90:100], iris[140:145]))
    print(test_set)
    print(test_set.shape)  # (25, 8)
    return train_set, test_set
# method 2) plain `+` would NOT concatenate: it adds the slices element-wise
# train_set=iris[:40]+iris[50:90]+iris[100:140]
# print(train_set)
# print(train_set.shape)  # (40, 8)
train_set, test_set=read_irsi_softmax()
|
#!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return the set of elements present in both iterables."""
    return set(set_1).intersection(set_2)
|
# pygame example: github.com/pddring/pygame-examples
"""
This example shows you how to display text and combine it with images
in the pygame graphics window.
Things to try:
TODO: Change the "Press Q to quit" from red to blue
TODO: Make "Press Q to quit" appear at the bottom of the screen in the middle
TODO: "Press any key..." is displayed nice and smoothly but "Press Q..." is
all blotchy. This is because "Press any key..." uses anti-aliasing.
Make "Press Q..." use anti-aliasing too so that it displays smoothly too.
TODO: Add a joker to the pack
"""
# import modules
import pygame
import random
# wait for the user to press a key and return the keycode
def wait_for_key():
    """Block until a key is pressed; a window-close (QUIT) event counts as
    pressing Escape.

    Fixed: the QUIT check previously sat AFTER the wait loop, which only
    exits on KEYDOWN -- so closing the window never ended the wait and the
    check was unreachable. Each event is now inspected as it arrives.
    """
    while True:
        e = pygame.event.wait()
        if e.type == pygame.KEYDOWN:
            return e.key
        if e.type == pygame.QUIT:
            return pygame.K_ESCAPE
# show the pygame window
pygame.init()
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("Pygame Example")

# load images
try:
    img_card = pygame.image.load("images/card-back.png")
    img_heart = pygame.image.load("images/card-heart.png")
    img_club = pygame.image.load("images/card-club.png")
    # fixed: previously loaded card-club.png, so spades displayed as clubs
    img_spade = pygame.image.load("images/card-spade.png")
    img_diamond = pygame.image.load("images/card-diamond.png")
except pygame.error:  # narrowed from a bare except; load failures raise pygame.error
    # Python 3 print() calls (the original Python 2 statements are syntax errors)
    print("Error: Could not load images: you can download them from github.com/pddring/pygame-examples")
    print("They should be saved in a folder called images in the same place as this python file")
    exit()
images_suits = [img_heart, img_club, img_spade, img_diamond]

# create a font
font = pygame.font.Font(None, 36)
# use the font to make an image containing some text
text = font.render("Press any key to show a random card", True, (0, 0, 0))
text_quit = font.render("Press Q to quit", False, (255, 0, 0))

# loop around until the user presses escape or Q
looping = True
while looping:
    # fill the screen in white
    screen.fill((255, 255, 255))
    # display the card
    screen.blit(img_card, (100, 100))
    screen.blit(text, (0, 0))
    screen.blit(text_quit, (0, 400))
    # update the screen
    pygame.display.flip()
    # display a white rectangle over the centre of the card
    pygame.draw.rect(screen, (255, 255, 255), (110, 110, 180, 260), 0)
    # choose a suit at random
    suit = random.choice(images_suits)
    screen.blit(suit, (130, 150))
    # choose a number at random
    number = random.choice(["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King"])
    screen.blit(font.render(number, 1, (0, 0, 0)), (120, 120))
    pygame.display.flip()
    key = wait_for_key()
    # stop looping if the user presses Q or escape
    if key == pygame.K_q or key == pygame.K_ESCAPE:
        looping = False
pygame.quit()
|
import shutil
from pathlib import Path
import pytest
from sending.curation_files import CurationFiles
@pytest.fixture
def curation_files(tmp_path: Path) -> CurationFiles:
    """Build a CurationFiles rooted at tmp_path, seeded with the trembl
    *.new fixture files and the old/temp sub-directories the tests use."""
    (tmp_path / "old").mkdir()
    (tmp_path / "temp").mkdir()
    fixture_dir = Path("tests/fixtures/files/tremblfiles")
    for fixture_file in fixture_dir.glob("*.new"):
        shutil.copy2(fixture_file, tmp_path)
    return CurationFiles(tmp_path, remote_dir=None)
def test_submission_dir(curation_files: CurationFiles):
    # the submission directory is exposed as a pathlib.Path
    assert isinstance(curation_files.submission_dir, Path)
def test_iter(tmp_path: Path, curation_files: CurationFiles):
    # iterating a CurationFiles yields the *.new files copied by the fixture
    file_list = list(curation_files)
    expected_file = tmp_path / "trembl1.new"
    assert expected_file in file_list
def test_len(curation_files: CurationFiles):
    # the fixture directory contributes exactly two *.new files
    assert len(curation_files) == 2
def test_get_entries(curation_files: CurationFiles):
    # total entries parsed across both fixture files
    assert len(curation_files.get_entries()) == 7
def test_get_accessions(curation_files: CurationFiles):
    # accessions expected from the fixture files; order is irrelevant,
    # hence the sorted comparison
    expected_accessions = [
        "Q8CJC7",
        "Q80WP0",
        "Q80ZC8",
        "Q80ST4",
        "B2KG20",
        "Q5DT35",
        "Q5DT39",
        "E9PTP0",
        "Q5DT36",
        "Q8BS07",
        "Q5DT37",
        "U3JA75",
    ]
    accessions = curation_files.get_accessions()
    assert sorted(accessions) == sorted(expected_accessions)
def test_get_pids(curation_files: CurationFiles):
    # protein ids expected from the fixture files; compared order-insensitively
    expected_pids = [
        "AAM44080.1",
        "AAN31172.1",
        "AAO49444.1",
        "AAO49445.1",
        "AAO49446.1",
        "EDM01724.1",
        "EDM01725.1",
        "AAR00559.1",
        "AAQ20111.1",
        "AAR00558.1",
        "BAC30742.1",
        "AAI32407.1",
        "AAI32409.1",
        "AAR00557.1",
        "AAH91468.1",
    ]
    pids = curation_files.get_pids()
    assert sorted(pids) == sorted(expected_pids)
def test_backup(curation_files: CurationFiles):
    # backup copies files into the 'old' sub-directory without removing
    # the originals
    curation_files.backup()
    expected_backup = curation_files.submission_dir / "old" / "trembl1.new"
    assert expected_backup.is_file()
    assert len(curation_files) == 2
def test_delete(curation_files: CurationFiles):
    # delete removes all tracked files
    assert len(curation_files) == 2
    curation_files.delete()
    assert len(curation_files) == 0
def test_write_files(curation_files: CurationFiles):
    # write_files materializes the combined content at the given path
    test_file = tmp_path / "temp" / "test_file.new"
    curation_files.write_files(test_file)
    assert test_file.exists()
def test_bool_false(tmp_path: Path):
    # Point to empty directory: a CurationFiles with no files is falsy
    no_files = CurationFiles(submission_dir=tmp_path, remote_dir=None)
    assert not no_files
def test_bool_true(curation_files: CurationFiles):
    # a CurationFiles holding files is truthy
    assert curation_files
|
from .settings import (
WHERE_SPECIAL_ARGUMENTS, AUTOMATIC_JOINS_PLACEHOLDER,
FIELD_FORMAT, SELECT_FORMAT, JOIN_CLAUSE_FORMAT, WHERE_CLAUSE_FORMAT, WHERE_AND_CONNECTOR_FORMAT,
WHERE_EQUAL_OPERATION_FORMAT, ORDER_BY_CLAUSE_FORMAT, ORDER_BY_ASC_FORMAT, ORDER_BY_DESC_FORMAT,
LIMIT_FORMAT,VALUE_STRING_FORMAT, VALUE_LIST_FORMAT, VALUE_TUPLE_FORMAT, VALUE_NULL_FORMAT, VALUE_DATETIME_FORMAT,
VALUE_SINGLE_QUOTE_FORMAT, DISTINCT_CLAUSE_FORMAT, FIELD_OR_TABLES_FORMAT
)
from datetime import datetime
import math
import random
class BaseQuery:
    """Base for SQL statement builders: tracks table aliases, resolves
    `__`-separated field paths into automatic JOINs, and formats values
    and identifiers via the format strings imported from settings."""

    def __init__(self, on_table, engine):
        self.engine = engine
        self.on_table = on_table
        # maps each `__`-joined query path to the table it resolves to;
        # `table_name` is either the real table name or a generated alias
        # (flagged by `is_alias`); '' is the root table of the query
        self.fields_table_relations={
            '':dict(table_name=on_table, is_alias=False)
        }

    def _format_db_tables_names(self, value):
        """
        Format a single entry of the fields_table_relations dict: aliases
        are used verbatim, real table names are quoted.
        Args:
            value (dict): a single entry of the fields_table_relations dict
        """
        if value['is_alias']:
            return value['table_name']
        else:
            return self._format_field_or_tables(value['table_name'])

    def _format_field_or_tables(self, value):
        # quote/format an identifier (field or table name)
        return FIELD_OR_TABLES_FORMAT.format(value)

    def __create_table_name_alias(self, table_name):
        """
        Creates a new alias for the table: random 'TA<n>' names are drawn
        until one does not collide with any name already in use.
        """
        alias = 'TA{}'.format(str(random.randint(1,100)))
        invalid_aliases = [value['table_name'] for value in self.fields_table_relations.values()]
        while alias in invalid_aliases:
            alias = 'TA{}'.format(str(random.randint(1,100)))
        return alias

    def _get_table_name_or_alias(self, query_path, table_name):
        """Return (creating if needed) the relation entry for `query_path`;
        an alias is generated when `table_name` is already used elsewhere."""
        if query_path in self.fields_table_relations:
            return self.fields_table_relations[query_path]
        if table_name in [value['table_name'] for value in self.fields_table_relations.values()]:
            self.fields_table_relations[query_path] = {
                'table_name': self.__create_table_name_alias(table_name),
                'is_alias': True
            }
        else:
            self.fields_table_relations[query_path] = {
                'table_name': table_name,
                'is_alias': False
            }
        return self.fields_table_relations[query_path]

    def __format_joins(self, joins):
        """
        Handle all of the joins it must do for the query to succeed
        The user can set dynamic join_relations to get the right table. Let's go with the following example
        >>> {
            "form_value": {
                "form": "dynamic_forms"
            }
        }
        The above example means: "When you are making a join with the `form` field and the table is `form_value`, we use the value `dynamic_forms` instead"
        WHAT?
        Let's dig deeper:
        When you do something like this:
        >>> connection.query('form_value').filter(form__form__id=2).run()
        Let's separate the string by each double underscore, we get something like this: [form, form, id]
        The first `form` is the name of the field in `form_value`, but this field is not from `form` database, instead
        it is from `dynamic_forms`, we get the correct value on each join
        SO we get something like this
        INNER JOIN "dynamic_forms" ON "dynamic_forms"."id" = "form_value"."form_id"
        INNER JOIN "form" ON "form"."id" = "dynamic_forms"."form_id"
        Look that the second field, correctly references to "form" table, so we don't need to set any join relation for
        this field. Okay, but what if `dynamic_forms` `form` field references `foo` table?
        We would do something like the following:
        >>> {
            "form_value": {
                "form": "dynamic_forms"
            }
            "dynamic_forms": {
                "form": "foo"
            }
        }

        NOTE(review): relies on self.join_relations and self.query_joins,
        which in the visible code are defined only by the Select subclass --
        confirm other subclasses using joins set them too.
        """
        to_table_join = self.fields_table_relations['']
        reference_string_list = list()
        for index, join in enumerate(joins):
            # creates a reference of the path to the fields so something like
            # depends_on__group__company and so on, with this path we can reuse the created aliases
            reference_string_list.append(join)
            reference_string = '__'.join(reference_string_list)
            from_table_join = to_table_join
            # automatically creates alias
            to_table_join_name = self.join_relations.get(from_table_join['table_name'], {}).get(join, join)
            to_table_join = self._get_table_name_or_alias(reference_string, to_table_join_name)
            join_clause = JOIN_CLAUSE_FORMAT.format(
                join=join,
                from_table_join=self._format_db_tables_names(from_table_join),
                to_table_join=FIELD_OR_TABLES_FORMAT.format(to_table_join_name),
                to_table_join_name_or_alias=self._format_db_tables_names(to_table_join),
                alias=to_table_join['table_name'] if to_table_join['is_alias'] else ''
            )
            # de-duplicate identical join clauses
            if join_clause not in self.query_joins:
                self.query_joins.append(join_clause)
        return self._format_db_tables_names(to_table_join)

    def _format_db_fields(self, value):
        """
        Formats each database field based on a default VALUE_CLAUSE; a value
        containing the joins placeholder (e.g. 'a__b__id') triggers automatic
        JOIN creation, and the field is qualified with the joined table's
        name or alias.
        """
        table_name = self._format_db_tables_names(self.fields_table_relations[''])
        splitted_value = value.split(AUTOMATIC_JOINS_PLACEHOLDER)
        if len(splitted_value) > 1:
            # Handle automatic join operations
            joins = splitted_value[:-1]
            table_name = self.__format_joins(joins)
        # NOTE(review): only the last element is used below, so the [-2:]
        # slice looks redundant -- confirm before simplifying
        values_to_use = splitted_value[-2:]
        value = FIELD_FORMAT.format(
            table=table_name,
            field=self._format_field_or_tables(values_to_use[-1])
        )
        return value

    def format_db_values(self, value):
        """Render a Python value as a SQL literal, recursing into containers."""
        if type(value) == str:
            # escape single quotes, then wrap as a SQL string literal
            value = VALUE_STRING_FORMAT.format(value.replace("'",VALUE_SINGLE_QUOTE_FORMAT))
        if type(value) == list:
            value = VALUE_LIST_FORMAT.format(', '.join([str(self.format_db_values(val)) for val in value]))
        if type(value) == datetime:
            value= VALUE_STRING_FORMAT.format(value.strftime(VALUE_DATETIME_FORMAT))
        if type(value) == tuple:
            value = '{}'.format(', '.join([str(self.format_db_values(val)) for val in value]))
        if type(value) == self.__class__:
            # a nested query object: materialize and format it as a list
            # NOTE(review): assumes query instances are iterable -- confirm
            value = self.format_db_values(list(value))
        if value == None:
            value = VALUE_NULL_FORMAT
        return value
class Insert(BaseQuery):
    """INSERT statement builder/executor."""

    def bulk_insert(self, values, column_names=None):
        """
        This is optimized to be quicker than insert; `values` must be a list
        of row value lists.
        To be easier you can use it like this:
        >>> connection.query('form_value').bulk_insert(values=[[1,2], [3,4], [4,5]], column_names=['column_a', 'column_b'])
        Args:
            values (list of lists): one inner list per row to insert
            column_names (list): the column names as a list
        Returns:
            bool: returns True if everything went fine
        """
        values = tuple(list(value) for value in values)
        # NOTE(review): falls back to self.columns, which is not defined
        # anywhere in the visible class hierarchy -- confirm it is set elsewhere
        columns = column_names if column_names else self.columns
        # chunk the rows so each statement stays below common engine limits
        maximum_number_of_values_per_iteration = 999
        iterations = math.ceil(len(values)/maximum_number_of_values_per_iteration)
        self.engine.connect()
        for iteration in range(0, iterations):
            iteration_values = values[iteration*maximum_number_of_values_per_iteration : (iteration+1)*maximum_number_of_values_per_iteration]
            query = self._format_insert(tuple(iteration_values), columns)
            self.engine.execute(query)
        self.engine.commit()
        return True

    def insert(self, **kwargs):
        """
        Inserts a single row built from keyword arguments (column=value).
        Returns:
            bool: always True
        """
        columns = kwargs.keys()
        values = list(kwargs.values())
        query = self._format_insert(values, columns)
        # NOTE(review): the statement is only printed; execution is commented
        # out, so insert() currently persists nothing -- confirm intended
        print(query)
        #self.engine.save(query)
        return True

    def _format_insert(self, values, columns):
        """Build the INSERT statement string for `values` and `columns`."""
        INSERT_CLAUSE = 'INSERT INTO "{}" ({}) VALUES {}'
        return INSERT_CLAUSE.format(
            self.on_table,
            ', '.join(['"{}"'.format(column) for column in columns]),
            self.format_db_values(values)
        )
class Select(BaseQuery):
"""
Class responsible for handling select statements.
"""
    def __init__(self, join_relations, *args, **kwargs):
        # mapping used to resolve automatic joins (see BaseQuery.__format_joins)
        self.join_relations = join_relations
        # clause state, filled in by the chainable builder methods below
        self.query_select = ['*']
        self.query_distinct = ''
        self.query_orders = []
        self.query_where = []
        self.query_limit = ''
        self.query_joins = []
        super(Select, self).__init__(*args, **kwargs)
    @property
    def __get_query(self):
        """Assemble the final SQL string from the accumulated clause parts."""
        query = SELECT_FORMAT.format(
            select=', '.join(self.query_select),
            # NOTE(review): this always injects DISTINCT_CLAUSE_FORMAT instead
            # of self.query_distinct, which would make distinct() a no-op --
            # confirm against SELECT_FORMAT's placeholder semantics
            distinct=DISTINCT_CLAUSE_FORMAT,
            froms=self.on_table
        )
        joins = '{} '.format(' '.join(self.query_joins)) if self.query_joins else ''
        where = WHERE_CLAUSE_FORMAT.format(where_conditions=WHERE_AND_CONNECTOR_FORMAT.join(self.query_where)) if self.query_where else ''
        orders = ORDER_BY_CLAUSE_FORMAT.format(order_by_conditions=', '.join(self.query_orders)) if self.query_orders else ''
        limit = self.query_limit
        query = query + joins + where + orders + limit
        return query
    @property
    def query(self):
        # public accessor for the assembled SQL string
        return self.__get_query
    def first(self):
        """
        Sets the query limit to 1 and returns self for chaining.
        NOTE(review): despite the name it does not fetch a row -- it only
        limits the query; confirm callers run() afterwards.
        """
        return self.limit(1)
    def limit(self, number):
        """
        Sets your desired limit to the query
        Args:
            number (int): the limit number
        Returns:
            self: this object so you can concatenate with other functions
        """
        self.query_limit = LIMIT_FORMAT.format(num=number)
        return self
    def distinct(self):
        # mark the query as SELECT DISTINCT; chainable
        self.query_distinct = DISTINCT_CLAUSE_FORMAT
        return self
    def select(self, *args, **kwargs):
        """
        Expects each column name as a string. You can also make joins in your
        select using double underscores like '__':
        >>> connection.query('example_db_name').select('id').run()
        >>> connection.query('example_db_name').select('id', 'name').run()
        Or selecting through a join:
        >>> connection.query('example_db_name').select('connectedfield__id').run()
        In this example `connectedfield` would be a field of `example_db_name`
        table, and the `id` you are selecting is the id on the joined table.
        Args:
            flat (bool, optional): You can set flat=True if you are retrieving only one option field. Defaults to False
        """
        # you can retrieve flat values so instead of tuples like this [(1,), (2,)]
        # you get your results as a nice flat list like [1,2]
        # this just works if you only set ONE argument in the select
        self._flat = kwargs.get('flat', False) and len(args) == 1
        # you can obviously have multiple selects, but everytime you do it resets the select clause
        # so use just one
        self.query_select = []
        for value in args:
            select_clause = self._format_db_fields(value)
            if select_clause not in self.query_select:
                self.query_select.append(select_clause)
        return self
def filter(self, **kwargs):
    """
    Add WHERE conditions to the query.

    You need to define filters like the following example:
    >>> connection.query('example_db_name').filter(id=2).run()
    Or if you need to make any joins you define it like this:
    >>> connection.query('example_db_name').filter(connectedfield__id=2).run()
    In this example `connectedfield` would be a field of `example_db_name` table, and the `id` you are making
    where condition is the id on `connectedfield` table.

    Returns:
        self: this object so you can concatenate with other functions
    """
    for key, value in kwargs.items():
        # The last "__" token may name a special operator (e.g. gt, in);
        # otherwise fall back to plain equality.
        where_operation = WHERE_SPECIAL_ARGUMENTS.get(key.split(AUTOMATIC_JOINS_PLACEHOLDER)[-1], WHERE_EQUAL_OPERATION_FORMAT)
        if where_operation != WHERE_EQUAL_OPERATION_FORMAT:
            # Strip the operator suffix so only the column path remains.
            key = AUTOMATIC_JOINS_PLACEHOLDER.join(key.split(AUTOMATIC_JOINS_PLACEHOLDER)[:-1])
        where_field = self._format_db_fields(key)
        value = self.format_db_values(value)
        condition = where_field + where_operation + str(value)
        # BUG FIX: the duplicate check used to test the bare field name
        # against the list of full condition strings, so it never matched
        # and duplicate conditions were appended; compare the whole
        # condition instead.
        if condition not in self.query_where:
            self.query_where.append(condition)
    return self
def order_by(self, *args):
    """
    Add ORDER BY clauses to the query.

    Expects the each column names as string; prefix a name with ``-`` for
    descending order. You can also make joins in your order using double
    underscores like '__':
    >>> connection.query('example_db_name').order_by('id').run()
    >>> connection.query('example_db_name').order_by('id', 'name').run()
    >>> connection.query('example_db_name').order_by('connectedfield__id').run()
    In this example `connectedfield` would be a field of `example_db_name` table, and the `id` you are ordering
    is the id on `connectedfield` table.

    Returns:
        self: this object so you can concatenate with other functions
    Raises:
        TypeError: if any argument is not a string
    """
    # isinstance + generator instead of `type(x) != str` inside a list.
    if any(not isinstance(value, str) for value in args):
        raise TypeError('Your arguments MUST be str type')
    for value in args:
        asc_or_desc = ORDER_BY_ASC_FORMAT
        # startswith is also safe for empty strings (no IndexError).
        if value.startswith('-'):
            asc_or_desc = ORDER_BY_DESC_FORMAT
            value = value[1:]
        order_clause = '{} {}'.format(self._format_db_fields(value), asc_or_desc)
        if order_clause not in self.query_orders:
            self.query_orders.append(order_clause)
    return self
def force(self):
    """Execute the built SELECT query and return its results.

    Returns:
        list/tuple: rows fetched by the engine; flattened to plain values
        when ``select(..., flat=True)`` was used.
    """
    rows = self.engine.fetch(self.__get_query)
    if getattr(self, '_flat', False):
        # Unwrap one-tuples into a flat list of values.
        return [row[0] for row in rows]
    return rows
class Query(Insert, Select):
    """Lazy query object.

    The SQL is only executed (via :meth:`force`) when the results are
    actually consumed — printed, iterated, indexed, truth-tested or
    pickled.
    """

    def __repr__(self):
        return str(self.force())

    def __getstate__(self):
        return self.force()

    def __iter__(self):
        return iter(self.force())

    def __bool__(self):
        return bool(self.force())

    def __getitem__(self, k):
        return self.force()[k]

    @property
    def columns(self):
        """
        Returns all of the columns of the current table that you are connected to.

        Returns:
            list: list with each column_name of your table as string
        """
        # A LIMIT 0 query returns no rows but still fills the cursor
        # description with column metadata.
        sql = SELECT_FORMAT.format(
            select='*',
            distinct='',
            froms=self.on_table
        ) + LIMIT_FORMAT.format(num=0)
        self.engine.connect()
        cursor = self.engine.execute(sql)
        names = [description[0] for description in cursor.description]
        self.engine.close()
        return names
|
from Solution import Solution
class P014(Solution):
    """Project Euler problem 14: longest Collatz sequence starting under one million."""

    # Maps n -> number of Collatz steps from n down to 1 (shared memo).
    mem_cache = {1: 0}

    def collatz(self, number):
        """Return the Collatz chain length of ``number`` (steps to reach 1)."""
        if number in self.mem_cache:
            return self.mem_cache[number]
        # Collapse the duplicated even/odd branches: compute the successor,
        # recurse once, cache and return (previous version repeated the
        # cache/return logic in both branches and used stray semicolons).
        if number % 2 == 0:
            successor = number // 2
        else:
            successor = 3 * number + 1
        length = self.collatz(successor) + 1
        self.mem_cache[number] = length
        return length

    def solve(self):
        """Find the starting number below one million with the longest chain."""
        self.problem_number = 14
        last = 1000000
        max_number = 1
        max_length = self.mem_cache[max_number]
        for i in range(2, last):
            sequence_length = self.collatz(i)
            if sequence_length > max_length:
                max_length = sequence_length
                max_number = i
        return max_number
def main():
    """Entry point: run the Project Euler #14 solution."""
    solution = P014()
    solution.run()
if __name__ == "__main__":
    # Allow running this module directly as a script.
    main()
|
# Largest product in a grid: https://projecteuler.net/problem=11
# Problem 11
# In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
#
# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
#
# The product of these numbers is 26 x 63 x 78 x 14 = 1788696.
#
# What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20x20 grid?
# The 20x20 grid, row-major, as whitespace-separated two-digit numbers.
a = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""

GRID_SIZE = 20
RUN = 4  # how many adjacent numbers to multiply


def _max_product_in_direction(grid, di, dj):
    """Greatest product of RUN adjacent cells stepping by (di, dj).

    Bounds are checked explicitly: the previous version used negative
    indices (``j - k - 1``) that silently wrapped to the right edge of
    the grid, and only scanned ``range(16)`` so horizontal runs in the
    last 4 rows and vertical runs in the last 4 columns were skipped.
    """
    best = 0
    size = len(grid)
    for i in range(size):
        for j in range(size):
            end_i = i + di * (RUN - 1)
            end_j = j + dj * (RUN - 1)
            if 0 <= end_i < size and 0 <= end_j < size:
                prod = 1
                for k in range(RUN):
                    prod *= grid[i + di * k][j + dj * k]
                best = max(best, prod)
    return best


data = [int(d) for d in a.split()]
matrix = [data[i:i + GRID_SIZE] for i in range(0, GRID_SIZE * GRID_SIZE, GRID_SIZE)]

# Right, down, down-right, down-left: the reverse directions yield the
# same products, so these four cover all eight.
product = max(
    _max_product_in_direction(matrix, 0, 1),
    _max_product_in_direction(matrix, 1, 0),
    _max_product_in_direction(matrix, 1, 1),
    _max_product_in_direction(matrix, 1, -1),
)
print(product)
|
import functools
def cache(f):
    """
    Keep a cache of previous function calls.

    The cache is exposed as ``wrapper.cache`` (a plain dict) so callers
    can inspect or clear it. Keyword arguments are sorted by name so that
    ``f(a=1, b=2)`` and ``f(b=2, a=1)`` share one cache entry (previously
    the key depended on call-site ordering).

    Note: all arguments must be hashable and the cache is unbounded —
    prefer ``functools.lru_cache`` when a size limit is needed.
    """
    @functools.wraps(f)
    def wrapper_cache(*args, **kwargs):
        # Sorting makes the key independent of keyword ordering.
        cache_key = args + tuple(sorted(kwargs.items()))
        if cache_key not in wrapper_cache.cache:
            wrapper_cache.cache[cache_key] = f(*args, **kwargs)
        return wrapper_cache.cache[cache_key]
    wrapper_cache.cache = dict()
    return wrapper_cache
|
#!/usr/bin/python
"""
Output Fisher matrix for experiments to a text file.
"""
import numpy as np
import pylab as P
from rfwrapper import rf
cosmo = rf.experiments.cosmo

# Filename prefix for the Fisher matrices written out below.
fmroot = "fishermat"

names = ['EuclidRef_baoonly', ]

# Loop through experiments and output them.
# (Iterate names directly instead of `for k in range(len(names))`.)
for name in names:
    root = "output/" + name

    # Load cosmo fns.
    zc, Hc, dAc, Dc, fc = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zs, Hs, dAs, Ds, fs = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX: expand b_HI per-z, drop nuisance parameters.
    pnames = rf.load_param_names(root+"-fisher-full-0.dat")
    zfns = ['b_HI',]
    excl = ['Tb', 'f', 'H', 'DA', 'apar', 'aperp', 'pk*', 'N_eff', 'fs8', 'bs8', 'A']
    F, lbls = rf.combined_fisher_matrix( F_list, expand=zfns, names=pnames,
                                         exclude=excl )
    """
    # Add DETF Planck prior?
    print "*** Using DETF Planck prior ***"
    l2 = ['n_s', 'w0', 'wa', 'omega_b', 'omegak', 'omegaDE', 'h', 'sigma8']
    F_detf = euclid.detf_to_rf("DETF_PLANCK_FISHER.txt", cosmo, omegab=False)
    Fpl, lbls = rf.add_fisher_matrices(F, F_detf, lbls, l2, expand=True)
    if len(fixed_params) > 0:
        Fpl, lbls = rf.combined_fisher_matrix( [Fpl,], expand=[],
                     names=lbls, exclude=fixed_params )
    """
    # Output Fisher matrix.
    # BUG FIX: the bare `print "..."` statement is Python-2-only syntax
    # (SyntaxError under Python 3); the call form below works on both.
    fname = "%s_%s.dat" % (fmroot, name)
    np.savetxt(fname, F, header=" / ".join(lbls))
    print("Saved Fisher matrix to: %s" % fname)
|
import pynq
import pynq.lib
from .constants import *
#from pynq.lib.logictools import TraceAnalyzer
# Module authorship metadata.
__author__ = "Johannes Vanoverschelde"
__copyright__ = "Copyright 2017, Dekimo"
__email__ = "johannes.vanoverschelde@dekimo.com"
class BaseUZOverlay(pynq.Overlay):
    """The Aquantis overlay for the Microzed.

    Attributes
    ----------
    iop1 : IOP
        IO processor connected to the PMODA interface
    iop2 : IOP
        IO processor connected to the PMODB interface
    trace_pmoda : pynq.logictools.TraceAnalyzer
        Trace analyzer block on PMODA interface, controlled by PS.
    trace_pmodb : pynq.logictools.TraceAnalyzer
        Trace analyzer block on PMODB interface, controlled by PS.
    leds : AxiGPIO
        4-bit output GPIO for interacting with the green LEDs LD0-3
    buttons : AxiGPIO
        2-bit input GPIO for interacting with the buttons BTN0-3
    """

    def __init__(self, bitfile, **kwargs):
        """Load the bitstream and, once loaded, wire up the LED and button GPIO."""
        super().__init__(bitfile, **kwargs)
        if not self.is_loaded():
            # Nothing to configure until the bitstream is on the fabric.
            return
        #self.iop1.mbtype = "Pmod"
        #self.iop2.mbtype = "Pmod"
        #self.PMODA = self.iop1.mb_info
        #self.PMODB = self.iop2.mb_info
        self.leds = self.axi_gpio_0_led.channel1
        self.leds.setlength(4)
        self.leds.setdirection("out")
        self.buttons = self.axi_gpio_0_button.channel1
        self.buttons.setlength(2)
        self.buttons.setdirection("in")
        #self.trace_pmoda = TraceAnalyzer(
        #    self.trace_analyzer_pmoda.description['ip'],
        #    PYNQZ1_PMODA_SPECIFICATION)
        #self.trace_pmodb = TraceAnalyzer(
        #    self.trace_analyzer_pmodb.description['ip'],
        #    PYNQZ1_PMODB_SPECIFICATION)
|
#!/usr/bin/python
import json
import requests
import jsonpickle
import argparse
from urllib import quote
import sys
import glob
import imghdr
def updateObjectImage(url,dimName,imgDir,objt):
    """Upload the image matching the object's name and attach it to the object.

    Looks in ``imgDir`` for exactly one file named ``<theName>.*``.
    Silently returns when no file exists; raises when the match is
    ambiguous, the file is not a valid image, or any server call fails.
    """
    objtName = objt['theName']
    matches = glob.glob(imgDir + '/' + objtName + '.*')
    if len(matches) == 0:
        # No image provided for this object: nothing to do.
        return
    if len(matches) > 1:
        raise Exception('Error uploading image for ' + objtName + ': expecting just 1 file for ' + objtName + ', but found ' + str(matches) + '.')
    imagePath = matches[0]
    if imghdr.what(imagePath) is None:
        raise Exception('Error uploading ' + imagePath + ': invalid image file.')
    imgResp = requests.post(url + '/api/upload/image?session_id=test',files=dict(file=open(imagePath,'rb')))
    if not imgResp.ok:
        raise Exception('Error uploading ' + imagePath + ' :' + imgResp.text + '.')
    # Record the server-side filename, then push the updated object back.
    objt['theImage'] = imgResp.json()['filename']
    payload = {'session_id' : 'test','object' : objt}
    hdrs = {'Content-type': 'application/json'}
    updResp = requests.put(url + '/api/' + dimName + 's/name/' + objtName,data=json.dumps(payload),headers=hdrs)
    if not updResp.ok:
        raise Exception('Error updating ' + objtName + ': ' + updResp.text + '.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Import object images')
    parser.add_argument('--url',dest='url',help='URL for CAIRIS server')
    parser.add_argument('--database',dest='dbName',help='Database name')
    parser.add_argument('--image_dir',dest='imageDir',help='Directory for model images')
    parser.add_argument('--type',dest='dimName',help='Object type (persona or attacker)')
    args = parser.parse_args()
    try:
        if ((args.dimName != 'attacker') and (args.dimName != 'persona')):
            raise Exception('Object type ' + args.dimName + ' not supported.')
        # Open the requested database for this session.
        openDbResp = requests.post(args.url + '/api/settings/database/' + quote(args.dbName) + '/open?session_id=test')
        if not openDbResp.ok:
            raise Exception('Fatal error: cannot open database ' + args.dbName + ': ' + openDbResp.text)
        objtResp = requests.get(args.url + "/api/" + args.dimName + "s?session_id=test")
        if not objtResp.ok:
            raise Exception('Fatal error: cannot get ' + args.dimName + 's: ' + objtResp.text + '.')
        objts = objtResp.json()
        for objtName in objts.keys():
            updateObjectImage(args.url,args.dimName,args.imageDir,objts[objtName])
        # Restore the default database before exiting.
        reopenDbResp = requests.post(args.url + '/api/settings/database/cairis_default/open?session_id=test')
        if not reopenDbResp.ok:
            raise Exception('Fatal error re-opening default database: ' + reopenDbResp.text)
    # BUG FIX: `except Exception, e` and the bare `print` statement are
    # Python-2-only syntax; `as e` and print() work on Python 2.6+ and 3.
    except Exception as e:
        print('Fatal error: ' + str(e))
        sys.exit(-1)
|
from django.urls import path
from serempre_todo.task.views import (
task_create_view,
task_time_worked_update_view,
task_update_view,
)
# URL namespace: reverse these routes as "task:<name>".
app_name = "task"

urlpatterns = [
    # Listed before the <str:pk> pattern below so the literal "create/"
    # path is matched first and never captured as a pk.
    path("create/", view=task_create_view, name="task-create"),
    path("<str:pk>/", view=task_update_view, name="task-update"),
    # Two path segments, so it cannot be shadowed by the single-segment
    # <str:pk> pattern above.
    path("time-worked/<str:pk>/", view=task_time_worked_update_view, name="task-time-worked-update-view"),
]
|
import azure.functions as func
import six
from time import time
from chameleon import PageTemplate
# Chameleon page template that renders options['table'] (a list of dicts)
# as an HTML table. The %s at the end substitutes the name of the native
# text type via six ("str" on Py3, "unicode" on Py2), used as a callable
# inside the template to stringify the column index.
BIGTABLE_ZPT = """\
<table xmlns="http://www.w3.org/1999/xhtml"
xmlns:tal="http://xml.zope.org/namespaces/tal">
<tr tal:repeat="row python: options['table']">
<td tal:repeat="c python: row.values()">
<span tal:define="d python: c + 1"
tal:attributes="class python: 'column-' + %s(d)"
tal:content="python: d" />
</td>
</tr>
</table>""" % six.text_type.__name__
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Render a num_of_rows x num_of_cols table template and return the
    render latency in seconds as the HTTP response body.

    Query parameters:
        num_of_rows, num_of_cols: table dimensions (default 10 each).
    """
    # BUG FIX: query-string parameters arrive as strings (or None when
    # absent), but range() needs ints — the original passed them through
    # unconverted and raised TypeError on every request.
    num_of_rows = int(req.params.get('num_of_rows') or 10)
    num_of_cols = int(req.params.get('num_of_cols') or 10)

    start = time()
    tmpl = PageTemplate(BIGTABLE_ZPT)

    # One shared row dict referenced num_of_rows times (matches the
    # original behavior; the template only reads it).
    row = {}
    for i in range(num_of_cols):
        row[str(i)] = i
    table = [row for _ in range(num_of_rows)]

    tmpl.render(options={'table': table})
    latency = time() - start
    return func.HttpResponse(str(latency))
|
# -*- coding: utf-8 -*-
import click
import os
import re
import sys
import json
import requests
import platform
from tqdm import tqdm
__version__ = "1.0.3"
def load_path():
    """Return the saved download directory (first line of the path file)."""
    with open(txt_path, 'r') as handle:
        return handle.readline()
print("\nๅฝๅๆไฝ็ณป็ป๏ผ"+platform.system())
# Resolve the default download directory per platform, persisting it to a
# small text file so later runs reuse the last choice.
if platform.system()=='Windows':
    import winreg
    txt_path = 'D:/path.txt'

    def get_desktop():
        """Read the user's Desktop folder from the registry and persist it."""
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders')
        path = winreg.QueryValueEx(key, "Desktop")[0]
        with open(txt_path,'w') as f:
            f.write(path)

    if not os.path.exists(txt_path):
        get_desktop()
    default_path = load_path()
elif platform.system()=='Linux':
    txt_path = '/etc/path.txt'
    if not os.path.exists(txt_path):
        default_path = '~/acfun'
        # BUG FIX: previously wrote the undefined name ``path`` here
        # (NameError on first run); persist the chosen default instead.
        with open(txt_path,'w') as f:
            f.write(default_path)
    else:
        default_path = load_path()
else:
    txt_path = '/Users/path.txt'
    if not os.path.exists(txt_path):
        default_path = '/Users/acfun'
        # BUG FIX: same undefined-``path`` write as the Linux branch.
        with open(txt_path,'w') as f:
            f.write(default_path)
    else:
        default_path = load_path()
# Headers sent with requests to acfun.cn page fetches.
headers = {
    'referer': 'https://www.acfun.cn/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83'
}
# Directory containing this package (shown in the --version output).
pgk_dir = os.path.dirname(os.path.abspath(__file__))
class DefaultHelp(click.Command):
    """A click Command that shows its help text when invoked with no
    arguments instead of failing."""

    def __init__(self, *args, **kwargs):
        settings = kwargs.setdefault('context_settings', {})
        # Register -h/--help unless the caller already chose help flags.
        settings.setdefault('help_option_names', ['-h', '--help'])
        self.help_flag = settings['help_option_names'][0]
        super(DefaultHelp, self).__init__(*args, **kwargs)

    def parse_args(self, ctx, args):
        if not args:
            # No arguments: behave as if the user asked for help.
            args = [self.help_flag]
            print()
        return super(DefaultHelp, self).parse_args(ctx, args)
@click.command(cls=DefaultHelp)
@click.version_option(
    '{0} from {1} (Python {2})'.format(__version__, pgk_dir, sys.version[:3]),
    '-V', '--version', help='ๆพ็คบ็ๆฌไฟกๆฏๅนถ้ๅบ')
@click.option('-u', '--url', prompt="่ฏท่พๅฅ้พๆฅ", help='acfun่ง้ข้พๆฅ')
@click.option('-p', '--path', default=default_path, help='่ง้ขไธ่ฝฝ่ทฏๅพ',show_default=True)
def cli(url, path):
    """Download an acfun video to ``path``.

    Persists the chosen directory to ``txt_path`` so it becomes the
    default for the next run, then scrapes the page, asks the user for a
    quality and downloads the stream.
    """
    with open(txt_path,'w') as f1:
        f1.write(path)
    path = load_path()

    # NOTE(review): these classes appear nested inside cli() so that the
    # local ``path`` above is visible to m3u8_url.get_m3u8 — confirm
    # against the original source (indentation was lost in extraction).
    class m3u8_url():
        def __init__(self, f_url):
            # f_url: the acfun video page URL.
            self.url = f_url

        def get_m3u8(self):
            # Scrape the page's embedded JSON for the title and available
            # stream qualities, then start the chosen download.
            global flag, qua, rel_path
            html = requests.get(self.url, headers=headers).text
            first_json = json.loads(re.findall('window.pageInfo = window.videoInfo = (.*?)};', html)[0] + '}', strict=False)
            name = first_json['title'].strip().replace("|",'')
            video_info = json.loads(first_json['currentVideoInfo']['ksPlayJson'], strict=False)['adaptationSet'][0]['representation']
            Label = {}
            num = 0
            for quality in video_info:  # collect quality labels, 1-based
                num += 1
                Label[num] = quality['qualityLabel']
            print(Label)
            choice = int(input("่ฏท้ๆฉๆธๆฐๅบฆ: "))
            print("่ง้ขๅญๆพ่ทฏๅพ๏ผ" + path)
            Download(name + '[{}]'.format(Label[choice]), video_info[choice - 1]['url'], path).start_download()

    class Download():
        # NOTE(review): class attribute shared by every instance; works
        # here because only one Download is created per process.
        urls = []

        def __init__(self, name, m3u8_url, path):
            '''
            :param name: video title (used as the output file name)
            :param m3u8_url: address of the video's .m3u8 playlist
            :param path: download directory
            '''
            self.video_name = name
            self.path = path
            # Base URL of the HLS segments (everything up to 'hls/').
            self.f_url = str(m3u8_url).split('hls/')[0] + 'hls/'
            with open(self.path + '/{}.m3u8'.format(self.video_name), 'wb')as f:
                f.write(requests.get(m3u8_url, headers={'user-agent': 'Chrome/84.0.4147.135'}).content)

        def get_ts_urls(self):
            # Collect absolute URLs of the .ts segments from the playlist.
            with open(self.path + '/{}.m3u8'.format(self.video_name), "r") as file:
                lines = file.readlines()
                for line in lines:
                    if '.ts' in line:
                        self.urls.append(self.f_url + line.replace('\n', ''))

        def start_download(self):
            # Fetch each segment and append it to one .flv file, then
            # delete the temporary playlist.
            self.get_ts_urls()
            for url in tqdm(self.urls, desc="ๆญฃๅจไธ่ฝฝ {} ".format(self.video_name)):
                movie = requests.get(url, headers={'user-agent': 'Chrome/84.0.4147.135'})
                with open(self.path + '/{}.flv'.format(self.video_name), 'ab')as f:
                    f.write(movie.content)
            os.remove(self.path + '/{}.m3u8'.format(self.video_name))

    m3u8_url(url).get_m3u8()


if __name__ == '__main__':
    cli()
# 27. Remove Element
# https://leetcode.com/problems/remove-element/
# Easy
# Time Complexity : O(N)
# Space Complexity: O(1)
class Solution:
    """LeetCode 27 — Remove Element (in place; order of kept elements not preserved)."""

    # BUG FIX: the annotations referenced ``List`` without importing it
    # from typing, so the class definition raised NameError at runtime;
    # string annotations keep the intent without evaluating the name.
    def removeElement(self, nums: "List[int]", val: int) -> int:
        """Remove every occurrence of ``val`` from ``nums`` in place.

        Matching elements are swapped to the tail so each position is
        visited once; relative order of kept elements is not preserved
        (allowed by the problem statement).

        Args:
            nums: list to filter in place; after the call its first k
                slots hold the kept elements.
            val: value to remove.

        Returns:
            k, the number of kept elements.

        Time O(N), space O(1).
        """
        r = len(nums) - 1
        l = 0
        while l <= r:
            if nums[l] == val:
                # Swap the match to the tail and shrink the window.
                nums[l], nums[r] = nums[r], nums[l]
                r -= 1
            else:
                l += 1
        return r + 1
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import os.path
# Human-readable names; order matches the detection class ids 1..3 used
# in TLClassifier.get_classification.
TRAFFIC_LIGHT_COLORS = ["GREEN", "RED", "YELLOW"]
# Minimum detection score required to trust the top prediction.
CONFIDENT_THRESHOLD = 0.5
class TLClassifier(object):
    """Traffic-light color classifier backed by a frozen TensorFlow
    object-detection graph (TF 1.x), integrated with ROS."""

    def __init__(self):
        # Path of the frozen inference graph, from the ROS parameter server.
        MODEL_PATH = os.path.abspath(rospy.get_param('model_name'))
        # rospy.loginfo("MODEL_PATH: {}".format(MODEL_PATH))
        # Debug draw colors, ordered like TRAFFIC_LIGHT_COLORS.
        self.COLOR_ARRAY = [(0, 255, 0), (255, 0, 0), (255, 255, 0)]
        self.bridge = CvBridge()
        # Republishes the incoming camera image for debugging.
        self.image_pub = rospy.Publisher("processed_image", Image, queue_size=1)

        # Load tensorflow graph
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            loaded_graph_def = tf.GraphDef()
            with tf.gfile.GFile(MODEL_PATH, 'rb') as fid:
                serialized_graph = fid.read()
                loaded_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(loaded_graph_def, name='')

            # Get input/output tensors from loaded graph
            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

        # Session with capped, growable GPU memory so it can share the GPU.
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        self.sess = tf.Session(graph=self.detection_graph, config=config)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        with self.detection_graph.as_default():
            # Expand dimension to reshape input image to [1, None, None, 3].
            img_expanded = np.expand_dims(image, axis=0)
            feed_dict = {
                self.image_tensor: img_expanded
            }
            boxes, scores, classes, num_detections = self.sess.run(
                [self.detection_boxes, self.detection_scores,
                 self.detection_classes, self.num_detections], feed_dict=feed_dict)

        outimage = image
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(outimage, "rgb8"))
        except CvBridgeError as e:
            # BUG FIX: rospy has no ``logerror`` — the handler itself raised
            # AttributeError; the correct API is ``rospy.logerr``.
            rospy.logerr(e)

        # Take the first detection as the best one (the TF object-detection
        # API returns detections sorted by score — assumed; confirm for
        # this exported model).
        color = None
        highest_score = scores[0][0]
        # rospy.logdebug("num_detections: {}\n".format(num_detections))
        # rospy.logdebug("scores: {}\n".format(scores))
        # rospy.logdebug("classes: {}\n".format(classes))
        most_likely_class = classes[0][0]
        if num_detections > 0:
            if highest_score >= CONFIDENT_THRESHOLD:
                color = most_likely_class
            else:
                log_msg = '# detections is {}, but confident {} is not enough'.format(
                    num_detections, highest_score)
                rospy.loginfo(log_msg)

        if color == 1:
            rospy.loginfo('Infered traffic light is GREEN')
            return TrafficLight.GREEN
        elif color == 2:
            rospy.loginfo('Infered traffic light is RED')
            return TrafficLight.RED
        elif color == 3:
            rospy.loginfo('Infered traffic light is YELLOW')
            return TrafficLight.YELLOW
        else:
            rospy.loginfo('UNKNOWN traffic light')
            return TrafficLight.UNKNOWN
# Generated by Django 2.1.2 on 2018-11-09 01:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 2.1.2).

    Creates the ``db_supervisor`` and ``db_translator`` tables; each row
    links one-to-one to the configured auth user model.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Supervisor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'db_supervisor',
            },
        ),
        migrations.CreateModel(
            name='Translator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Integer role flag, default 0 — semantics defined in the app's models.
                ('role', models.IntegerField(default=0)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'db_translator',
            },
        ),
    ]
|
import copy
import time
import uuid
import optuna
from tqdm import tqdm
from hana_automl.optimizers.base_optimizer import BaseOptimizer
from hana_automl.pipeline.modelres import ModelBoard
from hana_automl.preprocess.settings import PreprocessorSettings
class OptunaOptimizer(BaseOptimizer):
    """Optuna hyperparameters optimizer. (https://optuna.org/)

    Attributes
    ----------
    data : Data
        Input data.
    algo_list : list
        List of algorithms to be tuned and compared.
    algo_dict : dict
        Dictionary of algorithms to be tuned and compared.
    iterations : int
        Number of iterations (None means "run until time_limit").
    time_limit : int
        Time budget for the study, in seconds (may be None).
    problem : str
        Machine learning problem.
    tuning_metric : str
        Metric optimized during tuning (e.g. "accuracy", "rmse").
    tuned_params : dict
        Final tuned hyperparameters of best algorithm.
    categorical_features : list
        List of categorical features in dataframe.
    prepset
        prepset for preprocessing.
    model
        Tuned HANA ML model in algorithm.
    droplist_columns
        Columns in dataframe to be dropped.
    leaderboard : list
        ModelBoard entries, one per completed trial.
    """

    def __init__(
        self,
        algo_list: list,
        data,
        problem: str,
        iterations: int,
        time_limit: int,
        algo_dict: dict,
        categorical_features: list = None,
        droplist_columns: list = None,
        verbose=2,
        tuning_metric: str = None,
    ):
        """Store the tuning configuration and prepare preprocessor settings."""
        self.algo_list = algo_list
        self.data = data
        self.iterations = iterations
        self.problem = problem
        self.time_limit = time_limit
        self.algo_dict = algo_dict
        self.categorical_features = categorical_features
        self.droplist_columns = droplist_columns
        self.verbose = verbose
        # Silence optuna's per-trial logging unless the caller wants it.
        if self.verbose < 2:
            optuna.logging.set_verbosity(optuna.logging.WARNING)
        self.model = None
        self.prepset: PreprocessorSettings = PreprocessorSettings(data.strategy_by_col)
        self.prepset.categorical_cols = categorical_features
        # Preprocessing task flavor depends on the metric family.
        if tuning_metric in ["accuracy"]:
            self.prepset.task = "cls"
        else:
            self.prepset.task = "reg"
        self.prepset.normalization_exceptions = self.data.check_norm_except(
            categorical_features
        )
        self.leaderboard: list = list()
        self.accuracy = 0
        self.tuned_params = None
        self.algorithm = None
        self.study = None
        self.tuning_metric = tuning_metric

    def inner_params(self, study, trial):
        """Optuna callback: echo the hyperparameters of the trial that just finished."""
        if self.verbose > 1:
            time.sleep(1)
            # The newest leaderboard entry corresponds to the finished trial
            # (negative indexing replaces the previous len(...)-1 chains).
            last = self.leaderboard[-1]
            print(
                "\033[31m {}\033[0m".format(
                    last.algorithm.title
                    + " trial params :"
                    + str(last.algorithm.optuna_opt.trials[-1].params)
                )
            )

    def tune(self):
        """Run the optuna study, then score the whole leaderboard on validation data."""
        # Error metrics are minimized; quality metrics are maximized.
        if self.tuning_metric in ["mse", "rmse", "mae"]:
            dirc = "minimize"
        else:
            dirc = "maximize"
        self.study = optuna.create_study(
            direction=dirc,
            study_name="hana_automl optimization process(" + str(uuid.uuid4()) + ")",
        )
        if self.iterations is not None and self.time_limit is not None:
            self.study.optimize(
                self.objective,
                n_trials=self.iterations,
                timeout=self.time_limit,
                callbacks=[self.inner_params],
            )
        elif self.iterations is None:
            self.study.optimize(
                self.objective, timeout=self.time_limit, callbacks=[self.inner_params]
            )
        else:
            self.study.optimize(
                self.objective, n_trials=self.iterations, callbacks=[self.inner_params]
            )
        time.sleep(2)
        self.tuned_params = self.study.best_params
        if self.verbose > 0:
            res = len(self.study.trials)
            if self.iterations is None:
                print(
                    "There was a stop due to a time limit! Completed "
                    + str(res)
                    + " iterations"
                )
            elif res == self.iterations:
                print("All iterations completed successfully!")
            else:
                print(
                    "There was a stop due to a time limit! Completed "
                    + str(res)
                    + " iterations of "
                    + str(self.iterations)
                )
            print(
                f"Starting model {self.tuning_metric} score evaluation on the validation data!"
            )
        if self.verbose > 1:
            time.sleep(1)
            lst = tqdm(
                self.leaderboard,
                desc=f"\033[33m Leaderboard {self.tuning_metric} score evaluation",
                colour="yellow",
                bar_format="{l_bar}{bar}\033[33m{r_bar}\033[0m",
            )
        else:
            lst = self.leaderboard
        # Re-clean the validation set with each member's own preprocessing
        # settings before scoring it.
        for member in lst:
            data = self.data.clear(
                num_strategy=member.preprocessor.tuned_num_strategy,
                strategy_by_col=member.preprocessor.strategy_by_col,
                categorical_list=member.preprocessor.categorical_cols,
                normalizer_strategy=member.preprocessor.tuned_normalizer_strategy,
                normalizer_z_score_method=member.preprocessor.tuned_z_score_method,
                normalize_int=member.preprocessor.tuned_normalize_int,
                normalization_excp=member.preprocessor.normalization_exceptions,
                clean_sets=["valid"],
            )
            acc = member.algorithm.score(
                data=data, df=data.valid, metric=self.tuning_metric
            )
            member.add_valid_score(acc)
        # Higher is better only for these metrics; error metrics sort ascending.
        reverse = self.tuning_metric == "r2_score" or self.tuning_metric == "accuracy"
        self.leaderboard.sort(
            key=lambda member: member.valid_score + member.train_score,
            reverse=reverse,
        )
        self.model = self.leaderboard[0].algorithm.model
        self.algorithm = self.leaderboard[0].algorithm

    def objective(self, trial: optuna.trial.Trial) -> float:
        """Objective function. Optimizer uses it to search for best algorithm and preprocess method.

        (Return annotation fixed: the score returned by ``algo.optuna_tune``
        is a float, not an int.)

        Parameters
        ----------
        trial: optuna.trial.Trial
            Optuna trial. Details here: https://optuna.readthedocs.io/en/stable/reference/trial.html

        Returns
        -------
        acc: float
            Model's score for the tuning metric.
        """
        algo = self.algo_dict.get(
            trial.suggest_categorical("algo", self.algo_dict.keys())
        )
        algo.set_categ(self.categorical_features)
        imputer = trial.suggest_categorical("imputer", self.prepset.num_strategy)
        self.prepset.tuned_num_strategy = imputer
        normalizer_strategy = trial.suggest_categorical(
            "normalizer_strategy", self.prepset.normalizer_strategy
        )
        self.prepset.tuned_normalizer_strategy = normalizer_strategy
        z_score_method = ""
        if normalizer_strategy == "z-score":
            z_score_method = trial.suggest_categorical(
                "z_score_method", self.prepset.z_score_method
            )
            self.prepset.tuned_z_score_method = z_score_method
        normalize_int = trial.suggest_categorical(
            "normalize_int", self.prepset.normalize_int
        )
        self.prepset.tuned_normalize_int = normalize_int
        drop_outers = trial.suggest_categorical("drop_outers", self.prepset.drop_outers)
        self.prepset.tuned_drop_outers = drop_outers
        data = self.data.clear(
            strategy_by_col=self.prepset.strategy_by_col,
            num_strategy=imputer,
            categorical_list=self.categorical_features,
            normalizer_strategy=normalizer_strategy,
            normalizer_z_score_method=z_score_method,
            normalize_int=normalize_int,
            drop_outers=drop_outers,
            normalization_excp=self.prepset.normalization_exceptions,
            clean_sets=["test", "train"],
        )
        acc = algo.optuna_tune(data, self.tuning_metric)
        # Snapshot the algorithm and preprocessing used for this trial.
        self.leaderboard.append(
            ModelBoard(copy.copy(algo), acc, copy.copy(self.prepset))
        )
        return acc

    def get_tuned_params(self) -> dict:
        """Returns tuned hyperparameters."""
        return {
            "algorithm": self.tuned_params,
            "accuracy": self.leaderboard[0].valid_score,
        }

    def get_model(self):
        """Returns tuned model."""
        return self.model

    def get_algorithm(self):
        """Returns tuned AutoML algorithm"""
        return self.algorithm

    def get_preprocessor_settings(self) -> PreprocessorSettings:
        """Returns tuned preprocessor settings."""
        return self.leaderboard[0].preprocessor

    def fit(self, algo, data):
        """Fits given model from data. Small method to reduce code repeating."""
        ftr: list = data.train.columns
        ftr.remove(data.target)
        ftr.remove(data.id_colm)
        algo.fit(data, ftr, self.categorical_features)
|
import logging
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from playback.studio.equalizer import Equalizer
from playback.studio.recordings_lookup import find_matching_recording_ids, RecordingLookupProperties
_logger = logging.getLogger(__name__)
class PlaybackStudio(object):
    """
    Studio that runs multiple playbacks for multiple category using the equalizer to run comparison
    """

    # NOTE: evaluated once at import time, so its start_date is frozen
    # then. Kept for backwards compatibility; __init__ now builds a fresh
    # default window per instance instead of using this.
    DEFAULT_LOOKUP_PROPERTIES = RecordingLookupProperties(
        start_date=datetime.utcnow() - timedelta(days=7), limit=20)

    def __init__(self, categories, equalizer_tuner, tape_recorder, lookup_properties=None, recording_ids=None,
                 compare_execution_config=None):
        """
        :param categories: Categories (operations) to conduct comparison for
        :type categories: list of str
        :param recording_ids: List of specific recording ids to play, when given categories are ignored
        :type recording_ids: list of str
        :param equalizer_tuner: Given a category return a corresponding equalizer tuning to be used for playback and
        comparison
        :type equalizer_tuner: playback.studio.equalizer_tuning.EqualizerTuner
        :param lookup_properties: Lookup properties to use for all recordings
        :type lookup_properties: RecordingLookupProperties
        :param tape_recorder: The tape recorder that will be used to play the recordings
        :type tape_recorder: playback.tape_recorder.TapeRecorder
        :param compare_execution_config: Configuration specific to the comparison execution flow
        :type compare_execution_config: CompareExecutionConfig
        """
        self.categories = categories
        self.recording_ids = recording_ids
        self.equalizer_tuner = equalizer_tuner
        # BUG FIX: the class-level default was computed at import time, so
        # its "last 7 days" window went stale in long-lived processes.
        # Build the default relative to construction time instead.
        self.lookup_properties = lookup_properties or RecordingLookupProperties(
            start_date=datetime.utcnow() - timedelta(days=7), limit=20)
        self.tape_recorder = tape_recorder
        self.compare_execution_config = compare_execution_config

    def play(self):
        """
        Fetch and play recording of all categories and run comparison on each one
        :return: Comparison per category of all playbacks
        :rtype: dict[(str, collections.Iterator[(playback.studio.equalizer.Comparison or Exception)]]
        """
        if self.recording_ids:
            categories_recordings = self._group_recording_ids_by_categories()
        else:
            categories_recordings = {c: None for c in self.categories}
        result = {}
        for category, recording_ids in categories_recordings.items():
            result[category] = self._play_category(category, recording_ids)
        return result

    def _group_recording_ids_by_categories(self):
        """
        :return: Recording ids groups by categories
        :rtype: dict[str, list of str]
        """
        grouping = defaultdict(list)
        for recording_id in self.recording_ids:
            category = self.tape_recorder.tape_cassette.extract_recording_category(recording_id)
            grouping[category].append(recording_id)
        # We want deterministic order of playback
        return OrderedDict(sorted(grouping.items()))

    def _play_category(self, category, recording_ids):
        """
        Play and compare recordings of a single category
        :param category: Category to play
        :type category: str
        :param recording_ids: List of specific recording ids to play, None means to fetch recordings using the
        lookup parameters
        :type recording_ids: None or list of str
        :return: Comparison of all playback of current category
        :rtype: (list of playback.studio.equalizer.Comparison) or Exception
        """
        _logger.info(u'Playing Category {}'.format(category))
        try:
            tuning = self.equalizer_tuner.create_category_tuning(category)
        except Exception as ex:  # pylint: disable=broad-except
            _logger.info(u'Cannot tune equalizer for category {} - {}'.format(category, ex))
            return ex

        if recording_ids:
            recording_id_iterator = iter(recording_ids)
        else:
            recording_id_iterator = find_matching_recording_ids(
                self.tape_recorder, category, self.lookup_properties)

        def player(recording_id):
            # Replays a single recording through the category's tuned playback.
            return self.tape_recorder.play(recording_id, tuning.playback_function)

        equalizer = Equalizer(recording_id_iterator, player, tuning.result_extractor, tuning.comparator,
                              comparison_data_extractor=tuning.comparison_data_extractor,
                              compare_execution_config=self.compare_execution_config)
        return equalizer.run_comparison()
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
try:
from .common import *
except ImportError:
from common import *
class Prefs(Gtk.Dialog):
    """Preferences dialog.

    Edits the DEFAULT section of the shared CONFIG object (from common) and
    persists it with save_config() when the user presses OK.
    """
    def __init__(self, parent):
        # Standard Cancel/OK dialog; response ids are wired to on_response below.
        Gtk.Dialog.__init__(
            self, 'Preferences', parent, 0,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
             Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_border_width(10)
        self.set_icon_name('grive-utils')
        self.connect('response', self.on_response)
        box = self.get_content_area()
        box.set_spacing(10)
        # One ListBox row per setting, built in build()
        listbox = Gtk.ListBox(selection_mode=Gtk.SelectionMode.NONE)
        box.pack_start(listbox, True, True, 0)
        self.build(listbox)
        self.show_all()

    def build(self, listbox):
        """Create one row per setting, pre-filled from CONFIG['DEFAULT']."""
        # Sync interval in minutes (0..43200, i.e. up to 30 days)
        self.interval = Gtk.SpinButton.new_with_range(0, 43200, 1)
        self.interval.set_value(CONFIG.getint('DEFAULT', 'Interval'))
        listbox.insert(
            self.get_row(
                'Check Google Drive every', self.interval, 'minutes'), -1)
        # Upload bandwidth limit in kbps (0 presumably means unlimited — confirm)
        self.uprate = Gtk.SpinButton.new_with_range(0, 1048576, 1)
        self.uprate.set_value(CONFIG.getint('DEFAULT', 'UpRate'))
        listbox.insert(
            self.get_row(
                'Upload rate', self.uprate, 'kbps'), -1)
        # Download bandwidth limit in kbps
        self.downrate = Gtk.SpinButton.new_with_range(0, 1048576, 1)
        self.downrate.set_value(CONFIG.getint('DEFAULT', 'DownRate'))
        listbox.insert(
            self.get_row(
                'Download rate', self.downrate, 'kbps'), -1)
        # Desktop notifications toggle
        self.notify = Gtk.Switch()
        self.notify.set_active(CONFIG.getboolean('DEFAULT', 'Notify'))
        listbox.insert(
            self.get_row(
                'Show notifications', self.notify), -1)
        # Light vs dark panel icon theme toggle
        self.light_panel = Gtk.Switch()
        self.light_panel.set_active(CONFIG.getboolean('DEFAULT', 'LightPanel'))
        listbox.insert(
            self.get_row(
                'Light panel theme', self.light_panel), -1)

    def get_row(self, label, widget, unit=None):
        """Build a non-activatable ListBox row: label left, widget (and unit) right.

        :param label: text shown on the left of the row
        :param widget: input widget packed at the right edge
        :param unit: optional unit text shown to the right of the widget
        :return: the assembled Gtk.ListBoxRow
        """
        row = Gtk.ListBoxRow(activatable=False)
        hbox = Gtk.Box(
            orientation=Gtk.Orientation.HORIZONTAL, spacing=10, border_width=10)
        row.add(hbox)
        hbox.pack_start(Gtk.Label(label), False, False, 0)
        if unit:
            hbox.pack_end(Gtk.Label(unit), False, False, 0)
        hbox.pack_end(widget, False, False, 0)
        return row

    def on_response(self, dialog, response):
        """On OK, copy widget values back into CONFIG and save to disk."""
        if response == Gtk.ResponseType.OK:
            CONFIG['DEFAULT']['Interval'] = str(self.interval.get_value_as_int())
            CONFIG['DEFAULT']['UpRate'] = str(self.uprate.get_value_as_int())
            CONFIG['DEFAULT']['DownRate'] = str(self.downrate.get_value_as_int())
            # casefold() stores 'true'/'false' in configparser-friendly lowercase
            CONFIG['DEFAULT']['Notify'] = str(
                self.notify.get_active()).casefold()
            CONFIG['DEFAULT']['LightPanel'] = str(
                self.light_panel.get_active()).casefold()
            save_config()
class Fatal(Gtk.Dialog):
    """Simple OK-only dialog used to report an unrecoverable error."""

    def __init__(self, parent, title, message):
        Gtk.Dialog.__init__(
            self, title, parent, 0, (Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_border_width(10)
        self.set_icon_name('grive-utils')
        content = self.get_content_area()
        content.set_spacing(10)
        # The message is the dialog's only content
        content.pack_start(Gtk.Label(message), True, True, 0)
        self.show_all()
if __name__ == '__main__':
    # Standalone entry point: open the preferences dialog without a parent
    # window and block until the user responds.
    Prefs(None).run()
|
"""Test module to check if optimizer classes are initialized correcty by the main file.
"""
import hjson
import pytest
from c3.optimizers.optimalcontrol import OptimalControl
from c3.optimizers.modellearning import ModelLearning
from c3.experiment import Experiment
from c3.main import run_cfg
@pytest.mark.integration
def test_main_c1() -> None:
    """Run the optimal-control (C1) config end to end through run_cfg."""
    cfg_path = "test/c1.cfg"
    with open(cfg_path, "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    run_cfg(cfg, cfg_path, debug=True)
@pytest.mark.integration
def test_main_c2() -> None:
    """Run the calibration (C2) config end to end through run_cfg."""
    cfg_path = "test/c2.cfg"
    with open(cfg_path, "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    run_cfg(cfg, cfg_path, debug=True)
@pytest.mark.integration
def test_main_c3() -> None:
    """Run the model-learning (C3) config end to end through run_cfg."""
    cfg_path = "test/c3.cfg"
    with open(cfg_path, "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    run_cfg(cfg, cfg_path, debug=True)
@pytest.mark.integration
def test_main_sens() -> None:
    """Run the sensitivity-analysis config end to end through run_cfg."""
    cfg_path = "test/sensitivity.cfg"
    with open(cfg_path, "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    # pop configs that are not yet supported
    for unsupported in ("estimator", "estimator_list"):
        cfg.pop(unsupported)
    run_cfg(cfg, cfg_path, debug=True)
@pytest.mark.integration
def test_create_c1() -> None:
    """Build an OptimalControl instance directly from the C1 config."""
    with open("test/c1.cfg", "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    # These keys are consumed by run_cfg, not by the constructor
    for key in ("optim_type", "gateset_opt_map", "opt_gates"):
        cfg.pop(key)
    exp = Experiment()
    exp.read_config(cfg.pop("exp_cfg"))
    OptimalControl(**cfg, pmap=exp.pmap)
@pytest.mark.integration
def test_create_c3() -> None:
    """Build a ModelLearning instance directly from the C3 config."""
    with open("test/c3.cfg", "r") as cfg_file:
        cfg = hjson.load(cfg_file)
    # These keys are consumed by run_cfg, not by the constructor
    for key in ("optim_type", "exp_opt_map"):
        cfg.pop(key)
    exp = Experiment()
    exp.read_config(cfg.pop("exp_cfg"))
    assert isinstance(ModelLearning(**cfg, pmap=exp.pmap), ModelLearning)
|
# Ooops
# NOTE: This module is a scratch file of Python OOP practice snippets.
# Every triple-quoted string below is a disabled (no-op) exercise: the
# strings are module-level expression statements and have no runtime effect.
# To run an exercise, remove its surrounding quotes.

# Exercise 1: a minimal class with one method, called through an instance.
"""
class shabeeb: #class creating
def IET(self,dep):
print("studying in dep of\t"+dep)
a=shabeeb() #object creating
dep="INFROMATION TECHNOLOGY"
a.IET(dep)
"""
# Exercise 2: per-instance state set via a setter-style method.
"""
class shabeeb():
def bio(self,n):
self.name=n
def biod(self):
print(self.name)
x=shabeeb()
name="how are you"
y=shabeeb()
y.bio(17)
x.bio(name)
x.biod()
y.biod()
"""
# Constructor
# Exercise 3: initialising attributes in __init__.
"""
class shabeeb():
def __init__(self,name,age,dep,year): #this is a constructor
self.name=name
self.age=age
self.dep=dep
self.year=year
x=shabeeb("shabeeb",17,"IT","fourth year")
"""
# Exercise 4: class variable ("year") shared by all Employee instances.
"""
class Employee:
year=2001
def __init__(self, name, id,age,dep,place):
self.id = id
self.name = name
self.age=age
self.dep=dep
self.place=place
def add_age(self):
self.age=self.age+1
def relocate_dep(self,dep):
self.dep=dep
def display(self):
print("name is :"+self.name)
print("id is :%d" %self.id)
print("year is :" + str(Employee.year))
print("age is :" + str(self.age))
print("dep is :"+self.dep)
print("place is :" + self.place)
emp1 = Employee("John", 101,20,"Informarion technology","tvm")
emp2 = Employee("David", 102,40,"electronics","ekm")
Employee.year=Employee.year+1
emp2.add_age()
emp1.add_age()
emp1.display()
print("_____________")
emp2.display()
"""
# Exercise 5: same Employee example extended with @classmethod and
# @staticmethod helpers.
"""
#object instance variables(self.)
class Employee:
year = 2012
def __init__(self, name, id, age, dep, place):
self.id = id
self.name = name
self.age = age
self.dep = dep
self.place = place
def add_age(self,):
self.age = self.age + 1
def relocate_dep(self, dep):
self.dep = dep
def display(self):
print("name is :" + self.name)
print("id is :%d" % self.id)
print("year is :" + str(Employee.year))
print("age is :" + str(self.age))
print("dep is :" + self.dep)
print("place is :" + self.place)
@classmethod # used for only class variable and it contain a function also
def add_year(cls):
cls.year = cls.year + 1
@staticmethod
def main_display():
print("EMPLOYEE DETAILS IS:")
emp1 = Employee("John", 101, 20, "Informarion technology", "tvm")
emp2 = Employee("David", 102, 40, "electronics", "ekm")
Employee.main_display()
emp1.display()
print("_____________")
emp2.display()
print("_____________")
Employee.add_year() # class name vechitt aaa fn call cheythu
emp1.add_age()
emp1.relocate_dep("civil engineering")#changing the departmnet
emp1.display()
print("_____________")
emp2.add_age()
emp2.relocate_dep("printing technology")#changing the department
emp2.display()
"""
#!/usr/bin/python3
from math import log2
'''
Determine minimum number of operations to return n.
For n < 0 return 0
'''
# Seed list of known primes. minOpsHelper appends newly discovered primes
# as a simple memoisation cache (module-level, mutated at runtime).
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37,
          41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]


def PowerOf2Test(n):
    '''
    Tests if a positive integer is a power of 2.

    Uses the exact bit trick (n & (n - 1)) == 0 instead of float log2,
    which avoids precision issues for very large ints and no longer
    raises ValueError for n <= 0 (it returns False instead).
    '''
    return n > 0 and (n & (n - 1)) == 0


def minOpsHelper(n):
    '''
    Min ops helper: returns the sum of the prime factors of n, which is
    the minimum number of Copy All / Paste operations to reach n
    (each prime factor p costs p operations).
    '''
    if n in primes:
        return n
    # Scan downwards for the largest proper divisor of n.
    decrement = n - 1
    ops = 0
    while decrement > 1:
        if n % decrement == 0:
            # n // decrement is the smallest prime factor of n.
            # (Integer division: the original used '/', leaking floats
            # into the result.)
            ops = n // decrement
            break
        decrement -= 1
    if decrement == 1:
        # No proper divisor found: n is prime. Cache it; its cost is n.
        primes.append(n)
        return n
    return ops + minOpsHelper(decrement)


def minOperations(n):
    '''
    Main func that runs some initial tests prior to the helper.

    Returns 0 for non-int input or n <= 1. The type check now runs
    first (the original compared n <= 1 before checking the type, which
    raised TypeError for non-comparable inputs such as strings).
    '''
    if not isinstance(n, int) or n <= 1:
        return 0
    if PowerOf2Test(n):
        # Powers of two: each factor of 2 costs exactly 2 operations.
        return int(log2(n) * 2)
    return int(minOpsHelper(n))
|
import ast
import time
import dill
from itertools import chain
import tensorflow as tf
import numpy as np
from layers import Input, Layer, layer_from_dicts
from utils import print_headers, print_current, print_metrics, val_split
import functions
def save_model(model, filepath):
    """
    Serialise a Model (graph topology + weights) to ``filepath`` with dill.

    The tensor graph is walked from the outputs backwards; for every tensor
    we record (name, input name(s), producing layer name or an Input marker),
    and for every layer we record its config plus the current session values
    of its W / b variables.

    :param model: Model instance to save.
    :param filepath: Destination path (written in binary mode).
    """
    tensors = []
    layers = dict()
    params = dict()

    def get_model_config(tensor):
        # Resolve the name(s) of the tensor's input(s); None for graph inputs.
        if tensor.input is not None:
            if isinstance(tensor.input, list):
                tensor_input_name = [tensor_i.name for tensor_i in tensor.input]
            else:
                tensor_input_name = tensor.input.name
        else:
            tensor_input_name = None
        if tensor.node is not None:
            layer = tensor.node
            if layer.name not in layers:
                layer_dict = layer.get_config()
                # Weight/bias values are fetched from the live session and
                # stored keyed by variable name so load_model can restore them.
                if layer.W is not None:
                    layer_dict.update({'W': layer.W.name})
                    params.update({layer.W.name: (layer.W.get_shape().as_list(), Model.session.run(layer.W))})
                if layer.b is not None:
                    layer_dict.update({'b': layer.b.name})
                    params.update({layer.b.name: (layer.b.get_shape().as_list(), Model.session.run(layer.b))})
                layers.update({layer.name: layer_dict})
        tensor_tuple = (
            tensor.name,
            tensor_input_name,
            tensor.node.name if tensor.node is not None else 'Input.T.%s' % tensor.output.get_shape().as_list()[1:]
        )
        # Keep tensors in dependency order: re-insert at the front whenever a
        # tensor is revisited later in the traversal.
        if tensor_tuple in tensors:
            tensors.remove(tensor_tuple)
        tensors.insert(0, tensor_tuple)

    model.iterate_tensor_graph(get_model_config)
    inputs = [i.name for i in model.inputs] if isinstance(model.inputs, list) else model.inputs.name
    outputs = [i.name for i in model.outputs] if isinstance(model.outputs, list) else model.outputs.name
    model_dict = {
        'inputs': inputs,
        'outputs': outputs,
        'optimizer': model.optimizer,
        'loss': model.loss,
        'metrics': model.metrics,
        'tensors': tensors,
        'layers': layers,
        'params': params
    }
    # Context manager so the file handle is closed even if dump fails
    # (the original left the handle dangling).
    with open(filepath, 'wb') as model_file:
        dill.dump(model_dict, model_file)
def load_model(filepath):
    """
    Rebuild a Model previously written by :func:`save_model`.

    Layers are reconstructed from their config dicts and their W / b values
    are assigned back into the shared session; the tensor graph is then
    replayed in the stored dependency order.

    :param filepath: Path of the dill file produced by save_model.
    :return: A ready-to-use Model instance.
    """
    # Context manager so the file handle is closed after loading
    # (the original left the handle dangling).
    with open(filepath, 'rb') as model_file:
        model_dict = dill.load(model_file)
    params = model_dict['params']
    layers = model_dict['layers']
    # Recreate each layer and restore its trained parameters.
    for name, layer_dict in layers.items():
        W_name = layer_dict.pop('W') if 'W' in layer_dict else None
        b_name = layer_dict.pop('b') if 'b' in layer_dict else None
        layer = layer_from_dicts(layer_dict)
        if W_name is not None:
            layer.W = Layer._weight_variable(params[W_name][0], layer.name)
            Model.session.run(layer.W.assign(params[W_name][1]))
        if b_name is not None:
            layer.b = Layer._bias_variable(params[b_name][0], layer.name)
            Model.session.run(layer.b.assign(params[b_name][1]))
        layers.update({name: layer})
    # Replay the tensor graph; tensors are stored inputs-first, so every
    # input tensor is already in tensor_dict when its consumer is built.
    tensor_dict = dict()
    for tensor in model_dict['tensors']:
        tensor_name = tensor[0]
        layer_name = tensor[2]
        if 'Input.T.' in layer_name:
            tensor_dict.update({tensor_name: Input(ast.literal_eval(layer_name.replace('Input.T.', '')))})
        else:
            layer = layers[layer_name]
            input_tensor_name = tensor[1]
            input_tensor = tensor_dict[input_tensor_name] if not isinstance(input_tensor_name, list) else\
                [tensor_dict[i_name] for i_name in input_tensor_name]
            tensor_dict.update({tensor_name: layer(input_tensor)})
    inputs = model_dict['inputs']
    inputs = [tensor_dict[i_name] for i_name in inputs] if isinstance(inputs, list) else tensor_dict[inputs]
    outputs = model_dict['outputs']
    outputs = [tensor_dict[i_name] for i_name in outputs] if isinstance(outputs, list) else tensor_dict[outputs]
    return Model(inputs, outputs, model_dict['optimizer'], model_dict['loss'], model_dict['metrics'])
class Model(object):
    """
    Minimal Keras-style model wrapper over a TensorFlow 1.x graph.

    Holds wrapper input/output tensors, lazily builds loss/metric/optimizer
    ops inside ``fit`` and evaluates everything through the shared
    class-level session.
    """
    # TODO: Improve dictionary of "metrics"
    # That includes creating all the necessary functions and prepare the logic given some output layers:
    # Create a placeholder function with the shape of the output layers, pass them as "y" on the function and
    # pass the tensorflow layer as "x"
    # I should also include possible adversarial metrics. That means rework metric functions
    # to check if the variables are tensors or not. If they aren't create a tensor for it.

    # String identifier -> op factory mappings used to resolve the names
    # stored on the model (self.loss / self.metrics / self.optimizer).
    _metric_functions = {
        'categorical_cross_entropy': functions.cross_entropy,
        'accuracy': functions.accuracy
    }
    _optimizers = {
        'adam': tf.train.AdamOptimizer(1e-4),
        'adadelta': tf.train.AdadeltaOptimizer(),
    }
    # One shared session for all Model instances; GPU memory grows on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)

    @staticmethod
    def initialise_vars():
        """Initialise only the global variables that are not yet initialised."""
        uninit_vars = [var for var in tf.global_variables() if not Model.session.run(tf.is_variable_initialized(var))]
        Model.session.run(tf.variables_initializer(uninit_vars))

    @staticmethod
    def _to_tf_tensor(tensor):
        """Create a float32 placeholder matching the wrapper tensor's shape."""
        return tf.placeholder(tf.float32, tensor.output.get_shape().as_list())

    def __init__(self, inputs, outputs, optimizer, loss, metrics):
        """
        :param inputs: wrapper input tensor or list of them.
        :param outputs: wrapper output tensor or list of them.
        :param optimizer: key into Model._optimizers (e.g. 'adam').
        :param loss: key into Model._metric_functions.
        :param metrics: key into Model._metric_functions.
        """
        self.inputs = inputs
        self.outputs = outputs
        self.loss = loss
        self.optimizer = optimizer
        self.metrics = metrics
        # layer name -> (best W, best b), updated whenever the monitored
        # metric improves during fit().
        self.best_params = dict()
        Model.initialise_vars()

    @property
    def layers(self):
        """All layers of the graph, in dependency order (inputs first)."""
        layer_list = []

        def list_layers(tensor):
            if tensor.node is not None:
                # Re-insert at the front on revisit to keep dependency order.
                if tensor.node in layer_list:
                    layer_list.remove(tensor.node)
                layer_list.insert(0, tensor.node)

        self.iterate_tensor_graph(list_layers)
        return layer_list

    @property
    def layers_dict(self):
        """Mapping of layer name -> layer object."""
        layers = dict()
        for layer in self.layers:
            layers.update({layer.name: layer})
        return layers

    def iterate_tensor_graph(self, f):
        """Apply ``f`` to every tensor reachable backwards from the outputs."""
        tensor_list = [o for o in self.outputs] if isinstance(self.outputs, list) else [self.outputs]
        while tensor_list:
            tensor = tensor_list.pop()
            if tensor.input is not None:
                if isinstance(tensor.input, list):
                    tensor_list += tensor.input
                else:
                    tensor_list.append(tensor.input)
            f(tensor)

    def _update_best_params(self):
        """Snapshot the current W/b values of every trainable layer."""
        for l in self.layers:
            if l.trainable:
                best_W = Model.session.run(l.W) if l.W is not None else None
                best_b = Model.session.run(l.b) if l.b is not None else None
                self.best_params.update({l.name: (best_W, best_b)})

    def count_trainable_parameters(self):
        """Total number of trainable parameters across all layers."""
        parameters = 0
        for l in self.layers:
            parameters += l.count_trainable_parameters()
        return parameters

    def save(self, filename):
        """Persist this model with save_model()."""
        save_model(self, filename)

    def fit(
            self,
            tr_data,
            tr_labels,
            epochs,
            batch_size,
            val_data=None,
            val_labels=None,
            validation_split=0.25,
            patience=np.inf,
            monitor='val_acc'
    ):
        """
        Train the model with early stopping on ``monitor``.

        If no validation data is given, ``validation_split`` of the training
        data is held out (same shuffle seed for data and labels).
        """
        # TODO: Multiple metrics
        # TODO: Checking if input is tensor if not create one for it
        # The multiple input/output stuff is finicky. In order to allow for multiple tensors, we assure that
        # both inputs and outputs are lists of either one or multiple tensors.
        # For inputs that's just ok as it is as long as we remember to do the same for the training/validation data
        # when creating the dictionaries we'll feed to tensorflow.
        # For outputs we have to do extra stuff to ensure one final unique metric to optimise. This is just a sum
        # of the loss functions for each output.
        # I might have to change stuff, but for now it's workable.
        model_outputs = self.outputs if isinstance(self.outputs, list) else [self.outputs]
        tensor_inputs = [i.output for i in self.inputs] if isinstance(self.inputs, list) else [self.inputs.output]
        tensor_outputs = [Model._to_tf_tensor(o_i) for o_i in model_outputs]
        # Metrics/loss creation and optimizer. We also initialize the new variables created.
        loss_f = Model._metric_functions[self.loss]
        mid_losses = [loss_f(o_i_a.output, o_i_gt) for o_i_a, o_i_gt in zip(model_outputs, tensor_outputs)]
        loss = tf.add_n(mid_losses)
        metric_f = Model._metric_functions[self.metrics]
        mid_metrics = [metric_f(o_i_a.output, o_i_gt) for o_i_a, o_i_gt in zip(model_outputs, tensor_outputs)]
        metrics = tf.reduce_mean(mid_metrics)
        optimizer = Model._optimizers[self.optimizer].minimize(loss)
        Model.initialise_vars()
        # Metrics/loss stuff for monitoring.
        # Each entry is a 3-item list; index 1 holds the current epoch value.
        # NOTE(review): indices 0/2 look like "best value"/"best epoch",
        # presumably maintained inside print_metrics — confirm in utils.
        acc_init = [-np.inf, -np.inf, 0]
        loss_init = [np.inf, np.inf, 0]
        train_loss = {'train_loss': list(loss_init)}
        train_acc = {'train_acc': list(acc_init)}
        val_loss = {'val_loss': list(loss_init)}
        val_acc = {'val_acc': list(acc_init)}
        other_acc_names = ['val_' + o_i.node.name + '_acc' for o_i in model_outputs] if len(model_outputs) > 1 else []
        other_accs = [(acc_name, list(acc_init)) for acc_name in other_acc_names]
        val_acc.update(dict(other_accs))
        metrics_dict = dict(chain.from_iterable(map(dict.items, [train_loss, train_acc, val_loss, val_acc])))
        # DATA and TENSORS preprocessing. We ensure that everything is a list for easier control.
        tr_data = tr_data if isinstance(tr_data, list) else [tr_data]
        tr_labels = tr_labels if isinstance(tr_labels, list) else [tr_labels]
        if val_data is None:
            seed = np.random.randint(np.iinfo(np.int32).max)
            tr_x, val_x = tuple(map(list, zip(*[val_split(tr_i, validation_split, seed) for tr_i in tr_data])))
            tr_y, val_y = tuple(map(list, zip(*[val_split(tr_i, validation_split, seed) for tr_i in tr_labels])))
        else:
            tr_x = tr_data
            tr_y = tr_labels
            val_x = val_data if isinstance(val_data, list) else [val_data]
            val_y = val_labels if isinstance(val_labels, list) else [val_labels]
        tensors = tensor_inputs + tensor_outputs
        # Preloop stuff.
        # Ceil division with // (the original used /, which yields a float on
        # Python 3 and makes range(n_batches) raise TypeError).
        n_batches = -(-len(tr_x[0]) // batch_size)
        val_batches = -(-len(val_x[0]) // batch_size)
        no_improvement = 0
        print_headers(dict(other_accs))
        # General timing
        t_start = time.time()
        i = -1  # keeps the summary below well-defined when epochs == 0
        for i in range(epochs):
            # Shuffle training data and prepare the variables to compute average loss/metric
            idx = np.random.permutation(len(tr_x[0]))
            x = [tr_data_i[idx, :] for tr_data_i in tr_x]
            y = [tr_labels_i[idx, :] for tr_labels_i in tr_y]
            acc_sum = 0
            loss_sum = 0
            # Epoch timing
            t_in = time.time()
            for step in range(n_batches):
                # Prepare the data dictionary for tensorflow
                step_init = step * batch_size
                step_end = step * batch_size + batch_size
                data = x + y
                tr_feed_dict = dict((t_i, v_i[step_init:step_end, :]) for t_i, v_i in zip(tensors, data))
                # Compute gradients, backpropagation and update weights using the optimizer
                Model.session.run(optimizer, feed_dict=tr_feed_dict)
                # Compute batch accuracy and loss and add it for the mean computation.
                # For "debugging" reasons we compute the average loss/metric for each step (that way
                # we can see the evolution per batch).
                curr_acc = Model.session.run(metrics, feed_dict=tr_feed_dict)
                curr_loss = Model.session.run(loss, feed_dict=tr_feed_dict)
                acc_sum += curr_acc
                loss_sum += curr_loss
                curr_values = (curr_loss, loss_sum / (step + 1), curr_acc, acc_sum / (step + 1))
                print_current(i, step, n_batches, curr_values)
            # Epoch loss/metric computation
            train_loss['train_loss'][1] = loss_sum / n_batches
            train_acc['train_acc'][1] = acc_sum / n_batches
            val_loss_sum = 0
            val_acc_sum = 0
            other_val_acc_sum = [0]*len(other_acc_names)
            for step in range(val_batches):
                step_init = step * batch_size
                step_end = step * batch_size + batch_size
                data = val_x + val_y
                val_feed_dict = dict((t_i, v_i[step_init:step_end, :]) for t_i, v_i in zip(tensors, data))
                val_loss_sum += Model.session.run(loss, feed_dict=val_feed_dict)
                val_acc_sum += Model.session.run(metrics, feed_dict=val_feed_dict)
                other_val_acc_sum = [acc_sum_i + Model.session.run(metric_i, feed_dict=val_feed_dict)
                                     for acc_sum_i, metric_i in zip(other_val_acc_sum, mid_metrics)]
            val_loss['val_loss'][1] = val_loss_sum / val_batches
            val_acc['val_acc'][1] = val_acc_sum / val_batches
            for acc_i, val_acc_i in zip(other_acc_names, other_val_acc_sum):
                val_acc[acc_i][1] = val_acc_i / val_batches
            print_metrics(i, train_loss, train_acc, val_loss, val_acc, time.time() - t_in)
            # We check if there was improvement and update the best parameters accordingly. Also, if patience is
            # specified we might apply early stopping.
            # We are enforcing a monitoring on a metric or loss (validation accuracy by default).
            if metrics_dict[monitor][2] != i:
                no_improvement += 1
            else:
                self._update_best_params()
                no_improvement = 0
            if no_improvement >= patience:
                break
        t_end = time.time() - t_start
        print('Training finished in %d epochs (%fs) with %s = %f (epoch %d)' %
              (i+1, t_end, monitor, metrics_dict[monitor][0], metrics_dict[monitor][2]))
        # Remember to update the best parameters
        for k, v in self.best_params.items():
            if self.layers_dict[k].W is not None:
                Model.session.run(self.layers_dict[k].W.assign(v[0]))
            if self.layers_dict[k].b is not None:
                Model.session.run(self.layers_dict[k].b.assign(v[1]))

    def predict(self, data, batch_size=32):
        """Run the forward pass in batches and return one array per output."""
        # DATA preparation
        model_outputs = self.outputs if isinstance(self.outputs, list) else [self.outputs]
        tensors = [i.output for i in self.inputs] if isinstance(self.inputs, list) else [self.inputs.output]
        # Preloop stuff
        data = data if isinstance(data, list) else [data]
        # Ceil division with // so range() receives an int on Python 3.
        n_batches = -(-len(data[0]) // batch_size)
        outputs = []
        for step in range(n_batches):
            # Prepare the data dictionary for tensorflow
            step_init = step * batch_size
            step_end = step * batch_size + batch_size
            feed_dict = dict((t_i, v_i[step_init:step_end, :]) for t_i, v_i in zip(tensors, data))
            outputs.append([Model.session.run(output_i.output, feed_dict=feed_dict) for output_i in model_outputs])
        return [np.concatenate(o_i) for o_i in zip(*outputs)]
|
# encoding: utf-8
from ckan.plugins import toolkit
from nose.tools import assert_in, assert_not_in
import ckan.model as model
try:
from ckan.tests import factories, helpers
except ImportError:
from ckan.new_tests import factories, helpers
from ckanext.pages import db
class TestPages(helpers.FunctionalTestBase):
    """Functional tests for ckanext-pages page/blog CRUD and rendering."""

    def setup(self):
        super(TestPages, self).setup()
        # Create the pages table lazily on first use
        if db.pages_table is None:
            db.init_db(model)
        self.user = factories.Sysadmin()
        self.app = self._get_test_app()

    def teardown(self):
        helpers.reset_db()

    def _env(self):
        # Every request is issued as the sysadmin created in setup()
        return {'REMOTE_USER': self.user['name'].encode('ascii')}

    def _create_page(self, page, params, env):
        # POST the edit form, then follow the redirect to the rendered page
        response = self.app.post(
            url=toolkit.url_for('pages_edit', page=page),
            params=params,
            extra_environ=env,
        )
        return response.follow(extra_environ=env)

    def _assert_index(self, url, heading, link_text):
        # Shared check for the pages/blog index views
        response = self.app.get(url, status=200, extra_environ=self._env())
        assert_in(heading, response.body)
        assert_in(link_text, response.body)

    def test_create_page(self):
        env = self._env()
        response = self._create_page('/test_page', {
            'title': 'Page Title',
            'name': 'page_name',
        }, env)
        assert_in('<h1 class="page-heading">Page Title</h1>', response.body)

    @helpers.change_config('ckanext.pages.allow_html', 'True')
    def test_rendering_with_html_allowed(self):
        env = self._env()
        response = self._create_page('/test_html_page', {
            'title': 'Allowed',
            'name': 'page_html_allowed',
            'content': '<a href="/test">Test Link</a>',
        }, env)
        assert_in('<h1 class="page-heading">Allowed</h1>', response.body)
        # Older CKAN strips markup even when HTML is allowed
        if toolkit.check_ckan_version(min_version='2.3'):
            assert_in('<a href="/test">Test Link</a>', response.body)
        else:
            assert_in('Test Link', response.body)

    @helpers.change_config('ckanext.pages.allow_html', False)
    def test_rendering_with_html_disallowed(self):
        env = self._env()
        response = self._create_page('/test_html_page', {
            'title': 'Disallowed',
            'name': 'page_html_disallowed',
            'content': '<a href="/test">Test Link</a>',
        }, env)
        assert_in('<h1 class="page-heading">Disallowed</h1>', response.body)
        # The link text survives but the markup must be stripped
        assert_in('Test Link', response.body)
        assert_not_in('<a href="/test">Test Link</a>', response.body)

    def test_pages_index(self):
        self._assert_index(
            toolkit.url_for('pages_index'), '<h2>Pages</h2>', 'Add page</a>')

    def test_blog_index(self):
        self._assert_index(
            toolkit.url_for('blog_index'), '<h2>Blog</h2>', 'Add Article</a>')

    def test_organization_pages_index(self):
        org = factories.Organization()
        self._assert_index(
            toolkit.url_for('organization_pages_index', id=org['id']),
            '<h2>Pages</h2>', 'Add page</a>')

    def test_group_pages_index(self):
        group = factories.Group()
        self._assert_index(
            toolkit.url_for('group_pages_index', id=group['id']),
            '<h2>Pages</h2>', 'Add page</a>')

    def test_unicode(self):
        env = self._env()
        response = self._create_page('/test_unicode_page', {
            'title': u'Tรฏtlรฉ'.encode('utf-8'),
            'name': 'page_unicode',
            'content': u'รรถรฑtรฉรฑt'.encode('utf-8'),
            'order': 1,
            'private': False,
        }, env)
        assert_in(u'<title>Tรฏtlรฉ - CKAN</title>', response.unicode_body)
        assert_in(u'<a href="/pages/page_unicode">Tรฏtlรฉ</a>', response.unicode_body)
        assert_in(u'<h1 class="page-heading">Tรฏtlรฉ</h1>', response.unicode_body)
        assert_in(u'<p>รรถรฑtรฉรฑt</p>', response.unicode_body)
|
# -*- coding: utf-8 -*-
import datetime
from roboarm import DeviceNotFound, Arm as RoboArm
from usb import USBError
import config
from constants import DEVICE_STATUS_KEY, STATUS_ON, STATUS_OFF, FETCH_TIMEOUT
from utils.kvs import Kvs
class DeviceFetchTimeout(Exception):
    """Raised when fetching the USB arm device exceeds the allowed time."""
class ArmDevice(object):
    """
    Wrapper around the robo-arm USB device.

    Tracks the device on/off status in the key-value store and (re)fetches
    the device handle, raising DeviceFetchTimeout when it cannot be obtained
    within FETCH_TIMEOUT seconds.
    """
    # Persisted on/off flag, shared via the KV store
    status = Kvs(DEVICE_STATUS_KEY)

    @classmethod
    def arm(cls):
        """Return an arm handle (None in debug mode, where no device is used)."""
        if config.DEVICE_DEBUG:
            return
        arm = cls.fetch_for_loop()
        return arm

    @classmethod
    def get_status(cls):
        """Refresh and return the stored device status as an int (1 on, 0 off)."""
        cls.fetch()
        if config.DEVICE_DEBUG:
            return True
        return int(cls.status.get())

    @classmethod
    def is_on(cls):
        """True when the device is currently reachable."""
        return bool(cls.get_status())

    @classmethod
    def fetch(cls):
        """
        get arm from usb device
        set arm on/off status

        :return: the arm handle, or None when the device is absent/unreadable
        """
        try:
            arm = RoboArm()
            cls.status.set(STATUS_ON)
            return arm
        except (USBError, DeviceNotFound):
            cls.status.set(STATUS_OFF)

    @classmethod
    def fetch_for_loop(cls):
        """
        refetch the device when error
        raise DeviceFetchTimeout when fetch timeout

        The original implementation compared ``datetime.now().second`` values
        (which wraps around at minute boundaries) and returned the first
        fetch() result unconditionally, so it never actually retried. Use a
        real deadline and retry until the device appears.
        """
        deadline = datetime.datetime.now() + datetime.timedelta(seconds=FETCH_TIMEOUT)
        while datetime.datetime.now() < deadline:
            arm = cls.fetch()
            if arm is not None:
                return arm
            # NOTE(review): busy-wait; consider a short sleep between retries.
        raise DeviceFetchTimeout
|
import unittest
import importlib.util
def module_from_file(module_name, file_path):
    """
    Load and return a module object directly from a file path.

    :param module_name: Name to register the module under.
    :param file_path: Path of the .py file to execute.
    :return: The fully executed module object.
    """
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
# Import the extractor under test directly from its source file
# (repo-relative path: tests are expected to run from this directory).
target_module = module_from_file("page_number_extractor", "../PdfExTools/page_number_extractor.py")
class TestPageNumberExtractor(unittest.TestCase):
    """
    Test PageNumberExtractor: compare the extracted
    {page-index: printed-page-number} mapping against known ground truth
    for a collection of sample PDFs.
    """

    def setUp(self):
        # A fresh extractor per test keeps cases independent
        self.extractor = target_module.PageNumberExtractor()

    def tearDown(self):
        return

    def _run_case(self, case_name, pdf_file, expected):
        # Shared driver: announce the case, extract, compare mappings
        print("\n" + case_name + "(): " + pdf_file)
        extracted = self.extractor.process(pdf_file)
        self.assertEqual(expected, extracted)

    def test_pdf1(self):
        self._run_case(
            "test_pdf1", r"./sample-pdfs/2-col-pubmed.pdf",
            {0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18, 8: 19, 9: 20, 10: 21, 11: 22, 12: 23})

    def test_pdf2(self):
        self._run_case(
            "test_pdf2", r"./sample-pdfs/2-col-pubmed-2.pdf",
            {0: 826, 1: 827, 2: 828, 3: 829, 4: 830, 5: 831, 6: 832, 7: 833, 8: 834, 9: 835})

    def test_pdf3(self):
        self._run_case(
            "test_pdf3", r"./sample-pdfs/ACL-P18-4005.pdf",
            {0: 25, 1: 26, 2: 27, 3: 28, 4: 29, 5: 30})

    def test_pdf4(self):
        self._run_case(
            "test_pdf4", r"./sample-pdfs/BTRR_Report.pdf",
            {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10})

    def test_pdf5(self):
        # Document with no extractable page numbers
        self._run_case("test_pdf5", r"./sample-pdfs/ICML.pdf", {})

    def test_pdf6(self):
        self._run_case(
            "test_pdf6", r"./sample-pdfs/KDD.pdf",
            {0: 120, 1: 121, 2: 122, 3: 123, 4: 124, 5: 125, 6: 126, 7: 127, 8: 128, 9: 129})

    def test_pdf7(self):
        # Front matter starts at -1/0 before the numbered body pages
        self._run_case(
            "test_pdf7", r"./sample-pdfs/NIH-Tech-Report_Aug2020.pdf",
            { 0: -1, 1: 0, 2: 1,3: 2,4: 3, 5: 4, 6: 5, 7: 6,8: 7, 9: 8, 10: 9, 11: 10, 12: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 26: 25, 27: 26, 28: 27, 29: 28, 30: 29, 31: 30, 32: 31, 33: 32, 34: 33, 35: 34, 36: 35, 37: 36, 38: 37, 39: 38 })

    def test_pdf8(self):
        self._run_case(
            "test_pdf8", r"./sample-pdfs/EMCM.pdf",
            {0: 31, 1: 32, 2: 33, 3: 34})

    def test_pdf9(self):
        # Consecutive numbering 171..209 mapped from page index 0 upwards
        self._run_case(
            "test_pdf9", r"./sample-pdfs/BigDataSurvey2014.pdf",
            {i - 171: i for i in range(171, 210)})

    def test_pdf10(self):
        # Document with no extractable page numbers
        self._run_case("test_pdf10", r"./sample-pdfs/BioTextMining-Survey-2017.pdf", {})
if __name__ == '__main__':
    # Direct invocation runs the whole suite (see usage note below).
    unittest.main()
# run tests from cmd-line:
# python test_pgn_extractor.py -v
|
import re
from django.conf import settings
from rest_framework import serializers
from apps.users.models import User
class SimpleUserSerializer(serializers.ModelSerializer):
    """Compact user representation: id, display name and email only."""
    class Meta:
        model = User
        fields = ['id', 'name', 'email']
class UserSerializer(serializers.ModelSerializer):
    """Full read serializer for User, including username and join date."""
    class Meta:
        model = User
        fields = ['id', 'date_joined', 'name', 'username', 'email']
class UserRegisterSerializer(serializers.ModelSerializer):
password = serializers.CharField(min_length=6, write_only=True)
email = serializers.EmailField(allow_blank=False, required=True)
class Meta:
model = User
fields = ('username', 'name', 'password', 'email')
def validate_username(self, username):
code_regex = re.compile('[a-zA-Z|0-9|\-_]') # ์์ด + ์ซ์ + -,_
if code_regex.sub('', username):
raise serializers.ValidationError('์ ํจํ์ง ์์ ์ ๊ท์์
๋๋ค.', 'regex_error')
return username
def validate(self, data):
try:
user = User.objects.filter(username=data.get('username'))
if len(user) > 0:
raise serializers.ValidationError("Username already exists")
except User.DoesNotExist:
pass
if User.objects.filter(email=data.get('email')).exists():
raise serializers.ValidationError("email already exists")
return data
def create(self, validated_data):
instance = User.objects.create_user(**validated_data)
return instance
class PasswordFindSerializer(serializers.Serializer):
    """Payload for password recovery: the account's email and username."""
    email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
    username = serializers.CharField(max_length=150, required=True)

    class Meta:
        # NOTE(review): Meta is ignored by plain Serializer subclasses;
        # kept only as documentation of the expected field set.
        fields = ('email', 'username')
class IDFindSerializer(serializers.Serializer):
    """Payload for username (ID) recovery: the account's email and real name."""
    email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
    name = serializers.CharField(max_length=50, required=True)

    class Meta:
        # NOTE(review): Meta is ignored by plain Serializer subclasses;
        # kept only as documentation of the expected field set.
        fields = ('email', 'name')
class SNSLoginSerializer(serializers.Serializer):
    """Payload for social (SNS) login: provider type, email and name."""
    type = serializers.ChoiceField(choices=['apple', 'kakao'], required=True)
    email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
    name = serializers.CharField(max_length=50, required=True)

    class Meta:
        # NOTE(review): 'unique_id' is listed here but no such field is
        # declared above; Meta is ignored by plain Serializer anyway — confirm
        # whether a unique_id field was intended.
        fields = ['type', 'unique_id', 'email', 'name']
class SNSUserPasswordSerializer(serializers.ModelSerializer):
    """Derives the deterministic password used for SNS-authenticated users.

    The password is recomputable on every SNS login: mailbox local part +
    server-side secret key + join date (YYMMDD).
    """

    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'date_joined']

    def to_representation(self, user):
        # assumes user.email contains an '@' — TODO confirm upstream validation
        mailbox = user.email.split('@')[0]
        joined = user.date_joined.strftime('%y%m%d')
        new_password = '{}{}{}'.format(mailbox, settings.SNS_AUTH_USER_KEY, joined)
        return {
            'username': user.username,
            'password': new_password
        }
|
import tensorflow as tf
from agents.network.base_network import BaseNetwork
import numpy as np
from scipy.stats import norm
from utils.boundedvar_gaussian_mixture import BoundedVarGaussianMixture
class QTOPTNetwork(BaseNetwork):
    """QT-Opt style Q-network (TensorFlow 1.x graph mode).

    Builds an online and a target Q(s, a) critic together with their
    soft-update ops, and selects actions by cross-entropy-method (CEM)
    optimization over a Gaussian-mixture proposal distribution fitted to
    the top-m scoring action samples.
    """
    def __init__(self, sess, input_norm, config):
        super(QTOPTNetwork, self).__init__(sess, config, config.qnet_lr)
        # Private numpy RNG so CEM sampling is reproducible per seed.
        self.rng = np.random.RandomState(config.random_seed)
        # Hidden layer sizes of the critic.
        self.l1 = config.qnet_l1_dim
        self.l2 = config.qnet_l2_dim
        # CEM hyper-parameters: iterations, candidates per state, elite count.
        self.num_iter = config.num_iter
        self.num_samples = config.num_samples
        self.top_m = config.top_m
        self.input_norm = input_norm
        # Number of mixture components in the CEM proposal distribution.
        self.num_modal = config.num_modal
        # Q network
        self.inputs, self.phase, self.action, self.outputs = self.build_network(scope_name='qnet')
        self.net_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='qnet')
        # Target network
        self.target_inputs, self.target_phase, self.target_action, self.target_outputs = self.build_network(scope_name='target_qnet')
        self.target_net_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_qnet')
        # Network target (y_i)
        # Obtained from the target networks
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
        # Op for periodically updating target network with online network weights
        # (Polyak averaging: target += tau * (online - target)).
        self.update_target_net_params = [tf.assign_add(self.target_net_params[idx], self.tau * (self.net_params[idx] - self.target_net_params[idx])) for idx in range(len(self.target_net_params))]
        # Op for init. target network with identical parameter as the original network
        self.init_target_net_params = [tf.assign(self.target_net_params[idx], self.net_params[idx]) for idx in range(len(self.target_net_params))]
        if self.norm_type == 'batch':
            # Batchnorm Ops and Vars
            self.batchnorm_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='qnet/batchnorm')
            self.target_batchnorm_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_qnet/batchnorm')
            self.batchnorm_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='qnet/batchnorm')
            self.target_batchnorm_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='target_qnet/batchnorm')
            # Hard-copy only the moving statistics; the learned scale/offset
            # follow the trainable-variable soft update above.
            self.update_target_batchnorm_params = [tf.assign(self.target_batchnorm_vars[idx], self.batchnorm_vars[idx])
                                                   for idx in range(len(self.target_batchnorm_vars))
                                                   if self.target_batchnorm_vars[idx].name.endswith('moving_mean:0')
                                                   or self.target_batchnorm_vars[idx].name.endswith('moving_variance:0')]
        else:
            assert (self.norm_type == 'none' or self.norm_type == 'layer' or self.norm_type == 'input_norm')
            self.batchnorm_ops = [tf.no_op()]
            self.update_target_batchnorm_params = tf.no_op()
        # Define loss and optimization Op
        # control_dependencies ensures batchnorm statistics update with each
        # training step when norm_type == 'batch'.
        with tf.control_dependencies(self.batchnorm_ops):
            self.loss = tf.reduce_mean(tf.squared_difference(self.predicted_q_value, self.outputs))
            self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

    def build_network(self, scope_name):
        """Create placeholders and the critic graph under ``scope_name``.

        Returns the tensors (inputs, phase, action, outputs).
        """
        with tf.variable_scope(scope_name):
            inputs = tf.placeholder(tf.float32, shape=(None,self.state_dim))
            phase = tf.placeholder(tf.bool)
            action = tf.placeholder(tf.float32, [None, self.action_dim])
            # normalize state inputs if using "input_norm" or "layer" or "batch"
            if self.norm_type != 'none':
                inputs = tf.clip_by_value(self.input_norm.normalize(inputs), self.state_min, self.state_max)
            outputs = self.network(inputs, action, phase)
        return inputs, phase, action, outputs

    def network(self, inputs, action, phase):
        """Two-hidden-layer critic; the action is concatenated in after the
        first hidden layer; returns a scalar Q-value per row."""
        # 1st fc
        net = tf.contrib.layers.fully_connected(inputs, self.l1, activation_fn=None,
                                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_IN", uniform=True),
                                                weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                                biases_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_IN", uniform=True))
        net = self.apply_norm(net, activation_fn=tf.nn.relu, phase=phase, layer_num=1)
        # 2nd fc
        net = tf.contrib.layers.fully_connected(tf.concat([net, action], 1), self.l2, activation_fn=None,
                                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_IN", uniform=True),
                                                weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                                biases_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_IN", uniform=True))
        net = self.apply_norm(net, activation_fn=tf.nn.relu, phase=phase, layer_num=2)
        # Final linear layer with a small uniform init for stable early Q-values.
        outputs = tf.contrib.layers.fully_connected(net, 1, activation_fn=None,
                                                    weights_initializer=tf.random_uniform_initializer(-3e-3, 3e-3),
                                                    weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                                    biases_initializer=tf.random_uniform_initializer(-3e-3, 3e-3))
        return outputs

    def predict_q(self, *args):
        """Evaluate the online critic. args = (inputs, action, phase)."""
        # args (inputs, action, phase)
        inputs = args[0]
        action = args[1]
        phase = args[2]
        return self.sess.run(self.outputs, feed_dict={
            self.inputs: inputs,
            self.action: action,
            self.phase: phase
        })

    def predict_q_target(self, *args):
        """Evaluate the target critic. args = (inputs, action, phase)."""
        # args (inputs, action, phase)
        inputs = args[0]
        action = args[1]
        phase = args[2]
        return self.sess.run(self.target_outputs, feed_dict={
            self.target_inputs: inputs,
            self.target_action: action,
            self.target_phase: phase
        })

    def iterate_cem_multidim(self, state_batch):
        """Run CEM over actions for each state in ``state_batch``.

        Iteration 0 samples uniformly inside the action bounds; later
        iterations sample from the Gaussian mixture fitted to the previous
        top-m elites.  Returns the list of fitted mixtures (one per state).
        """
        batch_size = len(state_batch)
        action_samples_batch = None
        gmm_batch = None
        ## stack states
        # Each state is repeated num_samples times so one predict_q call
        # scores every (state, candidate-action) pair.
        stacked_state_batch = np.array(
            [np.tile(state, (self.num_samples, 1)) for state in state_batch])  # batch_size x self.num_samples x state_dim
        stacked_state_batch = np.reshape(stacked_state_batch, (batch_size * self.num_samples, self.state_dim))
        for i in range(self.num_iter):
            # sample batch_num x num_samples: (n,64)
            if action_samples_batch is None and gmm_batch is None:
                action_samples_batch = self.rng.uniform(self.action_min, self.action_max, size=(batch_size, self.num_samples, self.action_dim))
            else:
                # single gaussian (deprecated)
                # action_samples_batch = np.array([self.rng.multivariate_normal(mean, std, size=self.num_samples) for (mean, std) in mean_std_arr])
                # gaussian mixture
                # action_samples_batch = np.array([self.rng.])
                action_samples_batch = np.array([gmm.sample(n_samples=self.num_samples)[0] for gmm in gmm_batch])
            # evaluate Q-val
            ## reshape action samples
            action_samples_batch_reshaped = np.reshape(action_samples_batch, (batch_size * self.num_samples, self.action_dim))
            q_val = self.predict_q(stacked_state_batch, action_samples_batch_reshaped, True)
            q_val = np.reshape(q_val, (batch_size, self.num_samples))
            # select top-m (indices of the m highest Q-values per state)
            selected_idxs = list(map(lambda x: x.argsort()[::-1][:self.top_m], q_val))
            selected_action_samples_batch = np.array(
                [action_samples_for_state[selected_idx_for_state] for action_samples_for_state, selected_idx_for_state
                 in zip(action_samples_batch, selected_idxs)])
            # fit gaussian mixture
            gmm_batch = [BoundedVarGaussianMixture(n_components=self.num_modal, random_state=self.rng, covariance_type="diag", tol=1e-2).fit(action_samples) for action_samples in selected_action_samples_batch]
        return gmm_batch

    def predict_action(self, state_batch):
        """Greedy action per state: mean of the heaviest mixture component."""
        gmm_batch = self.iterate_cem_multidim(state_batch)
        final_action_mean_batch = np.array([gmm.means_[np.argmax(gmm.weights_)] for gmm in gmm_batch])
        return final_action_mean_batch

    def sample_action(self, state_batch):
        """Exploratory action per state: one draw from the fitted mixture.

        Also returns the greedy means and the raw (weights, means, covars)
        tuples for diagnostics/plotting.
        """
        gmm_batch = self.iterate_cem_multidim(state_batch)
        final_action_samples_batch = np.array([gmm.sample(n_samples=1)[0] for gmm in gmm_batch])
        final_action_mean_batch = np.array([gmm.means_[np.argmax(gmm.weights_)] for gmm in gmm_batch])
        weight_mean_var_arr = [(gmm.weights_, gmm.means_, gmm.covariances_) for gmm in gmm_batch]
        return final_action_samples_batch, final_action_mean_batch, weight_mean_var_arr

    def train(self, *args):
        """One gradient step. args = (inputs, action, predicted_q_value)."""
        # args (inputs, action, predicted_q_value, phase)
        return self.sess.run([self.outputs, self.optimize], feed_dict={
            self.inputs: args[0],
            self.action: args[1],
            self.predicted_q_value: args[2],
            self.phase: True
        })

    def init_target_network(self):
        """Hard-copy online weights into the target network."""
        self.sess.run(self.init_target_net_params)

    def update_target_network(self):
        """Soft (Polyak) update of target weights and batchnorm statistics."""
        self.sess.run([self.update_target_net_params, self.update_target_batchnorm_params])

    def print_variables(self, variable_list):
        """Debug helper: print name, shape and value of each variable."""
        variable_names = [v.name for v in variable_list]
        values = self.sess.run(variable_names)
        for k, v in zip(variable_names, values):
            print("Variable: ", k)
            print("Shape: ", v.shape)
            print(v)

    def getQFunction(self, state):
        """Return a callable action -> Q(state, action) for plotting.

        NOTE(review): ``[action]`` wrapping suggests this assumes a
        1-dimensional action space — confirm before reuse.
        """
        return lambda action: self.sess.run(self.outputs, {self.inputs: np.expand_dims(state, 0),
                                                           self.action: np.expand_dims([action], 0),
                                                           self.phase: False})

    def getPolicyFunction(self, weight_mean_var_arr):
        """Return the mixture pdf over (1-D) actions for plotting.

        assumes diagonal covariances and a scalar action dimension — the
        squeeze(axis=1) calls fail otherwise; TODO confirm.
        """
        weight, mean, var = weight_mean_var_arr
        mean = np.squeeze(mean, axis=1)
        var = np.squeeze(var, axis=1)
        if len(weight) == len(mean) == len(var) == 2:
            # Two-component mixture: weighted sum of the component pdfs.
            return lambda action: np.sum(weight * np.multiply(np.sqrt(1.0 / (2 * np.pi * var)), np.exp(
                -np.square(action - mean) / (2.0 * var))))
        else:
            # Otherwise plot only the first component's pdf.
            return lambda action: np.multiply(
                np.sqrt(1.0 / (2 * np.pi * var[0])),
                np.exp(-np.square(action - mean[0]) / (2.0 * var[0])))
|
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules import Conv1d, Linear, Dropout, Conv2d
import torch.nn.functional as F
from collections import OrderedDict
from torch.autograd import Variable
class Filtration(nn.Module):
    """Two-stage learned convolutional filter over a 1-channel sinogram image.

    Expands the single input channel to ``hidden_dim`` feature maps and then
    projects back down to one channel.  ``proj_vector_length`` and
    ``n_sino_angles`` are recorded but not used by the forward pass itself.
    """

    def __init__(self, proj_vector_length, n_sino_angles, kernel_size, hidden_dim):
        super(Filtration, self).__init__()
        self.n_angles = n_sino_angles
        self.proj_len = proj_vector_length
        # Only the first stage pads (padding=2); with kernel_size=5 the two
        # stages together preserve the spatial size.
        self.filter1 = Conv2d(1, hidden_dim, kernel_size, padding=2)
        self.filter2 = Conv2d(hidden_dim, 1, kernel_size)

    def forward(self, x):
        return self.filter2(self.filter1(x))
class PositionwiseFeedForward(nn.Module):
    """Transformer-style position-wise MLP applied over the angle axis.

    The input's last two axes are swapped first, so the linear layers act on
    an axis of size ``n_angle``; output's last axis has size ``d_len``.
    Note the axes are not swapped back afterwards.
    """

    def __init__(self, d_len, n_angle, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(n_angle, d_ff)
        self.w_2 = nn.Linear(d_ff, d_len)
        self.dropout = Dropout(dropout)

    def forward(self, x):
        swapped = x.permute(0, 1, 3, 2)
        hidden = self.dropout(F.relu(self.w_1(swapped)))
        return self.w_2(hidden)
class ResidualConv(nn.Module):
    """Pre-activation residual block: (BN-ReLU-Conv) x2 plus a projection skip."""

    def __init__(self, input_dim, output_dim, stride, padding):
        super(ResidualConv, self).__init__()
        # Main path: two 3x3 convolutions, each preceded by BN + ReLU; the
        # first one performs the (optional) striding.
        self.conv_block = nn.Sequential(
            nn.BatchNorm2d(input_dim),
            nn.ReLU(),
            nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=padding),
            nn.BatchNorm2d(output_dim),
            nn.ReLU(),
            nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),
        )
        # Skip path: strided 3x3 conv + BN so shape/channels match the main path.
        self.conv_skip = nn.Sequential(
            nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(output_dim),
        )

    def forward(self, x):
        shortcut = self.conv_skip(x)
        return self.conv_block(x) + shortcut
class Upsample(nn.Module):
    """Learnable upsampling implemented with a transposed convolution."""

    def __init__(self, input_dim, output_dim, kernel, stride):
        super(Upsample, self).__init__()
        self.upsample = nn.ConvTranspose2d(input_dim, output_dim,
                                           kernel_size=kernel, stride=stride)

    def forward(self, x):
        return self.upsample(x)
class Squeeze_Excite_Block(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel, passes the resulting channel vector
    through a bottleneck MLP ending in a sigmoid gate, and rescales the
    input channels by the gate values.  Output shape equals input shape.
    """

    def __init__(self, channel, reduction=16):
        super(Squeeze_Excite_Block, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Fixed: x is NCHW (4-D); the original `b, c, _ = x.size()` tried to
        # unpack four dimensions into three names and raised ValueError.
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling.

    Three parallel dilated 3x3 convolution branches (dilations from ``rate``)
    are concatenated along the channel axis and fused by a 1x1 convolution.
    Spatial size is preserved because padding equals dilation in each branch.
    """

    def __init__(self, in_dims, out_dims, rate=[6, 12, 18]):
        super(ASPP, self).__init__()
        self.aspp_block1 = nn.Sequential(
            nn.Conv2d(
                in_dims, out_dims, 3, stride=1, padding=rate[0], dilation=rate[0]
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(out_dims)
        )
        self.aspp_block2 = nn.Sequential(
            nn.Conv2d(
                in_dims, out_dims, 3, stride=1, padding=rate[1], dilation=rate[1]
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(out_dims)
        )
        self.aspp_block3 = nn.Sequential(
            nn.Conv2d(
                in_dims, out_dims, 3, stride=1, padding=rate[2], dilation=rate[2]
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(out_dims)
        )
        # Fuses the concatenated branch outputs back down to out_dims channels.
        self.output = nn.Conv2d(len(rate) * out_dims, out_dims, 1)
        self._init_weights()

    def _init_weights(self):
        # Kaiming init for convs; identity-like init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        x1 = self.aspp_block1(x)
        # Fixed: branch 2 previously reused aspp_block1, leaving aspp_block2
        # defined but never used.
        x2 = self.aspp_block2(x)
        x3 = self.aspp_block3(x)
        # Fixed: concatenate along channels (dim=1); the default dim=0 stacked
        # along the batch axis, which broke the fusion conv's channel count.
        out = torch.cat([x1, x2, x3], dim=1)
        return self.output(out)
class Upsample_(nn.Module):
    """Parameter-free bilinear upsampling by a fixed integer scale factor."""

    def __init__(self, scale=2):
        super(Upsample_, self).__init__()
        self.upsample = nn.Upsample(mode="bilinear", scale_factor=scale)

    def forward(self, x):
        return self.upsample(x)
class AttentionBlock(nn.Module):
    """Additive attention gate combining an encoder map x1 with a decoder map x2.

    x1 is normalized, convolved and max-pooled (so its spatial size must be
    twice x2's), summed with a transformed x2, squashed to a single-channel
    attention map, and used to scale x2.

    NOTE(review): ``self.conv_decoder`` is built but never used in forward();
    forward applies ``conv_encoder`` to BOTH x1 and x2 — possibly intended to
    be ``conv_decoder(x2)``.  Also conv_decoder's BatchNorm expects
    ``input_decoder`` channels while its Conv2d expects ``output_dim`` input
    channels — these are inconsistent; confirm the intended design before use.
    """
    def __init__(self, input_encoder, input_decoder, output_dim):
        super(AttentionBlock, self).__init__()
        # Encoder branch: BN -> ReLU -> 3x3 conv -> 2x2 max-pool (halves H, W).
        self.conv_encoder = nn.Sequential(
            nn.BatchNorm2d(input_encoder),
            nn.ReLU(),
            nn.Conv2d(input_encoder, output_dim, 3, padding=1),
            nn.MaxPool2d(2,2)
        )
        # Decoder branch (currently unused — see class NOTE above).
        self.conv_decoder = nn.Sequential(
            nn.BatchNorm2d(input_decoder),
            nn.ReLU(),
            nn.Conv2d(output_dim, 1, 1)
        )
        # Collapses the combined features to a 1-channel attention map.
        self.conv_atten = nn.Sequential(
            nn.BatchNorm2d(output_dim),
            nn.ReLU(),
            nn.Conv2d(output_dim, 1, 1)
        )
    def forward(self, x1, x2):
        out = self.conv_encoder(x1) + self.conv_encoder(x2)
        out = self.conv_atten(out)
        # Broadcast the 1-channel attention map across x2's channels.
        return out * x2
class ResUnet(nn.Module):
    """Residual U-Net: 3-level residual encoder, bridge, and 3-level decoder
    with skip connections; emits a sigmoid-activated single-channel map."""

    def __init__(self, channel, filters=[64, 128, 256, 512]):
        super(ResUnet, self).__init__()
        # Stem: plain conv stack plus a parallel conv skip (residual stem).
        self.input_layer = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1),
            nn.BatchNorm2d(filters[0]),
            nn.ReLU(),
            nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),
        )
        self.input_skip = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1)
        )
        # Encoder: each block halves the spatial size (stride 2).
        self.residual_conv_1 = ResidualConv(filters[0], filters[1], 2, 1)
        self.residual_conv_2 = ResidualConv(filters[1], filters[2], 2, 1)
        self.bridge = ResidualConv(filters[2], filters[3], 2, 1)
        # Decoder: transposed-conv upsampling followed by residual fusion of
        # the concatenated skip connection.
        self.upsample_1 = Upsample(filters[3], filters[3], 2, 2)
        self.up_residual_conv1 = ResidualConv(filters[3] + filters[2], filters[2], 1, 1)
        self.upsample_2 = Upsample(filters[2], filters[2], 2, 2)
        self.up_residual_conv2 = ResidualConv(filters[2] + filters[1], filters[1], 1, 1)
        self.upsample_3 = Upsample(filters[1], filters[1], 2, 2)
        self.up_residual_conv3 = ResidualConv(filters[1] + filters[0], filters[0], 1, 1)
        # Head: 1x1 conv to one channel, squashed to [0, 1].
        self.output_layer = nn.Sequential(
            nn.Conv2d(filters[0], 1, 1, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # --- Encoder ---
        enc1 = self.input_layer(x) + self.input_skip(x)
        enc2 = self.residual_conv_1(enc1)
        enc3 = self.residual_conv_2(enc2)
        # --- Bridge ---
        dec = self.bridge(enc3)
        # --- Decoder with skip connections ---
        dec = self.upsample_1(dec)
        dec = self.up_residual_conv1(torch.cat([dec, enc3], dim=1))
        dec = self.upsample_2(dec)
        dec = self.up_residual_conv2(torch.cat([dec, enc2], dim=1))
        dec = self.upsample_3(dec)
        dec = self.up_residual_conv3(torch.cat([dec, enc1], dim=1))
        return self.output_layer(dec)
|
from setuptools import setup

# Minimal packaging metadata for the ``butterfingers`` typo-generation
# (fuzz-testing) library.
setup(name='butterfingers',
      version='0.1',
      description='Package to generate highly realistic typos (fuzz-testing)',
      author='',  # TODO: fill in author/license metadata before publishing
      author_email='',
      license='',
      packages=['butterfingers'],
      zip_safe=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2014 Ronan Delacroix
Doremi API Requests definition
:author: Ronan Delacroix
"""
import sys
from tbx.bytes import *
from .message import MessageListWrapper, MessageDefinition as M, ResponseElement as E, ResponseBatch as Batch
# Response message definitions: each M(...) maps a command name/code to a list
# of E(name, start, end, converter) byte-slice parsers (and optional value
# maps).  All converters here parse *response* bytes, so they must be of the
# bytes_to_* family; the *_to_bytes serializer functions found in a few
# entries were direction bugs and have been fixed below.
RESPONSES = (
    # CPL
    M('GetCPLList', '010200', [
        E('amount', 0, 4, bytes_to_int),
        E('item_length', 4, 8, bytes_to_int),
        E('list', 8, -1, bytes_to_uuid_list),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCPLInfo', '010400', [
        E('cpl_uuid', 0, 16, bytes_to_uuid),
        E('storage', 16, 17, bytes_to_int, {1: 'local', 2: 'remote', 3: 'local+remote'}),
        E('content_title_text', 17, 145, bytes_to_text),
        E('content_kind', 145, 146, bytes_to_int, {
            0: 'Unknown',
            1: 'Feature',
            2: 'Trailer',
            3: 'Test',
            4: 'Teaser',
            5: 'Rating',
            6: 'Advertisement',
            7: 'Short',
            8: 'Transitional',
            9: 'PSA',
            10: 'Policy',
            128: 'Live CPL',
        }),
        E('duration', 146, 150, bytes_to_int),
        E('edit_rate_a', 150, 154, bytes_to_int),
        E('edit_rate_b', 154, 158, bytes_to_int),
        E('picture_encoding', 158, 159, bytes_to_int, {0: 'Unknown', 1: 'MPEG2', 2: 'JPEG2000', 3: 'Audio PCM'}),
        E('picture_width', 159, 161, bytes_to_int),
        E('picture_height', 161, 163, bytes_to_int),
        E('picture_encryption', 163, 164, bytes_to_int, {0: 'No Encryption', 1: 'AES 128 CBC'}),
        E('sound_encoding', 164, 165, bytes_to_int, {0: 'Unknown', 1: 'MPEG2', 2: 'JPEG2000', 3: 'Audio PCM'}),
        E('sound_channel_count', 165, 166, bytes_to_int),
        E('sound_quantization_bits', 166, 167, bytes_to_int),
        E('sound_encryption', 167, 168, bytes_to_int, {0: 'No Encryption', 1: 'AES 128 CBC'}),
        E('crypto_key_id_list', 176, -1, bytes_to_uuid_list),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCPLInfo2', '010401', [
        E('cpl_uuid', 0, 16, bytes_to_uuid),
        E('storage', 16, 17, bytes_to_int, {1: 'local', 2: 'remote', 3: 'local+remote'}),
        E('content_title_text', 17, 145, bytes_to_text),
        E('content_kind', 145, 146, bytes_to_int, {
            0: 'Unknown',
            1: 'Feature',
            2: 'Trailer',
            3: 'Test',
            4: 'Teaser',
            5: 'Rating',
            6: 'Advertisement',
            7: 'Short',
            8: 'Transitional',
            9: 'PSA',
            10: 'Policy',
            128: 'Live CPL',
        }),
        E('duration', 146, 150, bytes_to_int),
        E('edit_rate_a', 150, 154, bytes_to_int),
        E('edit_rate_b', 154, 158, bytes_to_int),
        E('picture_encoding', 158, 159, bytes_to_int, {0: 'Unknown', 1: 'MPEG2', 2: 'JPEG2000', 3: 'Audio PCM'}),
        E('picture_width', 159, 161, bytes_to_int),
        E('picture_height', 161, 163, bytes_to_int),
        E('picture_encryption', 163, 164, bytes_to_int, {0: 'No Encryption', 1: 'AES 128 CBC'}),
        E('sound_encoding', 164, 165, bytes_to_int, {0: 'Unknown', 1: 'MPEG2', 2: 'JPEG2000', 3: 'Audio PCM'}),
        E('sound_channel_count', 165, 166, bytes_to_int),
        E('sound_quantization_bits', 166, 167, bytes_to_int),
        E('sound_encryption', 167, 168, bytes_to_int, {0: 'No Encryption', 1: 'AES 128 CBC'}),
        E('crypto_key_id_list', 176, -55, bytes_to_uuid_list),
        E('schemas', -55, -54, bytes_to_int, {0: 'Unknown', 1: 'Digicine (Interop)', 2: 'SMPTE'}),
        E('stream_type', -54, -53, bytes_to_int, {0: 'None', 1: 'FTP Stream', 2: 'FTP Stream + Ingest'}),
        E('complete', -53, -52, bytes_to_int),
        E('frame_per_edit', -52, -51, bytes_to_int),
        E('reserved2', -51, -49, bytes_to_int),
        E('frame_rate_a', -49, -45, bytes_to_int),
        E('frame_rate_b', -45, -41, bytes_to_int),
        E('sound_sample_rate_a', -41, -37, bytes_to_int),
        E('sound_sample_rate_b', -37, -33, bytes_to_int),
        E('sound_sampling_rate_a', -33, -29, bytes_to_int),
        E('sound_sampling_rate_b', -29, -25, bytes_to_int),
        E('content_version_id', -25, -9, bytes_to_uuid),
        E('properties1', -9, -5, bytes_to_int),
        E('unknown_field', -5, -1, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('DeleteCPL', '010600', [
        E('response', -1, None, bytes_to_int),
    ]),
    M('StoreCPL', '010A00', [
        E('response', -1, None, bytes_to_int),
    ]),
    M('RetrieveCPL', '010800', [
        E('xml', 0, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('ValidateCPL', '010C00', [
        E('result', 0, 1, bytes_to_int),
        E('error_code', 1, 2, bytes_to_int, {
            0: 'No Error nor warning',
            1: 'CPL is not registered on this server',
            2: 'CPL is partially registered on this server',
            3: 'CPL is registered on this server but cannot be loaded',
            4: 'CPL requires at least one KDL to play; no KDM found',
            5: 'CPL requires at least one KDL to play; out-dated KDM found',
            6: 'CPL requires at least one KDL to play; KDM built with a wrong certificate',
            7: 'CPL requires at least one KDL to play; all KDM are rejected (the RTC is no longer secured)',
            8: 'CPL requires at least one KDL to play; all KDM are rejected (playback of protected content is forbidden)',
            9: 'CPL requires at least one KDL to play; KDM with invalid content authenticator found',
            10: 'CPL signature check failed',
            255: 'Out of memory',
        }),
        E('error_message', 2, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCPLSize', '010E00', [
        E('size', 0, 8, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCPLMarker', '011000', [
        Batch('markers', 0, -1, [
            E('label', 0, 16, bytes_to_text),
            E('offset', 16, 20, bytes_to_int),
        ]),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCPLPlayStat', '011200', [
        E('error_code', 0, 4, bytes_to_int),
        Batch('markers', 4, -1, [
            E('uuid', 0, 16, bytes_to_uuid),
            E('last_play', 16, 48, bytes_to_text),
        ]),
    ]),
    # KDM
    M('GetKDMList', '020200', [ #TODO : Test
        E('amount', 0, 4, bytes_to_int),
        E('item_length', 4, 8, bytes_to_int),
        E('list', 8, -1, bytes_to_uuid_list),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetKDMInfo', '020400', [ #TODO : Test
        E('kdm_uuid', 0, 16, bytes_to_uuid),
        E('cpl_uuid', 16, 32, bytes_to_uuid),
        E('not_valid_before', 32, 40, bytes_to_int),
        E('not_valid_after', 40, 48, bytes_to_int),
        E('key_id_list', 56, -1, bytes_to_uuid_list),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetKDMInfo2', '020401', [ #TODO : Test
        E('kdm_uuid', 0, 16, bytes_to_uuid),
        E('cpl_uuid', 16, 32, bytes_to_uuid),
        E('not_valid_before', 32, 40, bytes_to_int),
        E('not_valid_after', 40, 48, bytes_to_int),
        E('key_id_list', 56, -293, bytes_to_uuid_list),
        E('forensic_picture_disable', -293, -292, bytes_to_int),
        E('forensic_audio_disable', -292, -291, bytes_to_int),
        E('reserved0', -291, -290, bytes_to_int),
        E('content_authenticator_length', -290, -289, bytes_to_int),
        E('content_authenticator', -289, -257, bytes_to_text),
        E('x509_subject_name', -257, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    # MISC
    M('GetTimeZone', '052000', [ # BGI
        E('timezone', 0, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('WhoAmI', '0E0C00', [ # BGI
        E('username', 0, 16, bytes_to_text),
        E('dci_level', 16, -1, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    # LOGS
    M('GetLog', '110200', [ # BGI
        E('errorcode', 0, 1, bytes_to_int),
        # NOTE(review): the two reserved slices below are zero-width
        # ((1,1) and (2,2)); possibly meant to be (1,2) and (2,4) given
        # that 'xml' starts at offset 4 — confirm against the protocol spec.
        E('reserved0', 1, 1, bytes_to_int),
        E('reserved1', 2, 2, bytes_to_int),
        E('xml', 4, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetLogLastId', '110400', [ # BGI
        # fixed: was bytes_to_text; errorcode is a 1-byte integer (cf. GetLog)
        E('errorcode', 0, 1, bytes_to_int),
        E('reserved0', 1, 1, bytes_to_int),
        E('reserved1', 2, 2, bytes_to_int),
        E('last_id', 4, -1, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    # SPL
    M('GetSPLList', '030200', [
        E('amount', 0, 4, bytes_to_int),
        E('item_length', 4, 8, bytes_to_int),
        E('list', 8, -1, bytes_to_uuid_list),
        E('response', -1, None, bytes_to_int),
    ]),
    M('StoreSPL', '032000', [
        E('response', -1, None, bytes_to_int),
    ]),
    M('ValidateSPL', '032600', [
        E('result', 0, 1, bytes_to_int),
        E('error_code', 1, 2, bytes_to_int, {
            0: 'No Error nor warning',
            1: 'SPL is not registered on this server',
            # NOTE(review): codes 1 and 2 carry the same text; by analogy
            # with ValidateCPL, code 2 may have been meant to read
            # 'SPL is partially registered on this server' — confirm.
            2: 'SPL is not registered on this server',
            3: 'SPL is registered on this server but cannot be loaded',
            255: 'Out of memory',
        }),
        # fixed: was uuid_to_bytes (serializer direction) — responses are
        # parsed with bytes_to_* converters.
        E('cpl_id', 2, 18, bytes_to_uuid),
        E('error_message', 18, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('PlaySPL', '030C00', [ # BGI
        E('response', -1, None, bytes_to_int),
    ]),
    M('PauseSPL', '030E00', [ # BGI
        E('response', -1, None, bytes_to_int),
    ]),
    M('StatusSPL', '031C00', [ # BGI
        E('playblack_state', 0, 1, bytes_to_int, {0:'Error/Unknown', 1:'Stop', 2:'Play', 3:'Pause'} ),
        E('spl_id', 1, 17, bytes_to_uuid),
        E('show_playlist_position', 17, 21, bytes_to_int),
        E('show_playlist_duration', 21, 25, bytes_to_int),
        E('current_cpl_id', 25, 41, bytes_to_uuid),
        E('current_event_id', 41, 57, bytes_to_uuid),
        E('current_element_id', 57, 73, bytes_to_uuid),
        E('current_element_position', 73, 77, bytes_to_int),
        E('current_element_duration', 77, 81, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('StatusSPL2', '031C01', [ # BGI
        E('playblack_state', 0, 1, bytes_to_int, {0:'Error/Unknown', 1:'Stop', 2:'Play', 3:'Pause'} ),
        E('spl_id', 1, 17, bytes_to_uuid),
        E('show_playlist_position', 17, 21, bytes_to_int),
        E('show_playlist_duration', 21, 25, bytes_to_int),
        E('current_cpl_id', 25, 41, bytes_to_uuid),
        E('current_event_id', 41, 57, bytes_to_uuid),
        E('current_element_id', 57, 73, bytes_to_uuid),
        E('current_element_position', 73, 77, bytes_to_int),
        E('current_element_duration', 77, 81, bytes_to_int),
        E('flags', 81, 85, bytes_to_int),
        E('current_element_edit_rate_num', 85, 87, bytes_to_int),
        E('current_element_edit_rate_den', 87, 89, bytes_to_int),
        E('current_element_edit_position', 89, 93, bytes_to_int),
        E('current_element_edit_duration', 93, 97, bytes_to_int),
        E('current_element_frames_per_edit', 97, 99, bytes_to_int),
        E('current_element_kdm_uuid', 99, 103, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    # SCHEDULE
    M('AddSchedule2', '040201', [
        E('schedule_id', 0, 8, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetScheduleInfo2', '040801', [
        E('schedule_id', 0, 8, bytes_to_int),
        E('spl_id', 8, 24, bytes_to_uuid),
        # fixed: the five fields below used serializer-direction converters
        # (text_to_bytes / int_to_bytes); replaced with the matching
        # bytes_to_* parsers as used by every other response definition.
        E('time', 24, 28, bytes_to_int),
        E('duration', 28, 32, bytes_to_int),
        E('status', 32, 33, bytes_to_int, {
            0: 'recorded',
            1: 'success',
            2: 'failed',
            3: 'failed because a show was running',
        }),
        E('flags', 33, 41, bytes_to_int),
        E('annotation_text', 41, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetCurrentSchedule', '040A00', [
        E('schedule_id', 0, 8, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetNextSchedule', '040C00', [
        E('schedule_id', 0, 8, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('SetSchedulerEnable', '040E00', [
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetSchedulerEnable', '041000', [
        # NOTE(review): both elements are named 'response'; the boolean
        # enabled-flag below is probably shadowed by the status byte —
        # consider renaming this first one to 'enabled'.
        E('response', 0, 1, bytes_to_bool),
        E('response', -1, None, bytes_to_int),
    ]),
    # PRODUCT
    M('GetProductInfo', '050200', [
        E('product_name', 0, 16, bytes_to_text),
        E('product_serial', 16, 32, bytes_to_text),
        E('product_id', 32, 48, bytes_to_uuid),
        E('software_version_major', 48, 49, bytes_to_int),
        E('software_version_minor', 49, 50, bytes_to_int),
        E('software_version_revision', 50, 51, bytes_to_int),
        E('software_version_build', 51, 52, bytes_to_int),
        E('hardware_version_major', 52, 53, bytes_to_int),
        E('hardware_version_minor', 53, 54, bytes_to_int),
        E('hardware_version_build', 54, 55, bytes_to_int),
        E('hardware_version_extra', 55, 56, bytes_to_int),
    ]),
    M('GetProductCertificate', '050400', [ #BGI
        E('certificate', 0, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
    M('GetAPIProtocolVersion', '050600', [ #BGI
        E('version_major', 0, 1, bytes_to_int),
        E('version_minor', 1, 2, bytes_to_int),
        E('version_build', 2, 3, bytes_to_int),
    ]),
    # INGEST
    M('IngestAddJob', '071000', [
        E('job_id', 0, 8, bytes_to_int),
        E('response', -1, None, bytes_to_int),
    ]),
    M('IngestGetJobStatus', '071E00', [
        E('error_count', 0, 4, bytes_to_int),
        E('warning_count', 4, 8, bytes_to_int),
        E('event_count', 8, 12, bytes_to_int),
        E('status', 12, 16, bytes_to_int, {
            0: 'pending',
            1: 'paused',
            2: 'running',
            3: 'scheduled',
            4: 'success',
            5: 'aborted',
            6: 'unused',
            7: 'failed'
        }),
        E('download_progress', 16, 20, bytes_to_int),
        E('process_progress', 20, 24, bytes_to_int),
        E('actions', 24, 28, bytes_to_int),
        E('title', 28, -1, bytes_to_text),
        E('response', -1, None, bytes_to_int),
    ]),
)
# Replace this module object with a wrapper exposing the message definitions.
sys.modules[__name__] = MessageListWrapper(sys.modules[__name__], messages=RESPONSES)
|
#! python 3
#! quickWeather.py - Prints the weather for a location from the command line.
import json, requests, sys

# Compute location from command line arguments
if len(sys.argv) < 2:  # fixed: the original line was missing the trailing colon
    print('Usage: quickWeather.py location')
    sys.exit()
location = ''.join(sys.argv[1:])

# Download the JSON data from OpenWeatherMap.org's API.
# NOTE(review): the current OpenWeatherMap API also requires an
# APPID=<api-key> query parameter; without it the request returns 401.
url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3' % (location)
response = requests.get(url)
response.raise_for_status()

# Load JSON data into a Python variable.
weatherData = json.loads(response.text)
|
from os.path import join
import conda.config as config
from conda.cli.common import name_prefix, error_and_exit
def read_message(fn):
    """Concatenate the contents of ``.conda-help/<fn>`` from every envs dir.

    Files are decoded as UTF-8 and missing files are silently skipped.
    The literal token ``${envs_dir}`` in each file is replaced by the
    directory the file was read from.
    """
    res = []
    for envs_dir in config.envs_dirs:
        path = join(envs_dir, '.conda-help', fn)
        try:
            # Fixed: read as bytes and decode explicitly.  On Python 3 a
            # text-mode read() returns str, which has no .decode(), so the
            # original `open(path)` + `fi.read().decode('utf-8')` only
            # worked on Python 2; binary mode works on both.
            with open(path, 'rb') as fi:
                s = fi.read().decode('utf-8')
            s = s.replace('${envs_dir}', envs_dir)
            res.append(s)
        except IOError:
            pass
    return ''.join(res)
def root_read_only(command, prefix, json=False):
    """Abort with a helpful message when the root env is not writable.

    Shows the custom ``ro.txt`` help text if one exists in any envs dir,
    otherwise a built-in message suggesting cloning into a user environment,
    then exits via error_and_exit with error_type 'RootNotWritable'.
    """
    assert command in {'install', 'update', 'remove'}
    msg = read_message('ro.txt')
    if not msg:
        msg = """\
Missing write permissions in: ${root_dir}
#
# You don't appear to have the necessary permissions to ${command} packages
# into the install area '${root_dir}'.
# However you can clone this environment into your home directory and
# then make changes to it.
# This may be done using the command:
#
# $ conda create -n my_${name} --clone=${prefix}
"""
    # Substitute placeholders in a fixed order (same order as the original
    # chained replace calls).
    substitutions = (
        ('${root_dir}', config.root_dir),
        ('${prefix}', prefix),
        ('${name}', name_prefix(prefix)),
        ('${command}', command),
    )
    for token, value in substitutions:
        msg = msg.replace(token, value)
    error_and_exit(msg, json=json, error_type='RootNotWritable')
|
# | Copyright 2017 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import sys, json
from python_compat import identity, imap, lfilter, lidfilter, lmap, set, sorted
def main():
	"""Render the grid-control option documentation from the docgen JSON dumps.

	Loads recorded config calls, plugin metadata and user annotations,
	merges them, then emits the documentation: whitelisted locations first,
	remaining locations in inheritance order, and finally a report of unused
	user-annotation keys.
	"""
	config_call_list = _get_json('docgen_config_calls.json')
	plugin_infos = _get_json('docgen_plugin_infos.json')
	available_plugins = _get_available_plugins(plugin_infos)
	user_dict = _get_json('docgen_user.json')
	user_location_list = user_dict['location_whitelist'] + user_dict['location_blacklist']
	_rewrite_user_dict(user_dict)
	enum_info_dict = _get_enum_info_dict(user_dict, _get_json('docgen_enums.json')['enums'])
	opt_to_cc_list = _get_opt_to_cc_list(config_call_list,
		available_plugins, enum_info_dict, plugin_infos)
	user_key_used = set()
	used_remap = set()
	_apply_user_to_cc_list(opt_to_cc_list, user_dict, user_key_used, used_remap, available_plugins)
	cc_by_location = _get_cc_by_location(user_dict, opt_to_cc_list, used_remap)
	# Header is emitted exactly once; the [True] loop mirrors the
	# commented-out per-location variant below to keep the indentation.
	for location in [True]:
		# for location in cc_by_location:
		_output('grid-control options')
		_output('====================')
		_output('')
	# Whitelisted locations first, in the user-specified order.
	for location in user_dict['location_whitelist']:
		_display_location_deep(location, cc_by_location, user_dict, plugin_infos, enum_info_dict)
	def _sort_by_inheritance(location):
		# Sort key: plugin base-class chain first, then the location name.
		return (tuple(plugin_infos.get(location, {}).get('bases', [])), location)
	# Remaining locations (not white- or blacklisted) in inheritance order.
	for location in sorted(cc_by_location, key=_sort_by_inheritance):
		if location not in user_location_list:
			_display_location_deep(location, cc_by_location, user_dict, plugin_infos, enum_info_dict)
	# Report user-annotation entries that were never consumed.
	for entry in sorted(user_dict['options']):
		if entry not in user_key_used:
			_output('Unused: %r %r' % (entry, user_dict['options'][entry]))
	_output('')
def _apply_user_to_cc(cfg_call, user_dict, user_key_used, used_remap, available_plugins):
    """Merge user documentation and display metadata into one config call dict.

    Mutates cfg_call in place and records which user_dict['options'] keys and
    location remaps were consumed (via user_key_used / used_remap).
    """
    user_key_used.add(cfg_call['option'])
    # alternative option names - everything but the preferred first entry
    cfg_call['output_altopt'] = ''
    if len(cfg_call['options']) > 1:
        cfg_call['output_altopt'] = ' / %s' % str.join(' / ', cfg_call['options'][1:])
    cfg_call['option_display'] = cfg_call['option']
    # global option_map entries can be overridden per call
    tmp = dict(user_dict['option_map'])
    tmp.update(cfg_call.get('option_map', {}))
    cfg_call['option_map'] = tmp
    for entry in cfg_call['option_map']:
        new_entry_str = cfg_call['option_map'][entry]
        cfg_call['option_display'] = cfg_call['option_display'].replace(entry, new_entry_str)
        cfg_call['output_altopt'] = cfg_call['output_altopt'].replace(entry, new_entry_str)
    # user data layers: per-API defaults, then per-option, then per-option-at-location
    cfg_call.update(user_dict['api'][cfg_call['api']])
    cfg_call.update(user_dict['options'].get(cfg_call['option'], {}))
    user_key_used.add(cfg_call['option'] + ':' + cfg_call['location'])
    cfg_call.update(user_dict['options'].get(cfg_call['option'] + ':' + cfg_call['location'], {}))
    if cfg_call['location'] in user_dict['location_remap']:
        used_remap.add(cfg_call['location'])
        cfg_call['location'] = user_dict['location_remap'][cfg_call['location']]
    # pre-rendered plugin lists, usable as substitutions inside user_text
    opl_fmt = user_dict['format']['output_plugin_list']
    cfg_call['available_filter_list'] = str.join('',
        imap(lambda value: opl_fmt % value, sorted(available_plugins['ListFilter'])))
    cfg_call['available_matcher_list'] = str.join('',
        imap(lambda value: opl_fmt % value, sorted(available_plugins['Matcher'])))
    cfg_call['available_parameter_parser'] = str.join('',
        imap(lambda value: opl_fmt % value, sorted(available_plugins['ParameterParser'])))
    cfg_call['available_parameter_tuple_parser'] = str.join('',
        imap(lambda value: opl_fmt % value, sorted(available_plugins['ParameterTupleParser'])))
    if cfg_call.get('available'):
        cfg_call['available_list'] = str.join('',
            imap(lambda value: opl_fmt % value, sorted(cfg_call['available'])))
    if cfg_call.get('available_multi'):
        cfg_call['available_multi_list'] = str.join('',
            imap(lambda value: opl_fmt % value, sorted(cfg_call['available_multi'])))
    if 'cls_bases' in cfg_call:
        # the most specific base with plugin_details wins (list is base-first)
        # NOTE(review): assumes plugin_details covers at least one base,
        # otherwise plugin_info stays None and indexing below raises
        plugin_info = None
        for cls_base in cfg_call['cls_bases']:
            plugin_info = user_dict['plugin_details'].get(cls_base, plugin_info)
        cfg_call['plugin_singular'] = plugin_info[0]
        cfg_call['plugin_plural'] = plugin_info[1]
    cfg_call['output_default'] = ''
    if cfg_call['default'] is not None:
        default = str(cfg_call['default']).strip()
        # expand <call:...> placeholders from the recorded call info
        for call in cfg_call.get('call', []):
            default = default.replace('<call:%s>' % call, cfg_call['call'][call])
        default_map = cfg_call.get('default_map', {})
        for key in default_map:
            if key not in default:
                raise Exception('Unused default map: %r = %r\n%r' % (key, default_map[key], default))
        default = default_map.get(default, default)
        cfg_call['output_default'] = user_dict['format']['output_default'] % default
    # user_text may reference any of the keys collected above via %(...)s
    cfg_call['user_text'] = cfg_call.get('user_text', '') % cfg_call
    cfg_call['append_options'] = _get_sub_cc(cfg_call, cfg_call.get('append_options', []))
    cfg_call['prepend_options'] = _get_sub_cc(cfg_call, cfg_call.get('prepend_options', []))
def _apply_user_to_cc_list(opt_to_cc_list, user_dict, user_key_used, used_remap, available_plugins):
    """Fold the user-maintained documentation into every parsed config call.

    Raises when an option name with user documentation maps to several call
    sites (unless the user entry opts out via 'disable_dupe_check').
    """
    opt_to_cc = {}
    for opt, cfg_call_list in opt_to_cc_list.items():
        ambiguous = len(cfg_call_list) > 1
        for cfg_call in cfg_call_list:
            if ambiguous:
                user_specs = user_dict['options'].get(cfg_call['option'])
                if user_specs and not user_specs.get('disable_dupe_check', False):
                    raise Exception('User option %s is not specific enough! %s' % (
                        cfg_call['options'], json.dumps(cfg_call_list, indent=2)))
            opt_to_cc[opt] = cfg_call
            _apply_user_to_cc(cfg_call, user_dict, user_key_used, used_remap, available_plugins)
    return opt_to_cc
def _display_location(location_list, cc_by_location, user_dict, enum_info_dict):
    """Render one reST section for location_list[0], aggregating the options
    of all listed locations (derived class plus its bases) into one list."""
    if '.' in location_list[0]:
        # dotted locations are method-level leftovers - dump context and abort
        _output(json.dumps(cc_by_location.get(location_list[0]), indent=2))
        raise Exception('Invalid location %r' % location_list[0])
    _output('.. _%s:' % location_list[0])
    _output('%s options' % location_list[0])
    _output('-' * len('%s options' % location_list[0]))
    _output('')
    # later list entries overwrite earlier ones for the same option name
    all_cc = {}
    for location in location_list:
        for cfg_call in cc_by_location.get(location, []):
            all_cc[cfg_call['option']] = cfg_call
    def _sort_by_default_exists(opt):
        # options without a default come first, then alphabetical display name
        return (all_cc[opt].get('default') is not None, all_cc[opt]['option_display'])
    for opt in sorted(all_cc, key=_sort_by_default_exists):
        cfg_call = all_cc[opt]
        # user documentation may attach extra entries before / after an option
        for sub_cc in cfg_call.get('prepend_options', []):
            _display_option(sub_cc, user_dict, enum_info_dict)
        _display_option(cfg_call, user_dict, enum_info_dict)
        for sub_cc in cfg_call.get('append_options', []):
            _display_option(sub_cc, user_dict, enum_info_dict)
    _output('')
def _display_location_deep(location, cc_by_location, user_dict, plugin_infos, enum_info_dict):
    """Render a location together with the options of its non-generic bases."""
    location_chain = [location]
    for base in plugin_infos.get(location, {}).get('bases', []):
        # skip the generic plugin machinery - it carries no user options
        if base not in ['object', 'Plugin', 'ConfigurablePlugin', 'NamedPlugin']:
            location_chain.append(base)
    _display_location(location_chain, cc_by_location, user_dict, enum_info_dict)
def _display_option(cfg_call, user_dict, enum_info_dict):
    """Print a single option entry: one bullet line plus indented description."""
    try:
        opt_line = (cfg_call.get('opt_line', user_dict['format']['output_opt']) % cfg_call).strip()
        # templates may substitute further templates - expand until stable
        while '%(' in opt_line:
            opt_line = opt_line % cfg_call
        # resolve <enum_values:NAME> and <enum:NAME:<attr:value>> placeholders
        for enum, enum_info in enum_info_dict.items():
            if '<enum_values:%s>' % enum in opt_line:
                opt_line = opt_line.replace('<enum_values:%s>' % enum, enum_info['enum_values'])
            if '<enum:%s' % enum in opt_line:
                for enum_value in enum_info['enum_values_raw']:
                    opt_line = opt_line.replace('<enum:%s:<attr:%s>>' % (enum, enum_value.lower()), enum_value)
                    opt_line = opt_line.replace('<enum:%s:<attr:%s>>' % (enum, enum_value.upper()), enum_value)
        _output('* %s' % opt_line)
        desc = cfg_call.get('desc_line', user_dict['format']['output_desc']) % cfg_call
        desc = (desc % cfg_call).strip()  # second pass for nested placeholders
        if desc:
            for line in desc.split('\n'):
                _output((' %s' % line).rstrip())
            _output('')
        else:
            # missing description: 'stop' makes it fatal, 'count' just marks it
            if sys.argv[1:] == ['stop']:
                raise Exception(cfg_call['option'] + ' no desc')
            elif sys.argv[1:] == ['count']:
                _output('N/A')
    except Exception:
        # dump the offending call for debugging, then re-raise
        _output(json.dumps(cfg_call, indent=2, sort_keys=True))
        raise
def _get_available_plugins(plugin_infos):
    """Map each base class name to display names of the plugins implementing
    it; aliases (if any survive lidfilter) are appended to the display name."""
    available_plugins = {}
    for plugin, info in plugin_infos.items():
        for base in info['bases']:
            alias_list = info.get('alias', [])
            if alias_list:
                alias_list = lidfilter(alias_list)
            if alias_list:
                display = '%s_ (alias: %s)' % (plugin, str.join(', ', alias_list))
            else:
                display = '%s_' % plugin
            available_plugins.setdefault(base, []).append(display)
    return available_plugins
def _get_cc_by_location(user_dict, opt_to_cc_list, used_remap):
    """Group all documented config calls by their source location.

    Also folds in the manually documented options, ensures forced locations
    exist, and validates that every remap / whitelist / blacklist entry from
    the user file was actually used.
    """
    result = {}
    # manually documented options: normalize them like parsed calls
    for location, manual_call_list in user_dict['manual options'].items():
        for manual_call in manual_call_list:
            manual_call.update(user_dict['api'][manual_call['api']])
            manual_call['output_altopt'] = ''
            manual_call['option_display'] = manual_call['option']
            if manual_call['default'] is not None:
                manual_call['output_default'] = user_dict['format']['output_default'] % manual_call['default']
            result.setdefault(location, []).append(manual_call)
    # parsed calls, unless explicitly disabled by the user documentation
    for call_list in opt_to_cc_list.values():
        for parsed_call in call_list:
            if not parsed_call.get('disabled'):
                result.setdefault(parsed_call['location'], []).append(parsed_call)
    for forced_location in user_dict['location_force']:
        result.setdefault(forced_location, [])
    for remap_location in user_dict['location_remap']:
        if remap_location not in used_remap:
            raise Exception('Remap location is unused: %s' % remap_location)
    for user_location in user_dict['location_whitelist'] + user_dict['location_blacklist']:
        if user_location not in result:
            raise Exception('User specified location %r does not exist' % user_location)
    return result
def _get_config_default(cfg_call):
    """Pop and return the default value of a config call: the 'default'
    keyword wins, else the first remaining positional; None when absent."""
    kwargs = cfg_call['kwargs']
    if 'default' in kwargs:
        return kwargs.pop('default')
    args = cfg_call['args']
    if args:
        return args.pop(0)
    return None
def _get_config_option_list(cfg_call):
    """Pop the option-name argument and return the normalized name list.

    For a list argument the order is reversed so the preferred spelling
    (given last by convention) ends up first.
    """
    raw_options = cfg_call['args'].pop(0)
    if isinstance(raw_options, list):
        return [_make_opt(entry) for entry in reversed(raw_options)]
    return [_make_opt(raw_options)]
def _get_enum_default(cfg_call):
    """Map the raw default onto the enum value it references, if any;
    otherwise hand back the raw default unchanged."""
    matched = _match_enum(cfg_call['default_raw'].lower(), cfg_call['enum_values_raw'])
    if matched is None:
        return cfg_call['default_raw']
    return matched
def _get_enum_info(call_enum_name, call_enum_subset, enum_info_dict):
    """Look up enum metadata for a '<name:EnumName>' reference.

    Returns a copy of the enum info (restricted to the requested subset when
    one is given); implicitly returns None when the name matches no enum.
    """
    for enum, enum_info in enum_info_dict.items():
        if call_enum_name == '<name:%s>' % enum:
            enum_info = dict(enum_info)  # copy - never mutate the registry
            if isinstance(call_enum_subset, list):
                # explicit subset: map each entry onto the raw enum values
                subset_values = lmap(lambda call_enum:
                    _match_enum(call_enum, enum_info['enum_values_raw']), call_enum_subset)
                enum_info['enum_values_raw'] = subset_values
                enum_info['enum_values'] = str.join('|', subset_values)
            elif isinstance(call_enum_subset, str):
                # named subset: resolve through the enum's alias table
                enum_info['enum_values_raw'] = enum_info['enum_alias'][call_enum_subset]
                enum_info['enum_values'] = str.join('|', enum_info['enum_values_raw'])
            return enum_info
def _get_enum_info_dict(user_dict, enum_value_dict):
    """Build the per-enum metadata registry from the scanned enum values,
    apply user overrides, and derive display strings / description slots."""
    enum_info_dict = {}
    for enum_name, raw_values in enum_value_dict.items():
        info = {
            'enum': enum_name,
            'enum_desc': '',
            'enum_value_desc': {},
            'enum_values_raw': [],
        }
        if raw_values != '<manual>':
            info['enum_values_raw'] = [value.upper() for value in raw_values]
        enum_info_dict[enum_name] = info
    # user supplied overrides (descriptions, aliases, manual value lists, ...)
    for enum_name, override in user_dict['enums'].items():
        enum_info_dict[enum_name].update(override)
    for info in enum_info_dict.values():
        if not info.get('enum_values'):
            if info['enum_values_raw'] == '<manual>':
                info['enum_values'] = '<manual>'
            else:
                info['enum_values'] = str.join('|', info['enum_values_raw'])
        # make sure every value has a (possibly empty) description slot
        for enum_value in info['enum_values_raw']:
            info['enum_value_desc'].setdefault(enum_value, '')
    return enum_info_dict
def _get_json(fn):
    """Load JSON from file *fn*.

    On Python 2 the parsed structure is recursively re-encoded from unicode
    to UTF-8 byte strings; on Python 3 (where unicode() raises NameError)
    the conversion is the identity.
    """
    try:
        unicode()  # NameError on Python 3 -> install identity shim below
        def _remove_unicode(obj):
            if unicode == str:
                return obj
            if isinstance(obj, (list, tuple, set)):
                (obj, old_type) = (list(obj), type(obj))
                for idx, value in enumerate(obj):
                    obj[idx] = _remove_unicode(value)
                obj = old_type(obj)
            elif isinstance(obj, dict):
                result = {}
                for key, value in obj.items():
                    result[_remove_unicode(key)] = _remove_unicode(value)
                return result
            elif isinstance(obj, unicode):
                return obj.encode('utf-8')
            return obj
    except NameError:
        def _remove_unicode(obj):
            return obj
    # context manager guarantees the handle is closed even if parsing fails
    with open(fn) as fp:
        result = json.load(fp)
    return _remove_unicode(result)
def _get_opt_to_cc_list(config_call_list, available_plugins, enum_info_dict, plugin_infos):
    """Normalize every recorded config API call and group them by option name."""
    opt_to_cc_list = {}
    for cfg_call in config_call_list:
        try:
            cfg_fqfn = cfg_call['fqfn']
            # constructor frames carry no useful location information
            if cfg_call['callers'][-1] in ['__init__', '__new__']:
                cfg_call['callers'].pop()
            cfg_call['location'] = str.join('.', cfg_call['callers'])
            cfg_call['bases'] = plugin_infos.get(cfg_call['callers'][0], {}).get('bases', [])
            # pconfig (parameter config) calls use a different argument layout
            if cfg_fqfn.startswith('pconfig'):
                _process_pcfg_call(cfg_call, available_plugins, enum_info_dict, plugin_infos)
            else:
                _process_cfg_call(cfg_call, available_plugins, enum_info_dict, plugin_infos)
            # Catch unknown (kw)args
            if cfg_call['args']:
                raise Exception('Unknown args!')
            if cfg_call['kwargs']:
                raise Exception('Unknown kwargs!')
            opt_to_cc_list.setdefault(cfg_call['option'], []).append(cfg_call)
        except Exception:
            # show the offending call before propagating the error
            _output(json.dumps(cfg_call, indent=2))
            raise
    return opt_to_cc_list
def _get_sub_cc(cfg_call, sub_cc_list):
    """Expand prepend/append sub-entries: each inherits every key of the
    parent call, overridden by its own settings. The parent is not mutated."""
    return [dict(cfg_call, **sub_cc) for sub_cc in sub_cc_list]
def _make_opt(value):
    """Normalize an option name: lowercase, quotes become spaces, runs of
    spaces collapse to one, surrounding whitespace is stripped."""
    normalized = value.lower().replace("'", ' ')
    previous = None
    while normalized != previous:  # collapse '  ' until the string is stable
        previous = normalized
        normalized = normalized.replace('  ', ' ')
    return normalized.strip()
def _match_enum(value, enum_values_raw):
    """Return the first enum value whose '<attr:...>' placeholder occurs in
    *value* (case-insensitive); None when nothing matches."""
    lowered = value.lower()
    return next(
        (enum_value for enum_value in enum_values_raw
            if '<attr:%s>' % enum_value.lower() in lowered),
        None)
def _output(value):
    """Write one line to stdout - docgen output goes straight to the console."""
    sys.stdout.write('%s\n' % value)
def _process_cfg_call(cfg_call, available_plugins, enum_info_dict, plugin_infos):
    """Normalize a config.get_* call: pop every known (kw)arg into dedicated
    cfg_call keys, so leftover arguments can be flagged by the caller."""
    # API name -> specialized post-processing of the popped arguments
    process_handler = {
        'get_enum': _process_get_enum,
        'get_lookup': _process_get_lookup,
        'get_time': _process_get_time,
        'get_list': _process_get_list,
        'get_path': _process_get_path_api,
        'get_path_list': _process_get_path_api,
        'get_fn': _process_get_path_api,
        'get_fn_list': _process_get_path_api,
        'get_dn': _process_get_path_api,
        'get_dn_list': _process_get_path_api,
        'get_plugin': _process_get_plugin,
        'get_composited_plugin': _process_get_composited_plugin,
    }
    # cfg_call['id'] = os.urandom(10)
    cfg_call['raw_args'] = list(cfg_call['args'])  # keep originals for debugging
    cfg_call['raw_kwargs'] = dict(cfg_call['kwargs'])
    cfg_call['options'] = _get_config_option_list(cfg_call)
    cfg_call['option'] = cfg_call['options'][0].strip()
    # Capture first arg in get_enum - which is not the default but the enum
    if cfg_call['api'] == 'get_enum':
        enum_name = cfg_call['args'].pop(0)
        enum_subset = cfg_call['kwargs'].pop('subset', None)
        cfg_call.update(_get_enum_info(enum_name, enum_subset, enum_info_dict))
    cfg_call['default_raw'] = _get_config_default(cfg_call)
    cfg_call['default'] = cfg_call['default_raw']
    process_fun = process_handler.get(cfg_call['api'])
    if process_fun:
        process_fun(cfg_call)
    if cfg_call['api'] == 'get_filter':
        cfg_call['negate'] = cfg_call['kwargs'].pop('negate', False)
        cfg_call['default_matcher'] = cfg_call['kwargs'].pop('default_matcher', 'start')
        cfg_call['default_order'] = cfg_call['kwargs'].pop('default_order', '<attr:source>')
        # NOTE(review): this iterates the *keys* of the ListOrder info dict
        # ('enum', 'enum_values', ...), not the enum values themselves -
        # verify whether enum_info_dict['ListOrder']['enum_values_raw'] was
        # intended before changing anything here.
        for enum in enum_info_dict['ListOrder']:
            cfg_call['default_order'] = cfg_call['default_order'].replace('<attr:%s>' % enum, enum)
        cfg_call['default_filter'] = cfg_call['kwargs'].pop('default_filter', 'strict')
    if cfg_call['api'] == 'get_matcher':
        cfg_call['default_matcher'] = cfg_call['kwargs'].pop('default_matcher', 'start')
        cfg_call['available'] = lfilter(_select_normal_cls_name, available_plugins['Matcher'])
    if cfg_call['api'] in ['get_plugin', 'get_composited_plugin', 'docgen:get_broker']:
        # unknown base class: the class itself is the only 'available' plugin
        if cfg_call['cls'] not in available_plugins:
            cfg_call['available'] = [cfg_call['cls']]
        else:
            cfg_call['available'] = lfilter(_select_normal_cls_name, available_plugins[cfg_call['cls']])
            cfg_call['available_multi'] = lfilter(_select_multi_cls_name, available_plugins[cfg_call['cls']])
        cfg_call['cls_bases'] = plugin_infos.get(cfg_call['cls'], {}).get('bases', []) + [cfg_call['cls']]
    if cfg_call['api'] == 'get_dict':
        cfg_call['default_order'] = cfg_call['kwargs'].pop('default_order', None)
        if cfg_call['default_order'] is not None:
            cfg_call['default_order'] = lmap(eval, cfg_call['default_order'])
        if cfg_call['default'] is not None:
            # trusted input - the default string comes from our own source scan
            default = eval(cfg_call['default'])  # pylint:disable=eval-used
            default_dict = (default, cfg_call['default_order'] or list(default))
            cfg_call['default'] = repr(_str_dict_cfg(default_dict))
    # arguments that are irrelevant for the documentation
    cfg_call['kwargs'].pop('filter_str', None)
    cfg_call['kwargs'].pop('filter_parser', None)
    cfg_call['kwargs'].pop('strfun', None)
    cfg_call['kwargs'].pop('parser', None)
    cfg_call['kwargs'].pop('override', None)
    cfg_call['kwargs'].pop('parse_item', None)
    cfg_call['kwargs'].pop('interactive_msg', None)
def _process_get_composited_plugin(cfg_call):
    """Composited plugins: regular plugin handling plus the compositor
    argument; Broker compositions are rerouted to the docgen broker API."""
    _process_get_plugin(cfg_call)
    kwargs = cfg_call['kwargs']
    if 'default_compositor' in kwargs:
        cfg_call['compositor'] = kwargs.pop('default_compositor')
    else:
        cfg_call['compositor'] = cfg_call['args'].pop(0)
    if (cfg_call['cls'] == 'Broker') and (cfg_call['api'] == 'get_composited_plugin'):
        cfg_call['api'] = 'docgen:get_broker'
        cfg_call['broker_prefix'] = cfg_call['pargs'][0].strip('"').strip("'")
def _process_get_enum(cfg_call):
    """Translate the raw enum default into the matching enum value, if set."""
    if cfg_call['default_raw'] is None:
        return
    cfg_call['default'] = _get_enum_default(cfg_call)
def _process_get_list(cfg_call):
    """Render non-string list defaults as a quoted space separated string."""
    default_raw = cfg_call['default_raw']
    if (default_raw is None) or isinstance(default_raw, str):
        return
    joined = str.join(' ', (str(item).strip("'") for item in cfg_call['default']))
    cfg_call['default'] = repr(joined)
def _process_get_lookup(cfg_call):
    """Pop lookup-specific settings; an empty dict default displays as ''."""
    kwargs = cfg_call['kwargs']
    cfg_call['single'] = kwargs.pop('single', True)
    if cfg_call['default'] == '{}':
        cfg_call['default'] = "''"
    cfg_call['default_matcher'] = kwargs.pop('default_matcher', "'StartMatcher'")
def _process_get_path_api(cfg_call):
    """Path-style options: pop must_exist and render list defaults as a
    quoted space separated string (same normalization as get_list)."""
    cfg_call['must_exist'] = cfg_call['kwargs'].pop('must_exist', True)
    default_raw = cfg_call['default_raw']
    if (default_raw is not None) and not isinstance(default_raw, str):
        joined = str.join(' ', (str(item).strip("'") for item in cfg_call['default']))
        cfg_call['default'] = repr(joined)
def _process_get_plugin(cfg_call):
    """Extract the target plugin class name ('<name:Cls>' reference or quoted
    literal) and drop the instantiation-only keyword arguments."""
    kwargs = cfg_call['kwargs']
    raw_cls = kwargs.pop('cls')
    if raw_cls.startswith('<name:'):
        cfg_call['cls'] = raw_cls[len('<name:'):-1]
    else:
        cfg_call['cls'] = raw_cls.strip("'")
    cfg_call['require_plugin'] = kwargs.pop('require_plugin', True)
    # constructor pass-through arguments are irrelevant for the docs
    for ignored in ('pargs', 'pkwargs', 'bind_args', 'bind_kwargs'):
        kwargs.pop(ignored, None)
def _process_get_time(cfg_call):
    """Positive integer time defaults are displayed as hh:mm:ss."""
    default = cfg_call['default']
    if isinstance(default, int) and (default > 0):
        cfg_call['default'] = _str_time(default)
def _process_pcfg_call(cfg_call, available_plugins, enum_info_dict, plugin_infos):
    """Normalize a parameter-config (pconfig) call: extract the option name
    (variable name plus optional suffix) and the default from the positional
    arguments, and strip the pconfig helper frames from the location."""
    if cfg_call['api'] in ('get', 'get_bool', 'get_parameter'):
        var_name = cfg_call['args'].pop(0).strip("'")
        cfg_call['option'] = var_name
        opt_suffix = cfg_call['args'].pop(0) if cfg_call['args'] else None
        if opt_suffix:
            opt_suffix = opt_suffix.strip("'")
            cfg_call['option'] = ('%s %s' % (var_name, opt_suffix)).strip()
        if cfg_call['args']:
            default_value = cfg_call['args'].pop(0)
        else:
            default_value = cfg_call['kwargs'].pop('default', None)
        cfg_call['default'] = default_value
    cfg_call['options'] = [cfg_call['option']]
    # pconfig accessors run through these helpers - hide them from the docs
    location = cfg_call['location']
    for helper_frame in ('.create_psrc', '.parse_tuples', '.parse_value'):
        location = location.replace(helper_frame, '')
    cfg_call['location'] = location
def _rewrite_user_dict(user_dict):
    """Canonicalize docgen_user.json on disk: sorted keys, tab indentation,
    no trailing spaces - keeps the file diff-friendly between runs."""
    user_json = json.dumps(user_dict, indent=4, sort_keys=True)
    user_json = user_json.replace(' ' * 4, '\t').replace(' \n', '\n')
    # context manager closes (and flushes) the handle deterministically
    # instead of relying on GC of the anonymous file object
    with open('docgen_user.json', 'w') as user_file:
        user_file.write(user_json)
def _select_multi_cls_name(cls_name):
    """True for user-selectable compositor ('Multi...') plugin names."""
    if 'Multi' not in cls_name:
        return False
    return not _select_non_user_cls(cls_name)
def _select_non_user_cls(cls_name):
    """True when the class is internal machinery users should not select."""
    return any(marker in cls_name for marker in ('Testsuite', 'Base', 'Internal'))
def _select_normal_cls_name(cls_name):
    """True for plain (non-compositor) user-selectable plugin names."""
    if 'Multi' in cls_name:
        return False
    return not _select_non_user_cls(cls_name)
def _str_dict_cfg(value, parser=identity, strfun=str):
    """Render a (dictionary, key order) pair the way the config format writes
    dictionaries: optional None-key head entry, then 'key => value' pairs
    joined by ' <newline> ' in sorted key order."""
    (srcdict, srckeys) = value
    head = ''
    if srcdict.get(None) is not None:
        head = strfun(srcdict[None])
    entries = ('%s => %s' % (key, strfun(srcdict[key])) for key in sorted(srckeys))
    return (head + str.join(' <newline> ', entries)).strip()
def _str_time(secs):
    """Format a duration in seconds as zero-padded 'hh:mm:ss'."""
    minutes, seconds = divmod(int(secs), 60)
    hours, minutes = divmod(minutes, 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
# Script entry point: run the doc generator (optional argv: 'stop' / 'count',
# see _display_option for their effect on missing descriptions).
if __name__ == '__main__':
    main()
|
from django.db import migrations
from api.metadata.constants import OrganisationType
# Every seeded organisation is a UK non-ministerial department - only the
# name varies, so derive the fixture entries from the name list.
_NON_MINISTERIAL_DEPARTMENT_NAMES = [
    "The Charity Commission",
    "Competition and Markets Authority",
    "Crown Prosecution Service",
    "Food Standards Agency",
    "Forestry Commission",
    "Government Actuary's Department",
    "Government Legal Department",
    "HM Land Registry",
    "HM Revenue & Customs",
    "NS&I",
    "The National Archives",
    "National Crime Agency",
    "Office of Rail and Road",
    "Ofgem",
    "Ofqual",
    "Ofsted",
    "Serious Fraud Office",
    "Supreme Court of the United Kingdom",
    "UK Statistics Authority",
    "The Water Services Regulation Authority",
]
ORGANISATIONS = [
    {
        "name": department_name,
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS,
    }
    for department_name in _NON_MINISTERIAL_DEPARTMENT_NAMES
]
def create_organisations(apps, schema_editor):
    """Forward data migration: insert every organisation from ORGANISATIONS."""
    organisation_model = apps.get_model("metadata", "Organisation")
    for org_fields in ORGANISATIONS:
        organisation_model.objects.create(**org_fields)
def delete_organisations(apps, schema_editor):
    """Reverse data migration: remove the seeded organisations again.

    The original called Organisation.objects.delete(**item); Django managers
    expose no delete() taking field lookups, so reversing this migration
    raised instead of deleting. Filter the matching rows first, then delete.
    """
    Organisation = apps.get_model("metadata", "Organisation")
    for item in ORGANISATIONS:
        Organisation.objects.filter(**item).delete()
class Migration(migrations.Migration):
    """Data migration: seed metadata.Organisation with the UK non-ministerial
    departments listed in ORGANISATIONS."""
    dependencies = [
        ("metadata", "0018_auto_20201118_1133"),
    ]
    operations = [
        # reverse_code makes the data migration reversible
        migrations.RunPython(create_organisations, reverse_code=delete_organisations),
    ]
|
# You need to implement a function `duplicate` that takes a list as its
# argument and doubles that list "in place" (you must modify the original
# list object - remember: lists are passed by reference!).
# "Doubling" means that after the function is applied, the list must have a
# copy of all its elements appended at the end (see the example below).
def duplicate(paper):
    """Double *paper* in place by appending a copy of its current items.

    Mutates the list object handed in (the caller sees the change) and, like
    other in-place operations, returns None.
    """
    paper.extend(list(paper))
# demo: the original list object is modified, not replaced
numbers = [1, 2]
duplicate(numbers)
print(numbers)  # -> [1, 2, 1, 2]
|
import pytest
from botocore.exceptions import ClientError
from mock import patch, Mock, call
from common import constants # noqa: F401
from common.clients import boto
from common.clients.boto import ClientType
from common.secret_manager import VcenterSecret
# Shared fixture data for the SecretManager tests. The double underscore
# prefix only marks these as module-private (no name mangling happens at
# module scope - mangling applies inside class bodies only).
__STAGE = "test-stage"  # patched in place of common.constants.STAGE
__ORD_ID = "c64c6ded-d07d-4e1b-8963-d633163776b5"  # sample organisation id
__CLUSTER_ID = "7d233235-3b38-4c50-bd35-008dd13a6485"  # sample cluster id
__IP = "1.1.1.1"  # sample vCenter address
__USERNAME = "test-username"
__PASSWORD = "test-password"
__SECRET_STR = '{"username": "test-username", "password": "test-password"}'
__VCENTER_SECRET = VcenterSecret("root", "root-password", "admin", "admin-password")
# JSON form of __VCENTER_SECRET as stored in AWS Secrets Manager
__VCENTER_SECRET_STR = (
    '{"root_username": "root", "root_password": "root-password", '
    '"admin_username": "admin", "admin_password": "admin-password"}'
)
def test_secret_manager_init(get_handler):
    """SecretManager picks up the stage and both boto clients on construction."""
    handler = get_handler("common.secret_manager")
    # The patches must stay active while the constructor runs, so the
    # construction and the assertions happen inside the with-block.
    with patch("common.constants.STAGE", __STAGE), patch.object(
        boto, "get_client"
    ) as get_client_mock:
        # setup
        system_manager_mock = Mock()
        secrets_manager_mock = Mock()
        def __get_boto_client(*args, **kwargs):
            # hand out a distinct mock per requested client type
            if args[0] == ClientType.SIMPLE_SYSTEMS_MANAGER:
                return system_manager_mock
            else:
                return secrets_manager_mock
        get_client_mock.side_effect = __get_boto_client
        # when
        actual = handler.SecretManager()
        # then
        assert __get_protected_value(actual, "stage") == __STAGE
        assert __get_protected_value(actual, "system_manager") == system_manager_mock
        assert __get_protected_value(actual, "secrets_manager") == secrets_manager_mock
def __create_secret_manager_subject(get_handler):
    # Shared builder: construct a SecretManager with both boto clients mocked
    # out; returns (subject, system_manager_mock, secrets_manager_mock, handler).
    handler = get_handler("common.secret_manager")
    system_manager_mock = Mock()
    secrets_manager_mock = Mock()
    # constants.STAGE and boto.get_client must be patched while the
    # constructor runs, hence construction happens inside the with-block
    with patch("common.constants.STAGE", __STAGE), patch.object(
        boto, "get_client"
    ) as get_client_mock:
        def __get_boto_client(*args, **kwargs):
            # distinct mock per requested client type
            if args[0] == ClientType.SIMPLE_SYSTEMS_MANAGER:
                return system_manager_mock
            else:
                return secrets_manager_mock
        get_client_mock.side_effect = __get_boto_client
        secret_manager = handler.SecretManager()
    return secret_manager, system_manager_mock, secrets_manager_mock, handler
def test_secret_manager_persist_vcenter_info_create_secret(get_handler):
    """persist_secret stores a brand new secret via create_secret."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    subject.persist_secret(secret_key, __VCENTER_SECRET)
    # then
    secrets_manager.create_secret.assert_called_with(
        Name=secret_key,
        SecretString=__VCENTER_SECRET_STR,
    )
def test_secret_manager_persist_vcenter_info_update_secret(get_handler):
    """persist_secret falls back to update_secret for an existing secret."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    # setup: create_secret reports that the secret already exists
    secrets_manager.create_secret.side_effect = ClientError(
        {"Error": {"Message": "bla", "Code": "ResourceExistsException"}}, "CreateSecret"
    )
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    subject.persist_secret(secret_key, __VCENTER_SECRET)
    # then
    secrets_manager.create_secret.assert_called_with(
        Name=secret_key, SecretString=__VCENTER_SECRET_STR,
    )
    secrets_manager.update_secret.assert_called_with(
        SecretId=secret_key, SecretString=__VCENTER_SECRET_STR,
    )
def test_vcenter_info_exists_returns_True(get_handler):
    """secret_info_exists is True when get_secret_value succeeds."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    # setup
    secrets_manager.get_secret_value.return_value = {
        "SecretString": __VCENTER_SECRET_STR
    }
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    actual = subject.secret_info_exists(secret_key)
    # then
    secrets_manager.get_secret_value.assert_called_with(SecretId=secret_key)
    assert actual is True
def test_vcenter_info_exists_returns_False(get_handler):
    """secret_info_exists is False when the secret is not found."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    # setup: lookups for missing secrets raise ResourceNotFoundException
    secrets_manager.get_secret_value.side_effect = ClientError(
        {"Error": {"Message": "bla", "Code": "ResourceNotFoundException"}},
        "secretsmanager:GetSecretValue",
    )
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    actual = subject.secret_info_exists(secret_key)
    # then
    secrets_manager.get_secret_value.assert_called_with(SecretId=secret_key)
    assert actual is False
def test_vcenter_info_exists_raise_exception(get_handler):
    """Unexpected ClientError codes must propagate out of secret_info_exists."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    # setup: an error code the implementation does not handle
    secrets_manager.get_secret_value.side_effect = ClientError(
        {"Error": {"Message": "bla", "Code": "bla"}}, "secretsmanager:GetSecretValue"
    )
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    with pytest.raises(ClientError, match="GetSecretValue operation: bla"):
        subject.secret_info_exists(secret_key)
    # then
    secrets_manager.get_secret_value.assert_called_with(SecretId=secret_key)
def test_secret_manager_get_vcenter_info(get_handler):
    """get_secret_info deserializes the stored JSON into the given type."""
    subject, _system_manager, secrets_manager, _handler = __create_secret_manager_subject(
        get_handler
    )
    # setup
    secrets_manager.get_secret_value.return_value = {
        "SecretString": __VCENTER_SECRET_STR
    }
    secret_key = f"/rpcv/{__STAGE}/orgs/{__ORD_ID}/clusters/{__CLUSTER_ID}/vcenters/{__IP}"
    # when
    actual = subject.get_secret_info(secret_key, VcenterSecret)
    # then
    secrets_manager.get_secret_value.assert_called_with(SecretId=secret_key)
    assert actual == __VCENTER_SECRET
def test_is_vcenter_in_secret_manager_return_true(get_handler):
    """is_vcenter_in_secret_manager pages through list_secrets via NextToken
    until the vCenter IP shows up in a secret name."""
    (
        secret_manager,
        system_manager_mock,
        secrets_manager_mock,
        handler,
    ) = __create_secret_manager_subject(get_handler)
    # three pages; the match sits on the last (token-less) page
    secrets_manager_mock.list_secrets.side_effect = [
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.4"},
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.5"},
            ],
            "NextToken": "next_1",
        },
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.6"},
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.7"},
            ],
            "NextToken": "next_2",
        },
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.8"},
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.9"},
            ],
        },
    ]
    result = secret_manager.is_vcenter_in_secret_manager("1.2.3.9")
    assert result is True
    # one list_secrets call per page, tokens forwarded in order
    secrets_manager_mock.list_secrets.assert_has_calls(
        [call(), call(NextToken="next_1"), call(NextToken="next_2")]
    )
def test_is_vcenter_in_secret_manager_return_false(get_handler):
    """No page contains the IP under the patched stage -> False.

    The last page contains the same IP under the '/rpcv/dev/...' prefix,
    which must not count - presumably the lookup is stage-scoped (TODO
    confirm against the implementation).
    """
    (
        secret_manager,
        system_manager_mock,
        secrets_manager_mock,
        handler,
    ) = __create_secret_manager_subject(get_handler)
    secrets_manager_mock.list_secrets.side_effect = [
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.4"},
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.5"},
            ],
            "NextToken": "next_1",
        },
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.6"},
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.7"},
            ],
            "NextToken": "next_2",
        },
        {
            "SecretList": [
                {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.8"},
                {"Name": "/rpcv/dev/orgs/o1/clusters/cluster1/vcenters/1.2.3.9"},
            ],
        },
    ]
    result = secret_manager.is_vcenter_in_secret_manager("1.2.3.9")
    assert result is False
    # all pages must have been consumed before giving up
    secrets_manager_mock.list_secrets.assert_has_calls(
        [call(), call(NextToken="next_1"), call(NextToken="next_2")]
    )
def test_is_vcenter_in_secret_manager_return_true_less_values(get_handler):
    """A single page without NextToken must be fetched exactly once."""
    (
        secret_manager,
        system_manager_mock,
        secrets_manager_mock,
        handler,
    ) = __create_secret_manager_subject(get_handler)
    secrets_manager_mock.list_secrets.return_value = {
        "SecretList": [
            {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.4"},
            {"Name": f"/rpcv/{__STAGE}/orgs/o1/clusters/cluster1/vcenters/1.2.3.5"},
        ],
    }
    result = secret_manager.is_vcenter_in_secret_manager("1.2.3.5")
    assert result is True
    secrets_manager_mock.list_secrets.assert_called_once()
def __get_protected_value(obj, name):
    """Read a name-mangled ("private") attribute of *obj* from the outside."""
    mangled_name = f"_{type(obj).__name__}__{name}"
    return getattr(obj, mangled_name)
def __set_protected_value(obj, name, value):
    """Assign a name-mangled ("private") attribute of *obj* from the outside."""
    mangled_name = f"_{type(obj).__name__}__{name}"
    return setattr(obj, mangled_name, value)
|
########################################################################
#
# File Name: TreeWalker.py
#
# Documentation: http://docs.4suite.com/4DOM/TreeWalker.py.html
#
"""
Tree Walker from DOM Level 2. Allows multi-directional iteration over nodes.
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from NodeFilter import NodeFilter
from xml.dom import NoModificationAllowedErr
from xml.dom import NotSupportedErr
class TreeWalker:
    def __init__(self, root, whatToShow, filter, expandEntityReferences):
        # State is stored under literal '__*' keys written through __dict__
        # (so no name mangling applies); __getattr__/__setattr__ below
        # mediate all other attribute access.
        self.__dict__['__root'] = root
        self.__dict__['__whatToShow'] = whatToShow
        self.__dict__['__filter'] = filter
        self.__dict__['__expandEntityReferences'] = expandEntityReferences
        self.__dict__['__currentNode'] = root  # the walker starts at the root
    ### Attribute Access Methods -- xxx.attr ###
    def __getattr__(self, name):
        # Route computed attribute reads (e.g. .root) through the
        # _readComputedAttrs table (a class attribute defined after the
        # method bodies, outside this excerpt).
        # NOTE(review): unknown names fall through and yield None instead of
        # raising AttributeError - confirm this is intended before changing.
        attrFunc = self._readComputedAttrs.get(name)
        if attrFunc:
            return attrFunc(self)
    def __setattr__(self, name, value):
        # Intercept every attribute write: reject read-only DOM attributes,
        # route computed ones through _writeComputedAttrs (class table defined
        # outside this excerpt), and store everything else directly.
        #Make sure attribute is not read-only
        if name in self.__class__._readOnlyAttrs:
            raise NoModificationAllowedErr()
        #If it's computed execute that function
        attrFunc = self.__class__._writeComputedAttrs.get(name)
        if attrFunc:
            attrFunc(self, value)
        #Otherwise, just set the attribute
        else:
            self.__dict__[name] = value
    ### Attribute Methods -- xxx._get_attr() ###
    def _get_root(self):
        # root is fixed at construction time
        return self.__dict__['__root']
    def _get_filter(self):
        return self.__dict__['__filter']
    def _get_whatToShow(self):
        return self.__dict__['__whatToShow']
    def _get_expandEntityReferences(self):
        return self.__dict__['__expandEntityReferences']
    def _get_currentNode(self):
        return self.__dict__['__currentNode']
    def _set_currentNode(self, value):
        # currentNode is writable, but may never be set to None
        if value == None:
            raise NotSupportedErr()
        self.__dict__['__currentNode'] = value
### Methods ###
def parentNode(self):
next_node = None
if self.__dict__['__currentNode'] != self.__dict__['__root']:
next_node = self.__dict__['__currentNode']._get_parentNode()
while next_node and next_node != self.__dict__['__root'] \
and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_parentNode()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def firstChild(self):
next_node = None
if self.__checkFilter(self.__dict__['__currentNode']) != NodeFilter.FILTER_REJECT:
next_node = self.__dict__['__currentNode']._get_firstChild()
while next_node and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_nextSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def lastChild(self):
next_node = None
if self.__checkFilter(self.__dict__['__currentNode']) != NodeFilter.FILTER_REJECT:
next_node = self.__dict__['__currentNode']._get_lastChild()
while next_node and not (self.__checkWhatToShow(next_node) \
and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_previousSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def previousSibling(self):
prev_node = None
if self.__dict__['__currentNode'] != self.__root:
prev_node = self.__dict__['__currentNode']._get_previousSibling()
while prev_node and not (self.__checkWhatToShow(prev_node) \
and self.__checkFilter(prev_node) == NodeFilter.FILTER_ACCEPT):
prev_node = prev_node._get_previousSibling()
if prev_node:
self.__dict__['__currentNode'] = prev_node
return prev_node
def nextSibling(self):
next_node = None
if self.__dict__['__currentNode'] != self.__root:
next_node = self.__dict__['__currentNode']._get_nextSibling()
while next_node and not (self.__checkWhatToShow(next_node) and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = next_node._get_nextSibling()
if next_node:
self.__dict__['__currentNode'] = next_node
return next_node
def nextNode(self):
next_node = self.__advance()
while next_node and not (self.__checkWhatToShow(next_node) and self.__checkFilter(next_node) == NodeFilter.FILTER_ACCEPT):
next_node = self.__advance()
return next_node
def previousNode(self):
prev_node = self.__regress()
while prev_node and not (self.__checkWhatToShow(prev_node) and self.__checkFilter(prev_node) == NodeFilter.FILTER_ACCEPT):
prev_node = self.__regress()
return prev_node
def __advance(self):
if self.firstChild():
return self.__dict__['__currentNode']
if self.nextSibling():
return self.__dict__['__currentNode']
if self.parentNode():
return self.nextSibling()
return None
def __regress(self):
if self.previousSibling():
self.lastChild()
return self.__dict__['__currentNode']
if self.parentNode():
return self.__dict__['__currentNode']
return None
def __checkWhatToShow(self, node):
show_bit = 1 << (node._get_nodeType() - 1)
return self.__dict__['__whatToShow'] & show_bit
def __checkFilter(self, node):
if self.__dict__['__filter']:
return self.__dict__['__filter'].acceptNode(node)
else:
return NodeFilter.FILTER_ACCEPT
### Attribute Access Mappings ###
_readComputedAttrs = {'root':_get_root,
'whatToShow':_get_whatToShow,
'filter':_get_filter,
'expandEntityReferences':_get_expandEntityReferences,
'currentNode':_get_currentNode
}
_writeComputedAttrs = {'currentNode': _set_currentNode
}
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
_readComputedAttrs.keys())
|
#!/usr/bin/env python3
#
# Based on:
# http://askubuntu.com/questions/508236/how-can-i-run-code-whenever-a-usb-device-is-unplugged-without-requiring-root/
# http://stackoverflow.com/questions/469243/how-can-i-listen-for-usb-device-inserted-events-in-linux-in-python
# https://pyudev.readthedocs.org/en/latest/guide.html#asynchronous-monitoring
#
#
# Troubleshooting:
#
# Problem: The script aborts after waking the laptop from sleep with the following error:
#
# Exception in thread Thread-2:
# Traceback (most recent call last):
# File "/usr/lib/python3.4/threading.py", line 920, in _bootstrap_inner
# self.run()
# File "/usr/lib/python3/dist-packages/pyudev/monitor.py", line 506, in run
# for fd, _ in notifier.poll():
# InterruptedError: [Errno 4] Interrupted system call
#
# Exception in thread Thread-1:
# Traceback (most recent call last):
# File "/usr/lib/python3.4/threading.py", line 920, in _bootstrap_inner
# self.run()
# File "/usr/lib/python3/dist-packages/pyudev/monitor.py", line 506, in run
# for fd, _ in notifier.poll():
# InterruptedError: [Errno 4] Interrupted system call
#
# Cause/solutions:
#
# * Update pyudev to version 0.17 (Aug 26, 2015) or later.
# * https://github.com/pyudev/pyudev/commit/9dbcbf598f5eb77d8972e8d4e368e2fd1afecda8
# * Ubuntu 15.10 (wily) still has version 0.16.
# * Ubuntu xenial doesn't seem to update the version, yet.
# * http://packages.ubuntu.com/search?keywords=python-pyudev
# * Update Python to 3.5 or later.
# * https://docs.python.org/3/whatsnew/3.5.html#pep-475-retry-system-calls-failing-with-eintr
# * Ubuntu 15.10 (wily) has Python 3.5, although it is not the default interpreter.
import os.path
import pyudev
import subprocess
from threading import Timer
class SubsystemMonitor:
    """Debounces udev events for one subsystem: every notification
    (re)arms a one-shot timer, so the callback only fires once events
    have been quiet for ``wait_seconds``.
    """

    def __init__(self, subsystem, callback, wait_seconds):
        # Subsystem name (e.g. 'usb') and the debounced callback.
        self.subsystem = subsystem
        self.callback = callback
        self.wait_seconds = wait_seconds
        # Filled in later: pending threading.Timer, pyudev.Monitor,
        # pyudev.MonitorObserver.
        self.timer = None
        self.monitor = None
        self.observer = None

    def reset_timer(self):
        """Cancel any pending debounce timer."""
        pending, self.timer = self.timer, None
        if pending is not None:
            pending.cancel()

    def start_timer(self):
        """(Re)arm the debounce timer."""
        self.reset_timer()
        timer = Timer(self.wait_seconds, self.callback)
        self.timer = timer
        timer.start()

    def handle_device_notification(self, device):
        """pyudev callback: restart the debounce window (device is ignored)."""
        self.start_timer()
def init_monitors():
    '''Returns a dict that maps a subsystem to a SubsystemMonitor object.

    Includes a synthetic '__start__' entry that runs every callback once
    when the script launches.
    '''
    # Utility helpers: resolve scripts relative to this file and run them.
    base_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))

    def path(name):
        return os.path.join(base_dir, os.path.expanduser(name))

    def call(script, *args):
        return subprocess.call([path(script)] + list(args))

    def usb_hotplug_callback():
        # Runs on USB and Bluetooth events.
        #call('setxkbmap_my_preferences.sh')
        call('xinput_configure_all_my_devices.py')
        call('xset_my_preferences.sh')
        call('xsetwacom_my_preferences.sh', 'desktop')

    def drm_hotplug_callback():
        # Runs on display events.
        call('~/.screenlayout/z-auto-detect-displays.sh')

    def startup_callback():
        # Runs only once, when this script is started.
        usb_hotplug_callback()
        drm_hotplug_callback()

    # Build the monitors and index them by subsystem name.
    return {
        mon.subsystem: mon
        for mon in (
            SubsystemMonitor('__start__', startup_callback, 0.0),
            SubsystemMonitor('usb', usb_hotplug_callback, 1.0),
            SubsystemMonitor('bluetooth', usb_hotplug_callback, 2.0),
            SubsystemMonitor('drm', drm_hotplug_callback, 1.0),
        )
    }
def main():
    """Wire one pyudev observer per subsystem and block forever."""
    monitors = init_monitors()

    # Fire the synthetic startup event once, outside of udev.
    startup = monitors.pop('__start__', None)
    if startup is not None:
        startup.handle_device_notification(None)

    # One netlink monitor + observer thread per remaining subsystem.
    context = pyudev.Context()
    for mon in monitors.values():
        mon.monitor = pyudev.Monitor.from_netlink(context)
        mon.monitor.filter_by(subsystem=mon.subsystem)
        mon.observer = pyudev.MonitorObserver(
            mon.monitor, callback=mon.handle_device_notification, daemon=False)
    for mon in monitors.values():
        mon.observer.start()

    # This keeps the program alive: non-daemon observer threads never finish.
    for mon in monitors.values():
        mon.observer.join()
    # Huh... Does not work? TODO: remove the daemon thing...
    # The program will never exit because the observer threads are non-daemon.
    # https://docs.python.org/3.5/library/threading.html#threading.Thread.daemon


if __name__ == '__main__':
    main()
|
from rest_framework import serializers
from team.models import Team, Contest, TeamContest, Reward
class TeamSerializer(serializers.ModelSerializer):
    """Serializes Team objects (id, name, disable) for the REST API."""
    #users = UserSerializer(many=True, read_only=True)
    class Meta:
        model = Team
        fields = ['id', 'name', 'disable']  # missing 'users' (see commented-out field above)
        read_only_fields = ['id']
class ContestSerializer(serializers.ModelSerializer):
    """Serializes Contest objects, with their teams nested read-only."""
    teams = TeamSerializer(many=True, read_only=True)
    class Meta:
        model = Contest
        fields = ['id', 'teams', 'name', 'start_time', 'contest_type', 'contest_level']
        read_only_fields = ['id']
class RewardSerializer(serializers.ModelSerializer):
    """Serializes Reward objects (team_contest reference plus record)."""
    class Meta:
        model = Reward
        fields = ['id', 'team_contest', 'record']
        read_only_fields = ['id']
|
import os
import audlib
from audlib.data.dataset import LongFile
HOME = os.path.dirname(audlib.__file__)
def test_LongFile():
    """Consecutive unit-length slices of one file yield equal-length signals."""
    wav_path = os.path.join(HOME, 'samples/welcome16k.wav')
    data = LongFile(wav_path, 1, 1)
    assert data[0].signal.shape[0] == data[1].signal.shape[0]


if __name__ == '__main__':
    test_LongFile()
|
from setuptools import setup
# Packaging metadata for the Diamond (DMD) Electrum server.
PY_MODULES = [
    'electrumdmdserver.__init__',
    'electrumdmdserver.utils',
    'electrumdmdserver.storage',
    'electrumdmdserver.deserialize',
    'electrumdmdserver.networks',
    'electrumdmdserver.blockchain_processor',
    'electrumdmdserver.server_processor',
    'electrumdmdserver.processor',
    'electrumdmdserver.version',
    'electrumdmdserver.ircthread',
    'electrumdmdserver.stratum_tcp',
]

setup(
    name="electrum-dmd-server",
    version="1.0",
    description="Diamond Electrum Server",
    long_description="""Server for the Electrum Lightweight Diamond Wallet""",
    author="Thomas Voegtlin",
    author_email="thomasv@electrum.org",
    license="MIT Licence",
    url="https://github.com/bitbandi/electrum-dmd-server/",
    scripts=['run_electrum_dmd_server.py', 'electrum-dmd-server'],
    install_requires=['plyvel', 'jsonrpclib', 'irc >= 11, <=14.0'],
    package_dir={'electrumdmdserver': 'src'},
    py_modules=PY_MODULES,
)
|
from util import case
from gcontext.base import method, pre_hook, post_hook
from gcontext import get_context
class A:
    # Class attribute exposed to callees through the shared context.
    x = 3

    @method
    def run(self):
        # NOTE(review): presumably @method pushes this object's attributes
        # into the context read by get_context() -- confirm against
        # gcontext.base before relying on it.
        return B().walk() + 1
class B:
    @method
    def walk(self):
        # Reads 'x' from the ambient call context (set up by a caller's
        # @method wrapper), not from this instance.
        return get_context()['x']
class C(A):
    def incr(self, *args, ret=None):
        # Post-hook: receives the hooked method's return value and adds 2.
        return ret + 2

    @method
    def run(self):
        # Wrap B.walk with incr for the duration of the inherited run().
        with post_hook(B.walk, self.incr):
            return super().run()
# A.run -> B.walk returns context x (3); the post-hook adds 2 -> 5;
# A.run adds 1 -> 6.
obj = C()
res = obj.run()
case.assertEqual(res, 6)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 James Gaston
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import pyglet
from pyglet.gl import *
from .shape import Rectangle, Ellipse, Cross, Star
from .widget import Widget
# ----------------------------------------------------------------------- HBox
class Canvas(Widget):
    ''' Canvas widget

    any empty widget, acts as a parent container for other widgets
    it handles mouse event and draw event routing.
    it handles relative move
    it does not handle relative scaling of children, if the window is resized
    '''

    # _________________________________________________________________ __init__
    def __init__(self, x=0, y=0, z=0, width=300, height=300,
                 anchor_x='left', anchor_y='bottom', elements=None):
        '''elements: optional iterable of child widgets to adopt.

        BUGFIX: the default used to be a mutable ``[]`` shared by every
        call; ``None`` is the safe, equivalent sentinel.
        '''
        Widget.__init__(self, x, y, z, width, height, anchor_x, anchor_y)
        if elements is None:
            elements = []
        # Adopt each child: record it and point it back at this canvas.
        for i, element in enumerate(elements):
            self._elements[i] = element
            element.set_parent(self)

    # _________________________________________________________________ on_draw
    def on_draw(self):
        '''Draw this canvas clipped to its own rectangle, then its children.'''
        # save scissoring state so it can be restored afterwards
        savescissor = (GLint * 4)()
        glGetIntegerv(GL_SCISSOR_BOX, savescissor)
        save_scissor_enable = glIsEnabled(GL_SCISSOR_TEST)
        # clip all drawing to this widget's screen rectangle
        glScissor(self._root_x, self._root_y,
                  self.width, self.height)
        glEnable(GL_SCISSOR_TEST)
        # draw self in local coordinates
        glTranslatef(self._root_x, self._root_y, self._root_z)
        self.dispatch_event('widget_draw', self)
        glTranslatef(-self._root_x, -self._root_y, -self._root_z)
        # restore scissoring
        glScissor(savescissor[0], savescissor[1],
                  savescissor[2], savescissor[3])
        if not save_scissor_enable:
            glDisable(GL_SCISSOR_TEST)
        # draw children
        Widget.on_draw(self)

Canvas.register_event_type('widget_draw')
|
#!/usr/bin/env python
# Module-level globals exercised by the scoping demos below.
a = 0
b = 1
c = 2
def my_function():
    """Local assignment: this `a` shadows the module-level `a`."""
    a = 3
    print("a = %d" % (a) )
def my_b():
    """Rebinds the module-level `b` via an explicit `global` declaration."""
    global b
    b = 11
    print("b = %d" % (b) )
def my_c():
    """Read-then-write of a global needs an explicit `global` declaration.

    BUGFIX: without `global c`, the assignment below makes `c` local for
    the entire function body, so the *first* print raises
    UnboundLocalError before ever reaching the assignment.
    """
    global c
    print("c = %d" % (c) )
    c = 22
    print("c = %d" % (c) )
# Run each demo and show the resulting module-level values.
my_function()
print("a = %d" % (a) )  # global a untouched by my_function's local assignment
my_b()
print("b = %d" % (b) )  # global b rebound to 11 via `global` in my_b
my_c()
print("c = %d" % (c) )  # see my_c for the global-vs-local subtlety with c
# Generated by Django 3.2.3 on 2022-01-12 11:56
import datetime
from django.db import migrations, models
import base.storage
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Photo model.
    # Generated code -- avoid hand-editing the field definitions.

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Photo",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        blank=True,
                        help_text="Enter here the photo title.",
                        max_length=200,
                        verbose_name="Title",
                    ),
                ),
                (
                    "published",
                    models.DateTimeField(
                        # Callable default: evaluated at save time, not import time.
                        default=datetime.datetime.now,
                        help_text="Enter here thr published date.",
                        verbose_name="Date published",
                    ),
                ),
                (
                    "image",
                    models.ImageField(
                        help_text="Upload your image here.",
                        # Custom storage backend that names uploads by UUID.
                        storage=base.storage.UUIDStorage,
                        upload_to="images/",
                        verbose_name="Image",
                    ),
                ),
            ],
        ),
    ]
|
# app.py - a minimal flask api using flask_restful
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS
from docker.errors import APIError
import docker
import logging
app = Flask(__name__)
api = Api(app)
CORS(app, resources={r"/*": {"origins": "*"}})
parser = reqparse.RequestParser()
class Index(Resource):
    """Landing endpoint."""

    def get(self):
        """GET / -> static index message."""
        message = "Index page"
        return message
class Sys_Info(Resource):
    """Docker engine information."""

    def get(self):
        """GET /system/info -> docker `info` payload."""
        info = client.info()
        return info
class Sys_Version(Resource):
    """Docker engine version details."""

    def get(self):
        """GET /system/version -> docker `version` payload."""
        version = client.version()
        return version
class Sys_Df(Resource):
    """Docker disk-usage summary."""

    def get(self):
        """GET /system/df -> docker `df` payload."""
        usage = client.df()
        return usage
class Sys_Events(Resource):
    """Docker engine event stream."""

    def get(self):
        """GET /system/events -> docker events stream."""
        events = client.events()
        return events
## Image Section ##
class Images(Resource):
    """Collection endpoint for docker images."""

    def get(self):
        """GET /images -> attribute dicts for every local image."""
        return [image.attrs for image in client.images.list()]

    def post(self):
        """POST /images -> pull an image.

        Body args: name (repository), tag (optional, defaults to 'latest').
        """
        # Per-request parser: calling add_argument on the shared
        # module-level parser accumulates duplicate arguments per request.
        post_parser = reqparse.RequestParser()
        post_parser.add_argument('name', help='Package name')
        post_parser.add_argument('tag', help='Package Tag/Version')
        args = post_parser.parse_args()
        # BUGFIX: a missing 'tag' arrives as None (not ""), which slipped
        # past the old == "" check and made docker pull *every* tag.
        tag = args.tag or "latest"
        img = client.images.pull(args.name, tag)
        return img.attrs, 201
class Image(Resource):
    """Single-image endpoint."""

    def get(self, image_id):
        """GET /images/<id> -> image attributes."""
        return client.images.get(image_id).attrs

    def delete(self, image_id):
        """DELETE /images/<id>; surfaces docker API errors as JSON."""
        try:
            result = client.images.remove(image_id)
        except APIError as err:
            app.logger.info('Delete Image : %s', str(err))
            return {'message': err.explanation, 'reason': err.response.reason, 'status_code': err.response.status_code}, err.response.status_code
        return result, 204
class ImageHistory(Resource):
    """Image build-history endpoint."""

    def get(self, image_id):
        """GET /images/<id>/history -> docker history entries."""
        image = client.images.get(image_id)
        return image.history()
class ImageTag(Resource):
    """Tagging endpoint for an image."""

    def post(self, image_id):
        """POST /images/<id>/tag -> tag the image as repository:tag."""
        # Per-request parser: the shared module-level parser accumulates
        # duplicate add_argument calls across requests.
        tag_parser = reqparse.RequestParser()
        tag_parser.add_argument('repository', help='Repository name')
        tag_parser.add_argument('tag', help='tag name')
        args = tag_parser.parse_args()
        img = client.images.get(image_id).tag(args.repository, args.tag)
        return img, 201
class ImageSearch(Resource):
    """Registry search endpoint."""

    def post(self):
        """POST /images/search with args.name -> registry search results."""
        # Per-request parser: the shared module-level parser accumulates
        # duplicate add_argument calls across requests.
        search_parser = reqparse.RequestParser()
        search_parser.add_argument('name', help='Package name')
        args = search_parser.parse_args()
        img = client.images.search(args.name)
        return img, 201
## Container Section ##
class Containers(Resource):
    """Collection endpoint for containers (running and stopped)."""

    def get(self):
        """GET /containers -> attribute dicts for every container."""
        return [container.attrs for container in client.containers.list(all=True)]
class Container(Resource):
    """Single-container endpoint."""

    def get(self, container_id):
        """GET /containers/<id> -> container attributes."""
        return client.containers.get(container_id).attrs

    def delete(self, container_id):
        """DELETE /containers/<id>; surfaces docker API errors as JSON."""
        try:
            result = client.containers.get(container_id).remove()
        except APIError as err:
            app.logger.info('Delete Container : %s', str(err))
            return {'message': err.explanation, 'reason': err.response.reason, 'status_code': err.response.status_code}, err.response.status_code
        return result, 204
class ContainerStatus(Resource):
    """Query or change a container's run state."""

    # Whitelisted state-changing actions; each matches a docker-py method.
    _ACTIONS = ('start', 'restart', 'stop', 'pause', 'unpause')

    def get(self, container_id):
        """GET /containers/<id>/status -> current status string."""
        return client.containers.get(container_id).status

    def put(self, container_id):
        """PUT /containers/<id>/status; args.status picks the action, 400 otherwise."""
        # Per-request parser: the shared module-level parser accumulates
        # duplicate add_argument calls across requests.
        status_parser = reqparse.RequestParser()
        status_parser.add_argument('status', help='actions type')
        args = status_parser.parse_args()
        container = client.containers.get(container_id)
        if args.status in self._ACTIONS:
            # Dispatch to the docker-py method of the same name.
            return getattr(container, args.status)(), 201
        return False, 400
class ContainerLogs(Resource):
    """Container log endpoint."""

    def get(self, container_id):
        """GET /containers/<id>/logs -> raw log output."""
        container = client.containers.get(container_id)
        return container.logs()
## Networks Section ##
class Networks(Resource):
    """Collection endpoint for docker networks."""

    def get(self):
        """GET /networks -> attribute dicts for every network."""
        return [network.attrs for network in client.networks.list()]
class Network(Resource):
    """Single-network endpoint."""

    def get(self, net_id):
        """GET /networks/<id> -> network attributes.

        BUGFIX: previously returned the docker-py Network *object*, which
        is not JSON-serializable; return .attrs like the /networks listing.
        """
        return client.networks.get(net_id).attrs

    def delete(self, net_id):
        """DELETE /networks/<id>; surfaces docker API errors as JSON."""
        try:
            result = client.networks.get(net_id).remove()
        except APIError as err:
            app.logger.info('Delete Network : %s', str(err))
            return {'message': err.explanation, 'reason': err.response.reason, 'status_code': err.response.status_code}, err.response.status_code
        return result, 204
## Volumes Section ##
class Volumes(Resource):
    """Collection endpoint for docker volumes."""

    def get(self):
        """GET /volumes -> attribute dicts for every volume."""
        return [volume.attrs for volume in client.volumes.list()]
class Volume(Resource):
    """Single-volume endpoint."""

    def get(self, vol_id):
        """GET /volumes/<id> -> volume attributes.

        BUGFIX: previously returned the docker-py Volume *object*, which
        is not JSON-serializable; return .attrs like the /volumes listing.
        """
        return client.volumes.get(vol_id).attrs

    def delete(self, vol_id):
        """DELETE /volumes/<id>; surfaces docker API errors as JSON."""
        try:
            result = client.volumes.get(vol_id).remove()
        except APIError as err:
            app.logger.info('Delete Volume : %s', str(err))
            return {'message': err.explanation, 'reason': err.response.reason, 'status_code': err.response.status_code}, err.response.status_code
        return result, 204
# Shared docker client used by every resource above.
client = docker.from_env()

# Route table: resource class -> URL rule.
_ROUTES = (
    (Index, '/'),
    (Sys_Info, '/system/info'),
    (Sys_Version, '/system/version'),
    (Sys_Df, '/system/df'),
    (Sys_Events, '/system/events'),
    (Images, '/images'),
    (Image, '/images/<image_id>'),
    (ImageHistory, '/images/<image_id>/history'),
    (ImageTag, '/images/<image_id>/tag'),
    (ImageSearch, '/images/search'),
    (Containers, '/containers'),
    (Container, '/containers/<container_id>'),
    (ContainerStatus, '/containers/<container_id>/status'),
    (ContainerLogs, '/containers/<container_id>/logs'),
    (Networks, '/networks'),
    (Network, '/networks/<net_id>'),
    (Volumes, '/volumes'),
    (Volume, '/volumes/<vol_id>'),
)
for resource, rule in _ROUTES:
    api.add_resource(resource, rule)

if __name__ == '__main__':
    print("############ Docker docking status ############")
    print("Docker Initialized and Connected ? " + str(client.ping()))
    print("###############################################")
    app.run(debug=True, host='0.0.0.0')
|
from guizero import App, ButtonGroup
def selected():
    """Command callback: echo the currently selected option value."""
    print(choice.value)
app = App()
# Radio-style option group; `command` fires on every change.
# NOTE(review): selected=1 presumably preselects "cheese" -- confirm
# whether guizero expects an index or an option value here.
choice = ButtonGroup(app, options=["cheese", "ham", "salad"], selected=1, command=selected)
app.display()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# NOTE(review): the Turkish text below was mojibake (UTF-8 bytes decoded
# through a wrong single-byte codepage); restored to proper UTF-8.
{
    'name': 'Turkey - Accounting',
    'version': '1.beta',
    'category': 'Localization/Account Charts',
    'description': """
Türkiye için Tek düzen hesap planı şablonu OpenERP Modülü.
==========================================================
Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır
    * Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket, banka hesap
      bilgileriniz, ilgili para birimi gibi bilgiler isteyecek.
    """,
    'author': 'Ahmet Altınışık',
    'maintainer': 'https://launchpad.net/~openerp-turkey',
    'website': 'https://launchpad.net/openerp-turkey',
    'depends': [
        'account',
        'base_vat',
        'account_chart',
    ],
    'data': [
        'account_code_template.xml',
        'account_tdhp_turkey.xml',
        'account_tax_code_template.xml',
        'account_chart_template.xml',
        'account_tax_template.xml',
        'l10n_tr_wizard.xml',
    ],
    'demo': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas
import plotly.graph_objs as go
def load_df_as_json():
    """Build the merged run/instrument table for completed sequencer runs.

    NOTE(review): despite the name, this returns a pandas DataFrame, not
    JSON -- the callers serialize it themselves.
    """
    idx = pandas.IndexSlice
    # Cached HDF5 extracts; multi-index layout assumed to include
    # (run_alias, record-type, ..., key) levels -- TODO confirm against
    # the cache writers.
    rs_raw = pandas.read_hdf('./data/runscanner_illumina_cache.hd5')
    inst_raw = pandas.read_hdf('./data/pinery_instruments_cache.hd5')
    # Flow-cell rows: pivot the key/value pairs into columns.
    rs_flow = rs_raw.loc[idx[:, 'flow_cell', :, :], 'value'].unstack('key')
    rs_flow = rs_flow[['sequencerName', 'startDate', 'healthType']]
    rs_flow = rs_flow.reset_index()
    # Attach the instrument model via the instrument name.
    inst_model = inst_raw[['name_instrument', 'name_model']]
    rs_flow = rs_flow.merge(
        inst_model, 'left', left_on='sequencerName', right_on='name_instrument'
    )
    # Per-lane Read/Index rows; total the Yield per run.
    rs_lane = rs_raw.loc[
        idx[:, ['Read', 'Index'], :, :], 'value'
    ].unstack('key')
    rs_lane_yield = rs_lane.groupby('run_alias')[['Yield']].sum()
    rs_lane_yield = rs_lane_yield.rename(columns={'Yield': 'Total Yield (GB)'})
    final = rs_flow.merge(rs_lane_yield, on='run_alias', right_index=True)
    # Keep only completed runs; normalize the date column dtype.
    final = final[final['healthType'] == 'COMPLETED']
    final = final.astype({'startDate': 'datetime64[ns]'})
    return final
# Data and layout are built once, at import time.
raw_df = load_df_as_json()
raw_df_table_col_names = [
    {'name': i, 'id': i} for i in raw_df.columns
]
layout = html.Div([
    # Resampling frequency for the bar chart / grouped table.
    dcc.Dropdown(
        id='freq_dropdown',
        options=[
            {'label': 'Daily', 'value': 'D'},
            {'label': 'Weekly', 'value': 'W'},
            {'label': 'Monthly', 'value': 'M'},
            {'label': 'Quarterly', 'value': 'BQ-MAR'},
            {'label': 'Yearly', 'value': 'Y'},
        ],
        value='M',
        clearable=False,
    ),
    # Optional column to split the chart into one trace per group.
    dcc.Dropdown(
        id='colour_by_dropdown',
        options=[
            {'label': 'Machine ID', 'value': 'sequencerName'},
            {'label': 'Machine Model', 'value': 'name_model'},
        ],
        value=None,
        placeholder='Colour By'
    ),
    dcc.Graph(
        id='bar_sum',
    ),
    dcc.Tabs(id="table_tabs", value='grouped', children=[
        dcc.Tab(label='Grouped Data', value='grouped'),
        dcc.Tab(label='All Data', value='all'),
    ]),
    html.Div(
        id='table_tabs_content'
    ),
    # Hidden divs: the pre-dcc.Store pattern for sharing data between callbacks.
    html.Div(
        id='raw_df_json',
        style={'display': 'none'},
        children=raw_df.to_json(
            date_format='iso', orient='records'
        ),
    ),
    html.Div(
        id='df_group_sum',
        style={'display': 'none'},
    ),
])
# Attach to the multi-page app when available; otherwise run standalone.
try:
    from app import app
    app.layout = layout
except ModuleNotFoundError:
    import dash
    app = dash.Dash(__name__)
    app.layout = layout
@app.callback(
    Output('bar_sum', 'figure'),
    [Input('df_group_sum', 'children'),
     Input('colour_by_dropdown', 'value')]
)
def create_bar_sum_fig(df_group_sum, colour_by):
    """Bar chart of total yield; one trace per group when colour_by is set."""
    df = pandas.read_json(df_group_sum, orient='split')
    fig_layout = {
        'yaxis': {'title': 'PF Yield (GB)'},
        'legend': {'orientation': 'h'},
    }
    if colour_by is None:
        traces = [go.Bar(
            x=df['startDate'],
            y=df['Total Yield (GB)']
        )]
    else:
        traces = [
            go.Bar(
                x=list(group['startDate']),
                y=list(group['Total Yield (GB)']),
                name=group_name
            )
            for group_name, group in df.groupby(colour_by)
        ]
    return {
        'data': traces,
        'layout': fig_layout,
    }
@app.callback(
    Output('table_tabs_content', 'children'),
    [Input('table_tabs', 'value'),
     Input('raw_df_json', 'children'),
     Input('df_group_sum', 'children')]
)
def update_table_tab(selected_tab, raw_df_json, group_df_json):
    """Render the DataTable for whichever tab is selected.

    BUGFIX: the original left `df` unbound (UnboundLocalError) for any
    unexpected tab value; default to the grouped view instead.
    """
    if selected_tab == 'all':
        df = pandas.read_json(raw_df_json, orient='records')
    else:
        # 'grouped', and a safe fallback for any unknown tab value.
        df = pandas.read_json(group_df_json, orient='split')
    col_names = [{'name': i, 'id': i} for i in df.columns]
    return dash_table.DataTable(
        id='test',
        columns=col_names,
        # 'rows' relied on pandas' removed orient-abbreviation matching;
        # 'records' is the explicit, supported spelling.
        data=df.to_dict('records')
    )
@app.callback(
    Output('df_group_sum', 'children'),
    [Input('raw_df_json', 'children'),
     Input('freq_dropdown', 'value'),
     Input('colour_by_dropdown', 'value')]
)
def update_grouped_df(raw_df_json, frequency, colour_grouper):
    """Resample raw rows by date frequency (plus the optional colour group)."""
    raw = pandas.read_json(
        raw_df_json, orient='records', convert_dates=['startDate']
    )
    grouper = [pandas.Grouper(key='startDate', freq=frequency)]
    if colour_grouper is not None:
        grouper.append(colour_grouper)
    grouped = raw.groupby(grouper).sum().reset_index()
    return grouped.to_json(
        date_format='iso', orient='split',
    )
if __name__ == '__main__':
    # Standalone mode: dev server only (use a WSGI server in production).
    app.run_server(debug=True)
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class UnbindAlarmRuleReceiversRequest(Request):
    """Request wrapper for the monitor UnbindAlarmRuleReceivers API."""

    def __init__(self):
        super(UnbindAlarmRuleReceiversRequest, self).__init__(
            'monitor', 'qcloudcliV1', 'UnbindAlarmRuleReceivers', 'monitor.api.qcloud.com')

    def get_alarmRuleId(self):
        """Return the currently-set alarmRuleId parameter, if any."""
        params = self.get_params()
        return params.get('alarmRuleId')

    def set_alarmRuleId(self, alarmRuleId):
        """Set the alarmRuleId request parameter."""
        self.add_param('alarmRuleId', alarmRuleId)
|
from .socp import Model as SOCModel
from .lp import LinConstr, Bounds, CvxConstr, ConeConstr
from .lp import Vars, VarSub, Affine, Convex
from .lp import DecRule, DecRuleSub
from .lp import RoAffine, RoConstr
from .lp import Solution
from .subroutines import *
import numpy as np
from scipy.sparse import csr_matrix
from collections import Iterable
from .lpg_solver import solve as def_sol
class Model:
"""
The Model class creates an object of robust optimization models
"""
def __init__(self, name=None):
    """Create an empty robust optimization model.

    Parameters
    ----------
    name : str
        Name of the model.
    """
    # Deterministic (robust counterpart) model and the support/uncertainty model.
    self.rc_model = SOCModel(mtype='R')
    self.sup_model = SOCModel(nobj=True, mtype='S')
    self.all_constr = []
    self.obj = None
    self.obj_support = None
    self.sign = 1  # +1 for minimization, -1 for maximization
    # Cached primal/dual formulations and the latest solution.
    # (BUGFIX: self.solution was redundantly assigned twice.)
    self.primal = None
    self.dual = None
    self.solution = None
    self.pupdate = True  # True => the cached primal is stale
    self.dupdate = True  # True => the cached dual is stale
    self.name = name
def reset(self):
    """Clear all constraints, cached formulations, and the inner model."""
    self.all_constr = []
    self.primal = None
    self.dual = None
    self.pupdate = True
    self.dupdate = True
    self.rc_model.reset()
def dvar(self, shape=(1,), vtype='C', name=None, aux=False):
    """
    Returns an array of decision variables with the given shape
    and variable type.

    Parameters
    ----------
    shape : int or tuple
        Shape of the variable array.
    vtype : {'C', 'B', 'I'}
        Type of the decision variables. 'C' means continuous; 'B'
        means binary, and 'I' means integer.
    name : str
        Name of the variable array
    aux : leave it unspecified.

    Returns
    -------
    new_var : rsome.lp.Vars
        An array of new decision variables
    """
    # Decision variables live in the deterministic (rc) model.
    return self.rc_model.dvar(shape, vtype, name, aux)
def rvar(self, shape=(1,), name=None):
    """
    Returns an array of random variables with the given shape.

    Parameters
    ----------
    shape : int or tuple
        Shape of the variable array.
    name : str
        Name of the variable array

    Returns
    -------
    new_var : rsome.lp.Vars
        An array of new random variables
    """
    # Random variables live in the support (uncertainty) model.
    return self.sup_model.dvar(shape, 'C', name)
def ldr(self, shape=(1,), name=None):
    """
    Returns an array with the given shape of linear decision rule
    variables.

    Parameters
    ----------
    shape : int or tuple
        Shape of the variable array.
    name : str
        Name of the variable array

    Returns
    -------
    new_var : rsome.ro.DecRule
        An array of new linear decision rule variables
    """
    return DecRule(self, shape, name)
def min(self, obj):
"""
Minimize the given objective function.
Parameters
----------
obj
An objective function
Notes
-----
The objective function given as an array must have the size
to be one.
"""
if obj.size > 1:
raise ValueError('Incorrect function dimension.')
self.obj = obj
self.sign = 1
self.pupdate = True
self.dupdate = True
def max(self, obj):
"""
Maximize the given objective function.
Parameters
----------
obj
An objective function
Notes
-----
The objective function given as an array must have the size
to be one.
"""
if obj.size > 1:
raise ValueError('Incorrect function dimension.')
self.obj = obj
self.sign = - 1
self.pupdate = True
self.dupdate = True
def minmax(self, obj, *args):
    """
    Minimize the maximum objective value over the given uncertainty set.

    Parameters
    ----------
    obj
        Objective function involving random variables
    *args
        Constraints or collections of constraints of random variables
        used for defining the uncertainty set

    Notes
    -----
    The uncertainty set defined for the objective function is considered
    the default uncertainty set for the robust model.
    """
    if np.prod(obj.shape) > 1:
        raise ValueError('Incorrect function dimension.')
    # Flatten constraint collections into a single list.
    constraints = []
    for items in args:
        if isinstance(items, Iterable):
            constraints.extend(list(items))
        else:
            constraints.append(items)
    # Every uncertainty-set constraint must belong to the support model.
    sup_model = self.sup_model
    sup_model.reset()
    for item in constraints:
        if item.model is not sup_model:
            raise SyntaxError('Models mismatch.')
        sup_model.st(item)
    self.obj = obj
    # Dual form of the uncertainty set, kept as the default support.
    self.obj_support = sup_model.do_math(primal=False)
    self.sign = 1
    # Cached formulations are now stale.
    self.pupdate = True
    self.dupdate = True
def maxmin(self, obj, *args):
    """
    Maximize the minimum objective value over the given uncertainty set.

    Parameters
    ----------
    obj
        Objective function involving random variables
    *args
        Constraints or collections of constraints of random variables
        used for defining the uncertainty set

    Notes
    -----
    The uncertainty set defined for the objective function is considered
    the default uncertainty set for the robust model.
    """
    if np.prod(obj.shape) > 1:
        raise ValueError('Incorrect function dimension.')
    # Flatten constraint collections into a single list.
    constraints = []
    for items in args:
        if isinstance(items, Iterable):
            constraints.extend(list(items))
        else:
            constraints.append(items)
    # Every uncertainty-set constraint must belong to the support model.
    sup_model = self.sup_model
    sup_model.reset()
    for item in constraints:
        if item.model is not sup_model:
            raise SyntaxError('Models mismatch.')
        sup_model.st(item)
    self.obj = obj
    # Dual form of the uncertainty set, kept as the default support.
    self.obj_support = sup_model.do_math(primal=False)
    self.sign = - 1
    # Cached formulations are now stale.
    self.pupdate = True
    self.dupdate = True
def st(self, *arg):
    """Define constraints of the model.

    Accepts nested iterables of constraints, deterministic constraints
    (LinConstr/Bounds/CvxConstr/ConeConstr), and robust constraints
    (RoConstr).  Equality-type robust constraints are split into a pair
    of one-sided (<=) robust constraints.
    """
    for constr in arg:
        if isinstance(constr, Iterable):
            # Flatten collections recursively.
            for item in constr:
                self.st(item)
        elif isinstance(constr, (LinConstr, Bounds, CvxConstr, ConeConstr)):
            # Deterministic constraints must come from this model's rc_model.
            if (constr.model is not self.rc_model) or \
                    (constr.model.mtype != 'R'):
                raise ValueError('Models mismatch.')
            self.all_constr.append(constr)
        elif isinstance(constr, RoConstr):
            if (constr.dec_model is not self.rc_model) or \
                    (constr.rand_model is not self.sup_model):
                raise ValueError('Models mismatch.')
            # sense may be stored as an array; reduce it to a scalar.
            sense = (constr.sense[0] if isinstance(constr.sense,
                                                   np.ndarray)
                     else constr.sense)
            if sense == 0:
                self.all_constr.append(constr)
            else:
                # Equality: keep both one-sided robust constraints.
                left = RoAffine(constr.raffine, constr.affine,
                                constr.rand_model)
                right = RoAffine(-constr.raffine, -constr.affine,
                                 constr.rand_model)
                self.all_constr.append(RoConstr(left, sense=0))
                self.all_constr.append(RoConstr(right, sense=0))
        else:
            raise TypeError('Unknown type of constraints')
    def do_math(self, primal=True):
        """
        Return the deterministic formulation of the robust model.

        Parameters
        ----------
        primal : bool
            Return the primal formulation if True, otherwise the dual,
            which is derived from a freshly built primal.

        Returns
        -------
        The formulation object produced by ``rc_model.do_math``.
        """
        # Serve the cached formulation when nothing changed since it was
        # built (pupdate/dupdate flag pending modifications).
        if primal:
            if self.primal is not None and not self.pupdate:
                return self.primal
        else:
            if self.dual is not None and not self.dupdate:
                return self.dual
            else:
                # Rebuild the primal first, then ask the decision-rule
                # model for the corresponding dual.
                self.do_math(primal=True)
                return self.rc_model.do_math(False, obj=True)
        self.rc_model.reset()
        if isinstance(self.obj, (Vars, VarSub, Affine, Convex)):
            # Deterministic objective: install it directly.
            self.rc_model.obj = self.obj
            self.rc_model.sign = self.sign
            more_roc = []
        elif isinstance(self.obj, RoAffine):
            # Uncertain objective: bound it with the auxiliary epigraph
            # variable vars[0] and treat the bound as a robust constraint.
            obj_constr = (self.rc_model.vars[0] >= self.sign * self.obj)
            obj_constr.support = self.obj_support
            more_roc = [obj_constr]
        else:
            raise TypeError('Incorrect type for the objective function.')
        for constr in self.all_constr + more_roc:
            if isinstance(constr, (LinConstr, Bounds, CvxConstr)):
                self.rc_model.st(constr)
            if isinstance(constr, RoConstr):
                # Robust constraints are reformulated into deterministic
                # counterparts; constraints without their own support fall
                # back to the objective's (default) uncertainty set.
                if constr.support:
                    rc_constrs = constr.le_to_rc()
                else:
                    rc_constrs = constr.le_to_rc(self.obj_support)
                for rc_constr in rc_constrs:
                    self.rc_model.st(rc_constr)
        formula = self.rc_model.do_math(primal, obj=True)
        # Cache the result and clear the corresponding pending flag.
        if primal:
            self.primal = formula
            self.pupdate = False
        else:
            self.dual = formula
            self.dupdate = False
        return formula
def solve(self, solver=None, display=True, export=False, params={}):
"""
Solve the model with the selected solver interface.
Parameters
----------
solver : {None, lpg_solver, grb_solver, msk_solver}
Solver interface used for model solution. Use default solver
if solver=None.
display : bool
Display option of the solver interface.
export : bool
Export option of the solver interface. A standard model file
is generated if the option is True.
params : dict
A dictionary that specifies parameters of the selected solver.
So far the argument only applies to Gurobi and MOSEK.
"""
if solver is None:
solution = def_sol(self.do_math(), display, export, params)
else:
solution = solver.solve(self.do_math(), display, export, params)
if isinstance(solution, Solution):
self.rc_model.solution = solution
else:
if solution is None:
self.rc_model.solution = None
else:
x = solution.x
self.rc_model.solution = Solution(x[0], x, solution.status)
self.solution = self.rc_model.solution
def get(self):
if self.rc_model.solution is None:
raise SyntaxError('The model is unsolved or no feasible solution.')
return self.sign * self.rc_model.solution.objval
|
import random
import pytest
import numpy as np
import feature_utils
class TestFeatureCompiler:
    """Unit tests for feature_utils' one-hot encoding and mask sampling."""

    @classmethod
    def setup_class(cls):
        pass

    @classmethod
    def teardown_class(cls):
        pass

    def setup_method(self, method):
        pass

    def teardown_method(self, method):
        pass

    def test_oneHotEncoder(self):
        """Create valid label arrays and verify the one-hot-encoder."""
        # Create random labels in [0, 9].
        num_labels = 10

        # Test various array shapes and data types.
        dims = [(3,), (2, 3), (64, 64)]
        dtypes = (np.uint8, np.int8, np.int16, np.float16, np.float32, None)
        for dim in dims:
            for dtype in dtypes:
                # Compute random labels and convert the array to the test type.
                labels = np.random.randint(0, num_labels, dim)
                labels = labels.astype(dtype) if dtype else labels.tolist()

                # Encode the labels and verify shape and data type.
                hot = feature_utils.oneHotEncoder(labels, num_labels)
                assert hot.dtype == np.uint16
                assert np.array(hot).shape == (num_labels, *dim)

                # The encoding is along the first axis and each column must
                # therefore contain exactly one non-zero entry, and that entry
                # must be 1.
                assert np.array_equal(np.count_nonzero(hot, axis=0), np.ones(dim))
                assert np.array_equal(np.sum(hot, axis=0), np.ones(dim))

                # Convert the hot-label to normal label and ensure it is correct.
                assert np.array_equal(np.argmax(hot, axis=0), labels)

    def test_oneHotEncoder_err(self):
        """Degenerate inputs to one-hot-encoder."""
        enc = feature_utils.oneHotEncoder

        # Must not raise any errors.
        enc([0, 2], 3)

        # Degenerate input array.
        with pytest.raises(AssertionError):
            feature_utils.oneHotEncoder([], 10)

        # Invalid number of labels.
        for num_classes in [-1, 0, 0.5, 1.5]:
            with pytest.raises(AssertionError):
                enc([0, 2], num_classes)

        # Label ID is larger than the number of labels.
        with pytest.raises(AssertionError):
            enc([1, 2], 2)

        # Label ID is negative.
        with pytest.raises(AssertionError):
            enc([-1, 2], 10)

        # Label ID is a float.
        with pytest.raises(AssertionError):
            enc([0, 1.5], 10)

        # Number of classes is larger than 16 Bit number.
        # NOTE(review): the float label 1.5 would trip the float-label check
        # on its own; integer labels (eg [0, 1]) would isolate the 16-bit
        # num-classes check — confirm against oneHotEncoder's assertions.
        with pytest.raises(AssertionError):
            enc([0, 1.5], 2 ** 16)

    def test_sampleMasks(self):
        """Use a tiny test matrix that is easy to verify manually."""
        random.seed(0)
        np.random.seed(0)
        sampleMasks = feature_utils.sampleMasks

        mask_valid = np.zeros((1, 4), np.uint8)
        mask_fgbg = np.zeros_like(mask_valid)
        mask_bbox = np.zeros_like(mask_valid)
        mask_cls = np.zeros_like(mask_valid)
        mask_objid_at_pix = np.zeros_like(mask_valid)

        # Rows 0-3 are valid, rows 0-1 & 4-5 are suitable for FG/BG estimation,
        # rows 0 & 4 are suitable for BBox estimation and, finally, rows 1 & 5
        # are suitable for class estimation (eg the cube number).
        mask_valid[0] = [1, 1, 0, 0]
        mask_fgbg[0] = [1, 1, 0, 0]
        mask_bbox[0] = [1, 0, 1, 0]
        mask_cls[0] = [0, 1, 1, 0]
        mask_objid_at_pix[0] = [1, 2, 0, 3]

        for N in [1, 20]:
            sm_bbox, sm_isFg, sm_cls = sampleMasks(
                mask_valid, mask_fgbg, mask_bbox, mask_cls, mask_objid_at_pix, N)
            assert sm_bbox.shape == sm_isFg.shape == sm_cls.shape == mask_valid.shape
            assert sm_bbox.dtype == sm_isFg.dtype == sm_cls.dtype == mask_valid.dtype

            # FGBG mask must be a subset of valid m_fgbg.
            assert sm_bbox[0].tolist() == [1, 0, 0, 0]
            assert sm_cls[0].tolist() == [0, 1, 0, 0]
            assert sm_isFg[0].tolist() == [1, 1, 0, 0]

    def test_sampleMask_objID(self):
        """ Verify that the algorithm samples objIDs uniformly.

        For this test we assume all locations are valid and suitable for BBox
        and Label estimation.

        The test will create a distinct regions that belong to different
        objects. The sampled BBox/Label masks must have been chosen uniformly
        from it.
        """
        # Number of distinct object IDs (exluding Zero) in this test.
        num_objIDs = 5

        mask_valid = np.ones((2 * num_objIDs, 2 * num_objIDs), np.uint8)
        mask_bbox = np.ones_like(mask_valid)
        mask_cls = np.ones_like(mask_valid)
        mask_objid_at_pix = np.zeros_like(mask_valid)

        # Create block-diagonal matrix. Each block belongs to a different
        # object. Note: IDs cannot be zero because that would be tantamount ot
        # background.
        block_len = 2
        block_area = block_len ** 2
        for i in range(num_objIDs):
            a, b = i * block_len, (i + 1) * block_len
            mask_objid_at_pix[a:b, a:b] = i + 1

        # Mark all foreground locations.
        mask_fgbg = np.zeros_like(mask_valid)
        mask_fgbg[np.nonzero(mask_objid_at_pix)] = 1

        # Sample various number of locations. We also want to verify that
        # sampleMasks picks locations that belong to different objIDs. This
        # implies that we have to sample at least num_objIDs per mask. The
        # upper limit in the range expression simply means that we are asking
        # for mores locations than have a non-zero objID and the sampling must
        # saturate there.
        for N in range(1, block_area + 10):
            # Sample the masks.
            sm_bbox, sm_isFg, sm_cls = feature_utils.sampleMasks(
                mask_valid, mask_fgbg, mask_bbox, mask_cls, mask_objid_at_pix, N)

            # We must have N entries per distinct objID, unless N is larger
            # than the number of valid objects.
            num_hits = min(N * num_objIDs, block_area * num_objIDs)

            # Cls and BBox must contain N non-zero entries whereas `isFg` must
            # contain twice that because we sample N foreground and background
            # locations each.
            assert np.count_nonzero(sm_cls) == num_hits
            assert np.count_nonzero(sm_bbox) == num_hits
            assert np.count_nonzero(sm_isFg) == 2 * num_hits

            # We must have the same number of samples for foreground and
            # background.
            ids = mask_objid_at_pix[np.nonzero(sm_isFg)]
            assert np.count_nonzero(ids != 0) == num_hits
            assert np.count_nonzero(ids == 0) == num_hits
            del num_hits

            # BBox and Label must each have the samples from every objID except
            # Zero, whereas the isFg must contain objID Zero as well.
            # FIX(review): these comparisons previously had no `assert` and
            # therefore could never fail; they are now asserted.
            num_hits = min(N, block_area)
            for objID in range(1, num_objIDs + 1):
                ids = mask_objid_at_pix[np.nonzero(sm_bbox)]
                assert np.count_nonzero(ids == objID) == num_hits
                ids = mask_objid_at_pix[np.nonzero(sm_cls)]
                assert np.count_nonzero(ids == objID) == num_hits
                ids = mask_objid_at_pix[np.nonzero(sm_isFg)]
                assert np.count_nonzero(ids == objID) == num_hits
            # The background (objID 0) count equals num_hits per object, ie
            # min(N, block_area) * num_objIDs in total, which matches the
            # `ids == 0` assertion above.
            ids = mask_objid_at_pix[np.nonzero(sm_isFg)]
            assert np.count_nonzero(ids == 0) == num_hits * num_objIDs
|
from scoap3.modules.search.utils import Scoap3RecordsSearch
def test_escape_query_string():
    """Forward slashes inside a query string are escaped in place."""
    data = {
        'query': {
            'query_string': {
                'query': '10.1016/j.nuclphysb.2018.07.004'
            }
        }
    }
    Scoap3RecordsSearch.escape_query_string(data)
    escaped = data['query']['query_string']['query']
    assert escaped == '10.1016\\/j.nuclphysb.2018.07.004'
def test_escape_query_string_empty():
    """An empty payload passes through untouched."""
    payload = {}
    Scoap3RecordsSearch.escape_query_string(payload)
    assert payload == {}
|
#!/usr/bin/env python
import numpy as np
import math
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
    """Return True when R is (numerically) a valid 3x3 rotation matrix.

    R is a rotation matrix iff R^T R equals the identity; the check uses
    the Frobenius norm of the residual with a 1e-6 tolerance.
    """
    residual = np.identity(3, dtype=R.dtype) - np.dot(np.transpose(R), R)
    return np.linalg.norm(residual) < 1e-6
def rotationMatrixToEulerAngles(R):
    """Convert a rotation matrix into Euler angles, returned as [z, y, x].

    Handles the gimbal-lock (singular) case where cos(y) is nearly zero.
    """
    assert (isRotationMatrix(R))
    # cos(y) from the first column.
    cos_y = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_y < 1e-6:
        # Gimbal lock: z is fixed to zero by convention.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], cos_y)
        z = 0
    else:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], cos_y)
        z = math.atan2(R[1, 0], R[0, 0])
    return np.array([z, y, x])
def Generate_Transformation_Matrix(A, B):
    """Rigid (Kabsch/SVD) alignment of two Nx3 point sets.

    Finds the rotation R and translation t that map point set A onto B,
    and assembles them into a 4x4 homogeneous transformation matrix.

    Returns
    -------
    (R, t, transformation) : 3x3 rotation, 3-vector translation, 4x4 matrix.
    """
    print("Inside function")
    assert len(A) == len(B)

    n_points = A.shape[0]  # number of rows

    # Centroids of both point clouds.
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)

    # Centre both clouds at the origin.
    A_centered = A - np.tile(centroid_A, (n_points, 1))
    B_centered = B - np.tile(centroid_B, (n_points, 1))

    # 3x3 covariance matrix and its SVD (U, S, Vt).
    H = np.dot(np.transpose(A_centered), B_centered)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)

    # Guard against a reflection (det = -1): flip the last row of Vt.
    if np.linalg.det(R) < 0:
        print ("Reflection detected")
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)

    # Translation taking the rotated centroid of A onto the centroid of B.
    t = -R.dot(centroid_A.T) + centroid_B.T

    # Assemble the homogeneous 4x4 transformation.
    transformation = np.concatenate((R, np.reshape(t, (3, 1))), axis=1)
    transformation = (np.concatenate(
        (transformation.T, np.reshape([0, 0, 0, 1], (4, 1))), axis=1)).T
    np.set_printoptions(suppress=True)
    return R, t, transformation
def main(B, A):
    """Compute the rigid transform between two point sets and write the
    6-DOF result (x, y, z, yaw, pitch, roll) to ../scripts/output.tmp.

    NOTE(review): the parameters are declared as (B, A) while the call at
    the bottom of the file is main(A, B), so the roles of the two point
    sets are swapped at the call site — confirm the intended direction.
    """
    A = np.array(A)
    B = np.array(B)
    print("Transform Points")
    R, t, transformation = Generate_Transformation_Matrix(A, B)
    R = transformation[0:3, 0:3]
    eul = rotationMatrixToEulerAngles(R)
    translation = transformation[0:3, 3:4].transpose()[0]
    print("Translation")
    print(translation)
    static_tf = np.append(translation, eul)
    print("Static Transform")
    print(static_tf)
    # Fix: use a context manager so the file is closed even on error, and
    # join the values instead of quadratic string concatenation.
    with open("../scripts/output.tmp", "w") as f:
        f.write(",".join(str(value) for value in static_tf))
if __name__ == "__main__":
    # Input file layout: the first line holds the point count N, followed
    # by N comma-separated 3-D points for A and then N points for B.
    filepath = '../scripts/input.tmp'
    A=[]
    B=[]
    with open(filepath) as fp:
        line = fp.readline()
        count = int(line)
        for i in range(count):
            line = fp.readline()
            temp = line.split(",")
            temp= [float(x) for x in temp]
            A.append(temp)
        for i in range(count):
            line = fp.readline()
            temp = line.split(",")
            temp= [float(x) for x in temp]
            B.append(temp)
    # NOTE(review): main is declared as main(B, A) but called as main(A, B),
    # swapping the roles of the two point sets — confirm the intended
    # mapping direction.
    main(A,B)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily, Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""
Inference Utilities
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from cv2 import resize
import logging
from utils.misc_utils import get_center
# Axis-aligned bounding box; whether (x, y) is the top-left corner or the
# centre depends on the format (see convert_bbox_format).
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def im2rgb(im):
    """Replicate a single-channel image into 3 channels; pass 3-D arrays through."""
    if im.ndim != 3:
        # Grayscale input: stack the plane three times along the last axis.
        im = np.stack([im, im, im], -1)
    return im
def convert_bbox_format(bbox, to):
    """Convert a Rectangle between 'top-left-based' and 'center-based' forms."""
    x, y = bbox.x, bbox.y
    w, h = bbox.width, bbox.height
    if to == 'top-left-based':
        x -= get_center(w)
        y -= get_center(h)
    elif to == 'center-based':
        x += get_center(w)
        y += get_center(h)
    else:
        raise ValueError("Bbox format: {} was not recognized".format(to))
    return Rectangle(x, y, w, h)
def get_exemplar_images(images, exemplar_size, targets_pos=None):
    """Crop exemplar image from input images

    Crops a (z_height, z_width) window from every image in the batch,
    centred either on the per-image target position or — when
    `targets_pos` is None — on the image centre.
    """
    with tf.name_scope('get_exemplar_image'):
        batch_size, x_height, x_width = images.get_shape().as_list()[:3]
        z_height, z_width = exemplar_size

        if targets_pos is None:
            # Default: every crop is centred on the image centre.
            target_pos_single = [[get_center(x_height), get_center(x_width)]]
            targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
        else:
            targets_pos_ = targets_pos

        # convert to top-left corner based coordinates
        top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
        bottom = tf.to_int32(top + z_height)
        left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
        right = tf.to_int32(left + z_width)

        def _slice(x):
            # Unpacks (image, top, left, bottom, right) for one batch item.
            f, t, l, b, r = x
            c = f[t:b, l:r]
            return c

        exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
        exemplar_img.set_shape([batch_size, z_height, z_width, 3])
        return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
    """Obtain image sub-window, padding with avg channel if area goes outside of border

    Adapted from https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46

    Args:
      im: Image ndarray
      bbox: Named tuple (x, y, width, height) x, y corresponds to the crops center
      size_z: Target + context size
      size_x: The resultant crop size
      context_amount: The amount of context

    Returns:
      (image crop ndarray, scale factor applied to reach size_x)
    """
    cy, cx = bbox.y, bbox.x
    h, w = bbox.height, bbox.width

    # Context-padded square around the target (template scale).
    wc_z = w + context_amount * (w + h)
    hc_z = h + context_amount * (w + h)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = size_z / s_z

    # Grow the window so the template region maps onto size_z within size_x.
    d_search = (size_x - size_z) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    scale_x = size_x / s_x

    image_crop_x, _, _, _, _ = get_subwindow_avg(
        im, [cy, cx], [size_x, size_x], [np.round(s_x), np.round(s_x)])

    return image_crop_x, scale_x
def get_subwindow_avg(im, pos, model_sz, original_sz):
    """Cut a sub-window of size `original_sz` centred at `pos`, padding any
    out-of-bounds area with the per-channel mean colour, then resize to
    `model_sz`.

    Args:
      im: HxWx3 image ndarray.
      pos: [y, x] centre of the window.
      model_sz: [h, w] of the returned patch.
      original_sz: [h, w] of the window to cut; falls back to model_sz.

    Returns:
      (im_patch, left_pad, top_pad, right_pad, bottom_pad)
    """
    # avg_chans = np.mean(im, axis=(0, 1)) # This version is 3x slower
    avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
    if not original_sz:
        original_sz = model_sz
    sz = original_sz
    im_sz = im.shape
    # make sure the size is not too small
    assert im_sz[0] > 2 and im_sz[1] > 2
    c = [get_center(s) for s in sz]

    # Check out-of-bounds coordinates, and set them to avg_chans.
    # FIX: np.int was removed in NumPy 1.24 — use the builtin int instead.
    context_xmin = int(np.round(pos[1] - c[1]))
    context_xmax = int(context_xmin + sz[1] - 1)
    context_ymin = int(np.round(pos[0] - c[0]))
    context_ymax = int(context_ymin + sz[0] - 1)
    left_pad = int(np.maximum(0, -context_xmin))
    top_pad = int(np.maximum(0, -context_ymin))
    right_pad = int(np.maximum(0, context_xmax - im_sz[1] + 1))
    bottom_pad = int(np.maximum(0, context_ymax - im_sz[0] + 1))

    # Shift the window into the (possibly padded) image coordinates.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
        R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[0]))
        G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[1]))
        B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[2]))
        im = np.stack((R, G, B), axis=2)

    im_patch_original = im[context_ymin:context_ymax + 1,
                           context_xmin:context_xmax + 1, :]
    if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
        im_patch = resize(im_patch_original, tuple(model_sz))
    else:
        im_patch = im_patch_original
    return im_patch, left_pad, top_pad, right_pad, bottom_pad
def get_saver(keyword, removes, excepts, repl=None):
    """Build a tf.train.Saver for variables whose name contains `keyword`.

    Args:
      keyword: only variables whose name contains this substring are loaded.
      removes: substrings stripped from each variable name before saving.
      excepts: variables whose name contains any of these are skipped.
      repl: optional list of (old, new) substring pairs applied to names.

    Returns:
      A tf.train.Saver over the selected, renamed variables.
    """
    # FIX: repl previously defaulted to a shared mutable list ([]).
    if repl is None:
        repl = []
    vars_need_load = {}
    for v in tf.global_variables():
        vname = v.name
        if vname.find(keyword) != -1:
            for pattern in excepts:
                if vname.find(pattern) != -1:
                    logging.warning('No Load: '+vname)
                    break
            else:
                # No exclusion pattern matched (for/else): rename & register.
                vname_ori = vname
                for r in removes:
                    vname = vname.replace(r, '')
                for r in repl:
                    vname = vname.replace(r[0], r[1])
                vars_need_load[vname] = v
                logging.warning('Load: ' + vname + ' as ' + vname_ori)
        else:
            logging.warning('No Load: '+vname)
    return tf.train.Saver(vars_need_load)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Base URL of the Apache Traffic Control repository; all link roles below
# build their URIs from this prefix.
REPO_URI = "https://github.com/apache/trafficcontrol/"
# -- Implementation detail directive -----------------------------------------
from docutils import nodes
from sphinx.util.docutils import SphinxDirective
from sphinx.locale import translators, _
class impl(nodes.Admonition, nodes.Element):
    """Custom docutils node representing an "implementation detail" admonition."""
    pass
def visit_impl_node(self, node):
    """Open an ``impl`` node in the writer like a standard admonition."""
    self.visit_admonition(node)
def depart_impl_node(self, node):
    """Close an ``impl`` node in the writer like a standard admonition."""
    self.depart_admonition(node)
class ImplementationDetail(SphinxDirective):
    """The ``impl-detail`` directive: renders its body as a titled admonition
    flagging implementation details; an optional argument becomes an extra
    inline paragraph."""
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    # Title text shown on every rendered admonition (run through `_` for
    # translation at render time).
    label_text = 'Implementation Detail'
    def run(self):
        """Build and return the ``impl`` admonition node."""
        impl_node = impl('\n'.join(self.content))
        impl_node += nodes.title(_(self.label_text), _(self.label_text))
        # Parse the directive body as reST inside the admonition node.
        self.state.nested_parse(self.content, self.content_offset, impl_node)
        if self.arguments:
            # The optional argument is rendered as a trailing paragraph.
            n, m = self.state.inline_text(self.arguments[0], self.lineno)
            impl_node.append(nodes.paragraph('', '', *(n + m)))
        return [impl_node]
# -- Go Version role --------------------------------------------------
# Returns the value of the Go version stored in GO_VERSION to minor version
# precision.
import os
import re
from docutils.nodes import strong
from docutils.parsers.rst.states import Inliner
from typing import Tuple, List
def atc_go_version(unused_typ: str,
                   unused_rawtext: str,
                   unused_text: str,
                   unused_lineno: int,
                   unused_inliner: Inliner,
                   options=None,
                   content=None) -> Tuple[List[strong], list]:
    """Role rendering the repository's Go version (major.minor) in bold."""
    version_path = os.path.join(os.path.dirname(__file__), '../../../GO_VERSION')
    with open(file=version_path) as fd:
        full_version = fd.read()
    # Keep only the leading "major.minor" portion of the version string.
    major_minor = re.match(pattern=r'\d+\.\d+', string=full_version).group()
    return [nodes.strong(major_minor, major_minor)], []
# -- Issue role --------------------------------------------------------------
from docutils import utils
ISSUE_URI = REPO_URI + "issues/%s"

def issue_role(unused_typ,
               unused_rawtext,
               text,
               unused_lineno,
               unused_inliner,
               options=None,
               content=None):
    """Role linking ``:issue:`N``` to GitHub issue #N of the ATC repo."""
    options = {} if options is None else options
    content = [] if content is None else content
    issue = utils.unescape(text)
    label = 'Issue #' + issue
    return [nodes.reference(label, label, refuri=ISSUE_URI % issue)], []
# -- Pull Request Role -------------------------------------------------------
PR_URI = REPO_URI + "pull/%s"

def pr_role(unused_typ,
            unused_rawtext,
            text,
            unused_lineno,
            unused_inliner,
            options=None,
            content=None):
    """Role linking ``:pr:`N``` / ``:pull-request:`N``` to a GitHub PR."""
    options = {} if options is None else options
    content = [] if content is None else content
    pr = utils.unescape(text)
    label = 'Pull Request ' + pr
    return [nodes.reference(label, label, refuri=PR_URI % pr)], []
# -- ATC file role -----------------------------------------------------------
FILE_URI = REPO_URI + "tree/master/%s"

def atc_file_role(unused_typ,
                  unused_rawtext,
                  text,
                  unused_lineno,
                  unused_inliner,
                  options=None,
                  content=None):
    """Role linking a repository-relative path to its file on GitHub."""
    options = {} if options is None else options
    content = [] if content is None else content
    path = utils.unescape(text)
    literal = nodes.literal(path, path)
    return [nodes.reference(path, '', literal, refuri=FILE_URI % path)], []
# -- GoDoc role (absolute) ---------------------------------------------------
GODOC_URI = "https://godoc.org/"

def godoc_role(unused_typ,
               unused_rawtext,
               text,
               unused_lineno,
               unused_inliner,
               options=None,
               content=None):
    """Role linking ``package.Symbol`` to its godoc.org page (the first
    '.' becomes the '#' anchor separator)."""
    options = {} if options is None else options
    content = [] if content is None else content
    symbol = utils.unescape(text)
    literal = nodes.literal(symbol, symbol)
    target = GODOC_URI + symbol.replace('.', '#', 1)
    return [nodes.reference(symbol, '', literal, refuri=target)], []
# -- GoDoc role (atc-relative) ----------------------------------------------
ATC_GODOC_PREFIX = "github.com/apache/trafficcontrol/"
ATC_GODOC_URI = GODOC_URI + ATC_GODOC_PREFIX

def atc_godoc_role(unused_typ,
                   unused_rawtext,
                   text,
                   unused_lineno,
                   unused_inliner,
                   options=None,
                   content=None):
    """Role like :godoc: but relative to the ATC repository root."""
    options = {} if options is None else options
    content = [] if content is None else content
    symbol = utils.unescape(text)
    shown = ATC_GODOC_PREFIX + symbol
    literal = nodes.literal(shown, shown)
    target = ATC_GODOC_URI + symbol.replace('.', '#', 1)
    return [nodes.reference(symbol, '', literal, refuri=target)], []
# -- GoDoc role (to-relative) -----------------------------------------------
TO_GODOC_PREFIX = ATC_GODOC_PREFIX + "traffic_ops/traffic_ops_golang/"
TO_GODOC_URI = GODOC_URI + TO_GODOC_PREFIX

def to_godoc_role(unused_typ,
                  unused_rawtext,
                  text,
                  unused_lineno,
                  unused_inliner,
                  options=None,
                  content=None):
    """Role like :godoc: but relative to the Traffic Ops Go package."""
    options = {} if options is None else options
    content = [] if content is None else content
    symbol = utils.unescape(text)
    shown = TO_GODOC_PREFIX + symbol
    literal = nodes.literal(shown, shown)
    target = TO_GODOC_URI + symbol.replace('.', '#', 1)
    return [nodes.reference(symbol, '', literal, refuri=target)], []
def setup(app: object) -> dict:
    """Sphinx entry point: register the custom node, directive, and roles."""
    app.add_node(impl,
                 html=(visit_impl_node, depart_impl_node),
                 latex=(visit_impl_node, depart_impl_node),
                 text=(visit_impl_node, depart_impl_node))
    app.add_directive("impl-detail", ImplementationDetail)
    # Register every custom role under its public name.
    roles = (("atc-go-version", atc_go_version),
             ("issue", issue_role),
             ("pr", pr_role),
             ("pull-request", pr_role),
             ("atc-file", atc_file_role),
             ("godoc", godoc_role),
             ("atc-godoc", atc_godoc_role),
             ("to-godoc", to_godoc_role))
    for name, role in roles:
        app.add_role(name, role)
    return {
        'version': '0.2',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
|
r"""
===========================================
Seismic Ambient Noise (:mod:`seislib.an`)
===========================================
As shown in several theoretical studies, the cross correlation of
seismic ambient noise can be related to the surface-wave Green's
function between the two points of observation [1]_. In case of a `diffuse`
ambient wave-field recorded at two receivers on the vertical component,
the empirical Rayleigh-wave Green's function has a relatively simple
expression, that can be employed to constrain the velocity structure of the
Earth's interior. In the frequency domain, this is proportional to a zeroth
order Bessel function of the first kind (:math:`J_0`), and reads [2]_
.. math::
\Re{\big\lbrace\rho({\bf x}_A, {\bf x}_B, \omega)\big\rbrace}
\approx
J_0\left(\frac{\omega \Delta}{c}\right)
\mbox{e}^{-\alpha \Delta},
where :math:`\Delta`, :math:`\omega`, and `c` denote inter-station
distance, angular frequency, and phase velocity, respectively, :math:`\rho`
the statistical expectation of the normalized cross-spectrum associated
with the two ambient-noise recordings, and :math:`\Re{\big\lbrace \dotso \big\rbrace}`
maps a complex number into its real part. The exponential damping term in the above
equation accounts for the (possibly frequency-dependent) attenuation of the
Rayleigh waves propagating between the two receivers :math:`{\bf x}_A` and
:math:`{\bf x}_B`, through the coefficient :math:`\alpha` [2]_.
In ambient-noise seismology, where continuous seismograms of relatively long duration
(months or years) are employed, the statistical expectation of :math:`\rho` is
replaced by an ensemble average of the cross-spectra calculated over a relatively
large number of time windows. This contributes to approximating the condition of
a diffuse ambient wave-field [1]_, allowing the use of the above equation to measure
the (average) inter-station phase velocity.
In practice, SeisLib makes use of the above equation to
calculate
- Rayleigh and Love phase velocities:
since phase velocity is related to the `phase` of the empirical Green's function,
but not to its amplitude, the exponential damping term is neglected, simplifying
the problem of retrieving `c` from the data. This approach resulted in numerous
successful applications of velocity imaging and monitoring, and can nowadays be
considered standard in ambient-noise tomography [3]_. (See
:mod:`seislib.an.an_velocity`)
- Rayleigh-wave attenuation:
where the attenuation coefficient is retrieved by nonlinear inversion based on
preliminary measurements of phase velocity [2]_. (See
:mod:`seislib.an.an_attenuation`)
References
----------
.. [1] Boschi & Weemstra, (2015), Stationary-phase integrals in the cross
    correlation of ambient noise, Reviews of Geophysics
.. [2] Magrini & Boschi, (2021), Surface-Wave Attenuation From Seismic Ambient Noise:
Numerical Validation and Application, JGR
.. [3] Nakata et al., (2019), Seismic ambient noise, Cambridge University Press
"""
from .an_processing import *
from .an_velocity import *
from .an_attenuation import *
from .an_downloader import *
|
import os
import numpy as np
from astropy.io import ascii
from scipy.optimize import curve_fit
from astropy.io import fits as pyfits
from astropy import wcs
############################################################################################################
#
# VIRCAM TRANSFORMATION TO RA/DEC
#
############################################################################################################
#I added this class because it was not included initially and is needed for the transformation
class Cts:
    """Angle-conversion constants used by the VIRCAM transformation.

    NOTE(review): these are truncated values of pi/180 and 180/pi
    (math.radians(1) = 0.0174532925...); kept as-is so results match
    the original CASU-based code exactly.
    """
    def __init__(self):
        self.deg2rad=0.017453
        self.rad2deg=57.295779
def vircam_xy2standard(x, y, hdr, output='equatorial'):
    """vircam radial distortion, convert from x,y to xi,xn or alpha,delta
    depending on the output desired, return the distortcor to fix the
    photometry, this function uses the header values determined by CASU.

    Parameters
    ----------
    x, y : array-like
        Pixel coordinates (arrays are expected for 'equatorial' output,
        which loops over the result to wrap angles).
    hdr : mapping
        Image chip header providing CDi_j, CRPIXi, CRVALi and PV2_k keys.
    output : {'equatorial', 'standard'}
        'equatorial' returns (alpha, delta, distortcor) in degrees;
        'standard' returns (xi, xn, distortcor).
    """
    cts = Cts()
    a = hdr['CD1_1']
    b = hdr['CD1_2']
    c = hdr['CRPIX1']
    d = hdr['CD2_1']
    e = hdr['CD2_2']
    f = hdr['CRPIX2']
    tpa = hdr['CRVAL1']
    tpd = hdr['CRVAL2']
    projp1 = hdr['PV2_1']
    projp3 = hdr['PV2_3']
    projp5 = hdr['PV2_5']
    # Work in radians throughout.
    a = a * cts.deg2rad
    b = b * cts.deg2rad
    d = d * cts.deg2rad
    e = e * cts.deg2rad
    tpa = tpa * cts.deg2rad
    tpd = tpd * cts.deg2rad
    tand = np.tan(tpd)
    secd = 1. / np.cos(tpd)
    # Offset from the reference pixel, then linear (CD matrix) transform.
    x = x - c
    y = y - f
    xi = a * x + b * y
    xn = d * x + e * y
    # Radial distortion: invert the odd polynomial in two fixed-point steps.
    r = np.sqrt(xi ** 2. + xn ** 2.)
    rfac = projp1 + projp3 * r ** 2. + projp5 * r ** 4.
    r = r / rfac
    rfac = projp1 + projp3 * r ** 2. + projp5 * r ** 4.
    xi = xi / rfac
    xn = xn / rfac
    # Photometric distortion correction (Jacobian of the radial mapping).
    distortcor = 1.0 + 3.0 * projp3 * r ** 2. / projp1 + 5.0 * projp5 * r ** 4. / projp1
    distortcor = distortcor * (1.0 + (projp3 * r ** 2. + projp5 * r ** 4.) / projp1)
    distortcor = 1.0 / distortcor
    if output == 'equatorial':
        # Tangent-plane (gnomonic) de-projection about the tangent point.
        aa = np.arctan(xi * secd / (1.0 - xn * tand))
        alpha = aa + tpa
        delta = np.arctan((xn + tand) * np.sin(aa) / (xi * secd))
        x = alpha
        y = delta
        # Wrap right ascension into [0, 2*pi).
        tpi = 2. * np.pi
        for i, j in enumerate(x):
            if j > tpi: x[i] = j - tpi
            if j < 0.000: x[i] = j + tpi
        x = x * cts.rad2deg
        y = y * cts.rad2deg
        return x, y, distortcor
    elif output == 'standard':
        # FIX: previously returned the misspelled name `distorcor`, which
        # raised NameError whenever this branch was taken.
        return xi, xn, distortcor
    else:
        raise NameError('the type of coordinate should be: "equatorial" or "standard","{}" was given and is not accepted'.format(output))
################################################################################################
#
# CLASSES AND FUNCTIONS
#
#################################################################################################
class MasterFrame:
    '''
    The MasterFrame offers a reference system to compute the relative motion of the stars.
    Init with some reference frame, the one with lowest seeing.
    Match and transform every epoch to this one.
    This MasterFrame will keep the reference system of the initial frame and their ID's but with the stars positions averaged.
    Iterate.
    '''
    def __init__(self,name):
        """Load the reference-frame catalogue `name` (whitespace table with
        ID, X, Y, magnitude A and colour B-A in columns 0,3,4,5,6)."""
        self.ID=[] #ID of the stars (based on a reference frame).
        self.pos=[[],[]] #Positions of the stars after each iteration.
        #Load the positions of the stars in the reference frame and their Color-Magnitude if available.
        self.ID,self.pos[0],self.pos[1],self.mag_A,self.col_BA=np.loadtxt(name,skiprows=1,usecols=(0,3,4,5,6),unpack=True)
        self.added_pos=[[],[]] #Sum the position XY of a star in each frame. Then average it.
        self.matchedID=np.zeros(self.ID.size) #number of times that the star(ID) is found
        self.added_pos[0]=np.zeros(self.ID.size)
        self.added_pos[1]=np.zeros(self.ID.size)
    def constrain_region(self,pos_object,region_range):
        """Keep only stars inside a square of half-side `region_range`
        centred on `pos_object`; all per-star arrays are masked in step."""
        #Pick only stars in a square region around certain point. (For cutting the Chip).
        X_Index=Index_in_Region(self.pos[0],pos_object[0]-region_range,pos_object[0]+region_range)
        Y_Index=Index_in_Region(self.pos[1],pos_object[1]-region_range,pos_object[1]+region_range)
        constrain_index=np.intersect1d(X_Index,Y_Index)
        #Mask every property using the constrain index.
        self.ID=self.ID[constrain_index]
        self.pos[0]=self.pos[0][constrain_index]
        self.pos[1]=self.pos[1][constrain_index]
        self.mag_A=self.mag_A[constrain_index]
        self.col_BA=self.col_BA[constrain_index]
        self.matchedID=self.matchedID[constrain_index]
        self.added_pos[0]=self.added_pos[0][constrain_index]
        self.added_pos[1]=self.added_pos[1][constrain_index]
    def save(self,output):
        """Write the current masterframe catalogue to `output` (ASCII)."""
        ascii.write([self.ID.astype(int),self.pos[0],self.pos[1],self.mag_A,self.col_BA],output,names=["#ID","X","Y","A","B-A"])
    def get_Masks(self,ID):
        """Return (mask_master, mask_epoch): membership of `ID` in the
        masterframe IDs and vice versa."""
        mask_master=np.in1d(self.ID,ID) #Received ID's that are present on the masterframe ID's
        mask_epoch=np.in1d(ID,self.ID) #Masterframe ID's that are present on received ID's.
        return mask_master,mask_epoch
    def add_frame(self,ID,pos):
        """Accumulate one epoch's star positions into the running sums."""
        #Add a frame, is must be alredy transformed to the reference frame system.
        #Receives the ID's found and the positions to be added according those ID's
        mask=np.in1d(self.ID,ID) #Wich stars(ID's) must be added
        self.added_pos[0][mask]=self.added_pos[0][mask]+pos[0] #Add new position in X
        self.added_pos[1][mask]=self.added_pos[1][mask]+pos[1] #Add new position in Y
        self.matchedID[mask]=self.matchedID[mask]+1 #These stars were added
    def compute_master(self,star_threshold):
        """Average the accumulated positions, drop stars matched fewer than
        `star_threshold` times, and reset the accumulators for the next
        iteration."""
        #Compute the final masterframe. Discard useless stars. Keep the objects of interest.
        #Average the position according the number of repeats to update Reference Positions.
        self.pos[0]=np.divide(self.added_pos[0],self.matchedID)
        self.pos[1]=np.divide(self.added_pos[1],self.matchedID)
        #Forget the stars that were found less times that star_threshold.
        #You could choose to keep some stars using: mask= np.union1d(threshold_index,another_index)
        threshold_index=np.where(self.matchedID>=star_threshold)[0]
        self.ID=self.ID[threshold_index]
        self.pos[0]=self.pos[0][threshold_index]
        self.pos[1]=self.pos[1][threshold_index]
        self.mag_A=self.mag_A[threshold_index]
        self.col_BA=self.col_BA[threshold_index]
        #Reset the added_pos for a following iteration.
        self.matchedID=np.zeros(self.ID.size)
        self.added_pos[0]=np.zeros(self.ID.size)
        self.added_pos[1]=np.zeros(self.ID.size)
def XYtoRADEC(name, extension):
    """Generate a new catalogue (<name>.dat) adding RA/DEC computed from the
    image WCS, for ulterior matching with stilts."""
    ID_image, x_image, y_image, mag_image = np.loadtxt(
        name + extension, usecols=(0, 1, 2, 3), skiprows=3, unpack=True)
    ID_image = ID_image.astype(int)
    # FIX: the FITS file used to be opened with pyfits.open() and never
    # used nor closed (handle leak); wcs.WCS reads the header itself.
    #RA_image,DEC_image,distcoord=vircam_xy2standard(x_image,y_image,hdulist[0].header)
    w = wcs.WCS(name + '.fits')
    coords = np.transpose((x_image, y_image))
    RA_image, DEC_image = np.transpose(w.wcs_pix2world(coords, 1))
    ascii.write([ID_image, RA_image, DEC_image, x_image, y_image, mag_image],
                name + ".dat", names=['#ID', 'RA', 'DEC', 'X', 'Y', 'mag'])
# Find the epoch with the lowest seeing.
def lowest_seeing(files):
    """Return ``(index, name)`` of the epoch with the smallest SEEING value.

    Reads the SEEING keyword from each FITS primary header under
    ``epoch_path``.

    Fixed: the original returned ``index[0]`` where ``index`` was a *boolean*
    mask (``seeing == seeing.min()``), so the first element of the returned
    tuple was True/False (i.e. 1 or 0) rather than the position of the
    minimum — the wrong epoch was then used as the reference frame unless the
    best seeing happened to be at position 0.  Fixed with ``argmin``.
    """
    seeing = np.zeros(len(files))
    for i, name in enumerate(files):
        hdu = pyfits.open(epoch_path + name + '.fits')
        seeing[i] = hdu[0].header['SEEING']
    best = int(np.argmin(seeing))
    return best, files[best]
#Transformations of the original Match code. Includes Rotations and Scalings.
#Quadratic used to generate the masterframe.
def linear(coord, a, b, c):
    """First-order (affine) transform evaluated at coord = [x, y]."""
    x, y = coord[0], coord[1]
    return a + b * x + c * y
def quadratic(coord, a, b, c, d, e, f):
    """Second-order transform; used when fitting frames onto the master."""
    x, y = coord[0], coord[1]
    quad_terms = d * np.square(x) + e * np.multiply(x, y) + f * np.square(y)
    return a + b * x + c * y + quad_terms
def cubic(coord, a, b, c, d, e, f, g, h):
    """Third-order transform; the cubic terms share the radial factor x^2 + y^2."""
    x, y = coord[0], coord[1]
    x2, y2 = np.square(x), np.square(y)
    r2 = x2 + y2
    return (a + b * x + c * y
            + d * x2 + e * np.multiply(x, y) + f * y2
            + g * np.multiply(x, r2) + h * np.multiply(y, r2))
#Constrain the search in different ways. Obtain the Indexes of the objects in the constrain.
#The user can put these at different sections of the Code.
def Index_in_Radius(pos, pos0, rad):
    """Indices of objects within rad of pos0 (pos = [x_array, y_array])."""
    dx = pos[0] - pos0[0]
    dy = pos[1] - pos0[1]
    inside = np.square(dx) + np.square(dy) <= np.square(rad)
    return np.where(inside)[0]
def Index_out_Radius(pos, pos0, rad):
    """Indices of objects strictly farther than rad from pos0."""
    dx = pos[0] - pos0[0]
    dy = pos[1] - pos0[1]
    outside = np.square(dx) + np.square(dy) > np.square(rad)
    return np.where(outside)[0]
def Index_in_Region(pos, inf, sup):
    """Indices where inf <= pos <= sup (element-wise on a coordinate array)."""
    in_box = (pos >= inf) & (pos <= sup)
    return np.where(in_box)[0]
def Index_in_CMD(color, magnitude, color_range, magnitude_range):
    """Indices of objects inside the colour-magnitude box."""
    in_col = (color >= color_range[0]) & (color <= color_range[1])
    in_mag = (magnitude >= magnitude_range[0]) & (magnitude <= magnitude_range[1])
    # Both cuts must hold; np.where on the combined mask yields sorted indices,
    # exactly like intersecting the two individual index sets.
    return np.where(in_col & in_mag)[0]
def Index_out_CMD(color, magnitude, color_range, magnitude_range):
    """Indices of objects OUTSIDE the colour-magnitude box.

    Fixed: the original required ``color <= color_range[0]`` AND
    ``color >= color_range[1]`` simultaneously (and likewise for magnitude),
    which is impossible for a proper range — the function always returned an
    empty array.  An object is outside the box when it fails *either* the
    colour or the magnitude cut, i.e. the complement of Index_in_CMD.
    """
    in_col = (color >= color_range[0]) & (color <= color_range[1])
    in_mag = (magnitude >= magnitude_range[0]) & (magnitude <= magnitude_range[1])
    return np.where(~(in_col & in_mag))[0]
#Directory Handling.
def make_directory(folder):
    """Ensure folder exists and is empty: create it, or wipe its contents."""
    if os.path.exists(folder):
        empty_directory(folder)
    else:
        os.makedirs(folder)
def empty_directory(folder):
    """Delete every entry directly inside folder, best effort.

    Failures for individual entries (e.g. subdirectories, which unlink cannot
    remove) are printed and skipped, preserving the original behaviour.

    Fixed: the Python 2-only ``except Exception, e`` syntax is replaced with
    ``except Exception as e`` (valid from Python 2.6 onwards and in Python 3);
    ``print e`` becomes the equivalent ``print(e)``.
    """
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            os.unlink(file_path)
        except Exception as e:
            print(e)
##############################################################################################
#
# USER VARIABLES
#
###############################################################################################
catalogue_extension=".dao" #Original Catalogues extension.
epoch_path="epoch/" #Original Files Folder. Must include Catalogue and fits image.
cmd_path="cmd/" #CMD (B-A vs. A) folder. Must include two catalogues of the same epoch with different filters and their fits images.
#filter_A="b242_3_k_11-001" #Name of the catalogue for filter A (No extension, no path)
filter_B="b249_5_j_13-001" #Name of the catalogue for filter B (No extension, no path)
match_path="matched_epochs/" #Output Folder where the Stilts match between epochs and the reference frame (with cmd information) are stored.
seeing_out="Seeing.dat" #Seeing output File. For user usage only.
date_out="Dates.dat" #MJD output. Needed for the proper motion Code.
cmd_filename="CMD.dat" #Output: Reference frame with information of the CMD included.
master_filename="Master.dat" #Masterframe Output.
master_iterations=4 #Number of iterations to compute the masterframe
master_star_threshold=10 #Minimum number of times that a star must be found in the masterframe to be considered.
match_tolerance=0.1 #Tolerance for stilts match separation in arcsec
ref_index=36 #IMAGE with good seeing. (Index Position in the array at epoch_path) NOTE(review): overwritten below by lowest_seeing().
#Control booleans: set any of these to False to skip a pipeline stage whose
#outputs already exist from a previous run.
get_RADEC=True
get_Seeing=True
get_Date=True
match_cmd=True
match_epochs=True
#############################################################################################
#
# PIPELINE
#
#############################################################################################
#GET FILE NAMES: collect every catalogue basename found in epoch_path, sorted.
epoch_names=[]
for file in os.listdir(epoch_path):
    if file.endswith(catalogue_extension):
        epoch_names.append(file.split(".")[0])
epoch_names.sort()
print epoch_names
#Record the SEEING header keyword of every epoch (for user inspection only).
if get_Seeing:
    seeing_file=open(seeing_out,'w')
    for name in epoch_names:
        hdulist = pyfits.open(epoch_path+name+".fits")
        seeing_file.write(str(hdulist[0].header["SEEING"])+"\n")
    seeing_file.close()
#The epoch with the best (lowest) seeing becomes both the reference frame
#and the filter-A catalogue of the CMD.
ref_index, ref_file = lowest_seeing(epoch_names)
print "Seeing minimo en %s" % ref_file
filter_A = ref_file
cmd_names=[filter_B,filter_A]
print cmd_names
#TRANSFORM to RA DEC
#(only for stilts match)
if get_RADEC:
    for name in epoch_names:
        print "RA-DEC CONVERTING: "+name
        XYtoRADEC(epoch_path+name,catalogue_extension)
    for name in cmd_names:
        print "RA-DEC CONVERTING: "+name
        XYtoRADEC(cmd_path+name,catalogue_extension)
cmd_names=[filter_B,filter_A]
#Record the MJD-OBS of every epoch; consumed later by the proper-motion code.
if get_Date:
    date_file=open(date_out,'w')
    for name in epoch_names:
        hdulist = pyfits.open(epoch_path+name+".fits")
        date_file.write(str(hdulist[0].header["MJD-OBS"])+"\n")
    date_file.close()
#Generate the CMD.dat file. Use information of the CMD epoch + Reference epoch.
if match_cmd:
    #Match the two Images of the same epoch for the CMD
    in_match1="in1="+cmd_path+cmd_names[0]+".dat " #Filter B#
    in_match2="in2="+cmd_path+cmd_names[1]+".dat " #Filter A#
    out_match="out="+cmd_path+"CMD"+".match "
    params="params=%.1f"%match_tolerance
    os.system("sh stilts tmatch2 ifmt1=ascii ifmt2=ascii matcher=sky ofmt=ascii values1=\"RA DEC\" values2=\"RA DEC\" "+in_match1+in_match2+out_match+params)
    #Match that CMD with the Best Image to use as reference.
    in_match1="in1="+epoch_path+epoch_names[ref_index]+".dat "
    in_match2="in2="+cmd_path+"CMD"+".match "
    out_match="out="+cmd_path+"CMD_Ref"+".match "
    os.system("sh stilts tmatch2 ifmt1=ascii ifmt2=ascii matcher=sky ofmt=ascii values1=\"RA DEC\" values2=\"RA_2 DEC_2\" "+in_match1+in_match2+out_match+params)
    #Save the Reference Image with all the original stars AND the Color-Magnitude found in the match.
    #If CMD was not found for a star mark it as False
    cmd_match=cmd_path+"CMD_Ref"+".match"
    ref_name=epoch_path+epoch_names[ref_index]+".dat"
    ID,RA,DEC,X,Y= np.loadtxt(ref_name,skiprows=1, usecols=(0,1,2,3,4),unpack=True) #read reference image
    ID_matched,B,A= np.loadtxt(cmd_match,skiprows=1, usecols=(0,11,17),unpack=True) #read match of reference image & cmd
    #Sentinel value -10 marks "no CMD information"; the boolean mask records
    #which reference stars actually got a CMD counterpart.
    mag_A=np.zeros(ID.size)-10
    col_BA=np.zeros(ID.size)-10
    cmd_mask=np.in1d(ID,ID_matched)
    #Set the real value for stars with cmd using a mask.
    mag_A[cmd_mask]=A
    col_BA[cmd_mask]=B-A
    #Output the CMD file with information about positions.
    ascii.write([ID.astype(int),RA, DEC,X,Y,mag_A,col_BA,cmd_mask], cmd_filename, names=['#ID','RA', 'DEC','X','Y','A','B-A','bool_CMD'])
#MATCH EACH EPOCH WITH THE REFERENCE EPOCH(Including CMD Information)(STILTS)
if match_epochs:
    make_directory(match_path)
    for name in epoch_names:
        print "STILTS MATCHING "+name
        in_ref="in1="+cmd_filename+" "
        in_epoch="in2="+epoch_path+name+".dat "
        out_match="out="+match_path+name+".match "
        params="params=%.1f"%match_tolerance
        os.system("sh stilts tmatch2 ifmt1=ascii ifmt2=ascii matcher=sky ofmt=ascii values1=\"RA DEC\" values2=\"RA DEC\" "+in_ref+in_epoch+out_match+params)
#CREATE MASTER FRAME (based on the better looking epoch) (CMD incorporated)
masterframe=MasterFrame(cmd_filename)
#You can call here masterframe.constrain_region() to use only a region of the chip for the masterframe.
#masterframe.constrain_region([0,0],500)
for i in range(master_iterations):
    for name in epoch_names:
        print "MASTER FRAMING: "+name+"\nIteration: "+str(i+1)
        in_match=match_path+name+".match"
        pos_ref=[[],[]]
        pos_epoch=[[],[]]
        pos_trans=[[],[]]
        #read matched_file.
        #Note that since they were matched, the index corresponds to a matched pair
        #Remember we keep using the ID's of the original reference epoch.
        matchedID,pos_epoch[0],pos_epoch[1],separation=np.loadtxt(in_match,skiprows=1, usecols=(0,11,12,14),unpack=True)
        #Which stars from the epoch are present in the masterframe
        #Which stars from the masterframe are present in the epoch
        mask_master,mask_epoch=masterframe.get_Masks(matchedID)
        pos_ref=[masterframe.pos[0][mask_master],masterframe.pos[1][mask_master]]
        pos_epoch=[pos_epoch[0][mask_epoch],pos_epoch[1][mask_epoch]]
        mag=masterframe.mag_A[mask_master]
        col=masterframe.col_BA[mask_master]
        #GLOBAL QUADRATIC TRANSFORM TO THE INITIAL REFERENCE FRAME
        #The transformation is applied to ALL matched stars
        popt_X,pcov_X= curve_fit(quadratic,[pos_epoch[0],pos_epoch[1]],pos_ref[0])
        popt_Y,pcov_Y= curve_fit(quadratic,[pos_epoch[0],pos_epoch[1]],pos_ref[1])
        pos_trans[0]=quadratic([pos_epoch[0],pos_epoch[1]],popt_X[0],popt_X[1],popt_X[2],popt_X[3],popt_X[4],popt_X[5])
        pos_trans[1]=quadratic([pos_epoch[0],pos_epoch[1]],popt_Y[0],popt_Y[1],popt_Y[2],popt_Y[3],popt_Y[4],popt_Y[5])
        #Add the matched stars to the frame.
        masterframe.add_frame(matchedID,pos_trans)
    #Update the masterframe using the transformed positions to the reference system.
    masterframe.compute_master(master_star_threshold)
masterframe.save(master_filename)
#Got the Masterframe. Includes ID's + Positions in the reference epoch system and Color-Magnitude of the CMD epoch.
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.jwt.taskrouter.capabilities import WorkspaceCapabilityToken

# Your Account Sid and Auth Token from twilio.com/user/account
# (placeholders; supply real credentials at runtime).
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

# Build a TaskRouter capability token scoped to the workspace.
capability = WorkspaceCapabilityToken(
    account_sid=account_sid,
    auth_token=auth_token,
    workspace_sid=workspace_sid
)

# Grant fetch/update/delete on all workspace subresources.
capability.allow_fetch_subresources()
capability.allow_update_subresources()
capability.allow_delete_subresources()

token = capability.to_jwt()

# By default, tokens are good for one hour.
# Override this default timeout by specifiying a new value (in seconds).
# For example, to generate a token good for 8 hours:
# 60 * 60 * 8 = 28800
# NOTE(review): this overwrites the token generated above; only the 8-hour
# token is printed.
token = capability.to_jwt(ttl=28800)

print(token)
|
from django.contrib import admin
from .models import Project, ProjectSubtitle, BulletPoint
# Register your models here.
class ProjectAdmin(admin.ModelAdmin):
    """Admin options for Project; placeholder for future customisation."""
    pass


# Fixed: ProjectAdmin was defined but never attached to a model, so the
# class was dead code. Register it with Project (currently behaviour-neutral
# since it defines no options, but customisations now take effect).
admin.site.register(Project, ProjectAdmin)
admin.site.register(ProjectSubtitle)
admin.site.register(BulletPoint)
"""
Hardcoded data for the blockchain (use for genesis validation)
"""
from .block import Block
__all__ = ["GENESIS_BLOCK", "developer_address"]

# Address credited as the chain's developer.  NOTE(review): participates in
# genesis validation — changing it invalidates every existing chain.
developer_address = "A2tddJpzOWIp1Dv81mqn4WJ/UQAjmFLPINRnkt67zMMR"
# Fixed genesis block: index 0, dummy previous hash, timestamp 0, forged by
# the developer address, so every node derives an identical starting block.
GENESIS_BLOCK = Block(index=0, previous_hash="0", forger=developer_address, timestamp=0)
|
# Given an array of integers, return indices of the two numbers such that they add up to a specific target.
#
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
#
# Example:
# Given nums = [2, 7, 11, 15], target = 9,
#
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
class Solution:
    def twoSum(self, nums, target):
        """Return indices of the two entries of nums that sum to target.

        Assumes exactly one solution exists and an element may not be used
        twice.  Single pass with a value -> index map: for each element,
        check whether its complement has already been seen.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        seen = {}
        for idx, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], idx]
            seen[num] = idx
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
from subprocess import call
APP_HOME = os.environ.get('APP_HOME', '/app')
APP_ENV = os.path.join(APP_HOME, '.env')
APP_ETC = os.path.join(APP_HOME, 'etc')
APP_ROOT = os.path.join(APP_HOME, 'root')
class CommandManager(object):
    """Tiny command dispatcher: register callables by name with ``reg`` and
    execute them in sequence from an argv-style list with ``run``."""

    def __init__(self, before_command=None, after_command=None):
        # name -> {'fn': callable, 'with_args': False | True | int}
        self._commands = {}
        self._before_command = before_command
        self._after_command = after_command

    def reg(self, name, with_args=False):
        """Decorator registering ``fn`` under ``name``.

        with_args: False (no arguments), True (consume all remaining argv),
        or an int (consume exactly that many argv entries).
        """
        def decorator(fn):
            self._commands[name] = {'fn': fn, 'with_args': with_args}
            return fn
        return decorator

    def before_command(self):
        # Run the optional setup hook once, before any command.
        if self._before_command:
            self._before_command()

    def after_command(self):
        # Run the optional teardown hook once, after all commands.
        if self._after_command:
            self._after_command()

    def not_found(self, name):
        print('the command "%s" not found in busybox!' % name)

    def _next_command(self, argv):
        # Pop the command name off the front of argv.
        return argv[0], argv[1:]

    def _extract_args(self, argv, count=None):
        # count=None/True-ish falsy -> take everything; int -> take that many.
        if count:
            return argv[:count], argv[count:]
        return argv, []

    def run(self, argv=None):
        """Dispatch every command found in argv (default: sys.argv[1:])."""
        argv = argv if argv else sys.argv[1:]
        self.before_command()
        while argv:
            cmd_name, argv = self._next_command(argv)
            entry = self._commands.get(cmd_name)
            if entry is None:
                self.not_found(cmd_name)
                continue
            nargs = entry.get('with_args')
            if nargs:
                if nargs is True:
                    args, argv = self._extract_args(argv)
                else:
                    args, argv = self._extract_args(argv, nargs)
                entry['fn'](*args)
            else:
                entry['fn']()
        self.after_command()
def _fix_permissions():
    """Chown the application directories to user.user before any command runs."""
    print('fix permissions...')
    for directory in (APP_HOME,):
        print('fixing "%s"' % directory)
        call(['/bin/chown', 'user.user', directory], shell=False)


# Global dispatcher; _fix_permissions runs before every command sequence.
cm = CommandManager(before_command=_fix_permissions)
@cm.reg('configure')
def cmd_configure():
    """Bootstrap the application: drop privileges, then create the
    virtualenv, the Django project skeleton and the uwsgi config if any of
    them are missing (idempotent)."""
    # Drop root privileges: act as the unprivileged 'user' (uid/gid 1000).
    os.setgid(1000)
    os.setuid(1000)
    os.chdir(APP_HOME)
    os.environ.update(HOME='/home/user')
    print('exists virtualenv?')
    if not os.path.exists(APP_ENV):
        print('need create virtualenv')
        call(
            [
                '/usr/bin/virtualenv',
                APP_ENV,
                '--python=python3'
            ],
            shell=False
        )
    if not os.path.exists(APP_ROOT):
        print('create root application directory')
        os.mkdir(APP_ROOT)
    # NOTE(review): 'requiriments.txt' (sic) is a typo, but it is also the
    # on-disk contract of existing containers — do not rename casually.
    if not os.path.exists(os.path.join(APP_ROOT, 'requiriments.txt')):
        print('create minimal requiriments.txt')
        with open(os.path.join(APP_ROOT, 'requiriments.txt'), 'wt') as fd:
            fd.write(
                '\n'.join([
                    'Django<1.9',
                    'ipython>=4.0.0',
                    'uwsgi>=2.0.0'
                ])
            )
    print('install requiriments')
    call(
        [
            os.path.join(APP_ENV, 'bin', 'pip'),
            'install',
            '-U',
            '-r',
            os.path.join(APP_ROOT, 'requiriments.txt')
        ],
        shell=False
    )
    print('exists root app?')
    if not os.path.exists(os.path.join(APP_ROOT, 'manage.py')):
        print('initialize django project')
        call(
            [
                os.path.join(APP_ENV, 'bin', 'django-admin'),
                'startproject',
                'app',
                APP_ROOT
            ],
            shell=False
        )
    if not os.path.exists(APP_ETC):
        print('create configuration directory')
        os.mkdir(APP_ETC)
    if not os.path.exists(os.path.join(APP_ETC, 'uwsgi.yml')):
        print('create uwsgi.yml for launche server')
        with open(os.path.join(APP_ETC, 'uwsgi.yml'), 'wt') as fd:
            fd.write(
                '\n'.join([
                    'uwsgi:',
                    ' master: 1',
                    ' workers: 1',
                    ' threads: 4',
                    ' http: 0.0.0.0:8000',
                    ' wsgi: app.wsgi',
                    ' home: %s' % APP_ENV,
                    ' env: PYTHONPATH=%s' % APP_ROOT,
                    ' py-autoreload: 1'
                ])
            )
@cm.reg('shell')
def cmd_shell():
    """Open an interactive bash shell as the unprivileged user in APP_HOME."""
    print('open shell in app directory')
    os.setgid(1000)
    os.setuid(1000)
    os.chdir(APP_HOME)
    os.environ.update(HOME='/home/user')
    call('/bin/bash'.split(' '), shell=False)
@cm.reg('start')
def cmd_start():
    """Launch the uwsgi development server as the unprivileged user."""
    print('start server')
    os.setgid(1000)
    os.setuid(1000)
    os.chdir(APP_HOME)
    os.environ.update(HOME='/home/user')
    try:
        call(
            [
                os.path.join(APP_ENV, 'bin', 'uwsgi'),
                '--yml',
                os.path.join(APP_ETC, 'uwsgi.yml')
            ],
            shell=False
        )
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop the server; exit quietly.
        print('the end')
@cm.reg('root_shell')
def cmd_root_shell():
    """Open a bash shell WITHOUT dropping privileges (debugging only)."""
    print('open shell with root')
    call('/bin/bash'.split(' '), shell=False)


if __name__ == '__main__':
    # Dispatch the commands given on the container's command line.
    cm.run()
|
# Read the two starting candy counts (Vladik's and Valera's) from stdin.
inputs = [int(i) for i in input().split()]
def candies(vladik_a, valera_a):
amount = 0
toggle = True
while True:
amount += 1
if vladik_a < 0:
return "Vladik"
if valera_a < 0:
return "Valera"
if toggle:
vladik_a -= amount
toggle = False
else:
valera_a -= amount
toggle = True
# Print the name of the loser for the given starting amounts.
print(candies(inputs[0], inputs[1]))
|
#!/usr/bin/env python
class Clock:
    """Minute-resolution 24-hour clock stored as a single minute counter."""

    def __init__(self, decimal_hours, decimal_minutes):
        # Collapse hours + minutes into total minutes; wrapping happens on read.
        self.minutes = decimal_hours * 60 + decimal_minutes

    def _to_dec(self):
        # Derive the displayed (hours, minutes), wrapped to a 24h day.
        decimal_hours = (self.minutes // 60) % 24
        decimal_minutes = (self.minutes - (decimal_hours * 60)) % 60
        return (decimal_hours, decimal_minutes)

    def add(self, y):
        """Advance the clock by y minutes; returns self to allow chaining."""
        self.minutes += y
        return self

    def __eq__(self, other):
        # Two clocks are equal when they display the same wall time.
        return str(self) == str(other)

    def __repr__(self):
        return "%02d:%02d" % (self._to_dec())
if __name__ == '__main__':
    import sys, time
    # Demo: tick the clock forever, one minute per interval.
    # Optional CLI argument: seconds between ticks (default 1).
    c = Clock(0,0)
    if len(sys.argv) == 2:
        i = float(sys.argv[1])
    else:
        i = 1
    # NOTE(review): `print c` is Python 2 statement syntax; this demo block
    # will not run under Python 3 as written.
    while True:
        c.add(1); print c
        time.sleep(i)
|
import matplotlib.pyplot as plt
import wbml.out as out
from wbml.plot import tweak
from stheno import B, Measure, GP
B.epsilon = 1e-10  # Very slightly regularise.

# Define points to predict at.
x = B.linspace(0, 10, 200)
x_obs = B.linspace(0, 10, 10)

with Measure() as prior:
    # Construct a linear model f(x) = slope * x + intercept with GP priors
    # on both coefficients (Bayesian linear regression).
    slope = GP(1)
    intercept = GP(5)
    f = slope * (lambda x: x) + intercept

# Sample a slope, intercept, underlying function, and observations
# (observation noise variance 0.2).
true_slope, true_intercept, f_true, y_obs = prior.sample(
    slope(0), intercept(0), f(x), f(x_obs, 0.2)
)

# Condition on the observations to make predictions.
post = prior | (f(x_obs, 0.2), y_obs)
mean, lower, upper = post(f(x)).marginal_credible_bounds()

# Evaluating the posterior coefficient processes at 0 recovers the estimates.
out.kv("True slope", true_slope[0, 0])
out.kv("Predicted slope", post(slope(0)).mean[0, 0])
out.kv("True intercept", true_intercept[0, 0])
out.kv("Predicted intercept", post(intercept(0)).mean[0, 0])

# Plot result.
plt.plot(x, f_true, label="True", style="test")
plt.scatter(x_obs, y_obs, label="Observations", style="train", s=20)
plt.plot(x, mean, label="Prediction", style="pred")
plt.fill_between(x, lower, upper, style="pred")
tweak()

plt.savefig("readme_example6_blr.png")
plt.show()
|
import argparse
import pickle
import ujson as json
from multiprocessing import Pool
from os.path import join
from typing import List, Dict
import numpy as np
from tqdm import tqdm
from docqa.config import CORPUS_DIR
from docqa.configurable import Configurable
from docqa.data_processing.multi_paragraph_qa import MultiParagraphQuestion, DocumentParagraph
from docqa.data_processing.preprocessed_corpus import FilteredData
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.hotpotqa.answer_detection import FastNormalizedAnswerDetector
from docqa.utils import ResourceLoader
from docqa.utils import bcolors
from docqa.utils import split
class HotpotQaSpanDataset(Configurable):
    """HotpotQA span-extraction dataset: tokenizes questions/paragraphs,
    locates answer spans in the golden paragraphs, and persists the result
    as train/dev pickles."""

    def __init__(self, corpus_name):
        self.corpus_name = corpus_name
        self.dir = join(CORPUS_DIR, self.corpus_name)
        self.tokenizer = NltkAndPunctTokenizer()
        self.detector = FastNormalizedAnswerDetector()
        # Processed questions and the raw json they came from.
        self._train, self._raw_train = list(), None
        self._dev, self._raw_dev = list(), None
        self.missed_answer = 0

    def get_train(self) -> List[Dict]:
        return self._train

    def get_dev(self) -> List[Dict]:
        return self._dev

    def get_resource_loader(self):
        return ResourceLoader()

    @property
    def name(self):
        return self.corpus_name

    @staticmethod
    def _build_question(questions, tokenizer, detector):
        """Convert one chunk of raw questions into MultiParagraphQuestion
        objects (static so it can be dispatched through a process Pool)."""
        questions_chunk = []
        for question in tqdm(questions, desc='Chunk.', ncols=70):
            question_id = question['_id']
            question_text = tokenizer.tokenize_paragraph_flat(question['question'])
            answer_text = [question['answer']]
            supporting_facts = question['supporting_facts']
            paragraphs = []
            tokenized_aliases = [tokenizer.tokenize_paragraph_flat(x) for x in answer_text]
            detector.set_question(tokenized_aliases)
            # Answer type codes:
            # 0: no
            # 1: yes
            # 2: answer spans
            answer_type = 2
            if answer_text[0].lower() == 'yes':
                answer_type = 0
            elif answer_text[0].lower() == 'no':
                answer_type = 1
            # Keep only the golden paragraphs (those named in supporting_facts).
            if supporting_facts is not None:
                answer_para_title = [p[0] for p in supporting_facts]
                documents = [d for d in question['context'] if d[0] in answer_para_title]
                if len(documents) < 2:
                    tqdm.write(bcolors.WARNING + "The number of golden paragraph is not two" + bcolors.ENDC)
                # offset makes span indices global across the concatenated paragraphs.
                offset = 0
                for i, d in enumerate(documents):
                    title, paragraph = d[0], d[1]
                    text_paragraph = " ".join(paragraph)
                    text = tokenizer.tokenize_paragraph_flat(text_paragraph)
                    start, end = 0, len(text) - 1
                    rank = -1
                    # Case: the answer is a text span — find all occurrences.
                    if answer_type == 2:
                        spans = []
                        for s, e in detector.any_found([text]):
                            spans.append((s + offset, e + offset))
                        if len(spans) == 0:
                            answer_spans = np.zeros((0, 2), dtype=np.int32)
                        else:
                            get_answer_span = True
                            answer_spans = np.array(spans, dtype=np.int32)
                    # Case: the answer is yes/no — attach a dummy span to the
                    # first golden paragraph only.
                    else:
                        get_answer_span = True
                        # NOTE(review): both yes/no branches produce the same
                        # [[0, 0]] span; the distinction lives in answer_yes_no.
                        # get_answer_span is assigned but never read.
                        if i == 0:
                            if answer_type == 0:
                                answer_spans = np.array([[0, 0]], dtype=np.int32)
                            else:
                                answer_spans = np.array([[0, 0]], dtype=np.int32)
                        else:
                            answer_spans = np.zeros((0, 2), dtype=np.int32)
                    answer_yes_no = np.array([answer_type], dtype=np.int32)
                    paragraphs.append(DocumentParagraph(title, start + offset, end + offset, rank, answer_spans, text, answer_yes_no))
                    offset += end
            if paragraphs is not None:
                questions_chunk.append(MultiParagraphQuestion(question_id, question_text, answer_text, paragraphs))
        return questions_chunk

    @classmethod
    def _build_dataset(cls, corpus_name, n_processes, train_file: str, dev_file: str):
        """Load the raw json files and preprocess train/dev in parallel."""
        hotpotqa = cls(corpus_name=corpus_name)
        with open(join(hotpotqa.dir, train_file), "rt") as f_train:
            _raw_train = json.load(f_train)
        with open(join(hotpotqa.dir, dev_file), "rt") as f_dev:
            _raw_dev = json.load(f_dev)
        dataset = {'train': _raw_train, 'dev': _raw_dev}
        for d in dataset:
            with Pool(n_processes) as pool, tqdm(total=len(dataset[d]), desc=d, ncols=70) as pbar:
                tqdm.write(bcolors.OKBLUE + "[+] Preprocess for {} set".format(d) + bcolors.ENDC)
                chunks = split(dataset[d], n_processes)
                for questions in pool.starmap(hotpotqa._build_question, [[c, hotpotqa.tokenizer, hotpotqa.detector] for c in chunks]):
                    pbar.update(len(questions))
                    if d == 'train':
                        hotpotqa._train += questions
                    elif d == 'dev':
                        hotpotqa._dev += questions
        hotpotqa._train = FilteredData(hotpotqa._train, len(hotpotqa._train))
        hotpotqa._dev = FilteredData(hotpotqa._dev, len(hotpotqa._dev))
        return hotpotqa

    def save(self):
        """Pickle the processed train/dev sets next to the raw corpus."""
        with open(join(self.dir, "train.pkl"), "wb") as f:
            f.write(pickle.dumps(self._train))
        with open(join(self.dir, "dev.pkl"), "wb") as f:
            f.write(pickle.dumps(self._dev))
        tqdm.write(bcolors.OKGREEN + "[+] saved at {}".format(self.dir) + bcolors.ENDC)

    @classmethod
    def load(cls, corpus_name):
        """Restore a previously saved dataset from its pickles."""
        hotpot = cls(corpus_name=corpus_name)
        hotpot._train = pickle.load(open(join(hotpot.dir, "train.pkl"), "rb"))
        hotpot._dev = pickle.load(open(join(hotpot.dir, "dev.pkl"), "rb"))
        return hotpot
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--n_processes', type=int, default=10, help="The number of processes for multi-processing")
    parser.add_argument('--save', default=False, action='store_true')
    args = parser.parse_args()
    # Build the dataset from the raw HotpotQA json files; optionally persist
    # the processed train/dev sets as pickles.
    hotpotqa_dataset = HotpotQaSpanDataset._build_dataset('hotpotqa', args.n_processes, 'hotpot_train_v1.json', 'hotpot_dev_distractor_v1.json')
    if args.save:
        hotpotqa_dataset.save()
|
import requests
from yaml_config import config
import global_vars
from datetime import datetime
from sms_sender import send_sms
import logging
import colorlog
logger = colorlog.getLogger(__name__)
logger.addHandler(global_vars.file_handler)
logger.addHandler(global_vars.handler)
logger.setLevel(global_vars.log_level)
# Shared river state; 'last' holds previous-reading values and peak tracking.
river_data = {}
river_data['last'] = {}


def init_river():
    """Seed river_data from the EA station endpoint and run a first check.

    Any failure is logged and swallowed so the caller's scheduler keeps
    running (preserving the original best-effort behaviour).
    """
    try:
        logger.info('Initialising river data from Environment Agency')
        resp = requests.get(config['river']['api_station_url'])
        river_data['id'] = resp.json()['items']['stationReference']
        river_data['name'] = resp.json()['items']['riverName']
        river_data['level'] = resp.json()['items']['measures']['latestReading']['value']
        river_data['timestamp'] = resp.json()['items']['measures']['latestReading']['dateTime'][:-1]
        # If this hasn't been set pull from the retrieved JSON
        if config['river']['high_warn']:
            river_data['high_warn'] = config['river']['high_warn']
            river_data['high'] = config['river']['high']
        else:
            river_data['high_warn'] = resp.json()['items']['stageScale']['typicalRangeHigh']
            river_data['high'] = river_data['high_warn'] - 0.15  # 0.15 is generally a good guess for hysteresis
        # Fixed: check_river() reads these keys on every new reading; they were
        # never initialised, so the comparison raised KeyError (silently caught)
        # and the alerting logic never ran.
        river_data['warning_active'] = False
        river_data['last']['high_level'] = river_data['level']
        river_data['last']['warn_level'] = river_data['level']
        check_river()
    except Exception as e:
        logger.error('River init failed: ' + str(e))
def check_river():
    """Poll the EA API; on a new reading update river_data, track the peak
    level, and send/clear SMS level warnings.

    Errors are logged and swallowed so a failed poll never kills the caller.
    """
    try:
        # Defensive defaults: guarantee the keys read below exist even if
        # init_river() did not run or partially failed.  Fixes a latent
        # KeyError that silently disabled all alerting.
        river_data.setdefault('warning_active', False)
        river_data['last'].setdefault('high_level', river_data.get('level', 0))
        resp = requests.get(config['river']['api_url'])
        # If the timestamps have changed, we have a new reading, so process it
        if resp.json()['items']['latestReading']['dateTime'][:-1] != river_data['timestamp']:
            river_data['last']['timestamp'] = river_data['timestamp']
            river_data['timestamp'] = resp.json()['items']['latestReading']['dateTime'][:-1]
            river_data['last']['level'] = river_data['level']
            river_data['level'] = resp.json()['items']['latestReading']['value']
            # Trend relative to the previous reading.
            if river_data['level'] > river_data['last']['level']:
                river_data['status'] = 'rising'
            elif river_data['level'] < river_data['last']['level']:
                river_data['status'] = 'falling'
            else:
                river_data['status'] = 'steady'
            human_datetime = datetime.now().strftime('%d/%m/%Y %H:%M')
            now_iso_stamp = datetime.now().replace(microsecond=0).isoformat()
            # Track the highest level seen so far.
            if river_data['level'] > river_data['last']['high_level']:
                river_data['last']['high_level'] = river_data['level']
                river_data['last']['high'] = river_data['timestamp']
            if config['river']['warn_enable']:
                # Warn above threshold; re-warn only after a further 0.1m rise.
                if river_data['level'] > river_data['high_warn']:
                    if not river_data['warning_active'] or river_data['level'] > (river_data['last']['warn_level'] + 0.1):
                        if not river_data['warning_active']:
                            logger.critical('River level high! '+str(river_data['level'])+'m. Sending alert SMS!')
                            warn_sms_text = human_datetime + ': River level high! '+str(river_data['level'])+'m'
                        else:
                            logger.critical('River level rising! '+str(river_data['level'])+'m. Sending alert SMS!')
                            warn_sms_text = human_datetime + ': River level rising! '+str(river_data['level'])+'m'
                        send_sms(config['river']['warn_sms_list'], warn_sms_text)
                        logger.critical('Alerts sent')
                        river_data['last']['warn_level'] = river_data['level']
                        river_data['last']['high_level'] = river_data['level']
                        river_data['warning_active'] = True
                        river_data['last']['warn'] = now_iso_stamp
                # Clear the warning once the level falls below the hysteresis point.
                if river_data['warning_active'] and river_data['level'] < river_data['high']:
                    logger.warning('River returned to normal levels')
                    normal_sms_text = human_datetime + ': River level returned to normal. '+str(river_data['level'])+'m'
                    send_sms(config['river']['warn_sms_list'], normal_sms_text)
                    river_data['warning_active'] = False
    except Exception as e:
        logger.error('River task failed: ' + str(e))
|
from flask import jsonify, make_response, request
from flask_restful import Resource
from ..models.sale_model import *
class Sales(Resource):
    """Collection endpoint for sale records."""

    def __init__(self):
        self.sale = Sale()

    def post(self):
        """Create a sale record and report its total cost."""
        data = request.get_json()
        user_id = data['user_id']
        product_id = data['product_id']
        quantity = data['quantity']
        unit_price = data['unit_price']
        # Total is derived server-side from quantity and unit price.
        total_cost = int(quantity) * int(unit_price)
        created = self.sale.create_sale(user_id, product_id, quantity, unit_price)
        payload = {
            "message": "success",
            "sale": created,
            "total_cost": total_cost
        }
        return make_response(jsonify(payload), 201)

    def get(self):
        """List every recorded sale."""
        all_sales = self.sale.get_all_sales()
        payload = {
            "status": "OK",
            "message": "success",
            "products": all_sales
        }
        return make_response(jsonify(payload), 200)
class FilterRecordsByAttendant(Resource):
    """Read-only endpoint returning the sales made by one attendant."""

    def __init__(self):
        self.sales = Sale()

    def get(self, user_id):
        """List the sale records belonging to the given attendant."""
        records = self.sales.filter_sales_records_by_attendant(user_id)
        payload = {
            "status": "OK",
            "message": "success",
            "product": records
        }
        return make_response(jsonify(payload), 200)
|
import json
import os
import random
import re
import string
from json import JSONDecodeError
from typing import Optional, Tuple, Iterable, Callable
import dns
import dns.name
import dns.query
import pytest
import requests
from requests.exceptions import SSLError
def random_mixed_case_string(n):
    """Random alphabetic string of length n containing both cases.

    Draws k in [1, n-1] lowercase letters and n-k uppercase letters, then
    shuffles them, so the result is guaranteed to mix cases (requires n >= 2).
    """
    k = random.randint(1, n - 1)
    letters = random.choices(string.ascii_lowercase, k=k)
    letters += random.choices(string.ascii_uppercase, k=n - k)
    random.shuffle(letters)
    return ''.join(letters)
@pytest.fixture()
def random_email() -> Callable[[], str]:
    # Factory for random mixed-case addresses under the reserved .test TLD.
    return lambda: f'{random_mixed_case_string(10)}@{random_mixed_case_string(10)}.desec.test'


@pytest.fixture()
def random_password() -> Callable[[], str]:
    # 16 random ASCII letters; not cryptographically secure, fine for e2e tests.
    return lambda: "".join(random.choice(string.ascii_letters) for _ in range(16))


@pytest.fixture()
def random_domainname() -> Callable[[], str]:
    # Random 16-letter name under the reserved .test TLD.
    return lambda: (
        "".join(random.choice(string.ascii_lowercase) for _ in range(16))
        + ".test"
    )


@pytest.fixture()
def random_local_public_suffix_domainname() -> Callable[[], str]:
    # Random name under the stack's local public suffix (dedyn.<stack domain>).
    return lambda: (
        "".join(random.choice(string.ascii_lowercase) for _ in range(16))
        + ".dedyn."
        + os.environ['DESECSTACK_DOMAIN']
    )
class DeSECAPIV1Client:
    """requests-based client for the deSEC API v1, used by the e2e tests.

    Logs every request/response to stdout and tries several TLS verification
    methods (self-signed bundles inside docker, system store outside).
    """
    base_url = "https://desec." + os.environ["DESECSTACK_DOMAIN"] + "/api/v1"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "User-Agent": "e2e2",
    }

    def __init__(self) -> None:
        super().__init__()
        self.email = None
        self.password = None
        self.domains = []  # names created through this client

        # We support two certificate verification methods
        # (1) against self-signed certificates, if /autocert path is present
        # (this is usually the case when run inside a docker container)
        # (2) against the default certificate store, if /autocert is not available
        # (this is usually the case when run outside a docker container)
        self.verify = True
        self.verify_alt = [
            f'/autocert/desec.{os.environ["DESECSTACK_DOMAIN"]}.cer',
            f'/autocert/get.desec.{os.environ["DESECSTACK_DOMAIN"]}.cer',
        ]

    @staticmethod
    def _filter_response_output(output: dict) -> dict:
        """Truncate the captcha challenge so logged responses stay readable."""
        try:
            output['challenge'] = output['challenge'][:10] + '...'
        except (KeyError, TypeError):
            pass
        return output

    def _do_request(self, *args, **kwargs):
        """Issue the request, trying each verification method in turn and
        remembering the first one that succeeds for subsequent calls."""
        verify_list = [self.verify] + self.verify_alt
        exc = None
        for verify in verify_list:
            try:
                reply = requests.request(*args, **kwargs, verify=verify)
            except SSLError as e:
                print(f'API <<< SSL could not verify against "{verify}"')
                exc = e
            else:
                # note verification preference for next time
                self.verify = verify
                self.verify_alt = verify_list
                self.verify_alt.remove(self.verify)
                return reply
        # All methods failed; re-raise the last SSL error.
        print(f'API <<< SSL could not be verified against any verification method')
        raise exc

    def _request(self, method: str, *, path: str, data: Optional[dict] = None, **kwargs) -> requests.Response:
        """Send `method` to `path` (absolute URLs pass through unchanged),
        JSON-encoding `data` and logging request and response."""
        if data is not None:
            data = json.dumps(data)

        url = self.base_url + path if re.match(r'^https?://', path) is None else path

        print(f"API >>> {method} {url}")
        if data:
            print(f"API >>> {type(data)}: {data}")

        response = self._do_request(
            method,
            url,
            data=data,
            headers=self.headers,
            **kwargs,
        )

        print(f"API <<< {response.status_code}")
        if response.text:
            try:
                print(f"API <<< {self._filter_response_output(response.json())}")
            except JSONDecodeError:
                print(f"API <<< {response.text}")

        return response

    def get(self, path: str, **kwargs) -> requests.Response:
        return self._request("GET", path=path, **kwargs)

    def post(self, path: str, data: Optional[dict] = None, **kwargs) -> requests.Response:
        return self._request("POST", path=path, data=data, **kwargs)

    def delete(self, path: str, **kwargs) -> requests.Response:
        return self._request("DELETE", path=path, **kwargs)

    def register(self, email: str, password: str) -> Tuple[requests.Response, requests.Response]:
        """Create an account; returns (captcha response, auth response)."""
        self.email = email
        self.password = password
        captcha = self.post("/captcha/")
        return captcha, self.post(
            "/auth/",
            data={
                "email": email,
                "password": password,
                "captcha": {
                    "id": captcha.json()["id"],
                    "solution": captcha.json()[
                        "content"
                    ],  # available via e2e configuration magic
                },
            },
        )

    def login(self, email: str, password: str) -> requests.Response:
        """Log in; on success store the token in the default headers so all
        subsequent requests are authenticated."""
        response = self.post(
            "/auth/login/", data={"email": email, "password": password}
        )
        token = response.json().get('token')
        if token is not None:
            self.headers["Authorization"] = f'Token {response.json()["token"]}'
        return response

    def domain_list(self) -> requests.Response:
        return self.get("/domains/")

    def domain_create(self, name) -> requests.Response:
        """Create a domain and remember its name on this client."""
        self.domains.append(name)
        return self.post(
            "/domains/",
            data={
                "name": name,
            }
        )

    def domain_destroy(self, name) -> requests.Response:
        """Delete a domain and forget it locally."""
        self.domains.remove(name)
        return self.delete(f"/domains/{name}/")

    def rr_set_create(self, domain_name: str, rr_type: str, records: Iterable[str], subname: str = '',
                      ttl: int = 3600) -> requests.Response:
        """Create an RRset of the given type/records under domain_name."""
        return self.post(
            f"/domains/{domain_name}/rrsets/",
            data={
                "subname": subname,
                "type": rr_type,
                "ttl": ttl,
                "records": records,
            }
        )
@pytest.fixture
def api_anon() -> DeSECAPIV1Client:
"""
Anonymous access to the API.
"""
return DeSECAPIV1Client()
@pytest.fixture()
def api_user(random_email, random_password) -> DeSECAPIV1Client:
    """
    Access to the API with a fresh user account (zero domains, one token). Authorization header
    is preconfigured, email address and password are randomly chosen.
    """
    client = DeSECAPIV1Client()
    new_email = random_email()
    new_password = random_password()
    # Register first, then log in so the auth token header is set up.
    client.register(new_email, new_password)
    client.login(new_email, new_password)
    return client
@pytest.fixture()
def api_user_domain(api_user, random_domainname) -> DeSECAPIV1Client:
    """
    Access to the API with a fresh user account that owns a domain with random name. The domain has
    no records other than the default ones.
    """
    api_user.domain_create(random_domainname())
    return api_user
class NSClient:
    """Minimal DNS-over-TCP query helper for the e2e tests.

    Subclasses set `where` to the name server's IP address.
    """
    where = None

    def query(self, qname: str, qtype: str):
        """Query `qname`/`qtype` at `self.where` and return rdata texts.

        Returns the set of rdata text representations found in the relevant
        section (AUTHORITY for NS queries, ANSWER otherwise), or an empty
        set if the RRset is not present.
        """
        print(f'DNS >>> {qname}/{qtype} @{self.where}')
        qname = dns.name.from_text(qname)
        qtype = dns.rdatatype.from_text(qtype)
        answer = dns.query.tcp(
            q=dns.message.make_query(qname, qtype),
            where=self.where,
            timeout=2
        )
        try:
            # NS data is typically delegated via the AUTHORITY section.
            section = dns.message.AUTHORITY if qtype == dns.rdatatype.from_text('NS') else dns.message.ANSWER
            response = answer.find_rrset(section, qname, dns.rdataclass.IN, qtype)
            print(f'DNS <<< {response}')
            return {i.to_text() for i in response.items}
        except KeyError:
            print('DNS <<< !!! not found !!! Complete Answer below:\n' + answer.to_text())
            # Bug fix: previously returned {} (an empty *dict*) while the
            # success path returns a set; return an empty set so callers get
            # a consistent type (e.g. equality checks against sets work).
            return set()
class NSLordClient(NSClient):
    """DNS client pointed at the stack's nslord name server."""
    # Address derived from the stack's IPv4 prefix; raises KeyError at import
    # time if DESECSTACK_IPV4_REAR_PREFIX16 is not set in the environment.
    where = os.environ["DESECSTACK_IPV4_REAR_PREFIX16"] + '.0.129'
@pytest.fixture()
def ns_lord() -> NSLordClient:
    """DNS query access to the nslord name server."""
    return NSLordClient()
|
from app import create_app

# Build the Flask application via the project's factory.
app = create_app()

if __name__ == '__main__':
    # Run the development server only when executed directly.
    app.run()
|
# -*- coding: utf-8 -*-
"""Batch face-recognition test: match each image in IMAGE_DIR against a
pre-computed gallery feature file and write top-1 name/similarity per image."""
import face_model_prnet_mask
import argparse
import cv2
import sys
import math
import numpy as np
import csv
import os
from deploy.FUNCTION import *

parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../model-resnet152-quality-mask/model,1', help='path to load model.')
# Bug fix: the GPU id is a numeric device index; the old `type=str` only
# "worked" for the default (argparse leaves non-string defaults untouched),
# but any value passed on the command line arrived as a string.
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--flip', default=1, type=int, help='whether do lr flip aug')
args = parser.parse_args()

model = face_model_prnet_mask.FaceModel(args)

IMAGE_DIR = '../images/test_imgs/false'
# Gallery features previously exported by the commented-out block below.
feature = np.load('../images/test/all_img_resnet152_mask.npy')
file_names = get_all_path(IMAGE_DIR)

# Context manager guarantees the result file is closed/flushed even on error.
with open('../images/test/result.txt', 'w') as txt_write:
    # Iterate entries directly instead of indexing with range(len(...)).
    for entry in file_names:
        name = entry[0][0]["name"]
        file_path = entry[0][1]["path"]
        img = cv2.imread(file_path)
        aligned_rgb = model.get_input(img)
        if aligned_rgb is None:
            # Face detection/alignment failed; skip this image.
            continue
        fea = model.get_feature(aligned_rgb)
        NAME, SIM, _ = get_top(fea, feature, top=1, COLOR='rgb')
        # print(name, NAME, SIM, face_score)
        txt_write.write(str(name) + ' ')
        txt_write.write(str(NAME[0]) + ' ')
        txt_write.write(str(SIM[0]) + '\n')
#
# IMAGE_DIR = '../images/test/20190430_ids/'
# feature = []
# dir0 = os.listdir(IMAGE_DIR)
# count = len(dir0)
# for i in dir0:
#     count -= 1
#     if count%2000 == 0:
#         print(count)
#
#     file_path = os.path.join(IMAGE_DIR+i)
#     img = cv2.imread(file_path)
#     if img is None:
#         continue
#     aligned_rgb = model.get_input(img)
#     if aligned_rgb is None:
#         continue
#
#     f_rgb = model.get_feature(aligned_rgb)
#     feature.append({'id':i.strip('.jpg'), 'fea_rgb':f_rgb})
# np.save('../images/test/all_img_resnet152_mask.npy', feature)
import numpy as np
import chipwhisperer as cw
from capture import scope, target
# Helper Functions
########################################
# ANCHOR: random_str_fn
import string
import random
def random_string(length):
    """Return a random string of `length` lowercase ASCII letters.

    Examples: "cpjsapcnrsdtjvlo", "btqfocsprbualtwt", "yzkwewjbkpmriccx".
    """
    letters = string.ascii_lowercase
    # Draw each character independently and join them into one string.
    picks = [random.choice(letters) for _ in range(length)]
    return ''.join(picks)
# ANCHOR_END: random_str_fn
########################################
# Do the actual traces
########################################
# ANCHOR: multiple_traces
from tqdm import trange
# Define the key used for the encryption
# This key has to be 128 bits = 16 bytes
# = 16 ascii characters in length
key_str = 'H4ck3rm4n-l33t42'
# Convert the key to a byte array
key = bytearray(key_str, 'ascii')
# Define the constant for the amount of traces
N = 100
# Collected plaintexts and their captured power traces (kept in lockstep).
textins = []
traces = []
# Loop through all traces
for i in trange(N, desc="Capturing traces"):
    # Define the plain text used
    # This plain text has to be a multiple of
    # 128 bits = 16 bytes = 16 ascii characters in length.
    plain_text = bytearray(random_string(16), 'ascii')
    # Capture the actual trace
    trace = cw.capture_trace(scope, target, plain_text, key)
    # If the capture timed out move to the next capture
    # (fewer than N traces may be recorded as a result).
    if trace is None:
        continue
    textins.append(plain_text)
    traces.append(trace.wave)
# ANCHOR_END: multiple_traces
########################################
# Convert to numpy arrays
########################################
# ANCHOR: to_np_arrays
np_traces = np.asarray(traces)
np_textins = np.asarray(textins)
# ANCHOR_END: to_np_arrays
# ANCHOR: save_np_arrays
# NOTE: assumes the 'output/' directory already exists — np.save does not
# create parent directories.
np.save('output/traces.npy', np_traces)
np.save('output/textins.npy', np_textins)
# ANCHOR_END: save_np_arrays
########################################
# Disconnect
########################################
scope.dis()
target.dis()
########################################
from musicscore.musicxml.attributes.attribute_abstract import AttributeAbstract, String
from musicscore.musicxml.attributes.color import Color
from musicscore.musicxml.attributes.font import Font
from musicscore.musicxml.types.complextypes.complextype import ComplexType
class Smulf(AttributeAbstract):
    """Mixin adding the 'smulf' glyph-name attribute to an element.

    NOTE(review): MusicXML spells this attribute 'smufl' (Standard Music
    Font Layout); 'smulf' here looks like a transposition typo. Renaming
    would break callers passing smulf=..., so it is only flagged — confirm
    against the rest of the package before changing it.
    """

    def __init__(self, smulf=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Registers the attribute under the name 'smulf' with the
        # project-defined type 'SmulfLyricsGlyphName'.
        self.generate_attribute('smulf', smulf, 'SmulfLyricsGlyphName')
class ComplexTypeElision(ComplexType, String, Font, Color, Smulf):
    """
    The elision type represents an elision between lyric syllables. The text content specifies the symbol used to
    display the elision. Common values are a no-break space (Unicode 00A0), an underscore (Unicode 005F), or an undertie
    (Unicode 203F). If the text content is empty, the smufl attribute is used to specify the symbol to use. Its value
    is a SMuFL canonical glyph name that starts with lyrics. The SMuFL attribute is ignored if the elision glyph is
    already specified by the text content. If neither text content nor a smufl attribute are present, the elision
    glyph is application-specific.
    """

    def __init__(self, value, *args, **kwargs):
        # `value` is the element's text content; all attribute handling is
        # delegated to the mixin base classes via super().
        super().__init__(value=value, *args, **kwargs)
|
# This is (slightly adapted) from PyTorch:
# TODO: ADD COPYRIGHT AND LINK
import asyncio
import torch
from torch._six import queue, container_abcs, string_classes
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch._utils import ExceptionWrapper
async def _single_task(in_queue, out_queue, device_id, done_event, async_sleep):
    """Asynchronously drain one worker queue, pinning each item's memory.

    Polls `in_queue` without blocking (sleeping `async_sleep` seconds between
    attempts so sibling tasks can run) until `done_event` is set. Each item
    is pinned via pin_memory() and forwarded to `out_queue`; failures during
    pinning are wrapped in an ExceptionWrapper and forwarded instead of
    raised, mirroring PyTorch's pin-memory thread behavior.
    """
    while not done_event.is_set():
        try:
            task = in_queue.get_nowait()
        except queue.Empty:
            # Queue empty: yield to the event loop instead of busy-waiting.
            await asyncio.sleep(async_sleep)
            continue
        if not done_event.is_set() and not isinstance(task, ExceptionWrapper):
            try:
                task = pin_memory(task)
            except Exception:
                task = ExceptionWrapper(
                    where="in pin memory thread for device {}".format(device_id)
                )
        # Retry the put until it succeeds or shutdown is requested; the
        # timeout keeps the loop responsive to done_event.
        while not done_event.is_set():
            try:
                out_queue.put(task, timeout=MP_STATUS_CHECK_INTERVAL)
                break
            except queue.Full:
                continue
        # Drop the reference promptly so (possibly large) pinned data is freed.
        del task
def _pin_memory_loop(in_queues, out_queues, device_id, done_event, async_sleep):
    """Thread entry point: run the async pinning tasks for all queue pairs
    against the given CUDA device until `done_event` is set."""
    # This setting is thread local, and prevents the copy in pin_memory from
    # consuming all CPU cores.
    torch.set_num_threads(1)
    torch.cuda.set_device(device_id)
    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
    # logic of this function.
    asyncio.run(_run_tasks(in_queues, out_queues, device_id, done_event, async_sleep))
async def _run_tasks(in_queues, out_queues, device_id, done_event, async_sleep):
    """Spawn one pinning task per (in, out) queue pair and await them all."""
    workers = [
        asyncio.create_task(
            _single_task(in_q, out_q, device_id, done_event, async_sleep)
        )
        for in_q, out_q in zip(in_queues, out_queues)
    ]
    await asyncio.gather(*workers)
def pin_memory(data):
    """Recursively pin the memory of all tensors contained in `data`.

    Tensors are pinned via Tensor.pin_memory(); mappings, sequences, and
    namedtuples are rebuilt with their elements pinned (plain sequences come
    back as lists); strings/bytes and anything else without a pin_memory
    attribute are returned unchanged.
    """
    # `torch._six` (which used to provide `string_classes` and
    # `container_abcs`) was removed from modern PyTorch; use the stdlib
    # equivalents directly so this function keeps working.
    from collections.abc import Mapping, Sequence

    if isinstance(data, torch.Tensor):
        return data.pin_memory()
    elif isinstance(data, (str, bytes)):
        # Checked before Sequence: strings are sequences but must not be
        # decomposed character by character.
        return data
    elif isinstance(data, Mapping):
        return {k: pin_memory(sample) for k, sample in data.items()}
    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
        return type(data)(*(pin_memory(sample) for sample in data))
    elif isinstance(data, Sequence):
        return [pin_memory(sample) for sample in data]
    elif hasattr(data, "pin_memory"):
        return data.pin_memory()
    else:
        return data
|
# Copyright 2021 Rosalind Franklin Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import os
from glob import glob, glob1
import yaml
import pandas as pd
from icecream import ic
from beautifultable import BeautifulTable as bt
from tqdm import tqdm
import skimage.transform as skt
import re
import subprocess
import numpy as np
import mrcfile
from . import params as prmMod
from . import metadata as mdMod
from . import motioncorr as mc2Mod
from . import logger as logMod
from . import ctffind as ctfMod
from . import align as alignMod
from . import recon as reconMod
from . import ctfsim as ctfsimMod
from . import savurecon as savuMod
def get_proj_name():
    """
    Function to get project name from user

    Returns:
        str: the validated project name (first command-line argument)

    Raises:
        ValueError: if no project name was given on the command line, or if
            it contains a character that is illegal in file names.
    """
    # Fail with a clear message instead of an opaque IndexError when the
    # project name argument is missing.
    if len(sys.argv) < 2:
        raise ValueError("Error in Ot2Rec.main.get_proj_name: no project name given.")
    project_name = sys.argv[1]
    # Check input validity (characters disallowed in file names)
    for char in ('<', '>', ':', '"', '/', '\\', '|', '?', '*'):
        if char in project_name:
            raise ValueError(f"Error in Ot2Rec.main.new_proj: Illegal character ({char}) found in input project name.")
    return project_name
def new_proj():
    """
    Subroutine executing actions when a new project has been initiated
    """
    proj_name = get_proj_name()

    # Write out the master yaml config template for this project
    prmMod.new_master_yaml(proj_name)
def get_master_metadata():
    """
    Subroutine to get master metadata from raw images

    Builds the master metadata from the raw images referenced by the master
    yaml config and serialises it to <project>_master_md.yaml.
    """
    project_name = get_proj_name()
    # Create empty Metadata object
    # Master yaml file will be read automatically
    meta = mdMod.Metadata(project_name=project_name,
                          job_type='master')
    # Create master metadata and serialise it as yaml file
    meta.create_master_metadata()
    master_md_name = project_name + '_master_md.yaml'
    with open(master_md_name, 'w') as f:
        yaml.dump(meta.metadata, f, indent=4)
def update_mc2_yaml():
    """
    Subroutine to update yaml file for motioncorr

    Diffs the master metadata against any previous MC2 output metadata to
    find still-unprocessed tilt series, then rewrites the MC2 yaml config
    with the resulting process list and derived pixel size.

    Raises:
        IOError: if the MC2 yaml config does not exist yet.
    """
    project_name = get_proj_name()
    # Check if MC2 yaml exists
    mc2_yaml_name = project_name + '_mc2.yaml'
    if not os.path.isfile(mc2_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_mc2_yaml: File not found.")
    # Read in master yaml
    master_yaml = project_name + '_proj.yaml'
    with open(master_yaml, 'r') as f:
        master_config = yaml.load(f, Loader=yaml.FullLoader)
    # Read in master metadata (as Pandas dataframe)
    master_md_name = project_name + '_master_md.yaml'
    with open(master_md_name, 'r') as f:
        master_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts', 'angles']]
    # Read in previous MC2 output metadata (as Pandas dataframe) for old projects
    mc2_md_name = project_name + '_mc2_md.yaml'
    if os.path.isfile(mc2_md_name):
        is_old_project = True
        with open(mc2_md_name, 'r') as f:
            mc2_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts', 'angles']]
    else:
        is_old_project = False
    # Diff the two dataframes to get numbers of tilt-series with unprocessed data
    if is_old_project:
        # Outer merge with indicator: rows only in master_md ('left_only')
        # are the images not yet processed by MC2.
        merged_md = master_md.merge(mc2_md,
                                    how='outer',
                                    indicator=True)
        unprocessed_images = merged_md.loc[lambda x: x['_merge']=='left_only']
    else:
        unprocessed_images = master_md
    unique_ts_numbers = unprocessed_images['ts'].sort_values(ascending=True).unique().tolist()
    # Read in MC2 yaml file, modify, and update
    mc2_params = prmMod.read_yaml(project_name=project_name,
                                  filename=mc2_yaml_name)
    mc2_params.params['System']['process_list'] = unique_ts_numbers
    mc2_params.params['System']['output_prefix'] = project_name
    mc2_params.params['System']['source_TIFF'] = master_config['source_TIFF']
    # 'ps_x2' sentinel means "twice the source pixel size" (super-resolution)
    if mc2_params.params['MC2']['desired_pixel_size'] == 'ps_x2':
        mc2_params.params['MC2']['desired_pixel_size'] = mc2_params.params['MC2']['pixel_size'] * 2
    else:
        mc2_params.params['MC2']['desired_pixel_size'] = mc2_params.params['MC2']['pixel_size']
    with open(mc2_yaml_name, 'w') as f:
        yaml.dump(mc2_params.params, f, indent=4, sort_keys=False)
def create_mc2_yaml():
    """
    Subroutine to create new yaml file for motioncorr
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it from current metadata
    prmMod.new_mc2_yaml(proj_name)
    update_mc2_yaml()
def run_mc2():
    """
    Method to run motioncorr

    Raises:
        IOError: if the MC2 yaml config or master metadata file is missing.
    """
    project_name = get_proj_name()
    # Check if prerequisite files exist
    mc2_yaml = project_name + '_mc2.yaml'
    master_md_file = project_name + '_master_md.yaml'
    if not os.path.isfile(mc2_yaml):
        raise IOError("Error in Ot2Rec.main.run_mc2: MC2 yaml config not found.")
    if not os.path.isfile(master_md_file):
        raise IOError("Error in Ot2Rec.main.run_mc2: Master metadata not found.")
    # Read in config and metadata
    mc2_config = prmMod.read_yaml(project_name=project_name,
                                  filename=mc2_yaml)
    master_md = mdMod.read_md_yaml(project_name=project_name,
                                   job_type='motioncorr',
                                   filename=master_md_file)
    # Create Logger object
    logger = logMod.Logger()
    # Create Motioncorr object
    mc2_obj = mc2Mod.Motioncorr(project_name=project_name,
                                mc2_params=mc2_config,
                                md_in=master_md,
                                logger=logger
                                )
    if not mc2_obj.no_processes:
        # Run MC2 recursively (and update input/output metadata) until nothing is left in the input metadata list
        mc2_obj.run_mc2()
        # Once all specified images are processed, export output metadata
        mc2_obj.export_metadata()
def update_ctffind_yaml():
    """
    Subroutine to update yaml file for ctffind

    Diffs MC2 output metadata against any previous ctffind output metadata
    to find unprocessed tilt series, then rewrites the ctffind yaml config
    (pixel size is inherited from the MC2 settings).

    Raises:
        IOError: if the ctffind or motioncorr yaml config is missing.
    """
    project_name = get_proj_name()
    # Check if ctffind and motioncorr yaml files exist
    ctf_yaml_name = project_name + '_ctffind.yaml'
    mc2_yaml_name = project_name + '_mc2.yaml'
    if not os.path.isfile(ctf_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_ctffind_yaml: ctffind config file not found.")
    if not os.path.isfile(mc2_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_ctffind_yaml: motioncorr config file not found.")
    # Read in MC2 metadata (as Pandas dataframe)
    # We only need the TS number and the tilt angle for comparisons at this stage
    mc2_md_name = project_name + '_mc2_mdout.yaml'
    with open(mc2_md_name, 'r') as f:
        mc2_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts', 'angles']]
    # Read in previous ctffind output metadata (as Pandas dataframe) for old projects
    ctf_md_name = project_name + '_ctffind_mdout.yaml'
    if os.path.isfile(ctf_md_name):
        is_old_project = True
        with open(ctf_md_name, 'r') as f:
            ctf_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts', 'angles']]
    else:
        is_old_project = False
    # Diff the two dataframes to get numbers of tilt-series with unprocessed data
    if is_old_project:
        # 'left_only' rows exist in the MC2 output but not in the ctffind
        # output, i.e. they still need CTF estimation.
        merged_md = mc2_md.merge(ctf_md,
                                 how='outer',
                                 indicator=True)
        unprocessed_images = merged_md.loc[lambda x: x['_merge']=='left_only']
    else:
        unprocessed_images = mc2_md
    unique_ts_numbers = unprocessed_images['ts'].sort_values(ascending=True).unique().tolist()
    # Read in ctffind yaml file, modify, and update
    # read in MC2 yaml as well (some parameters depend on MC2 settings)
    ctf_params = prmMod.read_yaml(project_name=project_name,
                                  filename=ctf_yaml_name)
    mc2_params = prmMod.read_yaml(project_name=project_name,
                                  filename=mc2_yaml_name)
    ctf_params.params['System']['output_prefix'] = project_name
    ctf_params.params['System']['process_list'] = unique_ts_numbers
    ctf_params.params['ctffind']['pixel_size'] = mc2_params.params['MC2']['desired_pixel_size']
    with open(ctf_yaml_name, 'w') as f:
        yaml.dump(ctf_params.params, f, indent=4, sort_keys=False)
def create_ctffind_yaml():
    """
    Subroutine to create new yaml file for ctffind
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it from current metadata
    prmMod.new_ctffind_yaml(proj_name)
    update_ctffind_yaml()
def run_ctffind():
    """
    Method to run ctffind

    Raises:
        IOError: if the ctffind yaml config or MC2 output metadata is missing.
    """
    project_name = get_proj_name()
    # Check if prerequisite files exist
    ctffind_yaml = project_name + '_ctffind.yaml'
    mc2_md_file = project_name + '_mc2_mdout.yaml'
    if not os.path.isfile(ctffind_yaml):
        raise IOError("Error in Ot2Rec.main.run_ctffind: ctffind yaml config not found.")
    if not os.path.isfile(mc2_md_file):
        raise IOError("Error in Ot2Rec.main.run_ctffind: MC2 output metadata not found.")
    # Read in config and metadata
    ctffind_config = prmMod.read_yaml(project_name=project_name,
                                      filename=ctffind_yaml)
    mc2_md = mdMod.read_md_yaml(project_name=project_name,
                                job_type='ctffind',
                                filename=mc2_md_file)
    # Create Logger object
    logger = logMod.Logger()
    # Create ctffind object
    ctffind_obj = ctfMod.ctffind(project_name=project_name,
                                 md_in=mc2_md,
                                 params_in=ctffind_config,
                                 logger_in=logger,
                                 )
    if not ctffind_obj.no_processes:
        ctffind_obj.run_ctffind()
def update_align_yaml():
    """
    Subroutine to update yaml file for IMOD newstack / alignment

    Diffs MC2 output metadata against any previous alignment output metadata
    to find unprocessed tilt series, then rewrites the alignment yaml config
    (pixel size converted from angstroms to nm for BatchRunTomo).

    Raises:
        IOError: if the alignment or motioncorr yaml config is missing.
    """
    project_name = get_proj_name()
    # Check if align and motioncorr yaml files exist
    align_yaml_name = project_name + '_align.yaml'
    mc2_yaml_name = project_name + '_mc2.yaml'
    if not os.path.isfile(align_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_align_yaml: alignment config file not found.")
    if not os.path.isfile(mc2_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_align_yaml: motioncorr config file not found.")
    # Read in MC2 metadata (as Pandas dataframe)
    # We only need the TS number and the tilt angle for comparisons at this stage
    mc2_md_name = project_name + '_mc2_mdout.yaml'
    with open(mc2_md_name, 'r') as f:
        mc2_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts']]
    # Read in previous alignment output metadata (as Pandas dataframe) for old projects
    align_md_name = project_name + '_align_mdout.yaml'
    if os.path.isfile(align_md_name):
        is_old_project = True
        with open(align_md_name, 'r') as f:
            align_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts']]
    else:
        is_old_project = False
    # Diff the two dataframes to get numbers of tilt-series with unprocessed data
    if is_old_project:
        # 'left_only' rows are tilt series present in the MC2 output but not
        # yet aligned.
        merged_md = mc2_md.merge(align_md,
                                 how='outer',
                                 indicator=True)
        unprocessed_images = merged_md.loc[lambda x: x['_merge']=='left_only']
    else:
        unprocessed_images = mc2_md
    unique_ts_numbers = unprocessed_images['ts'].sort_values(ascending=True).unique().tolist()
    # Read in ctffind yaml file, modify, and update
    # read in MC2 yaml as well (some parameters depend on MC2 settings)
    align_params = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml_name)
    mc2_params = prmMod.read_yaml(project_name=project_name,
                                  filename=mc2_yaml_name)
    align_params.params['System']['output_rootname'] = project_name
    align_params.params['System']['process_list'] = unique_ts_numbers
    # 0.1 factor converts angstroms to nm (BatchRunTomo expects nm)
    align_params.params['BatchRunTomo']['setup']['pixel_size'] = mc2_params.params['MC2']['desired_pixel_size'] * 0.1
    with open(align_yaml_name, 'w') as f:
        yaml.dump(align_params.params, f, indent=4, sort_keys=False)
def update_align_yaml_stacked():
    """
    Method to update yaml file for IMOD newstack / alignment --- if stacks already exist

    Interactively prompts for the stack folder, rootname, suffix and pixel
    size, discovers existing .st stack files, and rewrites the alignment
    yaml config accordingly.
    """
    project_name = get_proj_name()
    # User prompt for file specifications
    parent_path = input('Enter path of parent folder with stacks in: \n')
    assert (os.path.isdir(parent_path)), \
        "Error in main.update_align_yaml_stacked: IMOD parent folder not found."
    # Normalise: strip any trailing slashes
    while parent_path.endswith('/'):
        parent_path = parent_path[:-1]
    rootname = input('Enter rootname of project (remove final underscore): \n')
    while rootname.endswith('_'):
        rootname = rootname[:-1]
    suffix = input('Enter file suffix (leave empty if not applicable): \n')
    pixel_size = input('Enter desired pixel size (in angstroms): \n')
    # Find stack files
    st_file_list = glob(f'{parent_path}/{rootname}_*{suffix}/{rootname}_*{suffix}.st')
    # Extract tilt series number from each stack file's folder/file name
    ts_list = [int(i.split('/')[-1].replace(f'{rootname}_', '').replace(f'{suffix}.st', '')) for i in st_file_list]
    # Read in and update YAML parameters
    align_yaml_name = project_name + '_align.yaml'
    align_params = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml_name)
    align_params.params['System']['output_path'] = parent_path
    align_params.params['System']['output_rootname'] = rootname
    align_params.params['System']['output_suffix'] = suffix
    align_params.params['System']['process_list'] = ts_list
    # 0.1 factor converts angstroms to nm (BatchRunTomo expects nm)
    align_params.params['BatchRunTomo']['setup']['pixel_size'] = float(pixel_size) * 0.1
    # Write out YAML file
    with open(align_yaml_name, 'w') as f:
        yaml.dump(align_params.params, f, indent=4, sort_keys=False)
def create_align_yaml():
    """
    Subroutine to create new yaml file for IMOD newstack / alignment
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it from current metadata
    prmMod.new_align_yaml(proj_name)
    update_align_yaml()
def create_align_yaml_stacked():
    """
    Subroutine to create new yaml file for IMOD newstack / alignment
    when the stacks already exist on disk.
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it interactively
    prmMod.new_align_yaml(proj_name)
    update_align_yaml_stacked()
def run_align():
    """
    Method to run IMOD newstack / alignment

    Creates stack folders, rawtlt files and stacks, then aligns the stacks.
    """
    project_name = get_proj_name()
    # Check if prerequisite files exist
    align_yaml = project_name + '_align.yaml'
    mc2_md_file = project_name + '_mc2_mdout.yaml'
    # Read in config and metadata
    align_config = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml)
    mc2_md = mdMod.read_md_yaml(project_name=project_name,
                                job_type='align',
                                filename=mc2_md_file)
    # Create Logger object
    logger = logMod.Logger()
    # Create Align object
    align_obj = alignMod.Align(project_name=project_name,
                               md_in=mc2_md,
                               params_in=align_config,
                               logger_in=logger,
                               )
    # Run IMOD
    # Create the stacks and rawtlt files first
    if not align_obj.no_processes:
        align_obj.create_stack_folders()
        align_obj.create_rawtlt()
        align_obj.create_stack()
        align_obj.align_stack()
def run_align_ext():
    """
    Method to run IMOD alignment with existing stacks

    Skips stack creation (md_in=None) and only runs the alignment step.
    """
    project_name = get_proj_name()
    # Check if prerequisite files exist
    align_yaml = project_name + '_align.yaml'
    # Read in config and metadata
    align_config = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml)
    # Create Logger object
    logger = logMod.Logger()
    # Create Align object
    align_obj = alignMod.Align(project_name=project_name,
                               md_in=None,
                               params_in=align_config,
                               logger_in=logger,
                               )
    # Run IMOD
    # ext=True: stacks already exist, only align them
    if not align_obj.no_processes:
        align_obj.align_stack(ext=True)
def get_align_stats():
    """
    Method to extract statistics from alignment

    Parses each tilt series' align.log for residual-error statistics, sorts
    by weighted mean error, and prints a summary table.

    Raises:
        IOError: if the alignment metadata file or any align.log is missing.
    """
    project_name = get_proj_name()
    # Check if align metadata file exists
    align_md_name = project_name + '_align_mdout.yaml'
    if not os.path.isfile(align_md_name):
        raise IOError("Error in Ot2Rec.main.get_align_stats: alignment metadata file not found.")
    # Get stacks folder path from config
    align_yaml = project_name + '_align.yaml'
    align_config = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml)
    folder_path = align_config.params['System']['output_path']
    while folder_path.endswith('/'):
        folder_path = folder_path[:-1]
    rootname = align_config.params['System']['output_rootname']
    while rootname.endswith('_'):
        rootname = rootname[:-1]
    suffix = align_config.params['System']['output_suffix']
    # Read metadata to extract aligned TS numbers
    with open(align_md_name, 'r') as f:
        aligned_ts = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))['ts'].values.tolist()
    # Create pandas dataframe
    stats_df = pd.DataFrame(
        {'Tilt series': [],
         'Error mean (nm)': [],
         'Error SD (nm)': [],
         'Error weighted mean (nm)': [],
         }
    )
    # Compile search patterns once, outside the loop. Raw strings avoid
    # invalid-escape-sequence warnings; the decimal-number pattern previously
    # used an unescaped '.' which matched ANY character — escape it so only
    # genuine decimals match.
    mean_sd_criterion = re.compile(r'^\s*Residual error mean')
    weighted_mean_criterion = re.compile(r'^\s*Residual error weighted mean')
    decimal_pattern = re.compile(r'[0-9]+\.[0-9]+')
    # Loop through folders, find data and append to dataframe
    for curr_ts in aligned_ts:
        target_file_path = f"{folder_path}/{rootname}_{curr_ts:02d}{suffix}/align.log"
        if not os.path.isfile(target_file_path):
            raise IOError("Error in Ot2Rec.main.get_align_stats: alignment log file not found.")
        with open(target_file_path, 'r') as f:
            lines = f.readlines()
        # Residual error mean and SD
        filtered = list(filter(mean_sd_criterion.match, lines))
        filter_split = re.split(r'\s+', filtered[0])
        numbers = list(filter(decimal_pattern.match, filter_split))
        mean = float(numbers[0])
        sd = float(numbers[1])
        # Weighted residual error mean
        filtered = list(filter(weighted_mean_criterion.match, lines))
        filter_split = re.split(r'\s+', filtered[0])
        weighted_error = float(list(filter(decimal_pattern.match, filter_split))[0])
        stats_df.loc[len(stats_df.index)] = [curr_ts, mean, sd, weighted_error]
    stats_df.sort_values(by='Error weighted mean (nm)',
                         inplace=True)
    # Create table object and append data from dataframe
    stats = bt()
    stats.columns.headers = ['Tilt series', 'Error mean (nm)', 'Error SD (nm)', 'Error weighted mean (nm)']
    stats.rows.append(stats.columns.headers)
    for i in stats_df.values.tolist():
        stats.rows.append([int(i[0]), *i[1:]])
    # Print out stats
    print(stats)
def update_recon_yaml():
    """
    Subroutine to update yaml file for IMOD reconstruction

    Diffs alignment output metadata against any previous reconstruction
    output metadata to find unprocessed tilt series, then rewrites the
    reconstruction yaml config (BatchRunTomo setup inherited from the
    alignment config, minus the stack binning factor).

    Raises:
        IOError: if the reconstruction or alignment yaml config is missing.
    """
    project_name = get_proj_name()
    # Check if recon and align yaml files exist
    recon_yaml_name = project_name + '_recon.yaml'
    align_yaml_name = project_name + '_align.yaml'
    if not os.path.isfile(recon_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_recon_yaml: reconstruction config file not found.")
    if not os.path.isfile(align_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_recon_yaml: alignment config file not found.")
    # Read in alignment metadata (as Pandas dataframe)
    align_md_name = project_name + '_align_mdout.yaml'
    with open(align_md_name, 'r') as f:
        align_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts']]
    # Read in previous alignment output metadata (as Pandas dataframe) for old projects
    recon_md_name = project_name + '_recon_mdout.yaml'
    if os.path.isfile(recon_md_name):
        is_old_project = True
        with open(recon_md_name, 'r') as f:
            recon_md = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))[['ts']]
    else:
        is_old_project = False
    # Diff the two dataframes to get numbers of tilt-series with unprocessed data
    if is_old_project:
        # 'left_only' rows are aligned tilt series not yet reconstructed.
        merged_md = align_md.merge(recon_md,
                                   how='outer',
                                   indicator=True)
        unprocessed_images = merged_md.loc[lambda x: x['_merge']=='left_only']
    else:
        unprocessed_images = align_md
    unique_ts_numbers = unprocessed_images['ts'].sort_values(ascending=True).unique().tolist()
    # Read in reconstruction yaml file, modify, and update
    # read in alignment yaml as well (some parameters depend on alignment settings)
    recon_params = prmMod.read_yaml(project_name=project_name,
                                    filename=recon_yaml_name)
    align_params = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml_name)
    recon_params.params['System']['output_rootname'] = align_params.params['System']['output_rootname']
    recon_params.params['System']['output_suffix'] = align_params.params['System']['output_suffix']
    recon_params.params['System']['process_list'] = unique_ts_numbers
    # Copy the alignment BatchRunTomo setup, dropping the (align-only)
    # stack binning factor.
    recon_params.params['BatchRunTomo']['setup'] = {key: value for key, value in align_params.params['BatchRunTomo']['setup'].items() \
                                                    if key != 'stack_bin_factor'}
    with open(recon_yaml_name, 'w') as f:
        yaml.dump(recon_params.params, f, indent=4, sort_keys=False)
def create_recon_yaml():
    """
    Subroutine to create new yaml file for IMOD reconstruction
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it from current metadata
    prmMod.new_recon_yaml(proj_name)
    update_recon_yaml()
def run_recon():
    """
    Method to run IMOD reconstruction
    """
    project_name = get_proj_name()
    # Check if prerequisite files exist
    recon_yaml = project_name + '_recon.yaml'
    align_md_file = project_name + '_align_mdout.yaml'
    # Read in config and metadata
    recon_config = prmMod.read_yaml(project_name=project_name,
                                    filename=recon_yaml)
    align_md = mdMod.read_md_yaml(project_name=project_name,
                                  job_type='reconstruct',
                                  filename=align_md_file)
    # Create Logger object
    logger = logMod.Logger()
    # Create Recon object
    recon_obj = reconMod.Recon(project_name=project_name,
                               md_in=align_md,
                               params_in=recon_config,
                               logger_in=logger,
                               )
    # Run IMOD
    if not recon_obj.no_processes:
        recon_obj.recon_stack()
def update_savurecon_yaml():
    """
    Subroutine to update yaml file for savu reconstruction

    Collects aligned projections and tilt-angle files from the alignment
    output metadata and writes them into the savurecon yaml config.

    Raises:
        IOError: if the savurecon config, alignment config, or alignment
            output metadata file is missing.
    """
    project_name = get_proj_name()
    # Check if savurecon, align, and align_mdout yaml files exist
    savurecon_yaml_name = project_name + '_savurecon.yaml'
    align_yaml_name = project_name + '_align.yaml'
    align_md_name = project_name + '_align_mdout.yaml'
    if not os.path.isfile(savurecon_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_savurecon_yaml: reconstruction config file not found.")
    if not os.path.isfile(align_yaml_name):
        raise IOError("Error in Ot2Rec.main.update_savurecon_yaml: align.yaml file not found")
    if not os.path.isfile(align_md_name):
        raise IOError("Error in Ot2Rec.main.update_savurecon_yaml: alignment mdout file not found.")
    # Read in alignment metadata (as Pandas dataframe)
    with open(align_md_name, 'r') as f:
        align_md_df = pd.DataFrame(yaml.load(f, Loader=yaml.FullLoader))
    align_md_ts = align_md_df['ts']
    align_output = align_md_df['align_output']
    savurecon_params = prmMod.read_yaml(project_name=project_name,
                                        filename=savurecon_yaml_name)
    align_params = prmMod.read_yaml(project_name=project_name,
                                    filename=align_yaml_name)
    # Get tilt angle files
    # (IMOD writes a .tlt next to each aligned .st stack)
    align_tilt_files = []
    for f in align_md_df['stack_output']:
        align_tilt_files.append(f.replace('.st', '.tlt'))
    # Update savurecon yaml
    savurecon_params.params['System']['process_list'] = align_md_ts.sort_values(ascending=True).unique().tolist()
    savurecon_params.params['System']['output_rootname'] = align_params.params['System']['output_rootname']
    savurecon_params.params['System']['output_suffix'] = align_params.params['System']['output_suffix']
    savurecon_params.params['Savu']['setup']['tilt_angles'] = align_tilt_files
    savurecon_params.params['Savu']['setup']['aligned_projections'] = align_output.sort_values(ascending=True).unique().tolist()
    # Change centre of rotation to centre of image by default
    # This is now done in savurecon.py on an image-by-image basis, so the following 5 lines are deprecated
    # centre_of_rotation = []
    # for image in savurecon_params.params['Savu']['setup']['aligned_projections']:
    #     mrc = mrcfile.open(image)
    #     centre_of_rotation.append(float(mrc.header["ny"]/2)) # ydim/2
    # savurecon_params.params['Savu']['setup']['centre_of_rotation'] = centre_of_rotation
    with open(savurecon_yaml_name, 'w') as f:
        yaml.dump(savurecon_params.params, f, indent=4, sort_keys=False)
def create_savurecon_yaml():
    """
    Creates yaml for savu reconstruction
    """
    proj_name = get_proj_name()

    # Write the config template first, then fill it from current metadata
    prmMod.new_savurecon_yaml(proj_name)
    update_savurecon_yaml()
def run_savurecon():
    """Method to run Savu reconstruction on all configured tilt series."""
    project_name = get_proj_name()
    # Check if prerequisite files exist
    savurecon_yaml = project_name + '_savurecon.yaml'
    # Read in config and metadata
    savurecon_params = prmMod.read_yaml(project_name=project_name,
                                        filename=savurecon_yaml)
    # Create Logger object
    logger = logMod.Logger()
    # Create SavuRecon object
    savurecon_obj = savuMod.SavuRecon(project_name=project_name,
                                      params_in=savurecon_params,
                                      logger_in=logger,
                                      )
    # Run Savu
    savurecon_obj.run_savu_all()
def cleanup():
    """
    Method to clean up project folder to save space.

    Removes the whole motion-correction output folder (if present) and the
    intermediary IMOD files (backup `*~` files and `*_full_rec.*` volumes)
    from the reconstruction output folder.
    """
    import shutil

    project_name = get_proj_name()
    mc2_yaml = project_name + '_mc2.yaml'
    recon_yaml = project_name + '_recon.yaml'

    # Create Logger object
    logger = logMod.Logger()

    if os.path.isfile(mc2_yaml):
        mc2_config = prmMod.read_yaml(project_name=project_name,
                                      filename=mc2_yaml)
        mc2_path = mc2_config.params['System']['output_path']
        if os.path.isdir(mc2_path):
            logger(f"Deleting {mc2_path} folder and its contents...")
            # shutil.rmtree is portable and avoids spawning an external
            # `rm -rf` process.
            shutil.rmtree(mc2_path, ignore_errors=True)

    if os.path.isfile(recon_yaml):
        recon_config = prmMod.read_yaml(project_name=project_name,
                                        filename=recon_yaml)
        recon_path = recon_config.params['System']['output_path']
        if os.path.isdir(recon_path):
            logger("Deleting intermediary IMOD files...")
            # os.path.join guards against output_path lacking a trailing
            # slash (the original string concatenation silently matched
            # nothing in that case).
            files = glob(os.path.join(recon_path, 'stack*', '*.*~')) + \
                glob(os.path.join(recon_path, 'stack*', '*_full_rec.*'))
            # BUG FIX: the original ran `rm` even with an empty file list,
            # which makes `rm` error out.  Delete each match directly.
            for curr_file in files:
                os.remove(curr_file)
def run_all():
    """
    Run the complete default pipeline in one go: metadata collection,
    motion correction, CTF estimation, alignment and reconstruction,
    all using default settings.

    Each stage logs a banner, (re)creates its YAML config with defaults,
    then executes.
    """
    logger = logMod.Logger()
    # Collect raw images and produce master metadata
    logger("Collecting raw images...")
    get_master_metadata()
    # Motion correction
    logger("Motion correction in progress...")
    create_mc2_yaml()
    run_mc2()
    # CTF estimation
    logger("CTF estimation in progress...")
    create_ctffind_yaml()
    run_ctffind()
    # Alignment
    logger("Alignment in progress...")
    create_align_yaml()
    run_align()
    # Reconstruction
    logger("Reconstruction in progress...")
    create_recon_yaml()
    run_recon()
def run_ctfsim():
    """
    Method to run simulator for CTF from CTFFIND4 outputs.

    Interactively asks for a file rootname, the pixel size of the
    motion-corrected images (Angstroms) and the downsampling factor, then
    writes one PSF stack (.mrc) plus a matching .rawtlt tilt-angle file
    per tilt series under ./PSF/.
    """
    project_name = get_proj_name()
    rootname = input(f'Enter file rootname: (Default: {project_name})\n')
    if len(rootname) == 0:
        rootname = project_name
    # Strip trailing slashes so the rootname is usable inside file names.
    while rootname.endswith('/'):
        rootname = rootname[:-1]
    # Pixel size is entered in Angstroms and converted to metres here.
    pixel_size = input(f'Enter pixel size of motion-corrected images (in Angstroms)\n')
    pixel_size = float(pixel_size) * 1e-10
    ds_factor = int(input(f'Enter downsampling factor (must be same as alignment/reconstruction)\n'))
    # Read in metadata from ctffind
    ctffind_md_file = project_name + '_ctffind_mdout.yaml'
    ctffind_obj = mdMod.read_md_yaml(project_name=project_name,
                                     job_type='ctfsim',
                                     filename=ctffind_md_file)
    ctffind_md = pd.DataFrame(ctffind_obj.metadata)
    # Read a sample image to get the (downsampled) output dimensions.
    sample_image = ctffind_md.iloc[0].file_paths
    with mrcfile.open(sample_image) as source:
        source_dim = skt.downscale_local_mean(source.data, (ds_factor, ds_factor)).shape
    # Generate a unit point source at the image centre; its FFT is reused
    # for every PSF computation below.
    ps = np.zeros(source_dim[-2:], dtype=np.float32)
    ps[ps.shape[0]//2, ps.shape[1]//2] = 1
    ps_k = np.fft.fft2(ps).astype(np.cdouble)
    # Calculate the grids in reciprocal space
    k2_grid, alpha_g_grid = ctfsimMod.calculate_k_grids(source_dim, pixel_size*ds_factor)
    # Grab tilt series numbers and tilt angles from metadata
    ts_list = sorted(pd.Series(ctffind_md['ts']).unique())
    tqdm_iter = tqdm(ts_list, ncols=100)
    for curr_ts in tqdm_iter:
        # Create folders and subfolders
        subfolder_path = f'PSF/{rootname}_{curr_ts:02}'
        os.makedirs(subfolder_path, exist_ok=True)
        # Find txt files from ctffind
        glob_list = glob1('./ctffind/', f'{rootname}_{curr_ts:03}_*ctffind.txt')
        # Tilt angle is encoded as the third underscore-separated field of
        # the file name — assumes the ctffind naming convention; TODO confirm.
        angle_list = [float(i.split('/')[-1].split('_')[2]) for i in glob_list]
        # angle_index maps each file to its rank in ascending-angle order,
        # so the PSF stack comes out sorted by tilt angle.
        angle_index = [sorted(angle_list).index(i) for i in angle_list]
        full_psf = np.empty(shape=(len(angle_list), *source_dim[-2:]),
                            dtype=np.float32)
        for index in range(len(angle_index)):
            full_psf[angle_index[index], ...] = ctfsimMod.get_psf(ctffile='./ctffind/' + glob_list[index],
                                                                  point_source_recip=ps_k,
                                                                  k2_grid=k2_grid,
                                                                  alpha_g=alpha_g_grid)
        # Write out psf stack
        with mrcfile.new(subfolder_path + f'/{rootname}_{curr_ts:02}.mrc', overwrite=True) as f:
            f.set_data(full_psf)
        # Write out rawtlt file (one angle per line, ascending)
        with open(subfolder_path + f'/{rootname}_{curr_ts:02}.rawtlt', 'w') as f:
            for angle in sorted(angle_list):
                f.writelines(str(angle) + '\n')
def update_recon_yaml_stacked():
    """
    Method to update yaml file for savu reconstruction --- if stacks already exist.

    Prompts the user for the stack folder layout, discovers the .mrc stacks
    and .rawtlt files, and writes the discovered lists into the project's
    savurecon YAML.
    """
    project_name = get_proj_name()
    # User prompt for file specifications
    parent_path = input('Enter path of parent folder with stacks in: \n')
    assert (os.path.isdir(parent_path)), \
        "Error in main.update_recon_yaml_stacked: IMOD parent folder not found."
    while parent_path.endswith('/'):
        parent_path = parent_path[:-1]
    rootname = input('Enter rootname of project (remove final underscore): \n')
    while rootname.endswith('_'):
        rootname = rootname[:-1]
    suffix = input('Enter file suffix (leave empty if not applicable): \n')
    # NOTE(review): pixel_size is collected but never used below — confirm
    # whether it was meant to be written into the YAML.
    pixel_size = input('Enter desired pixel size (in angstroms): \n')
    # Find stack files
    st_file_list = glob(f'{parent_path}/{rootname}_*{suffix}/{rootname}_*{suffix}.mrc')
    # Find rawtlt files
    rawtlt_file_list = glob(f'{parent_path}/{rootname}_*{suffix}/{rootname}_*{suffix}.rawtlt')
    # Extract tilt series number from each stack file name
    ts_list = [int(i.split('/')[-1].replace(f'{rootname}_', '').replace(f'{suffix}.mrc', '')) for i in st_file_list]
    # Read in and update YAML parameters
    recon_yaml_name = project_name + '_savurecon.yaml'
    recon_params = prmMod.read_yaml(project_name=project_name,
                                    filename=recon_yaml_name)
    recon_params.params['System']['process_list'] = ts_list
    recon_params.params['System']['output_rootname'] = rootname
    recon_params.params['System']['output_suffix'] = suffix
    recon_params.params['Savu']['setup']['tilt_angles'] = rawtlt_file_list
    recon_params.params['Savu']['setup']['aligned_projections'] = st_file_list
    # Write out YAML file
    with open(recon_yaml_name, 'w') as f:
        yaml.dump(recon_params.params, f, indent=4, sort_keys=False)
def create_recon_yaml_stacked():
    """
    Create a fresh Savu-reconstruction YAML for pre-existing stacks and
    populate it interactively.
    """
    proj_name = get_proj_name()

    # Template first, then fill in from user prompts and discovered stacks.
    prmMod.new_savurecon_yaml(proj_name)
    update_recon_yaml_stacked()
def run_recon_ext():
    """
    Method to run IMOD reconstruction
    """
    proj_name = get_proj_name()

    # Prerequisite YAML from the reconstruction setup step
    recon_yaml = proj_name + '_recon.yaml'
    recon_config = prmMod.read_yaml(project_name=proj_name,
                                    filename=recon_yaml)

    logger = logMod.Logger()

    # No metadata passed in (md_in=None): stacks come straight from config.
    recon_obj = reconMod.Recon(project_name=proj_name,
                               md_in=None,
                               params_in=recon_config,
                               logger_in=logger,
                               )

    # ext=True tells Recon to reconstruct pre-existing external stacks.
    if not recon_obj.no_processes:
        recon_obj.recon_stack(ext=True)
|
from django.contrib import admin
from .models import Ask

# Register your models here.
# Expose the Ask model in the Django admin using the default ModelAdmin.
admin.site.register(Ask)
|
import os
path = str(os.path.dirname(os.path.abspath(__file__)))
path = path + "\errorlog.txt"
f = open(path, 'w')
print
try:
import numpy as np
message = "Numpy has been initiated successfully!\n"
f.write(message)
print message
except Exception(e):
message = str(e)+"\n"
f.write(message)
print message
try:
import scipy as sp
message = "Scipy has been initiated successfully!\n"
f.write(message)
print message
except :
message = "Scipy has failed to initiate.\n"
f.write(message)
print message
try:
import sklearn
message = "Scikit-learn has been initiated successfully!\n"
f.write(message)
print message
except:
message = "Scikit-learn has failed to initiate.\n"
f.write(message)
print message
try:
import MySQLdb
message = "MySQL has been initiated successfully!\n"
f.write(message)
print message
except:
message = "MySQL has failed to initiate.\n"
f.write(message)
print message
f.close() |
# encoding: utf-8
"""
Matching tool for a :class:`LineView` instance.
"""
# force new style imports
from __future__ import absolute_import
# scipy
import numpy as np
# internal
from madgui.core import wx
from madgui.util import unit
# exported symbols
__all__ = [
'CompareTool',
]
class CompareTool(object):

    """
    View component to display mirko envelope for comparison.

    Draws the mirko envelope into a LineView figure whenever that figure
    is replotted.
    """

    # TODO: allow to plot any dynamically loaded curve from any file

    def __init__(self, panel):
        """
        Create a mirko envelope display component.

        The envelope is NOT visible by default.  The constructor bails out
        early (leaving self._metadata as None) when the model has no
        'review' section or that section lacks the required columns; in
        that case no toolbar button is added either.
        """
        self._view = view = panel.view
        self._model = view.segment.session.data
        self._repo = view.segment.session.repo
        self._lines = {}        # name -> list of matplotlib line artists
        self._visible = False
        self._metadata = None   # stays None when envelope data is unavailable
        if not self._model:
            return
        try:
            metadata = self._model['review']
        except KeyError:
            return
        # All three columns (s, x, y) must be declared in the data file.
        col_names = [view.sname, view.xname, view.yname]
        if not all(col in metadata['columns'] for col in col_names):
            return
        self._metadata = metadata
        # connect to toolbar
        bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR)
        tool = panel.toolbar.AddCheckTool(
            wx.ID_ANY,
            bitmap=bmp,
            shortHelp='Show MIRKO envelope',
            longHelp='Show MIRKO envelope for comparison. The envelope is computed for the default parameters.')
        panel.Bind(wx.EVT_TOOL, self.on_click, tool)
        # subscribe to plotting
        view.hook.plot_ax.connect(self.plot_ax)

    def on_click(self, event):
        """Invoked when user clicks Mirko-Button"""
        self.visible = event.IsChecked()

    @property
    def test_file(self):
        """Get the envelope file."""
        return self._repo.get(self._metadata['file'])

    @property
    def visible(self):
        """Visibility state of the envelope."""
        return self._visible

    @visible.setter
    def visible(self, visible):
        """Set visibility, then draw or remove both envelope curves and redraw."""
        self._visible = visible
        view = self._view
        xname = view.xname
        yname = view.yname
        if visible:
            self.plot_ax(view.axes[xname], xname)
            self.plot_ax(view.axes[yname], yname)
        else:
            self._remove_ax(xname)
            self._remove_ax(yname)
        self._view.figure.canvas.draw()

    def load_data(self, name):
        """Load envelope from file.

        Returns a dict with the 's' positions and the named column, each
        scaled by the unit declared in the metadata.
        """
        column_info = self._metadata['columns']
        scol = column_info['s']
        ycol = column_info[name]
        with self.test_file.filename() as f:
            aenv = np.loadtxt(f, usecols=(scol['column'], ycol['column']))
        return {
            's': unit.from_config(scol['unit']) * aenv[:,0],
            name: unit.from_config(ycol['unit']) * aenv[:,1],
        }

    def plot_ax(self, axes, name):
        """Plot the envelope into the figure."""
        if not self.visible:
            return
        # Drop any stale artists for this curve before drawing fresh ones.
        self._remove_ax(name)
        view = self._view
        envdata = self.load_data(name)
        sname = view.sname
        self._lines[name] = axes.plot(
            unit.strip_unit(envdata[sname], view.unit[sname]),
            unit.strip_unit(envdata[name], view.unit[name]),
            'k-x')

    def _remove_ax(self, name):
        """Remove the envelope from the figure."""
        for l in self._lines.pop(name, []):
            l.remove()
|
import argparse
import datetime
import re
from typing import Optional, Union
import discord
from discord.ext import commands
from ..functions import ExpiringCache
from ..util.vars import LinksAndVars, ShinobiMatch
# Matches repeated <number><unit> groups, e.g. "2h30m" -> [("2","h"), ("30","m")]
time_regex = re.compile(r"(?:(\d{1,5})(h|s|m|d))+?")
# Seconds per supported unit suffix
time_dict = {"h": 3600, "s": 1, "m": 60, "d": 86400}
class TimeConverter(commands.Converter):
    """Parse compact duration strings such as ``"2h30m"`` into whole seconds."""

    async def convert(self, ctx, argument):
        total = 0
        # Each match is an (amount, unit-suffix) pair.
        for amount, suffix in re.findall(time_regex, argument.lower()):
            try:
                total += time_dict[suffix] * float(amount)
            except KeyError:
                raise commands.BadArgument(
                    f"{suffix} is an invalid time key! h|m|s|d are valid arguments"
                )
            except ValueError:
                raise commands.BadArgument(f"{amount} is not a number!")
        return round(total)
class Arguments(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of exiting, so command
    handlers can catch and report parse errors."""
    def error(self, message):
        # argparse normally prints usage and calls sys.exit(); raise instead.
        raise RuntimeError(message)
def can_execute_action(ctx, user, target):
    """Return True when *user* may act on *target*: the bot owner, the
    guild owner, or anyone whose top role is strictly higher than the
    target's."""
    if user.id == ctx.bot.owner_id:
        return True
    if user == ctx.guild.owner:
        return True
    return user.top_role > target.top_role
class MemberID(commands.Converter):
    """Resolve an argument to a guild member, or — when the user already
    left the guild — to a minimal stand-in object carrying only the ID so
    that hackbans still work."""

    async def convert(self, ctx, argument):
        try:
            resolved = await commands.MemberConverter().convert(ctx, argument)
        except commands.BadArgument:
            # Not a mention/name: fall back to treating it as a raw ID.
            try:
                member_id = int(argument, base=10)
            except ValueError:
                raise commands.BadArgument(
                    f"{argument} is not a valid member or member ID."
                ) from None
            resolved = await ctx.bot.get_or_fetch_member(ctx.guild, member_id)
            if resolved is None:
                # hackban case: user not in guild; synthesize an id-only object
                return type(
                    "_Hackban",
                    (),
                    {
                        "id": member_id,
                        "__str__": lambda s: f"Member ID {s.id}"
                    },
                )()
        if not can_execute_action(ctx, ctx.author, resolved):
            raise commands.BadArgument(
                "You cannot do this action on this user due to role hierarchy."
            )
        return resolved
class BannedMember(commands.Converter):
    """Resolve an argument to an entry of the guild's ban list."""

    async def convert(self, ctx, argument):
        if argument.isdigit():
            # Numeric input: look the ban up directly by user ID.
            try:
                return await ctx.guild.fetch_ban(
                    discord.Object(id=int(argument, base=10)))
            except discord.NotFound:
                raise commands.BadArgument(
                    "This member has not been banned before.") from None
        # Otherwise match on the rendered user string (name#discriminator).
        bans = await ctx.guild.bans()
        match = discord.utils.find(lambda entry: str(entry.user) == argument,
                                   bans)
        if match is None:
            raise commands.BadArgument(
                "This member has not been banned before.")
        return match
class ActionReason(commands.Converter):
    """Prefix a moderation reason with its author, rejecting reasons that
    would overflow Discord's 512-character audit-log limit."""

    async def convert(self, ctx, argument):
        formatted = f"{ctx.author} (ID: {ctx.author.id}): {argument}"
        if len(formatted) <= 512:
            return formatted
        # Report how many characters the user actually has available.
        reason_max = 512 - len(formatted) + len(argument)
        raise commands.BadArgument(
            f"Reason is too long ({len(argument)}/{reason_max})")
def safe_reason_append(base, to_append):
    """Append ``(to_append)`` to *base* unless the result would exceed the
    512-character audit-log limit, in which case *base* is returned
    unchanged."""
    candidate = f"{base}({to_append})"
    return base if len(candidate) > 512 else candidate
class AntiRaidConfig:
    """Per-guild anti-raid settings hydrated from a database record."""

    __slots__ = ("raid_mode", "id", "bot", "broadcast_channel_id")

    @classmethod
    async def from_record(cls, record, bot):
        """Build a config object from a raw database *record*."""
        inst = cls()
        # the basic configuration
        inst.bot = bot
        inst.raid_mode = record["raid_mode"]
        inst.id = record["id"]
        inst.broadcast_channel_id = record["broadcast_channel"]
        return inst

    @property
    def broadcast_channel(self):
        """Channel used for raid announcements, or None when the guild or
        channel can no longer be resolved."""
        guild = self.bot.get_guild(self.id)
        if not guild:
            return guild
        return guild.get_channel(self.broadcast_channel_id)
class MentionSpamConfig:
    """Per-guild mention-spam settings hydrated from a database record."""

    __slots__ = ("id", "bot", "mention_count", "safe_mention_channel_ids")

    @classmethod
    async def from_record(cls, record, bot):
        """Build a config object from a raw database *record*."""
        inst = cls()
        # the basic configuration
        inst.bot = bot
        inst.id = record["id"]
        inst.mention_count = record.get("mention_count")
        # Channels exempt from mention-spam checks; tolerate NULL in the DB.
        exempt = record.get("safe_mention_channel_ids") or []
        inst.safe_mention_channel_ids = set(exempt)
        return inst
class GiveawayConfig:
    """Parsed view of a giveaway announcement message.

    All state is recovered from the message's single embed; raises
    AttributeError when the message does not look like an active giveaway.
    """
    __slots__ = (
        "id",
        "host",
        "channel",
        "message",
        "embed",
        "role_required",
        "tasks",
        "prize",
        "end_time",
        "embed_dict"
    )
    @classmethod
    async def from_record(cls, record: discord.Message, bot: commands.Bot):
        """Build a config from the giveaway *record* message."""
        self = cls()
        self.id = record.id
        self.channel = record.channel
        # A giveaway announcement carries exactly one embed.
        if len(record.embeds) == 0 or len(record.embeds) > 1:
            raise AttributeError("This is not a giveaway message")
        self.embed = record.embeds[0]
        self.embed_dict = self.embed.to_dict()
        if self.embed_dict.get("fields") is None:
            raise AttributeError("This is not a giveaway message")
        # An ended giveaway has its description reset to this placeholder.
        if self.embed.description == '\U0001f381 Win a Prize today':
            raise AttributeError("This giveaway has already been ended!")
        role_required = discord.utils.find(lambda a: a["name"].lower() == "Role Required".lower(), self.embed_dict["fields"])
        self.role_required = role_required["value"] if role_required is not None else None
        tasks = discord.utils.find(lambda a: a["name"].lower() == "\U0001f3c1 Tasks".lower(), self.embed_dict["fields"])
        self.tasks = tasks["value"] if tasks is not None else None
        # NOTE(review): "Giveway" presumably matches the (misspelled) field
        # name written by the embed builder elsewhere — do not "fix" it here.
        self.end_time = discord.utils.find(lambda a: a["name"].lower() == "Giveway ends in".lower(), self.embed_dict["fields"])["value"].split('|')[0].strip()
        # Prize is the first **bold** span of the description.
        self.prize = self.embed.description.split('**')[1]
        self.host = self.embed.author
        return self
class CooldownByContent(commands.CooldownMapping):
    """Cooldown mapping keyed on (channel, message content), so repeated
    identical messages share one bucket regardless of author."""

    def _bucket_key(self, message):
        # BUG FIX: the override was missing ``self``; when called as a bound
        # method the instance was passed as ``message``, so the key was
        # computed from the mapping object instead of the message.
        return (message.channel.id, message.content)
class SpamChecker:
    """This spam checker does a few things.
    1) It checks if a user has spammed more than 10 times in 12 seconds
    2) It checks if the content has been spammed 15 times in 17 seconds.
    3) It checks if new users have spammed 30 times in 35 seconds.
    4) It checks if "fast joiners" have spammed 10 times in 12 seconds.
    The second case is meant to catch alternating spam bots while the first one
    just catches regular singular spam bots.
    From experience these values aren't reached unless someone is actively spamming.
    """
    def __init__(self):
        # Each mapping is (rate, per-seconds, bucket granularity).
        self.by_content = CooldownByContent.from_cooldown(
            15, 17.0, commands.BucketType.member)
        self.by_user = commands.CooldownMapping.from_cooldown(
            10, 12.0, commands.BucketType.user)
        self.last_join = None  # joined_at of most recent member, for fast-join detection
        self.new_user = commands.CooldownMapping.from_cooldown(
            30, 35.0, commands.BucketType.channel)
        # user_id flag mapping (for about 30 minutes)
        self.fast_joiners = ExpiringCache(seconds=1800.0)
        self.hit_and_run = commands.CooldownMapping.from_cooldown(
            10, 12, commands.BucketType.channel)
    @staticmethod
    def is_new(member):
        # "New" = account younger than 90 days AND joined within the last week.
        now = discord.utils.utcnow()
        seven_days_ago = now - datetime.timedelta(days=7)
        ninety_days_ago = now - datetime.timedelta(days=90)
        return member.created_at > ninety_days_ago and member.joined_at > seven_days_ago
    def is_spamming(self, message):
        # Returns True when the message trips any rate-limit bucket.  Each
        # update_rate_limit call mutates bucket state, so check order matters.
        if message.guild is None:
            return False
        current = message.created_at.timestamp()
        if message.author.id in self.fast_joiners:
            bucket = self.hit_and_run.get_bucket(message)
            if bucket.update_rate_limit(current):
                return True
        if self.is_new(message.author):
            new_bucket = self.new_user.get_bucket(message)
            if new_bucket.update_rate_limit(current):
                return True
        user_bucket = self.by_user.get_bucket(message)
        if user_bucket.update_rate_limit(current):
            return True
        content_bucket = self.by_content.get_bucket(message)
        if content_bucket.update_rate_limit(current):
            return True
        return False
    def is_fast_join(self, member):
        # Flags members who join within 2 seconds of the previous join and
        # remembers them in fast_joiners for the hit-and-run check above.
        joined = member.joined_at or discord.utils.utcnow()
        if self.last_join is None:
            self.last_join = joined
            return False
        is_fast = (joined - self.last_join).total_seconds() <= 2.0
        self.last_join = joined
        if is_fast:
            self.fast_joiners[member.id] = True
        return is_fast
class Characters:
    '''Model class for a playable shinobi character.

    The stat properties (hitpoint/regainpoint/healpoint/specialpoint) are
    derived purely from the character's category string.
    '''
    __slots__ = [
        'id',
        'name',
        'images',
        'emoji',
        'category',
        'kwargs',
    ]

    def __init__(self, **kwargs):
        # Display name, e.g. "Naruto Uzumaki"
        self.name: Optional[str] = kwargs.get('name')
        # Upper-cased, whitespace-stripped identifier derived from the name
        self.id: Optional[Union[str, int]] = ''.join(self.name.split()).upper() if self.name is not None else None
        self.images: Optional[list] = kwargs.get('images')
        self.category: Optional[str] = kwargs.get('category')
        self.emoji: Optional[Union[discord.Emoji, discord.PartialEmoji]] = kwargs.get('emoji')
        # Keep the raw record around for anything not modelled explicitly.
        self.kwargs = kwargs

    @property
    def hitpoint(self) -> int:
        '''Maximum hit points for this character's category.'''
        category = str(self.category).lower()
        if category == 'akatsuki':
            return 7
        if category == 'jinchuruki':
            return 8
        if category in ('kage', 'special'):
            # NOTE(review): 'special' is consumed here, so the original
            # dedicated `== 'special'` branch (returning 6) was unreachable
            # and has been removed.  Confirm whether 'special' should be 6.
            return 5
        if category == 'otsutsuki':
            return 10
        return 3

    @property
    def regainpoint(self) -> int:
        '''Points regained per turn for this character's category.'''
        category = str(self.category).lower()
        if category == 'akatsuki':
            return 5
        if category == 'jinchuruki':
            return 6
        if category in ('kage', 'special'):
            # NOTE(review): unreachable `== 'special'` branch (4) removed.
            return 3
        if category == 'otsutsuki':
            return 7
        return 1

    @property
    def healpoint(self) -> int:
        '''Heal amount — these are in percentages.'''
        category = str(self.category).lower()
        if category == 'akatsuki':
            return 50
        if category == 'jinchuruki':
            return 60
        if category in ('kage', 'special'):
            # NOTE(review): unreachable `== 'special'` branch (40) removed.
            return 30
        if category == 'otsutsuki':
            return 70
        return 10

    @property
    def specialpoint(self) -> int:
        '''Special-attack amount — these are in percentages.'''
        category = str(self.category).lower()
        if category == 'akatsuki':
            return 40
        if category == 'jinchuruki':
            return 50
        if category in ('kage', 'special'):
            # NOTE(review): unreachable `== 'special'` branch (30) removed.
            return 20
        if category == 'otsutsuki':
            return 60
        return 10

    @classmethod
    def from_record(cls, record: dict, ctx: commands.Context, name: str):
        """Build a Characters instance from a raw image-data record."""
        self = cls()
        self.name = name.replace("_", " ").title()
        self.images = record['images']
        self.category = record['category']
        self.id = ''.join(self.name.split()).upper() if self.name is not None else None
        self.kwargs = record
        self.emoji = self.return_emoji(url=record['images'][0], category=record['category'], ctx=ctx)
        return self

    @staticmethod
    def return_emoji(url: str, category: str, ctx: commands.Context) -> Union[discord.Emoji, discord.PartialEmoji]:
        """Pick a configured emoji matching the image URL path or category,
        falling back to a dagger PartialEmoji."""
        # assumes the URL lives under the character-data repository — TODO confirm
        STRIPPED_STRING_LIST: list = url.lstrip(LinksAndVars.character_data.value.rstrip('img_data.json')+'photo_data/').split('/')
        STRIPPED_STRING_LIST.append(category)
        for i in STRIPPED_STRING_LIST:
            if i.lower() in ShinobiMatch.name_exclusion.value:
                return ctx.get_config_emoji_by_name_or_id(i)
        return discord.PartialEmoji(name='\U0001f5e1')
from flask_table import Table, Col
# used for front, back, total
class PlayerScoreTable(Table):
    """flask_table definition rendering per-player gross/net scores."""
    classes = ['scoring_table', 'inline_table']
    name = Col('Name', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'}) # player name
    gross_score = Col('Gross Score', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    net_score = Col('Net Score', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    def get_thead_attrs(self):
        # CSS hook for the header row
        return {'class': 'table__header'}
    def get_tr_attrs(self, item):
        # CSS hook for each body row
        return {'class': 'table__row'}
class NetScore(object):
    """Simple row model holding a display name and a net score."""
    def __init__(self, name, net_score):
        self.name = name
        self.net_score = net_score
class PlayerScore(NetScore):
    """Row model for PlayerScoreTable: adds a gross score to NetScore."""
    def __init__(self, name, gross_score, net_score):
        NetScore.__init__(self, name, net_score)
        self.gross_score = gross_score
class TeamNetTable(Table):
    """flask_table definition rendering two-player team net scores."""
    classes = ['scoring_table']
    name = Col('Name', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'}) # team name
    player_one_net = Col('Player 1 Net', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    player_two_net = Col('Player 2 Net', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    net_score = Col('Net Score', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    def get_thead_attrs(self):
        # CSS hook for the header row
        return {'class': 'table__header'}
    def get_tr_attrs(self, item):
        # CSS hook for each body row
        return {'class': 'table__row'}
class TeamNetScore(NetScore):
    """Two-player team row: the team net score is the sum of the players'
    nets with a missing (falsy) player counted as 0 — unless both players
    are missing, in which case the team score itself is None."""

    def __init__(self, name, player_one_net, player_two_net):
        if player_one_net is None and player_two_net is None:
            combined = None
        else:
            combined = (player_one_net or 0) + (player_two_net or 0)
        NetScore.__init__(self, name, combined)
        self.player_one_net = player_one_net
        self.player_two_net = player_two_net

    def __repr__(self):
        return '<TeamTeamNetScore {} ; {} ; {} ; {}>'.format(
            self.name, self.player_one_net, self.player_two_net, self.net_score)
class TeamBestGrossTable(Table):
    """flask_table definition for best-gross foursome scoring; hole columns
    are attached dynamically at runtime."""
    classes = ['scoring_table']
    name = Col('Foursome', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    # dynamic columns
    # holes 1 through 18 - (score1, score2, score3) sum
    # score = Col('Score', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    def get_thead_attrs(self):
        # CSS hook for the header row
        return {'class': 'table__header'}
    def get_tr_attrs(self, item):
        # CSS hook for each body row
        return {'class': 'table__row'}
class TeamBestGrossScore(object):
    """Row model for TeamBestGrossTable; hole scores are set dynamically."""
    def __init__(self, name):
        self.name = name
class ChampMatchTable(Table):
    """flask_table definition for championship match play; hole columns are
    attached dynamically at runtime."""
    classes = ['scoring_table']
    name = Col('Player', td_html_attrs={'class': 'table__cell'}, th_html_attrs={'class': 'table__cell'})
    # dynamic columns
    # holes 1 through 18 - (score1, score2, score3) sum
    def get_thead_attrs(self):
        # CSS hook for the header row
        return {'class': 'table__header'}
    def get_tr_attrs(self, item):
        # CSS hook for each body row
        return {'class': 'table__row'}
class ChampMatchScore(object):
    """Row model for ChampMatchTable; hole scores are set dynamically."""
    def __init__(self, name):
        self.name = name
|
from dataclasses import dataclass
from typing import Optional
from vortexasdk.api.asset_tank import AssetTank
from vortexasdk.api.serdes import FromDictMixin
from vortexasdk.api.shared_types import ISODate
from vortexasdk.api.id import ID
@dataclass(frozen=True)
class OnshoreInventory(FromDictMixin):
    """
    Land Storage measurements are the base data set the Vortexa API is centred around.
    Each measurement represents the total capacity and current amount being stored at each location.
    [Land Storage Further Documentation](https://docs.vortexa.com/reference/intro-land-storage)
    """
    measurement_id: ID                        # unique id of this measurement
    tank_id: ID                               # id of the measured asset tank
    tank_details: AssetTank                   # full reference data for the tank
    measurement_timestamp: Optional[ISODate]  # when the measurement was taken, if known
    publish_timestamp: Optional[ISODate]      # when the measurement was published, if known
    report_timestamp: ISODate                 # timestamp of the containing report
    carry_forward: bool                       # True when the value is carried over from a prior report
    fill_bbl: int                             # current fill in barrels
    fill_tons: float                          # current fill in metric tons
    fill_cbm: float                           # current fill in cubic metres
    reference_data_version: str               # version of reference data used
|
# noinspection PyInterpreter
def register():
    """Interactively register a new user account.

    Prompts for a username, e-mail address and password; validates them;
    writes the account to ``<username>_<uuid>.json`` with the password
    stored as a SHA-256 hex digest; and marks the user as logged in inside
    ``settings.json``.  Like the original, registration only proceeds when
    ``settings.json`` exists.
    """
    from datetime import datetime
    from getpass import getpass
    import hashlib
    import random
    import json
    import os

    print()
    print("Register")
    print()

    settings_file = "settings.json"
    failed = 'You have failed the login process, please try again.'
    valid_email_error = "you need to enter a valid email address"
    # Characters that must not appear in a password.
    forbidden_chars = set("(){}[]|\\")

    def user_login_func():
        """Mark the freshly registered user as logged in inside settings.json."""
        if not os.path.exists(settings_file):
            return
        with open(settings_file, "r") as settings_update:
            user_settings = json.load(settings_update)
        for settings in user_settings["settings"]:
            for user_preferences in settings["preferences"]:
                for user in user_preferences["user"]:
                    if user["logged_in"]:
                        print("You are already logged in")
                        exit()
                    user["logged_in"] = True
                    user["user_details"] = [{"username": user_name, "email": email}]
                    with open(settings_file, "w") as user_login:
                        json.dump(user_settings, user_login, indent=2)

    def save_user(password):
        """Hash the password, persist the account file and log the user in."""
        password_s = hashlib.sha256(password.encode("utf-8")).hexdigest()
        # BUG FIX: the account JSON is now produced with json.dump instead of
        # hand-concatenated string fragments (which broke on any special
        # character in the username or e-mail).
        data = {
            "user": [{
                "username": user_name,
                "email": email,
                "uuid": str(uuid),
                "password": password_s,
                "settings": [{"password": False}],
            }]
        }
        file_name = "{}_{}.json".format(user_name, uuid)
        with open(file_name, "w") as data_file:
            json.dump(data, data_file)
        print()
        print(f"Your information has been saved in {file_name}")  # Tells you your file name
        print(f"Make sure to remember this uuid, you will need it later, {uuid}")
        print()
        print(saved_data + os.path.abspath(file_name))  # Where the data has been saved
        user_login_func()
        exit()

    def prompt_password():
        """Prompt until the password is >= 8 chars and bracket-free, then save.

        BUG FIX: the original used chains like ``"(" or ")" ... in password``,
        which are always true, so every sufficiently long password was
        rejected and the function returned without ever saving the account.
        """
        password = getpass("Please enter a password: ")
        while True:
            if len(password) < 8:
                print("you're password must be longer than 8 chars")
            elif any(c in forbidden_chars for c in password):
                print("you're password must not have any bracket chars")
            else:
                save_user(password)
            password = getpass("Please re-enter your password: ")

    if not os.path.exists(settings_file):
        return

    with open(settings_file, "r") as settings_update:
        user_settings = json.load(settings_update)
    for settings in user_settings["settings"]:
        for user_preferences in settings["preferences"]:
            for user in user_preferences["user"]:
                if user["logged_in"]:
                    print("You already logged in. You cannot make a new account while logged in")
                    continue
                # Pseudo-random numeric account id derived from a fixed epoch.
                dt = datetime(2018, 1, 1)
                seed = int(round(dt.timestamp() * 10))
                uuid = round(random.randint(1, 10) * seed / 1000)
                saved_data = "Your data has been saved: "

                user_name = input("Please enter a username: ")
                if len(user_name) < 3:
                    print()
                    user_name = input("You need to enter a username: ")
                    if len(user_name) < 3:
                        print()
                        print(failed)
                        exit()

                email = input("Please enter a email: ")
                if len(email) < 3:
                    print()
                    email = input("You need to enter a email: ")
                    if len(email) < 3:
                        print()
                        print(failed)
                        exit()
                # BUG FIX: the original only looked for the literal substring
                # "@." and skipped validation entirely on one code path.
                if "@" not in email or "." not in email:
                    print(valid_email_error)
                    return

                prompt_password()
register()
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mockito import mock, when, unstub
import testtools
from testtools import matchers
import swiftclient.client
from trove.tests.fakes.swift import SwiftClientStub
from trove.common.context import TroveContext
from trove.common import remote
class TestRemote(testtools.TestCase):
    """Tests for the fake Swift client stub backing Trove's remote module.

    Each test programs expectations on ``SwiftClientStub`` (or on mockito
    directly) and then drives ``swiftclient.client.Connection`` to verify
    that the stubbed behavior tracks a real Swift endpoint closely enough
    for testing Trove components.
    """

    def setUp(self):
        super(TestRemote, self).setUp()

    def tearDown(self):
        super(TestRemote, self).tearDown()
        # Drop every mockito stub so expectations never leak across tests.
        unstub()

    def test_creation(self):
        """A stubbed Connection returns the stubbed get_auth() result."""
        when(swiftclient.client.Connection).get_auth().thenReturn(None)
        conn = swiftclient.client.Connection()
        self.assertIsNone(conn.get_auth())

    def test_create_swift_client(self):
        """create_swift_client() produces a client wired to the stub."""
        mock_resp = mock(dict)
        when(swiftclient.client.Connection).get_container('bob').thenReturn(
            ["text", mock_resp])
        client = remote.create_swift_client(TroveContext(tenant='123'))
        headers, container = client.get_container('bob')
        self.assertIs(headers, "text")
        self.assertIs(container, mock_resp)

    def test_empty_account(self):
        """
        this is an account with no containers and no objects
        """
        # setup expectation
        swift_stub = SwiftClientStub()
        swift_stub.with_account('123223')
        # interact
        conn = swiftclient.client.Connection()
        account_info = conn.get_account()
        # get_account() returns (headers-dict, container-list)
        self.assertThat(account_info, matchers.Not(matchers.Is(None)))
        self.assertThat(len(account_info), matchers.Is(2))
        self.assertThat(account_info, matchers.IsInstance(tuple))
        self.assertThat(account_info[0], matchers.IsInstance(dict))
        self.assertThat(account_info[0],
                        matchers.KeysEqual('content-length', 'accept-ranges',
                                           'x-timestamp', 'x-trans-id', 'date',
                                           'x-account-bytes-used',
                                           'x-account-container-count',
                                           'content-type',
                                           'x-account-object-count'))
        self.assertThat(account_info[1], matchers.IsInstance(list))
        self.assertThat(len(account_info[1]), matchers.Is(0))

    def test_one_container(self):
        """
        tests to ensure behavior is normal with one container
        """
        # setup expectation
        swift_stub = SwiftClientStub()
        swift_stub.with_account('123223')
        cont_name = 'a-container-name'
        swift_stub.with_container(cont_name)
        # interact
        conn = swiftclient.client.Connection()
        conn.get_auth()
        conn.put_container(cont_name)
        # get headers plus container metadata
        self.assertThat(len(conn.get_account()), matchers.Is(2))
        # verify container details
        account_containers = conn.get_account()[1]
        self.assertThat(len(account_containers), matchers.Is(1))
        self.assertThat(account_containers[0],
                        matchers.KeysEqual('count', 'bytes', 'name'))
        self.assertThat(account_containers[0]['name'], matchers.Is(cont_name))
        # get container details
        cont_info = conn.get_container(cont_name)
        self.assertIsNotNone(cont_info)
        self.assertThat(cont_info[0], matchers.KeysEqual(
            'content-length', 'x-container-object-count', 'accept-ranges',
            'x-container-bytes-used', 'x-timestamp', 'x-trans-id', 'date',
            'content-type'))
        self.assertThat(len(cont_info[1]), matchers.Equals(0))
        # remove container
        swift_stub.without_container(cont_name)
        with testtools.ExpectedException(swiftclient.ClientException):
            conn.get_container(cont_name)
        # ensure there are no more containers in account
        self.assertThat(len(conn.get_account()[1]), matchers.Is(0))

    def test_one_object(self):
        """Container with a single object: get, then delete the object."""
        swift_stub = SwiftClientStub()
        swift_stub.with_account('123223')
        swift_stub.with_container('bob')
        swift_stub.with_object('bob', 'test', 'test_contents')
        # create connection
        conn = swiftclient.client.Connection()
        # test container lightly
        cont_info = conn.get_container('bob')
        self.assertIsNotNone(cont_info)
        self.assertThat(cont_info[0],
                        matchers.KeysEqual('content-length',
                                           'x-container-object-count',
                                           'accept-ranges',
                                           'x-container-bytes-used',
                                           'x-timestamp', 'x-trans-id', 'date',
                                           'content-type'))
        cont_objects = cont_info[1]
        self.assertThat(len(cont_objects), matchers.Equals(1))
        obj_1 = cont_objects[0]
        self.assertThat(obj_1, matchers.Equals(
            {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
             'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
             'content_type': 'application/octet-stream'}))
        # test object api - not much to do here
        self.assertThat(conn.get_object('bob', 'test')[1],
                        matchers.Is('test_contents'))
        # test remove object
        swift_stub.without_object('bob', 'test')
        # interact; a second delete must fail because the object is gone
        conn.delete_object('bob', 'test')
        with testtools.ExpectedException(swiftclient.ClientException):
            conn.delete_object('bob', 'test')
        self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(0))

    def test_two_objects(self):
        """Two objects in one container; delete one object, then the container."""
        swift_stub = SwiftClientStub()
        swift_stub.with_account('123223')
        swift_stub.with_container('bob')
        swift_stub.with_container('bob2')
        swift_stub.with_object('bob', 'test', 'test_contents')
        swift_stub.with_object('bob', 'test2', 'test_contents2')
        conn = swiftclient.client.Connection()
        self.assertIs(len(conn.get_account()), 2)
        cont_info = conn.get_container('bob')
        self.assertIsNotNone(cont_info)
        self.assertThat(cont_info[0],
                        matchers.KeysEqual('content-length',
                                           'x-container-object-count',
                                           'accept-ranges',
                                           'x-container-bytes-used',
                                           'x-timestamp', 'x-trans-id', 'date',
                                           'content-type'))
        self.assertThat(len(cont_info[1]), matchers.Equals(2))
        self.assertThat(cont_info[1][0], matchers.Equals(
            {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
             'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
             'content_type': 'application/octet-stream'}))
        self.assertThat(conn.get_object('bob', 'test')[1],
                        matchers.Is('test_contents'))
        self.assertThat(conn.get_object('bob', 'test2')[1],
                        matchers.Is('test_contents2'))
        swift_stub.without_object('bob', 'test')
        conn.delete_object('bob', 'test')
        with testtools.ExpectedException(swiftclient.ClientException):
            conn.delete_object('bob', 'test')
        self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(1))
        swift_stub.without_container('bob')
        with testtools.ExpectedException(swiftclient.ClientException):
            conn.get_container('bob')
        self.assertThat(len(conn.get_account()), matchers.Is(2))

    def test_nonexisting_container(self):
        """
        when a container does not exist and is accessed then a 404 is returned
        """
        # (redundant local re-import of SwiftClientStub removed; the
        # module-level import already provides it)
        swift_stub = SwiftClientStub()
        swift_stub.with_account('123223')
        swift_stub.with_container('existing')
        conn = swiftclient.client.Connection()
        with testtools.ExpectedException(swiftclient.ClientException):
            conn.get_container('nonexisting')

    def test_replace_object(self):
        """
        Test to ensure that if an object is updated the container object
        count is the same and the contents of the object are updated
        """
        swift_stub = SwiftClientStub()
        swift_stub.with_account('1223df2')
        swift_stub.with_container('new-container')
        swift_stub.with_object('new-container', 'new-object',
                               'new-object-contents')
        conn = swiftclient.client.Connection()
        conn.put_object('new-container', 'new-object', 'new-object-contents')
        obj_resp = conn.get_object('new-container', 'new-object')
        self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
        self.assertThat(len(obj_resp), matchers.Is(2))
        self.assertThat(obj_resp[1], matchers.Is('new-object-contents'))
        # set expected behavior - trivial here since it is the intended
        # behavior however keep in mind this is just to support testing of
        # trove components
        swift_stub.with_object('new-container', 'new-object',
                               'updated-object-contents')
        conn.put_object('new-container', 'new-object',
                        'updated-object-contents')
        obj_resp = conn.get_object('new-container', 'new-object')
        self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
        self.assertThat(len(obj_resp), matchers.Is(2))
        self.assertThat(obj_resp[1], matchers.Is('updated-object-contents'))
        # ensure object count has not increased
        self.assertThat(len(conn.get_container('new-container')[1]),
                        matchers.Is(1))
|
from os import makedirs, path
from shutil import rmtree
from diot import Diot
from bioprocs.utils import cmdargs, runcmd, mem2
tmpdir = path.join ({{ args.tmpdir | quote}}, "{{proc.id}}.{{i.infiles[0] | fn2}}.{{job.index}}")
if not path.exists (tmpdir): makedirs (tmpdir)
params = {{args.params}}
try:
{% case args.tool %}
############# picard
{% when 'picard' %}
mem = mem2({{ args.mem | quote }}, 'java')
infiles = {{ i.infiles }}
for i, infile in enumerate(infiles):
params['I' + ' ' * i] = infile
{% if args.nthread > 1 %}
params['USE_THREADING'] = 'true'
{% else %}
params['USE_THREADING'] = 'false'
{% endif %}
params['TMP_DIR'] = tmpdir
params['O'] = {{o.outfile | quote}}
params['AS'] = 'true'
cmd = '{{args.picard}} MergeSamFiles %s -Djava.io.tmpdir="%s" %s' % (mem, tmpdir, cmdargs(params, dash = '', equal = '='))
runcmd (cmd)
############# bamutil
{% when 'bamutil' %}
infiles = {{ i.infiles }}
for i, infile in enumerate(infiles):
params['i' + ' ' * i] = infile
params['o'] = {{o.outfile | quote}}
cmd = '{{args.bamutil}} mergeBam %s' % cmdargs(params)
runcmd (cmd)
############# samtools
{% when 'samtools' %}
inlist = path.join({{job.outdir | quote}}, 'bamlist.txt')
with open(inlist, 'w') as f:
f.write('\n'.join({{i.infiles}}) + '\n')
params['@'] = {{args.nthread}}
params['O'] = 'bam'
params['b'] = inlist
cmd = '{{args.samtools}} merge %s {{o.outfile | quote}}' % cmdargs(params)
runcmd (cmd)
############# sambamba
{% when 'sambamba' %}
params['t'] = {{args.nthread}}
cmd = '{{args.sambamba}} merge %s {{o.outfile | quote}} {{ i.infiles | asquote }}' % cmdargs(params)
runcmd (cmd)
{% endcase %}
except Exception as ex:
stderr.write ("Job failed: %s" % str(ex))
raise
finally:
rmtree (tmpdir)
|
# Copyright (c) 2020 Open-E, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import requests
from cinder import exception
from cinder.tests.unit import test
from cinder.volume.drivers.open_e.jovian_common import exception as jexc
from cinder.volume.drivers.open_e.jovian_common import rest_proxy
# Volume/snapshot identifiers reused across the tests below.
UUID_1 = '12345678-1234-1234-1234-000000000001'
UUID_2 = '12345678-1234-1234-1234-000000000002'
UUID_3 = '12345678-1234-1234-1234-000000000003'
# Baseline driver configuration accepted by JovianRESTProxy (single host).
CONFIG_OK = {
    'san_hosts': ['192.168.0.2'],
    'san_api_port': 82,
    'driver_use_ssl': 'true',
    'driver_ssl_cert_verify': True,
    'driver_ssl_cert_path': '/etc/cinder/joviandss.crt',
    'jovian_rest_send_repeats': 3,
    'jovian_recovery_delay': 60,
    'san_login': 'admin',
    'san_password': 'password',
    'jovian_ignore_tpath': [],
    'target_port': 3260,
    'jovian_pool': 'Pool-0',
    'iscsi_target_prefix': 'iqn.2020-04.com.open-e.cinder:',
    'chap_password_len': 12,
    'san_thin_provision': False,
    'jovian_block_size': '128K'
}
# Same as CONFIG_OK except 'san_hosts' holds a malformed address; the proxy
# constructor is expected to reject it (see test_init).
CONFIG_BAD_IP = {
    'san_hosts': ['asd'],
    'san_api_port': 82,
    'driver_use_ssl': 'true',
    'driver_ssl_cert_verify': True,
    'driver_ssl_cert_path': '/etc/cinder/joviandss.crt',
    'jovian_rest_send_repeats': 3,
    'jovian_recovery_delay': 60,
    'san_login': 'admin',
    'san_password': 'password',
    'jovian_ignore_tpath': [],
    'target_port': 3260,
    'jovian_pool': 'Pool-0',
    'iscsi_target_prefix': 'iqn.2020-04.com.open-e.cinder:',
    'chap_password_len': 12,
    'san_thin_provision': False,
    'jovian_block_size': '128K'
}
# Same as CONFIG_OK but with three hosts, used by the host-failover tests.
CONFIG_MULTIHOST = {
    'san_hosts': ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
    'san_api_port': 82,
    'driver_use_ssl': 'true',
    'driver_ssl_cert_verify': True,
    'driver_ssl_cert_path': '/etc/cinder/joviandss.crt',
    'jovian_rest_send_repeats': 3,
    'jovian_recovery_delay': 60,
    'san_login': 'admin',
    'san_password': 'password',
    'jovian_ignore_tpath': [],
    'target_port': 3260,
    'jovian_pool': 'Pool-0',
    'iscsi_target_prefix': 'iqn.2020-04.com.open-e.cinder:',
    'chap_password_len': 12,
    'san_thin_provision': False,
    'jovian_block_size': '128K'
}
class TestOpenEJovianRESTProxy(test.TestCase):
    """Unit tests for the Open-E JovianDSS REST proxy (rest_proxy)."""

    def start_patches(self, patches):
        # Activate every mock.patch object in the given list.
        for p in patches:
            p.start()

    def stop_patches(self, patches):
        # Deactivate every mock.patch object in the given list.
        for p in patches:
            p.stop()

    def test_init(self):
        """A malformed SAN host address is rejected at construction time."""
        self.assertRaises(exception.InvalidConfigurationValue,
                          rest_proxy.JovianRESTProxy,
                          CONFIG_BAD_IP)

    def test_get_base_url(self):
        """Base URL is built from proto, active host and port."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_OK)
        url = proxy._get_base_url()
        exp = '{proto}://{host}:{port}/api/v3'.format(
            proto='https',
            host='192.168.0.2',
            port='82')
        self.assertEqual(exp, url)

    def test_next_host(self):
        """_next_host cycles through the configured hosts and wraps around."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        self.assertEqual(0, proxy.active_host)
        proxy._next_host()
        self.assertEqual(1, proxy.active_host)
        proxy._next_host()
        self.assertEqual(2, proxy.active_host)
        proxy._next_host()
        # Wraps back to the first host after the last one.
        self.assertEqual(0, proxy.active_host)

    def test_request(self):
        """request() builds the URL from the active host and the resource."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        patches = [
            mock.patch.object(requests, "Request", return_value="request"),
            mock.patch.object(proxy.session,
                              "prepare_request",
                              return_value="out_data"),
            mock.patch.object(proxy, "_send", return_value="out_data")]
        addr = 'https://192.168.0.2:82/api/v3/pools/Pool-0'
        self.start_patches(patches)
        proxy.request('GET', '/pools/Pool-0')
        requests.Request.assert_called_once_with('GET', addr)
        self.stop_patches(patches)

    def test_request_host_failure(self):
        """Connection errors make request() retry against the next host."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        patches = [
            mock.patch.object(requests, "Request", return_value="request"),
            mock.patch.object(proxy.session,
                              "prepare_request",
                              return_value="out_data"),
            mock.patch.object(proxy, "_send", return_value="out_data")]
        # One Request per host, in configuration order.
        request_expected = [
            mock.call('GET',
                      'https://192.168.0.2:82/api/v3/pools/Pool-0'),
            mock.call('GET',
                      'https://192.168.0.3:82/api/v3/pools/Pool-0'),
            mock.call('GET',
                      'https://192.168.0.4:82/api/v3/pools/Pool-0')]
        self.start_patches(patches)
        # First two hosts fail, the third succeeds.
        proxy._send.side_effect = [
            requests.exceptions.ConnectionError(),
            requests.exceptions.ConnectionError(),
            "out_data"]
        proxy.request('GET', '/pools/Pool-0')
        self.assertEqual(2, proxy.active_host)
        requests.Request.assert_has_calls(request_expected)
        self.stop_patches(patches)

    def test_pool_request(self):
        """pool_request() prefixes the resource with the configured pool."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_OK)
        patches = [mock.patch.object(proxy, "request")]
        req = '/pools/Pool-0/volumes'
        self.start_patches(patches)
        proxy.pool_request('GET', '/volumes')
        proxy.request.assert_called_once_with('GET', req, json_data=None)
        self.stop_patches(patches)

    def test_send(self):
        """_send() parses the JSON body into code/data/error fields."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        json_data = {"data": [{"available": "949998694400",
                               "status": 26,
                               "name": "Pool-0",
                               "scan": None,
                               "encryption": {"enabled": False},
                               "iostats": {"read": "0",
                                           "write": "0",
                                           "chksum": "0"},
                               "vdevs": [{}],
                               "health": "ONLINE",
                               "operation": "none",
                               "id": "12413634663904564349",
                               "size": "996432412672"}],
                     "error": None}
        session_ret = mock.Mock()
        session_ret.text = json.dumps(json_data)
        session_ret.status_code = 200
        patches = [mock.patch.object(proxy.session,
                                     "send",
                                     return_value=session_ret)]
        pr = 'prepared_request'
        self.start_patches(patches)
        ret = proxy._send(pr)
        proxy.session.send.assert_called_once_with(pr)
        self.assertEqual(0, proxy.active_host)
        self.assertEqual(200, ret['code'])
        self.assertEqual(json_data['data'], ret['data'])
        self.assertEqual(json_data['error'], ret['error'])
        self.stop_patches(patches)

    def test_send_connection_error(self):
        """_send() keeps retrying through connection errors until success."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        json_data = {"data": None,
                     "error": None}
        session_ret = mock.Mock()
        session_ret.text = json.dumps(json_data)
        session_ret.status_code = 200
        patches = [mock.patch.object(proxy.session, "send")]
        pr = 'prepared_request'
        self.start_patches(patches)
        # Four connection errors, then a good response.
        side_effect = [requests.exceptions.ConnectionError()] * 4
        side_effect += [session_ret]
        proxy.session.send.side_effect = side_effect
        send_expected = [mock.call(pr)] * 4
        ret = proxy._send(pr)
        proxy.session.send.assert_has_calls(send_expected)
        self.assertEqual(0, proxy.active_host)
        self.assertEqual(200, ret['code'])
        self.assertEqual(json_data['data'], ret['data'])
        self.assertEqual(json_data['error'], ret['error'])
        self.stop_patches(patches)

    def test_send_mixed_error(self):
        """A JDSSOSException after connection errors propagates to the caller."""
        proxy = rest_proxy.JovianRESTProxy(CONFIG_MULTIHOST)
        json_data = {"data": None,
                     "error": None}
        session_ret = mock.Mock()
        session_ret.text = json.dumps(json_data)
        session_ret.status_code = 200
        patches = [mock.patch.object(proxy.session, "send")]
        pr = 'prepared_request'
        self.start_patches(patches)
        side_effect = [requests.exceptions.ConnectionError()] * 4
        side_effect += [jexc.JDSSOSException()] * 4
        side_effect += [session_ret]
        proxy.session.send.side_effect = side_effect
        send_expected = [mock.call(pr)] * 7
        self.assertRaises(jexc.JDSSOSException, proxy._send, pr)
        proxy.session.send.assert_has_calls(send_expected)
        self.assertEqual(0, proxy.active_host)

    def test_handle_500(self):
        """HTTP 500 with an OS error payload raises JDSSOSException."""
        error = {"class": "exceptions.OSError",
                 "errno": 17,
                 "message": ""}
        json_data = {"data": None,
                     "error": error}
        session_ret = mock.Mock()
        session_ret.text = json.dumps(json_data)
        session_ret.status_code = 500
        self.assertRaises(jexc.JDSSOSException,
                          rest_proxy.JovianRESTProxy._handle_500,
                          session_ret)
        # A 200 response passes through without raising.
        session_ret.status_code = 200
        json_data = {"data": None,
                     "error": None}
        session_ret.text = json.dumps(json_data)
        self.assertIsNone(rest_proxy.JovianRESTProxy._handle_500(session_ret))
|
#!/usr/bin/env python
import json
import os
import requests
import shlex
import socket
import sys
from api_common import get_services
if __name__ == "__main__":
    # BUGFIX: the old check was `len(sys.argv) >= 1`, which is always true
    # because argv[0] is the script name; a missing cluster-name argument
    # then crashed with IndexError instead of showing the usage message.
    assert len(sys.argv) >= 2, "Usage: <scriptname> <cluster-name>"
    clustername = sys.argv[1]
    get_services(clustername)
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from nnabla.utils import nnabla_pb2
from .exporter import rename_square_bracket
# Expand repeat and recurrent in nnp file.
class _TopologicalSort:
    """Topologically sort a list of function records.

    Each record is a tuple ``(name, type, inputs, outputs)``; a function that
    produces an output consumed by another must come first in the result.
    Exits the process if the graph contains a cycle (i.e. is not a DAG).
    """

    def __init__(self, functions):
        self._orig = functions
        # name -> 1 while the node is being visited, 2 once finished.
        self._flags = {}

    def _visit(self, func):
        fname = func[0]
        if fname in self._flags:
            if self._flags[fname] == 1:
                # Re-entering an in-progress node means the graph has a cycle.
                # BUGFIX: this path used an undefined `logger`, raising
                # NameError instead of reporting the error and exiting.
                import sys
                print('Fatal error! network is not a DAG.', file=sys.stderr)
                sys.exit(-1)
            else:
                return
        self._flags[fname] = 1
        # Visit every consumer of this function's outputs first; inserting
        # at index 0 afterwards places producers before their consumers.
        for output in func[3]:
            for f in self._orig:
                for input in f[2]:
                    if output == input:
                        self._visit(f)
        self._flags[fname] = 2
        self._sorted.insert(0, func)

    def sorted(self):
        """Return the function records in topological order."""
        self._sorted = []
        for f in self._orig:
            if f[0] not in self._flags:
                self._visit(f)
        return self._sorted
class NnpExpander:
    """Expand repeat and recurrent constructs of an nnp protobuf.

    Unrolls every ``RepeatStart`` / ``RepeatEnd`` / ``RecurrentInput`` /
    ``RecurrentOutput`` / ``Delay`` function into plain functions, then
    topologically sorts the resulting networks.
    """

    def __init__(self, nnp):
        self._nnp = nnp
        # Names of all parameters declared in the nnp file.
        self._parameters = {}
        for param in self._nnp.parameter:
            self._parameters[param.variable_name] = True

    def _expand_repeat(self, network):
        """Unroll the first repeat_info of *network*, recursing until none remain."""

        def _search_repeat_id(mes, rid):
            # Index of *rid* in the message's repeat_id list, or None if absent.
            return list(mes.repeat_id).index(rid) if rid in mes.repeat_id else None

        def _add_suffix(name, suffix, num):
            return '{}_{}_{}'.format(name, suffix, num)

        ########################################################################
        # Prepare output network message
        net = nnabla_pb2.NNablaProtoBuf().network.add()
        net.CopyFrom(network)
        ########################################################################
        # Finish when repeat_info is not present
        if len(net.repeat_info) == 0:
            return net
        ########################################################################
        # Use first repeat_info
        ri = net.repeat_info[0]
        del net.repeat_info[0]
        ########################################################################
        # Expand variables
        net.ClearField('variable')
        for vpos, var in enumerate(network.variable):
            if var.type == 'Parameter':
                if var.name not in self._parameter_original_names:
                    self._parameter_original_names[var.name] = []
            pos = _search_repeat_id(var, ri.id)
            if pos is not None:
                # Variable participates in this repeat: emit one copy per step.
                for i in range(ri.times):
                    if var.type == 'Parameter':
                        if self._old_version_param_name:
                            name = _add_suffix(var.name, ri.id, i)
                        else:
                            name = var.name.replace(
                                '{{{}}}'.format(ri.id), '_{}'.format(i))
                        self._parameter_original_names[var.name].append(name)
                    else:
                        name = _add_suffix(var.name, ri.id, i)
                    v = net.variable.add()
                    v.CopyFrom(var)
                    v.name = name
                    del v.repeat_id[pos]
            else:
                if var.type == 'Parameter' and len(var.repeat_id) == 0 and len(self._parameter_original_names[var.name]) == 0:
                    self._parameter_original_names[var.name].append(var.name)
                v = net.variable.add()
                v.CopyFrom(var)
        ########################################################################
        # Expand functions
        ########################################################################
        ########################################################################
        # Prepare delayed inputs: step i reads the value produced at step i-1,
        # except step 0 which reads the initial value (second input).
        delay_var = {}
        for fpos, func in enumerate(network.function):
            if func.type == 'Delay':
                if func.recurrent_param.repeat_id == ri.id:
                    delay_var[func.output[0]] = []
                    for i in range(ri.times):
                        if i == 0:
                            delay_var[func.output[0]].append(func.input[1])
                        else:
                            v = func.input[0]
                            v = _add_suffix(v, ri.id, i-1)
                            delay_var[func.output[0]].append(v)
        ########################################################################
        # Prepare repeat end inputs
        repeat_end_var = {}
        for fpos, func in enumerate(network.function):
            if func.type == 'RepeatEnd':
                if func.repeat_param.repeat_id == ri.id:
                    repeat_end_var[func.output[0]] = []
                    for i in range(func.repeat_param.times):
                        repeat_end_var[func.output[0]].append(_add_suffix(
                            func.input[0], func.repeat_param.repeat_id, i))
        ########################################################################
        # Prepare repeat start inputs
        repeat_start_var = {}
        for fpos, func in enumerate(network.function):
            if func.type == 'RepeatStart':
                if func.repeat_param.repeat_id == ri.id:
                    repeat_start_var[func.output[0]] = []
                    for i in range(ri.times):
                        if i == 0:
                            v = func.input[0]
                            if v in repeat_end_var:
                                v = repeat_end_var[v][ri.times-1]
                            repeat_start_var[func.output[0]].append(v)
                        else:
                            v = func.input[1]
                            if v in repeat_end_var:
                                v = repeat_end_var[v][i-1]
                            else:
                                v = _add_suffix(v, ri.id, i-1)
                            repeat_start_var[func.output[0]].append(v)
        ########################################################################
        # Expand network
        net.ClearField('function')
        for fpos, func in enumerate(network.function):
            # RepeatStart/RepeatEnd/Delay for this repeat id dissolve into
            # the variable substitution tables prepared above.
            if func.type == 'RepeatStart' or func.type == 'RepeatEnd':
                if func.repeat_param.repeat_id == ri.id:
                    continue
            if func.type == 'Delay':
                if func.recurrent_param.repeat_id == ri.id:
                    continue
            if func.type == 'RecurrentInput':
                # Becomes a Split along the recurrent axis, one output per step.
                if func.recurrent_param.repeat_id == ri.id:
                    f = net.function.add()
                    f.CopyFrom(func)
                    f.type = 'Split'
                    f.split_param.axis = func.recurrent_param.axis
                    f.ClearField('output')
                    for i in range(ri.times):
                        f.output.append(_add_suffix(func.output[0], ri.id, i))
                    pos = _search_repeat_id(func, ri.id)
                    del f.repeat_id[pos]
                    f.ClearField('recurrent_param')
                    continue
            if func.type == 'RecurrentOutput':
                # Becomes a Stack along the recurrent axis, one input per step.
                if func.recurrent_param.repeat_id == ri.id:
                    f = net.function.add()
                    f.CopyFrom(func)
                    f.type = 'Stack'
                    f.stack_param.axis = func.recurrent_param.axis
                    f.ClearField('input')
                    for i in range(ri.times):
                        f.input.append(_add_suffix(func.input[0], ri.id, i))
                    f.ClearField('recurrent_param')
                    continue
            pos = _search_repeat_id(func, ri.id)
            if pos is not None:
                # Function repeated by this id: emit one copy per step with
                # inputs/outputs remapped through the substitution tables.
                for i in range(ri.times):
                    f = net.function.add()
                    f.CopyFrom(func)
                    del f.repeat_id[pos]
                    f.name = _add_suffix(func.name, ri.id, i)
                    for n, v in enumerate(func.input):
                        vname = None
                        if v in self._parameter_original_names:
                            if len(self._parameter_original_names[v]) == ri.times:
                                vname = self._parameter_original_names[v][i]
                            else:
                                vname = v
                        elif v in repeat_start_var:
                            vname = repeat_start_var[v][i]
                        elif v in repeat_end_var:
                            vname = repeat_end_var[v][i]
                        elif v in delay_var:
                            vname = delay_var[v][i]
                        else:
                            vname = _add_suffix(v, ri.id, i)
                        f.input[n] = vname
                    for n, v in enumerate(func.output):
                        vname = _add_suffix(v, ri.id, i)
                        f.output[n] = vname
            else:
                f = net.function.add()
                f.CopyFrom(func)
                for n, v in enumerate(func.input):
                    if v in repeat_end_var:
                        vname = repeat_end_var[v][ri.times-1]
                        f.input[n] = vname
        # Recurse to expand the remaining repeat_info entries.
        return self._expand_repeat(net)

    def _expand_network(self, network):
        """Expand all repeats of *network* and return a topologically sorted copy."""
        self._parameter_original_names = collections.OrderedDict()
        print(' Expanding {}.'.format(network.name))
        repeat_ids = collections.OrderedDict()
        for ri in network.repeat_info:
            repeat_ids[ri.id] = ri.times
        # Check whether parameter name complies with old rule.
        self._old_version_param_name = False
        for param in self._parameters:
            # BUGFIX: raw string; '\[' in a plain literal is an invalid
            # escape sequence (SyntaxWarning on recent Python versions).
            m = re.search(r'{}\[([0-9]+)\]$'.format(ri), param)
            if m:
                if int(m.group(1)) < repeat_ids[ri]:
                    self._old_version_param_name = True
        # Expand repeat
        network = self._expand_repeat(network)
        functions = []
        for func in network.function:
            functions.append((func.name,
                              func.type,
                              [n for n in func.input],
                              [n for n in func.output]))
        sorted_functions = self._sort_functions(functions)
        func_list = []
        for f in functions:
            func_list.append(f[0])
        net = nnabla_pb2.NNablaProtoBuf().network.add()
        net.CopyFrom(network)
        net.ClearField('function')
        # Re-emit the functions in topological order.
        for f in sorted_functions:
            func = net.function.add()
            func.CopyFrom(network.function[func_list.index(f[0])])
        return net

    def _sort_functions(self, orig_functions):
        """Topologically sort function records (see _TopologicalSort)."""
        return _TopologicalSort(orig_functions).sorted()

    def _expand_parameter_variable(self, proto):
        """Rewrite parameter_variable entries to their expanded names."""
        names = []
        for pv in proto.parameter_variable:
            if pv.variable_name in self._parameter_original_names:
                for n in self._parameter_original_names[pv.variable_name]:
                    names.append(n)
            else:
                names.append(pv.variable_name)
        proto.ClearField('parameter_variable')
        for n in sorted(names):
            pv = proto.parameter_variable.add()
            pv.variable_name = n

    def execute(self):
        """Return a copy of the nnp with every network expanded and sorted."""
        nnp = nnabla_pb2.NNablaProtoBuf()
        nnp.CopyFrom(self._nnp)
        nnp.ClearField('network')
        for network in self._nnp.network:
            net = nnp.network.add()
            net.CopyFrom(self._expand_network(network))
        for optimizer in nnp.optimizer:
            self._expand_parameter_variable(optimizer)
        for executor in nnp.executor:
            self._expand_parameter_variable(executor)
        rename_square_bracket(nnp)
        return nnp
|
# Copyright (c) 2021 The Toltec Contributors
# SPDX-License-Identifier: MIT
"""
Parse recipes.
A package is a final user-installable software archive. A recipe is a Bash file
which contains the instructions necessary to build one or more related
packages (in the latter case, it is called a split package).
"""
from dataclasses import dataclass
from itertools import product
from typing import Optional
import os
import textwrap
import dateutil.parser
from .version import Version, Dependency, DependencyKind
from . import bash
class RecipeError(Exception):
    """Error signaling an invalid or malformed recipe definition."""
@dataclass
class Source:
    """Source item needed to build a recipe."""
    url: str  # location the source is fetched from
    checksum: str  # SHA-256 checksum, or "SKIP" to bypass verification
    noextract: bool  # if True, presumably skip extracting the fetched archive -- TODO confirm
class Recipe:  # pylint:disable=too-many-instance-attributes,disable=too-few-public-methods
    """Load recipes.

    A recipe is a Bash file declaring metadata fields and build functions
    for one or more packages (a split package when more than one).
    """

    @staticmethod
    def from_file(path: str) -> "Recipe":
        """
        Load a recipe from its directory.
        :param path: path to the directory containing the recipe definition
        :returns: loaded recipe
        """
        name = os.path.basename(path)
        with open(os.path.join(path, "package"), "r") as recipe:
            return Recipe(name, path, recipe.read())

    def __init__(self, name: str, path: str, definition: str):
        """
        Load a recipe from a Bash source.
        :param name: name of the recipe
        :param path: path to the directory containing the recipe definition
        :param definition: source string of the recipe
        :raises RecipeError: if the recipe contains an error
        """
        self.name = name
        self.path = path
        variables, functions = bash.get_declarations(definition)
        # Original declarations of standard fields and functions
        self.variables: bash.Variables = {}
        self.functions: bash.Functions = {}
        self._load_fields(variables)
        self._load_functions(functions)
        self._load_packages(variables, functions)
        # Whatever remains after the standard names were popped is custom.
        self.custom_variables = variables
        self.custom_functions = functions

    def _load_fields(self, variables: bash.Variables) -> None:
        """Parse and check standard fields."""
        timestamp_str = _pop_field_string(variables, "timestamp")
        self.variables["timestamp"] = timestamp_str
        try:
            self.timestamp = dateutil.parser.isoparse(timestamp_str)
        except ValueError as err:
            raise RecipeError(
                "Field 'timestamp' does not contain a valid ISO-8601 date"
            ) from err
        self.maintainer = _pop_field_string(variables, "maintainer")
        self.variables["maintainer"] = self.maintainer
        self.image = _pop_field_string(variables, "image", "")
        self.variables["image"] = self.image
        self.flags = _pop_field_indexed(variables, "flags", [])
        self.variables["flags"] = self.flags
        sources = _pop_field_indexed(variables, "source", [])
        self.variables["source"] = sources
        sha256sums = _pop_field_indexed(variables, "sha256sums", [])
        self.variables["sha256sums"] = sha256sums
        noextract = _pop_field_indexed(variables, "noextract", [])
        self.variables["noextract"] = noextract
        if len(sources) != len(sha256sums):
            raise RecipeError(
                f"Expected the same number of sources and checksums, got \
{len(sources)} source(s) and {len(sha256sums)} checksum(s)"
            )
        depends_raw = _pop_field_indexed(variables, "depends", [])
        # NOTE(review): unlike every other field, 'depends' is written back
        # into the incoming `variables` dict instead of `self.variables`,
        # which lets packages inherit it in _load_packages — confirm this
        # asymmetry is intentional before changing it.
        variables["depends"] = depends_raw
        makedepends_raw = _pop_field_indexed(variables, "makedepends", [])
        self.variables["makedepends"] = makedepends_raw
        # Build-time dependencies include the recipe-level depends.
        self.makedepends = [
            Dependency.parse(dep or "") for dep in depends_raw + makedepends_raw
        ]
        self.sources = []
        for source, checksum in zip(sources, sha256sums):
            self.sources.append(
                Source(
                    url=source or "",
                    checksum=checksum or "SKIP",
                    noextract=os.path.basename(source or "") in noextract,
                )
            )

    def _load_functions(self, functions: bash.Functions) -> None:
        """Parse and check standard functions."""
        # An image declaration and a build() step must come together.
        if self.image and "build" not in functions:
            raise RecipeError(
                "Missing build() function for a recipe which declares a \
build image"
            )
        if not self.image and "build" in functions:
            raise RecipeError(
                "Missing image declaration for a recipe which has a \
build() step"
            )
        self.functions["prepare"] = functions.pop("prepare", "")
        self.functions["build"] = functions.pop("build", "")

    def _load_packages(
        self, variables: bash.Variables, functions: bash.Functions
    ) -> None:
        """Load packages defined by this recipe."""
        self.packages = {}
        pkgnames = _pop_field_indexed(variables, "pkgnames")
        self.variables["pkgnames"] = pkgnames
        if len(pkgnames) == 1:
            # Single-package recipe: use global declarations
            pkg_name = pkgnames[0]
            variables["pkgname"] = pkg_name
            self.packages[pkg_name] = Package(self, variables, functions)
        else:
            # Split-package recipe: load package-local declarations
            pkg_decls = {}
            for pkg_name in pkgnames:
                if pkg_name not in functions:
                    # BUGFIX: this message was a plain string, so the
                    # {pkg_name} placeholder was never interpolated.
                    raise RecipeError(
                        f"Missing required function {pkg_name}() for \
corresponding package"
                    )
                pkg_def = functions.pop(pkg_name)
                # Re-evaluate the package body with the recipe variables in
                # scope so package-local declarations can reference them.
                context = bash.put_variables(
                    {
                        **self.variables,
                        **variables,
                        "pkgname": pkg_name,
                    }
                )
                pkg_decls[pkg_name] = bash.get_declarations(context + pkg_def)
                # Strip recipe-level fields back out of the package scope.
                for var_name in self.variables:
                    del pkg_decls[pkg_name][0][var_name]
            for pkg_name, (pkg_vars, pkg_funcs) in pkg_decls.items():
                self.packages[pkg_name] = Package(self, pkg_vars, pkg_funcs)
class Package:  # pylint:disable=too-many-instance-attributes
    """Load packages."""

    def __init__(
        self,
        parent: Recipe,
        variables: bash.Variables,
        functions: bash.Functions,
    ):
        """
        Load a package.
        :param parent: recipe which declares this package
        :param variables: Bash variables declared in the package
        :param functions: Bash functions declared in the package
        :raises RecipeError: if the package contains an error
        """
        self.parent = parent
        # Original declarations of standard fields and functions
        self.variables: bash.Variables = {}
        self.functions: bash.Functions = {}
        self._load_fields(variables)
        self._load_functions(functions)
        self._load_custom(variables, functions)

    def _load_fields(self, variables: bash.Variables) -> None:
        """Parse and check standard fields."""
        self.name = _pop_field_string(variables, "pkgname")
        self.variables["pkgname"] = self.name
        pkgver_str = _pop_field_string(variables, "pkgver")
        self.variables["pkgver"] = pkgver_str
        self.version = Version.parse(pkgver_str)
        self.arch = _pop_field_string(variables, "arch", "armv7-3.2")
        self.variables["arch"] = self.arch
        self.desc = _pop_field_string(variables, "pkgdesc")
        self.variables["pkgdesc"] = self.desc
        self.url = _pop_field_string(variables, "url")
        self.variables["url"] = self.url
        self.section = _pop_field_string(variables, "section")
        self.variables["section"] = self.section
        self.license = _pop_field_string(variables, "license")
        self.variables["license"] = self.license
        depends_raw = _pop_field_indexed(variables, "depends", [])
        self.variables["depends"] = depends_raw
        self.depends = []
        for dep_raw in depends_raw:
            dep = Dependency.parse(dep_raw or "")
            if dep.kind != DependencyKind.Host:
                raise RecipeError(
                    "Only host packages are supported in the 'depends' field"
                )
            self.depends.append(dep)
        conflicts_raw = _pop_field_indexed(variables, "conflicts", [])
        self.variables["conflicts"] = conflicts_raw
        self.conflicts = []
        for conflict_raw in conflicts_raw:
            conflict = Dependency.parse(conflict_raw or "")
            # BUGFIX: this check previously tested `dep.kind` — a leftover
            # from the depends loop above — so conflicts were never actually
            # validated, and a NameError occurred when 'depends' was empty.
            if conflict.kind != DependencyKind.Host:
                raise RecipeError(
                    "Only host packages are supported in the 'conflicts' field"
                )
            self.conflicts.append(conflict)

    def _load_functions(self, functions: bash.Functions) -> None:
        """Parse and check standard functions."""
        if "package" not in functions:
            raise RecipeError(
                f"Missing required function package() for package {self.name}"
            )
        self.functions["package"] = functions.pop("package")
        # Optional lifecycle hooks default to empty scripts.
        for action in ("preinstall", "configure"):
            self.functions[action] = functions.pop(action, "")
        for rel, step in product(("pre", "post"), ("remove", "upgrade")):
            self.functions[rel + step] = functions.pop(rel + step, "")

    def _load_custom(
        self, variables: bash.Variables, functions: bash.Functions
    ) -> None:
        """Parse and check custom fields and functions."""
        for var_name in variables.keys():
            if not var_name.startswith("_"):
                raise RecipeError(
                    f"Unknown field '{var_name}' in the definition of \
package {self.name} ({self.parent.name}) — make sure to prefix the names of \
custom fields with '_'"
                )
        for func_name in functions.keys():
            if not func_name.startswith("_"):
                raise RecipeError(
                    f"Unknown function '{func_name}' in the definition of \
package {self.name} ({self.parent.name}) — make sure to prefix the names of \
custom functions with '_'"
                )
        self.custom_variables = variables
        self.custom_functions = functions

    def pkgid(self) -> str:
        """Get the unique identifier of this package."""
        return "_".join((self.name, str(self.version), self.arch))

    def filename(self) -> str:
        """Get the name of the archive corresponding to this package."""
        return self.pkgid() + ".ipk"

    def control_fields(self) -> str:
        """Get the control fields for this package."""
        control = textwrap.dedent(
            f"""\
            Package: {self.name}
            Description: {self.desc}
            Homepage: {self.url}
            Version: {self.version}
            Section: {self.section}
            Maintainer: {self.parent.maintainer}
            License: {self.license}
            Architecture: {self.arch}
            """
        )
        if self.depends:
            control += (
                "Depends: "
                + ", ".join(dep.to_debian() for dep in self.depends if dep)
                + "\n"
            )
        if self.conflicts:
            control += (
                "Conflicts: "
                + ", ".join(dep.to_debian() for dep in self.conflicts if dep)
                + "\n"
            )
        return control
# Helpers to check that fields of the right type are defined in a recipe
# and to otherwise return a default value
def _pop_field_string(
    variables: bash.Variables, name: str, default: Optional[str] = None
) -> str:
    """Pop the field ``name`` from ``variables``, requiring a string value.

    :param variables: Bash variables to pop the field from
    :param name: name of the field to pop
    :param default: value returned when the field is absent; if None the
        field is required and its absence is an error
    :returns: the field's value, or ``default``
    :raises RecipeError: if a required field is missing or the value is
        not a string
    """
    if name not in variables:
        if default is None:
            raise RecipeError(f"Missing required field {name}")
        return default
    value = variables.pop(name)
    if not isinstance(value, str):
        # Use the already-popped value here: looking up variables[name]
        # after the pop (as the previous version did) raised KeyError
        # instead of the intended RecipeError.
        raise RecipeError(
            f"Field {name} must be a string, "
            f"got {type(value).__name__}"
        )
    return value
def _pop_field_indexed(
    variables: bash.Variables,
    name: str,
    default: Optional[bash.IndexedArray] = None,
) -> bash.IndexedArray:
    """Pop the field ``name`` from ``variables``, requiring an indexed array.

    :param variables: Bash variables to pop the field from
    :param name: name of the field to pop
    :param default: value returned when the field is absent; if None the
        field is required and its absence is an error
    :returns: the field's value, or ``default``
    :raises RecipeError: if a required field is missing or the value is
        not an indexed array
    """
    if name not in variables:
        if default is None:
            raise RecipeError(f"Missing required field '{name}'")
        return default
    value = variables.pop(name)
    if not isinstance(value, list):
        # Use the already-popped value here: looking up variables[name]
        # after the pop (as the previous version did) raised KeyError
        # instead of the intended RecipeError.
        raise RecipeError(
            f"Field '{name}' must be an indexed array, "
            f"got {type(value).__name__}"
        )
    return value
|
#! /usr/bin/env python3
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from bs4 import BeautifulSoup # type: ignore
from bs4.element import Tag # type: ignore
from dataclasses_json import DataClassJsonMixin
from ff14angler.constants.data_corrections import angler_fish_lodestone_url_corrections
from ff14angler.constants.regex import non_number_replacement_regex
from ff14angler.dataClasses.bait.baitProvider import BaitPercentage, BaitProvider
from ff14angler.dataClasses.comment.commentSection import CommentSection
from ff14angler.dataClasses.fish.fishDesynthesisChance import FishDesynthesisChance
from ff14angler.dataClasses.fish.fishHourPreferences import FishHourPreferences
from ff14angler.dataClasses.fish.fishId import FishId
from ff14angler.dataClasses.fish.fishLeve import FishLeve
from ff14angler.dataClasses.fish.fishRecipe import FishRecipe
from ff14angler.dataClasses.fish.fishTugStrength import FishTugStrength
from ff14angler.dataClasses.fish.fishWeatherPreferences import FishWeatherPreferences
from ff14angler.dataClasses.spot.spotId import SpotId
from ff14angler.network.xivapiWrapper import XivapiWrapper
@dataclass
class Fish(DataClassJsonMixin):
    """A single fish: data scraped from its en.ff14angler.com page combined
    with item metadata looked up through XIVAPI."""

    # Identity (always present).
    fish_id: FishId
    fish_angler_name: str

    # Fields scraped from the fish's angler page.
    fish_angler_aquarium_size: Optional[str] = None
    fish_angler_bait_preferences: List[BaitPercentage] = field(default_factory=list)
    fish_angler_canvas_size: Optional[str] = None
    fish_angler_comments: Optional[CommentSection] = None
    fish_angler_desynthesis_items: List[FishDesynthesisChance] = field(default_factory=list)
    fish_angler_double_hooking_count: Optional[str] = None
    fish_angler_gathering_spots: List[SpotId] = field(default_factory=list)
    fish_angler_hour_preferences: Optional[FishHourPreferences] = None
    fish_angler_involved_leves: List[FishLeve] = field(default_factory=list)
    fish_angler_involved_recipes: List[FishRecipe] = field(default_factory=list)
    fish_angler_item_category: Optional[str] = None
    fish_angler_lodestone_url: Optional[str] = None
    fish_angler_territory: Optional[str] = None
    fish_angler_tug_strength: List[FishTugStrength] = field(default_factory=list)
    fish_angler_weather_preferences: Optional[FishWeatherPreferences] = None

    # Fields filled in from XIVAPI lookups.
    fish_fishing_log_description_de: Optional[str] = None
    fish_fishing_log_description_en: Optional[str] = None
    fish_fishing_log_description_fr: Optional[str] = None
    fish_fishing_log_description_ja: Optional[str] = None
    fish_icon_url: Optional[str] = None
    fish_introduced_patch: Optional[str] = None
    fish_item_description_de: Optional[str] = None
    fish_item_description_en: Optional[str] = None
    fish_item_description_fr: Optional[str] = None
    fish_item_description_ja: Optional[str] = None
    fish_item_level: Optional[int] = None
    fish_item_name_de: Optional[str] = None
    fish_item_name_en: Optional[str] = None
    fish_item_name_fr: Optional[str] = None
    fish_item_name_ja: Optional[str] = None
    fish_large_icon_url: Optional[str] = None

    @staticmethod
    async def _parse_angler_aquarium_size(data_row3: Tag) -> Optional[str]:
        """Read the aquarium size label from the info-icon area, if any."""
        div_tag = data_row3.find('div', {'class': 'fancy info_icon_area'})
        if div_tag:
            for tag in div_tag.select('.clear_icon.icon_with_text'):  # type: Tag
                img_tag = tag.find('img')
                if img_tag and 'aquarium' in img_tag.attrs.get('src', ''):
                    return tag.attrs['data-text']
        return None

    @staticmethod
    async def _parse_angler_bait_preferences(soup: BeautifulSoup) -> List[BaitPercentage]:
        """Delegate bait-percentage extraction to the bait provider."""
        return await BaitProvider.get_bait_percentage_list_from_fish_soup(soup)

    @staticmethod
    async def _parse_angler_canvas_size(data_row3: Tag) -> Optional[str]:
        """Read the canvas (fish print) size label, if any."""
        div_tag = data_row3.find('div', {'class': 'fancy info_icon_area'})
        if div_tag:
            for tag in div_tag.select('.clear_icon.icon_with_text'):  # type: Tag
                img_tag = tag.find('img')
                # noinspection SpellCheckingInspection
                if img_tag and 'gyotaku' in img_tag.attrs.get('src', ''):
                    return tag.attrs['data-text']
        return None

    @staticmethod
    async def _parse_angler_desynthesis_items(soup: BeautifulSoup) -> List[FishDesynthesisChance]:
        """Not every fish has a desynthesis list."""
        # noinspection SpellCheckingInspection
        form = soup.find('form', {'name': 'desynthesized_delete'})
        temp_desynthesis_list: List[FishDesynthesisChance] = []
        if form:
            # The first tbody is the table header; results live in the second.
            # noinspection SpellCheckingInspection
            if len(tbody := form.find_all('tbody')) > 1:
                for tag in tbody[1].find_all('tr'):  # type: Tag
                    temp_desynthesis_list.append(await FishDesynthesisChance.get_desynthesis_chance_from_soup(tag))
        return temp_desynthesis_list

    @staticmethod
    async def _parse_angler_double_hooking_count(data_row3: Tag) -> str:
        """Read the double-hooking count label; defaults to '1'."""
        div_tag = data_row3.find('div', {'class': 'fancy info_icon_area'})
        if div_tag:
            for tag in div_tag.select('.clear_icon.icon_with_text'):  # type: Tag
                img_tag = tag.find('img')
                if img_tag and 'double_hooking' in img_tag.attrs.get('src', ''):
                    return tag.attrs['data-text']
        return '1'

    @staticmethod
    async def _parse_angler_gathering_spots(soup: BeautifulSoup) -> List[SpotId]:
        """Collect the SpotIds of every fishing spot listed for this fish."""
        # Avoiding circular imports
        from ff14angler.dataClasses.spot.spotProvider import SpotProvider

        temp_fishing_spot_list: List[SpotId] = []
        spot_form = soup.find('form', {'name': 'spot_delete'})
        if spot_form:
            # noinspection SpellCheckingInspection
            body = spot_form.find_all('tbody')[1]
            for tag in body.find_all('tr'):  # type: Tag
                # Rows without a link are not real spot entries.
                if not tag.find('a'):
                    continue
                td1, _, td3 = tag.find_all('td')  # type: Tag, _, Tag
                # The spot's angler id is the numeric part of the link href.
                spot_angler_spot_id: int = int(
                    non_number_replacement_regex.sub(repl='', string=td1.find('a').attrs['href'])
                )
                temp_fishing_spot_list.append(await SpotProvider.get_spot_id_from_angler_id(spot_angler_spot_id))
        return temp_fishing_spot_list

    @staticmethod
    async def _parse_angler_hour_preferences(soup: BeautifulSoup) -> Optional[FishHourPreferences]:
        """Delegate hour-preference extraction to FishHourPreferences."""
        return await FishHourPreferences.get_hour_preferences_from_fish_soup(soup)

    @staticmethod
    async def _parse_angler_involved_leves(soup: BeautifulSoup) -> List[FishLeve]:
        """Not every fish is used as a leve turn in."""
        header = soup.find('h3', {'id': 'leve'})
        temp_leve_list: List[FishLeve] = []
        if header:
            table: Tag = header.find_next('table', {'class': 'list'})
            if table:
                for tag in table.find_all('tr'):  # type: Tag
                    temp_leve_list.append(await FishLeve.get_leve_from_soup(tag))
        return temp_leve_list

    @staticmethod
    async def _parse_angler_involved_recipes(soup: BeautifulSoup) -> List[FishRecipe]:
        """Not every fish is an ingredient in a recipe."""
        # The site itself misspells the anchor id as 'receipe'.
        # noinspection SpellCheckingInspection
        header = soup.find('h3', {'id': 'receipe'})
        temp_recipe_list: List[FishRecipe] = []
        if header:
            table: Tag = header.find_next('table', {'class': 'list'})
            if table:
                for tag in table.find_all('tr'):  # type: Tag
                    temp_recipe_list.append(await FishRecipe.get_recipe_from_fish_soup(tag))
        return temp_recipe_list

    @staticmethod
    async def _parse_angler_item_category(data_row2: Tag) -> str:
        """Read the item category from the first span of data row 2."""
        span1: Tag = data_row2.find_all('span')[0]
        return span1.text.strip()

    @staticmethod
    async def _parse_angler_large_icon_url(data_row1: Tag) -> str:
        """Build the absolute URL of the large icon image."""
        div_tag = data_row1.find('div', {'class': 'clear_icon_l'})
        img_tag = div_tag.find('img')
        return 'https://en.ff14angler.com{}'.format(img_tag.attrs['src'])

    @staticmethod
    async def _parse_angler_lodestone_url(fish_id: FishId, data_row2: Tag) -> Optional[str]:
        """Extract the Lodestone URL for this fish, falling back to the
        manual correction table, or None when no valid link exists."""
        # Fix: previously passed the set literal {'class', 'lodestone eorzeadb_link'}
        # (comma instead of colon); bs4 coerces a non-dict attrs argument into a
        # class search, so it only worked by accident. The dict form below is the
        # intended attrs filter, consistent with every other find() in this class.
        # noinspection SpellCheckingInspection
        a_tag = data_row2.find('a', {'class': 'lodestone eorzeadb_link'})
        if a_tag:
            url = a_tag.attrs['href']
            # An href ending in '0000.png' appears to be a placeholder for
            # fish without a real Lodestone entry — TODO confirm.
            if url.endswith('0000.png'):
                return None
            return url
        elif angler_fish_lodestone_url_corrections.get(fish_id.fish_angler_fish_id):
            return angler_fish_lodestone_url_corrections.get(fish_id.fish_angler_fish_id)
        return None

    @staticmethod
    async def _parse_angler_territory(data_row2: Tag) -> Optional[str]:
        """Read the territory name from the second span of data row 2.

        The first whitespace-separated token of the span is dropped; the
        remainder is the territory name (may be empty)."""
        span2: Tag = data_row2.find_all('span')[1]
        territory = ' '.join(span2.text.strip().split()[1:])
        if territory:
            return territory
        return None

    @staticmethod
    async def _parse_angler_weather_preferences(soup: BeautifulSoup) -> Optional[FishWeatherPreferences]:
        """Delegate weather-preference extraction to FishWeatherPreferences."""
        return await FishWeatherPreferences.get_weather_preferences_from_fish_soup(soup)

    @staticmethod
    async def _parse_icon_url(data_row1: Tag) -> str:
        """Build the absolute URL of the small icon image (the large icon's
        filename with the trailing 'l' removed)."""
        div_tag = data_row1.find('div', {'class': 'clear_icon_l'})
        img_tag = div_tag.find('img')
        return 'https://en.ff14angler.com{}'.format(img_tag.attrs['src'].replace('l.png', '.png'))

    @staticmethod
    async def _parse_fish_introduced_patch(data_row2: Tag) -> str:
        """Read the game patch the fish was introduced in from the page."""
        return data_row2.find('span', {'class': 'patch'}).attrs['patch']

    async def _lookup_fish_introduced_patch(self, data_row2: Tag, item_lookup_response: Dict[str, Any]) -> str:
        """Prefer the patch version from XIVAPI; fall back to the scraped
        page value when the API response lacks it."""
        try:
            return item_lookup_response['GamePatch']['Version']
        except KeyError:
            return await self._parse_fish_introduced_patch(data_row2)

    @staticmethod
    async def _parse_item_level(data_row2: Tag) -> int:
        """Read the item level (first token of the second span) as an int."""
        span2: Tag = data_row2.find_all('span')[1]
        return int(span2.text.strip().split()[0])

    @staticmethod
    async def _lookup_fish_long_description(item_lookup_response: Dict[str, Any]) -> Dict[str, str]:
        """Look up the localized fishing-log description via XIVAPI.

        Tries FishParameter first, then SpearfishingItem; returns an empty
        dict when neither link is present.  (Annotation narrowed from
        Optional[Dict[str, str]]: this never returns None.)
        """
        game_content_links: Dict[str, Any] = item_lookup_response['GameContentLinks']
        try:
            fish_lookup_response = await XivapiWrapper.xivapi_fish_parameter_lookup(
                game_content_links['FishParameter']['Item'][0]
            )
            return {
                'de': fish_lookup_response['Text_de'],
                'en': fish_lookup_response['Text_en'],
                'fr': fish_lookup_response['Text_fr'],
                'ja': fish_lookup_response['Text_ja'],
            }
        except KeyError:
            try:
                fish_lookup_response = await XivapiWrapper.xivapi_spearfishing_item_lookup(
                    game_content_links['SpearfishingItem']['Item'][0]
                )
                return {
                    'de': fish_lookup_response['Description_de'],
                    'en': fish_lookup_response['Description_en'],
                    'fr': fish_lookup_response['Description_fr'],
                    'ja': fish_lookup_response['Description_ja'],
                }
            except KeyError:
                return dict()

    @classmethod
    async def get_fish_from_angler_fish(cls, fish_angler_id: int, fish_angler_name: str) -> 'Fish':
        """Construct a minimal Fish from its angler id and name; the rest
        of the fields are filled in later by the update_* methods."""
        fish_id = await FishId.get_fish_id_from_angler_fish(
            fish_angler_id=fish_angler_id,
            fish_angler_name=fish_angler_name
        )
        return cls(fish_id, fish_angler_name)

    async def update_fish_with_fish_soup(self, soup: BeautifulSoup) -> 'Fish':
        """Populate this Fish from its angler page soup plus XIVAPI data.

        Mutates self in place and returns it for chaining."""
        search_response = await XivapiWrapper.xivapi_item_search(self.fish_angler_name)
        item_lookup_response = await XivapiWrapper.xivapi_item_lookup(search_response['ID'])
        fish_table: Tag = soup.find('table', {'class': 'fish_info'})

        # The ranking widget pollutes the first rows; drop it before parsing.
        # noinspection SpellCheckingInspection
        if ranking := fish_table.find('span', {'class': 'ranklist note frame'}):
            ranking.decompose()

        data_row1, data_row2, data_row3 = fish_table.find_all('tr')[:3]  # type: Tag, Tag, Tag

        self.fish_angler_aquarium_size = await self._parse_angler_aquarium_size(data_row3)
        self.fish_angler_bait_preferences += await self._parse_angler_bait_preferences(soup)
        self.fish_angler_canvas_size = await self._parse_angler_canvas_size(data_row3)
        self.fish_angler_desynthesis_items += await self._parse_angler_desynthesis_items(soup)
        self.fish_angler_double_hooking_count = await self._parse_angler_double_hooking_count(data_row3)
        self.fish_angler_gathering_spots += await self._parse_angler_gathering_spots(soup)
        self.fish_angler_hour_preferences = await self._parse_angler_hour_preferences(soup)
        self.fish_angler_involved_leves += await self._parse_angler_involved_leves(soup)
        self.fish_angler_involved_recipes += await self._parse_angler_involved_recipes(soup)
        self.fish_angler_item_category = await self._parse_angler_item_category(data_row2)
        self.fish_angler_lodestone_url = await self._parse_angler_lodestone_url(self.fish_id, data_row2)
        self.fish_angler_territory = await self._parse_angler_territory(data_row2)
        self.fish_angler_weather_preferences = await self._parse_angler_weather_preferences(soup)
        self.fish_icon_url = item_lookup_response["Icon"]
        self.fish_introduced_patch = await self._lookup_fish_introduced_patch(data_row2, item_lookup_response)
        self.fish_item_description_de = item_lookup_response['Description_de']
        self.fish_item_description_en = item_lookup_response['Description_en']
        self.fish_item_description_fr = item_lookup_response['Description_fr']
        self.fish_item_description_ja = item_lookup_response['Description_ja']
        self.fish_item_level = await self._parse_item_level(data_row2)
        self.fish_item_name_de = item_lookup_response['Name_de']
        self.fish_item_name_en = item_lookup_response['Name_en']
        self.fish_item_name_fr = item_lookup_response['Name_fr']
        self.fish_item_name_ja = item_lookup_response['Name_ja']

        fishing_log_descriptions = await self._lookup_fish_long_description(item_lookup_response)
        self.fish_fishing_log_description_de = fishing_log_descriptions.get('de')
        self.fish_fishing_log_description_en = fishing_log_descriptions.get('en')
        self.fish_fishing_log_description_fr = fishing_log_descriptions.get('fr')
        self.fish_fishing_log_description_ja = fishing_log_descriptions.get('ja')

        await XivapiWrapper.xivapi_download_icon_image(self.fish_icon_url)
        return self

    async def update_fish_with_comment_section(self, comment_section: CommentSection):
        """Attach the scraped comment section to this fish."""
        self.fish_angler_comments = comment_section

    async def update_fish_with_tug_strength(self, tug_strength: Dict[str, float]):
        """Record tug-strength percentages, keyed by strength number.

        No-op when tug strengths have already been recorded."""
        if len(self.fish_angler_tug_strength) == 0:
            self.fish_angler_tug_strength += [
                FishTugStrength(
                    fish_tug_strength=int(strength_num),
                    fish_tug_strength_percent=strength_percent
                ) for strength_num, strength_percent in tug_strength.items()
            ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.