import os
from app import DATA_DIR
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
from app.bot_communities.tokenizers import Tokenizer #, SpacyTokenizer
from app.bot_communities.token_analyzer import summarize_token_frequencies
from pandas import DataFrame
if __name__ == "__main__":
file_storage = FileStorage(dirpath="bot_retweet_graphs/bot_min/0.8/n_communities/2/analysis_v2")
bq_service = BigQueryService()
tokenizer = Tokenizer()
results = [dict(row) for row in list(bq_service.fetch_bot_community_profiles(n_communities=2))]
print("FETCHED", len(results), "RECORDS")
for i, row in enumerate(results):
row["profile_tokens"] = []
row["profile_tags"] = []
if row["user_descriptions"]:
#print("--------------")
#print("COMMUNITY", row["community_id"], i, row["bot_id"], row["screen_names"])
#print(row["user_descriptions"])
# We want unique tokens per user here; otherwise a user who changed their description
# (i.e. has multiple descriptions) would have a greater influence over the counts.
# That makes TF-IDF impossible, though, because the document counts equal the token counts;
# really we are just counting the number of users who use each token.
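# e.g. (illustrative): a user whose descriptions were ["dog mom", "dog mom, runner"]
# would contribute a token like "dog" once after de-duplication, not twice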
tokens = list(set(tokenizer.custom_stems(row["user_descriptions"])))
row["profile_tokens"] = tokens
#print("TOKENS:", tokens)
tags = list(set(tokenizer.hashtags(row["user_descriptions"])))
row["profile_tags"] = tags
#print("TAGS:", tags)
print("--------------")
print("BOT PROFILES:")
profiles_df = DataFrame(results)
print(profiles_df.head())
# SAVE AND UPLOAD PROFILES
local_profiles_filepath = os.path.join(file_storage.local_dirpath, "community_profiles.csv")
gcs_profiles_filepath = os.path.join(file_storage.gcs_dirpath, "community_profiles.csv")
profiles_df.to_csv(local_profiles_filepath)
file_storage.upload_file(local_profiles_filepath, gcs_profiles_filepath)
for community_id, filtered_df in profiles_df.groupby("community_id"):
print("--------------")
print(f"COMMUNITY {community_id}:", len(filtered_df))
local_community_dirpath = os.path.join(file_storage.local_dirpath, f"community_{community_id}")
gcs_community_dirpath = os.path.join(file_storage.gcs_dirpath, f"community_{community_id}")
if not os.path.exists(local_community_dirpath):
os.makedirs(local_community_dirpath)
tokens_df = summarize_token_frequencies(filtered_df["profile_tokens"].tolist())
print(tokens_df.head())
# SAVE AND UPLOAD PROFILE TOKENS
local_tokens_filepath = os.path.join(local_community_dirpath, "profile_tokens.csv")
gcs_tokens_filepath = os.path.join(gcs_community_dirpath, "profile_tokens.csv")
tokens_df.to_csv(local_tokens_filepath)
file_storage.upload_file(local_tokens_filepath, gcs_tokens_filepath)
token_records = tokens_df[tokens_df["count"] > 1].to_dict("records")
bq_service.upload_bot_community_profile_tokens(community_id=community_id, records=token_records)
tags_df = summarize_token_frequencies(filtered_df["profile_tags"].tolist())
print(tags_df.head())
# SAVE AND UPLOAD PROFILE TAGS
local_tags_filepath = os.path.join(local_community_dirpath, "profile_tags.csv")
gcs_tags_filepath = os.path.join(gcs_community_dirpath, "profile_tags.csv")
tags_df.to_csv(local_tags_filepath)
file_storage.upload_file(local_tags_filepath, gcs_tags_filepath)
tag_records = tags_df[tags_df["count"] > 1].to_dict("records")
bq_service.upload_bot_community_profile_tags(community_id=community_id, records=tag_records)
|
word_count = {}
with open("./Beginner Guide to Python.txt") as fin:
for line in fin:
line = line[:-1]
words = line.split()
for word in words:
if word not in word_count:
word_count[word] = 0
word_count[word] += 1
print(
sorted(
word_count.items(),
key=lambda x: x[1],
reverse=True
)[:10]
)
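# The same top-10 count can be written more compactly with collections.Counter;
# an equivalent sketch (assuming the same input file; ties may be ordered differently):
#
# from collections import Counter
# with open("./Beginner Guide to Python.txt") as fin:
#     print(Counter(fin.read().split()).most_common(10))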
|
from collections import namedtuple
import torch
from torch.nn import LSTMCell, Module, Linear
from editor_code.copy_editor.vocab import base_plus_copy_indices
from gtd.ml.torch.seq_batch import SequenceBatch
from gtd.ml.torch.source_encoder import MultiLayerSourceEncoder
from gtd.ml.torch.utils import NamedTupleLike
from gtd.ml.torch.utils import GPUVariable
import numpy as np
EncoderInput = namedtuple('EncoderInput', ['input_words', 'output_words', 'token_embedder'])
"""
Args:
input_words (list[MultiVocabIndices])
output_words (list[MultiVocabIndices])
token_embedder (DynamicMultiVocabTokenEmbedder)
"""
class EncoderOutput(namedtuple('EncoderOutput', ['input_embeds', 'agenda', 'token_embedder']), NamedTupleLike):
pass
"""
Args:
input_embeds (list[SequenceBatch]): list of SequenceBatch elements of shape (batch_size, seq_length, hidden_size)
agenda (Variable): of shape (batch_size, agenda_dim) - VAE latent variable for reconstructing target sequence
token_embedder (DynamicMultiVocabTokenEmbedder)
"""
class Encoder(Module):
def __init__(self, word_dim, agenda_dim, hidden_dim, num_layers, num_inputs, dropout_prob, use_vae, kappa=0, use_target=True):
"""Construct Encoder.
Args:
word_dim (int)
agenda_dim (int)
hidden_dim (int)
num_layers (int)
num_inputs (int)
"""
super(Encoder, self).__init__()
self.agenda_dim = agenda_dim
self.source_encoder = MultiLayerSourceEncoder(2 * word_dim, hidden_dim, num_layers, dropout_prob=dropout_prob,
rnn_cell_factory=LSTMCell)
self.target_encoder = MultiLayerSourceEncoder(2 * word_dim, hidden_dim, num_layers, dropout_prob=0.0,
rnn_cell_factory=LSTMCell)
self.agenda_maker = AgendaMaker(self.source_encoder.hidden_dim * num_inputs, self.agenda_dim)
self.output_agenda_maker = AgendaMaker(self.source_encoder.hidden_dim, self.agenda_dim)
self.use_vae = use_vae
self.vae_wrap = VMFVAEWrapper(kappa)
self.use_target = use_target
def preprocess(self, input_batches, output_seqs, token_embedder, volatile=False):
"""Preprocess.
Args:
input_batches (list[list[list[unicode]]]): a batch of input sequence lists
Each sequence list has one sequence per "input channel"
output_seqs (list[list[unicode]]): a batch of output sequences (targets)
token_embedder (DynamicMultiVocabTokenEmbedder)
volatile (bool): whether to make Variables volatile (don't track gradients)
Returns:
EncoderInput
"""
dynamic_vocabs = token_embedder.dynamic_vocabs
base_vocab = token_embedder.base_vocab
indices_list = []
for channel_seqs in zip(*input_batches):
# channel_seqs is a batch of sequences, where all the sequences come from one "input channel"
multi_vocab_indices = base_plus_copy_indices(list(channel_seqs), dynamic_vocabs, base_vocab, volatile=volatile)
indices_list.append(multi_vocab_indices)
output_indices = base_plus_copy_indices(output_seqs, dynamic_vocabs, base_vocab, volatile=volatile)
return EncoderInput(indices_list, output_indices, token_embedder)
def make_embedding(self, encoder_input, words_list, encoder):
"""Encoder for a single `channel'
"""
channel_word_embeds = encoder_input.token_embedder.embed_seq_batch(words_list)
source_encoder_output = encoder(channel_word_embeds.split())
channel_embeds_list = source_encoder_output.combined_states
channel_embeds = SequenceBatch.cat(channel_embeds_list)
# the final hidden states in both the forward and backward direction, concatenated
channel_embeds_final = torch.cat(source_encoder_output.final_states, 1) # (batch_size, hidden_dim)
return channel_embeds, channel_embeds_final
def target_out(self, encoder_input):
output_embeds, output_embeds_final = self.make_embedding(encoder_input, encoder_input.output_words,
self.target_encoder)
return self.output_agenda_maker(output_embeds_final)
def ctx_code_out(self, encoder_input):
all_channel_embeds = []
all_channel_embeds_final = []
for channel_words in encoder_input.input_words:
channel_embeds, channel_embeds_final = self.make_embedding(encoder_input, channel_words,
self.source_encoder)
all_channel_embeds.append(channel_embeds)
all_channel_embeds_final.append(channel_embeds_final)
input_embeds_final = torch.cat(all_channel_embeds_final, 1) # (batch_size, hidden_dim * num_channels)
context_agenda = self.agenda_maker(input_embeds_final)
return context_agenda, all_channel_embeds
def forward(self, encoder_input, train_mode=True):
"""Encode.
Args:
encoder_input (EncoderInput)
Returns:
EncoderOutput, cost (0 in this case)
"""
context_agenda, all_channel_embeds = self.ctx_code_out(encoder_input)
if self.use_vae and train_mode:
if self.use_target:
target_agenda = self.target_out(encoder_input)
vae_agenda, vae_loss = self.vae_wrap(context_agenda+target_agenda, True)
else:
vae_agenda, vae_loss = self.vae_wrap(context_agenda, True)
else:
vae_agenda = context_agenda / torch.sqrt(torch.sum(context_agenda**2.0, dim=1)).expand_as(context_agenda)
vae_loss = GPUVariable(torch.zeros(1))
return EncoderOutput(all_channel_embeds, vae_agenda, encoder_input.token_embedder), vae_loss
class AgendaMaker(Module):
def __init__(self, source_dim, agenda_dim):
super(AgendaMaker, self).__init__()
self.linear = Linear(source_dim, agenda_dim)
def forward(self, source_embed):
"""Create agenda vector from source text embedding and edit embedding.
Args:
source_embed (Variable): of shape (batch_size, source_dim)
Returns:
agenda (Variable): of shape (batch_size, agenda_dim)
"""
return self.linear(source_embed)
class GaussianVAEWrapper(Module):
def __init__(self, vae_wt):
super(GaussianVAEWrapper, self).__init__()
self.vae_wt = vae_wt
def kl_penalty(self, agenda):
"""
Computes KL penalty given encoder output
"""
batch_size, agenda_dim = agenda.size()
return self.vae_wt * 0.5 * torch.sum(torch.pow(agenda, 2)) / batch_size
def forward(self, source_embed, add_noise=True):
means = torch.zeros(source_embed.size())
std = torch.ones(source_embed.size())
noise = GPUVariable(torch.normal(means=means, std=std)) # unit Gaussian
if add_noise:
return source_embed + noise, self.kl_penalty(source_embed)
else:
return source_embed, 0
class VMFVAEWrapper(Module):
def __init__(self, kappa):
super(VMFVAEWrapper, self).__init__()
self.kappa = kappa
def kl_penalty(self, agenda):
# ignoring this for simplicity since we don't need the KL penalty.
batch_size, id_dim = agenda.size()
return GPUVariable(torch.zeros(1))
def sample_vMF(self, mu, kappa):
"""vMF sampler in pytorch.
Args:
mu (Tensor): of shape (batch_size, 2*word_dim)
kappa (Float): controls dispersion. kappa of inf is no dispersion.
"""
batch_size, id_dim = mu.size()
result_list = []
for i in range(batch_size):
munorm = mu[i].norm().expand(id_dim)
# sample offset from center (on sphere) with spread kappa
w = self._sample_weight(kappa, id_dim)
wtorch = GPUVariable(w * torch.ones(id_dim))
# sample a point v on the unit sphere that's orthogonal to mu
v = self._sample_orthonormal_to(mu[i] / munorm, id_dim)
# compute new point
scale_factr = torch.sqrt(GPUVariable(torch.ones(id_dim)) - torch.pow(wtorch, 2))
orth_term = v * scale_factr
muscale = mu[i] * wtorch / munorm
sampled_vec = (orth_term + muscale)
result_list.append(sampled_vec)
return torch.stack(result_list, 0)
def renormalize_norm(self, mu):
"""
Args:
mu (Tensor): of shape (batch_size, 2*word_dim)
"""
batch_size, id_dim = mu.size()
result_list = []
for i in range(batch_size):
munorm = mu[i].norm().expand(id_dim)
sampled_vec = mu[i] / munorm
result_list.append(sampled_vec)
return torch.stack(result_list, 0)
def _sample_weight(self, kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2) # dim * (kdiv *x + np.log(1-x**2))
while True:
z = np.random.beta(dim / 2., dim / 2.) # concentrates towards 0.5 as d-> inf
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
u): # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
return w
def _sample_orthonormal_to(self, mu, dim):
"""Sample point on sphere orthogonal to mu.
"""
v = GPUVariable(torch.randn(dim))
rescale_value = mu.dot(v) / mu.norm()
proj_mu_v = mu * rescale_value.expand(dim)
ortho = v - proj_mu_v
ortho_norm = torch.norm(ortho)
return ortho / ortho_norm.expand_as(ortho)
def forward(self, source_embed, add_noise=True):
if add_noise:
return self.sample_vMF(source_embed, self.kappa), self.kl_penalty(source_embed)
else:
return self.renormalize_norm(source_embed), 0
|
# generated with make_mock.py
E2BIG = 7
EACCES = 13
EADDRINUSE = 98
EADDRNOTAVAIL = 99
EADV = 68
EAFNOSUPPORT = 97
EAGAIN = 11
EALREADY = 114
EBADE = 52
EBADF = 9
EBADFD = 77
EBADMSG = 74
EBADR = 53
EBADRQC = 56
EBADSLT = 57
EBFONT = 59
EBUSY = 16
ECHILD = 10
ECHRNG = 44
ECOMM = 70
ECONNABORTED = 103
ECONNREFUSED = 111
ECONNRESET = 104
EDEADLK = 35
EDEADLOCK = 35
EDESTADDRREQ = 89
EDOM = 33
EDOTDOT = 73
EDQUOT = 122
EEXIST = 17
EFAULT = 14
EFBIG = 27
EHOSTDOWN = 112
EHOSTUNREACH = 113
EIDRM = 43
EILSEQ = 84
EINPROGRESS = 115
EINTR = 4
EINVAL = 22
EIO = 5
EISCONN = 106
EISDIR = 21
EISNAM = 120
EL2HLT = 51
EL2NSYNC = 45
EL3HLT = 46
EL3RST = 47
ELIBACC = 79
ELIBBAD = 80
ELIBEXEC = 83
ELIBMAX = 82
ELIBSCN = 81
ELNRNG = 48
ELOOP = 40
EMFILE = 24
EMLINK = 31
EMSGSIZE = 90
EMULTIHOP = 72
ENAMETOOLONG = 36
ENAVAIL = 119
ENETDOWN = 100
ENETRESET = 102
ENETUNREACH = 101
ENFILE = 23
ENOANO = 55
ENOBUFS = 105
ENOCSI = 50
ENODATA = 61
ENODEV = 19
ENOENT = 2
ENOEXEC = 8
ENOLCK = 37
ENOLINK = 67
ENOMEM = 12
ENOMSG = 42
ENONET = 64
ENOPKG = 65
ENOPROTOOPT = 92
ENOSPC = 28
ENOSR = 63
ENOSTR = 60
ENOSYS = 38
ENOTBLK = 15
ENOTCONN = 107
ENOTDIR = 20
ENOTEMPTY = 39
ENOTNAM = 118
ENOTSOCK = 88
ENOTTY = 25
ENOTUNIQ = 76
ENXIO = 6
EOPNOTSUPP = 95
EOVERFLOW = 75
EPERM = 1
EPFNOSUPPORT = 96
EPIPE = 32
EPROTO = 71
EPROTONOSUPPORT = 93
EPROTOTYPE = 91
ERANGE = 34
EREMCHG = 78
EREMOTE = 66
EREMOTEIO = 121
ERESTART = 85
EROFS = 30
ESHUTDOWN = 108
ESOCKTNOSUPPORT = 94
ESPIPE = 29
ESRCH = 3
ESRMNT = 69
ESTALE = 116
ESTRPIPE = 86
ETIME = 62
ETIMEDOUT = 110
ETOOMANYREFS = 109
ETXTBSY = 26
EUCLEAN = 117
EUNATCH = 49
EUSERS = 87
EWOULDBLOCK = 11
EXDEV = 18
EXFULL = 54
errorcode = {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO', 6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN', 12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY', 17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR', 22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY', 27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK', 32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG', 37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG', 43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST', 48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE', 53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT', 59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR', 64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV', 69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT', 74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG', 79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC', 84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK', 89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT', 93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'EOPNOTSUPP', 96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE', 99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET', 103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN', 107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT', 111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY', 115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM', 119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT'}
|
import numpy as np
""" How to use?
example 1:
# J = [[-1,0,0],[0,2,1],[0,0,2]]
# make_problem_Jordan_normal_form(J)
example 2:
J = [[-1,0,0,0],[0,2,1,0],[0,0,2,1],[0,0,0,2]]
make_problem_Jordan_normal_form(J,difficulty=8)
"""
"""
テキトーな n*n ユニモジュラ行列を1つ得る
(単位行列に対して、行or列変形をランダムに繰り返すというアルゴリズム)
difficulty: ユニモジュラ行列の「複雑度」(基本変形の最大回数)
"""
def random_unimodular(n,difficulty):
from random import randint, sample
if difficulty is None: difficulty = n*3
A = np.eye(n,n)
for _ in range(difficulty):
# add c times the j-th vector to the i-th
i,j = sample(range(n), 2)
c, = sample([-2,-1,1,2], 1)
if randint(0,1)==0: # elementary row operation
A[i] += c*A[j]
else: # elementary column operation
A[:, i:i+1] += c*A[:, j:j+1]
# check that the result is unimodular
assert abs(np.linalg.det(A) - 1) < 1e-5
return np.matrix(A)
"""
np.matrix形式の行列 A を
整数成分のリスト形式に変換
"""
def mat2intlist(A):
return np.round(A).astype(int).tolist()
"""
行列 P,Q,J(ジョルダン標準形),A の情報を
texのコメント形式で出力
"""
def show_info(P,Q,J,A):
print("情報")
x = "P^{-1}"
print(f"% P={mat2intlist(P)}")
print(f"% Q={x}={mat2intlist(Q)}")
print(f"% J={J}")
print(f"% A = PJQ = {mat2intlist(A)}")
print()
"""
問題をtexとして出力
"""
def show_problem(A):
print("問題")
print(f"${mat2tex(A)}$\nのジョルダン標準形を求めよ。\n")
"""
解答をtexとして出力
"""
def show_ans(P,Q,J,A):
print("答え")
print(f"\\[\n{mat2tex(Q)}{mat2tex(A)}{mat2tex(P)}={mat2tex(J)}\\]\n")
"""
行列 A をtex形式に変換した文字列を返す
"""
def mat2tex(A):
res = "\\begin{pmatrix}\n"
for line in mat2intlist(A):
res += " & ".join(map(str,line)) + " \\\\\n"
res += "\\end{pmatrix}\n"
return res
"""
ジョルダン標準形が J になるような行列の問題を生成する
A = PJP^{-1}となる
つまり、広義固有空間の基底は P の縦ベクトルたちからなる
difficulty: 問題の難しさ(大きいほど難しい)デフォルトは 3*n
"""
def make_problem_Jordan_normal_form(J,difficulty=None):
n = len(J)
P = random_unimodular(n,difficulty)
Q = np.linalg.inv(P)
A = P@np.matrix(J)@Q
# check that P, Q, A are integer matrices
assert np.all(np.abs(P@Q - np.eye(n)) < 1e-5)
assert np.all(np.abs(P - np.round(P)) < 1e-5)
assert np.all(np.abs(Q - np.round(Q)) < 1e-5)
assert np.all(np.abs(A - np.round(A)) < 1e-5)
show_info(P,Q,J,A)
show_problem(A)
show_ans(P,Q,J,A)
|
import ast
import os
import collections
from nltk import pos_tag
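# NOTE: pos_tag requires the NLTK 'averaged_perceptron_tagger' resource
# (install with nltk.download('averaged_perceptron_tagger') if it is missing)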
def flat(_list):
""" [(1,2), (3,4)] -> [1, 2, 3, 4]"""
return sum([list(item) for item in _list], [])
def is_verb(word):
"""return True if word is a verb, base form"""
return word is not None and pos_tag([word])[0][1] == 'VB'
def is_special(name):
"""__some_name__ -> True"""
return isinstance(name, str) and name.startswith('__') and name.endswith('__')
def process_files(dirname, files):
"""generate trees from files in the list"""
processed = 0
trees = []
for file in files:
if not file.endswith('.py'):
continue
filename = os.path.join(dirname, file)
with open(filename, 'r', encoding='utf-8') as attempt_handler:
main_file_content = attempt_handler.read()
try:
tree = ast.parse(main_file_content)
except SyntaxError as e:
print(e)
continue
trees.append(tree)
return trees
def get_trees(path):
"""generate trees from files in given path"""
trees = []
for dirname, dirs, files in os.walk(path, topdown=True):
dir_trees = process_files(dirname, files)
trees += dir_trees
return trees
def get_verbs_from_function_name(function_name):
"""return list of all verbs (base form) in function name"""
return [word for word in function_name.split('_') if is_verb(word)]
def get_names(tree):
"""return list of all names in a tree"""
return [node.id for node in ast.walk(tree) if isinstance(node, ast.Name)]
def get_function_names(tree):
"""return list of all function names in a tree"""
return [node.name.lower() for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]
def get_all_names(path):
"""return list of all names in python files in specified path"""
trees = get_trees(path)
names = [f for f in flat([get_names(t) for t in trees]) if not is_special(f)]
return names
def get_top_names(path, top_size=10):
"""return most common names in python files in specified path"""
names = get_all_names(path)
return collections.Counter(names).most_common(top_size)
def get_all_function_names(path):
"""return list of all function names inpython files in specified path"""
trees = get_trees(path)
function_names = [f for f in flat([get_function_names(t) for t in trees]) if not is_special(f)]
return function_names
def get_top_function_names(path, top_size=10):
"""return most common function names in python files in specified path"""
function_names = get_all_function_names(path)
return collections.Counter(function_names).most_common(top_size)
def get_top_function_verbs(path, top_size=10):
"""return most common verbs in function names in python files in specified path"""
function_names = get_all_function_names(path)
verbs = flat([get_verbs_from_function_name(function_name) for function_name in function_names])
return collections.Counter(verbs).most_common(top_size)
if __name__ == '__main__':
words = []
projects = [
'django',
'flask',
'pyramid',
'reddit',
'requests',
'sqlalchemy',
]
for project in projects:
path = os.path.join('.', project)
words += get_top_function_verbs(path)
top_size = 200
print('total %s words, %s unique' % (len(words), len(set(words))))
for word, occurrence in collections.Counter(words).most_common(top_size):
print(word, occurrence)
|
import logging
def config_log(level):
logging.basicConfig(
format='%(asctime)s %(name)s %(levelname)s %(message)s',
level=level.upper())
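# Example (sketch): config_log("info") configures the root logger at INFO level
# with a "timestamp name level message" format.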
|
import random
import numpy as np
from .arena import Arena
from .deliverylogger import DeliveryLogger
from .drone import Drone
class PackageGenerator:
def __init__(self):
self.coordinate_pool = self.define_coordinate_pool()
self.pool_size = self.coordinate_pool.shape[0]
self.package_weights = [0.5, 0.75, 1]
self.rng = {}
self.delivery_loggers = {}
def define_coordinate_pool(self):
arena = Arena(0)
z = arena.min_z
return np.array([
[2.6, 0.6, z],
[2.4, 3.4, z],
[0.6, 2.2, z],
[1.4, 3.2, z],
[1., 1.6, z],
[3.6, 0.6, z],
[3.2, 3.2, z],
[3.4, 1.4, z]
])
def initialize_swarm(self, swarm_id, seed):
self.rng[swarm_id] = random.Random()
self.rng[swarm_id].seed(seed)
self.delivery_loggers[swarm_id] = DeliveryLogger()
return True
def generate_number(self, swarm_id, lower_limit, upper_limit):
return self.rng[swarm_id].randint(lower_limit, upper_limit)
def generate_hash(self, swarm_id):
return self.rng[swarm_id].getrandbits(128)
def get_package(self, swarm_id):
if self.delivery_loggers[swarm_id].log_is_full(swarm_id):
return None
rand = self.generate_number(swarm_id, 0, self.pool_size - 1)
weightIndex = self.generate_number(swarm_id, 0, len(self.package_weights)-1)
weight = self.package_weights[weightIndex]
id = self.generate_hash(swarm_id)
package = {'id': str(id), 'coordinates': self.coordinate_pool[rand].tolist(), 'weight': weight, 'drone': None, 'picked': False}
self.delivery_loggers[swarm_id].add_package(swarm_id, package)
return package
def pickup(self, swarm_id, package_id, drone: Drone):
success = self.delivery_loggers[swarm_id].pickup(swarm_id, package_id, drone)
return success
def deliver(self, swarm_id, package_id, drone: Drone):
success = self.delivery_loggers[swarm_id].deliver(swarm_id, package_id, drone)
return success
def print_deliveries(self, swarm_id):
success = self.delivery_loggers[swarm_id].print_deliveries()
return success
|
from rest_framework import serializers
from ..models import Officer
from accounts.models import User
from accounts.api.serializers import AccountSerializer, AccountCreateSerializer, AccountLoginSerializer
from django.contrib.auth import get_user_model
from django.db.models import Q
from ..models import Officer
# class OfficerSerializer(serializers.ModelSerializer):
# class Meta:
# model = Officer
# fields = ['account', 'id', 'activity']
class OfficerSerializer(serializers.ModelSerializer):
account = AccountSerializer()
class Meta:
model = Officer
fields = ['account']
class OfficerCreateSerializer(serializers.ModelSerializer):
# email_confirm = serializers.EmailField(label='Confirm Email')
account = AccountCreateSerializer()
class Meta:
model = Officer
fields = ['account']
def create(self, validated_data):
account_data = validated_data.pop('account')
# print(account_data)
account = User.objects.create(**account_data)
officer = Officer.objects.create(account=account, **validated_data)
return officer
class OfficerLoginSerializer(serializers.ModelSerializer):
account = AccountLoginSerializer()
class Meta:
model = Officer
fields = ['account']
""""""
|
a = 100
b = 10
c = 5
|
def main():
x=2
y=6
print(plus(x,y))
def plus(x,y):
return x+y
if __name__ == "__main__":
main()
|
"""Test dataset building and splitting."""
import pytest
import torch
from gcn_prot.data import get_datasets, get_longest
def test_longest(graph_path):
"""Test get longest length in directory."""
assert 189 == get_longest(graph_path)
def test_get_dataset(data_path):
"""Test splitting with default size of datasets."""
train, test, valid = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
k_fold=None,
seed=1234,
)
assert 164 == len(train)
assert 24 == len(test)
assert 48 == len(valid)
def test_gaussian_augmentation(data_path):
"""Test splitting with default size of datasets."""
train, valid, test = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
augment=2,
k_fold=None,
seed=1234,
)
assert 164 * 2 == len(train)
assert 48 * 2 == len(test)
assert 24 == len(valid)
def test_gaussian_augmentation_label(data_path):
"""Test splitting with default size of datasets."""
train, valid, test = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
augment=3,
augmented_label="0",
k_fold=None,
seed=1234,
)
assert 164 + (57 * 2) == len(train)
assert 24 + (25 * 2) == len(test)
assert 24 == len(valid)
def test_indexing(data_path):
"""Test random access the generated graph dataset."""
train, _, _ = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
k_fold=None,
seed=1234,
)
prot = train[0]
prot_dims = [len(tr) for tr in prot]
# v, c, m, y
assert prot_dims == [185, 185, 185, 2]
def test_dataloading_batch(data_path):
"""Test transformation of input."""
train, _, _ = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
k_fold=None,
seed=1234,
)
trainloader = torch.utils.data.DataLoader(
train, shuffle=False, batch_size=25, drop_last=False
)
for batch in trainloader:
batch_dims = [len(tr) for tr in batch]
break
v = batch[0]
assert batch_dims == [25, 25, 25, 2]
assert v.shape == torch.Size([25, 185, 29])
@pytest.mark.skip(reason="have to figure out how it works.")
def test_kfold(data_path):
"""Test kfold splitting."""
train, test, valid = get_datasets(
data_path=data_path,
nb_nodes=185,
task_type="classification",
nb_classes=2,
split=None,
k_fold=[7, 1, 2],
seed=1234,
)
assert 164 == len(train)
assert 24 == len(test)
assert 48 == len(valid)
|
#!/usr/bin/env python3
# @Time : 17-9-8 23:48
# @Author : Wavky Huang
# @Contact : master@wavky.com
# @File : io.py
"""
Process input and output
"""
import calendar
import json
import os
import pickle
from datetime import date
from urllib import request
from mhcalendar.time_elements import Schedule, Holiday, Month, dec_float
CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.mhcalendar')
def exist(path):
return os.path.exists(path)
def prepare():
"""
Create the config folder if it does not exist, cache holidays if missing or out of date, and initialize the schedule cache.
"""
_check_config_path()
# TODO: Maybe we can prepare holiday data for specific year
if not _check_holiday_cache():
holidays = update_holiday_schedule() or []
Cache.cache_holidays(holidays)
_init_schedule_cache()
def _check_config_path():
"""
Create the config folder if it does not exist
"""
if not exist(CONFIG_DIR):
os.makedirs(CONFIG_DIR)
def _check_holiday_cache():
holiday_cache = Cache.restore_holidays()
if holiday_cache and len(holiday_cache) > 0:
thisyear = date.today().year
if holiday_cache[0].year == str(thisyear):
return True
return False
def _init_schedule_cache():
schedule_cache = Cache.restore_schedule()
if not schedule_cache:
today = date.today()
month = Month(today.year, today.month)
schedule = Schedule(None, month)
Cache.cache_schedule(schedule)
class Cache:
HOLIDAY_CACHE_NAME = 'holiday.cache'
SCHEDULE_CACHE_NAME = 'schedule.cache'
@classmethod
def cache_holidays(cls, holidays):
_check_config_path()
path = os.path.join(CONFIG_DIR, Cache.HOLIDAY_CACHE_NAME)
try:
with open(path, 'w') as file:
json.dump(holidays, file)
except:
print("Failure to open cache file:", path)
@classmethod
def cache_schedule(cls, schedule):
_check_config_path()
path = os.path.join(CONFIG_DIR, Cache.SCHEDULE_CACHE_NAME)
try:
with open(path, 'wb') as file:
pickle.dump(schedule, file)
except:
print("Failure to open cache file:", path)
@classmethod
def restore_holidays(cls):
"""
:return: Holiday list or None if no cache is found
"""
path = os.path.join(CONFIG_DIR, Cache.HOLIDAY_CACHE_NAME)
if exist(path):
try:
with open(path, 'r') as file:
jslist = json.load(file)
if jslist and isinstance(jslist, list):
holidays = []
for item in jslist:
holiday = Holiday(*item)
holidays.append(holiday)
return holidays
else:
return None
except:
print("Failure to open cache file:", path)
else:
return None
@classmethod
def restore_schedule(cls):
"""
:return: Schedule object or None if no cache is found
"""
path = os.path.join(CONFIG_DIR, Cache.SCHEDULE_CACHE_NAME)
if exist(path):
try:
with open(path, 'rb') as file:
schedule = pickle.load(file)
return schedule if isinstance(schedule, Schedule) else None
except:
print("Failure to open cache file:", path)
else:
return None
class MHCalendarDrawer:
"""
Output a monthly calendar.
"""
def __init__(self, width=14):
"""
:param width: minimum width is limited to 12, and it MUST be an even number
"""
self.width = 12 if width < 12 else width
self.hr_line = '-' * (width + 1) * 7 + '-'
self.seperator = '|' + ('-' * width + '|') * 7
self.blank_line = str(' ' * ((width + 1) * 7 + 1))
def __separate_line(self, week: str, end=''):
week = list(week)
distance_to_end = self.width * 7 + 8 - len(week)
week += list(' ' * distance_to_end)
week[::self.width + 1] = list('|' * 8)
return ''.join(week) + end
def __pipe(self, start, width, *texts, seperator='|', end=''):
"""
The real name is Pile In piPE :P
:param start: offset to start
:param width: pipe's width
:param texts: text to fill into a pipe
:param seperator: separator placed before and after each pipe, not counted in the pipe's width
:return: a bamboo...
"""
elements = [' ' * start, seperator]
for t in texts:
lent = len(t)
if lent < width:
t += ' ' * (width - lent)
t += seperator
elements.append(t)
elements.append(end)
return ''.join(elements)
def __packup_week_schedule(self, week):
first_weekday = week[0].date.weekday()
start_index = first_weekday * (self.width + 1)
pipes = list()
holidays = ['* Holiday *'.center(self.width)
if week[i].holiday is not None else ' ' * self.width for i in range(len(week))]
schedule_hours = ['Sched: {}'.format(week[i].scheduled_work_hours)
if not week[i].is_dayoff else '-' for i in range(len(week))]
overtime_hours = ['OT: {}'.format(week[i].overtime)
if not week[i].is_dayoff else '-' for i in range(len(week))]
checkin_hours = ['Checkin: {}'.format(week[i].checkin_manhour)
if not week[i].is_dayoff else '-' for i in range(len(week))]
dayoff = ['Dayoff: {}'.format('Yes' if week[i].is_dayoff else 'No') for i in range(len(week))]
done = ['Done: {}'.format('Yes' if week[i].is_past else 'No') for i in range(len(week))]
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *holidays), '\n'))
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *schedule_hours), '\n'))
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *overtime_hours), '\n'))
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *checkin_hours), '\n'))
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *dayoff), '\n'))
pipes.append(self.__separate_line(self.__pipe(start_index, self.width, *done), '\n'))
return pipes
def __decorate_today(self, month, week_line):
if month.today:
day = date.today().day
index = week_line.find(' ' + str(day) + ' ')
if index > 0:
week_line = list(week_line)
week_line[index - 1] = '['
r_offset = 3 if day < 10 else 4
week_line[index + r_offset] = ']'
week_line = ''.join(week_line)
return week_line
def __print_holiday(self, schedule):
holidays = schedule.month.holidays
if holidays and len(holidays):
for holiday in holidays:
print('{year}.{month}.{day} {name}'
.format(year=holiday.year, month=holiday.month, day=holiday.day, name=holiday.name))
def __print_today(self, schedule):
day = schedule.month.today
day_name = date.today().strftime('%A')
if day:
holiday = '** {} **'.format(day.holiday.name) if day.holiday else ''
checkin_or_dayoff = '\t Checkin: {}'.format(day.checkin_manhour) if not day.is_dayoff else '\t Day off'
print('Today:', str(day.date), day_name, holiday, '\t Schedule(OT):', day.scheduled_work_hours,
'({})'.format(day.overtime), checkin_or_dayoff)
else:
print('today:', date.today(), day_name)
def __print_manhour_expect(self, schedule):
workdays = len(schedule.month.days) - len(schedule.dayoff_list)
salary = schedule.job.required_manhour * schedule.job.hourly_pay
print('Expecting:', 'Manhour/Workdays = {0}/{1}d'.format(schedule.job.required_manhour, workdays),
'\t Salary = {}'.format(salary))
def __print_manhour_fornow(self, schedule):
print('For now: ', 'Checkin manhour = {}'.format(schedule.checkin_manhour),
'\t Remaining manhour = {}'.format(schedule.manhour_remain),
'\t Overtime = {}'.format(schedule.overhours),
'\t Salary = {}'.format(schedule.checkin_manhour * dec_float(schedule.job.hourly_pay)))
def __print_manhour_absence(self, schedule):
if schedule.manhour_absence > 0:
print('These manhour can not be scheduled on this month:', schedule.manhour_absence)
def draw(self, schedule: Schedule):
cal = str(calendar.month(schedule.month.index['year'], schedule.month.index['month'], self.width))
cal_lines = [' ' + w for w in cal.splitlines() if w != '']
title = cal_lines.pop(0)
day_of_week = cal_lines.pop(0)
cal_weeks = cal_lines
print('', title, self.hr_line, day_of_week, self.__separate_line(self.hr_line), sep='\n')
for i in range(len(cal_weeks)):
print(self.__separate_line(self.__decorate_today(schedule.month, cal_weeks[i])))
print(*self.__packup_week_schedule(schedule.month.weeks[i]), sep='', end='')
print(self.__separate_line(self.hr_line))
print('(Sched = Schedule, OT = Overtime)')
self.__print_holiday(schedule)
print('')
self.__print_today(schedule)
self.__print_manhour_expect(schedule)
self.__print_manhour_fornow(schedule)
self.__print_manhour_absence(schedule)
def update_holiday_schedule():
"""
Request the list of holidays for the current year from the web service.
:return: new list of Holiday, or None if the request fails.
"""
url = "http://calendar-service.net/cal?start_year={year}&start_mon=1&end_year={year}&end_mon=12\
&year_style=normal&month_style=numeric&wday_style=en&format=csv&holiday_only=1".format(year=date.today().year)
print('Accessing network to request holiday data...')
print('url: ' + url)
try:
with request.urlopen(url) as f:
content = [line.decode('EUC-JP').replace('\n', '') for line in f.readlines()]
del content[0]
content = [line.split(',') for line in content]
holidays = [Holiday(*line) for line in content]
print('Success.')
return holidays
except:
print("Holiday schedule request failure.")
return None
|
import os
import itertools
import glob
import re
import torch
from torch.utils.data import Dataset
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import PIL
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
import flow_utils
import pdb
SINTEL_DIM = (1024, 436)
CHAIRS_DIM = (512, 384)
THINGS_DIM = (960, 540)
KITTI_DIM = (1241, 376)
KITTI_COARSE_DIM = (77, 23)
class MaskToTensor(object):
def __call__(self, x):
return to_tensor(x)
def to_tensor(mask):
mask = mask[:, :, None]
return torch.from_numpy(mask.transpose((2, 0, 1))).byte()
def kitti_invalid_mask(image):
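# a pixel counts as invalid when it is pure blue, i.e. (R, G, B) == (0, 0, 255)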
image = np.array(image)
mask = np.zeros(image.shape, dtype=np.uint8)
mask[:, :, 2] = 255 * np.ones(image.shape[:2], dtype=np.uint8)
return np.all(mask == image, axis=2).astype(np.uint8)
class InpaintNaNs(object):
def __call__(self, x):
if isinstance(x, Image.Image):
x = np.array(x)
nanmask = np.isnan(x)
if not np.any(nanmask):
x = Image.fromarray(x)
return x
x = cv2.inpaint(x, nanmask.astype(np.uint8), 2, cv2.INPAINT_TELEA)
x = Image.fromarray(x)
else:
nanmask = np.isnan(x)
if not np.any(nanmask):
return x
tx = x[:, :, 0]
tx = cv2.inpaint(tx, nanmask[:, :, 0].astype(np.uint8), 2, cv2.INPAINT_TELEA)
ty = x[:, :, 1]
ty = cv2.inpaint(ty, nanmask[:, :, 1].astype(np.uint8), 2, cv2.INPAINT_TELEA)
x = np.stack((tx, ty), axis=-1)
return x
class RandomOrderFlip(object):
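# Randomly swaps the two input frames; the returned sign (+1 or -1) lets the caller
# negate quantities that depend on frame order (e.g. the translation direction).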
def __init__(self, p=0.5):
self.p = p
self.draw()
def draw(self):
self.flip = np.random.random()
def __call__(self, image1, image2):
#
self.draw()
if self.flip < self.p:
return image2, image1, -1
else:
return image1, image2, 1
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
self.draw()
def draw(self):
self.flip = np.random.random()
def __call__(self, x):
if self.flip < self.p:
if isinstance(x, Image.Image):
x = F.hflip(x)
else:
x = np.fliplr(x)
x = x + 0 # make a contiguous copy (np.fliplr returns a negatively-strided view)
x[:, :, 0] = -x[:, :, 0] # negate horizontal displacement
return x
class ScaledCenterCrop(object):
def __init__(self, input_size, output_size):
w, h = input_size
tw, th = output_size
self.i = int(round((h - th) / 2.))
self.j = int(round((w - tw) / 2.))
self.w = tw
self.h = th
def new_params(self):
return None
def correct_calibration(self, K, scale):
dx = self.j * scale
dy = self.i * scale
K[0, -1] = K[0, -1] - dx
K[1, -1] = K[1, -1] - dy
return K
def __call__(self, x, scale):
i = self.i * scale
j = self.j * scale
h = self.h * scale
w = self.w * scale
if isinstance(x, Image.Image):
# PIL image
return F.crop(x, i, j, h, w)
else:
# numpy array
return x[i:i+h, j:j+w]
class ScaledRandomCrop(object):
def __init__(self, input_size, output_size):
self.input_size = input_size
self.output_size = output_size
self.new_params()
def get_params(self, input_size, output_size):
w, h = input_size
tw, th = output_size
if w == tw and h == th:
return 0, 0, h, w
elif w == tw and h > th:
i = np.random.randint(0, h-th)
return i, 0, th, tw
elif w > tw and h == th:
j = np.random.randint(0, w-tw)
return 0, j, th, tw
else:
i = np.random.randint(0, h-th)
j = np.random.randint(0, w-tw)
return i, j, th, tw
def new_params(self):
self.i, self.j, self.h, self.w = self.get_params(self.input_size, self.output_size)
def correct_calibration(self, K, scale):
dx = self.j * scale
dy = self.i * scale
K[0, -1] = K[0, -1] - dx
K[1, -1] = K[1, -1] - dy
return K
def __call__(self, x, scale):
i = self.i * scale
j = self.j * scale
h = self.h * scale
w = self.w * scale
if isinstance(x, Image.Image):
# PIL image
return F.crop(x, i, j, h, w)
else:
# numpy array
return x[i:i+h, j:j+w]
class RandomCrop(object):
def __init__(self, input_size, output_size):
self.input_size = input_size
self.output_size = output_size
self.new_params()
def get_params(self, input_size, output_size):
w, h = input_size
tw, th = output_size
if w == tw and h == th:
return 0, 0, h, w
elif w == tw and h > th:
i = np.random.randint(0, h-th)
return i, 0, th, tw
elif w > tw and h == th:
j = np.random.randint(0, w-tw)
return 0, j, th, tw
else:
i = np.random.randint(0, h-th)
j = np.random.randint(0, w-tw)
return i, j, th, tw
def new_params(self):
self.i, self.j, self.h, self.w = self.get_params(self.input_size, self.output_size)
def __call__(self, x):
if isinstance(x, Image.Image):
# PIL image
return F.crop(x, self.i, self.j, self.h, self.w)
else:
# numpy array
return x[self.i:self.i+self.h, self.j:self.j+self.w]
class CenterCrop(object):
def __init__(self, input_size, output_size):
w, h = input_size
tw, th = output_size
self.i = int(round((h - th) / 2.))
self.j = int(round((w - tw) / 2.))
self.w = tw
self.h = th
def new_params(self):
return None
def __call__(self, x):
if isinstance(x, Image.Image):
# PIL image
return F.crop(x, self.i, self.j, self.h, self.w)
else:
# numpy array
return x[self.i:self.i+self.h, self.j:self.j+self.w]
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def flowshow(flow):
# npflow = flow.numpy()
# npflow = np.transpose(npflow, (1, 2, 0))
img = flow_utils.compute_flow_image(flow)
plt.imshow(img)
plt.show()
def write_flow(name, flow):
with open(name, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
def flow_loader(path):
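# Middlebury .flo format: a float32 tag (202021.25, bytes 'PIEH'), then int32 width
# and height, then width*height*2 float32 values interleaved as (u, v) per pixel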
TAG_FLOAT = 202021.25
with open(path, "rb") as f:
tag = np.fromfile(f, np.float32, count=1)
assert tag[0] == TAG_FLOAT, 'Flow number %r incorrect. Invalid .flo file' % tag
w = np.fromfile(f, np.int32, count=1)[0]
h = np.fromfile(f, np.int32, count=1)[0]
data = np.fromfile(f, np.float32, count=2*w*h)
flow = np.reshape(data, (int(h), int(w), 2))
return flow
def pfm_loader(path):
file = open(path, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip().decode("utf-8")
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("utf-8"))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip().decode("utf-8"))
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def flow_png_loader(path):
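# KITTI flow PNGs are 16-bit, 3-channel images: the u/v components are decoded as
# (value - 32768) / 64, and a separate channel marks which pixels carry valid flow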
flow_raw = cv2.imread(path, -1)
# take the R and G channels (u, v); cv2.imread returns the image in BGR order
flow = flow_raw[:, :, 2:0:-1].astype(np.float32)
# scaling
flow = flow - 32768.
flow = flow / 64.
# clip
flow[np.abs(flow) < 1e-10] = 1e-10
# invalid mask
invalid_mask = (flow_raw[:, :, 0] == 0)
flow[invalid_mask, :] = 0
# valid mask
valid_mask = (flow_raw[:, :, 0] == 1).astype(np.uint8)
return flow, valid_mask
class Sintel(Dataset):
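# MPI Sintel optical flow dataset: each item is a pair of consecutive frames plus the
# ground-truth flow resized to each requested pyramid level.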
def __init__(self, root, split='train', passes=['final'], data_augmentation=True,
transform=None, target_transform=None, pyramid_levels=[0],
flow_scale=1, crop_dim=(384, 768), hflip=-1):
super(Sintel, self).__init__()
pyramid_levels = sorted(pyramid_levels)
if split == 'test':
root = os.path.join(root, 'test')
else:
root = os.path.join(root, 'training')
self.transform = transform
self.target_transform = target_transform
if data_augmentation:
self.flip_transform = RandomHorizontalFlip(hflip)
self.crop_transform = RandomCrop(SINTEL_DIM, crop_dim[::-1])
else:
self.flip_transform = RandomHorizontalFlip(-1)
self.crop_transform = CenterCrop(SINTEL_DIM, crop_dim[::-1])
dim = (crop_dim[0] // 2**pyramid_levels[0], crop_dim[1] // 2**pyramid_levels[0])
self.first_level_resize = transforms.Resize(dim)
self.pyramid_transforms = []
for l in pyramid_levels:
dim = (crop_dim[0] // 2**l, crop_dim[1] // 2**l)
self.pyramid_transforms.append(flow_utils.ResizeFlow(dim))
self.scale_transform = flow_utils.ScaleFlow(flow_scale)
passdirs = [os.path.join(root, p) for p in passes]
self.dataset = list(itertools.chain(*[self.make_dataset(p) for p in passdirs]))
def __getitem__(self, idx):
image1_path, image2_path, flow_path = self.dataset[idx]
image1 = pil_loader(image1_path)
image2 = pil_loader(image2_path)
image1 = self.crop_transform(image1)
image2 = self.crop_transform(image2)
image1 = self.flip_transform(image1)
image2 = self.flip_transform(image2)
image1 = self.first_level_resize(image1)
image2 = self.first_level_resize(image2)
if self.transform is not None:
image1 = self.transform(image1)
image2 = self.transform(image2)
flow0 = flow_loader(flow_path)
flow0 = self.crop_transform(flow0)
flow0 = self.flip_transform(flow0)
flow0 = self.scale_transform(flow0)
flow_levels = []
for pt in self.pyramid_transforms:
flow = pt(flow0)
flow_levels.append(flow)
if self.target_transform is not None:
flow_levels = [self.target_transform(flow) for flow in flow_levels]
self.crop_transform.new_params()
self.flip_transform.draw()
return image1, image2, flow_levels
def __len__(self):
return len(self.dataset)
def make_dataset(self, passdir):
dataset = []
flowdir = os.path.join(os.path.dirname(passdir), 'flow')
for seqid in sorted(os.listdir(passdir)):
seqdir = os.path.join(passdir, seqid)
for sd, _, fnames in sorted(os.walk(seqdir)):
for f1, f2 in zip(sorted(fnames), sorted(fnames)[1:]):
image1 = os.path.join(sd, f1)
image2 = os.path.join(sd, f2)
flow = os.path.join(flowdir, seqid, f1.split('.')[0] + '.flo')
dataset.append((image1, image2, flow))
return dataset
class SintelSR(Dataset):
def __init__(self, root, split='train', passes=['final'], transform=None,
input_scale=2, target_scale=1, crop_dim=(384, 768)):
super(SintelSR, self).__init__()
if split == 'test':
root = os.path.join(root, 'test')
else:
root = os.path.join(root, 'training')
self.transform = transform
self.crop_transform = transforms.RandomCrop(crop_dim)
self.input_resize = None
if input_scale != 1:
input_dim = (crop_dim[0] // input_scale, crop_dim[1] // input_scale)
self.input_resize = transforms.Resize(input_dim)
self.target_resize = None
if target_scale != 1:
target_dim = (crop_dim[0] // target_scale, crop_dim[1] // target_scale)
self.target_resize = transforms.Resize(target_dim)
self.tensor_transform = transforms.ToTensor()
passdirs = [os.path.join(root, p) for p in passes]
self.dataset = list(itertools.chain(*[self.make_dataset(p) for p in passdirs]))
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
image_path = self.dataset[idx]
image = pil_loader(image_path)
image = self.crop_transform(image)
input_image = image
target_image = image
if self.input_resize is not None:
input_image = self.input_resize(input_image)
if self.target_resize is not None:
target_image = self.target_resize(target_image)
if self.transform is not None:
input_image = self.transform(input_image)
target_image = self.transform(target_image)
input_image = self.tensor_transform(input_image)
target_image = 2 * self.tensor_transform(target_image) - 1
return input_image, target_image
def make_dataset(self, passdir):
dataset = []
for seqid in sorted(os.listdir(passdir)):
seqdir = os.path.join(passdir, seqid)
fnames = sorted(glob.glob(seqdir + '/*.png'))
dataset.extend(fnames)
return dataset
class FlyingChairs(Dataset):
def __init__(self, root, transform=None, target_transform=None,
pyramid_levels=[0], flow_scale=1, crop_dim=(384, 448)):
super(FlyingChairs, self).__init__()
pyramid_levels = sorted(pyramid_levels)
self.transform = transform
self.target_transform = target_transform
self.crop_transform = RandomCrop(CHAIRS_DIM, crop_dim[::-1])
dim = (crop_dim[0] // 2**pyramid_levels[0], crop_dim[1] // 2**pyramid_levels[0])
self.first_level_resize = transforms.Resize(dim)
self.pyramid_transforms = []
for l in pyramid_levels:
dim = (crop_dim[0] // 2**l, crop_dim[1] // 2**l)
self.pyramid_transforms.append(flow_utils.ResizeFlow(dim))
self.scale_transform = flow_utils.ScaleFlow(flow_scale)
self.dataset = self.make_dataset(root)
def __getitem__(self, idx):
image1_path, image2_path, flow_path = self.dataset[idx]
image1 = pil_loader(image1_path)
image2 = pil_loader(image2_path)
image1 = self.crop_transform(image1)
image2 = self.crop_transform(image2)
image1 = self.first_level_resize(image1)
image2 = self.first_level_resize(image2)
if self.transform is not None:
image1 = self.transform(image1)
image2 = self.transform(image2)
flow0 = flow_loader(flow_path)
flow0 = self.crop_transform(flow0)
flow0 = self.scale_transform(flow0)
flow_levels = []
for pt in self.pyramid_transforms:
flow = pt(flow0)
flow_levels.append(flow)
if self.target_transform is not None:
flow_levels = [self.target_transform(flow) for flow in flow_levels]
self.crop_transform.new_params()
return image1, image2, flow_levels
def __len__(self):
return len(self.dataset)
def make_dataset(self, root):
fnames = sorted(glob.glob(root + '/*.ppm'))
images1 = fnames[::2]
images2 = fnames[1::2]
flows = [f.split('img')[0] + 'flow.flo' for f in images1]
dataset = list(zip(images1, images2, flows))
return dataset
class FlyingChairsSR(Dataset):
def __init__(self, root, transform=None,
input_scale=2, target_scale=1, crop_dim=(384, 448)):
super(FlyingChairsSR, self).__init__()
self.transform = transform
self.crop_transform = transforms.RandomCrop(crop_dim)
self.input_resize = None
if input_scale != 1:
input_dim = (crop_dim[0] // input_scale, crop_dim[1] // input_scale)
self.input_resize = transforms.Resize(input_dim)
self.target_resize = None
if target_scale != 1:
target_dim = (crop_dim[0] // target_scale, crop_dim[1] // target_scale)
self.target_resize = transforms.Resize(target_dim)
self.tensor_transform = transforms.ToTensor()
self.dataset = self.make_dataset(root)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
image_path = self.dataset[idx]
image = pil_loader(image_path)
image = self.crop_transform(image)
input_image = image
target_image = image
if self.input_resize is not None:
input_image = self.input_resize(input_image)
if self.target_resize is not None:
target_image = self.target_resize(target_image)
if self.transform is not None:
input_image = self.transform(input_image)
target_image = self.transform(target_image)
input_image = self.tensor_transform(input_image)
target_image = 2 * self.tensor_transform(target_image) - 1
return input_image, target_image
def make_dataset(self, root):
return sorted(glob.glob(root + '/*.ppm'))
class FlyingThings(Dataset):
def __init__(self, root, split='train', partition=['A', 'B', 'C'], transform=None, target_transform=None,
pyramid_levels=[0], flow_scale=1, crop_dim=(384, 768)):
super(FlyingThings, self).__init__()
pyramid_levels = sorted(pyramid_levels)
self.transform = transform
self.target_transform = target_transform
self.nanfilter_transform = InpaintNaNs()
self.crop_transform = RandomCrop(THINGS_DIM, crop_dim[::-1])
dim = (crop_dim[0] // 2**pyramid_levels[0], crop_dim[1] // 2**pyramid_levels[0])
self.first_level_resize = transforms.Resize(dim)
self.pyramid_transforms = []
for l in pyramid_levels:
dim = (crop_dim[0] // 2**l, crop_dim[1] // 2**l)
self.pyramid_transforms.append(flow_utils.ResizeFlow(dim))
self.scale_transform = flow_utils.ScaleFlow(flow_scale)
self.dataset = self.make_dataset(root, split, partition)
def __getitem__(self, idx):
image1_path, image2_path, flow_path = self.dataset[idx]
image1 = pil_loader(image1_path)
image2 = pil_loader(image2_path)
image1 = self.nanfilter_transform(image1)
image2 = self.nanfilter_transform(image2)
image1 = self.crop_transform(image1)
image2 = self.crop_transform(image2)
image1 = self.first_level_resize(image1)
image2 = self.first_level_resize(image2)
if self.transform is not None:
image1 = self.transform(image1)
image2 = self.transform(image2)
flow0, _ = pfm_loader(flow_path) # add pfm loader
flow0 = flow0[:, :, :2]
flow0 = self.nanfilter_transform(flow0)
flow0 = self.crop_transform(flow0)
flow0 = self.scale_transform(flow0)
flow_levels = []
for pt in self.pyramid_transforms:
flow = pt(flow0)
flow_levels.append(flow)
if self.target_transform is not None:
flow_levels = [self.target_transform(flow) for flow in flow_levels]
self.crop_transform.new_params()
return image1, image2, flow_levels
def __len__(self):
return len(self.dataset)
def make_dataset(self, root, split, partition):
image_pairs = self.image_paths(root, split, partition)
image1, image2 = zip(*image_pairs)
flows = self.flow_paths(root, split, partition)
dataset = list(zip(image1, image2, flows))
return dataset
def image_paths(self, root, split, partition):
root = os.path.join(root, 'frames_finalpass')
if split == 'test':
root = os.path.join(root, 'TEST')
else:
root = os.path.join(root, 'TRAIN')
image_pairs = []
for part in partition:
part_path = os.path.join(root, part)
for subseq_path in sorted(glob.glob(part_path + '/*')):
# future direction
for camera in ['left', 'right']:
camera_path = os.path.join(subseq_path, camera)
fnames = sorted(glob.glob(camera_path + '/*.png'))
subseq_pairs = list(zip(fnames, fnames[1:]))
image_pairs.extend(subseq_pairs)
# past direction
for camera in ['left', 'right']:
camera_path = os.path.join(subseq_path, camera)
fnames = sorted(glob.glob(camera_path + '/*.png'))
subseq_pairs = list(zip(fnames[1:], fnames))
image_pairs.extend(subseq_pairs)
return image_pairs
def flow_paths(self, root, split, partition):
root = os.path.join(root, 'optical_flow')
if split == 'test':
root = os.path.join(root, 'TEST')
else:
root = os.path.join(root, 'TRAIN')
flows = []
for part in partition:
part_path = os.path.join(root, part)
for subseq_path in sorted(glob.glob(part_path + '/*')):
direction_path = os.path.join(subseq_path, 'into_future')
for camera in ['left', 'right']:
camera_path = os.path.join(direction_path, camera)
fnames = sorted(glob.glob(camera_path + '/*.pfm'))
flows.extend(fnames[:-1])
direction_path = os.path.join(subseq_path, 'into_past')
for camera in ['left', 'right']:
camera_path = os.path.join(direction_path, camera)
fnames = sorted(glob.glob(camera_path + '/*.pfm'))
flows.extend(fnames[1:])
return flows
class KITTIFlow(Dataset):
def __init__(self, root, split='train', transform=None, target_transform=None,
pyramid_levels=[0], flow_scale=1, crop_dim=(320, 896)):
super(KITTIFlow, self).__init__()
pyramid_levels = sorted(pyramid_levels)
self.split = split
if self.split == 'test':
root = os.path.join(root, 'testing')
else:
root = os.path.join(root, 'training')
self.transform = transform
self.target_transform = target_transform
self.flow_to_tensor = flow_utils.ToTensor()
self.mask_to_tensor = MaskToTensor()
self.crop_transform = RandomCrop(KITTI_DIM, crop_dim[::-1])
dim = (crop_dim[0] // 2**pyramid_levels[0], crop_dim[1] // 2**pyramid_levels[0])
self.first_level_resize = transforms.Resize(dim)
self.scale_transform = flow_utils.ScaleFlow(flow_scale)
self.pyramid_transforms = []
for l in pyramid_levels:
self.pyramid_transforms.append(flow_utils.ResizeSparseFlow(2**l))
self.dataset = self.make_dataset(root, split)
def __getitem__(self, idx):
image1_path, image2_path, flow_path = self.dataset[idx]
image1 = pil_loader(image1_path)
image2 = pil_loader(image2_path)
# NOTE: Hack for dealing with different sizes between samples
self.crop_transform.input_size = image1.size
self.crop_transform.new_params()
image1 = self.crop_transform(image1)
image2 = self.crop_transform(image2)
image1 = self.first_level_resize(image1)
image2 = self.first_level_resize(image2)
if self.transform is not None:
image1 = self.transform(image1)
image2 = self.transform(image2)
if self.split == 'test':
return image1, image2
flow0, valid_mask0 = flow_png_loader(flow_path)
flow0 = self.crop_transform(flow0)
valid_mask0 = self.crop_transform(valid_mask0)
flow0 = self.scale_transform(flow0)
flow_levels = []
mask_levels = []
for pt in self.pyramid_transforms:
flow, mask = pt(flow0, valid_mask0)
flow_levels.append(flow)
mask_levels.append(mask)
flow_levels = [self.flow_to_tensor(flow) for flow in flow_levels]
mask_levels = [self.mask_to_tensor(mask) for mask in mask_levels]
if self.target_transform is not None:
flow_levels = [self.target_transform(flow) for flow in flow_levels]
return image1, image2, flow_levels, mask_levels
def __len__(self):
return len(self.dataset)
def make_dataset(self, root, split):
imagedir = os.path.join(root, 'image_2')
fnames = sorted(glob.glob(imagedir + '/*.png'))
images1 = fnames[::2]
images2 = fnames[1::2]
flowdir = os.path.join(root, 'flow_noc')
flows = sorted(glob.glob(flowdir + '/*.png'))
dataset = []
if split == 'test':
dataset = list(zip(images1, images2))
else:
dataset = list(zip(images1, images2, flows))
return dataset
class KITTIFlowSR(Dataset):
def __init__(self, root, split='train', transform=None,
input_scale=2, target_scale=1, crop_dim=(320, 896)):
super(KITTIFlowSR, self).__init__()
if split == 'test':
root = os.path.join(root, 'test')
else:
root = os.path.join(root, 'training')
self.transform = transform
self.crop_transform = transforms.RandomCrop(crop_dim)
self.input_resize = None
if input_scale != 1:
input_dim = (crop_dim[0] // input_scale, crop_dim[1] // input_scale)
self.input_resize = transforms.Resize(input_dim)
self.target_resize = None
if target_scale != 1:
target_dim = (crop_dim[0] // target_scale, crop_dim[1] // target_scale)
self.target_resize = transforms.Resize(target_dim)
self.tensor_transform = transforms.ToTensor()
self.dataset = self.make_dataset(root)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
image_path = self.dataset[idx]
image = pil_loader(image_path)
image = self.crop_transform(image)
input_image = image
target_image = image
if self.input_resize is not None:
input_image = self.input_resize(input_image)
if self.target_resize is not None:
target_image = self.target_resize(target_image)
if self.transform is not None:
input_image = self.transform(input_image)
target_image = self.transform(target_image)
input_image = self.tensor_transform(input_image)
target_image = 2 * self.tensor_transform(target_image) - 1
return input_image, target_image
def make_dataset(self, root):
imagedir = os.path.join(root, 'image_2')
dataset = sorted(glob.glob(imagedir + '/*.png'))
return dataset
class KITTIDerot(Dataset):
def __init__(self, root, sequences=[0, 1, 2, 3, 4, 5], transform=None, input_scale=None, frame_offset=None,
pyramid_levels=[0], crop_dim=(20, 56), data_augmentation=True, fflip=-1, return_id=False):
super(KITTIDerot, self).__init__()
if frame_offset is None:
frame_offset = [1, 2, 3]
self.return_id = return_id
self.pyramid_levels = sorted(pyramid_levels)
if input_scale is None:
input_scale = self.pyramid_levels[-1]
self.input_scale = input_scale
self.transform = transform
if data_augmentation:
self.flow_flip_transform = RandomOrderFlip(fflip)
self.crop_transform = ScaledRandomCrop(KITTI_COARSE_DIM, crop_dim[::-1])
else:
self.flow_flip_transform = RandomOrderFlip(-1)
self.crop_transform = ScaledCenterCrop(KITTI_COARSE_DIM, crop_dim[::-1])
self.flow_to_tensor = flow_utils.ToTensor()
self.mask_to_tensor = MaskToTensor()
self.dataset = self.make_dataset(root, sequences, self.pyramid_levels, input_scale, frame_offset)
def __getitem__(self, idx):
image1_path, image2_path, mask_path, calib_paths, t, subseq_id, pair_id = self.dataset[idx]
image1 = pil_loader(image1_path)
image2 = pil_loader(image2_path)
mask = kitti_invalid_mask(pil_loader(mask_path))
image1, image2, sign_flip = self.flow_flip_transform(image1, image2)
t = sign_flip * t
Ks = []
for cp in calib_paths:
Ks.append(np.fromfile(cp, sep=' ').reshape((3, 3)))
Ks = Ks[::-1]
flow_levels = []
for l in range(len(self.pyramid_levels)):
flow_shape = (image1.size[1] * 2**l, image1.size[0] * 2**l, 2)
if np.linalg.norm(t) < np.finfo(np.float32).eps:
flow = np.zeros(flow_shape)
flow_levels.append(flow)
continue
# calibration
K = Ks[l]
# epipole
e = np.dot(K, t)
e = e / (e[2] + np.finfo(np.float32).eps)
e = e[:2]
# epipole matrix
E = e[None, None, :]
E = np.tile(E, (flow_shape[0], flow_shape[1], 1))
# init pixel map
px, py = np.meshgrid(np.arange(flow_shape[1]), np.arange(flow_shape[0]))
X = np.stack((px, py), axis=2)
# flow map
flow = E - X
fmag = np.sqrt(np.sum(np.multiply(flow, flow), axis=2))
flow = np.divide(flow, np.stack((fmag, fmag), axis=2))
# flip if forward translation
flow = -np.sign(t[2]) * flow
# append
flow_levels.append(flow)
# apply crops
self.crop_transform.new_params()
image1 = self.crop_transform(image1, 2**(4-self.input_scale))
image2 = self.crop_transform(image2, 2**(4-self.input_scale))
mask = self.crop_transform(mask, 2**(4-self.input_scale))
Ks = [self.crop_transform.correct_calibration(K, 2**l) for l, K in enumerate(Ks)]
flow_levels = [self.crop_transform(flow, 2**l) for l, flow in enumerate(flow_levels)]
flow_levels = flow_levels[::-1]
# convert to tensor
flow_levels = [self.flow_to_tensor(flow) for flow in flow_levels]
mask = self.mask_to_tensor(mask)
image1 = F.to_tensor(image1)
image2 = F.to_tensor(image2)
# remove blue pixels
if sign_flip < 0:
image1[:, mask[0]] = 0
else:
image2[:, mask[0]] = 0
# optional input image transform
if self.transform is not None:
image1 = F.to_tensor(self.transform(F.to_pil_image(image1)))
image2 = F.to_tensor(self.transform(F.to_pil_image(image2)))
if self.return_id:
return image1, image2, flow_levels, Ks[-1], t, subseq_id, pair_id
else:
return image1, image2, flow_levels, Ks[-1], t
def __len__(self):
return len(self.dataset)
def make_dataset(self, root, sequences, pyramid_levels, input_scale, frame_offset):
input_scale = 2**input_scale
window_fids = [0] + frame_offset
dataset = []
for s in sequences:
scaledir = os.path.join(root, "%02d" % s, 'subsequences', "%dx" % input_scale)
subseqdirs = sorted(glob.glob(scaledir + '/*/'))
for subseq in subseqdirs:
# all images in folder
fnames = sorted(glob.glob(subseq + '/*.png'))
nimages = len(fnames)
# select relevant frames
fnames = [fnames[i] for i in window_fids]
npairs = len(fnames[1:])
fmasks = fnames[1:]
ftrans = os.path.join(subseq, 'translations.txt')
translations = np.fromfile(ftrans, sep=' ').reshape((nimages,3))
fcalibs = []
for f in fnames[1:]:
pcalibs = []
for p in pyramid_levels:
pyrdir = os.path.join(root, "%02d" % s, 'subsequences', "%dx" % 2**p)
refdir = os.path.join(pyrdir, subseq.split('/')[-2])
pcalibs.append(os.path.join(refdir, 'scaled_calibration.txt'))
fcalibs.append(tuple(pcalibs))
subseq_id = os.path.basename(os.path.dirname(fnames[0]))
pair_ids = ['0{:d}'.format(i) for i in frame_offset]
pairs = list(zip([fnames[0]]*npairs, fnames[1:], fmasks, fcalibs,
translations[frame_offset, :], [subseq_id]*npairs, pair_ids))
dataset.extend(pairs)
return dataset
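# Minimal usage sketch (the root path is hypothetical; a ToTensor() transform is needed so
# that the default DataLoader collate can batch the images returned above):
#
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#   dataset = KITTIFlow(root='/data/kitti', split='train',
#                       transform=transforms.ToTensor(), pyramid_levels=[0, 1, 2])
#   loader = DataLoader(dataset, batch_size=4, shuffle=True)
#   image1, image2, flow_levels, mask_levels = next(iter(loader))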
|
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_check_partials
from openaerostruct.aerodynamics.lift_drag import LiftDrag
from openaerostruct.utils.testing import run_test, get_default_surfaces
class Test(unittest.TestCase):
def test(self):
surfaces = get_default_surfaces()
comp = LiftDrag(surface=surfaces[0])
run_test(self, comp)
def test_derivs_with_sideslip(self):
surfaces = get_default_surfaces()
# Use Tail since it is not symmetric.
comp = LiftDrag(surface=surfaces[1])
prob = om.Problem()
prob.model.add_subsystem("comp", comp)
prob.setup(force_alloc_complex=True)
prob["comp.alpha"] = 3.0
prob["comp.beta"] = 15.0
rng = np.random.default_rng(0)
prob["comp.sec_forces"] = 10.0 * rng.random(prob["comp.sec_forces"].shape)
prob.run_model()
check = prob.check_partials(compact_print=True, method="cs", step=1e-40)
assert_check_partials(check)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
# Make sure the binaries can be imported
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'Release'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'RelWithDebInfo'))
__this_file_directory = os.path.dirname(os.path.abspath(__file__))
def package_dir():
"""Return the path containing this ell module """
import os
return os.path.join(os.path.dirname(__file__))
# The SWIG generated wrappers are divided into pseudo-namespace sub packages.
from . import data
from . import math
from . import model
from . import nodes
from . import neural
from . import trainers
from . import platform
try:
from .rpi_magic import init_magics
init_magics()
except ImportError:
pass # we're in regular Python, not Jupyter
# must come after we initialize rpi_magic.
from . import util
del os
del sys
|
from dataclasses import dataclass
from .t_event import TEvent
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class Event(TEvent):
class Meta:
name = "event"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
|
# Copyright (c) 2013, Noah Jacob and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
def execute(filters=None):
columns, data = [], []
date=[]
labels = ""
if filters.get('filter_based_on') =='Date Range':
date.append(filters.get('from_date'))
date.append(filters.get('to_date'))
from_year = (date[0].split("-"))[0]
to_year = (date[1].split("-"))[0]
labels = from_year if from_year == to_year else from_year + "-" + to_year
else:
get_fiscal_year(date,filters)
labels = filters.get("fiscal_year")
columns = get_columns(labels)
validate_dates(date)
asset = get_data(filters.company,'Asset',date)
liability = get_data(filters.company,'Liability',date)
data.extend(asset)
asset_data = data[-2]
data.extend(liability)
liability_data = data[-2]
get_total_profit_loss(data)
profit_loss_data = data[-2]
report_summary = get_report_summary(asset_data,liability_data,profit_loss_data)
chart = get_chart_data(filters,columns,asset,liability)
	return columns, data, None, chart, report_summary
def get_chart_data(filters,columns,asset,liability):
labels = [d.get("label") for d in columns[1:]]
asset_data, liability_data = [], []
if asset:
asset_data.append(asset[-2].get("amount"))
if liability:
liability_data.append(liability[-2].get("amount"))
datasets = []
if asset_data:
datasets.append({'name': ('Assets'), 'values': asset_data})
if liability_data:
datasets.append({'name': ('Liabilities'), 'values': liability_data})
chart = {
"data": {
'labels': labels,
'datasets': datasets
}
}
if filters.chart_type == "Bar":
chart["type"] = "bar"
else:
chart["type"] = "line"
return chart
def get_report_summary(asset,liability,profit_loss):
return [
{
"value": asset['amount'],
"label": "Total Asset",
"datatype": "Currency",
"currency": "₹"
},
{
"value": liability['amount'],
"label": "Total Liability",
"datatype": "Currency",
"currency": "₹"
},
{
"value":profit_loss['amount'],
"label": "Provisional Profit/Loss ",
"indicator": "Green" if profit_loss['amount'] > 0 else "Red" ,
"datatype": "Currency",
"currency": "₹"
}
]
def validate_dates(date):
if date[0] > date[1]:
frappe.throw("Starting Date cannot be greater than ending date")
def get_fiscal_year(date,filters):
dates = frappe.db.sql("""SELECT
from_date,to_date
FROM
`tabFiscal Year`
WHERE
period = %(fiscal_year)s
""",filters,as_dict = 1)[0]
date.append(dates.get('from_date'))
date.append(dates.get('to_date'))
def get_data(company,account_type,date):
accounts = get_accounts(company,account_type,date)
data = []
indent = 0
for d in accounts:
if d.parent_account == None or d.parent_account == data[-1]['account']:
data_add(data,d,indent)
indent = indent +1
else:
for n in data:
if n['account'] == d.parent_account:
indent = n['indent'] + 1
data_add(data,d,indent)
break
indent = indent + 1
root_type = "Assets" if account_type == "Asset" else "Liabilities"
get_account_balances(company,data,root_type,date)
return data
def get_account_balances(company,accounts,root_type,date):
data = []
for a in accounts:
if not a['has_value']:
amount = get_balance(company,a['account'],date)
amount = abs(amount) if amount else 0.0
a['amount'] = amount
for d in reversed(data):
if d['parent_account'] == root_type:
d['amount'] +=flt(amount)
data[0]['amount']+=flt(amount)
break
else:
d['amount'] +=flt(amount)
data.append(a)
else:
data.append(a)
total_credit_debit = {
'account':'Total ' + accounts[0]['account_type'] + (' (' + "Debit" + ')' if accounts[0]['account_type'] == 'Asset' else ' ('+'Credit' +')'),
'amount':accounts[0]['amount']
}
accounts.append(total_credit_debit)
accounts.append({})
def get_total_profit_loss(data):
total_debit = data[0]['amount']
total_credit = data[-2]['amount']
total_profit_loss = total_debit - total_credit
total_credit += total_profit_loss
	data.append({'account':'Provisional Profit/Loss (Credit)','amount':total_profit_loss})
data.append({'account':'Total(Credit)','amount':total_credit})
def get_balance(company,name,date):
return frappe.db.sql("""SELECT
sum(debit_amount) - sum(credit_amount) as total
FROM
`tabGL Entry`
WHERE
company = %s and account = %s and posting_date>= %s and posting_date<= %s
""",(company,name,date[0],date[1]),as_dict = 1)[0]['total']
def data_add(data,account,indent):
data.append({
"account":account.name,
"parent_account":account.parent_account,
"account_type":account.account_type,
"has_value":account.is_group,
"indent":indent,
"amount":0.0
})
def get_accounts(company,account_type,date):
return frappe.db.sql("""SELECT
name,parent_account,lft,is_group,account_type
FROM
tabAccount
WHERE
company = %s and account_type = %s
ORDER BY
lft""",(company,account_type),as_dict = 1)
def get_columns(labels):
columns = [
{
"fieldname": "account",
"label": "Account",
"fieldtype": "Link",
"options": "Account",
"width": 400
},
{
"fieldname": 'amount',
"label": labels,
"fieldtype": "Currency",
"options": "currency",
"width": 500
}
]
return columns
|
from typing import Iterable, Optional
from crowdin_api.api_resources.abstract.resources import BaseResource
from crowdin_api.api_resources.labels.types import LabelsPatchRequest
class LabelsResource(BaseResource):
"""
Resource for Labels.
Link to documentation:
https://support.crowdin.com/api/v2/#tag/Labels
"""
def get_labels_path(self, projectId: int, labelId: Optional[int] = None):
if labelId:
return f"projects/{projectId}/labels/{labelId}"
return f"projects/{projectId}/labels"
def list_labels(
self,
projectId: int,
page: Optional[int] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
):
"""
List Labels.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.getMany
"""
return self.requester.request(
method="get",
path=self.get_labels_path(projectId=projectId),
params=self.get_page_params(page=page, offset=offset, limit=limit),
)
def add_label(self, projectId: int, title: str):
"""
Add Label.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.post
"""
return self.requester.request(
method="post",
path=self.get_labels_path(projectId=projectId),
request_data={"title": title},
)
def get_label(self, projectId: int, labelId: int):
"""
Get Label.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.get
"""
return self.requester.request(
method="get",
path=self.get_labels_path(projectId=projectId, labelId=labelId),
)
def delete_label(self, projectId: int, labelId: int):
"""
Delete Label.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.delete
"""
return self.requester.request(
method="delete",
path=self.get_labels_path(projectId=projectId, labelId=labelId),
)
def edit_label(self, projectId: int, labelId: int, data: Iterable[LabelsPatchRequest]):
"""
Edit Label.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.patch
"""
return self.requester.request(
method="patch",
path=self.get_labels_path(projectId=projectId, labelId=labelId),
request_data=data,
)
def assign_label_to_strings(self, projectId: int, labelId: int, stringIds: Iterable[int]):
"""
Assign Label to Strings.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.strings.post
"""
return self.requester.request(
method="post",
request_data={"stringIds": stringIds},
path=self.get_labels_path(projectId=projectId, labelId=labelId),
)
def unassign_label_from_strings(self, projectId: int, labelId: int, stringIds: Iterable[int]):
"""
Unassign Label from Strings.
Link to documentation:
https://support.crowdin.com/api/v2/#operation/api.projects.labels.strings.deleteMany
"""
return self.requester.request(
method="delete",
params={"stringIds": ",".join(str(stringId) for stringId in stringIds)},
path=self.get_labels_path(projectId=projectId, labelId=labelId),
)
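# Usage sketch (hypothetical wiring; in practice the resource is usually reached through
# the SDK's client object rather than constructed directly, and `requester` here stands
# for whatever request sender BaseResource expects):
#
#   resource = LabelsResource(requester=requester)
#   resource.add_label(projectId=123, title="needs-review")   # project id is made up
#   labels = resource.list_labels(projectId=123, limit=25)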
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Planet aggregator library.
This package is a library for developing web sites or software that
aggregate RSS, CDF and Atom feeds taken from elsewhere into a single,
combined feed.
"""
__version__ = "1.0~pre1"
__authors__ = [ "Scott James Remnant <scott@netsplit.com>",
"Jeff Waugh <jdub@perkypants.org>" ]
__license__ = "Python"
# Modules available without separate import
import cache
import feedparser
import htmltmpl
try:
import logging
except:
import compat_logging as logging
# Limit the effect of "from planet import *"
__all__ = ("cache", "feedparser", "htmltmpl", "logging",
"Planet", "Channel", "NewsItem")
import os
import md5
import time
import dbhash
# Version information (for generator headers)
VERSION = ("Planet/%s +http://www.planetplanet.org" % __version__)
# Default User-Agent header to send when retrieving feeds
USER_AGENT = VERSION + " " + feedparser.USER_AGENT
# Default cache directory
CACHE_DIRECTORY = "cache"
# Default number of items to display from a new feed
NEW_FEED_ITEMS = 2
# Useful common date/time formats
TIMEFMT_ISO = "%Y-%m-%dT%H:%M:%S+00:00"
TIMEFMT_822 = "%a, %d %b %Y %H:%M:%S +0000"
# Log instance to use here
log = logging.getLogger("planet")
class Planet:
"""A set of channels.
This class represents a set of channels for which the items will
be aggregated together into one combined feed.
Properties:
user_agent User-Agent header to fetch feeds with.
cache_directory Directory to store cached channels in.
new_feed_items Number of items to display from a new feed.
"""
def __init__(self):
self._channels = []
self.user_agent = USER_AGENT
self.cache_directory = CACHE_DIRECTORY
self.new_feed_items = NEW_FEED_ITEMS
def channels(self, hidden=0, sorted=1):
"""Return the list of channels."""
channels = []
for channel in self._channels:
if hidden or not channel.has_key("hidden"):
channels.append((channel.name, channel))
if sorted:
channels.sort()
return [ c[-1] for c in channels ]
def subscribe(self, channel):
"""Subscribe the planet to the channel."""
self._channels.append(channel)
def unsubscribe(self, channel):
"""Unsubscribe the planet from the channel."""
self._channels.remove(channel)
def items(self, hidden=0, sorted=1, max_items=0, max_days=0):
"""Return an optionally filtered list of items in the channel.
The filters are applied in the following order:
If hidden is true then items in hidden channels and hidden items
will be returned.
If sorted is true then the item list will be sorted with the newest
first.
If max_items is non-zero then this number of items, at most, will
be returned.
If max_days is non-zero then any items older than the newest by
this number of days won't be returned. Requires sorted=1 to work.
        The sharp-eyed will note that this looks a little strange code-wise;
        it turns out that Python gets *really* slow if we try to sort the
        actual items themselves. We also use mktime here, but that's OK
        because we discard the numbers and just need them to be relatively
        consistent with each other.
"""
items = []
for channel in self.channels(hidden=hidden, sorted=0):
for item in channel._items.values():
if hidden or not item.has_key("hidden"):
items.append((time.mktime(item.date), item.order, item))
# Sort the list
if sorted:
items.sort()
items.reverse()
# Apply max_items filter
if len(items) and max_items:
items = items[:max_items]
# Apply max_days filter
if len(items) and max_days:
max_count = 0
            max_time = items[0][0] - max_days * 86400  # 86400 seconds per day
for item in items:
if item[0] > max_time:
max_count += 1
else:
items = items[:max_count]
break
return [ i[-1] for i in items ]
class Channel(cache.CachedInfo):
"""A list of news items.
This class represents a list of news items taken from the feed of
a website or other source.
Properties:
url URL of the feed.
url_etag E-Tag of the feed URL.
url_modified Last modified time of the feed URL.
hidden Channel should be hidden (True if exists).
name Name of the feed owner, or feed title.
next_order Next order number to be assigned to NewsItem
updated Correct UTC-Normalised update time of the feed.
last_updated Correct UTC-Normalised time the feed was last updated.
id An identifier the feed claims is unique (*).
title One-line title (*).
link Link to the original format feed (*).
tagline Short description of the feed (*).
info Longer description of the feed (*).
modified Date the feed claims to have been modified (*).
author Name of the author (*).
publisher Name of the publisher (*).
generator Name of the feed generator (*).
category Category name (*).
copyright Copyright information for humans to read (*).
license Link to the licence for the content (*).
docs Link to the specification of the feed format (*).
language Primary language (*).
errorreportsto E-Mail address to send error reports to (*).
image_url URL of an associated image (*).
image_link Link to go with the associated image (*).
image_title Alternative text of the associated image (*).
image_width Width of the associated image (*).
image_height Height of the associated image (*).
Properties marked (*) will only be present if the original feed
contained them. Note that the optional 'modified' date field is simply
a claim made by the item and parsed from the information given, 'updated'
(and 'last_updated') are far more reliable sources of information.
Some feeds may define additional properties to those above.
"""
IGNORE_KEYS = ("links", "contributors", "textinput", "cloud", "categories",
"url", "url_etag", "url_modified")
def __init__(self, planet, url):
if not os.path.isdir(planet.cache_directory):
os.makedirs(planet.cache_directory)
cache_filename = cache.filename(planet.cache_directory, url)
cache_file = dbhash.open(cache_filename, "c", 0666)
cache.CachedInfo.__init__(self, cache_file, url, root=1)
self._items = {}
self._planet = planet
self._expired = []
self.url = url
self.url_etag = None
self.url_modified = None
self.name = None
self.updated = None
self.last_updated = None
self.next_order = "0"
self.cache_read()
self.cache_read_entries()
def has_item(self, id_):
"""Check whether the item exists in the channel."""
return self._items.has_key(id_)
def get_item(self, id_):
"""Return the item from the channel."""
return self._items[id_]
# Special methods
__contains__ = has_item
def items(self, hidden=0, sorted=0):
"""Return the item list."""
items = []
for item in self._items.values():
if hidden or not item.has_key("hidden"):
items.append((time.mktime(item.date), item.order, item))
if sorted:
items.sort()
items.reverse()
return [ i[-1] for i in items ]
def __iter__(self):
"""Iterate the sorted item list."""
return iter(self.items(sorted=1))
def cache_read_entries(self):
"""Read entry information from the cache."""
keys = self._cache.keys()
for key in keys:
if key.find(" ") != -1: continue
if self.has_key(key): continue
item = NewsItem(self, key)
self._items[key] = item
def cache_write(self, sync=1):
"""Write channel and item information to the cache."""
for item in self._items.values():
item.cache_write(sync=0)
for item in self._expired:
item.cache_clear(sync=0)
cache.CachedInfo.cache_write(self, sync)
self._expired = []
def update(self):
"""Download the feed to refresh the information.
This does the actual work of pulling down the feed and if it changes
updates the cached information about the feed and entries within it.
"""
info = feedparser.parse(self.url,
etag=self.url_etag, modified=self.url_modified,
agent=self._planet.user_agent)
if not info.has_key("status"):
log.info("Updating feed <%s>", self.url)
elif info.status == 301 or info.status == 302:
log.warning("Feed has moved from <%s> to <%s>", self.url, info.url)
os.link(cache.filename(self._planet.cache_directory, self.url),
cache.filename(self._planet.cache_directory, info.url))
self.url = info.url
elif info.status == 304:
log.info("Feed <%s> unchanged", self.url)
return
elif info.status >= 400:
log.error("Error %d while updating feed <%s>",
info.status, self.url)
return
else:
log.info("Updating feed <%s>", self.url)
self.url_etag = info.has_key("etag") and info.etag or None
self.url_modified = info.has_key("modified") and info.modified or None
if self.url_etag is not None:
log.debug("E-Tag: %s", self.url_etag)
if self.url_modified is not None:
log.debug("Last Modified: %s",
time.strftime(TIMEFMT_ISO, self.url_modified))
self.update_info(info.feed)
self.update_entries(info.entries)
self.cache_write()
def update_info(self, feed):
"""Update information from the feed.
This reads the feed information supplied by feedparser and updates
the cached information about the feed. These are the various
potentially interesting properties that you might care about.
"""
for key in feed.keys():
if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS:
# Ignored fields
pass
elif feed.has_key(key + "_parsed"):
# Ignore unparsed date fields
pass
elif key.endswith("_detail"):
# Ignore detail fields
pass
elif key.endswith("_parsed"):
# Date fields
if feed[key] is not None:
self.set_as_date(key[:-len("_parsed")], feed[key])
elif key == "image":
# Image field: save all the information
if feed[key].has_key("url"):
self.set_as_string(key + "_url", feed[key].url)
if feed[key].has_key("link"):
self.set_as_string(key + "_link", feed[key].link)
if feed[key].has_key("title"):
self.set_as_string(key + "_title", feed[key].title)
if feed[key].has_key("width"):
self.set_as_string(key + "_width", str(feed[key].width))
if feed[key].has_key("height"):
self.set_as_string(key + "_height", str(feed[key].height))
else:
# String fields
try:
self.set_as_string(key, feed[key])
except KeyboardInterrupt:
raise
except:
log.exception("Ignored '%s' of <%s>, unknown format",
key, self.url)
def update_entries(self, entries):
"""Update entries from the feed.
This reads the entries supplied by feedparser and updates the
cached information about them. It's at this point we update
the 'updated' timestamp and keep the old one in 'last_updated',
these provide boundaries for acceptable entry times.
If this is the first time a feed has been updated then most of the
items will be marked as hidden, according to Planet.new_feed_items.
If the feed does not contain items which, according to the sort order,
should be there; those items are assumed to have been expired from
the feed or replaced and are removed from the cache.
"""
if not len(entries):
return
self.last_updated = self.updated
self.updated = time.gmtime()
new_items = []
feed_items = []
for entry in entries:
# Try really hard to find some kind of unique identifier
if entry.has_key("id"):
entry_id = cache.utf8(entry.id)
elif entry.has_key("link"):
entry_id = cache.utf8(entry.link)
elif entry.has_key("title"):
entry_id = (self.url + "/"
+ md5.new(cache.utf8(entry.title)).hexdigest())
elif entry.has_key("summary"):
entry_id = (self.url + "/"
+ md5.new(cache.utf8(entry.summary)).hexdigest())
else:
log.error("Unable to find or generate id, entry ignored")
continue
# Create the item if necessary and update
if self.has_item(entry_id):
item = self._items[entry_id]
else:
item = NewsItem(self, entry_id)
self._items[entry_id] = item
new_items.append(item)
item.update(entry)
feed_items.append(entry_id)
# Hide excess items the first time through
if self.last_updated is None and self._planet.new_feed_items \
and len(feed_items) > self._planet.new_feed_items:
item.hidden = "yes"
log.debug("Marked <%s> as hidden (new feed)", entry_id)
# Assign order numbers in reverse
new_items.reverse()
for item in new_items:
item.order = self.next_order = str(int(self.next_order) + 1)
# Check for expired or replaced items
feed_count = len(feed_items)
log.debug("Items in Feed: %d", feed_count)
for item in self.items(sorted=1):
if feed_count < 1:
break
elif item.id in feed_items:
feed_count -= 1
else:
del(self._items[item.id])
self._expired.append(item)
log.debug("Removed expired or replaced item <%s>", item.id)
def get_name(self, key):
"""Return the key containing the name."""
for key in ("name", "title"):
if self.has_key(key) and self.key_type(key) != self.NULL:
return self.get_as_string(key)
return ""
class NewsItem(cache.CachedInfo):
"""An item of news.
This class represents a single item of news on a channel. They're
created by members of the Channel class and accessible through it.
Properties:
id Channel-unique identifier for this item.
date Corrected UTC-Normalised update time, for sorting.
order Order in which items on the same date can be sorted.
hidden Item should be hidden (True if exists).
title One-line title (*).
link Link to the original format text (*).
summary Short first-page summary (*).
content Full HTML content.
modified Date the item claims to have been modified (*).
issued Date the item claims to have been issued (*).
created Date the item claims to have been created (*).
expired Date the item claims to expire (*).
author Name of the author (*).
publisher Name of the publisher (*).
category Category name (*).
comments Link to a page to enter comments (*).
license Link to the licence for the content (*).
source_name Name of the original source of this item (*).
source_link Link to the original source of this item (*).
Properties marked (*) will only be present if the original feed
contained them. Note that the various optional date fields are
simply claims made by the item and parsed from the information
given, 'date' is a far more reliable source of information.
Some feeds may define additional properties to those above.
"""
IGNORE_KEYS = ("categories", "contributors", "enclosures", "links",
"guidislink", "date")
def __init__(self, channel, id_):
cache.CachedInfo.__init__(self, channel._cache, id_)
self._channel = channel
self.id = id_
self.date = None
self.order = None
self.content = None
self.cache_read()
def update(self, entry):
"""Update the item from the feedparser entry given."""
for key in entry.keys():
if key in self.IGNORE_KEYS or key + "_parsed" in self.IGNORE_KEYS:
# Ignored fields
pass
elif entry.has_key(key + "_parsed"):
# Ignore unparsed date fields
pass
elif key.endswith("_detail"):
# Ignore detail fields
pass
elif key.endswith("_parsed"):
# Date fields
if entry[key] is not None:
self.set_as_date(key[:-len("_parsed")], entry[key])
elif key == "source":
# Source field: save both url and value
if entry[key].has_key("value"):
self.set_as_string(key + "_name", entry[key].value)
if entry[key].has_key("url"):
self.set_as_string(key + "_link", entry[key].url)
elif key == "content":
# Content field: concatenate the values
value = ""
for item in entry[key]:
value += cache.utf8(item.value)
self.set_as_string(key, value)
else:
# String fields
try:
self.set_as_string(key, entry[key])
except KeyboardInterrupt:
raise
except:
log.exception("Ignored '%s' of <%s>, unknown format",
key, self.id)
# Generate the date field if we need to
self.get_date("date")
def get_date(self, key):
"""Get (or update) the date key.
We check whether the date the entry claims to have been changed is
since we last updated this feed and when we pulled the feed off the
site.
If it is then it's probably not bogus, and we'll sort accordingly.
        If it isn't then we bound it appropriately; this ensures that
entries appear in posting sequence but don't overlap entries
added in previous updates and don't creep into the next one.
"""
if self.has_key(key) and self.key_type(key) != self.NULL:
return self.get_as_date(key)
for other_key in ("modified", "issued", "created"):
if self.has_key(other_key):
date = self.get_as_date(other_key)
break
else:
date = None
if date is not None:
if date > self._channel.updated:
date = self._channel.updated
elif date < self._channel.last_updated:
date = self._channel.updated
else:
date = self._channel.updated
self.set_as_date(key, date)
return date
def get_content(self, key):
"""Return the key containing the content."""
for key in ("content", "tagline", "summary"):
if self.has_key(key) and self.key_type(key) != self.NULL:
return self.get_as_string(key)
return ""
|
"""
Given a natural number n, compute the sum 1² + 2² + 3² + ... + n².
Input format
A natural number is entered.
Output format
Print the answer to the problem.
"""
n = int(input())
k = 0
for i in range(n+1):
k += i ** 2
print(k)
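# Sanity check (not required by the task): the closed form n*(n+1)*(2*n+1)//6 gives the
# same answer, e.g. for n = 3: 1 + 4 + 9 = 14 = 3*4*7 // 6.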
|
#!/usr/bin/env python
import logging
from scanpy_scripts.wrapper_utils import (
ScanpyArgParser, comma_separated_list,
read_input_object, write_output_object,
)
def main(argv=None):
argparser = ScanpyArgParser(argv, 'Run Louvain clustering on data with neighborhood graph computed')
argparser.add_input_object()
argparser.add_output_object()
argparser.add_argument('--output-text-file',
default=None,
help='File name in which to store text format set of clusters')
argparser.add_argument('--flavor',
choices=['vtraag', 'igraph'],
default='vtraag',
help='Choose between two packages for computing the clustering.'
'"vtraag" is much more powerful, and the default.')
argparser.add_argument('--resolution',
type=float,
default=1.0,
help='For the default flavor "vtraag", you can provide a resolution '
'(higher resolution means finding more and smaller clusters). '
'Default: 1.0')
argparser.add_argument('--restrict-to',
type=comma_separated_list('restrict-to', str),
default=None,
help='Restrict the clustering to the categories within the key for '
'sample annotation, tuple needs to contain (obs key, list of '
'categories).')
argparser.add_argument('--key-added',
default='louvain',
help='Key under which to add the cluster labels. Default: louvain')
argparser.add_argument('--use-weights',
action='store_true',
default=False,
help='Use weights from knn graph.')
argparser.add_argument('-s', '--random-seed',
type=int,
default=0,
help='The seed used to initialise optimisation. Default: 0')
args = argparser.args
logging.debug(args)
import scanpy.api as sc
adata = read_input_object(args.input_object_file, args.input_format)
if args.restrict_to is not None:
args.restrict_to = (args.restrict_to[0], args.restrict_to[1:])
sc.tl.louvain(adata,
flavor=args.flavor,
resolution=args.resolution,
restrict_to=args.restrict_to,
key_added=args.key_added,
use_weights=args.use_weights,
random_state=args.random_seed)
write_output_object(adata, args.output_object_file, args.output_format)
if args.output_text_file:
adata.obs[[args.key_added]].reset_index(level=0).rename(columns={'index':'cells'}).to_csv(
args.output_text_file, sep='\t', index=None)
logging.info('Done')
return 0
if __name__ == '__main__':
main()
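# Example invocation (sketch; the script name and the input/output flags added by
# add_input_object()/add_output_object() are assumed, the remaining flags are defined above):
#
#   python run_louvain.py --input-object-file adata.h5 --input-format anndata \
#       --output-object-file clustered.h5 --output-format anndata \
#       --resolution 0.5 --key-added louvain_r0.5 --output-text-file clusters.tsv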
|
"""
Alex Martelli's Borg non-pattern - not a singleton
See: http://www.aleax.it/5ep.html
"""
import logging
# import en_core_web_lg
import en_core_web_md
from gamechangerml.src.utilities.borg import Borg
logger = logging.getLogger(__name__)
class SpacyConfig(Borg):
def __init__(self):
Borg.__init__(self)
def _set_config(self, val):
self._value = val
def _get_config(self):
return getattr(self, "_value", None)
config = property(_get_config, _set_config)
def _log_metadata(nlp):
logger.info(
"{} version {} vector width = {}".format(
nlp.meta["vectors"]["name"],
nlp.meta["version"],
nlp.meta["vectors"]["width"],
)
)
def _load_spacy_name(model_name, disable):
if model_name == "spacy-large":
nlp = en_core_web_md.load(disable=disable)
_log_metadata(nlp)
logger.info("disabled components {}".format(str(disable)))
else:
raise ValueError("model not supported: {}".format(model_name))
return nlp
def _set_nlp(model_name, disable=None):
"""
Load the spaCy model
"""
if disable is None:
disable = list()
c = SpacyConfig()
if c.config is None:
nlp = _load_spacy_name(model_name, disable)
c.config = {"nlp": nlp}
return c
else:
logger.info("using existing language model")
return c
def get_lg_nlp():
"""
    Loads the large spaCy English model (loaded via `en_core_web_md` here) with the full pipeline.
Returns:
spacy.lang.en.English
"""
try:
c = _set_nlp("spacy-large", disable=None)
return c.config["nlp"]
except ValueError as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise e
def get_lg_vectors():
"""
    Load the large spaCy English model (loaded via `en_core_web_md` here) with the
    `ner`, `parser`, and `tagger` pipeline components disabled. The embedding vectors
    remain, so the model is smaller and faster to load.
Returns:
spacy.lang.en.English
"""
try:
c = _set_nlp("spacy-large", disable=["ner", "parser", "tagger"])
return c.config["nlp"]
except ValueError as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise e
def spacy_vector_width(nlp):
return nlp.meta["vectors"]["width"]
|
import unittest
class MainTestCase(unittest.TestCase):
def test_two_and_two(self):
four = 2 + 2
self.assertEqual(four, 4)
self.assertNotEqual(four, 5)
self.assertNotEqual(four, 6)
self.assertNotEqual(four, 22)
if __name__ == '__main__':
unittest.main()
|
#NODE. THIS GETS THE FILE NAME FROM THE SERVER AND SENDS THE FILE BACK
import socket
s = socket.socket()
host = socket.gethostname()
port = 7013
s.bind((host,port))
s.listen(5)
print("<<-NODE IS UP->>")
while True:
connection, address = s.accept() #Accepts the connection from the server
print("Node is connected to:",address)
    nameFile = connection.recv(1024).decode()  # Gets the file name from the client --> Server --> Node
    print(nameFile)
    file = open(nameFile, 'rb')  # Opens the file
    reading = file.read(1024)  # First 1024 bytes
    print("Sending %s to server" % nameFile)
while(reading):
connection.send(reading) #sends them to the node --> Server -->Client
reading = file.read(1024)
file.close()
print("File has been sent to the server")
connection.close()
s.close()
|
import asyncio
import logging
import random
import threading
from typing import Callable, Optional
import discord
from system import database, spigotmc
color_error = 0xfa5858
color_processing = 0x12a498
color_success = 0xdaa520
class Message:
def __init__(self, message: discord.Message, has_premium: bool, loading_emoji: str, run_later: Optional[Callable] = None):
self.user = message.author
self.spigot_user = message.content
self.channel = message.channel
self.run_later = run_later
self.loading_emoji = loading_emoji
self.response = None
self.has_premium = has_premium
self.no_buyer = False
self.spigot_already_linked = False
self.code_received = False
self.done = False
self.error = False
async def update(self) -> None:
if self.response is not None:
await self.response.delete()
await asyncio.sleep(.5)
if self.done:
color = color_success
title = self.user.name + "'s promotion is completed"
content = self.user.name + " has been successfully promoted to premium ✅"
if self.run_later is not None:
self.run_later(self.user, True)
elif self.error:
color = color_error
title = self.user.name + "'s promotion has been cancelled"
content = "An error occurred. Please try it again."
elif self.has_premium:
color = color_error
title = self.user.name + "'s promotion has been cancelled"
content = "You already have the premium role 👀"
elif self.spigot_already_linked:
color = color_error
title = self.user.name + "'s promotion has been cancelled"
content = self.spigot_user + " is already linked to a discord account 🤨"
elif self.no_buyer:
color = color_error
title = self.user.name + "'s promotion has been cancelled"
content = self.spigot_user + " hasn't purchased the plugin 😭"
elif self.code_received:
color = color_processing
title = "Verifying " + self.user.name
content = "The verification code has been sent. Check your SpigotMC inbox 📫\n\n" \
"https://www.spigotmc.org/conversations/"
else:
color = color_processing
title = "Verifying " + self.user.name
content = "Your verification is processing. Please wait " + self.loading_emoji
embed = discord.Embed(description=content, colour=color)
embed.set_author(name=title, icon_url=self.user.avatar_url)
self.response = await self.channel.send(embed=embed)
if color == color_error:
asyncio.create_task(self.__delete_response__())
async def __delete_response__(self) -> None:
await asyncio.sleep(10)
await self.response.delete()
if self.run_later is not None:
self.run_later(self.user, False)
class Process:
def __init__(self, client: discord.Client, message: discord.Message, run_later: Optional[Callable], run_after_browsing: Callable, premium_role: discord.Role,
forum_credentials: spigotmc.Credentials, database_credentials: database.Credentials, loading_emoji: str, logger: logging, has_premium: bool = False):
self.client = client
self.message = Message(message, has_premium, loading_emoji, run_later)
self.run_after_browsing = run_after_browsing
self.user = message.author
self.spigot = message.content
self.guild = message.guild
self.premium_role = premium_role
self.forum_credentials = forum_credentials
self.code = random.randint(100000, 999999)
self.database = database.Database(database_credentials)
self.logger = logger
async def start(self) -> None:
self.logger.info("Starting " + self.user.name + "'s promotion")
await self.message.update()
if self.database.is_discord_user_linked(self.user.id):
# Skip process, user has already been linked -> re-link
await self.__apply_premium__()
self.__stop__()
elif self.database.is_spigot_name_linked(self.spigot):
self.message.spigot_already_linked = True
await self.message.update()
self.__stop__()
else:
# Run check in another thread to avoid blocking the main thread
threading.Thread(target=asyncio.run, args=(self.__check_premium__(),)).start()
async def incoming_message(self, message: discord.Message) -> None:
if self.message.code_received:
if message.content == str(self.code):
self.database.link(self.spigot, self.user)
await self.__apply_premium__()
def __stop__(self) -> None:
self.database.connection.close()
self.logger.info(self.user.name + "'s promotion has been finished")
async def __apply_premium__(self) -> None:
await self.user.add_roles(self.premium_role)
self.message.done = True
await self.message.update()
self.__stop__()
async def __check_premium__(self) -> None:
forum = spigotmc.ForumAPI(self.forum_credentials, self.forum_credentials.google_chrome_location, self.logger)
forum.debug("start " + self.spigot + "'s verification")
try:
# thread-blocking
if forum.is_user_premium(self.spigot):
forum.send_message(self.spigot, self.forum_credentials.title, self.forum_credentials.content.format(code=self.code, discord=self.user.name + "#" + self.user.discriminator))
self.message.code_received = True
else:
self.message.no_buyer = True
forum.debug("done")
except Exception as e:
forum.debug("an error occurred: " + str(e))
self.message.error = True
# close browser
forum.close()
# go back to main thread
await self.__complete_browsing__()
async def __complete_browsing__(self) -> None:
self.client.loop.create_task(self.message.update())
# add bigger delay to avoid SpigotMC's message cooldown
await asyncio.sleep(20)
self.client.loop.create_task(self.run_after_browsing())
|
# config.py ---
#
# Description:
# Author: Goncalo Pais
# Date: 28 Jun 2019
# https://arxiv.org/abs/1904.01701
#
# Instituto Superior Técnico (IST)
# Code:
import argparse
arg_lists = []
parser = argparse.ArgumentParser()
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ("true", "1")
# -----------------------------------------------------------------------------
# Data
data_arg = add_argument_group("Data")
data_arg.add_argument(
"--data_pre", type=str, default="data", help=""
"prefix for the dump folder locations")
data_arg.add_argument(
"--data_tr", type=str, default="sun3d", help=""
"name of the dataset for train")
data_arg.add_argument(
"--data_va", type=str, default="sun3d", help=""
"name of the dataset for valid")
data_arg.add_argument(
"--data_te", type=str, default="sun3d", help=""
"name of the dataset for test")
# -----------------------------------------------------------------------------
# Network
net_arg = add_argument_group("Network")
net_arg.add_argument(
"--net_depth", type=int, default=12, help=""
"number of layers")
net_arg.add_argument(
"--net_nchannel", type=int, default=128, help=""
"number of channels in a layer")
net_arg.add_argument(
"--net_act_pos", type=str, default="post",
choices=["pre", "mid", "post"], help=""
"where the activation should be in case of resnet")
net_arg.add_argument(
"--net_gcnorm", type=str2bool, default=True, help=""
"whether to use context normalization for each layer")
net_arg.add_argument(
"--net_batchnorm", type=str2bool, default=True, help=""
"whether to use batch normalization")
net_arg.add_argument(
"--net_bn_test_is_training", type=str2bool, default=False, help=""
"is_training value for testing")
net_arg.add_argument(
"--net_concat_post", type=str2bool, default=False, help=""
"retrieve top k values or concat from different layers")
net_arg.add_argument(
"--gpu_options", type=str, default='gpu', choices=['gpu', 'cpu'],
help="choose which gpu or cpu")
net_arg.add_argument(
"--gpu_number", type=str, default='0',
help="choose which gpu number")
# -----------------------------------------------------------------------------
# Loss
loss_arg = add_argument_group("loss")
loss_arg.add_argument(
"--loss_decay", type=float, default=0.0, help=""
"l2 decay")
loss_arg.add_argument(
"--loss_classif", type=float, default=0.5, help=""
"weight of the classification loss")
loss_arg.add_argument(
"--loss_reconstruction", type=float, default=0.01, help=""
"weight of the essential loss")
loss_arg.add_argument(
"--loss_reconstruction_init_iter", type=int, default=20000, help=""
"initial iterations to run only the classification loss")
# -----------------------------------------------------------------------------
# Training
train_arg = add_argument_group("Train")
train_arg.add_argument(
"--run_mode", type=str, default="train", help=""
"run_mode")
train_arg.add_argument(
"--train_batch_size", type=int, default=16, help=""
"batch size")
train_arg.add_argument(
"--train_max_tr_sample", type=int, default=10000, help=""
"number of max training samples")
train_arg.add_argument(
"--train_max_va_sample", type=int, default=1000, help=""
"number of max validation samples")
train_arg.add_argument(
"--train_max_te_sample", type=int, default=1000, help=""
"number of max test samples")
train_arg.add_argument(
"--train_lr", type=float, default=1e-5, help=""
"learning rate")
train_arg.add_argument(
"--train_epoch", type=int, default=3750, help=""
"training iterations to perform")
train_arg.add_argument(
"--train_step", type=int, default=200, help=""
"training iterations to perform")
train_arg.add_argument(
"--res_dir", type=str, default="./logs", help=""
"base directory for results")
train_arg.add_argument(
"--log_dir", type=str, default="logs_lie", help=""
"save directory name inside results")
train_arg.add_argument(
"--test_log_dir", type=str, default="", help=""
"which directory to test inside results")
train_arg.add_argument(
"--val_intv", type=int, default=5, help=""
"validation interval")
train_arg.add_argument(
"--report_intv", type=int, default=100, help=""
"summary interval")
net_arg.add_argument(
"--loss_function", type=str, default='l1', choices=['l1', 'l2', 'wls', 'gm', 'l05'],
help="choose which loss function")
# -----------------------------------------------------------------------------
# Data Augmentation
d_aug = add_argument_group('Augmentation')
d_aug.add_argument("--data_aug", type=str2bool, default=False, help="Perform data Augmentation")
d_aug.add_argument("--aug_cl", type=str2bool, default=True, help="Perform Curriculum Learning")
d_aug.add_argument("--aug_dir", type=str, default="augmentented", help="save directory name inside results")
# -----------------------------------------------------------------------------
# Visualization
vis_arg = add_argument_group('Visualization')
vis_arg.add_argument(
"--tqdm_width", type=int, default=79, help=""
"width of the tqdm bar")
vis_arg.add_argument(
"--reg_flag", type=str2bool, default=False, help="Refine transformation")
test_arg = add_argument_group('Test')
test_arg.add_argument(
"--reg_function", type=str, default='fast', choices=['fast', 'global'], help="Registration function: global or fast")
test_arg.add_argument(
"--representation", type=str, default='lie', choices=['lie', 'quat', 'linear'], help="Type of Representation")
def setup_dataset(dataset_name):
dataset_name = dataset_name.split(".")
data_dir = []
for name in dataset_name:
if 'sun3d' == name:
data_dir.append(name)
assert data_dir
return data_dir
def get_config():
config, unparsed = parser.parse_known_args()
# Setup the dataset related things
for _mode in ["tr", "va", "te"]:
data_dir = setup_dataset(
getattr(config, "data_" + _mode))
setattr(config, "data_dir_" + _mode, data_dir)
# setattr(config, "data_geom_type_" + _mode, geom_type)
# setattr(config, "data_vis_th_" + _mode, vis_th)
return config, unparsed
def print_usage():
parser.print_usage()
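# Usage sketch (unrecognised CLI flags are returned in `unparsed` rather than rejected):
#
#   config, unparsed = get_config()
#   print(config.data_dir_tr)   # ['sun3d'] with the defaults above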
|
from unittest import TestCase
from analyzer import Pattern
my_pattern = Pattern(name="my pattern", score=0.9, regex="[re]")
my_pattern_dict = {"name": "my pattern", "regex": "[re]", "score": 0.9}
class TestPattern(TestCase):
def test_to_dict(self):
expected = my_pattern_dict
actual = my_pattern.to_dict()
assert expected == actual
def test_from_dict(self):
expected = my_pattern
actual = Pattern.from_dict(my_pattern_dict)
assert expected.name == actual.name
assert expected.score == actual.score
assert expected.regex == actual.regex
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import scanpy as sc
import matplotlib.pyplot as plt
from matplotlib.path import Path as mpl_path
import matplotlib.patches as patches
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
import numpy as np
import pandas as pd
def build_flux_mat(df, senders=None, receivers=None, piv_kws={"aggfunc":"size"}, dtype=int):
flux = df.pivot_table(index="celltype_a", columns="celltype_b", fill_value=0, **piv_kws).astype(int)
possible_cts = flux.columns.union(flux.index)
missing_cols = possible_cts.difference(flux.columns)
missing_rows = possible_cts.difference(flux.index)
for col in missing_cols:
flux[col] = 0
for row in missing_rows:
flux.loc[row, :] = 0
flux = flux.reindex(flux.columns)
if senders is not None:
flux.loc[~flux.index.isin(senders), :] = 0
if receivers is not None:
flux.loc[:, ~flux.columns.isin(receivers)] = 0
return flux.astype(dtype)
def rot_mat(a):
return np.array([np.cos(a), -np.sin(a), np.sin(a), np.cos(a)]).reshape(2, 2)
def rectangular_ideogram(
ax,
start,
end,
thickness=0.2,
angle=0,
origin_offset=0,
facecolor=None,
edgecolor=None,
label="",
label_ha="left"
):
start, end = start, end
xy = (start, 0)
width = end - start
height = thickness
    # Rectangle(angle=angle) rotates about xy, so we need to compute the offsets ourselves
transform = Affine2D().rotate_deg_around(0, 0, angle) + Affine2D().translate(origin_offset, 0) + ax.transData
patch = patches.Rectangle(xy, width, height, facecolor=facecolor, edgecolor=edgecolor,
transform=transform)
ax.add_patch(patch)
ax.text((end+start)/2, thickness, s=label, transform=transform, fontsize=6, ha=label_ha)
def chord(
ax,
start1=-1,
end1=-2,
start2=1,
end2=2,
angle1=0,
angle2=0,
x_offset=0,
chordwidth=0.7,
facecolor=None,
edgecolor=None
):
assert start1 <= 0
assert end1 <= 0
angle1 *= np.pi / 180
angle2 *= np.pi / 180
x1, y1 = abs(start1) * np.cos(angle1) - x_offset, abs(start1) * np.sin(angle1)
x2, y2 = abs(start2) * np.cos(angle2) + x_offset, abs(start2) * np.sin(angle2)
x3, y3 = abs(end2) * np.cos(angle2) + x_offset, abs(end2) * np.sin(angle2)
x4, y4 = abs(end1) * np.cos(angle1) - x_offset, abs(end1) * np.sin(angle1)
codes = [
mpl_path.MOVETO,
mpl_path.CURVE4,
mpl_path.CURVE4,
mpl_path.CURVE4,
mpl_path.LINETO,
mpl_path.CURVE4,
mpl_path.CURVE4,
mpl_path.CURVE4,
mpl_path.LINETO,
]
inner_rad = (start2 - start1) / 2
outer_rad = (end2 - end1) / 2
A = (angle2 + angle1) / 2
L = 4 * np.tan(A/4) / 3
verts = [
(x1, y1),
(x1 + inner_rad * L * np.sin(angle1), y1 - inner_rad * L * np.cos(angle1)),
(x2 - inner_rad * L * np.sin(angle2), y2 + inner_rad * L * np.cos(angle2)),
(x2, y2),
(x3, y3),
(x3 - outer_rad * L * np.sin(angle2), y3 + outer_rad * L * np.cos(angle2)),
(x4 + outer_rad * L * np.sin(angle1), y4 - outer_rad * L * np.cos(angle1)),
(x4, y4),
(x1, y1)
]
path = mpl_path(verts, codes)
patch = patches.PathPatch(path, facecolor=facecolor, edgecolor=edgecolor, alpha=0.4)
ax.add_patch(patch)
def sankey(
ax,
flux_df,
angle1=-150,
angle2=-30,
row_palette=sns.mpl_palette("tab10", 5),
col_palette=sns.mpl_palette("tab20", 12),
sender_length=1,
receiver_length=1,
rect_thickness=0.1,
split_dist=0.,
pad=0.01,
add_labels=True,
add_edges=False,
norm=True
):
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 0.1)
ax.set_axis_off()
m, n = flux_df.shape
senders = np.linspace(0, sender_length, m+1)
receivers = np.linspace(0, receiver_length, n+1)
for i, j, c, l in zip(senders, senders[1:], row_palette, flux_df.index):
l = l if add_labels else ""
rectangular_ideogram(
ax, i, j,
angle=angle1, origin_offset=-split_dist, thickness=-rect_thickness,
facecolor=c, label=l, label_ha="right", edgecolor="none" if not add_edges else "0.2"
)
for i, j, c, l in zip(receivers, receivers[1:], col_palette, flux_df.columns):
l = l if add_labels else ""
rectangular_ideogram(
ax, i, j, angle=angle2, origin_offset=split_dist, thickness=rect_thickness,
facecolor=c, label=l, label_ha="left", edgecolor="none" if not add_edges else "0.2"
)
sender_props = receiver_props = flux_df
if norm:
sender_props = flux_df / flux_df.sum(axis=1).values[:,None]
receiver_props = flux_df / flux_df.sum(axis=0)
start_pos = dict(zip(flux_df.index, senders))
start_pos.update(dict(zip(flux_df.columns, receivers)))
for i, s in enumerate(flux_df.index):
sender_scale = senders[i+1] - senders[i]
for j, r in enumerate(flux_df.columns):
receiver_scale = receivers[j+1] - receivers[j]
sp = sender_props.loc[s,r]
rp = receiver_props.loc[s,r]
if (sp == 0) or (rp == 0):
continue
chord(
ax,
start1=-start_pos[s],
end1=-start_pos[s] - sp * sender_scale,
start2=start_pos[r],
end2=start_pos[r] + rp * receiver_scale,
angle1=angle1,
angle2=angle2,
x_offset=split_dist,
#y_offset=0,
facecolor=row_palette[i],
edgecolor="none" if not add_edges else "0.2"
)
start_pos[s] += sp*sender_scale
start_pos[r] += rp*receiver_scale
return
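# Usage sketch (assumes a DataFrame with 'celltype_a'/'celltype_b' columns, one row per
# interaction, as expected by build_flux_mat above; `interactions_df` is made up):
#
#   fig, ax = plt.subplots(figsize=(6, 4))
#   flux = build_flux_mat(interactions_df)
#   sankey(ax, flux, row_palette=sns.mpl_palette("tab10", flux.shape[0]),
#          col_palette=sns.mpl_palette("tab20", flux.shape[1]))
#   fig.savefig("sankey.png", dpi=200)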
|
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
my_dict = dict()
temp = []
for i in range(len(s)):
if s[i] not in temp:
temp.append(s[i])
else:
temp_str = ''.join(temp)
# temp.clear()
temp = temp[temp.index(s[i])+1:]
temp.append(s[i])
my_dict[temp_str] = len(temp_str)
temp_str = ''.join(temp)
my_dict[temp_str] = len(temp_str)
return max(my_dict.values())
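# Note: the dict above only tracks candidate lengths. A common alternative is a sliding
# window over last-seen indices, which avoids materialising substrings (a sketch, not part
# of the original submission):
#
#   def length_of_longest_substring(s: str) -> int:
#       last_seen, start, best = {}, 0, 0
#       for i, ch in enumerate(s):
#           if ch in last_seen and last_seen[ch] >= start:
#               start = last_seen[ch] + 1
#           last_seen[ch] = i
#           best = max(best, i - start + 1)
#       return best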
|
# Copyright 2019-present, GraphQL Foundation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from casing import snake
from license import C_LICENSE_COMMENT
class Printer(object):
'''Printer for a simple list of types to be visited by the C visitor.
'''
def __init__(self):
self._types = []
def start_file(self):
print(C_LICENSE_COMMENT + '/** @generated */')
print('#define FOR_EACH_CONCRETE_TYPE(MACRO) \\')
def start_type(self, name):
self._types.append(name)
def field(self, type, name, nullable, plural):
pass
def end_type(self, name):
pass
def end_file(self):
print(' \\\n'.join('MACRO(%s, %s)' % (name, snake(name))
for name in self._types))
def start_union(self, name):
pass
def union_option(self, option):
pass
def end_union(self, name):
pass
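# Example of the generated output (type names are illustrative; assumes casing.snake
# converts CamelCase to snake_case):
#
#   /** @generated */
#   #define FOR_EACH_CONCRETE_TYPE(MACRO) \
#   MACRO(OperationDefinition, operation_definition) \
#   MACRO(FragmentSpread, fragment_spread)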
|
## An implementation of a Parallel Oblivious Unpredictable Function (POUF)
# Stanislaw Jarecki, Xiaomin Liu: Fast Secure Computation of Set Intersection.
# Published in SCN 2010: 418-435
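# Protocol sketch: the client blinds each hashed message as y_i = a_i * H(m_i), the server
# applies its key to get z_i = k * y_i, and unblinding multiplies by a_i^{-1} mod the group
# order, so x_i = a_i^{-1} * z_i = k * H(m_i) -- the PRF value, independent of the blinding.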
from petlib.ec import EcGroup
import pytest
def POUF_setup(nid=713):
"""Parameters for the group"""
G = EcGroup(nid)
g = G.generator()
o = G.order()
return (G, g, o)
def POUF_keyGen(params):
"""Generate the secret key k"""
G, g, o = params
return o.random()
def POUF_blind(params, messages):
"""Blind the messages input to the POUF"""
G, g, o = params
hi = [G.hash_to_point(m) for m in messages]
ai = [o.random() for _ in messages]
yi = [a * h for a,h in zip(ai, hi)]
return ai, yi
def POUF_mac(params, k, yi):
""" Apply the unpredctable function to the messages """
return [k * y for y in yi]
def POUF_unblind(params, ai, zi):
""" Unblind the messages to recover the raw outputs of the POUF """
G, g, o = params
xi = [a.mod_inverse(o) * z for a,z in zip(ai, zi)]
return xi
### ----------- TESTS ---------------
def test_setup():
params = POUF_setup()
k = POUF_keyGen(params)
assert k
def test_blind():
params = POUF_setup()
ai, yi = POUF_blind(params, [b"1", b"2"])
assert len(ai) == 2
assert len(yi) == 2
def test_mac():
params = POUF_setup()
k = POUF_keyGen(params)
ai, yi = POUF_blind(params, [b"1", b"2"])
zi = POUF_mac(params, k, yi)
assert len(zi) == len(ai)
def test_unblind():
params = POUF_setup()
G, g, o = params
k = POUF_keyGen(params)
ai, yi = POUF_blind(params, [b"1", b"2"])
zi = POUF_mac(params, k, yi)
# Make sure unblinding works
fi = POUF_unblind(params, ai, zi)
hi = [G.hash_to_point(m) for m in [b"1", b"2"]]
assert fi == [k * h for h in hi]
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestScalaLibraryIntegrationTest(PantsRunIntegrationTest):
def test_bundle(self):
pants_run = self.run_pants(['compile',
'testprojects/src/scala/org/pantsbuild/testproject/javasources'])
self.assert_success(pants_run)
|
import io, gzip, boto3
from urllib.parse import urlparse
def stream(url):
s3 = boto3.resource('s3')
_, bucket_name, key = urlparse(url).path.split('/', 2)
obj = s3.Object(
bucket_name=bucket_name,
key=key
)
buffer = io.BytesIO(obj.get()["Body"].read())
if key.endswith('.gz'):
return gzip.open(buffer, 'rt')
else:
return buffer
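# Usage sketch: the parsing above takes the bucket from the first path segment, i.e. it
# expects path-style URLs such as 'https://s3.amazonaws.com/<bucket>/<key>'; the bucket and
# key below are made up. Gzipped keys come back as a text-mode stream, everything else as a
# BytesIO.
#
#   for line in stream('https://s3.amazonaws.com/my-bucket/exports/events.jsonl.gz'):
#       print(line.rstrip())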
|
"""
Configures access to BingAds API and where to store results
"""
def data_dir() -> str:
"""The directory where result data is written to"""
return '/tmp/bingads/'
def first_date() -> str:
"""The first day from which on data will be downloaded"""
return '2015-01-01'
def developer_token() -> str:
"""The developer token that is used to access the BingAds API"""
return '012345679ABCDEF'
def environment() -> str:
"""The deployment environment"""
return 'production'
def oauth2_client_id() -> str:
"""The Oauth client id obtained from the BingAds developer center"""
return 'abc1234-1234-1234-abc-abcd1234'
def oauth2_client_secret() -> str:
"""The Oauth client secret obtained from the BingAds developer center"""
return 'ABCDefgh!1234567890'
def oauth2_refresh_token() -> str:
"""The Oauth refresh token returned from the adwords-downloader-refresh-oauth2-token script"""
return 'ABCDefgh!1234567890ABCDefgh!1234567890ABCDefgh!1234567890ABCDefgh!1234567890ABCDefgh!1234567890ABCDefgh!1234567890ABCDefgh!1234567890'
def timeout() -> int:
"""The maximum amount of time (in milliseconds) that you want to wait for the report download"""
return 3600000
def total_attempts_for_single_day() -> int:
"""The attempts to download a single day (ad and keyword performance) in case of HTTP errors or timeouts"""
return 5
def retry_timeout_interval() -> int:
"""number of seconds to wait before trying again to download a single day"""
return 10
def output_file_version() -> str:
"""A suffix that is added to output files, denoting a version of the data format"""
return 'v4'
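# A hypothetical sketch (not part of this config module) of how a downloader
# might combine the retry settings above: attempt a day-level download up to
# total_attempts_for_single_day() times, sleeping retry_timeout_interval()
# seconds between attempts.
#
#     import time
#     for attempt in range(total_attempts_for_single_day()):
#         try:
#             download_day(day)                 # hypothetical helper
#             break
#         except IOError:
#             time.sleep(retry_timeout_interval())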
|
from ..requestsender import Wrapper
class Stickers(object):
__slots__ = ['fosscord', 's', 'edited_s', 'main_url', 'log']
def __init__(self, fosscord, s, main_url, log): #s is the requests session object
self.fosscord = fosscord
self.main_url = main_url+'/' if not main_url.endswith('/') else main_url
self.s = s
header_mods = {'remove': ['Authorization', 'X-Super-Properties', 'X-Debug-Options']}
self.edited_s = Wrapper.edited_req_session(s, header_mods)
self.log = log
def get_stickers(self, country_code, locale):
country_code = country_code.upper()
u = '{}sticker-packs?country_code={}&locale={}'
url = u.format(self.fosscord, country_code, locale)
return Wrapper.send_request(self.s, 'get', url, log=self.log)
def get_sticker_file(self, sticker_id): #this is an apng
u = '{}stickers/{}.png?size=512'
url = u.format(self.main_url, sticker_id)
return Wrapper.send_request(self.edited_s, 'get', url, log=self.log)
def get_sticker_pack(self, sticker_pack_id):
u = '{}sticker-packs/{}'
url = u.format(self.fosscord, sticker_pack_id)
return Wrapper.send_request(self.s, 'get', url, log=self.log)
|
import logger
import os
class Trial:
def __init__(self, name="Default_Trial", trial=["tailSetAngle(50, 1)", "tailSetAngle(20, 1.5)", "tailSetAngle(50, 2)", "tailSetAngle(20, 2.5)", "tailSetAngle(50, 3)", "tailSetAngle(20, 3.5)", "tailSetAngle(50, 4)", "tailSetAngle(20, 4.5)", "tailSetAngle(50, 5)", "tailSetAngle(20, 5.5)", "tailSetAngle(-20, 6)", "audioPlay('track004.mp3', 7)", "audioStop(30)"], DEBUG=False):
self.name = name
self.execStack = list(
map(lambda n: "self.lastOutput = Trial." + n, trial))
self._DEBUG = DEBUG
self.lastOutput = None
i = 0
csvExists = os.path.isfile("logs/" + name + str(i) + ".csv")
while (csvExists == True):
i = i + 1
csvExists = os.path.isfile("logs/" + name + str(i) + ".csv")
self.logger = logger.Logger("logs/" + name + str(i) + ".csv")
def getName(self):
return self.name
def isDone(self):
# checks to see if the trial is over
if len(self.execStack) == 0:
return (True)
return (False)
def popNextLine(self):
# runs next command in execStack
if self.isDone():
return
exec(self.execStack.pop(0))
return self.lastOutput
def popAllLines(self):
# runs everything in the execStack
output = []
for i in range(len(self.execStack)):
output.append(Trial.popNextLine(self))
return output
def tailGetAngle(time):
# Note this is comm sendable
return (time, "Robot Servo 0 getAngle")
def tailSetAngle(angle, time):
# Note this is comm sendable
return (time, "Robot Servo 0 setAngle " + str(angle))
    def tailGoLimp(time):
        # Note this is comm sendable
        return (time, "Robot Servo 0 goLimp")
def coverGetAngle(time):
# Note this is comm sendable
return (time, "Robot Servo 1 getAngle")
def coverSetAngle(angle, time):
# Note this is comm sendable
return (time, "Robot Servo 1 setAngle " + str(angle))
    def coverGoLimp(time):
        # Note this is comm sendable (Servo 1 is the cover; the original name
        # duplicated tailGoLimp and shadowed the Servo 0 version above)
        return (time, "Robot Servo 1 goLimp")
def audioSetVolume(percent, time):
# Note this is comm sendable
return (time, "Robot Audio setVolume " + str(percent))
def audioPlay(fileName, time):
# Note this is comm sendable
return (time, "Robot Audio play " + fileName)
def audioIsPlaying(time):
# Note this is comm sendable
return (time, "Robot Audio isPlaying")
def audioPause(time):
# Note this is comm sendable
return (time, "Robot Audio pause")
def audioResume(time):
# Note this is comm sendable
return (time, "Robot Audio resume")
def audioIsPaused(time):
# Note this is comm sendable
return (time, "Robot Audio isPaused")
def audioStop(time):
# Note this is comm sendable
return (time, "Robot Audio stop")
def audioIsStopped(time):
# Note this is comm sendable
return (time, "Robot Audio isStopped")
def audioGetFileName(time):
# Note this is comm sendable
return (time, "Robot Audio getFileName")
    def audioSetVolume(percent, time):
        # Note this is comm sendable (overrides the earlier audioSetVolume
        # definition; this variant appends a "%" suffix)
        return (time, "Robot Audio setVolume " + str(percent) + "%")
def tailMotion(self, positionList, startTime, rate):
pass
# This doesn't work
# Note this is not comm sendable.
# Adds the list of comm sendable functions that would create the intended motion to the stack then does runNextLine
@staticmethod
def advancedTailMotion(WIP):
pass
# Note this is not comm sendable
# Adds the list of comm sendable functions that would create the intended motion to the stack then does runNextLine
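# Usage sketch (assumes the companion logger module and a logs/ directory are
# available): each popped line is a (time, command-string) tuple ready to be
# sent over the robot's comm link.
#
#     trial = Trial(name="Demo_Trial")
#     while not trial.isDone():
#         timestamp, command = trial.popNextLine()
#         # e.g. (1, "Robot Servo 0 setAngle 50")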
|
"""
categories: Types,str
description: UnicodeDecodeError not raised when expected
cause: Unknown
workaround: Unknown
"""
try:
print(repr(str(b"\xa1\x80", 'utf8')))
print('Should not get here')
except UnicodeDecodeError:
print('UnicodeDecodeError')
|
# -*- coding: utf-8 -*-
from entityClasses import Message, Action, ActionType
from dbWrapper import RobertLogMSSQL
from cn_utility import num_cn2digital, extract_cn_time
import re
import datetime
import config
import urllib
import cn_utility
import warning
class ActionCenter:
#SQL
rlSQL = RobertLogMSSQL(host=config.db_server,user=config.db_user,pwd=config.db_pwd,db="robertlog")
#Action List
    FeedKeywords = {u"吃了",u"喂了", u"喂奶", u"吃奶"}  # "ate", "fed", "breastfed", "nursed"
    ReportsKeywords = {u"报告", u"总结", u"情况"}  # "report", "summary", "status"
    WeeklyReportsKeywords = {u"一周报告", u"一周总结", u"一周情况", u"本周总结"}  # weekly report/summary
    NotesKeywords = {u"备注", u"笔记"}  # "remark", "notes"
    mLKeywords = {u"ml",u"毫升"}  # "ml", "milliliters"
    MinKeywords = {u"分钟"}  # "minutes"
    ADKeywords = {u"AD",u"吃药", u"ad",u"喂药"}  # vitamin AD, "take/give medicine"
    PoopKeywords = {u"拉屎",u"大便", }  # "poop", "bowel movement"
    BathKeywords = {u"洗澡"}  # "bath"
    RemoveKeywords = {u"撤销", u"删除"}  # "undo", "delete"
    FallSleepKeywords = {u"睡着"}#, u"睡觉"}  ("fell asleep"; "睡觉" = "sleep", disabled)
    WakeUpKeywords = {u"醒了", u"睡醒"}  # "woke up"
    ListImageKeywords = {u"看照片"}  # "view photos"
    ListSleepTimeKeywords = {u"几点睡",u"睡多久", u"睡了多久"}  # "what time / how long did (the baby) sleep"
    DebugMsgKeywords = {u"调试消息"}  # "debug messages"
    EatCaKeywords = {u"补钙", u"钙片"}  # "calcium supplement", "calcium tablets"
    ComFoodKeywords = [u"辅食"]  # "complementary (solid) food"
    ComFoodListKeywords = {u"食谱"}  # "food log / menu"
users_can_write = {"fake_weichat_id"}
user_mapping = {"fake_weichat_id" : "Mom"}
actiontype_skip_log = {ActionType.UnKnown, ActionType.Reports, ActionType.WeeklyReports,\
ActionType.Remove, ActionType.NoPermission, ActionType.ListImage, \
ActionType.SleepTime, ActionType.DebugMsg, ActionType.RemoveSpecific, \
ActionType.ErrStatus, ActionType.ComFoodList}
def check_strList(self, str, listStr):
for s in listStr:
if str.find(s) >= 0 :
return True
return False
def DetectAction(self, msg):
action = Action(msg)
num2d = num_cn2digital()
ect = extract_cn_time()
content = num2d.replace_cn_digital(msg.RawContent)
t = ect.extract_time(content)
if t is not None and len(t) > 0:
action.TimeStamp = t[0]
content = ect.remove_time(content)
if self.check_strList(msg.RawContent, self.NotesKeywords):
action.Type = ActionType.Notes
action.Detail = msg.RawContent
for k in self.NotesKeywords:
action.Detail = action.Detail.lstrip(k)
elif self.check_strList(msg.RawContent, self.ListSleepTimeKeywords):
action.Type = ActionType.SleepTime
self.get_latest_sleep(action, num2d, ect)
elif self.check_strList(msg.RawContent, self.ADKeywords):
action.Type = ActionType.AD
elif self.check_strList(msg.RawContent, self.EatCaKeywords):
action.Type = ActionType.EatCa
elif self.check_strList(msg.RawContent, self.ComFoodListKeywords):
action.Type = ActionType.ComFoodList
elif self.check_strList(msg.RawContent, self.ComFoodKeywords):
action.Type = ActionType.ComFood
start = content.index(self.ComFoodKeywords[0])
detail = content[start+2:].strip()
action.Detail = detail
elif self.check_strList(content, self.FeedKeywords):
#feed
action.Type = ActionType.Feed
action.Status = Action.Active
nums = re.findall(r"\d+",content)
if len(nums) > 0:
if self.check_strList(content, self.MinKeywords):
action.Detail = "母乳:" + nums[0] + u"分钟"
else:
action.Detail = "奶瓶:" + nums[0] + "mL"
elif self.check_strList(msg.RawContent, self.WeeklyReportsKeywords):
action.Type = ActionType.WeeklyReports
elif self.check_strList(msg.RawContent, self.RemoveKeywords):
action.Type = ActionType.Remove
elif self.check_strList(msg.RawContent, self.ReportsKeywords):
#reports
action.Type = ActionType.Reports
action.Status = Action.Active
elif self.check_strList(msg.RawContent, self.PoopKeywords):
action.Type = ActionType.Poop
elif self.check_strList(msg.RawContent, self.BathKeywords):
action.Type = ActionType.Bath
elif self.check_strList(msg.RawContent, self.FallSleepKeywords):
lastAct = self.rlSQL.GetSleepStatus()
if lastAct.Type == ActionType.WakeUp:
action.Type = ActionType.FallSleep
else:
action.Type = ActionType.ErrStatus
action.Detail = "重复的睡觉,上一次是:"
action.Detail += lastAct.TimeStamp.strftime( "%H:%M")
elif self.check_strList(msg.RawContent, self.WakeUpKeywords):
lastAct = self.rlSQL.GetSleepStatus()
if lastAct.Type == ActionType.FallSleep:
action.Type = ActionType.WakeUp
self.get_latest_sleep(action, num2d, ect)
else:
action.Type = ActionType.ErrStatus
action.Detail = "重复的睡醒,上一次是:"
action.Detail += lastAct.TimeStamp.strftime( "%H:%M")
elif self.check_strList(msg.RawContent, self.DebugMsgKeywords):
action.Type = ActionType.DebugMsg
elif self.check_strList(msg.RawContent, self.ListImageKeywords):
action.Type = ActionType.ListImage
files = cn_utility.listimgfiles(config.ImageRoot, 7)
action.ImageList = []
for f in files:
action.ImageList.append((f[5:16], \
"http://<yourhost>.eastasia.cloudapp.azure.com/robert_image?name="+f))
#TODO::load from config
else:
action.Type = ActionType.UnKnown
try:
int(msg.RawContent)
msgs = self.rlSQL.GetMsgFromUser(msg.FromUser, 2)
if self.check_strList(msgs[1].RawContent, self.RemoveKeywords):
action.Type = ActionType.RemoveSpecific
except ValueError:
pass
if action.FromUser not in self.users_can_write and action.Type not in \
{ActionType.Reports, ActionType.WeeklyReports, ActionType.ListImage}:
action.Type = ActionType.NoPermission
return action
def get_latest_sleep(self, action, num2d, ect):
sleep = self.rlSQL.GetLastFallSleep()
if not sleep:
action.Type = ActionType.UnKnown
else:
#check previous time
pre_content = num2d.replace_cn_digital(sleep.RawContent)
sleep_t = ect.extract_time(pre_content, sleep.TimeStamp)
if sleep_t is None or len(sleep_t) <= 0:
sleep_t = sleep.TimeStamp
else:
sleep_t = sleep_t[0]
delta_minutes = int((action.TimeStamp - sleep_t).total_seconds()/60)
action.Detail = "从{0}到{1},睡了{2}小时{3}分钟".format(sleep_t.strftime( "%H:%M"), \
action.TimeStamp.strftime( "%H:%M"), int(delta_minutes/60), delta_minutes%60)
ImageFileTemplate = config.ImageRoot + r"{0}_{1}.jpg"
def process_img_post(self, msg):
timag_name = "D:\\tmp\\{0}_{1}.jpg".format(msg.TimeStamp.strftime("%Y_%m_%d_%H_%M"), msg.MediaId[:6])
img_name = self.ImageFileTemplate.format(msg.TimeStamp.strftime("%Y_%m_%d_%H_%M"), msg.MediaId[:6])
urllib.request.urlretrieve(msg.PicUrl, timag_name)
cn_utility.reshapimg(timag_name, img_name)
return "收到照片"
def GenResponse(self, action):
response = "抱歉没听懂."
if action.Type == ActionType.Feed:
response = "收到,萝卜在{1}吃了{0}".format(action.Detail, action.TimeStamp.strftime( "%H:%M"))
elif action.Type == ActionType.Reports:
response = "统计结果:"
cur = datetime.datetime.utcnow() + datetime.timedelta(days=2)
actions = self.rlSQL.GetActionReports(30)
actions.sort(key=lambda a:a.TimeStamp)
lastmilk = sleepstatus = None
for a in actions:
if a.Status == Action.Deleted:
continue
if a.Type == ActionType.FallSleep:
sleepstatus = a
continue
elif a.Type == ActionType.WakeUp:
sleepstatus = a
elif a.Type == ActionType.Feed:
lastmilk = a
if a.Type not in self.actiontype_skip_log :
if a.TimeStamp.day != cur.day:
cur = a.TimeStamp
response += "\n{0}日(第{1}天)记录:\n".format(cur.strftime("%m-%d"), \
config.get_days_to_birth(cur))
response += (a.GenBrief() + "\n")
tnow = cn_utility.GetNowForUTC8()
if sleepstatus.Type == ActionType.FallSleep:
#is sleeping
response += (sleepstatus.GenBrief() + "\n")
else :
delta_minutes = int((tnow - sleepstatus.TimeStamp).total_seconds()/60)
if delta_minutes > 150:
response += "\n醒了{0}小时{1}分钟了,该睡了".format(int(delta_minutes/60), delta_minutes%60)
delta_minutes = int((tnow - lastmilk.TimeStamp).total_seconds()/60)
if delta_minutes > 150:
response += "\n上次喂奶是{0}小时{1}分钟前:{2}".format(int(delta_minutes/60), delta_minutes%60, lastmilk.GenBrief())
elif action.Type == ActionType.WeeklyReports:
response = "统计结果: \n"
cur = datetime.datetime.utcnow() + datetime.timedelta(days=2)
actions = self.rlSQL.GetActionReports(300)
milk = 0
breast = 0
breastNum = 0
poop = 0
sleep = 0
daysShown = 0
for a in actions:
if a.Status == Action.Deleted:
continue
if a.TimeStamp.day != cur.day and (milk !=0 or breast !=0):
response += "{0}日:奶瓶{1}mL,母乳{2}次共{3}分钟,睡觉{5}小时{6}分钟,大便{4}次\n".format(\
cur.strftime("%m-%d"), milk, breastNum, breast, poop, int(sleep/60), sleep%60)
milk = 0
breast = 0
poop = 0
breastNum = 0
sleep = 0
daysShown += 1
if daysShown >= 8 : break
cur = a.TimeStamp
if a.Type == ActionType.Feed:
nums = re.findall(r"\d+",a.Detail)
if len(nums) > 0:
d = int(nums[0])
if a.Detail.find("母乳") >= 0:
breast += d
breastNum += 1
elif a.Detail.find("奶瓶") >= 0:
milk += d
elif a.Type == ActionType.Poop:
poop += 1
elif a.Type == ActionType.WakeUp:
ect = extract_cn_time()
sleep += ect.extract_time_delta(a.Detail)
#print(cur, sleep, a.Detail)
elif a.Type == ActionType.Notes:
#response += "{0}日{1}\n".format(cur.strftime("%m-%d"),a.GenBrief())
pass
if (milk !=0 or breast !=0) and daysShown < 7:
response += "{0}日:奶瓶{1}mL,母乳{2}次共{3}分钟,睡觉{5}小时{6}分钟,大便{4}次\n".format(\
cur.strftime("%m-%d"), milk, breastNum, breast, poop, int(sleep/60), sleep%60)
elif action.Type == ActionType.Remove:
response = "请输入要删除的项目序号\n"
actions = self.rlSQL.GetActionReports(6)
actions.sort(key=lambda a:a.TimeStamp)
for a in actions:
if a.Status == Action.Deleted:
continue
response += "序号:{0} 内容:{1},{2}\n".format(\
a.ActionID, self.user_mapping.get(a.FromUser, a.FromUser), a.GenBrief())
elif action.Type == ActionType.RemoveSpecific:
self.rlSQL.DeleteAction(int(action.message.RawContent))
response ="已删除一条记录.\n"
elif action.Type == ActionType.ListImage:
return action.ImageList
elif action.Type == ActionType.DebugMsg:
msg_list = self.rlSQL.GetLastNumMsg(30)
response = "List:\n"
for m in msg_list:
response +="[{0}] {1}:{2} \n".format(m.TimeStamp.strftime( "%H:%M"), self.user_mapping.get(m.FromUser, m.FromUser), m.RawContent)
elif action.Type == ActionType.NoPermission:
response = "抱歉您没有权限,可以尝试 '总结' 或 '一周总结' 查看萝卜成长状态。"
elif action.Type == ActionType.ComFoodList:
foodList = self.rlSQL.GetActionList( ActionType.ComFood, 80)
foodList.sort(key=lambda a:a.TimeStamp)
cur = datetime.datetime.utcnow() + datetime.timedelta(days=2)
for f in foodList:
if f.TimeStamp.day != cur.day: #a new day
response += "\n[{0}] {1} ".format(f.TimeStamp.strftime("%m-%d"), f.Detail)
cur = f.TimeStamp
else:
response += f.Detail #f.GenBrief()
else:
response = action.GenBrief()
response += warning.GetWarnings(self.rlSQL)
return response
def Receive(self, raw_str):
msg = Message(raw_str)
self.rlSQL.LogMessage(msg)
if msg.MsgType == "image":
return self.process_img_post(msg)
action = self.DetectAction(msg)
if action.Type not in self.actiontype_skip_log :
self.rlSQL.AppendAction(action)
else:
pass
return self.GenResponse(action)
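# A standalone sketch of the keyword-matching dispatch used by DetectAction
# (illustrative only; the real method also normalizes Chinese numerals,
# extracts timestamps, checks write permission and consults the database):
#
#     def classify(text):
#         rules = [(ActionType.Feed, ActionCenter.FeedKeywords),
#                  (ActionType.Poop, ActionCenter.PoopKeywords),
#                  (ActionType.Bath, ActionCenter.BathKeywords)]
#         for action_type, keywords in rules:
#             if any(k in text for k in keywords):
#                 return action_type
#         return ActionType.UnKnown
#
#     classify(u"宝宝吃了100毫升")   # -> ActionType.Feed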
|
from collections import Counter
inputs = []
with open('day01.input') as f:
inputs = [int(x) for x in f.readlines()]
def most_common(xs):
return Counter(xs).most_common(1)[0][0]
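# Why most_common works: for the pair (a, b) with a + b == 2020, the product
# a*b is generated twice in part1 (once from a, once from b), while other
# products typically appear only once. part2 uses the same idea for the
# 2020-summing triple, whose product shows up once per ordering.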
def part1(ns):
return most_common([n * (2020-n) for n in ns])
def part2(ns):
return most_common([a*b*c for a in ns for b in ns for c in ns if a+b+c == 2020])
print(part1(inputs))
print(part2(inputs))
|
# -*- coding: utf-8 -*-
"""
Module to train the GNN.
"""
# Python Standard
import logging
# External dependencies
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
# Internals
from .symbols import DEVICE
def train(model, dataset, n_epoch, learning_rate):
"""
Training function for a GNN.
Parameters
----------
model : torch.nn.Module
Model to train.
dataset : ProteinDataset
Dataset to work on.
n_epoch : int
        Number of epochs.
    learning_rate : float
        Learning rate passed to the Adam optimizer.
Returns
-------
losses : list of float
List of the losses for each epoch.
"""
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
losses = []
train_step = make_train_step(model, optimizer)
for epoch in range(1, n_epoch + 1):
logging.info("Epoch %2d/%d", epoch, n_epoch)
for _, xdata, ydata in dataset:
loss = train_step(xdata, ydata)
scheduler.step()
logging.info(" -> loss = %f", loss)
losses.append(loss)
return losses
def evaluate(model, dataset):
"""
Evaluate a model on a given dataset and return the mean AUC.
"""
aucs = []
with torch.no_grad():
for idx, (name, xdata, target) in enumerate(dataset):
ydata = model.forward(xdata)
try:
aucs.append(roc_auc_score(target[0].cpu().numpy(), ydata.cpu().numpy()))
except ValueError:
logging.warning(" Complex %s discarded because no positive sample." % name)
return np.array(aucs).mean() if len(aucs) else np.nan
def make_train_step(model, optimizer):
"""
Builds function that performs a step in the train loop.
Extracted from:
https://towardsdatascience.com/understanding-pytorch-with-an-example-a-step-by-step-tutorial-81fc5f8c4e8e#58f2
"""
def train_step(xdata, ydata):
# Zeroes gradients
optimizer.zero_grad()
# Sets model to TRAIN mode
model.train()
# Makes predictions
yhat = model(xdata)
loss_fn = torch.nn.BCEWithLogitsLoss(
weight=ydata[1].to(DEVICE)
)
# Computes loss
loss = loss_fn(yhat, torch.squeeze(ydata[0]).to(DEVICE))
# Computes gradients
loss.backward()
# Updates parameters
optimizer.step()
# Returns the loss
return loss.item()
# Returns the function that will be called inside the train loop
return train_step
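# Usage sketch (hypothetical model and dataset objects; the real ones live
# elsewhere in this package):
#
#     model = MyGNN().to(DEVICE)                                   # hypothetical module
#     losses = train(model, train_set, n_epoch=20, learning_rate=1e-3)
#     mean_auc = evaluate(model, validation_set)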
|
from django.test import TestCase
from survey.models import SurveyOrderModel
class SurveyOrderTest(TestCase):
def setUp(self):
SurveyOrderModel.objects.create(
customer_faktura_id=1234,
)
def test_blank_survey_created(self):
obj = SurveyOrderModel.objects.get(customer_faktura_id=1234)
self.assertTrue(obj)
# def test_finish_survey_order_without_address_success(self):
# obj = SurveyOrderModel.objects.get(customer_faktura_id=1234)
# obj.number_of_collars = '$1$2'
# obj.animal_species = 'wolf'
# obj.battery_size = '$2D$3D'
# obj.belt_shape = ''
# obj.nom_collar_circumference = '2'
# obj.vhf_beacon_frequency = 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut l'
# obj.mortality_sensor = 1
# obj.utc_lmt = 'utc'
# obj.globalstar = True
#
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from os import path
import test_inventory
import unittest
TARGET_DIR = path.join(os.getcwd(), 'tests', 'inventory')
from osa_toolkit import manage as mi
def setUpModule():
test_inventory.make_config()
def tearDownModule():
os.remove(test_inventory.USER_CONFIG_FILE)
class TestExportFunction(unittest.TestCase):
def setUp(self):
self.inv = test_inventory.get_inventory()
def tearDown(self):
test_inventory.cleanup()
def test_host_is_present(self):
host_inv = mi.export_host_info(self.inv)['hosts']
self.assertIn('aio1', host_inv.keys())
def test_groups_added(self):
host_inv = mi.export_host_info(self.inv)['hosts']
self.assertIn('groups', host_inv['aio1'].keys())
def test_variables_added(self):
host_inv = mi.export_host_info(self.inv)['hosts']
self.assertIn('hostvars', host_inv['aio1'].keys())
def test_number_of_hosts(self):
host_inv = mi.export_host_info(self.inv)['hosts']
self.assertEqual(len(self.inv['_meta']['hostvars']),
len(host_inv))
def test_all_information_added(self):
all_info = mi.export_host_info(self.inv)['all']
self.assertIn('provider_networks', all_info)
def test_all_lb_information(self):
all_info = mi.export_host_info(self.inv)['all']
inv_all = self.inv['all']['vars']
self.assertEqual(inv_all['internal_lb_vip_address'],
all_info['internal_lb_vip_address'])
class TestRemoveIpfunction(unittest.TestCase):
def setUp(self):
self.inv = test_inventory.get_inventory()
def tearDown(self):
test_inventory.cleanup()
def test_ips_removed(self):
mi.remove_ip_addresses(self.inv)
mi.remove_ip_addresses(self.inv, TARGET_DIR)
hostvars = self.inv['_meta']['hostvars']
for host, variables in hostvars.items():
has_networks = 'container_networks' in variables
if variables.get('is_metal', False):
continue
self.assertFalse(has_networks)
def test_inventory_item_removed(self):
inventory = self.inv
# Make sure we have log_hosts in the original inventory
self.assertIn('log_hosts', inventory)
mi.remove_inventory_item("log_hosts", inventory)
mi.remove_inventory_item("log_hosts", inventory, TARGET_DIR)
# Now make sure it's gone
        self.assertNotIn('log_hosts', inventory)
def test_metal_ips_kept(self):
mi.remove_ip_addresses(self.inv)
hostvars = self.inv['_meta']['hostvars']
for host, variables in hostvars.items():
has_networks = 'container_networks' in variables
if not variables.get('is_metal', False):
continue
self.assertTrue(has_networks)
def test_ansible_host_vars_removed(self):
mi.remove_ip_addresses(self.inv)
hostvars = self.inv['_meta']['hostvars']
for host, variables in hostvars.items():
has_host = 'ansible_host' in variables
if variables.get('is_metal', False):
continue
self.assertFalse(has_host)
def test_multiple_calls(self):
"""Removal should fail silently if keys are absent."""
mi.remove_ip_addresses(self.inv)
mi.remove_ip_addresses(self.inv)
if __name__ == '__main__':
unittest.main(catchbreak=True)
|
# -*- coding: utf-8 -*-
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(687)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_687(client, variables):
scenario.initial(variables)
scenario.variables["VPC_name_1"] = "VPC_api_test_1"
scenario.variables["remark"] = "remark_api_test"
scenario.variables["tag"] = "tag_api_test"
scenario.variables["Subnet_name_1_1"] = "subnet_1_1"
scenario.variables["subnet_netmask"] = 24
scenario.variables["project_id"] = "org-achi1o"
scenario.run(client)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "GetProjectListResponse"),
],
action="GetProjectList",
)
def get_project_list_00(client, variables):
d = {}
try:
resp = client.uaccount().get_project_list(d)
except exc.RetCodeException as e:
resp = e.json()
variables["project_list"] = utest.value_at_path(resp, "ProjectSet")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVPC",
)
def create_vpc_01(client, variables):
d = {
"Tag": variables.get("tag"),
"Remark": variables.get("remark"),
"Region": variables.get("Region"),
"Network": ["172.16.16.0/20"],
"Name": variables.get("VPC_name_1"),
}
try:
resp = client.vpc().create_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VPCId_1"] = utest.value_at_path(resp, "VPCId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSubnet",
)
def create_subnet_02(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Tag": variables.get("tag"),
"SubnetName": variables.get("Subnet_name_1_1"),
"Subnet": "172.16.17.0",
"Remark": variables.get("remark"),
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_1_1"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "UpdateSubnetAttributeResponse"),
],
action="UpdateSubnetAttribute",
)
def update_subnet_attribute_03(client, variables):
d = {
"Tag": "qa",
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().update_subnet_attribute(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeSubnet",
)
def describe_subnet_04(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
"Offset": 1,
"Limit": 1,
}
try:
resp = client.vpc().describe_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
action="CreateVPC",
)
def create_vpc_05(client, variables):
d = {
"Region": variables.get("Region"),
"Network": ["192.168.16.0/20"],
"Name": "vpc_2",
}
try:
resp = client.vpc().create_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VPCId_2"] = utest.value_at_path(resp, "VPCId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSubnet",
)
def create_subnet_06(client, variables):
d = {
"VPCId": variables.get("VPCId_2"),
"SubnetName": "Subnet_2_1",
"Subnet": "192.168.17.0",
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_2_1"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "CreateSubnetResponse"),
],
action="CreateSubnet",
)
def create_subnet_07(client, variables):
d = {
"VPCId": variables.get("VPCId_2"),
"Tag": "Subnet_2_2",
"SubnetName": "Subnet_2_2",
"Subnet": "192.168.18.0",
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_2_2"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.VPCId", variables.get("VPCId_1")),
("str_eq", "DataSet.0.VPCName", variables.get("VPC_name_1")),
("str_eq", "DataSet.0.SubnetId", variables.get("SubnetId_1_1")),
("str_eq", "DataSet.0.SubnetName", variables.get("Subnet_name_1_1")),
("str_eq", "DataSet.0.Tag", "qa"),
("str_eq", "DataSet.0.Remark", variables.get("remark")),
("str_eq", "DataSet.0.SubnetType", 2),
("str_eq", "DataSet.0.Netmask", 24),
],
action="DescribeSubnet",
)
def describe_subnet_08(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().describe_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="AllocateVIP",
)
def allocate_vip_09(client, variables):
d = {
"Zone": variables.get("Zone"),
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Remark": "vip_tag1",
"Region": variables.get("Region"),
"Name": "vip_api_auto",
}
try:
resp = client.unet().allocate_vip(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VIPId_1"] = utest.value_at_path(resp, "VIPSet.0.VIPId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "VIPSet.0.VPCId", variables.get("VPCId_1")),
("str_eq", "VIPSet.0.VIPId", variables.get("VIPId_1")),
("str_eq", "VIPSet.0.SubnetId", variables.get("SubnetId_1_1")),
],
action="DescribeVIP",
)
def describe_vip_10(client, variables):
d = {
"Zone": variables.get("Zone"),
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.unet().describe_vip(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VIP_ip_1"] = utest.value_at_path(resp, "DataSet.0")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "TotalCount", 1),
("str_eq", "DataSet.0.ResourceId", variables.get("VIPId_1")),
("str_eq", "DataSet.0.IP", variables.get("VIP_ip_1")),
],
action="DescribeSubnetResource",
)
def describe_subnet_resource_11(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
"Offset": 0,
"Limit": 20,
}
try:
resp = client.vpc().describe_subnet_resource(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="ReleaseVIP",
)
def release_vip_12(client, variables):
d = {
"Zone": variables.get("Zone"),
"VIPId": variables.get("VIPId_1"),
"Region": variables.get("Region"),
}
try:
resp = client.unet().release_vip(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_13(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_14(client, variables):
d = {
"SubnetId": variables.get("SubnetId_2_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_15(client, variables):
d = {
"SubnetId": variables.get("SubnetId_2_2"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "AddVPCNetworkResponse"),
],
action="AddVPCNetwork",
)
def add_vpc_network_16(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Region": variables.get("Region"),
"Network": ["10.100.96.0/20"],
}
try:
resp = client.vpc().add_vpc_network(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DescribeVPCResponse"),
],
action="DescribeVPC",
)
def describe_vpc_17(client, variables):
d = {
"VPCIds": [variables.get("VPCId_1")],
"Region": variables.get("Region"),
}
try:
resp = client.vpc().describe_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVPCIntercom",
)
def create_vpc_intercom_18(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Region": variables.get("Region"),
"DstVPCId": variables.get("VPCId_2"),
"DstRegion": variables.get("Region"),
"DstProjectId": funcs.search_value(
variables.get("project_list"), "IsDefault", True, "ProjectId"
),
}
try:
resp = client.vpc().create_vpc_intercom(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.VPCId", variables.get("VPCId_2")),
],
action="DescribeVPCIntercom",
)
def describe_vpc_intercom_19(client, variables):
d = {"VPCId": variables.get("VPCId_1"), "Region": variables.get("Region")}
try:
resp = client.vpc().describe_vpc_intercom(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteVPCIntercom",
)
def delete_vpc_intercom_20(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Region": variables.get("Region"),
"DstVPCId": variables.get("VPCId_2"),
"DstRegion": variables.get("Region"),
"DstProjectId": funcs.search_value(
variables.get("project_list"), "IsDefault", True, "ProjectId"
),
}
try:
resp = client.vpc().delete_vpc_intercom(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteVPC",
)
def delete_vpc_21(client, variables):
d = {"VPCId": variables.get("VPCId_1"), "Region": variables.get("Region")}
try:
resp = client.vpc().delete_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
action="DeleteVPC",
)
def delete_vpc_22(client, variables):
d = {"VPCId": variables.get("VPCId_2"), "Region": variables.get("Region")}
try:
resp = client.vpc().delete_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
|
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:51:28+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Extra, Field
class ResourceNotFoundException(BaseModel):
__root__: Any
class ResourceInUseException(ResourceNotFoundException):
pass
class InvalidArgumentException(ResourceNotFoundException):
pass
class ConcurrentModificationException(ResourceNotFoundException):
pass
class InvalidRequestException(ResourceNotFoundException):
pass
class InvalidApplicationConfigurationException(ResourceNotFoundException):
pass
class CodeValidationException(ResourceNotFoundException):
pass
class LimitExceededException(ResourceNotFoundException):
pass
class TooManyTagsException(ResourceNotFoundException):
pass
class CreateApplicationSnapshotResponse(BaseModel):
pass
class UnsupportedOperationException(ResourceNotFoundException):
pass
class DeleteApplicationResponse(CreateApplicationSnapshotResponse):
pass
class DeleteApplicationSnapshotResponse(CreateApplicationSnapshotResponse):
pass
class UnableToDetectSchemaException(ResourceNotFoundException):
pass
class ResourceProvisionedThroughputExceededException(ResourceNotFoundException):
pass
class ServiceUnavailableException(ResourceNotFoundException):
pass
class StartApplicationResponse(CreateApplicationSnapshotResponse):
pass
class StopApplicationResponse(CreateApplicationSnapshotResponse):
pass
class TagResourceResponse(CreateApplicationSnapshotResponse):
pass
class UntagResourceResponse(CreateApplicationSnapshotResponse):
pass
class ApplicationName(BaseModel):
__root__: Annotated[
str, Field(max_length=128, min_length=1, regex='[a-zA-Z0-9_.-]+')
]
class ApplicationVersionId(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=999999999.0)]
class ConditionalToken(BaseModel):
__root__: Annotated[
str, Field(max_length=512, min_length=1, regex='[a-zA-Z0-9-_+/=]+')
]
class ResourceARN(BaseModel):
__root__: Annotated[str, Field(max_length=2048, min_length=1, regex='arn:.*')]
class Id(BaseModel):
__root__: Annotated[
str, Field(max_length=50, min_length=1, regex='[a-zA-Z0-9_.-]+')
]
class CodeContentType(Enum):
PLAINTEXT = 'PLAINTEXT'
ZIPFILE = 'ZIPFILE'
class ApplicationDescription(BaseModel):
__root__: Annotated[str, Field(max_length=1024, min_length=0)]
class RuntimeEnvironment(Enum):
SQL_1_0 = 'SQL-1_0'
FLINK_1_6 = 'FLINK-1_6'
FLINK_1_8 = 'FLINK-1_8'
FLINK_1_11 = 'FLINK-1_11'
ZEPPELIN_FLINK_1_0 = 'ZEPPELIN-FLINK-1_0'
class RoleARN(ResourceARN):
pass
class ApplicationStatus(Enum):
DELETING = 'DELETING'
STARTING = 'STARTING'
STOPPING = 'STOPPING'
READY = 'READY'
RUNNING = 'RUNNING'
UPDATING = 'UPDATING'
AUTOSCALING = 'AUTOSCALING'
FORCE_STOPPING = 'FORCE_STOPPING'
MAINTENANCE = 'MAINTENANCE'
ROLLING_BACK = 'ROLLING_BACK'
ROLLED_BACK = 'ROLLED_BACK'
class Timestamp(BaseModel):
__root__: datetime
class ApplicationMode(Enum):
STREAMING = 'STREAMING'
INTERACTIVE = 'INTERACTIVE'
class ApplicationMaintenanceWindowStartTime(BaseModel):
__root__: Annotated[
str, Field(max_length=5, min_length=5, regex='([01][0-9]|2[0-3]):[0-5][0-9]')
]
class ApplicationMaintenanceWindowEndTime(ApplicationMaintenanceWindowStartTime):
pass
class ApplicationMaintenanceConfigurationUpdate(BaseModel):
"""
Describes the updated maintenance configuration for the application.
"""
ApplicationMaintenanceWindowStartTimeUpdate: ApplicationMaintenanceWindowStartTime
class ApplicationRestoreType(Enum):
SKIP_RESTORE_FROM_SNAPSHOT = 'SKIP_RESTORE_FROM_SNAPSHOT'
RESTORE_FROM_LATEST_SNAPSHOT = 'RESTORE_FROM_LATEST_SNAPSHOT'
RESTORE_FROM_CUSTOM_SNAPSHOT = 'RESTORE_FROM_CUSTOM_SNAPSHOT'
class SnapshotName(BaseModel):
__root__: Annotated[
str, Field(max_length=256, min_length=1, regex='[a-zA-Z0-9_.-]+')
]
class ApplicationRestoreConfiguration(BaseModel):
"""
Specifies the method and snapshot to use when restarting an application using previously saved application state.
"""
ApplicationRestoreType: ApplicationRestoreType
SnapshotName: Optional[SnapshotName] = None
class BooleanObject(BaseModel):
__root__: bool
class ApplicationSummary(BaseModel):
"""
Provides application summary information, including the application Amazon Resource Name (ARN), name, and status.
"""
ApplicationName: ApplicationName
ApplicationARN: ResourceARN
ApplicationStatus: ApplicationStatus
ApplicationVersionId: ApplicationVersionId
RuntimeEnvironment: RuntimeEnvironment
ApplicationMode: Optional[ApplicationMode] = None
class ApplicationSummaries(BaseModel):
__root__: List[ApplicationSummary]
class ApplicationVersionSummary(BaseModel):
"""
The summary of the application version.
"""
ApplicationVersionId: ApplicationVersionId
ApplicationStatus: ApplicationStatus
class ApplicationVersionSummaries(BaseModel):
__root__: List[ApplicationVersionSummary]
class ArtifactType(Enum):
UDF = 'UDF'
DEPENDENCY_JAR = 'DEPENDENCY_JAR'
class AuthorizedUrl(BaseModel):
__root__: Annotated[str, Field(max_length=2048, min_length=1)]
class BasePath(BaseModel):
__root__: Annotated[
str, Field(max_length=1024, min_length=1, regex="[a-zA-Z0-9/!-_.*'()]+")
]
class BucketARN(ResourceARN):
pass
class RecordRowDelimiter(BaseModel):
__root__: Annotated[str, Field(max_length=1024, min_length=1)]
class RecordColumnDelimiter(RecordRowDelimiter):
pass
class CSVMappingParameters(BaseModel):
"""
<p>For a SQL-based Kinesis Data Analytics application, provides additional mapping information when the record format uses delimiters, such as CSV. For example, the following sample records use CSV format, where the records use the <i>'\n'</i> as the row delimiter and a comma (",") as the column delimiter: </p> <p> <code>"name1", "address1"</code> </p> <p> <code>"name2", "address2"</code> </p>
"""
RecordRowDelimiter: RecordRowDelimiter
RecordColumnDelimiter: RecordColumnDelimiter
class ConfigurationType(Enum):
DEFAULT = 'DEFAULT'
CUSTOM = 'CUSTOM'
class CheckpointInterval(BaseModel):
__root__: Annotated[int, Field(ge=1.0)]
class MinPauseBetweenCheckpoints(BaseModel):
__root__: Annotated[int, Field(ge=0.0)]
class CheckpointConfiguration(BaseModel):
"""
Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see <a href="https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance"> Checkpoints for Fault Tolerance</a> in the <a href="https://ci.apache.org/projects/flink/flink-docs-release-1.8/">Apache Flink Documentation</a>.
"""
ConfigurationType: ConfigurationType
CheckpointingEnabled: Optional[BooleanObject] = None
CheckpointInterval: Optional[CheckpointInterval] = None
MinPauseBetweenCheckpoints: Optional[MinPauseBetweenCheckpoints] = None
class CheckpointConfigurationDescription(BaseModel):
"""
Describes checkpointing parameters for a Flink-based Kinesis Data Analytics application.
"""
ConfigurationType: Optional[ConfigurationType] = None
CheckpointingEnabled: Optional[BooleanObject] = None
CheckpointInterval: Optional[CheckpointInterval] = None
MinPauseBetweenCheckpoints: Optional[MinPauseBetweenCheckpoints] = None
class CheckpointConfigurationUpdate(BaseModel):
"""
Describes updates to the checkpointing parameters for a Flink-based Kinesis Data Analytics application.
"""
ConfigurationTypeUpdate: Optional[ConfigurationType] = None
CheckpointingEnabledUpdate: Optional[BooleanObject] = None
CheckpointIntervalUpdate: Optional[CheckpointInterval] = None
MinPauseBetweenCheckpointsUpdate: Optional[MinPauseBetweenCheckpoints] = None
class LogStreamARN(ResourceARN):
pass
class CloudWatchLoggingOptionDescription(BaseModel):
"""
Describes the Amazon CloudWatch logging option.
"""
CloudWatchLoggingOptionId: Optional[Id] = None
LogStreamARN: LogStreamARN
RoleARN: Optional[RoleARN] = None
class CloudWatchLoggingOptionUpdate(BaseModel):
"""
Describes the Amazon CloudWatch logging option updates.
"""
CloudWatchLoggingOptionId: Id
LogStreamARNUpdate: Optional[LogStreamARN] = None
class CloudWatchLoggingOptionUpdates(BaseModel):
__root__: List[CloudWatchLoggingOptionUpdate]
class TextContent(BaseModel):
__root__: Annotated[str, Field(max_length=102400, min_length=0)]
class ZipFileContent(BaseModel):
__root__: Annotated[str, Field(max_length=52428800, min_length=0)]
class CodeMD5(BaseModel):
__root__: Annotated[str, Field(max_length=128, min_length=128)]
class CodeSize(BaseModel):
__root__: Annotated[int, Field(ge=0.0, le=52428800.0)]
class UrlType(Enum):
FLINK_DASHBOARD_URL = 'FLINK_DASHBOARD_URL'
ZEPPELIN_UI_URL = 'ZEPPELIN_UI_URL'
class SessionExpirationDurationInSeconds(BaseModel):
__root__: Annotated[int, Field(ge=1800.0, le=43200.0)]
class DatabaseARN(ResourceARN):
pass
class S3ContentBaseLocation(BaseModel):
"""
The S3 bucket that holds the application information.
"""
BucketARN: BucketARN
BasePath: Optional[BasePath] = None
class DeployAsApplicationConfiguration(BaseModel):
"""
The information required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state..
"""
S3ContentLocation: S3ContentBaseLocation
class S3ContentBaseLocationDescription(S3ContentBaseLocation):
"""
The description of the S3 base location that holds the application.
"""
pass
class DeployAsApplicationConfigurationDescription(BaseModel):
"""
The configuration information required to deploy an Amazon Data Analytics Studio notebook as an application with durable state.
"""
S3ContentLocationDescription: S3ContentBaseLocationDescription
class S3ContentBaseLocationUpdate(BaseModel):
"""
The information required to update the S3 base location that holds the application.
"""
BucketARNUpdate: BucketARN
BasePathUpdate: Optional[BasePath] = None
class DeployAsApplicationConfigurationUpdate(BaseModel):
"""
Updates to the configuration information required to deploy an Amazon Data Analytics Studio notebook as an application with durable state..
"""
S3ContentLocationUpdate: S3ContentBaseLocationUpdate
class RecordFormatType(Enum):
JSON = 'JSON'
CSV = 'CSV'
class DestinationSchema(BaseModel):
"""
Describes the data format when records are written to the destination in a SQL-based Kinesis Data Analytics application.
"""
RecordFormatType: RecordFormatType
class FileKey(RecordRowDelimiter):
pass
class JobPlanDescription(BaseModel):
__root__: str
class FlinkRunConfiguration(BaseModel):
"""
Describes the starting parameters for a Flink-based Kinesis Data Analytics application.
"""
AllowNonRestoredState: Optional[BooleanObject] = None
class InAppStreamName(BaseModel):
__root__: Annotated[str, Field(max_length=32, min_length=1, regex='[^-\\s<>&]*')]
class InAppStreamNames(BaseModel):
__root__: List[InAppStreamName]
class InAppTableName(BaseModel):
__root__: Annotated[str, Field(max_length=32, min_length=1)]
class KinesisStreamsInput(BaseModel):
"""
Identifies a Kinesis data stream as the streaming source. You provide the stream's Amazon Resource Name (ARN).
"""
ResourceARN: ResourceARN
class KinesisFirehoseInput(KinesisStreamsInput):
"""
For a SQL-based Kinesis Data Analytics application, identifies a Kinesis Data Firehose delivery stream as the streaming source. You provide the delivery stream's Amazon Resource Name (ARN).
"""
pass
class KinesisStreamsInputDescription(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the Kinesis data stream that is configured as the streaming source in the application input configuration.
"""
ResourceARN: ResourceARN
RoleARN: Optional[RoleARN] = None
class KinesisFirehoseInputDescription(KinesisStreamsInputDescription):
"""
Describes the Amazon Kinesis Data Firehose delivery stream that is configured as the streaming source in the application input configuration.
"""
pass
class InputLambdaProcessor(KinesisStreamsInput):
"""
An object that contains the Amazon Resource Name (ARN) of the AWS Lambda function that is used to preprocess records in the stream in a SQL-based Kinesis Data Analytics application.
"""
pass
class InputLambdaProcessorDescription(KinesisStreamsInputDescription):
"""
For a SQL-based Kinesis Data Analytics application, an object that contains the Amazon Resource Name (ARN) of the AWS Lambda function that is used to preprocess records in the stream.
"""
pass
class InputLambdaProcessorUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, represents an update to the <a>InputLambdaProcessor</a> that is used to preprocess the records in the stream.
"""
ResourceARNUpdate: ResourceARN
class InputParallelismCount(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=64.0)]
class InputParallelismUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, provides updates to the parallelism count.
"""
CountUpdate: InputParallelismCount
class InputProcessingConfigurationUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes updates to an <a>InputProcessingConfiguration</a>.
"""
InputLambdaProcessorUpdate: InputLambdaProcessorUpdate
class RecordEncoding(BaseModel):
__root__: Annotated[str, Field(max_length=5, min_length=5, regex='UTF-8')]
class InputStartingPosition(Enum):
NOW = 'NOW'
TRIM_HORIZON = 'TRIM_HORIZON'
LAST_STOPPED_POINT = 'LAST_STOPPED_POINT'
class KinesisStreamsInputUpdate(InputLambdaProcessorUpdate):
"""
When you update the input configuration for a SQL-based Kinesis Data Analytics application, provides information about a Kinesis stream as the streaming source.
"""
pass
class KinesisFirehoseInputUpdate(InputLambdaProcessorUpdate):
"""
For a SQL-based Kinesis Data Analytics application, when updating application input configuration, provides information about a Kinesis Data Firehose delivery stream as the streaming source.
"""
pass
class RecordRowPath(BaseModel):
__root__: Annotated[
str, Field(max_length=65535, min_length=1, regex='^(?=^\\$)(?=^\\S+$).*$')
]
class JSONMappingParameters(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, provides additional mapping information when JSON is the record format on the streaming source.
"""
RecordRowPath: RecordRowPath
class KinesisAnalyticsARN(ResourceARN):
pass
class KinesisFirehoseOutput(KinesisStreamsInput):
"""
For a SQL-based Kinesis Data Analytics application, when configuring application output, identifies a Kinesis Data Firehose delivery stream as the destination. You provide the stream Amazon Resource Name (ARN) of the delivery stream.
"""
pass
class KinesisFirehoseOutputDescription(KinesisStreamsInputDescription):
"""
For a SQL-based Kinesis Data Analytics application's output, describes the Kinesis Data Firehose delivery stream that is configured as its destination.
"""
pass
class KinesisFirehoseOutputUpdate(InputLambdaProcessorUpdate):
"""
For a SQL-based Kinesis Data Analytics application, when updating an output configuration using the <a>UpdateApplication</a> operation, provides information about a Kinesis Data Firehose delivery stream that is configured as the destination.
"""
pass
class KinesisStreamsOutput(KinesisStreamsInput):
"""
When you configure a SQL-based Kinesis Data Analytics application's output, identifies a Kinesis data stream as the destination. You provide the stream Amazon Resource Name (ARN).
"""
pass
class KinesisStreamsOutputDescription(KinesisStreamsInputDescription):
"""
For an SQL-based Kinesis Data Analytics application's output, describes the Kinesis data stream that is configured as its destination.
"""
pass
class KinesisStreamsOutputUpdate(InputLambdaProcessorUpdate):
"""
When you update a SQL-based Kinesis Data Analytics application's output configuration using the <a>UpdateApplication</a> operation, provides information about a Kinesis data stream that is configured as the destination.
"""
pass
class LambdaOutput(KinesisStreamsInput):
"""
When you configure a SQL-based Kinesis Data Analytics application's output, identifies an AWS Lambda function as the destination. You provide the function Amazon Resource Name (ARN) of the Lambda function.
"""
pass
class LambdaOutputDescription(KinesisStreamsInputDescription):
"""
For a SQL-based Kinesis Data Analytics application's output, describes the AWS Lambda function that is configured as its destination.
"""
pass
class LambdaOutputUpdate(InputLambdaProcessorUpdate):
"""
When you update an SQL-based Kinesis Data Analytics application's output configuration using the <a>UpdateApplication</a> operation, provides information about an AWS Lambda function that is configured as the destination.
"""
pass
class ListSnapshotsInputLimit(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=50.0)]
class NextToken(BaseModel):
__root__: Annotated[str, Field(max_length=512, min_length=1)]
class ListApplicationVersionsInputLimit(ListSnapshotsInputLimit):
pass
class ListApplicationsInputLimit(ListSnapshotsInputLimit):
pass
class LogLevel(Enum):
INFO = 'INFO'
WARN = 'WARN'
ERROR = 'ERROR'
DEBUG = 'DEBUG'
class MappingParameters(BaseModel):
"""
When you configure a SQL-based Kinesis Data Analytics application's input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.
"""
JSONMappingParameters: Optional[JSONMappingParameters] = None
CSVMappingParameters: Optional[CSVMappingParameters] = None
class MavenArtifactId(SnapshotName):
pass
class MavenGroupId(SnapshotName):
pass
class MavenVersion(SnapshotName):
pass
class MetricsLevel(Enum):
APPLICATION = 'APPLICATION'
TASK = 'TASK'
OPERATOR = 'OPERATOR'
PARALLELISM = 'PARALLELISM'
class ObjectVersion(ApplicationDescription):
pass
class OutputDescription(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the application output configuration, which includes the in-application stream name and the destination where the stream data is written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.
"""
OutputId: Optional[Id] = None
Name: Optional[InAppStreamName] = None
KinesisStreamsOutputDescription: Optional[KinesisStreamsOutputDescription] = None
KinesisFirehoseOutputDescription: Optional[KinesisFirehoseOutputDescription] = None
LambdaOutputDescription: Optional[LambdaOutputDescription] = None
DestinationSchema: Optional[DestinationSchema] = None
class OutputUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes updates to the output configuration identified by the <code>OutputId</code>.
"""
OutputId: Id
NameUpdate: Optional[InAppStreamName] = None
KinesisStreamsOutputUpdate: Optional[KinesisStreamsOutputUpdate] = None
KinesisFirehoseOutputUpdate: Optional[KinesisFirehoseOutputUpdate] = None
LambdaOutputUpdate: Optional[LambdaOutputUpdate] = None
DestinationSchemaUpdate: Optional[DestinationSchema] = None
class OutputUpdates(BaseModel):
__root__: List[OutputUpdate]
class Parallelism(CheckpointInterval):
pass
class ParallelismPerKPU(CheckpointInterval):
pass
class ParsedInputRecordField(JobPlanDescription):
pass
class ParsedInputRecord(BaseModel):
__root__: List[ParsedInputRecordField]
class ProcessedInputRecord(JobPlanDescription):
pass
class PropertyMap(BaseModel):
pass
class Config:
extra = Extra.allow
class PropertyGroup(BaseModel):
"""
Property key-value pairs passed into an application.
"""
PropertyGroupId: Id
PropertyMap: PropertyMap
class PropertyKey(AuthorizedUrl):
pass
class PropertyValue(AuthorizedUrl):
pass
class RawInputRecord(JobPlanDescription):
pass
class RecordColumnName(BaseModel):
__root__: Annotated[str, Field(max_length=256, min_length=1, regex='[^-\\s<>&]*')]
class RecordColumnMapping(BaseModel):
__root__: Annotated[str, Field(max_length=65535, min_length=0)]
class RecordColumnSqlType(BaseModel):
__root__: Annotated[str, Field(max_length=100, min_length=1)]
class RecordColumn(BaseModel):
"""
<p>For a SQL-based Kinesis Data Analytics application, describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.</p> <p>Also used to describe the format of the reference data source.</p>
"""
Name: RecordColumnName
Mapping: Optional[RecordColumnMapping] = None
SqlType: RecordColumnSqlType
class S3ReferenceDataSource(BaseModel):
"""
<p>For a SQL-based Kinesis Data Analytics application, identifies the Amazon S3 bucket and object that contains the reference data.</p> <p>A Kinesis Data Analytics application loads reference data only once. If the data changes, you call the <a>UpdateApplication</a> operation to trigger reloading of data into your application. </p>
"""
BucketARN: Optional[BucketARN] = None
FileKey: Optional[FileKey] = None
class S3ReferenceDataSourceDescription(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, provides the bucket name and object key name that stores the reference data.
"""
BucketARN: BucketARN
FileKey: FileKey
ReferenceRoleARN: Optional[RoleARN] = None
class S3ReferenceDataSourceUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the Amazon S3 bucket name and object key name for an in-application reference table.
"""
BucketARNUpdate: Optional[BucketARN] = None
FileKeyUpdate: Optional[FileKey] = None
class RunConfigurationUpdate(BaseModel):
"""
Describes the updates to the starting parameters for a Kinesis Data Analytics application.
"""
FlinkRunConfiguration: Optional[FlinkRunConfiguration] = None
ApplicationRestoreConfiguration: Optional[ApplicationRestoreConfiguration] = None
class SecurityGroupId(JobPlanDescription):
pass
class SecurityGroupIds(BaseModel):
__root__: Annotated[List[SecurityGroupId], Field(max_items=5, min_items=1)]
class SnapshotStatus(Enum):
CREATING = 'CREATING'
READY = 'READY'
DELETING = 'DELETING'
FAILED = 'FAILED'
class SubnetId(JobPlanDescription):
pass
class SubnetIds(BaseModel):
__root__: Annotated[List[SubnetId], Field(max_items=16, min_items=1)]
class TagKey(BaseModel):
__root__: Annotated[str, Field(max_length=128, min_length=1)]
class TagValue(BaseModel):
__root__: Annotated[str, Field(max_length=256, min_length=0)]
class Tag(BaseModel):
"""
A key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see <a href="https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html">Using Tagging</a>.
"""
Key: TagKey
Value: Optional[TagValue] = None
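# Usage sketch (illustration only; the key/value strings are made up): because
# TagKey and TagValue are pydantic custom-root-type models, a Tag would
# typically be built by wrapping plain strings explicitly, e.g.
#     Tag(Key=TagKey(__root__="team"), Value=TagValue(__root__="analytics"))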
class TagKeys(BaseModel):
__root__: Annotated[List[TagKey], Field(max_items=200, min_items=1)]
class VpcId(JobPlanDescription):
pass
class VpcConfigurationUpdate(BaseModel):
"""
Describes updates to the VPC configuration used by the application.
"""
VpcConfigurationId: Id
SubnetIdUpdates: Optional[SubnetIds] = None
SecurityGroupIdUpdates: Optional[SecurityGroupIds] = None
class ZeppelinMonitoringConfiguration(BaseModel):
"""
Describes configuration parameters for Amazon CloudWatch logging for a Kinesis Data Analytics Studio notebook. For more information about CloudWatch logging, see <a href="https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html">Monitoring</a>.
"""
LogLevel: LogLevel
class ZeppelinMonitoringConfigurationDescription(BaseModel):
"""
The monitoring configuration for Apache Zeppelin within a Kinesis Data Analytics Studio notebook.
"""
LogLevel: Optional[LogLevel] = None
class ZeppelinMonitoringConfigurationUpdate(BaseModel):
"""
Updates to the monitoring configuration for Apache Zeppelin within a Kinesis Data Analytics Studio notebook.
"""
LogLevelUpdate: LogLevel
class CreateApplicationPresignedUrlResponse(BaseModel):
AuthorizedUrl: Optional[AuthorizedUrl] = None
class CreateApplicationPresignedUrlRequest(BaseModel):
ApplicationName: ApplicationName
UrlType: UrlType
SessionExpirationDurationInSeconds: Optional[
SessionExpirationDurationInSeconds
] = None
class CreateApplicationSnapshotRequest(BaseModel):
ApplicationName: ApplicationName
SnapshotName: SnapshotName
class DeleteApplicationRequest(BaseModel):
ApplicationName: ApplicationName
CreateTimestamp: Timestamp
class DeleteApplicationCloudWatchLoggingOptionRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: Optional[ApplicationVersionId] = None
CloudWatchLoggingOptionId: Id
ConditionalToken: Optional[ConditionalToken] = None
class DeleteApplicationInputProcessingConfigurationResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
class DeleteApplicationInputProcessingConfigurationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
InputId: Id
class DeleteApplicationOutputResponse(
DeleteApplicationInputProcessingConfigurationResponse
):
pass
class DeleteApplicationOutputRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
OutputId: Id
class DeleteApplicationReferenceDataSourceResponse(
DeleteApplicationInputProcessingConfigurationResponse
):
pass
class DeleteApplicationReferenceDataSourceRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
ReferenceId: Id
class DeleteApplicationSnapshotRequest(BaseModel):
ApplicationName: ApplicationName
SnapshotName: SnapshotName
SnapshotCreationTimestamp: Timestamp
class DeleteApplicationVpcConfigurationResponse(
DeleteApplicationInputProcessingConfigurationResponse
):
pass
class DeleteApplicationVpcConfigurationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: Optional[ApplicationVersionId] = None
VpcConfigurationId: Id
ConditionalToken: Optional[ConditionalToken] = None
class DescribeApplicationRequest(BaseModel):
ApplicationName: ApplicationName
IncludeAdditionalDetails: Optional[BooleanObject] = None
class DescribeApplicationSnapshotRequest(BaseModel):
ApplicationName: ApplicationName
SnapshotName: SnapshotName
class DescribeApplicationVersionRequest(BaseModel):
ApplicationName: ApplicationName
ApplicationVersionId: ApplicationVersionId
class ListApplicationSnapshotsRequest(BaseModel):
ApplicationName: ApplicationName
Limit: Optional[ListSnapshotsInputLimit] = None
NextToken: Optional[NextToken] = None
class ListApplicationVersionsResponse(BaseModel):
ApplicationVersionSummaries: Optional[ApplicationVersionSummaries] = None
NextToken: Optional[NextToken] = None
class ListApplicationVersionsRequest(BaseModel):
ApplicationName: ApplicationName
Limit: Optional[ListApplicationVersionsInputLimit] = None
NextToken: Optional[NextToken] = None
class ListApplicationsResponse(BaseModel):
ApplicationSummaries: ApplicationSummaries
NextToken: Optional[ApplicationName] = None
class ListApplicationsRequest(BaseModel):
Limit: Optional[ListApplicationsInputLimit] = None
NextToken: Optional[ApplicationName] = None
class ListTagsForResourceRequest(BaseModel):
ResourceARN: KinesisAnalyticsARN
class RollbackApplicationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
class StopApplicationRequest(BaseModel):
ApplicationName: ApplicationName
Force: Optional[BooleanObject] = None
class UntagResourceRequest(BaseModel):
ResourceARN: KinesisAnalyticsARN
TagKeys: TagKeys
class UpdateApplicationMaintenanceConfigurationRequest(BaseModel):
ApplicationName: ApplicationName
ApplicationMaintenanceConfigurationUpdate: ApplicationMaintenanceConfigurationUpdate
class CloudWatchLoggingOption(BaseModel):
"""
Provides a description of Amazon CloudWatch logging options, including the log stream Amazon Resource Name (ARN).
"""
LogStreamARN: LogStreamARN
class CloudWatchLoggingOptionDescriptions(BaseModel):
__root__: List[CloudWatchLoggingOptionDescription]
class InputProcessingConfiguration(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes a processor that is used to preprocess the records in the stream before being processed by your application code. Currently, the only input processor available is <a href="https://docs.aws.amazon.com/lambda/">AWS Lambda</a>.
"""
InputLambdaProcessor: InputLambdaProcessor
class InputProcessingConfigurationDescription(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, provides the configuration information about an input processor. Currently, the only input processor available is <a href="https://docs.aws.amazon.com/lambda/">AWS Lambda</a>.
"""
InputLambdaProcessorDescription: Optional[InputLambdaProcessorDescription] = None
class Output(BaseModel):
"""
<p> Describes a SQL-based Kinesis Data Analytics application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream. </p> <p/>
"""
Name: InAppStreamName
KinesisStreamsOutput: Optional[KinesisStreamsOutput] = None
KinesisFirehoseOutput: Optional[KinesisFirehoseOutput] = None
LambdaOutput: Optional[LambdaOutput] = None
DestinationSchema: DestinationSchema
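# Illustration note: an Output couples the in-application stream name with one
# of the destination models (KinesisStreamsOutput, KinesisFirehoseOutput, or
# LambdaOutput) plus a DestinationSchema describing the output record format;
# which destination fields must be supplied together is defined by the AWS API
# itself, not enforced by this generated model.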
class OutputDescriptions(BaseModel):
__root__: List[OutputDescription]
class VpcConfiguration(BaseModel):
"""
Describes the parameters of a VPC used by the application.
"""
SubnetIds: SubnetIds
SecurityGroupIds: SecurityGroupIds
class VpcConfigurationDescription(BaseModel):
"""
Describes the parameters of a VPC used by the application.
"""
VpcConfigurationId: Id
VpcId: VpcId
SubnetIds: SubnetIds
SecurityGroupIds: SecurityGroupIds
class ApplicationSnapshotConfiguration(BaseModel):
"""
Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"""
SnapshotsEnabled: BooleanObject
class VpcConfigurations(BaseModel):
__root__: List[VpcConfiguration]
class RunConfigurationDescription(BaseModel):
"""
Describes the starting properties for a Kinesis Data Analytics application.
"""
ApplicationRestoreConfigurationDescription: Optional[
ApplicationRestoreConfiguration
] = None
FlinkRunConfigurationDescription: Optional[FlinkRunConfiguration] = None
class ApplicationSnapshotConfigurationDescription(ApplicationSnapshotConfiguration):
"""
Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"""
pass
class VpcConfigurationDescriptions(BaseModel):
__root__: List[VpcConfigurationDescription]
class ApplicationSnapshotConfigurationUpdate(BaseModel):
"""
Describes updates to whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"""
SnapshotsEnabledUpdate: BooleanObject
class VpcConfigurationUpdates(BaseModel):
__root__: List[VpcConfigurationUpdate]
class ApplicationMaintenanceConfigurationDescription(BaseModel):
"""
The details of the maintenance configuration for the application.
"""
ApplicationMaintenanceWindowStartTime: ApplicationMaintenanceWindowStartTime
ApplicationMaintenanceWindowEndTime: ApplicationMaintenanceWindowEndTime
class GlueDataCatalogConfiguration(BaseModel):
"""
The configuration of the Glue Data Catalog that you use for Apache Flink SQL queries and table API transforms that you write in an application.
"""
DatabaseARN: DatabaseARN
class CatalogConfiguration(BaseModel):
"""
The configuration parameters for the default AWS Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
"""
GlueDataCatalogConfiguration: GlueDataCatalogConfiguration
class GlueDataCatalogConfigurationDescription(GlueDataCatalogConfiguration):
"""
The configuration of the Glue Data Catalog that you use for Apache Flink SQL queries and table API transforms that you write in an application.
"""
pass
class CatalogConfigurationDescription(BaseModel):
"""
The configuration parameters for the default AWS Glue database. You use this database for Apache Flink SQL queries and table API transforms that you write in a Kinesis Data Analytics Studio notebook.
"""
GlueDataCatalogConfigurationDescription: GlueDataCatalogConfigurationDescription
class GlueDataCatalogConfigurationUpdate(BaseModel):
"""
Updates to the configuration of the Glue Data Catalog that you use for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
"""
DatabaseARNUpdate: Optional[DatabaseARN] = None
class CatalogConfigurationUpdate(BaseModel):
"""
Updates to the configuration parameters for the default AWS Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
"""
GlueDataCatalogConfigurationUpdate: GlueDataCatalogConfigurationUpdate
class CloudWatchLoggingOptions(BaseModel):
__root__: List[CloudWatchLoggingOption]
class S3ContentLocation(BaseModel):
"""
For a Kinesis Data Analytics application provides a description of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the name of the Amazon S3 object that contains the data, and the version number of the Amazon S3 object that contains the data.
"""
BucketARN: BucketARN
FileKey: FileKey
ObjectVersion: Optional[ObjectVersion] = None
class S3ApplicationCodeLocationDescription(S3ContentLocation):
"""
Describes the location of an application's code stored in an S3 bucket.
"""
pass
class S3ContentLocationUpdate(BaseModel):
"""
Describes an update for the Amazon S3 code content location for an application.
"""
BucketARNUpdate: Optional[BucketARN] = None
FileKeyUpdate: Optional[FileKey] = None
ObjectVersionUpdate: Optional[ObjectVersion] = None
class Tags(BaseModel):
__root__: Annotated[List[Tag], Field(max_items=200, min_items=1)]
class MavenReference(BaseModel):
"""
The information required to specify a Maven reference. You can use Maven references to specify dependency JAR files.
"""
GroupId: MavenGroupId
ArtifactId: MavenArtifactId
Version: MavenVersion
class CustomArtifactConfiguration(BaseModel):
"""
Specifies dependency JARs, as well as JAR files that contain user-defined functions (UDF).
"""
ArtifactType: ArtifactType
S3ContentLocation: Optional[S3ContentLocation] = None
MavenReference: Optional[MavenReference] = None
class CustomArtifactConfigurationDescription(BaseModel):
"""
Specifies a dependency JAR or a JAR of user-defined functions.
"""
ArtifactType: Optional[ArtifactType] = None
S3ContentLocationDescription: Optional[S3ContentLocation] = None
MavenReferenceDescription: Optional[MavenReference] = None
class CustomArtifactsConfigurationDescriptionList(BaseModel):
__root__: Annotated[
List[CustomArtifactConfigurationDescription], Field(max_items=50)
]
class CustomArtifactsConfigurationList(BaseModel):
__root__: Annotated[List[CustomArtifactConfiguration], Field(max_items=50)]
class SnapshotDetails(BaseModel):
"""
Provides details about a snapshot of application state.
"""
SnapshotName: SnapshotName
SnapshotStatus: SnapshotStatus
ApplicationVersionId: ApplicationVersionId
SnapshotCreationTimestamp: Optional[Timestamp] = None
class InputStartingPositionConfiguration(BaseModel):
"""
Describes the point at which the application reads from the streaming source.
"""
InputStartingPosition: Optional[InputStartingPosition] = None
class S3Configuration(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, provides a description of an Amazon S3 data source, including the Amazon Resource Name (ARN) of the S3 bucket and the name of the Amazon S3 object that contains the data.
"""
BucketARN: BucketARN
FileKey: FileKey
class ParsedInputRecords(BaseModel):
__root__: List[ParsedInputRecord]
class ProcessedInputRecords(BaseModel):
__root__: List[ProcessedInputRecord]
class RawInputRecords(BaseModel):
__root__: List[RawInputRecord]
class PropertyGroups(BaseModel):
__root__: Annotated[List[PropertyGroup], Field(max_items=50)]
class MonitoringConfiguration(BaseModel):
"""
Describes configuration parameters for Amazon CloudWatch logging for an application. For more information about CloudWatch logging, see <a href="https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html">Monitoring</a>.
"""
ConfigurationType: ConfigurationType
MetricsLevel: Optional[MetricsLevel] = None
LogLevel: Optional[LogLevel] = None
class ParallelismConfiguration(BaseModel):
"""
Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously. For more information about parallelism, see <a href="https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html">Parallel Execution</a> in the <a href="https://ci.apache.org/projects/flink/flink-docs-release-1.8/">Apache Flink Documentation</a>.
"""
ConfigurationType: ConfigurationType
Parallelism: Optional[Parallelism] = None
ParallelismPerKPU: Optional[ParallelismPerKPU] = None
AutoScalingEnabled: Optional[BooleanObject] = None
class MonitoringConfigurationDescription(BaseModel):
"""
Describes configuration parameters for CloudWatch logging for an application.
"""
ConfigurationType: Optional[ConfigurationType] = None
MetricsLevel: Optional[MetricsLevel] = None
LogLevel: Optional[LogLevel] = None
class ParallelismConfigurationDescription(BaseModel):
"""
Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously.
"""
ConfigurationType: Optional[ConfigurationType] = None
Parallelism: Optional[Parallelism] = None
ParallelismPerKPU: Optional[ParallelismPerKPU] = None
CurrentParallelism: Optional[Parallelism] = None
AutoScalingEnabled: Optional[BooleanObject] = None
class MonitoringConfigurationUpdate(BaseModel):
"""
Describes updates to configuration parameters for Amazon CloudWatch logging for an application.
"""
ConfigurationTypeUpdate: Optional[ConfigurationType] = None
MetricsLevelUpdate: Optional[MetricsLevel] = None
LogLevelUpdate: Optional[LogLevel] = None
class ParallelismConfigurationUpdate(BaseModel):
"""
Describes updates to parameters for how an application executes multiple tasks simultaneously.
"""
ConfigurationTypeUpdate: Optional[ConfigurationType] = None
ParallelismUpdate: Optional[Parallelism] = None
ParallelismPerKPUUpdate: Optional[ParallelismPerKPU] = None
AutoScalingEnabledUpdate: Optional[BooleanObject] = None
class InputParallelism(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the number of in-application streams to create for a given streaming source.
"""
Count: Optional[InputParallelismCount] = None
class RecordFormat(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the record format and relevant mapping information that should be applied to schematize the records on the stream.
"""
RecordFormatType: RecordFormatType
MappingParameters: Optional[MappingParameters] = None
class RecordColumns(BaseModel):
__root__: Annotated[List[RecordColumn], Field(max_items=1000, min_items=1)]
class InputSchemaUpdate(BaseModel):
"""
Describes updates for an SQL-based Kinesis Data Analytics application's input schema.
"""
RecordFormatUpdate: Optional[RecordFormat] = None
RecordEncodingUpdate: Optional[RecordEncoding] = None
RecordColumnUpdates: Optional[RecordColumns] = None
class InputUpdate(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes updates to a specific input configuration (identified by the <code>InputId</code> of an application).
"""
InputId: Id
NamePrefixUpdate: Optional[InAppStreamName] = None
InputProcessingConfigurationUpdate: Optional[
InputProcessingConfigurationUpdate
] = None
KinesisStreamsInputUpdate: Optional[KinesisStreamsInputUpdate] = None
KinesisFirehoseInputUpdate: Optional[KinesisFirehoseInputUpdate] = None
InputSchemaUpdate: Optional[InputSchemaUpdate] = None
InputParallelismUpdate: Optional[InputParallelismUpdate] = None
class InputUpdates(BaseModel):
__root__: List[InputUpdate]
class SnapshotSummaries(BaseModel):
__root__: List[SnapshotDetails]
class Outputs(BaseModel):
__root__: List[Output]
class SqlRunConfiguration(BaseModel):
"""
Describes the starting parameters for a SQL-based Kinesis Data Analytics application.
"""
InputId: Id
InputStartingPositionConfiguration: InputStartingPositionConfiguration
class AddApplicationCloudWatchLoggingOptionResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
CloudWatchLoggingOptionDescriptions: Optional[
CloudWatchLoggingOptionDescriptions
] = None
class AddApplicationCloudWatchLoggingOptionRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: Optional[ApplicationVersionId] = None
CloudWatchLoggingOption: CloudWatchLoggingOption
ConditionalToken: Optional[ConditionalToken] = None
class AddApplicationInputProcessingConfigurationResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
InputId: Optional[Id] = None
InputProcessingConfigurationDescription: Optional[
InputProcessingConfigurationDescription
] = None
class AddApplicationInputProcessingConfigurationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
InputId: Id
InputProcessingConfiguration: InputProcessingConfiguration
class AddApplicationOutputResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
OutputDescriptions: Optional[OutputDescriptions] = None
class AddApplicationOutputRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
Output: Output
class AddApplicationVpcConfigurationResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
VpcConfigurationDescription: Optional[VpcConfigurationDescription] = None
class AddApplicationVpcConfigurationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: Optional[ApplicationVersionId] = None
VpcConfiguration: VpcConfiguration
ConditionalToken: Optional[ConditionalToken] = None
class DeleteApplicationCloudWatchLoggingOptionResponse(
AddApplicationCloudWatchLoggingOptionResponse
):
pass
class DescribeApplicationSnapshotResponse(BaseModel):
SnapshotDetails: SnapshotDetails
class DiscoverInputSchemaRequest(BaseModel):
ResourceARN: Optional[ResourceARN] = None
ServiceExecutionRole: RoleARN
InputStartingPositionConfiguration: Optional[
InputStartingPositionConfiguration
] = None
S3Configuration: Optional[S3Configuration] = None
InputProcessingConfiguration: Optional[InputProcessingConfiguration] = None
class ListApplicationSnapshotsResponse(BaseModel):
SnapshotSummaries: Optional[SnapshotSummaries] = None
NextToken: Optional[NextToken] = None
class ListTagsForResourceResponse(BaseModel):
Tags: Optional[Tags] = None
class TagResourceRequest(BaseModel):
ResourceARN: KinesisAnalyticsARN
Tags: Tags
class UpdateApplicationMaintenanceConfigurationResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationMaintenanceConfigurationDescription: Optional[
ApplicationMaintenanceConfigurationDescription
] = None
class CodeContent(BaseModel):
"""
Specifies either the application code, or the location of the application code, for a Flink-based Kinesis Data Analytics application.
"""
TextContent: Optional[TextContent] = None
ZipFileContent: Optional[ZipFileContent] = None
S3ContentLocation: Optional[S3ContentLocation] = None
class ApplicationCodeConfiguration(BaseModel):
"""
Describes code configuration for an application.
"""
CodeContent: Optional[CodeContent] = None
CodeContentType: CodeContentType
class CodeContentDescription(BaseModel):
"""
Describes details about the code of a Kinesis Data Analytics application.
"""
TextContent: Optional[TextContent] = None
CodeMD5: Optional[CodeMD5] = None
CodeSize: Optional[CodeSize] = None
S3ApplicationCodeLocationDescription: Optional[
S3ApplicationCodeLocationDescription
] = None
class ApplicationCodeConfigurationDescription(BaseModel):
"""
Describes code configuration for an application.
"""
CodeContentType: CodeContentType
CodeContentDescription: Optional[CodeContentDescription] = None
class CodeContentUpdate(BaseModel):
"""
Describes an update to the code of an application. Not supported for Apache Zeppelin.
"""
TextContentUpdate: Optional[TextContent] = None
ZipFileContentUpdate: Optional[ZipFileContent] = None
S3ContentLocationUpdate: Optional[S3ContentLocationUpdate] = None
class ApplicationCodeConfigurationUpdate(BaseModel):
"""
Describes code configuration updates for an application. This is supported for a Flink-based Kinesis Data Analytics application or a SQL-based Kinesis Data Analytics application.
"""
CodeContentTypeUpdate: Optional[CodeContentType] = None
CodeContentUpdate: Optional[CodeContentUpdate] = None
class FlinkApplicationConfiguration(BaseModel):
"""
Describes configuration parameters for a Flink-based Kinesis Data Analytics application or a Studio notebook.
"""
CheckpointConfiguration: Optional[CheckpointConfiguration] = None
MonitoringConfiguration: Optional[MonitoringConfiguration] = None
ParallelismConfiguration: Optional[ParallelismConfiguration] = None
class EnvironmentProperties(BaseModel):
"""
Describes execution properties for a Flink-based Kinesis Data Analytics application.
"""
PropertyGroups: PropertyGroups
class ZeppelinApplicationConfiguration(BaseModel):
"""
The configuration of a Kinesis Data Analytics Studio notebook.
"""
MonitoringConfiguration: Optional[ZeppelinMonitoringConfiguration] = None
CatalogConfiguration: Optional[CatalogConfiguration] = None
DeployAsApplicationConfiguration: Optional[DeployAsApplicationConfiguration] = None
CustomArtifactsConfiguration: Optional[CustomArtifactsConfigurationList] = None
class FlinkApplicationConfigurationDescription(BaseModel):
"""
Describes configuration parameters for a Flink-based Kinesis Data Analytics application.
"""
CheckpointConfigurationDescription: Optional[
CheckpointConfigurationDescription
] = None
MonitoringConfigurationDescription: Optional[
MonitoringConfigurationDescription
] = None
ParallelismConfigurationDescription: Optional[
ParallelismConfigurationDescription
] = None
JobPlanDescription: Optional[JobPlanDescription] = None
class EnvironmentPropertyDescriptions(BaseModel):
"""
Describes the execution properties for an Apache Flink runtime.
"""
PropertyGroupDescriptions: Optional[PropertyGroups] = None
class ZeppelinApplicationConfigurationDescription(BaseModel):
"""
The configuration of a Kinesis Data Analytics Studio notebook.
"""
MonitoringConfigurationDescription: ZeppelinMonitoringConfigurationDescription
CatalogConfigurationDescription: Optional[CatalogConfigurationDescription] = None
DeployAsApplicationConfigurationDescription: Optional[
DeployAsApplicationConfigurationDescription
] = None
CustomArtifactsConfigurationDescription: Optional[
CustomArtifactsConfigurationDescriptionList
] = None
class FlinkApplicationConfigurationUpdate(BaseModel):
"""
Describes updates to the configuration parameters for a Flink-based Kinesis Data Analytics application.
"""
CheckpointConfigurationUpdate: Optional[CheckpointConfigurationUpdate] = None
MonitoringConfigurationUpdate: Optional[MonitoringConfigurationUpdate] = None
ParallelismConfigurationUpdate: Optional[ParallelismConfigurationUpdate] = None
class EnvironmentPropertyUpdates(EnvironmentProperties):
"""
Describes updates to the execution property groups for a Flink-based Kinesis Data Analytics application or a Studio notebook.
"""
pass
class ZeppelinApplicationConfigurationUpdate(BaseModel):
"""
Updates to the configuration of a Kinesis Data Analytics Studio notebook.
"""
MonitoringConfigurationUpdate: Optional[
ZeppelinMonitoringConfigurationUpdate
] = None
CatalogConfigurationUpdate: Optional[CatalogConfigurationUpdate] = None
DeployAsApplicationConfigurationUpdate: Optional[
DeployAsApplicationConfigurationUpdate
] = None
CustomArtifactsConfigurationUpdate: Optional[
CustomArtifactsConfigurationList
] = None
class SourceSchema(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.
"""
RecordFormat: RecordFormat
RecordEncoding: Optional[RecordEncoding] = None
RecordColumns: RecordColumns
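# Usage sketch (illustration only; the column name and SQL type are made up):
# a SourceSchema pairs a RecordFormat with the RecordColumns that map the
# streaming data onto the in-application stream, roughly:
#     SourceSchema(
#         RecordFormat=RecordFormat(RecordFormatType=...),
#         RecordColumns=RecordColumns(__root__=[
#             RecordColumn(Name=RecordColumnName(__root__="ticker"),
#                          SqlType=RecordColumnSqlType(__root__="VARCHAR(8)")),
#         ]),
#     )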
class InputDescription(BaseModel):
"""
Describes the application input configuration for a SQL-based Kinesis Data Analytics application.
"""
InputId: Optional[Id] = None
NamePrefix: Optional[InAppStreamName] = None
InAppStreamNames: Optional[InAppStreamNames] = None
InputProcessingConfigurationDescription: Optional[
InputProcessingConfigurationDescription
] = None
KinesisStreamsInputDescription: Optional[KinesisStreamsInputDescription] = None
KinesisFirehoseInputDescription: Optional[KinesisFirehoseInputDescription] = None
InputSchema: Optional[SourceSchema] = None
InputParallelism: Optional[InputParallelism] = None
InputStartingPositionConfiguration: Optional[
InputStartingPositionConfiguration
] = None
class ReferenceDataSourceDescription(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the reference data source configured for an application.
"""
ReferenceId: Id
TableName: InAppTableName
S3ReferenceDataSourceDescription: S3ReferenceDataSourceDescription
ReferenceSchema: Optional[SourceSchema] = None
class ReferenceDataSourceUpdate(BaseModel):
"""
When you update a reference data source configuration for a SQL-based Kinesis Data Analytics application, this object provides all the updated values (such as the source bucket name and object key name), the in-application table name that is created, and updated mapping information that maps the data in the Amazon S3 object to the in-application reference table that is created.
"""
ReferenceId: Id
TableNameUpdate: Optional[InAppTableName] = None
S3ReferenceDataSourceUpdate: Optional[S3ReferenceDataSourceUpdate] = None
ReferenceSchemaUpdate: Optional[SourceSchema] = None
class ReferenceDataSourceUpdates(BaseModel):
__root__: List[ReferenceDataSourceUpdate]
class SqlRunConfigurations(BaseModel):
__root__: List[SqlRunConfiguration]
class RunConfiguration(BaseModel):
"""
Describes the starting parameters for a Kinesis Data Analytics application.
"""
FlinkRunConfiguration: Optional[FlinkRunConfiguration] = None
SqlRunConfigurations: Optional[SqlRunConfigurations] = None
ApplicationRestoreConfiguration: Optional[ApplicationRestoreConfiguration] = None
class DiscoverInputSchemaResponse(BaseModel):
InputSchema: Optional[SourceSchema] = None
ParsedInputRecords: Optional[ParsedInputRecords] = None
ProcessedInputRecords: Optional[ProcessedInputRecords] = None
RawInputRecords: Optional[RawInputRecords] = None
class StartApplicationRequest(BaseModel):
ApplicationName: ApplicationName
RunConfiguration: Optional[RunConfiguration] = None
class Input(BaseModel):
"""
When you configure the application input for a SQL-based Kinesis Data Analytics application, you specify the streaming source, the in-application stream name that is created, and the mapping between the two.
"""
NamePrefix: InAppStreamName
InputProcessingConfiguration: Optional[InputProcessingConfiguration] = None
KinesisStreamsInput: Optional[KinesisStreamsInput] = None
KinesisFirehoseInput: Optional[KinesisFirehoseInput] = None
InputParallelism: Optional[InputParallelism] = None
InputSchema: SourceSchema
class InputDescriptions(BaseModel):
__root__: List[InputDescription]
class ReferenceDataSource(BaseModel):
"""
For a SQL-based Kinesis Data Analytics application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.
"""
TableName: InAppTableName
S3ReferenceDataSource: Optional[S3ReferenceDataSource] = None
ReferenceSchema: SourceSchema
class ReferenceDataSourceDescriptions(BaseModel):
__root__: List[ReferenceDataSourceDescription]
class SqlApplicationConfigurationDescription(BaseModel):
"""
Describes the inputs, outputs, and reference data sources for a SQL-based Kinesis Data Analytics application.
"""
InputDescriptions: Optional[InputDescriptions] = None
OutputDescriptions: Optional[OutputDescriptions] = None
ReferenceDataSourceDescriptions: Optional[ReferenceDataSourceDescriptions] = None
class ApplicationConfigurationDescription(BaseModel):
"""
Describes details about the application code and starting parameters for a Kinesis Data Analytics application.
"""
SqlApplicationConfigurationDescription: Optional[
SqlApplicationConfigurationDescription
] = None
ApplicationCodeConfigurationDescription: Optional[
ApplicationCodeConfigurationDescription
] = None
RunConfigurationDescription: Optional[RunConfigurationDescription] = None
FlinkApplicationConfigurationDescription: Optional[
FlinkApplicationConfigurationDescription
] = None
EnvironmentPropertyDescriptions: Optional[EnvironmentPropertyDescriptions] = None
ApplicationSnapshotConfigurationDescription: Optional[
ApplicationSnapshotConfigurationDescription
] = None
VpcConfigurationDescriptions: Optional[VpcConfigurationDescriptions] = None
ZeppelinApplicationConfigurationDescription: Optional[
ZeppelinApplicationConfigurationDescription
] = None
class SqlApplicationConfigurationUpdate(BaseModel):
"""
Describes updates to the input streams, destination streams, and reference data sources for a SQL-based Kinesis Data Analytics application.
"""
InputUpdates: Optional[InputUpdates] = None
OutputUpdates: Optional[OutputUpdates] = None
ReferenceDataSourceUpdates: Optional[ReferenceDataSourceUpdates] = None
class ApplicationConfigurationUpdate(BaseModel):
"""
Describes updates to an application's configuration.
"""
SqlApplicationConfigurationUpdate: Optional[
SqlApplicationConfigurationUpdate
] = None
ApplicationCodeConfigurationUpdate: Optional[
ApplicationCodeConfigurationUpdate
] = None
FlinkApplicationConfigurationUpdate: Optional[
FlinkApplicationConfigurationUpdate
] = None
EnvironmentPropertyUpdates: Optional[EnvironmentPropertyUpdates] = None
ApplicationSnapshotConfigurationUpdate: Optional[
ApplicationSnapshotConfigurationUpdate
] = None
VpcConfigurationUpdates: Optional[VpcConfigurationUpdates] = None
ZeppelinApplicationConfigurationUpdate: Optional[
ZeppelinApplicationConfigurationUpdate
] = None
class ApplicationDetail(BaseModel):
"""
Describes the application, including the application Amazon Resource Name (ARN), status, latest version, and input and output configurations.
"""
ApplicationARN: ResourceARN
ApplicationDescription: Optional[ApplicationDescription] = None
ApplicationName: ApplicationName
RuntimeEnvironment: RuntimeEnvironment
ServiceExecutionRole: Optional[RoleARN] = None
ApplicationStatus: ApplicationStatus
ApplicationVersionId: ApplicationVersionId
CreateTimestamp: Optional[Timestamp] = None
LastUpdateTimestamp: Optional[Timestamp] = None
ApplicationConfigurationDescription: Optional[
ApplicationConfigurationDescription
] = None
CloudWatchLoggingOptionDescriptions: Optional[
CloudWatchLoggingOptionDescriptions
] = None
ApplicationMaintenanceConfigurationDescription: Optional[
ApplicationMaintenanceConfigurationDescription
] = None
ApplicationVersionUpdatedFrom: Optional[ApplicationVersionId] = None
ApplicationVersionRolledBackFrom: Optional[ApplicationVersionId] = None
ConditionalToken: Optional[ConditionalToken] = None
ApplicationVersionRolledBackTo: Optional[ApplicationVersionId] = None
ApplicationMode: Optional[ApplicationMode] = None
class Inputs(BaseModel):
__root__: List[Input]
class ReferenceDataSources(BaseModel):
__root__: List[ReferenceDataSource]
class AddApplicationInputResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
InputDescriptions: Optional[InputDescriptions] = None
class AddApplicationInputRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
Input: Input
class AddApplicationReferenceDataSourceResponse(BaseModel):
ApplicationARN: Optional[ResourceARN] = None
ApplicationVersionId: Optional[ApplicationVersionId] = None
ReferenceDataSourceDescriptions: Optional[ReferenceDataSourceDescriptions] = None
class AddApplicationReferenceDataSourceRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: ApplicationVersionId
ReferenceDataSource: ReferenceDataSource
class CreateApplicationResponse(BaseModel):
ApplicationDetail: ApplicationDetail
class DescribeApplicationResponse(CreateApplicationResponse):
pass
class DescribeApplicationVersionResponse(BaseModel):
ApplicationVersionDetail: Optional[ApplicationDetail] = None
class RollbackApplicationResponse(CreateApplicationResponse):
pass
class UpdateApplicationResponse(CreateApplicationResponse):
pass
class UpdateApplicationRequest(BaseModel):
ApplicationName: ApplicationName
CurrentApplicationVersionId: Optional[ApplicationVersionId] = None
ApplicationConfigurationUpdate: Optional[ApplicationConfigurationUpdate] = None
ServiceExecutionRoleUpdate: Optional[RoleARN] = None
RunConfigurationUpdate: Optional[RunConfigurationUpdate] = None
CloudWatchLoggingOptionUpdates: Optional[CloudWatchLoggingOptionUpdates] = None
ConditionalToken: Optional[ConditionalToken] = None
class SqlApplicationConfiguration(BaseModel):
"""
Describes the inputs, outputs, and reference data sources for a SQL-based Kinesis Data Analytics application.
"""
Inputs: Optional[Inputs] = None
Outputs: Optional[Outputs] = None
ReferenceDataSources: Optional[ReferenceDataSources] = None
class ApplicationConfiguration(BaseModel):
"""
Specifies the creation parameters for a Kinesis Data Analytics application.
"""
SqlApplicationConfiguration: Optional[SqlApplicationConfiguration] = None
FlinkApplicationConfiguration: Optional[FlinkApplicationConfiguration] = None
EnvironmentProperties: Optional[EnvironmentProperties] = None
ApplicationCodeConfiguration: Optional[ApplicationCodeConfiguration] = None
ApplicationSnapshotConfiguration: Optional[ApplicationSnapshotConfiguration] = None
VpcConfigurations: Optional[VpcConfigurations] = None
ZeppelinApplicationConfiguration: Optional[ZeppelinApplicationConfiguration] = None
class CreateApplicationRequest(BaseModel):
ApplicationName: ApplicationName
ApplicationDescription: Optional[ApplicationDescription] = None
RuntimeEnvironment: RuntimeEnvironment
ServiceExecutionRole: RoleARN
ApplicationConfiguration: Optional[ApplicationConfiguration] = None
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] = None
Tags: Optional[Tags] = None
ApplicationMode: Optional[ApplicationMode] = None
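# ---------------------------------------------------------------------------
# Usage sketch, guarded so it only runs when this module is executed directly.
# The classes above are ordinary pydantic v1 models, so they can be built and
# serialized locally before being handed to whatever client performs the
# actual service call. The literal values below are made up for illustration.
if __name__ == "__main__":
    demo_tag = Tag(Key=TagKey(__root__="team"), Value=TagValue(__root__="analytics"))
    demo_column = RecordColumn(
        Name=RecordColumnName(__root__="ticker"),
        SqlType=RecordColumnSqlType(__root__="VARCHAR(8)"),
    )
    # .json() comes from pydantic's BaseModel; nested custom-root wrappers
    # serialize as their plain underlying values.
    print(demo_tag.json())
    print(demo_column.json())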
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore model definitions."""
__author__ = 'Ka-Ping Yee <kpy@google.com>'
from google.appengine.ext import db
import datetime
import random
class Config(db.Model):
"""Stores a configuration setting."""
value = db.StringProperty()
@staticmethod
def get(name):
config = Config.get_by_key_name(name)
return config and config.value or None
@staticmethod
def set(name, value):
Config(key_name=name, value=value).put()
@staticmethod
def get_or_generate(name):
"""Generates a random value if the setting does not already exist.
Use this to initialize secret keys."""
# TODO(kpy): Use memcache to avoid expensive random number generation.
value = ''.join('%02x' % random.randrange(256) for i in range(32))
return Config.get_or_insert(key_name=name, value=value).value
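# Usage sketch (illustration only; "session_secret" is a made-up setting name):
#     secret = Config.get_or_generate('session_secret')   # create on first use
#     existing = Config.get('session_secret')              # None if never set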
class Location(db.Model):
location = db.GeoPtProperty()
location_time = db.DateTimeProperty()
tags = db.StringListProperty()
class Member(db.Model):
"""Represents a user who has registered and authorized this app.
key_name: user.user_id()"""
user = db.UserProperty()
nickname = db.StringProperty() # nickname to show with the user's location
tags = db.StringListProperty() # list of groups this user has joined
stop_times = db.ListProperty(datetime.datetime) # membership ending times
latitude_key = db.StringProperty() # OAuth access token key
latitude_secret = db.StringProperty() # OAuth access token secret
location = db.GeoPtProperty() # user's geolocation
location_time = db.DateTimeProperty() # time that location was recorded
# NOTE: tags and stop_times are parallel arrays!
# INVARIANT: len(tags) == len(stop_times)
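# Illustration (hypothetical values): a member of "friends" until t1 and of
# "work" until t2 would carry
#     tags       = ['friends', 'work']
#     stop_times = [t1, t2]
# so index i in one list always corresponds to index i in the other.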
def get_stop_time(self, tag):
"""Gets the stop time for a particular tag."""
return self.stop_times[self.tags.index(tag)]
def remove_tag(self, tag):
"""Removes a tag from self.tags, preserving the invariant."""
if tag in self.tags:
index = self.tags.index(tag)
self.tags[index:index + 1] = []
self.stop_times[index:index + 1] = []
@staticmethod
def get_for_tag(tag, now):
"""Gets all active members of the given tag."""
members = Member.all().filter('tags =', tag).fetch(1000)
results = []
for member in members:
tag_index = member.tags.index(tag)
stop_time = member.stop_times[tag_index]
if stop_time > now:
results.append(member)
return results
@staticmethod
def create(user):
"""Creates a Member object for a user."""
return Member(key_name=user.user_id(), user=user)
@staticmethod
def get(user):
"""Gets the Member object for a user."""
if user:
return Member.get_by_key_name(user.user_id())
@staticmethod
def join(user, tag, stop_time):
"""Transactionally adds a tag for a user."""
def work():
member = Member.get(user)
member.remove_tag(tag)
member.tags.append(tag)
member.stop_times.append(stop_time)
member.put()
db.run_in_transaction(work)
@staticmethod
def quit(user, tag):
"""Transactionally removes a tag for a user."""
def work():
member = Member.get(user)
member.remove_tag(tag)
member.put()
db.run_in_transaction(work)
def clean(self, now):
"""Transactionally removes all expired tags for this member."""
def work():
member = db.get(self.key())
index = 0
while index < len(member.tags):
if member.stop_times[index] <= now:
# We don't bother to update member_count here;
# update_tagstats will eventually take care of it.
member.remove_tag(member.tags[index])
else:
index += 1
member.put()
return member
# Before starting a transaction, test if cleaning is needed.
if self.stop_times and min(self.stop_times) <= now:
return db.run_in_transaction(work)
return self
def set_location(self, location, now):
"""Transactionally sets the location for this member."""
def work():
member = db.get(self.key())
member.location = location
member.location_time = now
member.put()
db.run_in_transaction(work)
class TagStat(db.Model):
"""Contains periodically updated statistics about a particular tag.
key_name: tag"""
update_time = db.DateTimeProperty(auto_now=True)
member_count = db.IntegerProperty(default=0)
centroid = db.GeoPtProperty() # centroid of member locations
radius = db.FloatProperty() # RMS member distance from centroid
@staticmethod
def get(tag):
return TagStat.get_by_key_name(tag)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppDetectReportQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppDetectReportQueryResponse, self).__init__()
self._audit_done = None
self._audit_pass = None
self._err_msg = None
self._out_biz_no = None
@property
def audit_done(self):
return self._audit_done
@audit_done.setter
def audit_done(self, value):
self._audit_done = value
@property
def audit_pass(self):
return self._audit_pass
@audit_pass.setter
def audit_pass(self, value):
self._audit_pass = value
@property
def err_msg(self):
return self._err_msg
@err_msg.setter
def err_msg(self, value):
self._err_msg = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
def parse_response_content(self, response_content):
response = super(AlipayEbppDetectReportQueryResponse, self).parse_response_content(response_content)
if 'audit_done' in response:
self.audit_done = response['audit_done']
if 'audit_pass' in response:
self.audit_pass = response['audit_pass']
if 'err_msg' in response:
self.err_msg = response['err_msg']
if 'out_biz_no' in response:
self.out_biz_no = response['out_biz_no']
|
MutableSet = None
try:
from collections import MutableSet
except ImportError:
# Python 2.4
pass
from theano.compat.python2x import OrderedDict
import types
def check_deterministic(iterable):
# Most places where OrderedSet is used, theano interprets any exception
# whatsoever as a problem that an optimization introduced into the graph.
# If I raise a TypeError when the DestroyHandler tries to do something
# non-deterministic, it will just result in optimizations getting ignored.
# So I must use an assert here. In the long term we should fix the rest of
# theano to use exceptions correctly, so that this can be a TypeError.
if iterable is not None:
assert isinstance(iterable, (
list, tuple, OrderedSet, types.GeneratorType, basestring))
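# Illustration: lists and tuples have a stable iteration order, so
# check_deterministic([1, 2, 3]) passes, while passing a plain set() trips the
# assertion because set iteration order is not deterministic.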
if MutableSet is not None:
# Copyright (C) 2009 Raymond Hettinger
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## {{{ http://code.activestate.com/recipes/576696/ (r5)
import collections
import weakref
class Link(object):
# This means we need to use a different pickle protocol
# than the default. Otherwise, there are pickling errors.
__slots__ = 'prev', 'next', 'key', '__weakref__'
def __getstate__(self):
# weakref.proxy doesn't pickle well, so we use weakref.ref
# manually and don't pickle the weakref.
# We restore the weakref when we unpickle.
ret = [self.prev(), self.next()]
try:
ret.append(self.key)
except AttributeError:
pass
return ret
def __setstate__(self, state):
self.prev = weakref.ref(state[0])
self.next = weakref.ref(state[1])
if len(state) == 3:
self.key = state[2]
class OrderedSet(collections.MutableSet):
'Set that remembers the order elements were added'
# Big-O running times for all methods are the same as for regular sets.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The prev/next links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedSet.
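# Sketch of the intended behaviour (values are arbitrary):
#     s = OrderedSet([3, 1, 2])
#     s.add(1)            # no-op, 1 is already present
#     list(s)             # -> [3, 1, 2], insertion order is preserved
#     s.discard(3)
#     list(s)             # -> [1, 2]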
# Added by IG-- pre-existing theano code expected sets
# to have this method
def update(self, iterable):
check_deterministic(iterable)
self |= iterable
def __init__(self, iterable=None):
# Checks added by IG
check_deterministic(iterable)
self.__root = root = Link() # sentinel node for doubly linked list
root.prev = root.next = weakref.ref(root)
self.__map = {} # key --> link
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.__map)
def __contains__(self, key):
return key in self.__map
def add(self, key):
# Store new key in a new link at the end of the linked list
if key not in self.__map:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, weakref.ref(root), key
last().next = root.prev = weakref.ref(link)
def union(self, s):
check_deterministic(s)
n = self.copy()
for elem in s:
if elem not in n:
n.add(elem)
return n
def intersection_update(self, s):
l = []
for elem in self:
if elem not in s:
l.append(elem)
for elem in l:
self.remove(elem)
return self
def difference_update(self, s):
check_deterministic(s)
for elem in s:
if elem in self:
self.remove(elem)
return self
def copy(self):
n = OrderedSet()
n.update(self)
return n
def discard(self, key):
# Remove an existing item using self.__map to find the link which is
# then removed by updating the links in the predecessor and successors.
if key in self.__map:
link = self.__map.pop(key)
link.prev().next = link.next
link.next().prev = link.prev
def __iter__(self):
# Traverse the linked list in order.
root = self.__root
curr = root.next()
while curr is not root:
yield curr.key
curr = curr.next()
def __reversed__(self):
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev()
while curr is not root:
yield curr.key
curr = curr.prev()
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
if last:
key = next(reversed(self))
else:
key = next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
# Note that we implement only the comparison to another
# `OrderedSet`, and not to a regular `set`, because otherwise we
# could have a non-symmetric equality relation like:
# my_ordered_set == my_set and my_set != my_ordered_set
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
elif isinstance(other, set):
# Raise exception to avoid confusion.
raise TypeError(
'Cannot compare an `OrderedSet` to a `set` because '
'this comparison cannot be made symmetric: please '
'manually cast your `OrderedSet` into `set` before '
'performing this comparison.')
else:
return NotImplemented
## end of http://code.activestate.com/recipes/576696/ }}}
else:
# Python 2.4
class OrderedSet(object):
"""
An implementation of OrderedSet based on the keys of
an OrderedDict.
"""
def __init__(self, iterable=None):
self.data = OrderedDict()
if iterable is not None:
self.update(iterable)
def update(self, container):
check_deterministic(container)
for elem in container:
self.add(elem)
def add(self, key):
self.data[key] = None
def __len__(self):
return len(self.data)
def __contains__(self, key):
return key in self.data
def discard(self, key):
if key in self.data:
del self.data[key]
def remove(self, key):
if key in self.data:
del self.data[key]
else:
raise KeyError(key)
def __iter__(self):
return self.data.__iter__()
def __reversed__(self):
return self.data.__reversed__()
def pop(self, last=True):
raise NotImplementedError()
def __eq__(self, other):
# Note that we implement only the comparison to another
# `OrderedSet`, and not to a regular `set`, because otherwise we
# could have a non-symmetric equality relation like:
# my_ordered_set == my_set and my_set != my_ordered_set
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
elif isinstance(other, set):
# Raise exception to avoid confusion.
raise TypeError(
'Cannot compare an `OrderedSet` to a `set` because '
'this comparison cannot be made symmetric: please '
'manually cast your `OrderedSet` into `set` before '
'performing this comparison.')
else:
return NotImplemented
# NB: Contrary to the other implementation above, we do not override
# the `__del__` method. On one hand, this is not needed since this
# implementation does not add circular references. Moreover, one should
# not clear the underlying dictionary holding the data as soon as the
# ordered set is cleared from memory, because there may still be
# pointers to this dictionary.
if __name__ == '__main__':
print list(OrderedSet('abracadaba'))
print list(OrderedSet('simsalabim'))
print OrderedSet('boom') == OrderedSet('moob')
print OrderedSet('boom') == 'moob'
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 18:24:12 2020
@author: omar.elfarouk
"""
import pandas
import numpy
import seaborn
import scipy
import matplotlib.pyplot as plt
data = pandas.read_csv('gapminder.csv', low_memory=False)
#setting variables you will be working with to numeric
data['internetuserate'] = pandas.to_numeric(data['internetuserate'], errors='coerce')
data['urbanrate'] = pandas.to_numeric(data['urbanrate'], errors='coerce')
data['incomeperperson'] = pandas.to_numeric(data['incomeperperson'], errors='coerce')
data['alcconsumption'] = pandas.to_numeric(data['alcconsumption'], errors='coerce')
data['suicideper100th'] = pandas.to_numeric(data['suicideper100th'], errors='coerce')
data['incomeperperson']=data['incomeperperson'].replace(' ', numpy.nan)
data['alcconsumption']=data['alcconsumption'].replace(' ', numpy.nan)
data['suicideper100th']=data['suicideper100th'].replace(' ', numpy.nan)
#Plotting figure
#scat1 = seaborn.regplot(x="incomeperperson", y="alcconsumption", fit_reg=True, data=data)
#plt.xlabel('incomeperperson')
#plt.ylabel('Alcoholuse')
#plt.title('Scatterplot for the Association Between income per personand Alcohol usage')
#scat2 = seaborn.regplot(x="incomeperperson", y="suicideper100th", fit_reg=True, data=data)
#plt.xlabel('Income per Person')
#plt.ylabel('suicideper100th')
#plt.title('Scatterplot for the Association Between Income per Person and Suicide Rate')
scat3 = seaborn.regplot(x="alcconsumption", y="suicideper100th", fit_reg=True, data=data)
plt.xlabel('Alcohol usage')
plt.ylabel('suicideper100th')
plt.title('Scatterplot for the Association Between Alcohol usage and Suicide Rate')
#Cleaning data
data_clean=data.dropna()
#Applying pearson correlation
print ('association between income per person and alcohol usage')
print (scipy.stats.pearsonr(data_clean['incomeperperson'], data_clean['alcconsumption']))
print ('association between income per person and suicide rate')
print (scipy.stats.pearsonr(data_clean['incomeperperson'], data_clean['suicideper100th']))
print ('association between alcohol usage and suicide rate')
print (scipy.stats.pearsonr(data_clean['alcconsumption'], data_clean['suicideper100th']))
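# Note: scipy.stats.pearsonr returns a (coefficient, p-value) pair, so each
# print above shows Pearson's r followed by its two-tailed p-value. For
# samples x and y,
#     r = sum((x_i - mean(x)) * (y_i - mean(y)))
#         / sqrt(sum((x_i - mean(x))**2) * sum((y_i - mean(y))**2))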
|
"""Tests for the classifiers of p1-FP."""
import pytest
import numpy as np
from lab.classifiers.p1fp import KerasP1FPClassifierC
try:
from lab.classifiers.p1fp import P1FPClassifierC
except ImportError:
USE_TFLEARN = False
else:
USE_TFLEARN = True
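# The try/except above degrades gracefully when the optional tflearn-backed
# classifier is unavailable; USE_TFLEARN then gates the corresponding test at
# collection time.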
if USE_TFLEARN:
@pytest.mark.slow
def test_p1fpclassifierc(train_test_sizes):
"""Test that it performs classification."""
x_train, x_test, y_train, y_test = train_test_sizes
classifier = P1FPClassifierC(n_epoch=1)
classifier.fit(x_train, y_train)
prediction = classifier.predict(x_test)
assert prediction.shape == (y_test.size, )
assert np.all(np.isin(np.unique(prediction), np.unique(y_train)))
prob_prediction = classifier.predict_proba(x_test)
assert prob_prediction.shape == (y_test.size, np.unique(y_test).size)
@pytest.mark.slow
def test_keras_p1fpclassifierc(train_test_sizes):
"""Test that it performs classification."""
x_train, x_test, y_train, y_test = train_test_sizes
classifier = KerasP1FPClassifierC(
n_features=5000, n_classes=3, epochs=10)
classifier.fit(x_train, y_train)
assert classifier.score(x_test, y_test) > 0.8
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import uuid
from unittest.mock import MagicMock
import pytest
import salt
import saltext.vmware.modules.esxi as esxi_mod
import saltext.vmware.states.esxi as esxi
@pytest.fixture
def dry_run():
setattr(esxi, "__opts__", {"test": True})
yield
setattr(esxi, "__opts__", {"test": False})
@pytest.fixture
def user_add_error():
esxi.__salt__["vmware_esxi.add_user"] = MagicMock(
side_effect=salt.exceptions.SaltException("add error")
)
@pytest.fixture
def user_update_error():
esxi.__salt__["vmware_esxi.update_user"] = MagicMock(
side_effect=salt.exceptions.SaltException("update error")
)
@pytest.fixture
def user_remove_error():
esxi.__salt__["vmware_esxi.remove_user"] = MagicMock(
side_effect=salt.exceptions.SaltException("remove error")
)
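# The three fixtures above patch the vmware_esxi execution-module calls with
# MagicMock(side_effect=...), so the state runs below hit a simulated
# SaltException instead of a real ESXi host.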
def test_user_present_absent(patch_salt_globals):
"""
Test scenarios for user_present state run
"""
user_name = "A{}".format(uuid.uuid4())
random_user = "Random{}".format(uuid.uuid4())
password = "Secret@123"
# create a new user
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"]
for host in ret["changes"]:
assert ret["changes"][host]["new"]["name"] == user_name
# update the user
ret = esxi.user_present(name=user_name, password=password, description="new desc")
assert ret["result"]
for host in ret["changes"]:
assert ret["changes"][host]["new"]["name"] == user_name
assert ret["changes"][host]["new"]["description"] == "new desc"
# Remove the user
ret = esxi.user_absent(name=user_name)
assert ret["result"]
for host in ret["changes"]:
assert ret["changes"][host][user_name] is True
# Remove a non-existent user
ret = esxi.user_absent(name=random_user)
assert ret["result"] is None
assert not ret["changes"]
def test_user_add_error(patch_salt_globals, user_add_error):
"""
Test scenarios for user add error
"""
user_name = "A{}".format(uuid.uuid4())
password = "Secret@123"
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"] is False
assert not ret["changes"]
assert "add error" in ret["comment"]
def test_user_remove_error(patch_salt_globals, user_remove_error):
"""
Test scenarios for user remove error
"""
# Remove the user
user_name = "A{}".format(uuid.uuid4())
password = "Secret@123"
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"] is True
ret = esxi.user_absent(name=user_name)
assert ret["result"] is False
assert not ret["changes"]
assert "remove error" in ret["comment"]
def test_user_update_error(patch_salt_globals, user_update_error):
"""
Test scenarios for user update error
"""
# Create the user, then attempt an update
user_name = "A{}".format(uuid.uuid4())
password = "Secret@123"
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"] is True
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"] is False
assert not ret["changes"]
assert "update error" in ret["comment"]
def test_user_present_absent_dry_run(vmware_datacenter, service_instance, dry_run):
"""
Test scenarios for vmware_esxi.user_present state run with test=True
"""
user_name = "A{}".format(uuid.uuid4())
random_user = "Random{}".format(uuid.uuid4())
password = "Secret@123"
# create a new user
ret = esxi.user_present(name=user_name, password=password)
assert ret["result"] is None
assert not ret["changes"]
assert ret["comment"].split()[6]
# update the user
ret = esxi.user_present(name=user_name, password=password, description="new desc")
assert ret["result"] is None
assert not ret["changes"]
assert ret["comment"].split()[11]
    # Add a user with the execution module, then check dry-run removal
ret = esxi_mod.add_user(
user_name=user_name, password=password, service_instance=service_instance
)
ret = esxi.user_absent(name=user_name)
assert ret["result"] is None
assert not ret["changes"]
assert "will be deleted" in ret["comment"]
# Remove a non-existent user
ret = esxi.user_absent(name=random_user)
assert ret["result"] is None
assert not ret["changes"]
assert "will be deleted on 0 host" in ret["comment"]
|
from unittest import TestCase
from neo.Prompt.InputParser import InputParser
class TestInputParser(TestCase):
input_parser = InputParser()
def test_simple_whitespace_separation(self):
command, arguments = self.input_parser.parse_input("this is a simple test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["is", "a", "simple", "test"])
def test_keeping_double_quoted_strings_together(self):
command, arguments = self.input_parser.parse_input("this \"is a simple\" test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["\"is a simple\"", "test"])
def test_keeping_single_quoted_strings_together(self):
command, arguments = self.input_parser.parse_input("this 'is a simple' test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["'is a simple'", "test"])
def test_keeping_bracket_elements_together(self):
command, arguments = self.input_parser.parse_input("this [is a simple] test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["[is a simple]", "test"])
def test_keeping_brackets_and_strings_together(self):
command, arguments = self.input_parser.parse_input("this [is \"a simple\"] test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["[is \"a simple\"]", "test"])
def test_unmatched_brackets(self):
command, arguments = self.input_parser.parse_input("this [is \"a simple\" test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["[is \"a simple\" test"])
def test_unmatched_single_quotes(self):
command, arguments = self.input_parser.parse_input("this is 'a simple test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["is", "'a", "simple", "test"])
def test_unmatched_double_quotes(self):
command, arguments = self.input_parser.parse_input("this is \"a simple test")
self.assertEqual(command, "this")
self.assertEqual(arguments, ["is", "\"a", "simple", "test"])
def test_nested_lists(self):
command, arguments = self.input_parser.parse_input('sc build_run sc.py False False False 0210 01 2 ["notused",["helloworld"]]')
self.assertEqual(command, "sc")
self.assertEqual(arguments, ['build_run', 'sc.py', 'False', 'False', 'False', '0210', '01', '2', '["notused",["helloworld"] ]'])
def test_nested_lists_2(self):
command, arguments = self.input_parser.parse_input('test ["notused",["helloworld", 1, ["a", 1]]]')
self.assertEqual(command, "test")
self.assertEqual(arguments, ['["notused",["helloworld", 1, ["a", 1] ]]'])
def test_python_bytearrays(self):
command, arguments = self.input_parser.parse_input("testinvoke bytearray(b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacI)') b'\xaf\x12\xa8h{\x14\x94\x8b\xc4\xa0\x08\x12\x8aU\nci[\xc1\xa5'")
self.assertEqual(command, "testinvoke")
self.assertEqual(arguments, ["bytearray(b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacI)')", "b'\xaf\x12\xa8h{\x14\x94\x8b\xc4\xa0\x08\x12\x8aU\nci[\xc1\xa5'"])
def test_python_bytearrays_in_lists(self):
command, arguments = self.input_parser.parse_input("testinvoke f8d448b227991cf07cb96a6f9c0322437f1599b9 transfer [bytearray(b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacI)'), bytearray(b'\xaf\x12\xa8h{\x14\x94\x8b\xc4\xa0\x08\x12\x8aU\nci[\xc1\xa5'), 1000]")
self.assertEqual(command, "testinvoke")
self.assertEqual(arguments, ["f8d448b227991cf07cb96a6f9c0322437f1599b9", "transfer", "[bytearray(b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacI)'), bytearray(b'\xaf\x12\xa8h{\x14\x94\x8b\xc4\xa0\x08\x12\x8aU\nci[\xc1\xa5'), 1000]"])
def test_attribute_spacing(self):
command, arguments = self.input_parser.parse_input('wallet send neo Ae7bSUtT5Qvpkh7473q9FsxT4tSv5KK6dt 100 --tx-attr=[{"usage": 0x90,"data":"my brief description"}]')
self.assertEqual(command, "wallet")
self.assertEqual(arguments, ['send', 'neo', 'Ae7bSUtT5Qvpkh7473q9FsxT4tSv5KK6dt', '100', '--tx-attr=[{"usage": 0x90,"data":"my brief description"}]'])
|
# Generated by Django 2.1.1 on 2018-09-19 19:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resume', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='alt_phone',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
|
#
# PySNMP MIB module TIMETRA-SAS-IEEE8021-CFM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TIMETRA-SAS-IEEE8021-CFM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:14:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
dot1agCfmMepEntry, = mibBuilder.importSymbols("IEEE8021-CFM-MIB", "dot1agCfmMepEntry")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Counter32, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Gauge32, iso, Counter64, Integer32, MibIdentifier, ObjectIdentity, NotificationType, TimeTicks, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Gauge32", "iso", "Counter64", "Integer32", "MibIdentifier", "ObjectIdentity", "NotificationType", "TimeTicks", "Unsigned32")
TruthValue, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString")
timetraSASModules, timetraSASObjs, timetraSASConfs = mibBuilder.importSymbols("TIMETRA-SAS-GLOBAL-MIB", "timetraSASModules", "timetraSASObjs", "timetraSASConfs")
timetraSASIEEE8021CfmMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 6527, 6, 2, 1, 1, 11))
timetraSASIEEE8021CfmMIBModule.setRevisions(('1910-01-01 00:00',))
if mibBuilder.loadTexts: timetraSASIEEE8021CfmMIBModule.setLastUpdated('0902280000Z')
if mibBuilder.loadTexts: timetraSASIEEE8021CfmMIBModule.setOrganization('Alcatel')
tmnxSASDot1agMIBObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11))
tmnxSASDot1agMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7))
tmnxSASDot1agCfmMep = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 1))
tmnxSASDot1agNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 2))
tmnxSASDot1agNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 2, 1))
tmnxDot1agCfmMepExtnTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 1, 1), )
if mibBuilder.loadTexts: tmnxDot1agCfmMepExtnTable.setStatus('current')
tmnxDot1agCfmMepExtnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 1, 1, 1), )
dot1agCfmMepEntry.registerAugmentions(("TIMETRA-SAS-IEEE8021-CFM-MIB", "tmnxDot1agCfmMepExtnEntry"))
tmnxDot1agCfmMepExtnEntry.setIndexNames(*dot1agCfmMepEntry.getIndexNames())
if mibBuilder.loadTexts: tmnxDot1agCfmMepExtnEntry.setStatus('current')
tmnxDot1agCfmMepSendAisOnPortDown = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 1, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxDot1agCfmMepSendAisOnPortDown.setStatus('current')
tmnxDot1agCfmMepControlSapTag = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 2, 11, 1, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxDot1agCfmMepControlSapTag.setStatus('current')
tmnxSASDot1agCfmCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7, 1))
tmnxSASDot1agCfmGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7, 2))
tmnxSASDot1agCfmComplianceV2v0 = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7, 1, 2)).setObjects(("TIMETRA-SAS-IEEE8021-CFM-MIB", "tmnxSASDot1agCfmMepGroupV2v0"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSASDot1agCfmComplianceV2v0 = tmnxSASDot1agCfmComplianceV2v0.setStatus('current')
tmnxSASDot1agCfmMepGroupV2v0 = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7, 2, 1)).setObjects(("TIMETRA-SAS-IEEE8021-CFM-MIB", "tmnxDot1agCfmMepSendAisOnPortDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSASDot1agCfmMepGroupV2v0 = tmnxSASDot1agCfmMepGroupV2v0.setStatus('current')
tmnxSASDot1agCfmMepGroupV4v0 = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 6, 2, 2, 1, 7, 2, 2)).setObjects(("TIMETRA-SAS-IEEE8021-CFM-MIB", "tmnxDot1agCfmMepSendAisOnPortDown"), ("TIMETRA-SAS-IEEE8021-CFM-MIB", "tmnxDot1agCfmMepControlSapTag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSASDot1agCfmMepGroupV4v0 = tmnxSASDot1agCfmMepGroupV4v0.setStatus('current')
mibBuilder.exportSymbols("TIMETRA-SAS-IEEE8021-CFM-MIB", tmnxSASDot1agNotifications=tmnxSASDot1agNotifications, tmnxDot1agCfmMepExtnEntry=tmnxDot1agCfmMepExtnEntry, tmnxSASDot1agCfmCompliances=tmnxSASDot1agCfmCompliances, tmnxSASDot1agNotificationsPrefix=tmnxSASDot1agNotificationsPrefix, tmnxSASDot1agCfmMep=tmnxSASDot1agCfmMep, tmnxDot1agCfmMepControlSapTag=tmnxDot1agCfmMepControlSapTag, tmnxDot1agCfmMepSendAisOnPortDown=tmnxDot1agCfmMepSendAisOnPortDown, tmnxSASDot1agCfmMepGroupV2v0=tmnxSASDot1agCfmMepGroupV2v0, tmnxSASDot1agCfmMepGroupV4v0=tmnxSASDot1agCfmMepGroupV4v0, tmnxSASDot1agMIBObjs=tmnxSASDot1agMIBObjs, tmnxSASDot1agMIBConformance=tmnxSASDot1agMIBConformance, PYSNMP_MODULE_ID=timetraSASIEEE8021CfmMIBModule, timetraSASIEEE8021CfmMIBModule=timetraSASIEEE8021CfmMIBModule, tmnxDot1agCfmMepExtnTable=tmnxDot1agCfmMepExtnTable, tmnxSASDot1agCfmGroups=tmnxSASDot1agCfmGroups, tmnxSASDot1agCfmComplianceV2v0=tmnxSASDot1agCfmComplianceV2v0)
|
from minerva.proxy.checker import Checker
from minerva.proxy.scraper import Scraper
|
import komand
from .schema import RetrieveInput, RetrieveOutput, Component
# Custom imports below
class Retrieve(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='retrieve',
description=Component.DESCRIPTION,
input=RetrieveInput(),
output=RetrieveOutput())
def run(self, params={}):
client = self.connection.client
#
try:
client.session_create()
except Exception as e:
raise Exception("Unable to connect to OTRS webservice! Please check your connection information and \
that you have properly configured OTRS webservice. Information on configuring the webservice can be found\
in the Connection help")
clean_data = {}
try:
ticket = client.ticket_get_by_id(params.get("ticket_id"), articles=True, attachments=True, dynamic_fields=True)
ticket_data = ticket.to_dct()
clean_data = komand.helper.clean(ticket_data)
except Exception as e:
self.logger.error("Ticket may not exist, please check to make sure the ticket exists ", e)
raise
# Type formatting for other actions
try:
cleaned_articles = []
articles = clean_data["Ticket"]["Article"]
for article in articles:
if "IsVisibleForCustomer" in article:
article["IsVisibleForCustomer"] = int(article["IsVisibleForCustomer"])
if "TicketID" in article:
article["TicketID"] = int(article["TicketID"])
if "NoAgentNotify" in article:
article["NoAgentNotify"] = int(article["NoAgentNotify"])
cleaned_articles.append(article)
if "Attachment" in article:
cleaned_attachment = []
for attachment in article["Attachment"]:
attachment["FilesizeRaw"] = int(attachment["FilesizeRaw"])
cleaned_attachment.append(attachment)
article["Attachment"] = cleaned_attachment
clean_data["Ticket"]["Article"] = cleaned_articles
self.logger.info(clean_data)
if clean_data["Ticket"].get("DynamicField"):
clean_df = []
dynamicfields = clean_data["Ticket"].pop("DynamicField")
for dynamicfield in dynamicfields:
                    # check if the value is a str or int and convert it to a list of strings
if "Value" in dynamicfield:
if isinstance(dynamicfield["Value"], (str, int)):
dynamicfield["Value"] = [str(dynamicfield["Value"])]
clean_df.append(dynamicfield)
clean_data["Ticket"]["DynamicField"] = clean_df
except Exception as e:
self.logger.error("Ticket {} missing Article data! Unable to format data".format(str(params.get("ticket_id"))), e)
raise
try:
clean_data["Ticket"]["TicketID"] = int(clean_data["Ticket"]["TicketID"])
except Exception as e:
self.logger.error("Ticket {} missing Ticket ID!".format(str(params.get("ticket_id"))), e)
raise
return clean_data
|
from __future__ import print_function
import ping
import socket
class Traceroute:
def __init__(self, dest_addr, timeout=2, max_ttl=30, counts=4):
self.ping = ping.Ping()
self.dest_addr = dest_addr
self.timeout = timeout
self.max_ttl = max_ttl
self.counts = counts
self.traceroute()
def traceroute(self):
        # traceroute logic: probe with increasing TTL until the final destination is reached
for ttl in range(1, self.max_ttl):
rtt = []
address = "?"
for counter in range(1, self.counts):
try:
delay, addr, is_final, reached = self.ping.prepare_ping(self.dest_addr, self.timeout, ttl)
                    if not addr[0] == "*":  # no timeout occurred
address = addr[0]
rtt.append(delay)
except socket.gaierror as e:
print ("failed. (socket error: '%s')" % e[1])
break
print ("".join(["#", str(ttl), " -> (", address, ") "]), *rtt)
            if is_final:  # reached the final destination
break
if __name__ == '__main__':
Traceroute("netvasco.com.br")
|
"""fix affaire abandon default value
Revision ID: 5a8069c68433
Revises: ee79f1259c77
Create Date: 2021-09-06 16:28:58.437853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a8069c68433'
down_revision = 'ee79f1259c77'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
|
SCHEMA_VERSION = "0.1"
# The key to store / obtain cluster metadata.
CLUSTER_METADATA_KEY = b"CLUSTER_METADATA"
# The name of a json file where usage stats will be written.
USAGE_STATS_FILE = "usage_stats.json"
USAGE_STATS_HEADS_UP_MESSAGE = (
"Usage stats collection will be enabled by default in the next release. "
"See https://github.com/ray-project/ray/issues/20857 for more details."
)
|
# coding: utf-8
import marisa_trie
import pickle
import math
#tf_trie = marisa_trie.Trie()
#tf_trie.load('my_trie_copy.marisa')
#data = open('tf_trie.txt', 'rb')
data = open('tf_trie.txt', 'rb')
tf_trie = pickle.load(data)
isf_data = open('isf_trie.txt', 'rb')
#isf_data = open('isf_trie.pickle', 'rb')
isf_trie = pickle.load(isf_data)
words = [u"ワイン", u"ロゼ", u"ロゼワイン", u"ビール", u"ヱビスビール", u"ヱビス", u"ワ", u"リング"]
for word in words:
print word + str(1.0 * tf_trie[word][0][0] * 1.0 / isf_trie[word][0][0])
print tf_trie[word][0][0]
print isf_trie[word][0][0]
def extract_segmented_substrings(word):
substrings = []
for i in range(len(word)):
for j in range(i+1, len(word) + 1):
substrings.append(word[i:j])
return substrings
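# e.g. extract_segmented_substrings(u"abc") -> [u"a", u"ab", u"abc", u"b", u"bc", u"c"]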
compound_words = [u'プログラマーコンテスト', u'ロゼワイン', u'ヱビスビール', u'クラフトビール', u'ワインバーグ', u'スマホケース', u'フレーバードワイン', u'スパークリングワイン', u'スパイスライス', u'メタリックタトゥーシール', u'コリアンダースパイシーサラダ', u'デイジーダック', u'ドイトンコーヒー', u'ワンタッチタープテント', u'タピオカジュース', u'ロックフェス', u'ロープライス', u'ガソリンスタンド', u'コピペブログ', u'マイクロソフトオフィス', u'ブラキッシュレッド', u'ウォーターサーバ', u'ハッシュドビーフソース', u'ワンダースワン', u'トンコツラーメン', u'トラッキングスパム', u'ジャンクフード', u'アンチョビパスタ', u'グーグルマップ', u'ソーシャルネットワーキングサービス', u'ライブドアニュース', u'サントリービール', u'カスタマーサービス', u'グリーンスムージーダイエット', u'マジリスペクト', u'ユーザカンファレンス']
for word in compound_words:
substrings = extract_segmented_substrings(word)
print substrings
for string in substrings:
if string in tf_trie and string in isf_trie:
print string + str(1.0 * tf_trie[string][0][0] * 1.0 / isf_trie[string][0][0])
|
import time
import os
import youtube_dl
import cheesepi as cp
import Task
logger = cp.config.get_logger(__name__)
class Dash(Task.Task):
# construct the process and perform pre-work
def __init__(self, dao, spec):
Task.Task.__init__(self, dao, spec)
self.spec['taskname'] = "dash"
        if 'source' not in spec:
self.spec['source'] = "http://www.youtube.com/watch?v=_OBlgSz8sSM"
# actually perform the measurements, no arguments required
def run(self):
logger.info("Dash download: %s @ %f, PID: %d" % (self.spec['source'], time.time(), os.getpid()))
self.measure()
    # measure and record function
def measure(self):
self.spec['start_time'] = cp.utils.now()
self.perform()
self.spec['end_time'] = cp.utils.now()
#print "Output: %s" % op_output
#logger.debug(op_output)
        if 'download_speed' not in self.spec:
self.spec['download_speed'] = self.spec['downloaded'] /(self.spec['end_time']-self.spec['start_time'])
self.dao.write_op(self.spec['taskname'], self.spec)
def perform(self):
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'logger': logger,
'progress_hooks': [self.callback],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
ydl.download([self.spec['source']])
except Exception as e:
logger.error("Problem with Dash download: "+str(e))
#self.spec['status'] = "error"
pass
def callback(self, stats):
#logger.info(stats)
if stats['status'] == 'finished':
#print stats
if 'downloaded_bytes' in stats:
self.spec['downloaded'] = stats['downloaded_bytes']
else:
self.spec['downloaded'] = stats['total_bytes']
if 'elapsed' in stats:
self.spec['download_speed'] = self.spec['downloaded'] / stats['elapsed']
try:
# avoid cluttering the filesystem
os.remove(stats['filename'])
pass
except Exception as e:
logger.error("Problem removing Dash.py Youtube file %s: %s" % (stats['filename'], str(e)))
if __name__ == "__main__":
#general logging here? unable to connect etc
dao = cp.config.get_dao()
spec = {'source':'http://www.youtube.com/watch?v=_OBlgSz8sSM'}
dash_task = Dash(dao, spec)
dash_task.run()
|
import logging
import multiprocessing
import time
from rpi.logging.listener import LoggingListener
from rpi.network.server import Server
# from rpi.sensors.accelerometer import Accelerometer
from rpi.sensors.accelerometer_i2c import Accelerometer
from rpi.sensors.pressure import Pressure
from rpi.sensors.thermometer import Thermometer
from shared.customlogging.errormanager import ErrorManager
from shared.customlogging.handler import CustomQueueHandler
if __name__ == '__main__':
queue = multiprocessing.Queue(-1) # Central queue for the logs
logListen = LoggingListener(queue) # Start worker which will actually log everything
logListen.start()
# Setup logging for main process and all child processes
h = CustomQueueHandler(queue)
root = logging.getLogger()
root.addHandler(h)
root.setLevel(logging.INFO)
# Next lines starts all of the other processes and monitor them in case they quit
processClassesList = [Server, Thermometer, Pressure, Accelerometer]
processes = dict()
for processClass in processClassesList:
p = processClass()
p.start()
processes[processClass] = p
em = ErrorManager(__name__)
while True:
time.sleep(5)
for processClass, process in processes.items():
if not process.is_alive():
em.error('The process for {} exited! Trying to restart it...'.format(processClass.__name__),
processClass.__name__)
p = processClass()
p.start()
processes[processClass] = p
else:
em.resolve('{} started successfully'.format(processClass.__name__), processClass.__name__, False)
|
from collections import Counter, defaultdict
import itertools
import math as m
test = """3,4,3,1,2"""
def part1(data, daymax=80):
for day in range(daymax):
n = []
for i in range(len(data)):
if data[i] == 0:
data[i] = 6
n.append(8)
else:
data[i] -= 1
data.extend(n)
return len(data)
def part2(_data, daymax=80):
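    # Bucket approach: only the number of fish at each timer value (0-8) is tracked,
    # so each simulated day costs O(9) work instead of growing a list exponentially.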
data = defaultdict(int)
for d in _data:
data[d] += 1
for day in range(daymax):
new = defaultdict(int)
new[8] = data[0]
new[6] += data[0]
for i in range(1, 9):
new[i-1] += data.get(i, 0)
data = new
return sum(data.values())
def prep(data):
return [int(x) for x in data.split(',')]
if __name__ == '__main__':
print("part1 test", part2(prep(test), daymax=80))
print("part1 real", part2(prep(open('in06.txt').read())))
print("part2 test", part2(prep(test), daymax=256))
print("part2 real", part2(prep(open('in06.txt').read()), daymax=256))
|
classTemplate = \
"""/*
* This file is generated. Please do not modify it manually.
* If you want to update this file, run the generator again.
*
*/
package {package};
{imports}
public class Alias implements AliasInterface {{
@Override
public Map<String, String> getFileMap() {{
return File.FILE_PATH_MAP;
}}
{propClass}
{enumClasses}
}}
"""
propClassTemplate = \
"""
public static class File {{
{propNames}
public static final Map<String, String> FILE_PATH_MAP;
static {{
Map<String, String> {fileMapVar} = new HashMap<>();
{propFilesMap}
FILE_PATH_MAP = Collections.unmodifiableMap({fileMapVar});
}}
}}
"""
enumClassTemplate = \
"""
public enum Ref{filename} {{
{enums}
}}
"""
|
import mailer as m
import consoleLogger as cl
import fileLogger as fl
'''
Created by: Richard Payne
Created on: 05/08/17
Desc: Takes string from connection and extracts username, password
and IP address of the attempted login.
'''
def format(ip, usr, pw):
ip = ip
usr = usr.rsplit()
pw = pw.rsplit()
if not usr and not pw:
pass
elif not usr:
pass
elif not pw:
pw = "No password provided."
m.send(ip, str(usr[0]), pw)
cl.log(ip, str(usr[0]), pw)
fl.log(str(usr[0]), pw)
else:
m.send(ip, str(usr[0]), str(pw[0]))
cl.log(ip, str(usr[0]), str(pw[0]))
fl.log(str(usr[0]), str(pw[0]))
|
import logging
from collections import OrderedDict, defaultdict
from decimal import Decimal as D
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models
from django.utils import timezone
from zazi.core import rounding, time as timing
from zazi.core.base import BaseModel
from zazi.core.utils import generate_id
from .enums import \
LoanStatus, LoanTransactionType, LoanProductType, \
PaymentPlatform, LoanInterestMethod, LoanTransactionStatus, \
LoanInterestRateAccrualSchedule, LoanAllocationItem, LoanProfileStatus
#----------------
logger = logging.getLogger(__name__)
#----------------
class LoanProduct(BaseModel):
name = models.CharField(max_length=25)
product_id = models.CharField(max_length=25, default=generate_id, null=True)
payment_platform = models.PositiveSmallIntegerField(choices=PaymentPlatform.choices())
product_type = models.PositiveSmallIntegerField(choices=LoanProductType.choices())
max_loan_limit = models.DecimalField(max_digits=18, decimal_places=4, default=0)
# Interest
interest_method = models.PositiveSmallIntegerField(choices=LoanInterestMethod.choices())
interest_rate = models.PositiveSmallIntegerField(default=0)
interest_rate_accrual_schedule = models.PositiveSmallIntegerField(choices=LoanInterestRateAccrualSchedule.choices())
@property
def allocation_order(self):
return [
LoanAllocationItem.LIABILITY,
LoanAllocationItem.PENALTY,
LoanAllocationItem.FEES,
LoanAllocationItem.INTEREST,
LoanAllocationItem.PRINCIPAL ]
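    # Hypothetical illustration of how this ordering might be consumed; "balances"
    # and "apply_payment" are made-up names, not part of this codebase:
    #   for item in product.allocation_order:
    #       amount = apply_payment(item, balances[item], amount)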
#----------------
class LoanProfile(BaseModel):
profile_id = models.CharField(max_length=25, default=generate_id)
effective_loan_limit = models.DecimalField(max_digits=18, decimal_places=4, default=0)
user_account = models.ForeignKey('users.UserAccount', models.SET_NULL, null=True, related_name='loan_profile')
# risk_classification = models.PositiveSmallIntegerField(choices=LoanRiskClassification.choices())
status = models.PositiveSmallIntegerField(choices=LoanProfileStatus.choices())
def __str__(self):
return f"LoanProfile {self.profile_id} for user {self.user_account.user.username}"
def as_dict(self):
try:
identity = self.user_account.identities.get()
except ObjectDoesNotExist as e:
logger.exception(e)
identity = None
except MultipleObjectsReturned as e:
logger.exception(e)
identity = self.user_account.identities.first()
return {
'profile_id': self.profile_id,
'user_account': self.user_account.as_dict(),
'identity': (identity and identity.as_dict()),
'loan_limit': self.loan_limit,
'loan_accounts': [
l.as_dict()
for l in self.loan_accounts.filter(status=LoanStatus.ACTIVE)
]
}
@property
def loan_limit(self):
return self.effective_loan_limit
@property
def outstanding_balance(self):
return sum(
loan_account.outstanding_balance
for loan_account in self.loan_accounts.all())
class LoanApplication(BaseModel):
application_id = models.CharField(max_length=25, default=generate_id)
loan_profile = models.ForeignKey('LoanProfile', models.SET_NULL, null=True)
payment_platform = models.PositiveSmallIntegerField(choices=PaymentPlatform.choices())
amount = models.DecimalField(decimal_places=2, max_digits=7)
applied_at = models.DateTimeField()
approved = models.NullBooleanField()
approved_at = models.DateTimeField(null=True)
approved_by = models.ForeignKey('users.UserAccount', models.SET_NULL, null=True)
def as_dict(self):
return OrderedDict(
application_id=self.application_id,
loan_profile=self.loan_profile.profile_id if self.loan_profile_id else None,
payment_platform=self.payment_platform,
amount=self.amount,
applied_at=self.applied_at,
approved=self.approved,
approved_at=self.approved_at,
approved_by=self.approved_by.account_id if self.approved_by_id else None)
#----------------
class LoanAccount(BaseModel):
account_id = models.CharField(max_length=25, default=generate_id)
product = models.ForeignKey('LoanProduct', models.SET_NULL, null=True)
#----------------
loan_profile = models.ForeignKey('LoanProfile', models.SET_NULL, null=True, related_name='loan_accounts')
#----------------
amount_disbursed = models.DecimalField(max_digits=18, decimal_places=4, default=D('0.0'), blank=True)
date_disbursed = models.DateTimeField(null=True, blank=True)
#----------------
last_repayment_date = models.DateTimeField(null=True, blank=True)
last_interest_accrual_date = models.DateTimeField(null=True, blank=True)
last_balance_update_date = models.DateTimeField(null=True)
status = models.PositiveSmallIntegerField(choices=LoanStatus.choices(), default=LoanStatus.PENDING_DISBURSEMENT)
is_active = models.BooleanField(default=False)
def __str__(self):
return "Loan Account: %s" % self.account_id
@property
def outstanding_balance(self):
try:
return self\
.current_balance\
.outstanding_balance
except AttributeError:
return D('0.0')
@property
def current_balance(self):
try:
return self\
.account_balances\
.filter(is_current=True)\
.get()
except ObjectDoesNotExist as e:
logger.exception(e)
return None
except MultipleObjectsReturned as e:
logger.exception(e)
self.account_balances\
.filter(is_current=True)\
.update(is_current=False)
latest = self\
.account_balances\
.latest('balance_as_at')
latest.is_current = True
latest.save()
return latest
def get_repayment_items(self):
return {
LoanAllocationItem.LIABILITY: self.current_balance.liability_balance,
LoanAllocationItem.PRINCIPAL: self.current_balance.principal_balance,
LoanAllocationItem.PENALTY: self.current_balance.penalties_balance,
LoanAllocationItem.FEES: self.current_balance.fees_balance,
LoanAllocationItem.INTEREST: self.current_balance.interest_balance }
def reset_account_balances(self, save=False):
self.last_repayment_date = None
self.last_interest_accrual_date = None
if save:
self.save()
@property
def disbursed_less_than_90_days_ago(self):
"""
"""
return timing._90_days_ago() < self.date_disbursed
@property
def disbursed_less_than_60_days_ago(self):
"""
"""
return timing._60_days_ago() < self.date_disbursed
@property
def disbursed_less_than_30_days_ago(self):
"""
"""
return timing._30_days_ago() < self.date_disbursed
@property
def disbursed_more_than_90_days_ago(self):
"""
"""
return timing._90_days_ago() > self.date_disbursed
@property
def disbursed_more_than_60_days_ago(self):
"""
"""
return timing._60_days_ago() > self.date_disbursed
@property
def disbursed_more_than_30_days_ago(self):
"""
"""
return timing._30_days_ago() > self.date_disbursed
def has_cleared_balance(self):
return self.outstanding_balance <= D('0.0')
#---------------
def pay_off_account(self, save=False):
# make entries to the ledger
if self.has_cleared_balance:
self.status = LoanStatus.PAID_OFF
self.last_repayment_date = timezone.now()
if save:
self.save()
#---------------
def as_dict(self):
current_balance = self.current_balance
_dict = dict(
account_id=self.account_id,
loan_limit=self.loan_profile.loan_limit,
date_disbursed=self.date_disbursed,
status=self.status,
is_active=self.is_active)
try:
_dict.update(current_balance=current_balance.as_dict())
except AttributeError as e:
logger.exception(e)
return _dict
class LoanTransaction(BaseModel):
transaction_id = models.CharField(max_length=30, unique=True, default=generate_id)
transaction_type = models.PositiveSmallIntegerField(choices=LoanTransactionType.choices(), null=True)
loan_account = models.ForeignKey('LoanAccount', models.CASCADE, null=True, related_name='loan_transactions')
amount = models.DecimalField(decimal_places=2, max_digits=7, default=D('0.0'))
initiated_at = models.DateTimeField(null=True)
processed_at = models.DateTimeField(null=True)
posted_at = models.DateTimeField(null=True)
status = models.PositiveSmallIntegerField(choices=LoanTransactionStatus.choices(), null=True)
def __str__(self):
return f"LoanTransaction: {LoanTransactionType(self.transaction_type).get_text()} of {self.amount} on account {self.loan_account}"
class LoanAccountBalance(BaseModel):
entry_id = models.CharField(max_length=25, default=generate_id)
loan_account = models.ForeignKey('LoanAccount', models.CASCADE, related_name='account_balances')
previous_balance = models.ForeignKey('self', models.SET_NULL, null=True)
principal_paid_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
interest_paid_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
fees_paid_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
penalties_paid_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
principal_due_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
interest_accrued_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
fees_accrued_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
penalties_accrued_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
liability_credit_balance_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
liability_debit_balance_bf = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
principal_paid = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
interest_paid = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
fees_paid = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
penalties_paid = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
principal_due = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
interest_accrued = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
fees_accrued = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
penalties_accrued = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
liability_credit_balance = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
liability_debit_balance = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'), blank=True)
balance_as_at = models.DateTimeField(null=True)
is_current = models.NullBooleanField(null=True)
class Meta:
db_table = 'loan_account_balance'
def __str__(self):
return f"Loan Account Balance for {self.loan_account} as at {self.balance_as_at} UTC"
@property
def outstanding_balance(self):
outstanding_loan_balance = (
(self.principal_balance + self.fees_balance) +
self.interest_balance +
self.penalties_balance
)
return outstanding_loan_balance
@property
def principal_balance(self):
principal_balance = (
(self.principal_due_bf + self.principal_due) -
(self.principal_paid_bf + self.principal_paid))
return principal_balance - self.liability_balance
@property
def fees_balance(self):
return (
(self.fees_accrued_bf + self.fees_accrued) -
(self.fees_paid_bf + self.fees_paid))
@property
def interest_balance(self):
return (
(self.interest_accrued_bf + self.interest_accrued) -
(self.interest_paid_bf + self.interest_paid))
@property
def penalties_balance(self):
return (
(self.penalties_accrued_bf + self.penalties_accrued) -
(self.penalties_paid_bf + self.penalties_paid))
@property
def liability_balance(self):
return (
(self.liability_credit_balance_bf + self.liability_credit_balance) +
(self.liability_debit_balance_bf + self.liability_debit_balance))
def as_dict(self):
return OrderedDict(
entry_id=self.entry_id,
loan_account=self.loan_account.account_id,
previous_balance=self.previous_balance.entry_id if self.previous_balance_id else None,
outstanding_balance=self.outstanding_balance,
liability_balance=self.liability_balance,
principal_paid=(self.principal_paid_bf + self.principal_paid),
interest_paid=(self.interest_paid_bf + self.interest_paid),
fees_paid=(self.fees_paid_bf + self.fees_paid),
penalties_paid=(self.penalties_paid_bf + self.penalties_paid),
principal_due=(self.principal_due_bf + self.principal_due),
interest_accrued=(self.interest_accrued_bf + self.interest_accrued),
fees_accrued=(self.fees_accrued_bf + self.fees_accrued),
penalties_accrued=(self.penalties_accrued_bf + self.penalties_accrued),
balance_as_at=timezone.localtime(self.balance_as_at))
|
import psutil
from multiprocessing import Pool
from tqdm import tqdm
import numpy as np
import xarray as xr
def track_events(class_masks_xarray, minimum_time_length=5,
tc_drop_threshold=250, ar_drop_threshold=250,
future_lookup_range=1):
"""track AR and TC events across time
Keyword arguments:
class_masks_xarray -- the class masks as xarray, 0==Background, 1==TC, 2 ==AR
minimum_time_length -- the minimum number of time stamps an event ought to persist to be considered
tc_dop_threshold -- the pixel threshold below which TCs are droped
ar_dop_threshold -- the pixel threshold below which ARs are droped
future_lookup_range -- across how many time stamps events get stitched together
"""
class_masks = class_masks_xarray.values
# per timestamp, assign ids to connected components
global identify_components # make function visible to pool
def identify_components(time):
"""Returns an event mask with ids assigned to the connected components at time"""
class_mask = class_masks[time] # class masks of assigned time stamp
# data structure for ids of connected components
event_mask = np.zeros(np.shape(class_mask)).astype(np.int)
next_id = 1
for i in range(np.shape(class_mask)[0]):
for j in range(np.shape(class_mask)[1]):
class_type = class_mask[i][j]
if class_type != 0 and event_mask[i][j] == 0: # label connected component with new id with BFS
frontier = [(i, j)]
event_mask[i, j] = next_id
while len(frontier) > 0:
element = frontier.pop(0)
row = element[0]
col = element[1]
for neighbor_row in range(row-1, row+2):
neighbor_row = neighbor_row % event_mask.shape[0]
for neighbor_col in range(col-1, col+2):
neighbor_col = neighbor_col % event_mask.shape[1]
if class_mask[neighbor_row][neighbor_col] != class_type: # don't propagate to other type
continue
if event_mask[neighbor_row][neighbor_col] == 0: # not yet considered
event_mask[neighbor_row][neighbor_col] = next_id
frontier.append((neighbor_row, neighbor_col))
next_id = next_id + 1
return event_mask
    # parallelize the call to identify_components
print('identifying connected components..', flush=True)
pool = Pool(psutil.cpu_count(logical=False))
event_masks = np.array(pool.map(
identify_components,
range(len(class_masks)))).astype(np.int)
def size_is_smaller_threshold(mask, i, j, threshold):
"""Returns True iff the size of the connected component that (i, j) is part of is smaller threshold"""
visited = np.full(mask.shape, False)
component_class = mask[i][j]
frontier = [(i, j)]
visited[i][j] = True
count = 1
while len(frontier) > 0:
element = frontier.pop(0)
row = element[0]
col = element[1]
for neighbor_row in range(row-1, row+2):
neighbor_row = neighbor_row % mask.shape[0]
for neighbor_col in range(col-1, col+2):
neighbor_col = neighbor_col % mask.shape[1]
if visited[neighbor_row][neighbor_col] == True:
continue
if mask[neighbor_row][neighbor_col] == component_class:
visited[neighbor_row][neighbor_col] = True
frontier.append((neighbor_row, neighbor_col))
count = count + 1
if count >= threshold:
return False
return True
def drop_threshold(class_mask, i, j):
"""Returns the minimal size a connected component containting [i, j] must have to not get removed"""
if class_mask[i][j] == 1: #TC
return tc_drop_threshold
if class_mask[i][j] == 2: #AR
return ar_drop_threshold
def label_component(mask, i, j, new_label, threshold=None):
"""Labels a connected component
Labels the connected component at pixel [i, j] in mask as part of a component with new_label
        If a threshold is given and the size of the connected component is smaller than the threshold, the component is set to background (0).
        Returns True if the component was set to new_label, False if it was set to background"""
# apply thresholding
        if threshold is not None and size_is_smaller_threshold(mask, i, j, threshold):
label_component(mask, i, j, 0) # set component to background
return False
old_label = mask[i][j]
if old_label == 0:
return False
frontier = [(i, j)]
mask[i, j] = new_label
while len(frontier) > 0:
element = frontier.pop(0)
row = element[0]
col = element[1]
for neighbor_row in range(row-1, row+2):
neighbor_row = neighbor_row % mask.shape[0]
for neighbor_col in range(col-1, col+2):
neighbor_col = neighbor_col % mask.shape[1]
if mask[neighbor_row][neighbor_col] == old_label:
mask[neighbor_row][neighbor_col] = new_label
frontier.append((neighbor_row, neighbor_col))
return True
print('tracking components across time..', flush=True)
event_ids_per_time = [set() for x in range(len(event_masks))]
# per time stamp and per id, have a pixel pointing to each connected component
pixels_per_event_id_per_time = [{} for x in range(len(event_masks))] # one pixel per event Id
new_event_index = 1000
for time in tqdm(range(len(event_masks))):
class_mask = class_masks[time]
event_mask = event_masks[time]
for i in range(np.shape(event_mask)[0]):
for j in range(np.shape(event_mask)[1]):
id = event_mask[i][j]
# ignore background
if id == 0:
continue
# label new components
if id < 1000:
above_threshold = label_component(event_mask, i, j, new_event_index,
drop_threshold(class_mask, i, j))
if above_threshold:
id = new_event_index
new_event_index = new_event_index + 1
event_ids_per_time[time].add(id)
pixels_per_event_id_per_time[time][id] = [(i, j)]
else:
label_component(class_mask, i, j, 0) # set component class to background
continue # component removed, don't propagate across time
# label components in next time stamp(s) that are already present in this time stamp
for future_time in range(time+1, min(time+1+future_lookup_range, len(event_masks))):
future_event_mask = event_masks[future_time]
future_class_mask = class_masks[future_time]
if class_mask[i][j] == future_class_mask[i][j] \
and future_event_mask[i][j] < 100 \
and future_event_mask[i][j] != 0: # not removed yet
above_threshold = label_component(future_event_mask, i, j, id, drop_threshold(class_mask, i, j))
if above_threshold:
event_ids_per_time[future_time].add(id)
if id in pixels_per_event_id_per_time[future_time].keys():
                                # id was already in another connected component for this time stamp
                                # add another pixel so that all connected components with this id are pointed to
pixels_per_event_id_per_time[future_time][id].append((i, j))
else:
pixels_per_event_id_per_time[future_time][id] = [(i, j)]
else: # [i,j] removed from event_masks, also remove from class_masks
label_component(future_class_mask, i, j, 0)
    # removing connected components that appear for fewer than minimum_time_length time stamps
# finding time stamps per id
times_per_event_id = {}
for time in range(len(event_ids_per_time)):
for id in event_ids_per_time[time]:
if id in times_per_event_id:
times_per_event_id[id].append(time)
else:
times_per_event_id[id] = [time]
    # finding ids_for_removal, i.e. ids occurring for fewer than minimum_time_length time stamps
ids_for_removal = set()
for id in times_per_event_id.keys():
times = times_per_event_id[id]
if len(times) < minimum_time_length:
ids_for_removal.add(id)
# removing ids_for_removal
for id in ids_for_removal:
for time in times_per_event_id[id]:
for pixel in pixels_per_event_id_per_time[time][id]:
label_component(event_masks[time], pixel[0], pixel[1], 0)
label_component(class_masks[time], pixel[0], pixel[1], 0)
event_ids_per_time[time].discard(id)
del pixels_per_event_id_per_time[time][id]
del times_per_event_id[id]
# counting ARs and TCs
num_tcs = 0
num_ars = 0
for id in times_per_event_id.keys():
time = times_per_event_id[id][0]
pixel = pixels_per_event_id_per_time[time][id][0]
if class_masks[time][pixel[0]][pixel[1]] == 1:
num_tcs = num_tcs + 1
else:
num_ars = num_ars + 1
print('num TCs: ' + str(num_tcs))
print('num ARs: ' + str(num_ars))
event_masks_xarray = xr.DataArray(event_masks,
coords=class_masks_xarray.coords,
attrs=class_masks_xarray.attrs)
return event_masks_xarray
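# Usage sketch (hedged; the file name below is an assumption, not part of this module):
# track_events expects an integer xarray.DataArray with a leading time dimension in
# which 0==background, 1==TC, 2==AR, e.g.:
#   class_masks = xr.open_dataarray("class_masks.nc")
#   event_masks = track_events(class_masks, minimum_time_length=5)
#   event_masks.to_netcdf("event_masks.nc")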
|
from typing import List
import pydantic
class EnvConstants(pydantic.BaseSettings):
"""Environment constants.
    Constants should be set in a .env file at the project root, with keys
    matching the variable names below.
"""
API_KEY_TABLE_NAME: str
FIREHOSE_TABLE_NAME: str
SENTRY_DSN: str
SENTRY_ENVIRONMENT: str
EMAILS_ENABLED: bool
AWS_REGION: str = "us-east-1"
HUBSPOT_API_KEY: str
HUBSPOT_ENABLED: bool
# Hubspot portal id differs between environments. We have a separate
# hubspot portal for our dev environment.
HUBSPOT_PORTAL_ID: str
# GUID of hubspot form used for API signup
HUBSPOT_REG_FORM_GUID: str
EMAIL_BLOCKLIST: List[str]
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
class Config:
Constants: EnvConstants = None
@staticmethod
def init():
Config.Constants = EnvConstants()
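# Typical usage (assumption, not shown in this module): call Config.init() once at
# application startup, then read settings via Config.Constants, e.g.
# Config.Constants.AWS_REGION.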
|
import distutils.spawn
import os
from pathlib import Path
from pytest import fixture
from pytest_ngrok.install import install_bin
from pytest_ngrok.manager import NgrokContextManager
try:
from .django import * # noqa
except ImportError:
pass
def pytest_addoption(parser):
parser.addoption(
'--ngrok-bin',
default=distutils.spawn.find_executable('ngrok'),
help='path to ngrok [%default]'
)
parser.addoption(
'--ngrok-no-install',
action='store_true',
default=False,
help='Disable fetch ngrok binary from remote'
)
REMOTE_URL = 'https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip'
@fixture(scope='session')
def ngrok_install_url():
# TODO verify
return REMOTE_URL
@fixture(scope='session')
def ngrok_allow_install(request):
"""
    Allow installing ngrok from the remote URL. Default: True
"""
return not request.config.getoption('--ngrok-no-install', False)
@fixture(scope='session')
def ngrok_bin(request):
"""
    Path to the ngrok binary. By default: $HOME/.local/bin/ngrok
"""
ngrok_path = request.config.getoption('--ngrok-bin')
if not ngrok_path:
ngrok_path = os.path.join(Path.home(), '.local', 'bin', 'ngrok')
return ngrok_path
@fixture(scope='function')
def ngrok(ngrok_bin, ngrok_install_url, ngrok_allow_install):
"""
Usage:
```
def test_ngrok_context_manager(ngrok, httpserver):
httpserver.expect_request("/foobar").respond_with_data("ok")
with ngrok(httpserver.port) as remote_url:
assert 'ngrok.io' in str(remote_url)
_test_url = str(remote_url) + '/foobar'
assert urlopen(_test_url).read() == b'ok'
pytest.raises(HTTPError, urlopen, _test_url)
```
"""
if not os.path.exists(ngrok_bin):
if ngrok_allow_install:
install_bin(ngrok_bin, remote_url=ngrok_install_url)
else:
raise OSError("Ngrok %s bin not found!" % ngrok_bin)
managers = []
def _wrap(port=None):
manager = NgrokContextManager(ngrok_bin, port)
managers.append(manager)
return manager()
yield _wrap
for m in managers:
m.stop()
|
#!/usr/bin/env python
"""
Make the lg1 file for GOES-R type resolution overlaid on TEMPO with GEOS-R parking spot
"""
import numpy as np
from numpy import radians, degrees
from math import radians, cos, sin, asin, sqrt, tan, atan
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from geopy import distance, point
from dateutil.parser import parse as isoparser
from datetime import timedelta
def haversine(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r
def set_basemap(projection,clon,clat,elon,elat,lon_0=128.2,satellite_height=35785831.0):
# Default is GEMS
# GEO disk parked at 128.2E
# -----------------------
m = Basemap(projection=projection,lon_0=lon_0,resolution=None,
rsphere=(6378137.00,6356752.3142),
satellite_height = satellite_height)
cX, cY, cBOX,cbox = getCoords(m,clon,clat,'CENTER Coordinates')
eX, eY, eBOX,ebox = getCoords(m,elon,elat,'EDGE Coordinates')
# Plot Map
# --------
m = Basemap(projection=projection,lon_0=lon_0,resolution='l',
rsphere=(6378137.00,6356752.3142),
satellite_height = satellite_height,
llcrnrx=ebox[0],
llcrnry=ebox[1],
urcrnrx=ebox[2],
urcrnry=ebox[3])
return m
def map_(fig,m,data,cmap,minval,maxval,title,norm=None,format=None,elon=None,elat=None,
fill_color=None,bluemarble=None):
if (m.projection == 'lcc'):
if (bluemarble is not None):
m.warpimage(image=bluemarble)
else:
m.bluemarble()
im = m.pcolormesh(elon,elat,data,latlon=True,cmap=cmap,vmin=minval,vmax=maxval,norm=norm)
im.set_rasterized(True)
elif (m.projection == 'geos'):
im = m.imshow(data,cmap=cmap,vmin=minval,vmax=maxval,norm=norm)
m.drawcoastlines(color='white')
m.drawcountries(color='white')
m.drawstates(color='white')
meridian = np.arange(-125.,60.,5.)
parallels = np.arange(-10.,80.,5.)
m.drawparallels(parallels) # draw parallels
m.drawmeridians(meridian) # draw meridians
    meridian = np.delete(meridian, len(meridian)//2)
for i in np.arange(len(meridian)):
plt.annotate(np.str(meridian[i]),xy=m(meridian[i],20),xycoords='data')
for i in np.arange(len(parallels)):
plt.annotate(np.str(parallels[i]),xy=m(110,parallels[i]),xycoords='data')
if (m.projection == 'geos'):
if (fill_color is None):
fill_color = 'k' #'0.90'
m.drawmapboundary(color='k',fill_color=fill_color,linewidth=2.0)
plt.title(title, fontsize=20)
return im
def getCoords(m,lon,lat,name):
# No undefs
# ---------
I = (lon<1e14)&(lat<1e14)
X, Y = m(lon[I],lat[I])
J = (X<1e30)&(Y<1e30)
X, Y = X[J], Y[J]
XA, YA, XB, YB = m.llcrnrx, m.llcrnry, m.urcrnrx, m.urcrnry
DX, DY = XB-XA, YB-YA
XC, YC = (XA+XB)/2, (YA+YB)/2
Xa,Ya,Xb,Yb = (X.min(),Y.min(),X.max(),Y.max())
xa,ya,xb,yb = (X.min()-XC,Y.min()-YC,X.max()-XC,Y.max()-YC)
xa_ = (xa)/DX
xb_ = (xb)/DX
ya_ = (ya)/DY
yb_ = (yb)/DY
BBOX = (Xa, Ya, Xb, Yb)
Bbox = (xa,ya,xb,yb)
bbox = (xa_,ya_,xb_,yb_)
print
print name
print 'Native Bounding box: ', BBOX
print 'Recentered Bounding box: ', Bbox
print 'Normalized Bounding box: ', bbox
return (X,Y,BBOX,Bbox)
#---
def scanTimes(nNS,nEW,tBeg):
"""
Calculate TEMPO scantimes.
"""
tScan = np.array([tBeg + i * timedelta(seconds=2.85) for i in np.arange(nEW) ])
tScan = tScan[-1::-1] # flip times
return np.tile(tScan,(nNS,1))
#---
def writeNC(elon,elat,clon,clat,pixel_top,pixel_bottom,pixel_right,pixel_left,DT,fname):
# Save to netcdf file
ncOut = Dataset(fname,'w',format='NETCDF4_CLASSIC')
ncOut.institution = 'NASA/Goddard Space Flight Center'
ncOut.source = 'Global Model and Assimilation Office'
ncOut.history = 'Created from make_lg1_tempo_cld.py'
ncOut.references = 'none'
ncOut.comment = "This file contains TEMPO geolocation information for cloud modeling"
ncOut.contact = "Patricia Castellanos <patricia.castellanos@nasa.gov>"
ncOut.conventions = 'CF'
ncOut.sat_lat = 0.0
ncOut.sat_lon = lon_0
ncOut.sat_alt = satellite_height*1e-3
ncOut.Earth_radius = rsphere[0]*1e-3
ncOut.Earth_flattening = (rsphere[0] - rsphere[1])/rsphere[0]
ncOut.NCO = '0.2.1'
nNS,nEW = clon.shape
ns = ncOut.createDimension('ns',nNS)
ew = ncOut.createDimension('ew',nEW)
ns_e = ncOut.createDimension('ns_e',nNS+1)
ew_e = ncOut.createDimension('ew_e',nEW+1)
pc = ncOut.createDimension('pix_corner',4)
time = ncOut.createDimension('time',1)
# clat
varobj = ncOut.createVariable('clat','f4',('ns','ew',))
varobj.long_name = 'pixel center latitude'
varobj.units = 'degrees_north'
varobj.missing_value = 1e15
varobj[:] = clat
#clon
varobj = ncOut.createVariable('clon','f4',('ns','ew',))
varobj.long_name = 'pixel center longitude'
varobj.units = 'degrees_east'
varobj.missing_value = 1e15
varobj[:] = clon
# elat
varobj = ncOut.createVariable('elat','f4',('ns_e','ew_e',))
varobj.long_name = 'latitude at pixel edge'
varobj.units = 'degrees_north'
varobj.missing_value = 1e15
varobj[:] = elat
# elon
varobj = ncOut.createVariable('elon','f4',('ns_e','ew_e',))
varobj.long_name = 'longitude at pixel edge'
varobj.units = 'degrees_east'
varobj.missing_value = 1e15
varobj[:] = elon
# GRADS ew
varobj = ncOut.createVariable('ew','f4',('ew',))
varobj.long_name = 'pseudo longitude'
varobj.units = 'degrees_east'
#varobj[:] = np.linspace(lonmin,lonmax,nEW)
    varobj[:] = clon[nNS//2, :]
    # GRADS ns
varobj = ncOut.createVariable('ns','f4',('ns',))
varobj.long_name = 'pseudo latitude'
varobj.units = 'degrees_north'
#varobj[:] = np.linspace(latmin,latmax,nNS)
    varobj[:] = clat[:, nEW//2]
# pixel size
varobj = ncOut.createVariable('pix_size','f4',('pix_corner','ns','ew',))
varobj.long_name = 'pixel size'
varobj.units = 'km'
varobj.missing_value = 1e15
varobj[0,:,:] = pixel_top
varobj[1,:,:] = pixel_right
varobj[2,:,:] = pixel_bottom
varobj[3,:,:] = pixel_left
# time
varobj = ncOut.createVariable('time','f4',('time',))
varobj.long_name = 'time'
varobj.units = 'hours since 2006-06-01 12:00:00'
varobj.time_increment = int(10000)
varobj.begin_date = int(20060601)
varobj.begin_time = int(120000)
# scanTime
varobj = ncOut.createVariable('scanTime','f4',('ew',))
varobj.units = "seconds since 0001-01-01 00:00:00.0"
varobj[:] = DT
ncOut.close()
#---
if __name__ == '__main__':
outFile = 'goes-r.lg1.cld.invariant.'
inFile = 'tempo.lg1.cld.invariant.'
layout = '41'
projection = 'geos'
lon_0 = -75
lat_0 = 0.0
satellite_height = 35785831.0
rsphere = (6378137.00,6356752.3142)
#-------
## End of User Input
#-------
m = Basemap(projection=projection,lon_0=lon_0,resolution=None,
rsphere=(6378137.00,6356752.3142),
satellite_height = satellite_height)
# Read in TEMPO
ntiles = int(layout[0])*int(layout[1])
for tile in range(ntiles):
print 'Reading file',inFile + layout + str(tile) + '.nc4'
ncTempo = Dataset(inFile + layout + str(tile) + '.nc4')
clon = ncTempo.variables['clon'][:]
clat = ncTempo.variables['clat'][:]
pixel_top = np.squeeze(ncTempo.variables['pix_size'][0,:,:])
pixel_right = np.squeeze(ncTempo.variables['pix_size'][1,:,:])
pixel_bottom = np.squeeze(ncTempo.variables['pix_size'][2,:,:])
pixel_left = np.squeeze(ncTempo.variables['pix_size'][3,:,:])
if not hasattr(clon,'mask'):
print 'tile',tile
clon = np.ma.array(clon,mask=np.zeros(clon.shape).astype(bool))
if not hasattr(clat,'mask'):
clat = np.ma.array(clat,mask=np.zeros(clat.shape).astype(bool))
if not hasattr(pixel_top,'mask'):
pixel_top = np.ma.array(pixel_top,mask=np.zeros(pixel_top.shape).astype(bool))
if not hasattr(pixel_right,'mask'):
pixel_right = np.ma.array(pixel_right,mask=np.zeros(pixel_right.shape).astype(bool))
if not hasattr(pixel_bottom,'mask'):
pixel_bottom = np.ma.array(pixel_bottom,mask=np.zeros(pixel_bottom.shape).astype(bool))
if not hasattr(pixel_left,'mask'):
pixel_left = np.ma.array(pixel_left,mask=np.zeros(pixel_left.shape).astype(bool))
if tile == 0:
PP_bottom = np.ma.array(pixel_bottom,mask=np.zeros(pixel_bottom.shape).astype(bool))
else:
PP_bottom = np.ma.append(PP_bottom,pixel_bottom,axis=1)
X, Y = m(clon,clat)
I = (X == 1e30) | (Y == 1e30)
X.mask[I] = True
Y.mask[I] = True
clon.mask = X.mask | Y.mask
clat.mask = X.mask | Y.mask
pixel_top.mask = X.mask | Y.mask
pixel_bottom.mask = X.mask | Y.mask
pixel_left.mask = X.mask | Y.mask
pixel_right.mask = X.mask | Y.mask
elon = ncTempo.variables['elon'][:]
elat = ncTempo.variables['elat'][:]
if not hasattr(elon,'mask'):
elon = np.ma.array(elon,mask=np.zeros(elon.shape).astype(bool))
if not hasattr(elat,'mask'):
elat = np.ma.array(elat,mask=np.zeros(elat.shape).astype(bool))
if tile == 0:
EElon = np.ma.array(elon,mask=np.zeros(elon.shape).astype(bool))
else:
EElon = np.ma.append(EElon[:,0:-1],elon,axis=1)
X, Y = m(elon,elat)
I = (X == 1e30) | (Y == 1e30)
X.mask[I] = True
Y.mask[I] = True
elon.mask = X.mask | Y.mask
elat.mask = X.mask | Y.mask
mask_1 = elon.mask[0:-1,0:-1]
mask_2 = elon.mask[0:-1,1:]
mask_3 = elon.mask[1:,0:-1]
mask_4 = elon.mask[1:,1:]
clon.mask = mask_1 | mask_2 | mask_3 | mask_4
clat.mask = mask_1 | mask_2 | mask_3 | mask_4
pixel_top.mask = mask_1 | mask_2 | mask_3 | mask_4
pixel_bottom.mask = mask_1 | mask_2 | mask_3 | mask_4
pixel_left.mask = mask_1 | mask_2 | mask_3 | mask_4
pixel_right.mask = mask_1 | mask_2 | mask_3 | mask_4
DT = ncTempo.variables['scanTime'][:]
print 'writing file',outFile + layout + str(tile) + '.nc4'
writeNC (elon, elat, clon, clat, pixel_top, pixel_bottom, pixel_right, pixel_left, DT,
fname = outFile + layout + str(tile) + '.nc4')
if tile == 0:
P_bottom = pixel_bottom
P_right = pixel_right
Elon = elon
Elat = elat
else:
P_bottom = np.ma.append(P_bottom,pixel_bottom,axis=1)
P_right = np.ma.append(P_right,pixel_right,axis=1)
Elon = np.ma.append(Elon[:,0:-1],elon,axis=1)
Elat = np.ma.append(Elat[:,0:-1],elat,axis=1)
ncTempo.close()
# ---
# Make some plots
# ---
pixel_bottom = P_bottom
pixel_right = P_right
elon = Elon
elat = Elat
lon_0 = -100
nNS = pixel_bottom.shape[0]
nEW = pixel_bottom.shape[1]
# Discrete colorbar
cmap = cm.jet
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
# --
# EW
# --
# define the bins and normalize
bounds = np.arange(0,10,1)
norm = colors.BoundaryNorm(bounds, cmap.N)
cmap.set_bad(color='w',alpha=0)
m = set_basemap(projection,clon,clat,elon,elat,lon_0=lon_0,satellite_height=35785831.0)
fig = plt.figure()
fig.set_size_inches(18.5, 10.5)
im = map_(fig,m,pixel_bottom,cmap,bounds.min(),bounds.max(),'EW Pixel Size [km] nX = {}'.format(nEW),elon=elon,elat=elat,norm=norm)
ax = plt.gca()
#[left, bottom, width, height]
cbaxes = fig.add_axes([0.91, 0.147, 0.02, 0.73])
fig.colorbar(im,ax=ax, cax = cbaxes)
plt.savefig('goes-r_EW_pixelsize_nX={}.png'.format(nEW), transparent='true')
plt.close(fig)
# --
# NS
# --
# define the bins and normalize
bounds = np.arange(0,5.5,0.5)
norm = colors.BoundaryNorm(bounds, cmap.N)
cmap.set_bad(color='w',alpha=0)
fig = plt.figure()
fig.set_size_inches(18.5, 10.5)
im = map_(fig,m,pixel_right,cmap,bounds.min(),bounds.max(),'NS Pixel Size [km] nY = {}'.format(nNS),elon=elon,elat=elat,norm=norm)
ax = plt.gca()
#[left, bottom, width, height]
cbaxes = fig.add_axes([0.91, 0.147, 0.02, 0.73])
fig.colorbar(im,ax=ax, cax = cbaxes)
plt.savefig('goes-r_NS_pixelsize_nY={}.png'.format(nNS), transparent='true')
plt.close(fig)
|
my_grades = {
"linguagens": 630,
"humanas" : 660,
"natureza" : 660,
"matematica" : 830,
"redacao" : 800
}
matematica_aplicada = {
"curso" : "Matemática Aplicada",
"corte" : 720.96,
"linguagens" : 3,
"humanas" : 1,
"natureza" : 4,
"matematica" : 5,
"redacao" : 1
}
estatistica = {
"curso" : "Estatística",
"corte" : 742.91,
"linguagens" : 2,
"humanas" : 1,
"natureza" : 3,
"matematica" : 5,
"redacao" : 3
}
ufrj_majors = [matematica_aplicada, estatistica]
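# Sketch (not part of the original script): compute a weighted ENEM score for each
# major in ufrj_majors and compare it against the cutoff ("corte"). The weighting
# scheme below (grade * weight, divided by the sum of weights) is an assumption about
# how these dictionaries are meant to be combined.
subjects = ["linguagens", "humanas", "natureza", "matematica", "redacao"]
for major in ufrj_majors:
    total_weight = sum(major[s] for s in subjects)
    weighted = sum(my_grades[s] * major[s] for s in subjects) / total_weight
    status = "above the cutoff" if weighted >= major["corte"] else "below the cutoff"
    print(f"{major['curso']}: weighted score {weighted:.2f} ({status})")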
|
def solveMeFirst(a,b):
return a + b
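# Usage sketch (assumption, not part of the original snippet): read two integers from
# standard input and print their sum, as in the usual HackerRank harness.
if __name__ == "__main__":
    num1 = int(input())
    num2 = int(input())
    print(solveMeFirst(num1, num2))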
|
powers = [31]
channels = [26]
sinks = [1]
testbed = "fbk"
start_epoch = 1
active_epochs = 200
full_epochs = 15
#num_senderss = [0,1,2,5,10,20]
num_senderss = [1,20]
period = 1
n_tx_s = 3
dur_s = 10
n_tx_t = 3
dur_t = 8
n_tx_a = 3
dur_a = 8
payload = 2
longskips = [0]
n_emptys = [(2,2,4,6)]
ccas = [(-15, 80)]
nodemaps=["all"]
#nodemaps=["all", "n7_MWO", "n13_MWO", "n42_MWO", "n7_12_20_31_37_49_WIFI4"]
chmap = "nomap"
boot_chop = "hop3"
#chmap = "nohop"
#boot_chop = "nohop"
logging = True
seed = 123
|
# -*- coding: utf-8 -*-
"""Top-level package for Python Business Objects import."""
__author__ = """Hugo van den Berg"""
__email__ = 'hugo.van.den.berg@brabantwater.nl'
__version__ = '0.1.0'
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 15:55:15 2019
@author: tsd
"""
import torch
from .tokenization_albert import AlbertTokenizer
from .preprocessing_funcs import save_as_pickle, load_pickle
from .ALBERT import AlbertForSequenceClassification
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
class XLNet_infer(object):
def __init__(self,):
super(XLNet_infer, self).__init__()
logger.info("Loading fine-tuned XLNet...")
self.args = load_pickle("./data/args.pkl")
self.net = AlbertForSequenceClassification.from_pretrained('albert-base-v2', num_labels=self.args.num_classes)
self.tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2', do_lower_case=False)
logger.info("Done!")
def classify(self, text=None):
if text is not None:
logger.info("Classifying text...")
text = self.tokenizer.tokenize(text)
            text = torch.tensor(self.tokenizer.convert_tokens_to_ids(text[:(self.args.tokens_length-1)])).long().unsqueeze(0)
self.net.eval()
with torch.no_grad():
outputs, _ = self.net(text)
_, predicted = torch.max(outputs.data, 1)
print("Predicted label: %d" % predicted.item())
else:
while True:
text = input("Input text to classify: \n")
if text in ["quit", "exit"]:
break
logger.info("Classifying text...")
text = self.tokenizer.tokenize(text)
                text = torch.tensor(self.tokenizer.convert_tokens_to_ids(text[:(self.args.tokens_length-1)])).long().unsqueeze(0)
self.net.eval()
with torch.no_grad():
outputs, _ = self.net(text)
_, predicted = torch.max(outputs.data, 1)
print("Predicted label: %d" % predicted.item())
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv.UMat(data) # from ndarray
data_sub_um = cv.UMat(data_um, [128, 256], [128, 256]) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
# test continuous and submatrix flags
assert data_um.isContinuous() and not data_um.isSubmatrix()
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
def test_umat_handle(self):
a_um = cv.UMat(256, 256, cv.CV_32F)
_ctx_handle = cv.UMat.context() # obtain context handle
_queue_handle = cv.UMat.queue() # obtain queue handle
_a_handle = a_um.handle(cv.ACCESS_READ) # obtain buffer handle
_offset = a_um.offset # obtain buffer offset
def test_umat_matching(self):
img1 = self.get_sample("samples/data/right01.jpg")
img2 = self.get_sample("samples/data/right02.jpg")
orb = cv.ORB_create()
img1, img2 = cv.UMat(img1), cv.UMat(img2)
ps1, descs_umat1 = orb.detectAndCompute(img1, None)
ps2, descs_umat2 = orb.detectAndCompute(img2, None)
self.assertIsInstance(descs_umat1, cv.UMat)
self.assertIsInstance(descs_umat2, cv.UMat)
self.assertGreater(len(ps1), 0)
self.assertGreater(len(ps2), 0)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
res_umats = bf.match(descs_umat1, descs_umat2)
res = bf.match(descs_umat1.get(), descs_umat2.get())
self.assertGreater(len(res), 0)
self.assertEqual(len(res_umats), len(res))
def test_umat_optical_flow(self):
img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
# Note, that if you want to see performance boost by OCL implementation - you need enough data
# For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
# img = np.hstack([np.vstack([img] * 6)] * 6)
feature_params = dict(maxCorners=239,
qualityLevel=0.3,
minDistance=7,
blockSize=7)
p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
self.assertEqual(p0_umat.get().shape, p0.shape)
p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
self.assertTrue(np.allclose(p0_umat.get(), p0))
_p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)
_p1_mask_err_umat0 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
_p1_mask_err_umat1 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None))
_p1_mask_err_umat2 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None))
# # results of OCL optical flow differs from CPU implementation, so result can not be easily compared
# for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
# for data, data_umat in zip(p1_mask_err, p1_mask_err_umat):
# self.assertTrue(np.allclose(data, data_umat))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
"""
Class Hierarchy
G{classtree: BaseController}
Package tree
G{packagetree: controller}
Import Graph
G{importgraph: controller}
"""
from cluster_tool import KubernetesTool
from controller_param import ControllerParam
class BaseController:
def __init__(self,controller_id,config_path):
self.controller_id = controller_id
""" @type: C{string} """
self.config_path = config_path
""" @type: C{string} """
self.controller_param = None
""" @type: L{ControllerParam} """
self.tool = KubernetesTool()
""" @type: L{KubernetesTool} """
def get_controller_id(self):
return self.controller_id
def get_config_path(self):
return self.config_path
def get_controller_param(self):
return self.controller_param
def get_tool(self):
return self.tool
def start(self):
self.tool.create_replication_controller(self.config_path)
def stop(self):
self.tool.delete_replication_controller(self.controller_id)
def expand(self,new_replicas):
self.tool.resize_replication_controller(self.controller_id,new_replicas)
def shrink(self,new_replicas):
self.tool.resize_replication_controller(self.controller_id,new_replicas)
class ApacheController(BaseController):
def __init__(self):
print "[ApacheController] init ..."
self.create_controller_param()
print "[ApacheController] OK"
def create_controller_param(self):
controller_id = 'apache-controller'
config_path = 'json/apache-controller.json'
BaseController.__init__(self,controller_id,config_path)
# init controller_param
self.controller_param = ControllerParam(controller_id,1,"apache-pod",None)
def start(self):
print "[ApacheController] start..."
BaseController.start(self)
class MysqlController(BaseController):
def __init__(self):
print "[MysqlController] init ..."
self.create_controller_param()
print "[MysqlController] OK"
def create_controller_param(self):
controller_id = 'mysql-controller'
config_path = 'json/mysql-controller.json'
BaseController.__init__(self,controller_id,config_path)
def start(self):
print "[MysqlController] start..."
BaseController.start(self)
class RobustController(BaseController):
def __init__(self):
print "[RobustController] init ..."
self.create_controller_param()
print "[RobustController] OK"
def create_controller_param(self):
controller_id = 'robust-controller'
config_path = 'json/robust-controller.json'
BaseController.__init__(self,controller_id,config_path)
def start(self):
print "[RobustController] start..."
BaseController.start(self)
class ControllerTesting(ApacheController,MysqlController,RobustController):
pass
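# Usage sketch (assumption, not part of the original module): bring up the Apache
# replication controller, scale it to 3 replicas, then tear it down. This requires a
# reachable Kubernetes cluster and the json/apache-controller.json config file.
if __name__ == "__main__":
    controller = ApacheController()
    controller.start()
    controller.expand(3)
    controller.stop()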
|
from ._node import Node
__all__ = ['List']
class EmptyError(ValueError):
"""
Raised when a list is empty.
"""
class NotEmptyError(ValueError):
"""
    Raised when a list is not empty.
"""
class List:
"""
Doubly linked list.
>>> a = List.from_iterable([1, 2, 3])
>>> print(a)
[1, 2, 3]
>>> 1 in a
True
>>> print(a)
[1, 2, 3]
>>> 5 in a
False
>>> print(a)
[1, 2, 3]
"""
def __init__(self):
self.head = None
self.tail = None
self.size = 0
@classmethod
def from_iterable(cls, iterable):
"""
Alternate constructor for `List`.
Gets data from a single `iterable` argument.
>>> a = List.from_iterable([1, 2, 3])
>>> print(a)
[1, 2, 3]
>>> b = List.from_iterable(('Hello', 'World'))
>>> print(b)
['Hello', 'World']
"""
new_list = cls()
new_list.extend(iterable)
return new_list
def __repr__(self):
"""
Return an unambiguous representation of an object.
"""
return (f'{self.__class__.__name__}('
f'head={self.head!r}, tail={self.tail!r})')
def __str__(self):
"""
Return elements of the list as a string.
"""
return f'{[element for element in self]}'
def __iter__(self):
"""
Traverse the list in forward direction.
"""
node = self.head
while node:
yield node.data
node = node.next_node
def __reversed__(self):
"""
Traverse the list in reverse direction.
"""
node = self.tail
while node:
yield node.data
node = node.prev_node
def __len__(self):
return self.size
def __contains__(self, data):
"""
Check if the list contains an element with
a given data.
"""
for node in self._nodes():
if node.data == data:
self._rearrange(node)
return True
return False
def __eq__(self, iterable):
"""
Check if list is equal to `iterable`.
"""
if len(self) != len(iterable):
return False
for element, other_element in zip(self, iterable):
if element != other_element:
return False
return True
def append(self, data):
"""
Insert an element to the end of the list.
>>> a = List()
>>> a.append(1)
>>> a.append(2)
>>> print(a)
[1, 2]
"""
new_node = Node(data)
if self.tail:
self._insert_after(self.tail, new_node)
else:
self._insert_first(new_node)
def prepend(self, data):
"""
Insert an element to the beginning of the list.
>>> a = List()
>>> a.prepend(1)
>>> a.prepend(0)
>>> print(a)
[0, 1]
"""
new_node = Node(data)
if self.head:
self._insert_before(self.head, new_node)
else:
self._insert_first(new_node)
def _insert_first(self, new_node):
"""
Insert a node into the empty list.
Raise `NotEmptyError` if the list is not empty.
"""
if self.size > 0:
raise NotEmptyError('List must be empty')
self.head = new_node
self.tail = new_node
self.size += 1
def _insert_before(self, existing_node, new_node):
"""
Insert a node before a given node.
"""
new_node.next_node = existing_node
if existing_node is self.head:
new_node.prev_node = None
self.head = new_node
else:
new_node.prev_node = existing_node.prev_node
existing_node.prev_node.next_node = new_node
existing_node.prev_node = new_node
self.size += 1
def _insert_after(self, existing_node, new_node):
"""
Insert a node after a given one.
"""
new_node.prev_node = existing_node
if existing_node is self.tail:
new_node.next_node = None
self.tail = new_node
else:
new_node.next_node = existing_node.next_node
existing_node.next_node.prev_node = new_node
existing_node.next_node = new_node
self.size += 1
def extend(self, iterable):
"""
Insert items from `iterable` to the end of the list.
>>> a = List()
>>> b = [1, 2, 3]
>>> a.extend(b)
>>> print(a)
[1, 2, 3]
"""
for item in iterable:
self.append(item)
def is_empty(self):
"""
Check if list is empty.
"""
return len(self) == 0
def pop_back(self):
"""
Remove the last element of the list.
>>> a = List.from_iterable([1, 2, 3])
>>> print(a)
[1, 2, 3]
>>> a.pop_back()
>>> print(a)
[1, 2]
"""
if self.is_empty():
raise EmptyError('List must not be empty')
if self.size == 1:
self._remove_last()
else:
self.tail = self.tail.prev_node
self.tail.next_node = None
self.size -= 1
def pop_front(self):
"""
Remove the first element of the list.
>>> a = List.from_iterable([1, 2, 3])
>>> print(a)
[1, 2, 3]
>>> a.pop_front()
>>> print(a)
[2, 3]
"""
if self.is_empty():
raise EmptyError('List must not be empty')
if self.size == 1:
self._remove_last()
else:
self.head = self.head.next_node
self.head.prev_node = None
self.size -= 1
def erase(self, data):
"""
Erase all elements of the list that
contain a given data.
>>> a = List.from_iterable([1, 2, 3, 3, 3, 4])
>>> print(a)
[1, 2, 3, 3, 3, 4]
>>> a.erase(3)
>>> print(a)
[1, 2, 4]
"""
for node in self._nodes():
next_node = node.next_node
if node.data == data:
self._remove(node)
node = next_node
def _remove(self, node):
"""
Remove a given node from the list.
"""
if node is self.head:
self.pop_front()
elif node is self.tail:
self.pop_back()
else:
node.next_node.prev_node = node.prev_node
node.prev_node.next_node = node.next_node
self.size -= 1
def _remove_last(self):
"""
Remove the only node in the list.
"""
if self.size > 1:
raise ValueError('List has more than one node')
self.head = None
self.tail = None
self.size = 0
def _rearrange(self, node):
"""
Apply self-organizing method.
"""
return
def _nodes(self):
"""
Traverse the list _nodes in forward direction.
"""
node = self.head
while node:
yield node
node = node.next_node
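# Self-check sketch (not part of the original module): run the doctests embedded in the
# docstrings above, e.g. via `python -m <package>.<this_module>` (placeholder names) so
# that the relative `._node` import resolves.
if __name__ == '__main__':
    import doctest
    doctest.testmod()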
|
# =====================================================================================
# Copyright (c) 2010-2012, G. Fiori, G. Iannaccone, University of Pisa
#
# This file is released under the BSD license.
# See the file "license.txt" for information on usage and
# redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
# =====================================================================================
from numpy import *
from NanoTCAD_ViDESmod import *
from section import *
import sys
import types
writeout("\n")
writeout("-------------------------------------------------------------------\n")
writeout(" NanoTCAD ViDES ")
writeout(" Version 1.5 (rel-1-6)")
writeout(" Last Modified 19 May 2016")
writeout(" Copyright (C) 2004-2016 \n")
writeout("-------------------------------------------------------------------\n")
writeout("\n")
NEmax = 5e3;
DIGIT_PRECISION = 20;
max_number_of_cores_on_a_server = 8;
# I check if mpi4py is installed on the machine or not
try:
from mpi4py import MPI
mpi4py_loaded = True
sizeMPI = MPI.COMM_WORLD.Get_size()
except ImportError:
mpi4py_loaded = False
# I check if scipy is installed
# This is needed for graphene_TOB
try:
from scipy.optimize import newton
newton_loaded = True
except ImportError:
newton_loaded = False
# I check if pylab is installed on the machine or not
try:
if (mpi4py_loaded):
if (sizeMPI <= max_number_of_cores_on_a_server):
from pylab import *
pylab_loaded = True
else:
from pylab import *
pylab_loaded = True
# except ImportError:
except Exception:
pylab_loaded = False
writeout("pylab not installed on this machine or not set up correctly DISPLAY variable")
# definition of constants
kboltz = 1.3807e-23
hbar = 1.05459e-34
m0 = 9.1095e-31
q = 1.60219e-19
eps0 = 8.85e-12
# Slater-Koster parameters for the sp3d5s* tight-binding Hamiltonian in Si
thop_Si = array(
[-1.95933, -4.24135, -1.52230, 3.02562, 3.15565, -2.28485, -0.80993, 4.10364, -1.51801, -1.35554, 2.38479, -1.68136,
2.58880, -1.81400]);
onsite_Si = array([-2.15168, 4.22925, 4.22925, 4.22925, 13.78950, 13.78950, 13.78950, 13.78950, 13.78950, 19.11650]);
def MPIze(channel):
if (mpi4py_loaded):
del channel.E;
channel.E = zeros(int(NEmax));
Eupper_save = channel.Eupper;
Elower_save = channel.Elower;
vt = kboltz * channel.Temp / q;
sizeMPI = MPI.COMM_WORLD.Get_size()
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
channel.rank = rank;
# I compute the maximum and the minimum
# of the energy interval
if ((channel.Eupper > 900) & (channel.Elower < -900)):
Eupper = max(max(channel.mu1, max(-channel.Phi)), channel.mu2) + 0.5 * channel.gap() + 10 * vt;
Elower = min(min(channel.mu1, min(-channel.Phi)), channel.mu2) - 0.5 * channel.gap() - 10 * vt;
else:
Eupper = channel.Eupper;
Elower = channel.Elower;
# string="Eupper and Elower %s %s " %(Eupper,Elower)
# if (rank==0): writeout(string)
E = arange(Elower, Eupper, channel.dE);
            arraydim = size(E) // sizeMPI;
excess = size(E) - sizeMPI * arraydim
if (rank < excess):
channel.Elower = E[rank * (arraydim + 1)];
channel.Eupper = E[(rank + 1) * (arraydim + 1) - 1];
else:
channel.Elower = E[(rank - excess) * arraydim + excess * (arraydim + 1)];
if (rank == (sizeMPI - 1)):
channel.Eupper = E[size(E) - 1];
else:
channel.Eupper = E[(rank - excess + 1) * arraydim - 1 + excess * (arraydim + 1)];
# string="Inizio rank %s %s %s" %(rank,channel.Elower,channel.Eupper)
# writeout(string)
channel.charge_T();
# writeout("Finito rank "),rank,channel.Elower,channel.Eupper;
# I send the charge and the transmission coefficient
if (rank != 0):
temp = array(channel.charge);
MPI.COMM_WORLD.Send([temp, MPI.DOUBLE], dest=0, tag=11);
del temp;
NPE = zeros(1, int);
NPE[0] = int(ceil((channel.Eupper - channel.Elower) / channel.dE)) + 1;
# size(arange(channel.Elower,channel.Eupper,channel.dE));
# int((channel.Eupper-channel.Elower)/channel.dE);
# size(nonzero(channel.E));
temp = array(channel.T[:NPE[0]]);
temp2 = array(channel.E[:NPE[0]]);
# NPE[0]=size(temp);
MPI.COMM_WORLD.Send([NPE, MPI.INT], dest=0, tag=10);
MPI.COMM_WORLD.Send([temp, MPI.DOUBLE], dest=0, tag=12);
MPI.COMM_WORLD.Send([temp2, MPI.DOUBLE], dest=0, tag=14);
# writeout("Spedito rank "),rank
del temp;
del temp2;
else:
channel.charge = array(channel.charge);
NNEE = int(ceil((channel.Eupper - channel.Elower) / channel.dE)) + 1;
# size(arange(channel.Elower,channel.Eupper,channel.dE));
# NNEE=((channel.Eupper-channel.Elower)/channel.dE);
# size(nonzero(channel.E));
channel.T = array(channel.T[:NNEE]);
channel.E = array(channel.E[:NNEE]);
for i in range(1, sizeMPI):
temp = empty(size(channel.charge), dtype=double);
MPI.COMM_WORLD.Recv([temp, MPI.DOUBLE], source=i, tag=11);
channel.charge = channel.charge + temp;
del temp;
NPE = empty(1, int);
MPI.COMM_WORLD.Recv([NPE, MPI.INT], source=i, tag=10);
temp = empty(NPE[0], dtype=double);
MPI.COMM_WORLD.Recv([temp, MPI.DOUBLE], source=i, tag=12);
temp2 = empty(NPE[0], dtype=double);
MPI.COMM_WORLD.Recv([temp2, MPI.DOUBLE], source=i, tag=14);
channel.T = concatenate((channel.T, temp));
channel.E = concatenate((channel.E, temp2));
del temp;
del temp2;
# writeout("Preso rank "),i
channel.charge = MPI.COMM_WORLD.bcast(channel.charge, root=0)
channel.T = MPI.COMM_WORLD.bcast(channel.T, root=0)
channel.E = MPI.COMM_WORLD.bcast(channel.E, root=0)
channel.Eupper = Eupper_save;
channel.Elower = Elower_save;
# MPI.Finalize();
else:
writeout("*********************************")
writeout("MPI not installed on this machine")
writeout("*********************************")
return;
def MPIze_kt(channel):
if (mpi4py_loaded):
kmin_save = channel.kmin;
kmax_save = channel.kmax;
vt = kboltz * channel.Temp / q;
sizeMPI = MPI.COMM_WORLD.Get_size()
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
channel.rank = rank;
# I compute the maximum and the minimum
# of the wave-vector kt
kt_max = channel.kmax;
kt_min = channel.kmin;
if (rank == 0): writeout("kt_max, kt_min"), kt_max, kt_min
k = arange(kt_min, kt_max, channel.dk);
            arraydim = size(k) // sizeMPI;
channel.kmin = k[rank * arraydim];
if (rank == (sizeMPI - 1)):
channel.kmax = k[size(k) - 1];
else:
channel.kmax = k[(rank + 1) * arraydim - 1];
channel.charge_T();
NE = size(channel.E);
# I send the charge and the transmission coefficient
if (rank != 0):
temp = array(channel.charge);
MPI.COMM_WORLD.Send([temp, MPI.DOUBLE], dest=0, tag=11);
del temp;
temp = array(channel.T);
MPI.COMM_WORLD.Send([temp, MPI.DOUBLE], dest=0, tag=12);
del temp;
else:
channel.charge = array(channel.charge);
channel.T = array(channel.T);
for i in range(1, sizeMPI):
temp = empty(size(channel.charge), dtype=double);
MPI.COMM_WORLD.Recv([temp, MPI.DOUBLE], source=i, tag=11);
channel.charge = channel.charge + temp;
del temp;
temp = empty(NE, dtype=double);
MPI.COMM_WORLD.Recv([temp, MPI.DOUBLE], source=i, tag=12);
channel.T = channel.T + temp;
del temp;
channel.charge = MPI.COMM_WORLD.bcast(channel.charge, root=0)
channel.T = MPI.COMM_WORLD.bcast(channel.T, root=0)
channel.kmin = kmin_save;
channel.kmax = kmax_save;
# MPI.Finalize();
else:
writeout("*********************************")
writeout("MPI not installed on this machine")
writeout("*********************************")
return;
def set_gate(interface, gate):
interface.boundary_conditions[gate.index] = gate.Ef;
def solve_init(grid, interface, channel):
# I get the rank
if (mpi4py_loaded):
channel.rank = MPI.COMM_WORLD.Get_rank()
# I set the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
interface.rank = rank;
else:
interface.rank = 0;
# I first give an estimation of the density of states
# when computing the flat band potential in the regions
# where the fixed_charge is not equal to zero, assuming
# full ionization
# I save the temperature, mu1, mu2, the potential, n, Nc, Eupper, Elower
# temp_save=channel.Temp;
mu1_save = channel.mu1;
mu2_save = channel.mu2;
Nc_save = channel.Nc;
Eupper_save = channel.Eupper;
Elower_save = channel.Elower;
boundary_conditions_save = copy(interface.boundary_conditions);
normpoisson_save = interface.normpoisson;
interface.normpoisson = 1e-3;
# I impose a low-temperature, so to compute the LDOS, instead of the
# LDOS multiplied by the Fermi-Dirac
name = grid.__class__.__name__;
name_channel = channel.__class__.__name__;
if (name == "grid3D"):
if (name_channel == "multilayer_graphene"):
channel.Nc = 8;
x_save = channel.x
y_save = channel.y
z_save = channel.z
channel.atoms_coordinates();
else:
channel.Nc = 6;
channel.Phi = zeros(channel.n * channel.Nc);
channel.mu1 = 0;
channel.mu2 = 0;
vt = kboltz * channel.Temp / q;
channel.Eupper = channel.gap() + 10 * vt;
channel.Elower = 0;
# I compute the NEGF
# if (interface.modespace=="yes"):
# channel.mode_charge_T();
# else:
# if (interface.MPI=="yes"):
# MPIze(channel);
# else:
channel.charge_T();
# N1D=abs(sum(channel.charge))/(6*channel.Nc)/(3*channel.acc)*1e9;
Ec = channel.gap() * 0.5;
N1D = sum(abs(channel.charge)) / (6 * channel.n) / (4 * channel.acc) * 1e9 * exp(Ec / vt);
# return N1D
# I compute the mean z: if atoms have a z-coordinate > zmean => I impose the electrochemical potential mu2
# if atoms have a z-coordinate < zmean => I impose the electrochemical potential mu1
zmean = (grid.zmin + grid.zmax) * 0.5;
indexS = nonzero((abs(interface.fixed_charge) > 1e-20) & (grid.z3D < zmean));
indexD = nonzero((abs(interface.fixed_charge) > 1e-20) & (grid.z3D >= zmean));
potential = zeros(grid.Np);
argoS = (abs(interface.fixed_charge[indexS]) * grid.surf[indexS, 5] / N1D);
argoD = (abs(interface.fixed_charge[indexD]) * grid.surf[indexD, 5] / N1D);
potential[indexS] = (vt * (log(exp(argoS) - 1)) + Ec) * sign(interface.fixed_charge[indexS]) + mu1_save;
potential[indexD] = (vt * (log(exp(argoD) - 1)) + Ec) * sign(interface.fixed_charge[indexD]) + mu2_save;
interface.boundary_conditions[indexS] = potential[indexS];
interface.boundary_conditions[indexD] = potential[indexD];
solve_Poisson(grid, interface);
elif (name == "grid2D"):
channel.Nc = 8;
channel.Phi = zeros(channel.n * channel.Nc);
channel.mu1 = 0;
channel.mu2 = 0;
vt = kboltz * channel.Temp / q;
channel.Eupper = channel.gap() + 10 * vt;
channel.Elower = 0;
# I compute the NEGF
# if (interface.modespace=="yes"):
# channel.mode_charge_T();
# else:
# if (interface.MPI_kt=="yes"):
# MPIze_kt(channel);
# else:
channel.charge_T();
Ec = channel.gap() * 0.5;
N1D = sum(abs(channel.charge)) / (8 * channel.n) / (8 * channel.acc) * 1e9 * exp(Ec / vt);
        # I compute the mean y: if atoms have a y-coordinate > ymean => I impose the electrochemical potential mu2
        # if atoms have a y-coordinate < ymean => I impose the electrochemical potential mu1
ymean = (grid.ymin + grid.ymax) * 0.5;
indexS = nonzero((abs(interface.fixed_charge) > 1e-20) & (grid.y2D < ymean));
indexD = nonzero((abs(interface.fixed_charge) > 1e-20) & (grid.y2D >= ymean));
potential = zeros(grid.Np);
argoS = (abs(interface.fixed_charge[indexS]) / N1D);
argoD = (abs(interface.fixed_charge[indexD]) / N1D);
potential[indexS] = (vt * (log(exp(argoS) - 1)) + Ec) * sign(interface.fixed_charge[indexS]) + mu1_save;
potential[indexD] = (vt * (log(exp(argoD) - 1)) + Ec) * sign(interface.fixed_charge[indexD]) + mu2_save;
# potential[indexS]=Ec;
# potential[indexD]=Ec;
interface.boundary_conditions[indexS] = potential[indexS];
interface.boundary_conditions[indexD] = potential[indexD];
solve_Poisson(grid, interface);
# going back to the old values
channel.Nc = Nc_save
channel.mu2 = mu2_save;
channel.mu1 = mu1_save;
channel.Eupper = Eupper_save;
channel.Elower = Elower_save;
interface.boundary_conditions = boundary_conditions_save;
interface.normpoisson = normpoisson_save;
if (name_channel == "multilayer_graphene"):
channel.x = x_save
channel.y = y_save
channel.z = z_save
del x_save, y_save, z_save
# deleting the save variables
del mu1_save, mu2_save, Nc_save, Eupper_save, Elower_save, boundary_conditions_save;
return;
def solve_self_consistent(grid, interface, channel):
normad = 1e30;
# Phiold=1.0*interface.Phi;
interface.Phiold = interface.Phi.copy();
counter = 1;
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0;
while (normad > interface.normd):
# I pass the potential in correspondence of the
# atoms of the material for which I compute the NEGF
channel.Phi = interface.Phi[grid.swap]
# I compute the NEGF
# channel.Phi=zeros(size(grid.swap));
# savetxt("Phi.before",interface.Phi[grid.swap]);
if (interface.modespace == "yes"):
channel.mode_charge_T();
else:
if (interface.MPI == "yes"):
MPIze(channel);
elif (interface.MPI_kt == "yes"):
MPIze_kt(channel);
else:
channel.charge_T();
# savetxt("Phi.temp2",interface.Phi);
# a=[channel.E,channel.T];
# savetxt("T.temp",transpose(a));
if (rank == 0):
writeout("--------------------------------------------")
string = " CURRENT = %s A/m" % (channel.current());
writeout(string);
writeout("--------------------------------------------")
# I pass back the free_charge term to
# the 3D domain
interface.free_charge[grid.swap] = channel.charge
if (rank == 0):
savetxt("ncar.ini", interface.free_charge);
savetxt("Phi.ini", interface.Phi);
# I solve Poisson
solve_Poisson(grid, interface);
# normad=sqrt(sum((interface.Phiold-interface.Phi)**2));
# Phiold=zeros(grid.Np);
normad = max(abs(interface.Phiold - interface.Phi))
interface.Phi = interface.Phi + (interface.underel) * (interface.Phiold - interface.Phi)
del interface.Phiold;
# del Phiold;
# Phiold=1.0*interface.Phi;
interface.Phiold = interface.Phi.copy();
if (rank == 0): print()
string = "Iteration # %s; ||Phi-Phiold||2 = %s" % (counter, normad)
if (rank == 0): writeout(string)
if (rank == 0): print()
counter = counter + 1;
if (counter > 600):
return;
def solve_Poisson(grid, interface):
name = grid.__class__.__name__;
if (name == "grid3D"):
solvePoisson(grid, interface);
elif (name == "grid2D"):
solvePoisson2D(grid, interface);
elif (name == "grid1D"):
solvePoisson1D(grid, interface);
interface.Phi = array(interface.Phi)
return;
def nonuniformgrid(argu):
# This is a wrapper for the nonuniformgridmod function
# so to convert both the argument and the output to numpy arrays
# I convert the argument in an array
argarr = array(argu);
out = nonuniformgridmod(argarr);
# I return a pyarray
outarr = array(out);
return outarr;
# Fermi-Dirac Function
def Fermi(x):
return 1 / (1 + exp(x));
def delete_class(class_obj):
del_class(class_obj);
del class_obj;
return;
# This is the class for the nanotube
class nanotube:
acc = 0.144;
def __init__(self, n, L):
self.Nc = int(4 * (floor((floor(L / nanotube.acc) - 1) / 3)) + 2);
self.n = n;
self.Phi = zeros(n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.dE = 1e-3;
self.thop = -2.7;
self.eta = 1e-5;
self.mu1 = 0;
self.mu2 = 0;
self.Temp = 300;
self.contact = "doped";
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(self.n * self.Nc);
self.Nmodes = n;
self.x = zeros(n * self.Nc);
self.y = zeros(n * self.Nc);
self.z = zeros(n * self.Nc);
self.L = int(self.Nc / 2 + ((self.Nc - 1) - self.Nc * 0.5) * 0.5) * nanotube.acc;
self.atoms_coordinates();
self.rank = 0;
def gap(self):
return abs(2 * self.acc * self.thop * pi / (self.n * sqrt(3) * self.acc));
def atoms_coordinates(self):
CNT_atoms_coordinates(self);
self.x = array(self.x);
self.y = array(self.y);
self.z = array(self.z);
return;
def charge_T(self):
CNT_charge_T(self);
self.E = array(self.E);
self.T = array(self.T);
self.charge = array(self.charge);
return;
def mode_charge_T(self):
CNTmode_charge_T(self);
self.E = array(self.E);
self.T = array(self.T);
self.charge = array(self.charge);
return
def current(self):
vt = kboltz * self.Temp / q;
E = self.E;
T = self.T;
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE;
return sum(arg);
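# Usage sketch (assumption, not part of the original module): a standalone ballistic
# calculation for a nanotube with n = 13 and L = 15 nm at a small source-drain bias.
# The energy window, bias and sizes below are illustrative values only.
def _example_nanotube_current():
    cnt = nanotube(13, 15.0)
    cnt.mu1 = 0.0
    cnt.mu2 = -0.1
    cnt.Eupper = 1.0
    cnt.Elower = -1.0
    cnt.charge_T()
    writeout("nanotube current = %s" % cnt.current())
    return cnt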
# This is the class for the nanoribbon phonons and electron-phonon scattering rates
class GNRphonon:
def __init__(self, dimer):
self.N = 1000; # number of points qx (longitudinal direction)
while (((((self.N) - 1) % (dimer / 2)) != 0) | (((self.N) % 2) == 0)):
(self.N) += 1;
        self.dimer = dimer; # number of dimer lines
self.rank = 0;
self.phi = 0.0; # channel potential (midgap)
        self.numberAC = 2; # number of AC modes of different symmetry considered (=2: LA+TA, =1: only LA)
self.Ecutoff = 1.0; # cutoff energy
self.delta = 2; # integer: it specifies the sampling along the kx direction
self.deltak = 0;
self.kyE = zeros(dimer); # transverse electron wavevector
self.qy = zeros(dimer); # transverse phonon wavevector
self.kx = zeros(self.N); # longitudinal electron wavevector
self.qx = zeros(self.N); # longitudinal phonon wavevector
self.qx0 = 0.0; # fixed value for qx (computation of graphene branches)
self.qy0 = 0.0; # fixed value for qy (computation of graphene branches)
self.kxup = 0; # maximum value for kx (computation of rates)
self.kxdown = 0; # minimum value for kx (computation of rates)
self.dim1 = self.N;
self.dim2 = dimer;
self.dim3 = 6;
self.mmin = 0;
self.mmax = dimer - 1;
self.kxmin = 0;
self.kxmax = 0;
self.Phi_r1 = 39.87 * 10.0; # first neighbors
self.Phi_ti1 = 17.28 * 10.0;
self.Phi_to1 = 9.89 * 10.0;
self.Phi_r2 = 7.29 * 10.0; # second neighbors
self.Phi_ti2 = -4.61 * 10.0;
self.Phi_to2 = -0.82 * 10.0;
self.Phi_r3 = -2.64 * 10.0; # third neighbors
self.Phi_ti3 = 3.31 * 10.0;
self.Phi_to3 = 0.58 * 10.0;
self.Phi_r4 = 0.10 * 10.0; # fourth neighbors
self.Phi_ti4 = 0.79 * 10.0;
self.Phi_to4 = -0.52 * 10.0;
self.energyE = zeros((self.dim1, (2 * self.dim2))) # GNR electron curves
self.energyP2D = zeros((self.dim1, (self.dim2 * self.dim3))) # GNR phonon subbranches
self.minAC = zeros((self.dim2, 3)); # minimum of the acoustic subbranches
self.Egraphene = zeros(self.dim3); # graphene
self.rateAA = zeros((self.dim1, self.dim2));
self.rateAE = zeros((self.dim1, self.dim2));
self.rateOA = zeros((self.dim1, self.dim2));
self.rateOE = zeros((self.dim1, self.dim2));
self.Dac = 4.5 * (1.60219e-19); # deformation potential value (eV)
self.temp = 300; # temperature (K)
self.thop = 2.7; # hopping parameter (eV)
self.aCC = 0.144e-9; # lattice constant (m)
def electron_GNR(self):
electron_GNR(self);
self.kx = array(self.kx);
self.kyE = array(self.kyE);
self.energyE = array(self.energyE);
return;
def phonon_GNR(self):
phonon_GNR(self);
self.qx = array(self.qx);
self.qy = array(self.qy);
self.energyP2D = array(self.energyP2D);
return;
def phonon_graphene(self):
phonon_graphene(self);
self.Egraphene = array(self.Egraphene);
return;
def rateACABS(self):
rateACABS(self);
self.rateAA = array(self.rateAA);
return;
def rateACEM(self):
rateACEM(self);
self.rateAE = array(self.rateAE);
return;
def rateOPTABS(self):
rateOPTABS(self);
self.rateOA = array(self.rateOA);
return;
def rateOPTEM(self):
rateOPTEM(self);
self.rateOE = array(self.rateOE);
return;
# This is the class for the nanoribbon
class nanoribbon:
acc = 0.144;
def __init__(self, n, L):
self.Nc = int(4 * (int((int(L / nanoribbon.acc) - 1) / 3)) + 2);
self.n = n;
self.Phi = zeros(n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.dE = 1e-3;
self.thop = -2.7;
self.eta = 1e-5;
self.mu1 = 0;
self.mu2 = 0;
self.Temp = 300;
self.contact = "doped";
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(self.n * self.Nc);
self.defects = "no";
self.roughness = "no";
self.rank = 0;
self.atoms_coordinates();
def atoms_coordinates(self):
GNR_atoms_coordinates(self);
self.x = array(self.x);
self.y = array(self.y);
self.z = array(self.z);
return;
def gap(self):
return GNRgap(self);
def charge_T(self):
GNR_charge_T(self);
self.E = array(self.E);
self.T = array(self.T);
self.charge = array(self.charge);
return;
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
# This is the class for the graphene
class graphene:
acc = 0.144;
n = 1;
def __init__(self, L):
self.Nc = int(4 * (floor((floor(L / graphene.acc) - 1) / 3)));
self.Phi = zeros(self.Nc);
self.Ei = zeros(self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.delta = sqrt(3) * graphene.acc;
self.kmax = pi / self.delta;
self.kmin = 0;
self.dk = 0.1;
self.dE = 1e-3;
self.thop = -2.7;
self.eta = 1e-8;
self.mu1 = 0.0;
self.mu2 = 0.0;
self.Temp = 300;
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(self.Nc);
self.rank = 0;
self.atoms_coordinates();
self.gap();
self.T2D = "no"
def atoms_coordinates(self):
GNR_atoms_coordinates(self);
self.y = array(self.z);
self.x = zeros(size(self.y));
del self.z;
return;
def gap(self):
return 0;
def charge_T(self):
# Number of slices and atoms
slices = self.Nc;
atoms = 1;
# I define the vector of the k-wave vector
kvect = arange(self.kmin, self.kmax, self.dk)
# I start defining the Hamiltonian for the graphene flake
h = zeros((2 * slices, 3), dtype=complex);
h[0][0] = 1;
for i in range(1, slices + 1):
h[i][0] = i
h[i][1] = i
kk = 1;
for ii in range(slices + 1, 2 * slices):
if ((ii % 2) == 1):
h[ii][0] = kk;
h[ii][1] = kk + 1;
h[ii][2] = self.thop;
kk = kk + 1;
# I then compute the charge and the T for each energy and k and perform the integral
i = 0;
k = self.kmin;
H = Hamiltonian(atoms, slices)
if (self.T2D == "yes"):
EE = arange(self.Elower, self.Eupper, self.dE);
kvect = arange(self.kmin, self.kmax + self.dk, self.dk);
X, Y = meshgrid(EE, kvect);
Z = zeros((size(EE), size(kvect)))
while (k <= (self.kmax + self.dk * 0.5)):
if (self.rank == 0): writeout("----------------------------------")
string = " kx range: [%s,%s] " % (self.kmin, self.kmax);
if (self.rank == 0): writeout(string)
string = " iteration %s " % i;
if (self.rank == 0): writeout(string);
if (self.rank == 0): writeout("----------------------------------")
flaggo = 0;
kk = 1;
# I fill the Hamiltonian for the actual wavevector k in the cycle
for ii in range(slices + 1, 2 * slices):
if ((ii % 2) == 0):
h[ii][0] = kk;
h[ii][1] = kk + 1;
if ((flaggo % 2) == 0):
h[ii][2] = self.thop + self.thop * exp(k * self.delta * 1j);
else:
h[ii][2] = self.thop + self.thop * exp(-k * self.delta * 1j);
flaggo = flaggo + 1;
kk = kk + 1;
H.Eupper = self.Eupper;
H.Elower = self.Elower;
H.rank = self.rank;
H.H = h
H.dE = self.dE;
H.Phi = self.Phi;
H.Ei = -self.Phi;
H.eta = self.eta;
H.mu1 = self.mu1;
H.mu2 = self.mu2;
H.Egap = self.gap();
# I then compute T and the charge for the actual kx
H.charge_T()
# I sum up all the contribution
if (i == 0):
self.E = H.E;
# the factor 2 is because I integrate over kx>0
self.T = H.T * (2 * self.dk / (2 * pi));
self.charge = H.charge * (2 * self.dk / (2 * pi));
else:
# the factor 2 is because I integrate over kx>0
self.T = self.T + H.T * (2 * self.dk / (2 * pi));
self.charge = self.charge + H.charge * (2 * self.dk / (2 * pi));
if (self.T2D == "yes"):
Z[:, i] = H.T[:size(EE)];
k = k + self.dk
i = i + 1;
if (self.T2D == "yes"):
plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
origin='lower', extent=[self.kmin, self.kmax, self.Elower, self.Eupper])
show()
del H;
self.E = array(self.E);
self.T = array(self.T) * 1e9;
self.charge = array(self.charge) * 1e9;
del kvect, h;
return;
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
# This is the class for the graphene bilayer
class bilayer_graphene:
acc = 0.144;
acc_p = 0.35;
n = 2;
def __init__(self, L):
self.Nc = int(4 * (floor((floor(L / bilayer_graphene.acc) - 1) / 3)));
self.n = 2;
self.Phi = zeros(bilayer_graphene.n * self.Nc);
self.Ei = zeros(bilayer_graphene.n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.delta = sqrt(3) * bilayer_graphene.acc;
self.kmax = pi / self.delta;
self.kmin = 0;
self.dk = 0.1;
self.dE = 1e-3;
self.thop = -2.7;
self.tp = -0.35;
self.eta = 1e-8;
self.mu1 = 0.0;
self.mu2 = 0.0;
self.Temp = 300;
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(bilayer_graphene.n * self.Nc);
self.rank = 0;
self.atoms_coordinates();
self.gap();
self.T2D = "no"
def atoms_coordinates(self):
n_save = self.n;
self.n = 1;
GNR_atoms_coordinates(self);
ydown = array(self.z);
yup = ydown - self.acc * 0.5;
NN = size(ydown);
kkk = 0;
self.y = zeros(2 * NN);
for i in range(0, NN):
self.y[kkk] = ydown[i];
self.y[kkk + 1] = yup[i];
kkk = kkk + 2;
self.x = zeros(size(self.y));
i = linspace(0, size(self.y) - 1, size(self.y))
i_even = nonzero((i % 2) == 0);
i_odd = nonzero((i % 2) == 1);
self.x[i_even] = 0;
self.x[i_odd] = bilayer_graphene.acc_p;
del self.z, i, i_even, i_odd;
self.n = n_save;
return;
def gap(self):
        # This is a rough estimate of
        # the energy gap: it is certainly
        # the largest attainable value within
        # the pz tight-binding model
return abs(self.tp);
def charge_T(self):
# Number of slices and atoms
slices = self.Nc;
atoms = self.n;
# I define the vector of the k-wave vector
kvect = arange(self.kmin, self.kmax, self.dk)
# I start defining the Hamiltonian for the bilayer graphene
        h = zeros((4 * slices + 2 * (slices // 4) - 2, 3), dtype=complex);
h[0][0] = 1;
for i in range(1, 2 * slices + 1):
h[i][0] = i
h[i][1] = i
h[i][2] = 0.0;
# I then compute the charge and the T for each energy
# and k and perform the integral
i = 0;
k = self.kmin;
H = Hamiltonian(atoms, slices)
if (self.T2D == "yes"):
EE = arange(self.Elower, self.Eupper, self.dE);
kvect = arange(self.kmin, self.kmax + self.dk, self.dk);
X, Y = meshgrid(EE, kvect);
Z = zeros((size(EE), size(kvect)))
while (k <= (self.kmax + self.dk * 0.5)):
if (self.rank == 0): writeout("----------------------------------")
string = " kx range: [%s,%s] " % (self.kmin, self.kmax);
if (self.rank == 0): writeout(string);
string = " k: %s " % k;
if (self.rank == 0): writeout(string);
if (self.rank == 0): writeout("----------------------------------")
# -------------------------------------------------
# BEGINNING OF THE HAMILTONIAN DEFINITION
# FOR THE GRAPHENE BILAYER
# -------------------------------------------------
# I work on the bottom graphene layer
kk = 1;
flaggo = 0;
for ii in range(2 * slices + 1, 3 * slices):
if ((ii % 2) == 1):
h[ii][0] = kk;
h[ii][1] = kk + 2;
h[ii][2] = self.thop;
kk = kk + 2;
else:
h[ii][0] = kk;
h[ii][1] = kk + 2;
if ((flaggo % 2) == 0):
h[ii][2] = self.thop + self.thop * exp(k * self.delta * 1j);
else:
h[ii][2] = self.thop + self.thop * exp(-k * self.delta * 1j);
kk = kk + 2;
flaggo = flaggo + 1;
# I work on the top graphene layer
kk = 2;
flaggo = 1;
for ii in range(3 * slices, 4 * slices - 1):
if ((ii % 2) == 0):
h[ii][0] = kk;
h[ii][1] = kk + 2;
h[ii][2] = self.thop;
kk = kk + 2;
else:
h[ii][0] = kk;
h[ii][1] = kk + 2;
if ((flaggo % 2) == 0):
h[ii][2] = self.thop + self.thop * exp(k * self.delta * 1j);
else:
h[ii][2] = self.thop + self.thop * exp(-k * self.delta * 1j);
kk = kk + 2;
flaggo = flaggo + 1;
# I now work on the perpendicular hopping parameter
kk = 3;
for ii in range(4 * slices - 1, 4 * slices + int(slices / 2) - 2):
h[ii][0] = kk;
h[ii][1] = kk + 3;
h[ii][2] = self.tp;
kk = kk + 4;
# -------------------------------------------------
# END OF THE HAMILTONIAN
# -------------------------------------------------
H.Eupper = self.Eupper;
H.Elower = self.Elower;
H.H = h
H.rank = self.rank;
H.dE = self.dE;
H.Phi = self.Phi;
ind_even = arange(0, size(H.Phi), 2);
ind_odd = ind_even + 1;
H.Ei[ind_even] = -(self.Phi[ind_even] + self.Phi[ind_odd]) * 0.5;
H.Ei[ind_odd] = -(self.Phi[ind_even] + self.Phi[ind_odd]) * 0.5;
H.Ei_flag = "no"
H.eta = self.eta;
H.mu1 = self.mu1;
H.mu2 = self.mu2;
H.Egap = self.gap();
# return H.H
# I then compute T and the charge for the actual kx
H.charge_T()
# I sum up all the contribution
if (i == 0):
self.E = H.E;
# the factor 2 is because I integrate over kx>0
self.T = H.T * (2 * self.dk / (2 * pi));
self.charge = H.charge * (2 * self.dk / (2 * pi));
# self.charge=H.charge;
else:
# The spin is taken into account in the integral for the current
# the factor 2 is because I integrate over kx>0
self.T = self.T + H.T * (2 * self.dk / (2 * pi));
# 2 because I take into account
# that I integrate over kx>0
self.charge = self.charge + H.charge * (2 * self.dk / (2 * pi));
if (self.T2D == "yes"):
Z[:, i] = H.T[:size(EE)];
k = k + self.dk
i = i + 1;
if (self.T2D == "yes"):
plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
origin='lower', extent=[self.kmin, self.kmax, self.Elower, self.Eupper])
show()
del H;
self.E = array(self.E);
self.T = array(self.T) * 1e9;
self.charge = array(self.charge) * 1e9;
# self.charge=array(self.charge);
del kvect, h;
return;
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
# This is the class for the general Hamiltonian
class Hamiltonian:
def __init__(self, n, Nc):
self.Nc = Nc;
self.n = n;
self.x = zeros(n * self.Nc);
self.y = zeros(n * self.Nc);
self.z = zeros(n * self.Nc);
self.Phi = zeros(n * self.Nc);
self.Ei = zeros(n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.dE = 0.001;
self.eta = 1e-8;
self.mu1 = 0;
self.mu2 = 0;
self.Temp = 300;
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(n * self.Nc);
self.Egap = 0;
self.rank = 0;
# if this flag is set to "yes" then Ei=-Phi
self.Ei_flag = "yes"
        # The +1 will then be replaced by the number of orbitals per atom in the nearest-neighbour approximation
# self.H=zeros((((Nc*n)*(Nc*n+1)/2),2+100+10));
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
def charge_T(self):
if (self.Ei_flag == "yes"):
self.Ei = -self.Phi;
H_charge_T(self);
self.E = array(self.E);
self.T = array(self.T);
self.charge = array(self.charge);
def gap(self):
return 0.5;
# This is the class for the zincblende structures
class Zincblend:
def __init__(self, material, sqci, tilt, edge, zmax):
self.material = material
if self.material == 'Si':
self.aux = [-2.15168,
4.22925,
19.11650,
13.78950,
-1.95933,
-4.24135,
-1.52230,
3.02562,
3.15565,
-2.28485,
-0.80993,
4.10364,
-1.51801,
-1.35554,
2.38479,
-1.68136,
2.58880,
-1.81400,
]
self.skparameters = array(self.aux, dtype=float)
self.a0 = 5.431
self.flag = 0
if self.material == 'Ge':
self.aux = [-1.95617,
5.30970,
19.29600,
13.58060,
-1.39456,
-3.56680,
-2.01830,
2.73135,
2.68638,
-2.64779,
-1.12312,
4.28921,
-1.73707,
-2.00115,
2.10953,
-1.32941,
2.56261,
-1.95120
]
self.skparameters = array(self.aux, dtype=float)
self.a0 = 5.6575
self.flag = 0
if self.material == 'InAs':
self.aux = [-5.500420,
4.151070,
-0.581930,
6.971630,
19.710590,
19.941380,
13.031690,
13.307090,
-1.694350,
-4.210450,
-2.426740,
-1.159870,
2.598230,
2.809360,
2.067660,
0.937340,
-2.268370,
-2.293090,
-0.899370,
-0.488990,
4.310640,
-1.288950,
-1.731410,
-1.978420,
2.188860,
2.456020,
-1.584610,
2.717930,
-0.505090
]
self.skparameters = array(self.aux, dtype=float)
self.a0 = 6.0583
self.flag = 1
self.sqci = sqci;
self.tilt = tilt;
self.edge = edge;
self.zmax = zmax;
layers = int(4 * self.zmax / (self.a0) + 1)
        rank = MPI.COMM_WORLD.Get_rank() if mpi4py_loaded else 0
        if (rank == 0):
            writeout("layers before rounding = %s" % layers)
if layers % 4 == 1:
layers -= 1
elif layers % 4 == 2:
layers -= 2
elif layers % 4 == 3:
layers += 1
if layers % 4 != 0:
writeout("INTERRUPT AT WIRE"), material, parameters[0][i]
writeout("NUMBER OF SLICES NOT MULTIPLE OF 4")
quit()
layers += 8
self.L = (self.a0 / 4) * (layers - 1)
self.n_aux = int((4 * self.edge / self.a0) * (4 * self.edge / self.a0)) + 10;
        # maybe nothing changes if the +10 is removed (to be verified)
self.Nc_aux = int((4 * self.zmax / self.a0)) + 10;
self.zmax = self.L
self.atoms = zeros(1);
self.slices = zeros(1);
self.max = zeros(1);
self.rank = 0;
self.deltae = 20.0;
self.ics = zeros(self.n_aux * self.Nc_aux);
self.ipsilon = zeros(self.n_aux * self.Nc_aux);
self.zeta = zeros(self.n_aux * self.Nc_aux);
        self.H_aux = zeros((self.Nc_aux * self.n_aux) * ((self.Nc_aux * self.n_aux + 1) // 2) * (2 + 100));
        self.H = zeros((((self.Nc_aux * self.n_aux) * (self.Nc_aux * self.n_aux + 1) // 2), 2 + 100));
self.Zinc();
self.n = int(self.atoms[0]);
self.Nc = int(self.slices[0]);
self.x = self.ics;
self.y = self.ipsilon;
self.z = self.zeta;
self.Phi = zeros(self.n * self.Nc);
self.Ei = zeros(self.n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.dE = 0.001;
self.eta = 1e-8;
self.mu1 = 0;
self.mu2 = 0;
self.Temp = 300;
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(self.n * self.Nc);
self.Egap = 0;
def gap(self):
return 0;
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
def charge_T(self):
H_charge_T(self);
self.E = array(self.E);
self.T = array(self.T);
self.charge = array(self.charge);
return;
def Zinc(self):
writeout(self.skparameters)
# quit()
Zinc(self);
# self.zeta = array(self.zeta);
# ics1 = []
# ipsilon1 = []
# zeta1 = []
# i = 0
# j = 0
# k = 0
# temp = self.zeta[0]- self.a0
# zeta1.append(temp)
# aux = []
# for ln in self.zeta:
# if (self.zeta[i]- self.a0) == temp:
# #temp = self.zeta[i]- self.a0
# i = i + 1
# j = j + 1
# else:
# zeta1.append(self.zeta[i]- self.a0)
# temp = self.zeta[i]- self.a0
# i = i + 1
# aux.append(j)
# j=1;
#
# print aux
# print self.zeta
# for i in range (100):
# print zeta1
# print 'slices =', int(self.slices[0])
# print 'atoms =', int(self.atoms[0])
# zeta2 = []
# for i in range (int(self.slices[0])):
# for j in range(int(self.atoms[0])):
# zeta2.append(zeta1[i])
#
# print 'ECCOLO'
# print zeta2
# self.zeta = zeta2
# print self.zeta
H_back = []
i = 0
j = 0
bound = int(self.max[0] / 102)
writeout(bound)
for i in range(bound):
row = []
for j in range(102):
row.append(self.H_aux[j + 102 * i])
H_back.append(row)
# print row
del row
# print H_back[40]
new = array(H_back, dtype=complex)
self.H = new
# print self.H[17]
# quit()
return;
def ciccione(vettore, n, Nc, z, a0):
ics1 = []
ipsilon1 = []
zeta1 = []
i = 0
j = 0
k = 0
temp = z[0] - a0
z1 = [];
z1.append(temp)
aux = []
for ln in arange(0, n * Nc):
if (z[i] - a0) == temp:
# temp = self.zeta[i]- self.a0
i = i + 1
j = j + 1
else:
z1.append(z[i] - a0)
temp = z[i] - a0
i = i + 1
aux.append(j)
j = 1;
# TODO: the following sum is equal to the total number of
# atoms, really present in the simulated nanowire
#
# Ntot_atoms=sum(aux[:Nc])
#
#
array2 = []
for i in range(Nc):
k = 0;
if (aux[i] == n):
for j in arange(sum(aux[:i]), sum(aux[:i]) + n):
array2.append(vettore[j])
else:
for j in arange(sum(aux[:i]), sum(aux[:i]) + aux[i]):
array2.append(vettore[j]);
for j in arange(sum(aux[:i]) + aux[i], sum(aux[:i]) + n):
array2.append(0)
return array(array2, dtype=float);
# This is the class for graphene within the top-of-the-barrier model
class graphene_TOB:
if (newton_loaded == "false"):
print ("scipy not installed")
print ("Cannot go on")
exit(0);
def __init__(self, C1, C2, Vg1, Vg2, Vds, pot_ini):
self.Vg1 = Vg1;
self.Vg2 = Vg2;
self.Vds = Vds;
self.mu1 = 0;
self.mu2 = -Vds;
self.Temp = 300;
self.thop = 2.7
self.acc = 0.144e-9
self.Pot_c = pot_ini;
self.C1 = C1;
self.C2 = C2;
def current(self):
vF = 3 * self.acc * self.thop * q / 2 / hbar;
E = linspace(-3, 3, 6000)
dE = E[1] - E[0];
T = 2 * q * abs(E + self.Pot_c) / pi / hbar / vF
vt = kboltz * self.Temp / q;
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * dE;
return sum(arg)
def rho(self):
vF = 3 * self.acc * self.thop * q / 2 / hbar;
return 0.5 * (-2 * (kboltz * self.Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, (
self.mu1 + self.Pot_c) / (kboltz * self.Temp / q)) + 2 * (
kboltz * self.Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, -(
self.mu1 + self.Pot_c) / (kboltz * self.Temp / q))) + 0.5 * (
-2 * (kboltz * self.Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, (
self.mu2 + self.Pot_c) / (kboltz * self.Temp / q)) + 2 * (
kboltz * self.Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, -(
self.mu2 + self.Pot_c) / (kboltz * self.Temp / q)))
def charge_I(self):
Pot_c = self.Pot_c;
Vg1 = self.Vg1;
Vg2 = self.Vg2;
C1 = self.C1;
C2 = self.C2;
Temp = self.Temp;
mu1 = self.mu1;
mu2 = self.mu2;
self.Pot_c = newton(eq1, self.Pot_c, fprime=None, args=(Vg1, Vg2, C1, C2, Temp, mu1, mu2), tol=1e-15,
maxiter=10000)
Id = self.current();
return self.Pot_c, Id, self.rho()
def eq1(Pot_c, Vg1, Vg2, C1, C2, Temp, mu1, mu2):
vF = 3 * 0.144e-9 * q * 2.7 / 2 / hbar;
charge_S = -2 * (kboltz * Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, (mu1 + Pot_c) / (
kboltz * Temp / q)) + 2 * (kboltz * Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, -(
mu1 + Pot_c) / (kboltz * Temp / q));
charge_D = -2 * (kboltz * Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, (mu2 + Pot_c) / (
kboltz * Temp / q)) + 2 * (kboltz * Temp) ** 2 / pi / hbar ** 2 / vF ** 2 * Fermi_Integrals(1, -(
mu2 + Pot_c) / (kboltz * Temp / q));
charge = (charge_S + charge_D) * 0.5
return C1 * (Vg1 - Pot_c) + C2 * (Vg2 - Pot_c) + q * charge;
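# Usage sketch (assumption, not part of the original module): sweep the top gate of a
# graphene top-of-the-barrier device and print the transfer characteristic. The
# capacitances, biases and gate range below are illustrative values only, and scipy's
# newton must be available (newton_loaded is True).
def _example_graphene_TOB_sweep():
    tob = graphene_TOB(C1=1e-2, C2=1e-2, Vg1=0.0, Vg2=0.0, Vds=0.1, pot_ini=0.0)
    for Vg in arange(-0.5, 0.55, 0.05):
        tob.Vg1 = Vg
        Pot_c, Id, rho = tob.charge_I()
        writeout("Vg = %s  Pot_c = %s  Id = %s" % (Vg, Pot_c, Id))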
class grid3D:
def __init__(self, *args):
# I initialize the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0;
        # args is a tuple and len(args) returns
# the number of arguments
# the number of arguments can be either 3 or 6
# if 3, the first three inputs are the grid along the
# x,y,z axis
# if 6, the first three inputs are the grid along the
# x,y,z axis, while the last three inputs are the x-y-z
# coordinates of the atoms
if (len(args) > 3):
xg = around(args[0], 5);
yg = around(args[1], 5);
zg = around(args[2], 5);
xC = around(args[3], 5);
yC = around(args[4], 5);
zC = around(args[5], 5);
npC = size(xC);
else:
xg = around(args[0], 5);
yg = around(args[1], 5);
zg = around(args[2], 5);
npC = 0;
# I create the grid
if (npC != 0):
# find the unique values for xC,yC and zC
uxC = unique(xC);
uyC = unique(yC);
uzC = unique(zC);
            # I find only the additional values which are in xg and not in uxC
# the same for the other axis
exg = intersect1d(setxor1d(xg, uxC), xg);
eyg = intersect1d(setxor1d(yg, uyC), yg);
ezg = intersect1d(setxor1d(zg, uzC), zg);
if (npC != 0):
x = unique(concatenate((uxC, xg), 0));
y = unique(concatenate((uyC, yg), 0));
z = unique(concatenate((uzC, zg), 0));
else:
x = xg;
y = yg;
z = zg;
# I start to compute the volume associated to each grid point
X, Y = meshgrid(x, y);
# Number of points
nx = size(x);
ny = size(y);
nxy = nx * ny;
nz = size(z);
Np = nxy * nz;
string = "Number of grid points %s " % Np
if (rank == 0): writeout(string)
####################################################################################
# I create the Volume elements using the sorted grid
xd = avervect(x);
yd = avervect(y);
zd = avervect(z);
X, Y = meshgrid(x, y);
X, Z = meshgrid(x, z);
XD, ZD = meshgrid(xd, zd);
surfxz = XD * ZD;
YD, ZD = meshgrid(yd, zd);
surfyz = YD * ZD;
XD, YD = meshgrid(xd, yd);
surfxy = XD * YD;
# The volumes for the sorted grid are finally computed
a, b = meshgrid((XD * YD).flatten(), zd);
dVes = abs((a * b).flatten());
if (rank == 0): writeout("Volumes computed")
####################################################################################
# I create the dist vectors
dists = zeros((Np, 6));
# I take care of dists[:,1]
i = arange(0, nx);
ip1 = i + 1;
ip1[nx - 1] = nx - 1;
xdistp = x[ip1] - x[i];
dists[:, 1] = meshgrid(meshgrid(xdistp, y)[0].flatten(), z)[0].flatten();
del ip1, xdistp;
# I take care of dists[:,0]
im1 = i - 1;
im1[0] = 0;
xdistm = x[i] - x[im1];
dists[:, 0] = meshgrid(meshgrid(xdistm, y)[0].flatten(), z)[0].flatten();
del i, im1, xdistm;
# I take care of dists[:,3]
j = arange(0, ny);
jp1 = j + 1;
jp1[ny - 1] = ny - 1;
ydistp = y[jp1] - y[j];
dists[:, 3] = meshgrid(meshgrid(x, ydistp)[1].flatten(), z)[0].flatten();
del jp1, ydistp;
# I take care of dists[:,2]
jm1 = j - 1;
jm1[0] = 0;
ydistm = y[j] - y[jm1];
dists[:, 2] = meshgrid(meshgrid(x, ydistm)[1].flatten(), z)[0].flatten();
del j, jm1, ydistm;
# I take care of dists[:,5]
k = arange(0, nz);
kp1 = k + 1;
kp1[nz - 1] = nz - 1;
zdistp = z[kp1] - z[k];
dists[:, 5] = meshgrid(meshgrid(x, y)[1].flatten(), zdistp)[1].flatten();
del kp1, zdistp;
# I take care of dists[:,4]
km1 = k - 1;
km1[0] = 0;
zdistm = z[k] - z[km1];
dists[:, 4] = meshgrid(meshgrid(x, y)[1].flatten(), zdistm)[1].flatten();
del k, km1, zdistm;
####################################################################################
# Now I work on the surfaces
surfs = zeros((Np, 6));
# surf 0
XD, YD = meshgrid(xd, yd)
##YD[:,0]=0;
a, b = meshgrid(YD.flatten(), zd)
surfs[:, 0] = abs((a * b).flatten());
# surf 1
XD, YD = meshgrid(xd, yd)
##YD[:,nx-1]=0;
a, b = meshgrid(YD.flatten(), zd)
surfs[:, 1] = abs((a * b).flatten());
# surf 2
XD, YD = meshgrid(xd, yd)
##XD[0,:]=0;
a, b = meshgrid(XD.flatten(), zd)
surfs[:, 2] = abs((a * b).flatten());
# surf 3
XD, YD = meshgrid(xd, yd)
##XD[ny-1,:]=0;
a, b = meshgrid(XD.flatten(), zd)
surfs[:, 3] = abs((a * b).flatten());
# surf 4
XD, YD = meshgrid(xd, yd)
a, b = meshgrid((XD * YD).flatten(), z)
surfs[:, 4] = abs(a.flatten());
##surfs[0:nx*ny-1,4]=0;
# surf 5
XD, YD = meshgrid(xd, yd)
a, b = meshgrid((XD * YD).flatten(), z)
surfs[:, 5] = abs(a.flatten());
##surfs[(nz-1)*(nx*ny):nz*nx*ny,5]=0;
if (rank == 0): writeout("Surfaces created")
####################################################################################
# Now I have to go back to the unsorted grid.
# I create the sorted and unsorted coordinates
# vectors as a function of the index
# sorted positions
x3Ds = meshgrid(meshgrid(x, y)[0].flatten(), z)[0].flatten();
y3Ds = meshgrid(meshgrid(x, y)[1].flatten(), z)[0].flatten();
z3Ds = meshgrid(meshgrid(x, y)[1].flatten(), z)[1].flatten();
# unsorted positions
if (npC != 0):
xtemp = unique(concatenate((uxC, xg), 0));
ytemp = unique(concatenate((uyC, yg), 0));
ztemp = unique(concatenate((uzC, zg), 0));
if (rank == 0): writeout("I work on the swap array");
NpC = size(xC);
swap = array(arange(0, NpC), int);
for i in range(0, NpC):
ixC = nonzero(xtemp == xC[i])[0][0];
iyC = nonzero(ytemp == yC[i])[0][0];
izC = nonzero(ztemp == zC[i])[0][0];
ii = ixC + iyC * nx + izC * nx * ny;
swap[i] = ii;
####################################################################################
        # I now fill the attributes of the instance of the grid class
self.x3D = x3Ds;
self.y3D = y3Ds
self.z3D = z3Ds
self.dVe = dVes;
self.surf = surfs;
self.dist = dists;
self.nx = nx;
self.ny = ny;
self.nz = nz;
self.Np = Np;
self.gridx = x;
self.gridy = y;
self.gridz = z;
if (npC != 0):
self.swap = swap;
self.xmin = min(x);
self.xmax = max(x);
self.ymin = min(y);
self.ymax = max(y);
self.zmin = min(z);
self.zmax = max(z);
return;
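# Usage sketch (assumption, not part of the original module): build a small uniform 3D
# grid with no atom coordinates; the extents below are illustrative values in nm.
def _example_grid3D():
    g = grid3D(linspace(0.0, 4.0, 21), linspace(0.0, 4.0, 21), linspace(0.0, 10.0, 51))
    writeout("grid3D created with %s grid points" % g.Np)
    return g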
class grid2D:
def __init__(self, *args):
# I initialize the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0;
        # args is a tuple and len(args) returns
# the number of arguments
# the number of arguments can be either 2 or 4
# if 2, the first two inputs are the grid along the
# x,y axis
# if 4, the first two inputs are the grid along the
# x,y axis, while the last two inputs are the x-y
# coordinates of the atoms
if (len(args) > 2):
xg = around(args[0], 5);
yg = around(args[1], 5);
xC = around(args[2], 5);
yC = around(args[3], 5);
npC = size(xC);
else:
xg = around(args[0], 5);
yg = around(args[1], 5);
npC = 0;
# I create the grid
if (npC != 0):
# find the unique values for xC,yC and zC
uxC = unique(xC);
uyC = unique(yC);
            # I find only the additional values which are in xg and not in uxC
# the same for the other axis
exg = intersect1d(setxor1d(xg, uxC), xg);
eyg = intersect1d(setxor1d(yg, uyC), yg);
if (npC != 0):
x = unique(concatenate((uxC, xg), 0));
y = unique(concatenate((uyC, yg), 0));
else:
x = xg;
y = yg;
# Number of points
nx = size(x);
ny = size(y);
nxy = nx * ny;
Np = nxy;
string = "Number of grid points %s " % Np
if (rank == 0): writeout(string)
####################################################################################
# I create the Volume elements using the sorted grid
xd = avervect(x);
yd = avervect(y);
X, Y = meshgrid(x, y);
XD, YD = meshgrid(xd, yd);
surfxy = XD * YD;
if (rank == 0): writeout("Volumes computed")
####################################################################################
# I create the dist vectors
dists = zeros((Np, 4));
# I take care of dists[:,1]
i = arange(0, nx);
ip1 = i + 1;
ip1[nx - 1] = nx - 1;
xdistp = x[ip1] - x[i];
dists[:, 1] = meshgrid(xdistp, y)[0].flatten();
del ip1, xdistp;
# I take care of dists[:,0]
im1 = i - 1;
im1[0] = 0;
xdistm = x[i] - x[im1];
dists[:, 0] = meshgrid(xdistm, y)[0].flatten()
del i, im1, xdistm;
# I take care of dists[:,3]
j = arange(0, ny);
jp1 = j + 1;
jp1[ny - 1] = ny - 1;
ydistp = y[jp1] - y[j];
dists[:, 3] = meshgrid(x, ydistp)[1].flatten()
del jp1, ydistp;
# I take care of dists[:,2]
jm1 = j - 1;
jm1[0] = 0;
ydistm = y[j] - y[jm1];
dists[:, 2] = meshgrid(x, ydistm)[1].flatten();
del j, jm1, ydistm;
####################################################################################
# Now I work on the surface
XD, YD = meshgrid(xd, yd)
surfs = (XD * YD).flatten();
if (rank == 0): writeout("Surface created")
####################################################################################
# Now I have to go back to the unsorted grid.
# I create the sorted and unsorted coordinates
# vectors as a function of the index
# sorted positions
x2Ds = meshgrid(x, y)[0].flatten();
y2Ds = meshgrid(x, y)[1].flatten();
# unsorted positions
if (npC != 0):
xtemp = unique(concatenate((uxC, xg), 0));
ytemp = unique(concatenate((uyC, yg), 0));
if (rank == 0): writeout("I work on the swap array");
NpC = size(xC);
swap = array(arange(0, NpC), int);
for i in range(0, NpC):
ixC = nonzero(xtemp == xC[i])[0][0];
iyC = nonzero(ytemp == yC[i])[0][0];
ii = ixC + iyC * nx;
swap[i] = ii;
####################################################################################
        # I now fill the attributes of the instance of the grid class
self.x2D = x2Ds;
self.y2D = y2Ds
self.surf = surfs;
self.dist = dists;
self.nx = nx;
self.ny = ny;
self.Np = Np;
self.gridx = x;
self.gridy = y;
if (npC != 0):
self.swap = swap;
self.xmin = min(x);
self.xmax = max(x);
self.ymin = min(y);
self.ymax = max(y);
return;
class grid1D:
def __init__(self, *args):
# I initialize the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0;
        # args is a tuple and len(args) returns
# the number of arguments
# the number of arguments can be either 1 or 2
# if 1, the first input is the grid along the
# x axis
# if 2, the first input is the grid along the
# x axis, while the second input is the x
# coordinates of the atoms
if (len(args) > 1):
xg = around(args[0], 5);
            xC = around(args[1], 5); # note: modified on 28/5/2011
npC = size(xC);
else:
xg = around(args[0], 5);
npC = 0;
# I create the grid
if (npC != 0):
# find the unique values for xC
uxC = unique(xC);
            # I find only the additional values which are in xg and not in uxC
exg = intersect1d(setxor1d(xg, uxC), xg);
if (npC != 0):
x = unique(concatenate((uxC, xg), 0));
else:
x = xg;
# Number of points
nx = size(x);
Np = nx;
if (rank == 0): print(("Number of grid points ", Np))
####################################################################################
# I create the dist vectors
dists = zeros((Np, 4));
# I take care of dists[:,1]
i = arange(0, nx);
ip1 = i + 1;
ip1[nx - 1] = nx - 1;
xdistp = x[ip1] - x[i];
dists[:, 1] = xdistp;
del ip1, xdistp;
# I take care of dists[:,0]
im1 = i - 1;
im1[0] = 0;
xdistm = x[i] - x[im1];
dists[:, 0] = xdistm;
del i, im1, xdistm;
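        # Illustrative note (added; not in the original code): for a grid
        # x = [0., 1., 3.] the code above gives dists[:, 0] = [0., 1., 2.]
        # (backward spacing, clamped to 0 at the first node) and
        # dists[:, 1] = [1., 2., 0.] (forward spacing, clamped to 0 at the last node).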
####################################################################################
# Now I have to go back to the unsorted grid.
# I create the sorted and unsorted coordinates
# vectors as a function of the index
if (npC != 0):
xtemp = unique(concatenate((uxC, xg), 0));
if (rank == 0): print("I work on the swap array");
NpC = size(xC);
swap = array(arange(0, NpC), int);
for i in range(0, NpC):
ixC = nonzero(xtemp == xC[i])[0][0];
ii = ixC;
swap[i] = ii;
####################################################################################
        # I now fill the attributes of the instance of the grid class
self.x = x;
self.dist = dists;
self.nx = nx;
self.Np = Np;
self.gridx = x;
if (npC != 0):
self.swap = swap;
self.xmin = min(x);
self.xmax = max(x);
return;
class region:
def __init__(self, *args):
self.name = "none";
self.geometry = "hex";
self.eps = 3.9;
self.rho = 0;
if (args[0] == "hex"):
if (len(args) > 5):
self.xmin = args[1];
self.xmax = args[2];
self.ymin = args[3];
self.ymax = args[4];
self.zmin = args[5];
self.zmax = args[6];
elif ((len(args) > 3) & (len(args) <= 5)):
self.xmin = args[1];
self.xmax = args[2];
self.ymin = args[3];
self.ymax = args[4];
elif (len(args) <= 3):
self.xmin = args[1];
self.xmax = args[2];
def set_material(self, material):
if (material.lower() == "sio2"):
self.eps = 3.9;
self.mel = 0.5;
self.met = 0.5;
self.Egap = 8.05;
self.chi = 0.95;
self.mhole = 0.42
if (material.lower() == "si"):
self.eps = 11.8;
self.mel = 0.916;
self.met = 0.19;
self.Egap = 1.124519;
self.chi = 4.05;
self.mhole = 0.549;
class gate:
def __init__(self, *args):
self.geometry = "hex";
self.Ef = 0;
self.wf = 4.1;
if (args[0] == "hex"):
if (len(args) > 5):
self.xmin = args[1];
self.xmax = args[2];
self.ymin = args[3];
self.ymax = args[4];
self.zmin = args[5];
self.zmax = args[6];
elif ((len(args) > 3) & (len(args) <= 5)):
self.xmin = args[1];
self.xmax = args[2];
self.ymin = args[3];
self.ymax = args[4];
elif (len(args) <= 3):
self.xmin = args[1];
self.xmax = args[2];
if (args[0] == "cyl"):
self.xc = args[1];
self.yc = args[2];
self.radius = args[3];
self.geometry = "cyl"
if (args[0] == "trapz"):
self.xmin = args[1];
self.xmax = args[2];
self.y1 = args[3];
self.z1 = args[4];
self.y2 = args[5];
self.z2 = args[6];
self.y3 = args[7];
self.z3 = args[8];
self.y4 = args[9];
self.z4 = args[10];
self.geometry = "trapz"
class interface3D:
def __init__(self, *args):
# I set the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
self.rank = rank;
else:
self.rank = 0;
# I compute the number of arguments (classes)
Narg = size(args);
# I first find the index of the class grid
igrid = -10;
for i in range(0, Narg):
name = args[i].__class__.__name__
if (name == "grid3D"):
igrid = i;
# If no grid class is specified I exit
if (igrid == -10):
writeout("ERROR: grid not passed to structure")
return;
# I create the arrays to be used
self.eps = zeros(args[igrid].Np);
# I create the vector, where the boundary conditions
# are specified:
# if 2000 : inner point
# if 1001 : Neumann 1
# if 1002 : Neumann 2
# if 1003 : Neumann 3
# if 1004 : Neumann 4
# if 1005 : Neumann 5
# if 1006 : Neumann 6
# if <= 1000: Fermi level of the gate
# I start defining all the points as inner points
self.boundary_conditions = 2000 * ones(args[igrid].Np);
###############################################################################################
# Now I impose the Neumann Boundary conditions on
# the surfaces delimiting the 3D domain
###############################################################################################
# I take care of Neumann1
indexNeu1 = nonzero(args[igrid].x3D == min(args[igrid].gridx));
self.boundary_conditions[indexNeu1] = 1001;
# I take care of Neumann2
indexNeu2 = nonzero(args[igrid].x3D == max(args[igrid].gridx));
self.boundary_conditions[indexNeu2] = 1002;
# I take care of Neumann3
indexNeu3 = nonzero(args[igrid].y3D == min(args[igrid].gridy));
self.boundary_conditions[indexNeu3] = 1003;
# I take care of Neumann4
indexNeu4 = nonzero(args[igrid].y3D == max(args[igrid].gridy));
self.boundary_conditions[indexNeu4] = 1004
# I take care of Neumann5 and Neumann6
indexNeu5 = nonzero(args[igrid].z3D == min(args[igrid].gridz));
self.boundary_conditions[indexNeu5] = 1005;
indexNeu6 = nonzero(args[igrid].z3D == max(args[igrid].gridz));
self.boundary_conditions[indexNeu6] = 1006;
###############################################################################################
        # I check which class each of the args belongs to
# and I proceed accordingly
###############################################################################################
for i in range(0, Narg):
name = args[i].__class__.__name__
# I check if the class is a gate
if (name == "gate"):
                # I check if the geometry is a hexahedron
if (args[i].geometry == "hex"):
                    # I find the indexes of the 3D grid that belong to the gate
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x3D) & (args[i].xmax >= args[igrid].x3D) &
(args[i].ymin <= args[igrid].y3D) & (args[i].ymax >= args[igrid].y3D) &
(args[i].zmin <= args[igrid].z3D) & (args[i].zmax >= args[igrid].z3D));
self.boundary_conditions[index] = args[i].Ef;
args[i].index = index;
if (args[i].geometry == "trapz"):
                    # I find the indexes of the 3D grid that belong to the gate
                    # with trapezoidal geometry
if (args[i].y2 == args[i].y1):
m1 = (args[i].z2 - args[i].z1) / (args[i].y2 - args[i].y1 + 1e-3)
else:
m1 = (args[i].z2 - args[i].z1) / (args[i].y2 - args[i].y1)
if (args[i].y3 == args[i].y2):
m2 = (args[i].z3 - args[i].z2) / (args[i].y3 - args[i].y2 + 1e-3)
else:
m2 = (args[i].z3 - args[i].z2) / (args[i].y3 - args[i].y2)
if (args[i].y4 == args[i].y3):
m3 = (args[i].z4 - args[i].z3) / (args[i].y4 - args[i].y3 + 1e-3)
else:
m3 = (args[i].z4 - args[i].z3) / (args[i].y4 - args[i].y3)
if (args[i].y4 == args[i].y1):
m4 = (args[i].z4 - args[i].z1) / (args[i].y4 - args[i].y1 + 1e-3)
else:
m4 = (args[i].z4 - args[i].z1) / (args[i].y4 - args[i].y1)
index = nonzero((args[igrid].z3D >= (m1 * (args[igrid].y3D - args[i].y1) + args[i].z1)) &
(args[igrid].z3D >= (m2 * (args[igrid].y3D - args[i].y2) + args[i].z2)) &
(args[igrid].z3D <= (m3 * (args[igrid].y3D - args[i].y3) + args[i].z3)) &
(args[igrid].z3D <= (m2 * (args[igrid].y3D - args[i].y1) + args[i].z1)) &
(args[i].xmin <= args[igrid].x3D) & (args[i].xmax >= args[igrid].x3D));
self.boundary_conditions[index] = args[i].Ef;
args[i].index = index;
elif (name == "region"):
if (args[i].geometry == "hex"):
                    # I find the indexes of the 3D grid that belong to the region
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x3D) & (args[i].xmax >= args[igrid].x3D) &
(args[i].ymin <= args[igrid].y3D) & (args[i].ymax >= args[igrid].y3D) &
(args[i].zmin <= args[igrid].z3D) & (args[i].zmax >= args[igrid].z3D));
self.eps[index] = args[i].eps;
elif (name == "grid3D"):
# dummy line
name;
else:
writeout("ERROR: Unrecognized input")
return;
###############################################################################################
# I fill the field of the interface class
###############################################################################################
# self.boundary already filled
# self.eps already filled
self.Phiold = zeros(args[igrid].Np)
self.Phi = zeros(args[igrid].Np);
self.normpoisson = 1e-3;
self.tolldomn = 1e-1;
self.underel = 0;
self.free_charge = zeros(args[igrid].Np);
self.fixed_charge = zeros(args[igrid].Np);
self.normd = 5e-2;
self.modespace = "no"
self.MPI = "no"
self.MPI_kt = "no"
return;
class interface2D:
def __init__(self, *args):
# I set the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
self.rank = rank;
else:
self.rank = 0;
# I compute the number of arguments (classes)
Narg = size(args);
# I first find the index of the class grid
igrid = -10;
for i in range(0, Narg):
name = args[i].__class__.__name__
if (name == "grid2D"):
igrid = i;
# If no grid class is specified I exit
if (igrid == -10):
writeout("ERROR: grid not passed to structure")
return;
# I create the arrays to be used
self.eps = zeros(args[igrid].Np);
# I create the vector, where the boundary conditions
# are specified:
# if 2000 : inner point
# if 1001 : Neumann 1
# if 1002 : Neumann 2
# if 1003 : Neumann 3
# if 1004 : Neumann 4
# if <= 1000: Fermi level of the gate
# I start defining all the points as inner points
self.boundary_conditions = 2000 * ones(args[igrid].Np);
###############################################################################################
        # Now I impose the Neumann Boundary conditions on
        # the surfaces delimiting the 2D domain
###############################################################################################
# I take care of Neumann1
indexNeu1 = nonzero(args[igrid].x2D == min(args[igrid].gridx));
self.boundary_conditions[indexNeu1] = 1001;
# I take care of Neumann2
indexNeu2 = nonzero(args[igrid].x2D == max(args[igrid].gridx));
self.boundary_conditions[indexNeu2] = 1002;
# I take care of Neumann3
indexNeu3 = nonzero(args[igrid].y2D == min(args[igrid].gridy));
self.boundary_conditions[indexNeu3] = 1003;
# I take care of Neumann4
indexNeu4 = nonzero(args[igrid].y2D == max(args[igrid].gridy));
self.boundary_conditions[indexNeu4] = 1004
###############################################################################################
        # I check which class each of the args belongs to
# and I proceed accordingly
###############################################################################################
for i in range(0, Narg):
name = args[i].__class__.__name__
# I check if the class is a gate
if (name == "gate"):
                # I check if the geometry is a hexahedron
if (args[i].geometry == "hex"):
                    # I find the indexes of the 2D grid that belong to the gate
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x2D) & (args[i].xmax >= args[igrid].x2D) &
(args[i].ymin <= args[igrid].y2D) & (args[i].ymax >= args[igrid].y2D));
self.boundary_conditions[index] = args[i].Ef;
args[i].index = index;
                # I check if the geometry is cylindrical
if (args[i].geometry == "cyl"):
                    # I find the indexes of the 2D grid that belong to the gate
                    # with cyl geometry
index = nonzero(((args[i].xc - args[igrid].x2D) ** 2 + (args[i].yc - args[igrid].y2D) ** 2) < (
args[i].radius) ** 2);
self.boundary_conditions[index] = args[i].Ef;
args[i].index = index;
elif (name == "region"):
if (args[i].geometry == "hex"):
                    # I find the indexes of the 2D grid that belong to the region
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x2D) & (args[i].xmax >= args[igrid].x2D) &
(args[i].ymin <= args[igrid].y2D) & (args[i].ymax >= args[igrid].y2D));
self.eps[index] = args[i].eps;
elif (name == "grid2D"):
# dummy line
name;
else:
writeout("ERROR: Unrecognized input")
return;
###############################################################################################
# I fill the field of the interface class
###############################################################################################
# self.boundary already filled
# self.eps already filled
self.Phiold = zeros(args[igrid].Np)
self.Phi = zeros(args[igrid].Np);
self.normpoisson = 1e-3;
self.tolldomn = 1e-1;
self.underel = 0;
self.free_charge = zeros(args[igrid].Np);
self.fixed_charge = zeros(args[igrid].Np);
self.normd = 5e-2;
self.modespace = "no"
self.MPI = "no"
self.MPI_kt = "no"
return;
class interface1D:
def __init__(self, *args):
# I set the rank
if (mpi4py_loaded):
rank = MPI.COMM_WORLD.Get_rank()
self.rank = rank;
else:
self.rank = 0;
# I compute the number of arguments (classes)
Narg = size(args);
# I first find the index of the class grid
igrid = -10;
for i in range(0, Narg):
name = args[i].__class__.__name__
if (name == "grid1D"):
igrid = i;
# If no grid class is specified I exit
if (igrid == -10):
print("ERROR: grid not passed to structure")
return;
# I create the arrays to be used
self.eps = zeros(args[igrid].Np);
self.mel = zeros(args[igrid].Np);
self.met = zeros(args[igrid].Np);
self.chi = zeros(args[igrid].Np);
self.Egap = zeros(args[igrid].Np);
self.fixed_charge = zeros(args[igrid].Np);
self.mhole = zeros(args[igrid].Np);
# I create the vector, where the boundary conditions
# are specified:
# if 2000 : inner point
# if 1001 : Neumann 1
# if 1002 : Neumann 2
# if <= 1000: Fermi level of the gate
# I start defining all the points as inner points
self.boundary_conditions = 2000 * ones(args[igrid].Np);
###############################################################################################
        # Now I impose the Neumann Boundary conditions on
        # the boundaries delimiting the 1D domain
###############################################################################################
# I take care of Neumann1
indexNeu1 = nonzero(args[igrid].x == min(args[igrid].gridx));
self.boundary_conditions[indexNeu1] = 1001;
# I take care of Neumann2
indexNeu2 = nonzero(args[igrid].x == max(args[igrid].gridx));
self.boundary_conditions[indexNeu2] = 1002;
###############################################################################################
        # I check which class each of the args belongs to
# and I proceed accordingly
###############################################################################################
for i in range(0, Narg):
name = args[i].__class__.__name__
# I check if the class is a gate
if (name == "gate"):
                # I check if the geometry is a hexahedron
if (args[i].geometry == "hex"):
                    # I find the indexes of the 1D grid that belong to the gate
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x) & (args[i].xmax >= args[igrid].x));
self.boundary_conditions[index] = args[i].Ef;
args[i].index = index;
elif (name == "region"):
if (args[i].geometry == "hex"):
dist = avervect(args[igrid].x) * 1e-9;
                    # I find the indexes of the 1D grid that belong to the region
                    # with hex geometry
index = nonzero((args[i].xmin <= args[igrid].x) & (args[i].xmax >= args[igrid].x));
self.eps[index] = args[i].eps;
self.mel[index] = args[i].mel;
self.met[index] = args[i].met;
self.chi[index] = args[i].chi;
self.Egap[index] = args[i].Egap;
self.fixed_charge[index] = args[i].rho * dist[index];
self.mhole[index] = args[i].mhole;
elif (name == "grid1D"):
# dummy line
name;
else:
print("ERROR: Unrecognized input")
return;
###############################################################################################
# I fill the field of the interface class
###############################################################################################
# self.boundary already filled
# self.eps already filled
self.Phiold = zeros(args[igrid].Np)
self.Phi = zeros(args[igrid].Np);
self.normpoisson = 1e-3;
self.tolldomn = 1e-1;
self.underel = 0;
self.free_charge = zeros(args[igrid].Np);
self.normd = 5e-2;
self.modespace = "no"
self.MPI = "no"
self.MPI_kt = "no"
return;
def dope_reservoir(grid, interface, channel, molar_fraction, bbox): #bbox is the bounding box of the reservoir
name = grid.__class__.__name__;
if (name == "grid3D"):
xmin = bbox[0];
xmax = bbox[1];
ymin = bbox[2];
ymax = bbox[3];
zmin = bbox[4];
zmax = bbox[5];
index = nonzero((xmin <= grid.x3D[grid.swap]) & (xmax >= grid.x3D[grid.swap]) &
(ymin <= grid.y3D[grid.swap]) & (ymax >= grid.y3D[grid.swap]) &
(zmin <= grid.z3D[grid.swap]) & (zmax >= grid.z3D[grid.swap]))
interface.fixed_charge[grid.swap[index]] = molar_fraction;
elif (name == "grid2D"):
xmin = bbox[0];
xmax = bbox[1];
ymin = bbox[2];
ymax = bbox[3];
index = nonzero((xmin <= grid.x2D[grid.swap]) & (xmax >= grid.x2D[grid.swap]) &
(ymin <= grid.y2D[grid.swap]) & (ymax >= grid.y2D[grid.swap]))
interface.fixed_charge[grid.swap[index]] = molar_fraction / channel.delta * 1e9;
elif (name == "grid1D"):
xmin = bbox[0];
xmax = bbox[1];
index = nonzero((xmin <= grid.x[grid.swap]) & (xmax >= grid.x[grid.swap]));
interface.fixed_charge[grid.swap[index]] = molar_fraction / (channel.deltaz * channel.deltay) * 1e18;
        # MODIFIED ON 6/6/2011: added deltay and deltaz
return index;
class Device:
def __init__(self):
self.Nregions = 1;
self.regions = [];
self.E = zeros(int(NEmax));
def test(self):
return self.E;
def test_var_args(farg, *args):
writeout("formal arg:"), size(args)
for arg in args:
writeout("another arg:"), arg
def avervect(x):
    # This function computes the lengths of
    # the Voronoi segments of a one-dimensional array x
nx = size(x);
xd = zeros(nx);
xini = x[0];
xd[0] = abs(x[0] - x[1]) * 0.5;
for i in range(1, nx - 1):
xd[i] = abs((x[i + 1] - x[i - 1]) * 0.5);
xd[nx - 1] = abs(x[nx - 1] - x[nx - 2]) * 0.5
return xd;
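# Illustrative note (added; not part of the original code): avervect([0., 1., 3.])
# returns [0.5, 1.5, 1.0] -- half the end spacing at each boundary node and the
# half-width of the two neighbouring intervals at interior nodes.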
def save_format_xyz(outputfile, x, y, z, atom):
    import subprocess
out = [x * 10, y * 10, z * 10]
fp = open(outputfile, "w");
fp.write(str(size(x)));
fp.write("\n");
fp.write("\n");
for i in range(0, size(x)):
string = "%s %s %s %s" % (atom, out[0][i], out[1][i], out[2][i]);
fp.write(string);
fp.write(" ");
fp.write("\n");
fp.close()
return;
"""def convert_pdb(filename,thop):
fp=open(filename,"r");
hh=[];
atoms=0;
i=0;
x=[];
y=[];
z=[];
h=[];
h.append([1,0,0]);
for line in fp:
hh.append(line);
atoms=atoms+(hh[i].split()).count('HETATM');
if (((hh[i].split()).count('HETATM')==1)|((hh[i].split()).count('ATOM')==1)):
x.append((hh[i].split())[5]);
y.append((hh[i].split())[6]);
z.append((hh[i].split())[7]);
h.append([int((hh[i].split())[1]),int((hh[i].split())[1]),0]);
if ((hh[i].split()).count('CONECT')==1):
a=(hh[i].split());
NPV=size(a)-1
for j in range(0,NPV):
a1=int(a[1]);
if (a1<int(a[j+1])):
h.append([a1,int(a[j+1]),thop])
if ((hh[i].split()).count('CRYST1')==1):
a=(hh[i].split());
if (double(a[1])>=100):
deltax=0.0;
else:
deltax=double(a[1])/10.0;
if (double(a[2])>=100):
deltay=0.0;
else:
deltay=double(a[2])/10.0;
if (double(a[3])>=100):
deltaz=0.0;
else:
deltaz=double(a[3])/10.0;
i=i+1;
fp.close()
H=array(h,dtype(complex));
x=array(x,dtype(float))/10.0;
y=array(y,dtype(float))/10.0;
z=array(z,dtype(float))/10.0;
return H,x,y,z,deltax,deltay,deltaz;"""
def create_H_from_xyz(x, y, z, orbitals, onsite, thop, d_bond, Nbond):
# WE ASSUME THAT:
#
# 1) TRANSPORT IS IN THE Z DIRECTION
    # 2) THE STRUCTURE IS COMPOSED OF THE SAME TYPE OF ATOMS
# 3) ALONG THE Z-DIRECTION THE STRUCTURE IS PERIODIC WITH PERIOD EQUAL TO 4 SLICES
#
# I find the minimum and maximum coordinates at the border
# so to take care of the passivation of the atoms at the borders
xmin = min(x);
xmax = max(x);
ymin = min(y);
ymax = max(y);
zmin = min(z);
zmax = max(z);
# I compute the number of slices (ASSUMPTION 2)
Nc = int(size(unique(z)));
# I have already computed n at the beginning
# n=int(size(nonzero(z==zmin)));
# I compute the number of atoms in the first 4 slices
temp = unique(z);
Natom_slices = size(nonzero(z <= temp[3]));
del temp;
# I check the maximum number of atoms on each slice;
u = unique(z);
Nuz = size(u);
n = -1;
for i in range(0, Nuz):
nnew = size(nonzero(z == u[i]));
if (nnew >= n):
n = nnew;
del i;
    # Now I start doing the tough stuff
# I fill x,y and z with dummy atoms
# If it is a dummy atom, the coordinate is equal to dummy_coord
dummy_coord = 10000;
xa = [];
ya = [];
za = [];
k = 0;
for i in range(0, Nuz):
# print ya
nnew = size(nonzero(z == u[i]));
for j in range(0, nnew):
xa.append(x[k]);
ya.append(y[k]);
za.append(z[k]);
k = k + 1;
if (nnew < n):
for j in range(nnew, n):
xa.append(dummy_coord);
ya.append(dummy_coord);
za.append(dummy_coord);
# k=k+1;
del x, y, z, u, i
x = array(xa, dtype(float));
y = array(ya, dtype(float));
z = array(za, dtype(float));
# del xa,ya,za
Np = size(x);
Ncol_max = 10;
NN = zeros((Np, Ncol_max), dtype(int));
border = []
# I first find the Nearest Neighbour
for i in range(0, Np):
ind = nonzero((sqrt((x - x[i]) ** 2 + (y - y[i]) ** 2 + (z - z[i]) ** 2) <= d_bond) & (
sqrt((x - x[i]) ** 2 + (y - y[i]) ** 2 + (z - z[i]) ** 2) > 1e-10))[0];
if (size(ind) > Ncol_max):
print()
writeout("ERROR IN create_H_from_xyz subroutine in NanoTCAD_ViDES.py file")
writeout("Use a larger value for Ncol_max")
print()
exit(0);
# print i
NN[i, 0] = i + 1;
NN[i, 1:size(ind) + 1] = ind + 1;
NPV = size(nonzero(NN[i, :])) - 1;
if (NPV < Nbond):
border.append(i);
# Now I work on the Hamiltonian
atoms = 0;
i = 0;
h = [];
# I fill the h list with the number of orbitals
ll = [orbitals, 0];
fill = zeros(orbitals ** 2);
h.append(ll + list(fill))
del ll, i
# I take care of the diagonal elements
for i in range(0, Np):
if ((x[i] < dummy_coord)):
if (orbitals > 1):
# (ASSUMPTION 1)
if i in border:
xfn = zeros(4);
yfn = zeros(4);
zfn = zeros(4);
if (z[i] == zmin):
NPV = size(nonzero(NN[i + 4 * n, :])) - 1;
xfn = x[NN[i + n * 4, 1:NPV + 1] - 1];
yfn = y[NN[i + n * 4, 1:NPV + 1] - 1];
zfn = z[NN[i + n * 4, 1:NPV + 1] - 1];
xp = x[i + n * 4];
yp = y[i + n * 4];
zp = z[i + n * 4];
elif (z[i] == zmax):
NPV = size(nonzero(NN[i - 4 * n, :])) - 1;
xfn = x[NN[i - n * 4, 1:NPV + 1] - 1];
yfn = y[NN[i - n * 4, 1:NPV + 1] - 1];
zfn = z[NN[i - n * 4, 1:NPV + 1] - 1];
xp = x[i - n * 4];
yp = y[i - n * 4];
zp = z[i - n * 4];
else:
NPV = size(nonzero(NN[i, :])) - 1;
xfn = x[NN[i, 1:NPV + 1] - 1];
yfn = y[NN[i, 1:NPV + 1] - 1];
zfn = z[NN[i, 1:NPV + 1] - 1];
xp = x[i];
yp = y[i];
zp = z[i];
deltae = 20.0;
tempM = Sipassivation(xp, yp, zp, NPV, xfn, yfn, zfn, deltae);
# print tempM
# print x[i],y[i],z[i]
# print xfn
# print yfn
# print zfn
# exit(0);
B = zeros((10, 10));
B[:4, :4] = tempM.reshape(4, 4);
h.append([i + 1, i + 1] + list((diag(onsite) + B).flatten()));
# h.append([i+1,i+1]+list((diag(onsite)).flatten()));
del B, tempM, xfn, yfn, zfn;
else:
h.append([i + 1, i + 1] + list((diag(onsite)).flatten()));
else:
h.append([i + 1, i + 1] + list(fill));
else:
# If the atom is dummy then I mark it with the 77777 value
# Right now it works only for one orbital
h.append([i + 1, i + 1] + list(77777 * ones(orbitals ** 2)));
# I take care of the off-diagonal elements
for i in range(0, Np):
NPV = size(nonzero(NN[i, :])) - 1;
for j in range(0, NPV):
a1 = int(NN[i, 0]);
if (a1 < int(NN[i, j + 1])):
if (orbitals > 1):
# I compute the cosine
module = sqrt(((double(x[a1 - 1]) - double(x[int(NN[i, j + 1]) - 1])) ** 2) + (
double(y[a1 - 1]) - double(y[int(NN[i, j + 1]) - 1])) ** 2 + (
double(z[a1 - 1]) - double(z[int(NN[i, j + 1]) - 1])) ** 2);
cosx = (-double(x[a1 - 1]) + double(x[int(NN[i, j + 1]) - 1])) / module;
cosy = (-double(y[a1 - 1]) + double(y[int(NN[i, j + 1]) - 1])) / module;
cosz = (-double(z[a1 - 1]) + double(z[int(NN[i, j + 1]) - 1])) / module;
# print a1,int(NN[i,j+1]),cosx,cosy,cosz,module
# input=hstack((array([cosx,cosy,cosy]),thop));
# print input
# matrix_thop=Simatrix(input);
matrix_thop = Simatrix(cosx, cosy, cosz, thop);
# print matrix_thop
# print "----------------"
h.append([a1, int(NN[i, j + 1])] + list(matrix_thop));
else:
h.append([a1, int(NN[i, j + 1]), thop])
H = array(h, dtype=complex);
return H, n, Nc, x, y, z;
def get_xyz_from_file(filename):
fp = open(filename, "r");
xa = []
ya = []
za = []
for line in fp:
if (size(line.split()) > 3):
xa.append((line.split())[1]);
ya.append((line.split())[2]);
za.append((line.split())[3]);
x = array(xa, dtype(float));
y = array(ya, dtype(float));
z = array(za, dtype(float));
del xa, ya, za
return x, y, z;
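# Expected input sketch (added; an assumption inferred from the parsing above and
# from the companion save_format_xyz writer): each atom line holds at least four
# whitespace-separated fields, e.g.
#   C 0.000 0.142 0.000
# where the three numbers following the element symbol are read as x, y, z.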
def convert_pdb(filename, orbitals, thop):
# ASSUMPTION: ALL THE ATOMS ARE OF THE SAME MATERIAL
# I first read the atoms coordinates
hh = [];
deltax = 0;
deltay = 0;
deltaz = 0;
x = [];
y = [];
z = [];
i = 0;
fp = open(filename, "r");
for line in fp:
hh.append(line);
if (((hh[i].split()).count('HETATM') == 1) | ((hh[i].split()).count('ATOM') == 1)):
# ATOM_TYPE=(hh[i].split())[2];
x.append((hh[i].split())[5]);
y.append((hh[i].split())[6]);
z.append((hh[i].split())[7]);
i = i + 1;
fp.close()
del hh;
# Now I work on the Hamiltonian
hh = [];
atoms = 0;
i = 0;
h = [];
# I fill the h list with the number of orbitals
ll = [orbitals, 0];
fill = zeros(orbitals ** 2);
h.append(ll + list(fill))
del ll
# I fill the rest of the h list
fp = open(filename, "r");
for line in fp:
hh.append(line);
atoms = atoms + (hh[i].split()).count('HETATM');
if (((hh[i].split()).count('HETATM') == 1) | ((hh[i].split()).count('ATOM') == 1)):
if (orbitals > 1):
h.append([int((hh[i].split())[1]), int((hh[i].split())[1])] + list((diag(onsite)).flatten()));
else:
h.append([int((hh[i].split())[1]), int((hh[i].split())[1])] + list(fill));
if ((hh[i].split()).count('CONECT') == 1):
a = (hh[i].split());
NPV = size(a) - 1
for j in range(0, NPV):
a1 = int(a[1]);
if (a1 < int(a[j + 1])):
if (orbitals > 1):
# I compute the cosine
module = sqrt(((double(x[a1 - 1]) - double(x[int(a[j + 1]) - 1])) ** 2) + (
double(y[a1 - 1]) - double(y[int(a[j + 1]) - 1])) ** 2 + (
double(z[a1 - 1]) - double(z[int(a[j + 1]) - 1])) ** 2);
cosx = (double(x[a1 - 1]) - double(x[int(a[j + 1]) - 1])) / module;
cosy = (double(y[a1 - 1]) - double(y[int(a[j + 1]) - 1])) / module;
cosz = (double(z[a1 - 1]) - double(z[int(a[j + 1]) - 1])) / module;
cosx = 1;
cosy = 1;
cosz = 1;
input = hstack((array([cosx, cosy, cosy]), thop));
matrix_thop = Simatrix(input);
h.append([a1, int(a[j + 1])] + list(matrix_thop));
else:
h.append([a1, int(a[j + 1]), thop])
if ((hh[i].split()).count('CRYST1') == 1):
a = (hh[i].split());
if (double(a[1]) >= 100):
deltax = 0.0;
else:
deltax = double(a[1]) / 10.0;
if (double(a[2]) >= 100):
deltay = 0.0;
else:
deltay = double(a[2]) / 10.0;
if (double(a[3]) >= 100):
deltaz = 0.0;
else:
deltaz = double(a[3]) / 10.0;
i = i + 1;
fp.close()
H = array(h, dtype(complex));
x = array(x, dtype(float)) / 10.0;
y = array(y, dtype(float)) / 10.0;
z = array(z, dtype(float)) / 10.0;
return H, x, y, z, deltax, deltay, deltaz;
def Hamiltonian_per(H, x, y, z, deltax, deltay, deltaz, aCC, thop, k):
Np = size(x);
Hnew = H.copy();
conn_per = []
for ii in range(0, Np):
xc = x[ii];
yc = y[ii];
zc = z[ii];
        # Here I compare with 1.05*aCC in order to take into account numerical tolerances
indp = nonzero(sqrt((x - xc + deltax) ** 2 + (y - yc + deltay) ** 2 + (z - zc + deltaz) ** 2) < aCC * 1.05)[
0] + 1;
indm = nonzero(sqrt((x - xc - deltax) ** 2 + (y - yc - deltay) ** 2 + (z - zc - deltaz) ** 2) < aCC * 1.05)[
0] + 1;
if (size(indp) > 0):
for j in range(0, size(indp)):
conn_per.append([ii + 1, indp[j]]);
if (size(indm) > 0):
for j in range(0, size(indm)):
conn_per.append([ii + 1, indm[j]]);
del ii
Nconn = len(conn_per);
for ii in range(Nconn):
ind = nonzero((H[:, 0] == conn_per[ii][0]) & (H[:, 1] == conn_per[ii][1]))[0]
if (size(ind) > 0):
if (deltax > 0):
segno = sign(x[int(abs(H[ind, 0])) - 1] - x[int(abs(H[ind, 1])) - 1]);
Hnew[ind, 2] = H[ind, 2] + thop * exp(-segno * k * deltax * 1j);
elif (deltay > 0):
segno = sign(y[int(abs(H[ind, 0])) - 1] - y[int(abs(H[ind, 1])) - 1]);
Hnew[ind, 2] = H[ind, 2] + thop * exp(-segno * k * deltay * 1j);
else:
segno = sign(z[int(abs(H[ind, 0])) - 1] - z[int(abs(H[ind, 1])) - 1]);
Hnew[ind, 2] = H[ind, 2] + thop * exp(-segno * k * deltaz * 1j);
else:
if (conn_per[ii][0] < conn_per[ii][1]):
if (deltax > 0):
segno = sign(x[conn_per[ii][0] - 1] - x[conn_per[ii][1] - 1]);
temp = array([conn_per[ii][0], conn_per[ii][1], thop * exp(-segno * k * deltax * 1j)]);
elif (deltay > 0):
segno = sign(y[conn_per[ii][0] - 1] - y[conn_per[ii][1] - 1]);
temp = array([conn_per[ii][0], conn_per[ii][1], thop * exp(-segno * k * deltay * 1j)]);
else:
segno = sign(z[conn_per[ii][0] - 1] - z[conn_per[ii][1] - 1]);
temp = array([conn_per[ii][0], conn_per[ii][1], thop * exp(-segno * k * deltaz * 1j)]);
Hnew = vstack([Hnew, temp]);
del ii
return Hnew
class nanoribbon_fast_ohmic:
acc = 0.144;
def __init__(self, n, L):
self.Nc = int(4 * (floor((floor(L / nanoribbon_fast_ohmic.acc) - 1) / 3)));
self.n = n;
self.Phi = zeros(n * self.Nc);
self.Eupper = 1000.0;
self.Elower = -1000.0;
self.dE = 1e-3;
self.thop = -2.7;
self.eta = 1e-8;
self.mu1 = 0;
self.mu2 = 0;
self.Temp = 300;
self.E = zeros(int(NEmax));
self.T = zeros(int(NEmax));
self.charge = zeros(self.n * self.Nc);
self.rank = 0;
self.atoms_coordinates();
self.defects_list = []
self.onsite_E = -1.5;
def atoms_coordinates(self):
GNR_atoms_coordinates(self);
self.x = array(self.x);
self.y = array(self.y);
self.z = array(self.z);
return;
def gap(self):
return GNRgap(self);
def charge_T(self):
M = self.Nc;
N = self.n;
t = self.thop;
Energy = 0.0
Ene = 0.0
p = 0.0
d = 0.0
orbitals = [1, 0]
hamiltonian = []
zeroes = [0, 0, 0, 0]
ene = [Energy, 0, 0, Ene]
coupling1 = [t, 0, 0, p]
coupling2 = [t * 1.12, 0, 0, p]
orbitals = orbitals + zeroes
hamiltonian.append(orbitals)
for j in range(M):
for i in range(N):
n = i + 1 + j * N
p = [n, n]
p = p + ene
hamiltonian.append(p)
for j in range(1, M - 1, +4):
for i in range(1, N):
n = i + 1 + j * N
m = i + (j + 1) * N
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
for j in range(3, M - 1, +4):
for i in range(0, N - 1):
n = i + 1 + j * N
m = i + 2 + (j + 1) * N
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
        # in the if branch below, restore the t*1.12 factor
for j in range(0, M - 1, +4):
for i in range(N):
n = i + 1 + j * N
m = i + 1 + (j + 1) * N
if i == 0:
p = [n, m]
p = p + coupling2
hamiltonian.append(p)
# hamiltonian.append([m, n, t*1.12, p, d])
else:
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
for j in range(1, M - 1, +4):
for i in range(N):
n = i + 1 + j * N
m = i + 1 + (j + 1) * N
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
        # in the if branch below, restore the t*1.12 factor
for j in range(2, M - 1, +4):
for i in range(N):
n = i + 1 + j * N
m = i + 1 + (j + 1) * N
if i == (N - 1):
p = [n, m]
p = p + coupling2
hamiltonian.append(p)
# hamiltonian.append([m, n, t*1.12, p, d])
else:
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
for j in range(3, M - 1, +4):
for i in range(N):
n = i + 1 + j * N
m = i + 1 + (j + 1) * N
p = [n, m]
p = p + coupling1
hamiltonian.append(p)
# hamiltonian.append([m, n, t, p, d])
H = Hamiltonian(N, M)
# I work on the defects
ind = array(self.defects_list, dtype=int);
H.H = array(hamiltonian, dtype=complex)
H.H[ind, 2] = self.onsite_E;
H.Eupper = self.Eupper;
H.Elower = self.Elower;
H.rank = self.rank;
H.dE = self.dE;
H.Phi = self.Phi;
H.Ei = -self.Phi;
H.eta = self.eta;
H.mu1 = self.mu1;
H.mu2 = self.mu2;
H.Egap = self.gap();
H.charge_T()
self.E = array(H.E);
self.T = array(H.T);
self.charge = array(H.charge);
del hamiltonian, H
return;
def current(self):
vt = kboltz * self.Temp / q;
E = array(self.E);
T = array(self.T);
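        # Clarifying comment (added): the summand below is a discretized
        # Landauer formula, I = (2 q^2 / h) * sum_E T(E) * [f1(E) - f2(E)] * dE,
        # since 2*q*q/(2*pi*hbar) equals 2 q^2 / h; Fermi() is assumed to be the
        # Fermi-Dirac occupation function defined elsewhere in this module.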
arg = 2 * q * q / (2 * pi * hbar) * T * (Fermi((E - self.mu1) / vt) - Fermi((E - self.mu2) / vt)) * self.dE
return sum(arg);
# This is the class for the solution of the 1D drift-diffusion
class multisubband1D:
def __init__(self, nx, ny, Neig):
self.ny = ny;
self.nx = nx;
self.x = zeros(nx);
self.y = zeros(ny);
self.Phi = zeros(nx * self.ny);
self.Ei = zeros(nx * self.ny);
self.Egap = zeros(nx * self.ny);
self.Temp = 300;
self.charge = zeros(nx * self.ny);
self.rank = 0;
self.Neig = Neig;
self.Psi = zeros((nx * ny, Neig));
self.eig = zeros((ny, Neig));
self.mass = zeros((nx, ny));
self.mu = 100e-4 * ones(self.ny);
self.genric = zeros(self.ny);
self.n1d = zeros(self.ny);
self.ecs = zeros(self.ny);
self.charge_left_contact = 0;
self.charge_right_contact = 0;
self.tolljay = 1e-3;
# This is the class for the solution of the QM 1D
class QM1D:
def __init__(self, nx, Neig, gridx, p=None, charge_T=None):
if charge_T is not None:
self.charge_T = types.MethodType(charge_T, self);
self.nx = nx;
self.x = zeros(nx);
self.ny = 1;
ny = 1;
self.Phi = zeros(nx * self.ny);
self.Ei = zeros(nx * self.ny);
self.Temp = 300;
self.charge = zeros(nx * self.ny);
self.rank = 0;
self.Neig = Neig;
self.Psi = zeros((nx * ny, Neig));
self.eig = zeros((ny, Neig));
if p is not None:
self.Egap = p.Egap;
self.massl = p.mel
self.masst = p.met;
self.massh = p.mhole
self.chi = p.chi
self.mass = p.mel;
else:
self.Egap = zeros(nx * self.ny)
self.massl = zeros(nx * self.ny)
self.masst = zeros(nx * self.ny)
self.massh = zeros(nx * self.ny)
self.chi = zeros(nx * self.ny)
self.mass = zeros(nx * self.ny)
self.Ef = 0;
self.x = gridx;
self.ecs = zeros(self.ny);
def charge_T(self):
del self.charge
self.charge = zeros(self.nx * self.ny);
self.Ei = -self.Phi;
# I compute the confined electrons
dist = avervect(self.x)
# self.Ei=4.05-self.Phi-self.chi-self.Egap*0.5
self.mass = self.massl;
solve_schroedinger_1D(self);
vt = self.Temp * kboltz / q;
for i in range(0, self.Neig):
self.charge = self.charge - 2 * dist * 1e-9 * (
self.Psi[:, i]) ** 2 * self.masst * m0 * kboltz * self.Temp / pi / hbar ** 2 * log(
1 + exp(-(self.eig[0, i] - self.Ef) / vt));
self.mass = self.masst;
solve_schroedinger_1D(self);
vt = self.Temp * kboltz / q;
for i in range(0, self.Neig):
self.charge = self.charge - 4 * dist * 1e-9 * (
self.Psi[:, i]) ** 2 * self.massl * m0 * kboltz * self.Temp / pi / hbar ** 2 * log(
1 + exp(-(self.eig[0, i] - self.Ef) / vt));
# I now add the holes
for i in range(0, size(self.charge)):
self.charge[i] = self.charge[i] + dist[i] * 1e-9 * (2 / sqrt(pi)) * 2 * (
vt / (2 * pi) * (self.massh[i] * m0 / hbar) * (q / hbar)) ** 1.5 * fphalf(
(self.Ei[i] - self.Egap[i] * 0.5 - self.Ef) / vt)
return;
def current(self):
return 0;
|
# Copyright (c) 2013, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
OUTBOUND_SUFFIX = "Outbound"
KARIGAR_SUFFIX = "Karigar"
INBOUND_SUFFIX = "Inbound"
SPACE = " "
def execute(filters=None):
columns, data = [], []
#items_code_start
item_list = []
if filters.get("group_wise") == "Item":
columns = get_columns(filters)
if filters and filters.get("item_filter"):
item_list = [filters.get("item_filter")]
else:
item_list = get_all_items_list()
# print "sur_len",len(item_list)
for item in item_list:
# print "sur_item",item
item_row_pass_through_data = get_item_row_pass_through_data(item) # forming item ow day data for all procces 3 wh
data.append(item_row_pass_through_data)
if filters.get("group_wise") == "Item Group":
columns = get_columns(filters)
item_group_list = []
if filters and filters.get("ig_filter"):
item_group_list = [filters.get("ig_filter")]
else:
item_group_list = get_all_item_group_list()
# print "sur_len",len(item_list)
for item_group in item_group_list:
item_group_row_pass_through_data = get_item_group_row_pass_through_data(item_group)
data.append(item_group_row_pass_through_data)
return columns, data
# For a single item, column names are chosen according to the selected item; the selected item is queried against the manufacturing method child doc to fetch the method name.
# The fetched method is then queried against the manufacturing method details (mmd) to get the unique processes (assuming each item has one method).
def get_columns(filters):
columns = []
column_process_list = get_process_list_for_columns(filters) #for single item
column_keys_list = get_process_column_key_list (column_process_list)
range_temp = 3 + (len(column_keys_list) )
for col in range(range_temp):
columns.append("")
columns[0] = {
"label": "Item",
"fieldname": "item_code",
"options":"Item",
"fieldtype": "Link",
"width": 160
}
columns[1] = {
"label": "Item Group",
"fieldname": "item_group",
"options": "Item Group",
"fieldtype": "Link",
"width": 160
}
columns[2] = {
"label": "Manufacturing Method",
"fieldname": "manufacturing_method",
"options": "Pch Manufacturing Method",
"fieldtype": "Link",
"width": 160
}
last_col = 2
for column_key in column_keys_list:
columns[last_col + 1] = {
"label": column_key,
"fieldname": column_key,
"width": 160
}
last_col += 1
#print "columns",columns
return columns
# return processes in process order according to the item, from Pch Manufacturing Method Details
def get_process_ac_to_item(item) :
method_name = frappe.db.get_value("Pch Manufacturing Method Child", {"item_made": item},"parent")
#print "method_name", method_name
process_dic = frappe.db.sql("""select DISTINCT pch_process from `tabPch Manufacturing Method Details` where pch_method=%s order by process_order """,(method_name), as_dict=1)
process_list = []
for process in process_dic:
process_list.append(process["pch_process"])
return process_list
# return processes from the global processes doctype where item_group is the input
def get_process_ac_to_item_group(item_group):
process_dic = frappe.db.sql("""
SELECT
gpc.pch_process,gpc.pch_process_order
FROM
`tabPch Global Process Child` gpc,`tabPch Global Process` gp
WHERE
gp.item_group= %s and gp.name = gpc.parent
order by
gpc.pch_process_order; """,(item_group), as_dict=1)
process_list = []
for process in process_dic:
process_list.append(process["pch_process"])
return process_list
#for single item
def get_process_list_for_columns( filters ):
process_list = []
if filters.get("group_wise") == "Item":
if filters.get("item_filter"):
process_list = get_process_ac_to_item(filters.get("item_filter")) # for one item
else:
process_list = get_global_process_order_list()
if filters.get("group_wise") == "Item Group":
if filters.get("ig_filter"):
process_list = get_process_ac_to_item_group(filters.get("ig_filter"))
else:
process_list = get_global_process_order_list()
return process_list
def get_global_process_order_list():
process_dic = frappe.db.sql("""
SELECT
gpc.pch_process,gpc.pch_process_order
FROM
`tabPch Global Process Child` gpc,`tabPch Global Process` gp
WHERE
gp.is_global=1 and gp.name = gpc.parent
order by
gpc.pch_process_order; """, as_dict=1)
process_list=[]
for process in process_dic:
process_list.append(process["pch_process"])
return process_list
def get_item_group_mrec_data(item_group,process_name):
mrec_dic = frappe.db.sql("""
SELECT
sum(units_s_r) as sum_units_s_r
FROM
`tabPch Manufacturing Record`
WHERE
item_group=%s and start_process=(select name from `tabPch Manufacturing Method Details` where pch_process =%s and item_group_mmd = %s limit 1); """, (item_group,process_name,item_group),as_dict=1)
#print "mrec_dic",mrec_dic
return mrec_dic[0]["sum_units_s_r"] if mrec_dic[0]["sum_units_s_r"] else "NO DATA"
# prepare each process column label as a dict key and its quantity as the value here
def get_item_row_pass_through_data(item):
method_name = frappe.db.get_value("Pch Manufacturing Method Child", {"item_made": item}, "parent")
process_list_for_item = get_process_ac_to_item(item)
item_process_column_key_list = get_process_column_key_list(process_list_for_item)
parent_row_dic = {"item_code": item, "manufacturing_method": method_name}
mrec_dic = frappe.db.sql("""
SELECT
manufacturing_record_type,units_s_r, start_process ,end_process ,item_group
FROM
`tabPch Manufacturing Record`
WHERE
item_made = %s and manufacturing_method = %s and docstatus = 1
ORDER BY
creation asc""",
(item, method_name ), as_dict=1)
    # from process_column_bind_list I will get all the column names along with the process assigned to each column
is_differend_end_process = 0
process_wise_data_dics = {}
#print "mrec_dic", mrec_dic
for mrec in mrec_dic :
        # new code start: below is the code for one transaction; we sum across all transactions using a dictionary
process_wise_data_dics["item_group"] = mrec.get("item_group")
start_process_karigar_key = get_process_name(mrec.get("start_process")) + SPACE + KARIGAR_SUFFIX
end_process_karigar_key = get_process_name(mrec.get("end_process")) + SPACE + KARIGAR_SUFFIX
end_process_inbound_key = get_process_name(mrec.get("end_process")) + SPACE + INBOUND_SUFFIX
end_process_outbound_key = get_process_name(mrec.get("end_process")) + SPACE + OUTBOUND_SUFFIX
if mrec.get("manufacturing_record_type") == "Send Material for Manufacturing":
if process_wise_data_dics.get(start_process_karigar_key) :
process_wise_data_dics[start_process_karigar_key] += mrec.get("units_s_r")
else:
process_wise_data_dics[start_process_karigar_key] = mrec.get("units_s_r")
if mrec.get("manufacturing_record_type") == "Receive Material from Manufacturing":
if process_wise_data_dics.get(end_process_inbound_key) :
process_wise_data_dics[end_process_inbound_key] += mrec.get("units_s_r")
else:
process_wise_data_dics[end_process_inbound_key] = mrec.get("units_s_r")
if mrec.get("start_process") != mrec.get("end_process"):
in_between_s_and_e_process_data = get_in_between_s_and_e_process_data(start_process_karigar_key,
end_process_inbound_key,
item_process_column_key_list,
mrec.get("units_s_r"))
for column_key,key_val in in_between_s_and_e_process_data.items():
if process_wise_data_dics.get(column_key):
process_wise_data_dics[column_key] += key_val
else:
process_wise_data_dics[column_key] = key_val
if mrec.get("manufacturing_record_type") == "Send Materials to Internal Storage WH":
if process_wise_data_dics.get(end_process_outbound_key) :
process_wise_data_dics[end_process_outbound_key] += mrec.get("units_s_r")
else:
process_wise_data_dics[end_process_outbound_key] = mrec.get("units_s_r")
parent_row_dic.update(process_wise_data_dics)
# new code end
return parent_row_dic
def get_item_group_row_pass_through_data(item_group) :
parent_ig_row_dic = {"item_group":item_group}
process_list_for_item_group = get_process_ac_to_item_group(item_group) # each item group have one process order defined
item_group_process_column_key_list = get_process_column_key_list(process_list_for_item_group)
item_list_ac_to_item_group = get_item_list_ac_to_item_group(item_group)
item_row_data_list = [] #we will get list of all calculated item row data per item group here
for item in item_list_ac_to_item_group:
item_row_pass_through_data = get_item_row_pass_through_data(item)
item_row_data_list.append(item_row_pass_through_data)
ig_process_wise_data_dics = {} #this dic contains some of all col keys values across all item in that item group
for ig_process_key in item_group_process_column_key_list : #ig_col_key loop
for item_row_data in item_row_data_list : # each item row key
if item_row_data.get(ig_process_key):
if ig_process_wise_data_dics.get(ig_process_key):
ig_process_wise_data_dics[ig_process_key] += item_row_data.get(ig_process_key)
else:
ig_process_wise_data_dics[ig_process_key] = item_row_data.get(ig_process_key)
parent_ig_row_dic.update(ig_process_wise_data_dics)
return parent_ig_row_dic
def get_process_name(mmd_id):
method_name = frappe.db.get_value("Pch Manufacturing Method Details", {"name": mmd_id}, "pch_process")
return method_name
def get_process_column_key_list(process_list):
process_column_key_list = []
for process in process_list:
PROCESS_LABEL = process
#print "PROCESS_LABEL",PROCESS_LABEL
outbound_label = PROCESS_LABEL + SPACE + OUTBOUND_SUFFIX
process_column_key_list.append(outbound_label)
karigar_label = PROCESS_LABEL + SPACE + KARIGAR_SUFFIX
process_column_key_list.append(karigar_label)
inbound_label = PROCESS_LABEL + SPACE + INBOUND_SUFFIX
process_column_key_list.append(inbound_label)
return process_column_key_list
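# Illustrative example (added; the process name is hypothetical): for
# process_list = ["Casting"] the function above returns
# ["Casting Outbound", "Casting Karigar", "Casting Inbound"],
# i.e. three report columns per process.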
def get_in_between_s_and_e_process_data(start_process_karigar_key,end_process_inbound_key,item_process_column_key_list,units_s_r) :
temp_dic={}
isvalid= 0
for column_key in item_process_column_key_list :
if column_key == end_process_inbound_key:
isvalid = 0
break
if isvalid ==0 :
if column_key == start_process_karigar_key:
isvalid = 1
else: #if start process column key comes
t_dic = {column_key: units_s_r}
temp_dic.update(t_dic)
return temp_dic
def get_all_items_list():
mrec_dic = frappe.db.sql("""
SELECT
DISTINCT item_made
FROM
`tabPch Manufacturing Record`
WHERE
docstatus = 1 and item_made IS NOT NULL
ORDER BY
creation desc""",
as_dict=1)
item_list = []
for mrec in mrec_dic:
item_list.append(mrec["item_made"])
return item_list
def get_all_item_group_list():
mrec_dic = frappe.db.sql("""
SELECT
DISTINCT item_group
FROM
`tabPch Manufacturing Record`
WHERE
docstatus = 1 and item_made IS NOT NULL and item_group IS NOT NULL
ORDER BY
creation desc""",
as_dict=1)
item_list = []
for mrec in mrec_dic:
item_list.append(mrec["item_group"])
return item_list
def get_item_list_ac_to_item_group(item_group):
mrec_dic = frappe.db.sql("""
SELECT
DISTINCT item_made
FROM
`tabPch Manufacturing Record`
WHERE
docstatus = 1 and item_made IS NOT NULL and item_group = %s
ORDER BY
creation desc""",
(item_group), as_dict=1)
item_list = []
for mrec in mrec_dic:
item_list.append(mrec["item_made"])
return item_list
|
def not_gate(bit):
assert bit in (0, 1), "Only 0 or 1"
if bit:
return 0
return 1
def and_gate(bit1, bit2):
assert bit1 in (0, 1) and bit2 in (0, 1), "Only 0 or 1"
return bit1 * bit2
def or_gate(bit1, bit2):
assert bit1 in (0, 1) and bit2 in (0, 1), "Only 0 or 1"
if 1 in (bit1, bit2):
return 1
return 0
def xor_gate(bit1, bit2):
assert bit1 in (0, 1) and bit2 in (0, 1), "Only 0 or 1"
if bit1 != bit2:
return 1
return 0
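# A minimal usage sketch (added; not part of the original module): a half adder
# composed from the gates above, assuming inputs restricted to 0/1.
def half_adder(bit1, bit2):
    # sum bit is the XOR of the inputs, carry bit is the AND
    return xor_gate(bit1, bit2), and_gate(bit1, bit2)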
|
"""Test module for the Templates plugin."""
import unittest
import os
import cProfile, pstats
import test_project
test_project.TEST_SETTINGS += """
from modelmanager.plugins import templates
from modelmanager.plugins.templates import TemplatesDict as _TemplatesDict
from modelmanager import utils
@utils.propertyplugin
class params(_TemplatesDict):
template_patterns = ['param.txt']
"""
TEST_TEMPLATES = {'input/test_param.txt': ("Test parameters\n{n:d} {d:f}",
"Test parameters\n 1 1.1 "),
'input/test_config.pr': ("parameters {test}\n{time}\n{n:d}",
"parameters XYZ \n2000-01-01\n1")}
class TestTemplates(test_project.ProjectTestCase):
def setUp(self):
super(TestTemplates, self).setUp()
self.assertTrue(hasattr(self.project, 'templates'))
self.templates = self.project.templates
os.mkdir(os.path.join(self.project.projectdir, 'input'))
os.mkdir(os.path.join(self.templates.resourcedir, 'input'))
for p, (tmplt, tfile) in TEST_TEMPLATES.items():
with open(os.path.join(self.templates.resourcedir, p), 'w') as f:
f.write(tmplt)
with open(os.path.join(self.project.projectdir, p), 'w') as f:
f.write(tfile)
return
def test_get_template(self):
for i in ['param', 'config', 'input/*config*']:
tmplt = self.templates.get_template(i)
self.assertIn(os.path.relpath(tmplt.filepath, self.projectdir),
TEST_TEMPLATES)
self.assertEqual(len(self.templates.get_templates('input/*')), 2)
def test_read_values(self):
self.assertEqual(self.templates('n'), 1)
self.assertEqual(self.templates('d'), 1.1)
self.assertEqual(self.templates('test'), "XYZ")
self.assertRaises(KeyError, self.templates, "unknown")
config = self.templates['config']
# return value only
self.assertEqual(config.read_values('test'), 'XYZ')
# return dict
d = config.read_values('test', 'time')
self.assertEqual(d['time'], '2000-01-01')
self.assertRaises(KeyError, config.read_values, 'unknown')
def test_write_values(self):
self.templates(n=100)
self.assertEqual(self.templates('n'), 100)
self.templates(d=1.111)
self.assertEqual(self.templates('d'), 1.111)
self.templates(test='Somelongstr')
self.assertEqual(self.templates('test'), "Somelongstr")
self.assertRaises(KeyError, self.templates, unknown=1)
param = self.templates['param']
self.assertRaises(KeyError, param.write_values, unknown=1)
def test_subset(self):
self.assertEqual(self.templates('n', templates='config'), 1)
self.templates(n=2, templates=['config'])
self.assertEqual(self.templates('n', templates='param'), 1)
self.assertEqual(self.templates('n', templates='config'), 2)
# value from template listed first is returned
self.assertEqual(self.templates("n", templates=['config', 'param']), 2)
def test_templates_dict(self):
self.assertEqual(self.project.params['n'], 1)
print(self.project.params)
self.project.params['n'] = 3
self.assertEqual(self.templates('n', templates='param'), 3)
if __name__ == '__main__':
cProfile.run('unittest.main()', 'pstats')
# print profile stats ordered by time
pstats.Stats('pstats').strip_dirs().sort_stats('time').print_stats(5)
|
# -*- coding: utf-8 -*-
# The code is based on the esia-connector package
# https://github.com/eigenmethod/esia-connector
# License:
# https://github.com/eigenmethod/esia-connector/blob/master/LICENSE.txt
# Copyright (c) 2015, Septem Capital
import base64
import datetime
import json
import os
import tempfile
import pytz
import requests
from .exceptions import CryptoBackendError, HttpError, IncorrectJsonError
def make_request(url, method='GET', headers=None, data=None, verify=True):
"""
Выполняет запрос по заданному URL и возвращает dict на основе JSON-ответа
:param str url: URL-адрес
:param str method: (optional) HTTP-метод запроса, по умолчанию GET
:param dict headers: (optional) массив HTTP-заголовков, по умолчанию None
:param dict data: (optional) массив данных передаваемых в запросе,
по умолчанию None
:param boolean verify: optional, производить ли верификацию
ssl-сертификата при запросае
:return: dict на основе JSON-ответа
:rtype: dict
:raises HttpError: если выбрасыватеся исключение requests.HTTPError
:raises IncorrectJsonError: если JSON-ответ не может быть
корректно прочитан
"""
try:
response = requests.request(
method, url, headers=headers, data=data, verify=verify)
response.raise_for_status()
return json.loads(response.content)
except requests.HTTPError as e:
raise HttpError(e)
except ValueError as e:
raise IncorrectJsonError(e)
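# Usage sketch (added; the URL and token below are hypothetical):
#   info = make_request('https://example.com/rs/prns/1000',
#                       headers={'Authorization': 'Bearer <token>'})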
def smime_sign(certificate_file, private_key_file, data, backend='m2crypto'):
"""
Подписывает данные в формате SMIME с использование sha256.
В качестве бэкенда используется либо вызов openssl, либо
библиотека M2Crypto
:param str certificate_file: путь к сертификату
:param str private_key_file: путь к приватному ключу
:param str data: подписываемые данные
:param str backend: (optional) бэкенд, используемый
для подписи (m2crypto|openssl)
:raises CryptoBackendError: если неверно указан backend
:return: открепленная подпись
:rtype: str
"""
if backend == 'm2crypto' or backend is None:
from M2Crypto import SMIME, BIO
if not isinstance(data, bytes):
data = bytes(data)
signer = SMIME.SMIME()
signer.load_key(private_key_file, certificate_file)
p7 = signer.sign(
BIO.MemoryBuffer(data), flags=SMIME.PKCS7_DETACHED, algo='sha256')
signed_message = BIO.MemoryBuffer()
p7.write_der(signed_message)
return signed_message.read()
elif backend == 'openssl':
source_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
source_file.write(data)
source_file.close()
source_path = source_file.name
destination_file = tempfile.NamedTemporaryFile(mode='wb', delete=False)
destination_file.close()
destination_path = destination_file.name
cmd = (
'openssl smime -sign -md sha256 -in {f_in} -signer {cert} -inkey '
'{key} -out {f_out} -outform DER')
os.system(cmd.format(
f_in=source_path,
cert=certificate_file,
key=private_key_file,
f_out=destination_path,
))
signed_message = open(destination_path, 'rb').read()
os.unlink(source_path)
os.unlink(destination_path)
return signed_message
else:
raise CryptoBackendError(
'Unknown cryptography backend. Use openssl or m2crypto value.')
def csp_sign(thumbprint, password, data):
"""
Подписывает данные с использованием ГОСТ Р 34.10-2012 открепленной подписи.
В качестве бэкенда используется утилита cryptcp из ПО КриптоПРО CSP.
:param str thumbprint: SHA1 отпечаток сертификата, связанного
с зкарытым ключем
:param str password: пароль для контейнера закрытого ключа
:param str data: подписываемые данные
"""
tmp_dir = tempfile.gettempdir()
source_file = tempfile.NamedTemporaryFile(
mode='w', delete=False, dir=tmp_dir)
source_file.write(data)
source_file.close()
source_path = source_file.name
destination_path = source_path + '.sgn'
cmd = (
"cryptcp -signf -norev -dir {tmp_dir} -der -strict -cert -detached "
"-thumbprint {thumbprint} -pin {password} {f_in} 2>&1 >/dev/null")
os.system(cmd.format(
tmp_dir=tmp_dir,
thumbprint=thumbprint,
password=password,
f_in=source_path
))
signed_message = open(destination_path, 'rb').read()
os.unlink(source_path)
os.unlink(destination_path)
return signed_message
def sign_params(params, settings, backend='csp'):
"""
Подписывает параметры запроса и добавляет в params ключ client_secret.
Подпись основывается на полях: `scope`, `timestamp`, `client_id`, `state`.
:param dict params: параметры запроса
:param EsiaSettings settings: настройки модуля ЕСИА
:param str backend: (optional) бэкенд используемый
для подписи (m2crypto|openssl|csp)
:raises CryptoBackendError: если неверно указан backend
:return: подписанные параметры запроса
:rtype: dict
"""
plaintext = params.get('scope', '') + params.get('timestamp', '') + \
params.get('client_id', '') + params.get('state', '')
if backend == 'csp':
raw_client_secret = csp_sign(
settings.csp_cert_thumbprint,
settings.csp_container_pwd, plaintext)
else:
raw_client_secret = smime_sign(
settings.certificate_file, settings.private_key_file,
plaintext, backend)
params.update(
client_secret=base64.urlsafe_b64encode(
raw_client_secret).decode('utf-8'),
)
return params
def get_timestamp():
"""
Возвращает текущую дату и время в строковом представлении с указанем зоны
в формате пригодном для использования при взаимодействии с ЕСИА
:return: текущая дата и время
:rtype: str
"""
return datetime.datetime.now(pytz.utc).\
strftime('%Y.%m.%d %H:%M:%S %z').strip()
|
# 24
from math import factorial
n = list(range(10))
f = []
left = 1000000
for i in range(9):
if left % factorial(9 - i) == 0:
f += [n.pop(int(left / factorial(9 - i)) - 1)]
else:
f += [n.pop(int(left / factorial(9 - i)))]
left = left % factorial(9 - i)
f += n
print(f)
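# Cross-check sketch (added; a brute-force alternative, assuming itertools is
# acceptable): permutations(range(10)) yields tuples in lexicographic order,
# so the millionth permutation sits at index 999999.
# from itertools import islice, permutations
# print(next(islice(permutations(range(10)), 999999, None)))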
|
import argparse
import cv2
import os
import shutil
import subprocess
import sys
def check_for_ffmpeg():
with open(os.devnull, 'w') as devnull:
try:
ret = subprocess.call(['ffmpeg', '-version'], stdout=devnull)
except OSError:
ret = 1
if ret != 0:
print('ffmpeg not installed')
sys.exit(1)
check_for_ffmpeg()
parser = argparse.ArgumentParser('combine videos')
parser.add_argument('videos', nargs='+',
help='video files to combine')
parser.add_argument('--output', '-o', default='output.mp4',
help='filename to output to (default: output.mp4)')
parser.add_argument('--frame-rate', '-f', type=int, default=30,
help='video frame rate (default: 30)')
args = parser.parse_args()
cwd = os.getcwd()
videos = [os.path.abspath(v) for v in args.videos]
captures = [cv2.VideoCapture(v) for v in videos]
width = int(captures[0].get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(captures[0].get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
os.mkdir('tmp')
comb = 'combined.mp4'
out_file = os.path.join('tmp', comb)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter(out_file, fourcc, args.frame_rate, size)
# Combine video frames
while True:
comb_frame = None
num_finished = 0
for v in captures:
ret, frame = v.read()
if not ret:
num_finished += 1
continue
frame = cv2.resize(frame, size)
if comb_frame is None:
comb_frame = frame.copy()
else:
alpha = 0.75
comb_frame = cv2.addWeighted(comb_frame, alpha, frame, alpha, 0)
if num_finished == len(captures):
break
out.write(comb_frame)
for v in captures:
v.release()
out.release()
# Combine the audio
os.chdir('tmp')
# Extract audio
fns = []
for v in videos:
fn, ext = os.path.splitext(v)
fn = os.path.basename(fn)
fns.append(fn)
ret = subprocess.call('ffmpeg -i "{}" -ab 160k -ac 2 -ar 44100 -vn "{}.wav"'
.format(v, fn), shell=True)
if ret != 0:
sys.exit(1)
# Combine audio
inputs = ''.join(' -i "{}.wav"'.format(fn) for fn in fns)
output_wav = 'output.wav'
ret = subprocess.call('ffmpeg {} -filter_complex amix=inputs={} {}'
.format(inputs, len(videos), output_wav), shell=True)
if ret != 0:
sys.exit(1)
# Replace audio on video
ret = subprocess.call('ffmpeg -i {} -i {} -c:v copy -map 0:v:0 -map 1:a:0 {}'
.format(comb, output_wav, args.output), shell=True)
if ret != 0:
sys.exit(1)
os.chdir(cwd)
os.rename(os.path.join('tmp', args.output), args.output)
# Clean up
shutil.rmtree('tmp')
|
"""
remote_method decorator
"""
from zorp.registry import registry
def func_name(func):
"""
Return func's fully-qualified name
"""
if hasattr(func, "__module__"):
return "{}.{}".format(func.__module__, func.__name__)
return func.__name__
def remote_method(name=None, use_registry=registry):
"""
Register the decorated function
Use the function's own name if none is supplied
"""
if callable(name):
# Allow calling without arguments
use_registry.put(func_name(name), name)
return name
def wrap(func):
"""
Function wrapper
"""
use_registry.put(name or func_name(func), func)
return func
return wrap
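# Usage sketch (hypothetical functions; registration behaviour as implemented above):
#
#     @remote_method
#     def ping():
#         return "pong"              # registered under "<module>.ping"
#
#     @remote_method("math.add")
#     def add(a, b):
#         return a + b               # registered under the explicit name "math.add"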
|
import redis
from django.conf import settings
from .models import Product
# Connect to redis
conn = redis.Redis(host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_DB)
class Recommend(object):
def get_product_id(self, id):
return f'product:{id}:purchased_with'
def products_bought(self, products):
product_ids = [product.id for product in products]
for product_id in product_ids:
for with_id in product_ids:
# get the other products bought with each product
if product_id != with_id:
# Increment score for product purchased together
conn.zincrby(self.get_product_id(product_id), 1, with_id)
def suggest_products_for(self, products, max_results=5):
product_ids = [product.id for product in products]
if len(products) == 1:
# only one product was given
suggestions = conn.zrange(
self.get_product_id(product_ids[0]), 0, -1, desc=True)[:max_results]
else:
# Not a single product, generate a temporary key
flat_ids = ''.join([str(id) for id in product_ids])
temp_key = f'temp_{flat_ids}'
# Store the resulting sorted set in a temporary_key
keys = [self.get_product_id(id) for id in product_ids]
conn.zunionstore(temp_key, keys)
# Remove ids for the products the recommendation is for
conn.zrem(temp_key, *product_ids)
# get the product ids by their score, descendant sort
suggestions = conn.zrange(temp_key, 0, -1, desc=True)[:max_results]
#remove the temporary key
conn.delete(temp_key)
suggested_products_ids = [int(id) for id in suggestions]
#get suggested products and sort by order of appearance
suggested_products = list(Product.objects.filter(id__in=suggested_products_ids))
suggested_products.sort(key=lambda x: suggested_products_ids.index(x.id))
return suggested_products
def clear_purchases(self):
for id in Product.objects.values_list('id', flat=True):
conn.delete(self.get_product_id(id))
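# Usage sketch (assumes existing Product instances p1/p2 and a reachable Redis):
#
#     r = Recommend()
#     r.products_bought([p1, p2])                 # record that p1 and p2 were bought together
#     suggestions = r.suggest_products_for([p1])  # up to max_results products, by co-purchase score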
|
#-*-coding:utf-8-*-
import librosa
import numpy as np
import random
import os , time
import soundfile
from multiprocessing import Pool
sample_rate = 16000
pitch_shift1, pitch_shift2 = 0.01 , 5.0
time_stretch1, time_stretch2 = 0.05, 0.25
augmentation_num = 10
def audio_augmentation(wav_file):
print("original wav file: ", wav_file)
y, sr = librosa.load(wav_file, sr=sample_rate)
for j in range(augmentation_num):
rd1 = random.uniform(pitch_shift1, pitch_shift2)
ii = random.choice((-1, 1))
rd2 = random.uniform(time_stretch1, time_stretch2)
rd2 = 1.0 + ii * rd2
y_ps = librosa.effects.pitch_shift(y, sr, n_steps = rd1)
y_ts = librosa.effects.time_stretch(y, rate = rd2)
dir_path, wav_file_name = os.path.split(wav_file)
wav_name = wav_file_name.split('.')[0]
ps_wav = os.path.join(dir_path, wav_name + '_ps_' + str(j) + '.wav')
ts_wav = os.path.join(dir_path, wav_name + '_ts_' + str(j) + '.wav')
print("pitch_shift: ", ps_wav)
print("time_stretch: ", ts_wav)
# librosa.output.write_wav(ps_wav, y_ps, sample_rate)
# librosa.output.write_wav(ts_wav, y_ts, sample_rate)
soundfile.write(ps_wav, y_ps, sample_rate)
soundfile.write(ts_wav, y_ts, sample_rate)
return
if __name__ == "__main__":
import sys
from datasets import create_audio_lists_recursive
train_audio_dir_path = "/data/Speech/XunFei/BabyCry/train"
all_wav_list = create_audio_lists_recursive(train_audio_dir_path)
random.shuffle(all_wav_list)
all_file_len = len(all_wav_list)
print('all wav file len:',all_file_len)
print('start wav data augmentation ...')
# multi processes
with Pool(20) as p:
p.map(audio_augmentation, all_wav_list)
# single process
# for i in range(all_file_len):
# audio_file = all_wav_list[i]
# audio_augmentation(audio_file)
# if (i % 100 == 0):
# now = time.localtime()
# now_time = time.strftime("%Y-%m-%d %H:%M:%S", now)
# print('time:', now_time)
# print('predict num:', i)
print('wav data augmentation done ...')
|
#!/usr/bin/env python3
"""
Copyright 2018 Twitter, Inc.
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
"""
This script can be used to add license headers to all the source files of a project.
It is designed to be safe, so do not worry and try it out!
Works with languages with nonempty value in MAP_LANGUAGE_TO_COMMENT_CHARS
"""
import argparse
import datetime
import itertools
import logging
import os
import sys
import shutil
import subprocess
import tempfile
"""
The License Header
"""
LICENSE_HEADER = """
Copyright {} Twitter, Inc.
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0
""".format(datetime.datetime.now().year).split("\n")
# Comment out non-source extensions e.g. .md
MAP_EXTENTION_TO_LANGUAGE = \
{
'.C': 'C++',
'.H': 'C++',
'.PL': 'Perl',
'.R': 'R',
'._js': 'JavaScript',
'.adb': 'Ada',
'.apacheconf': 'ApacheConf',
'.applescript': 'AppleScript',
'.asm': 'Assembly',
'.asp': 'ASP',
'.aux': 'TeX',
'.aw': 'PHP',
'.b': 'Brainfuck',
'.bas': 'Visual Basic',
'.bats': 'Shell',
'.bf': 'Brainfuck',
'.bib': 'TeX',
'.builder': 'Ruby',
'.c': 'C',
'.c++': 'C++',
'.clj': 'Clojure',
'.cmake': 'CMake',
'.coffee': 'CoffeeScript',
'.cpp': 'C++',
'.cs': 'C#',
'.css': 'CSS',
'.csx': 'C#',
'.ctp': 'PHP',
'.cu': 'Cuda',
'.cxx': 'C++',
'.dfm': 'Pascal',
'.diff': 'Diff',
'.dtx': 'TeX',
'.el': 'Emacs Lisp',
'.elm': 'Elm',
'.emacs': 'Emacs Lisp',
'.erb': 'HTML+ERB',
'.f': 'FORTRAN',
'.f90': 'FORTRAN',
'.frm': 'Visual Basic',
'.frx': 'Visual Basic',
'.gemspec': 'Ruby',
'.go': 'Go',
'.god': 'Ruby',
'.gyp': 'Python',
'.h++': 'C++',
'.hh': 'C++',
'.hpp': 'C++',
'.hs': 'Haskell',
'.hsc': 'Haskell',
'.htm': 'HTML',
'.html': 'HTML',
'.http': 'HTTP',
'.hxx': 'C++',
'.ino': 'Arduino',
'.ins': 'TeX',
'.io': 'Io',
'.irbrc': 'Ruby',
'.java': 'Java',
'.jinja': 'HTML+Django',
'.jl': 'Julia',
'.js': 'JavaScript',
# '.json': 'JSON',
'.jsp': 'Java Server Pages',
'.jsx': 'JavaScript',
'.kt': 'Kotlin',
'.ktm': 'Kotlin',
'.kts': 'Kotlin',
'.less': 'Less',
'.lisp': 'Common Lisp',
'.ll': 'LLVM',
'.lpr': 'Pascal',
'.ltx': 'TeX',
'.lua': 'Lua',
'.m': 'Objective-C',
'.mak': 'Makefile',
'.matlab': 'Matlab',
# '.md': 'Markdown',
'.mkii': 'TeX',
'.mkiv': 'TeX',
'.mkvi': 'TeX',
'.ml': 'OCaml',
'.mm': 'Objective-C',
'.mspec': 'Ruby',
'.mustache': 'HTML+Django',
# '.nginxconf': 'Nginx',
'.nqp': 'Perl',
'.numpy': 'NumPy',
'.pas': 'Pascal',
'.perl': 'Perl',
'.ph': 'Perl',
'.php': 'PHP',
'.php3': 'PHP',
'.php4': 'PHP',
'.php5': 'PHP',
'.phpt': 'PHP',
'.phtml': 'HTML+PHP',
'.pl': 'Perl',
'.plx': 'Perl',
'.pm6': 'Perl',
'.pod': 'Perl',
'.podspec': 'Ruby',
'.prg': 'xBase',
'.psgi': 'Perl',
'.py': 'Python',
'.pyt': 'Python',
'.pytb': 'Python traceback',
'.pyw': 'Python',
'.pyx': 'Cython',
'.r': 'R',
'.rb': 'Ruby',
'.rbuild': 'Ruby',
'.rbw': 'Ruby',
'.rbx': 'Ruby',
# '.rest': 'reStructuredText',
'.rs': 'Rust',
# '.rst': 'reStructuredText',
'.ru': 'Ruby',
'.sage': 'Sage',
'.sass': 'Sass',
'.scala': 'Scala',
'.scss': 'SCSS',
'.sh': 'Shell',
'.sql': 'SQL',
'.sty': 'TeX',
'.tcc': 'C++',
'.tex': 'TeX',
'.thor': 'Ruby',
'.tmux': 'Shell',
'.toc': 'TeX',
'.tpp': 'C++',
'.ts': 'TypeScript',
'.vb': 'Visual Basic',
'.vba': 'Visual Basic',
'.vbs': 'Visual Basic',
'.vim': 'VimL',
'.w': 'C',
'.watchr': 'Ruby',
'.wsgi': 'Python',
'.xhtml': 'HTML',
# '.xml': 'XML',
'.xpy': 'Python',
# '.yaml': 'YAML',
# '.yml': 'YAML',
}
"""
The keys in MAP_LANGUAGE_TO_COMMENT_CHARS make an exhaustive list of the values in MAP_EXTENTION_TO_LANGUAGE.
Please keep both of them in sync.
The values in the list are the characters used to prepare the License Header as a comment block.
First element starts a block comment
Second element is prepended to the lines in comment body
Third element ends a block comment
EXAMPLE (Scala): For a comment like below
/**
* A
* block
* comment
*/
Set the following in MAP_LANGUAGE_TO_COMMENT_CHARS
'Scala': ['/**', ' * ', ' */']
"""
MAP_LANGUAGE_TO_COMMENT_CHARS = \
{
'ASP': ['', '\' ', ''],
'Ada': [], # TODO
'ApacheConf': [], # TODO
'AppleScript': ['', '-- ', ''],
'Arduino': [], # TODO
'Assembly': ['', '; ', ''],
'Brainfuck': [], # TODO
'C': ['/*', ' * ', ' */'],
'C#': ['/*', ' ', '*/'],
'C++': ['/*', ' *', ' */'],
'CMake': ['', '# ', ''],
'CSS': ['/*', '', '*/'],
'Clojure': ['', '; ', ''],
'CoffeeScript': ['###', '', '###'],
'Common Lisp': ['', '; ', ''],
'Cuda': [], # TODO
'Cython': ['"""', '', '"""'],
'Diff': [], # TODO
'Elm': [], # TODO
'Emacs Lisp': ['', '; ', ''],
'FORTRAN': [], # TODO
'Go': ['', '//', ''],
'HTML': ['<!--', '', ' -->'],
'HTML+Django': ['<!--', '', ' -->'],
'HTML+ERB': ['<!--', '', ' -->'],
'HTML+PHP': ['<!--', '', ' -->'],
'HTTP': [], # TODO
'Haskell': ['', '-- ', ''],
'Io': [], # TODO
'JSON': ['', '// ', ''],
'Java': ['', '// ', ''],
'Java Server Pages': ['', '// ', ''],
'JavaScript': ['', '// ', ''],
'Julia': ['###', ' ', '###'],
'Kotlin': ['/**', ' * ', ' */'],
'LLVM': [], # TODO
'Less': ['/*', '', '*/'],
'Lua': ['--[=====[', '', '--]=====]'],
'Makefile': ['', '# ', ''],
'Markdown': ['<!--', '', ' -->'],
'Matlab': [], # TODO
'Nginx': [], # TODO
'NumPy': ['"""', '', '"""'],
'OCaml': ['(*', '', ' *)'],
'Objective-C': ['', '// ', ''],
'PHP': ['<!--', '', ' -->'],
'Pascal': [], # TODO
'Perl': ['', '# ', ''],
'Python': ['"""', '', '"""'],
'Python traceback': [], # TODO
'R': ['', '# ', ''],
'Ruby': ['', '# ', ''],
'Rust': ['', '// ', ''],
'SCSS': ['/*', '', '*/'],
'SQL': ['', '-- ', ''],
'Sage': [], # TODO
'Sass': ['/*', '', '*/'],
'Scala': ['/**', ' * ', ' */'],
'Shell': ['', '# ', ''],
'TeX': ['', '% ', ''],
'TypeScript': ['/**', ' * ', ' */'],
'VimL': [], # TODO
'Visual Basic': [], # TODO
'XML': ['<!--', '', ' -->'],
'YAML': ['', '# ', ''],
'reStructuredText': ['', '.. ', ''],
'xBase': [], # TODO
}
"""
Argument Parsing
"""
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
def is_dir(dirname):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
"""
Utility
"""
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class color(object):
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def apply_changes(source_path, skip_log=False):
files_with_extensions = []
for root, directories, filenames in os.walk(source_path):
if ".git" in root.split("/"):
continue
for filename in filenames:
path_to_file = os.path.join(root, filename)
_, file_extension = os.path.splitext(path_to_file)
files_with_extensions.append((path_to_file, file_extension))
source_files = []
not_source_files = []
for file in files_with_extensions:
if file[1] in MAP_EXTENTION_TO_LANGUAGE:
source_files.append(file)
else:
not_source_files.append(file)
if not skip_log:
logging.info("Not making any changes to the following files. The script does not recognize them as source files")
for file in not_source_files:
logging.info('\t - ' + file[0][len(source_path):])
# print("All the source files")
# for file in source_files:
# print('\t -', file[0][len(source_path):])
"""
Detect if License Headers exist
Check first 50 lines for keywords "Copyright" and "License" both.
If found, remove the file from source_files
"""
files_with_headers = []
files_without_headers = []
for file in source_files:
with open(file[0]) as f:
first_50_lines = "".join([x.strip() for x in itertools.islice(f, 50)])
first_50_lines = first_50_lines.lower()
if "copyright" in first_50_lines and "license" in first_50_lines:
files_with_headers.append(file)
else:
files_without_headers.append(file)
if not skip_log:
print("\nFound {} source file(s) with existing License headers".format(len(files_with_headers)))
logging.info("\nFound {} source file(s) with existing License headers".format(len(files_with_headers)))
for file in files_with_headers:
logging.info("\t " + file[0][len(source_path):])
"""
Prepare comment block for each language
"""
languages = {}
# key: Language Name
# value: list of files
for file in files_without_headers:
lang = MAP_EXTENTION_TO_LANGUAGE[file[1]]
try:
languages[lang].append(file)
except KeyError:
languages[lang] = [file]
map_language_to_block_comment = {}
for lang in languages:
try:
characters = MAP_LANGUAGE_TO_COMMENT_CHARS[lang]
except KeyError:
print("ERROR: Language '{}' not found in MAP_LANGUAGE_TO_COMMENT_CHARS. Please keep both dictionaries in sync".format(lang))
continue
if len(characters) != 3:
print("ERROR: Language '{}' does not have the required 3 block comment characters. Check MAP_LANGUAGE_TO_COMMENT_CHARS".format(lang))
continue
comments = []
if characters[0] != "":
comments.append(characters[0] + LICENSE_HEADER[0])
for line in LICENSE_HEADER[1:-1]:
comments.append(characters[1] + line)
comments.append(characters[-1] + LICENSE_HEADER[-1])
map_language_to_block_comment[lang] = "\n".join(comments)
if map_language_to_block_comment and not skip_log:
logging.info("\n\nList of languages and their block comments\n")
for lang in map_language_to_block_comment:
logging.info(lang + "\n")
logging.info(map_language_to_block_comment[lang] + "\n")
"""
Make the changes
Exceptional cases:
- If the first two bytes of the file are "#!", skip the first line
"""
for file in files_without_headers:
with open(file[0]) as f:
file_text = f.read()
lang = MAP_EXTENTION_TO_LANGUAGE[file[1]]
comment = map_language_to_block_comment[lang]
new_file_text = ""
if file_text[:2] == "#!":
lines = file_text.split("\n", 1)
lines.insert(1, comment)
new_file_text = "\n".join(lines)
else:
new_file_text = comment + "\n" + file_text
with open(file[0], 'w') as f:
f.write(new_file_text)
if not skip_log:
print("{} source file(s) need to be updated".format(len(files_without_headers)))
logging.info("{} source file(s) need to be updated".format(len(files_without_headers)))
def get_current_branch(path):
# Save current working directory
cur_dir = os.getcwd()
os.chdir(path)
cur_branch = ""
try:
output = subprocess.check_output(["git", "branch"]).decode("utf-8")
for line in output.split("\n"):
if line[:2] == "* ":
cur_branch = line[2:]
break
except Exception as e:
print("Error in get_current_branch function", e)
print("Are you sure the repository is tracked by git?")
return cur_branch
def entry():
"""
Parse arguments
"""
parser = argparse.ArgumentParser(description="Recursively add license headers to source files")
parser.add_argument('source_dir', help="Path to the root of the directory containing source files",
action=FullPaths, type=is_dir)
args = parser.parse_args()
print("Path detected :", color.BOLD + args.source_dir + color.END)
current_branch = get_current_branch(args.source_dir)
if current_branch:
print("Branch detected -", color.BOLD + current_branch, color.END, "\n")
"""
Enable logging to a file
"""
# f = tempfile.NamedTemporaryFile(delete=False, suffix=".log")
# f.close()
# LOG_FILENAME = f.name
LOG_FILENAME = "header-" + datetime.datetime.now().isoformat() + ".log"
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format="%(message)s")
print(color.YELLOW + "* Please make sure that the source directory is tracked by git.")
print("* Create a new branch before proceeding ahead.")
print("* Make sure you do not have any uncommitted changes in your repository.")
print("It will later enable you to run 'git checkout -- .' and revert all the changes made by this script." + color.END)
if not query_yes_no("Proceed ahead? (Don't worry, this won't make changes yet)", default="no"):
print("Aborted!")
sys.exit(1)
"""
Make a temporary copy of the source directory and make changes there
"""
tempdir = tempfile.mkdtemp()
shutil.rmtree(tempdir) # shutil.copytree mandates the destination to not exist
print(color.BOLD, "\nCreating a copy of the project at\n")
print("\t", color.GREEN, tempdir, color.END)
shutil.copytree(args.source_dir, tempdir)
apply_changes(tempdir)
print(color.BOLD, "\nApplied changes to the copy of the project\n", color.END)
print("1. Make sure to run `git diff` in the following directory and verify the diff.\n")
print("\t$ cd", tempdir)
print("\t$ git diff\n")
print("2. Review the detailed log file - " + color.BOLD + os.getcwd() + "/" + LOG_FILENAME, color.END)
print("3. Run the unit tests and build the project.")
print("\nIf everything looks good in the copy of the project, proceed ahead.")
print("Changes will now be made to " + color.BOLD + args.source_dir + color.END)
if not query_yes_no("Want to continue?", default="no"):
print("Aborted!")
sys.exit(1)
apply_changes(args.source_dir, skip_log=True)
print(color.GREEN + "\nFinished running the script!")
print("You can do `git checkout -- .` to revert all the unstaged changes")
print("`git checkout -- <path>` can also undo a specific file or multiple files in a directory" + color.END)
if __name__ == '__main__':
# Clear screen
os.system('clear')
entry()
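# Invocation sketch (the script filename is hypothetical; the only argument is the
# repository root, as defined by the argument parser above):
#
#     python3 add_license_headers.py ~/projects/my-repo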
|
from .models import PREVIEW_FLAG
class ContentModelAdminMixin(object):
"""Enables staff preview of non-live objects, in combination with
ContentModelQuerySet.live(request). Note the view must pass a request to
this method or the preview won't work, and the mixin needs to come
before admin.ModelAdmin in the parent classes. Requires Django 1.7. """
def view_on_site(self, obj):
url = obj.get_absolute_url()
if not obj.live:
return url + '?%s=1' % PREVIEW_FLAG
return url
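# Usage sketch (Article is a hypothetical model whose queryset supports live(request);
# note the mixin must come before admin.ModelAdmin, as the docstring explains):
#
#     from django.contrib import admin
#
#     @admin.register(Article)
#     class ArticleAdmin(ContentModelAdminMixin, admin.ModelAdmin):
#         list_display = ('title', 'live')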
|
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGroupBox, QPushButton, QComboBox, QSizePolicy
class ImageControlsWidget(QWidget):
def __init__(self):
super().__init__()
self.classes = []
self.init_ui()
def init_ui(self):
prevButton = QPushButton("Previous")
nextButton = QPushButton("Next")
classSelect = QComboBox(self)
classSelect.addItem("Test")
classSelect.activated[str].connect(self.combo_changed)
prevButton.clicked.connect(self.clicked_prev)
nextButton.clicked.connect(self.clicked_next)
self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
hbox = QHBoxLayout()
hbox.addWidget(prevButton)
hbox.addWidget(nextButton)
hbox.addWidget(classSelect)
self.setLayout(hbox)
def clicked_prev(self):
print("Clicked prev button.")
def clicked_next(self):
print("Clicked next button.")
def combo_changed(self, text):
print(text)
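# Usage sketch (standalone window; assumes a PyQt5 environment with a display server):
#
#     import sys
#     from PyQt5.QtWidgets import QApplication
#
#     app = QApplication(sys.argv)
#     controls = ImageControlsWidget()
#     controls.show()
#     sys.exit(app.exec_())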
|
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help= 'Path to the image')
args = vars(ap.parse_args())
# load the image, grab its dimensions and show it
image = cv2.imread(args['image'])
(h, w) = image.shape[:2]
cv2.imshow('Original', image)
# images are just numpy arrays. The top-left pixel can be found at (0, 0)
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {r}, Green: {g}, Blue: {b}".format(r=r, g= g, b=b))
# let's change the value of the pixel at (0, 0) and make it red
image[0, 0] = (0, 0 , 255)
(b, g, r) = image[0, 0]
print('Pixel at (0, 0) - Red: {r}, Green: {g}, Blue: {b}'.format(r=r, g=g, b=b))
# compute the center of the image, which is simply the width and height
# divided by two
(cx, cy) = (w // 2, h //2 )
# since we are using the numpy arrays, we can apply slicing and grab large chunks
# of the image -- let's grab the top-left corner
tl = image[0:cy, 0:cx]
cv2.imshow('Top-Left Corner', tl)
# in similar fashion, let's grab the top-right, bottom-right, and bottom-left
# corners and display them
tr = image[0:cy, cx:w]
br = image[cy:h, cx:w]
bl = image[cy:h, 0:cx]
cv2.imshow('Top-Right Corner', tr)
cv2.imshow('Bottom-Right Corner', br)
cv2.imshow('Bottom-Left Corner', bl)
# now let's make the top-left corner of the original image green
image[0:cy, 0:cx] = (0, 255, 0)
# show updated image
cv2.imshow('Updated', image)
# Exercise - What is the approximate value of pixel located at point x=111
# and y = 225
(b1, g1, r1) = image[225, 111]
print(f'Pixel at (111, 225) - Red: {r1}, Green: {g1}, Blue: {b1}')
cv2.waitKey(0)
|
import atexit
from math import sin, cos
from numpy import array
from pycuda import driver
from pycuda import gpuarray
from _axpy import daxpy
n = 10000
a = 3.4
def detach(context):
context.pop()
context.detach()
# initialise CUDA
driver.init()
device = driver.Device(0)
context = device.make_context(flags=driver.ctx_flags.SCHED_YIELD)
context.set_cache_config(driver.func_cache.PREFER_L1)
context.push()
atexit.register(detach, context)
# initialise data and calculate reference values on CPU
x = array([sin(i) * 2.3 for i in range(n)], float)
y = array([cos(i) * 1.1 for i in range(n)], float)
y_ref = a * x + y
# allocate + copy initial values
x_ = gpuarray.to_gpu(x)
y_ = gpuarray.to_gpu(y)
# calculate axpy on GPU
daxpy(n, a, x_.gpudata, y_.gpudata)
# copy result back to host and print with reference
print(' initial: {0} {1} {2} {3} {4} {5}'.format(
y[0], y[1], y[2], y[3], y[-2], y[-1]))
y_.get(y)
print('reference: {0} {1} {2} {3} {4} {5}'.format(
y_ref[0], y_ref[1], y_ref[2], y_ref[3], y_ref[-2], y_ref[-1]))
print(' result: {0} {1} {2} {3} {4} {5}'.format(
y[0], y[1], y[2], y[3], y[-2], y[-1]))
|
"""
Enum of available ONS indexes
"""
from enum import Enum
from dp_conceptual_search.config import CONFIG
class Index(Enum):
ONS = CONFIG.SEARCH.search_index
DEPARTMENTS = CONFIG.SEARCH.departments_search_index
|
from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertConcert, RelationConcertOrganisation, RelationConcertArtist, Location, ConcertannouncementToConcert
from django.core.management.base import BaseCommand, CommandError
from datetime import datetime
class Command(BaseCommand):
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
for concertannouncement in ConcertAnnouncement.objects.filter(concert__isnull=True).exclude(ignore=True):
#for concertannouncement in ConcertAnnouncement.objects.filter(concert__isnull=True).exclude(ignore=True).filter(raw_venue__organisation__location__pk=34789):
venue_url = "http://hlwtadmin.herokuapp.com/hlwtadmin/venue/" + str(concertannouncement.raw_venue.pk) if concertannouncement.raw_venue else "---"
print("concertannouncement\thttp://hlwtadmin.herokuapp.com/hlwtadmin/concertannouncement/" + str(concertannouncement.pk) + "\trelated venue\t" + venue_url)
ca2c = ConcertannouncementToConcert(concertannouncement)
ca2c.automate()
concertannouncement.save()
|
import logging
import os
from flask import Flask, jsonify, redirect, request
from flask_cors import CORS
from flasgger import Swagger
from ensembl.production.metadata.config import MetadataConfig
from ensembl.production.core.models.hive import HiveInstance
from ensembl.production.core.exceptions import HTTPRequestError
app_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
static_path = os.path.join(app_path, 'static')
template_path = os.path.join(app_path, 'templates')
app = Flask(__name__,
static_url_path='/static/metadata',
static_folder=static_path,
template_folder=template_path,
instance_relative_config=True)
app.config.from_object(MetadataConfig)
CORS(app)
Swagger(app, template_file='swagger.yml')
hive = None
def get_hive():
global hive
if hive is None:
if app.config["HIVE_URI"] is None:
raise RuntimeError('Undefined environment variable: HIVE_URI')
else:
hive = HiveInstance(app.config["HIVE_URI"])
return hive
@app.route('/', methods=['GET'])
def info():
return jsonify(app.config['SWAGGER'])
@app.route('/jobs', methods=['POST'])
def submit_job():
if request.is_json:
request.json["metadata_uri"] = app.config["METADATA_URI"]
app.logger.debug('Submitting metadata job %s', request.json)
try:
analysis = app.config["HIVE_ANALYSIS"]
job = get_hive().create_job(analysis, request.json)
except ValueError as e:
raise HTTPRequestError(str(e), 404)
results = {"job_id": job.job_id}
return jsonify(results), 201
else:
error_msg = 'Could not handle input of type %s' % request.headers['Content-Type']
app.logger.error(error_msg)
raise HTTPRequestError(error_msg)
@app.route('/jobs', methods=['GET'])
def jobs():
app.logger.info('Retrieving jobs')
analysis = app.config['HIVE_ANALYSIS']
return jsonify(get_hive().get_all_results(analysis, child=False))
@app.route('/jobs/<int:job_id>', methods=['GET'])
def job_result(job_id):
fmt = request.args.get('format')
app.logger.debug('Format %s', fmt)
if fmt == 'email':
email = request.args.get('email')
return job_email(email, job_id)
elif fmt == 'failures':
return failure(job_id)
elif fmt is None:
app.logger.info('Retrieving job with ID %s', job_id)
try:
result = get_hive().get_result_for_job_id(job_id, child=True)
except ValueError as e:
raise HTTPRequestError(str(e), 404)
return jsonify(result)
else:
raise HTTPRequestError("Format " + fmt + " not valid")
def job_email(email, job_id):
app.logger.info('Retrieving job with ID %s for %s', job_id, email)
try:
results = get_hive().get_result_for_job_id(job_id, child=True)
if results['status'] == 'complete':
results['subject'] = 'Metadata load for database %s is successful' % (results['output']['database_uri'])
results['body'] = "Metadata load for database %s is successful\n" % (results['output']['database_uri'])
results['body'] += "Load took %s" % (results['output']['runtime'])
elif results['status'] == 'failed':
job_failure = get_hive().get_job_failure_msg_by_id(job_id, child=True)
results['subject'] = 'Metadata load for %s failed' % (results['input']['database_uri'])
results['body'] = 'Metadata load failed with following message:\n'
results['body'] += '%s' % job_failure.msg
except ValueError as e:
raise HTTPRequestError(str(e), 404)
results['output'] = None
return jsonify(results)
def failure(job_id):
app.logger.info('Retrieving failure for job with ID %s', job_id)
try:
job_failure = get_hive().get_job_failure_msg_by_id(job_id, child=True)
except ValueError as e:
raise HTTPRequestError(str(e), 404)
return jsonify({"msg": job_failure.msg})
@app.route('/jobs/<int:job_id>', methods=['DELETE'])
def delete_job(job_id):
try:
job = get_hive().get_job_by_id(job_id)
get_hive().delete_job(job, child=True)
except ValueError as e:
raise HTTPRequestError(str(e), 404)
return jsonify({"id": job_id})
@app.errorhandler(HTTPRequestError)
def handle_bad_request_error(e):
app.logger.error(str(e))
return jsonify(error=str(e)), e.status_code
if __name__ == "__main__":
app.run(debug=True)
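# Example interaction sketch (host/port and the JSON payload key are assumptions;
# the exact job parameters depend on the configured Hive analysis):
#
#     curl -X POST -H 'Content-Type: application/json' \
#          -d '{"database_uri": "mysql://user:pass@host/db"}' http://localhost:5000/jobs
#     curl http://localhost:5000/jobs/1
#     curl 'http://localhost:5000/jobs/1?format=failures'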
|
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
try:
from unittest import mock
except ImportError:
import mock
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
@pytest.mark.natural_earth
@ImageTesting(['gridliner1'])
def test_gridliner():
ny, nx = 2, 4
plt.figure(figsize=(10, 10))
ax = plt.subplot(nx, ny, 1, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 2, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 3, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), color='blue', linestyle='-')
ax.gridlines(ccrs.OSGB())
ax = plt.subplot(nx, ny, 4, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.NorthPolarStereo(), alpha=0.5,
linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 5, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 6, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
ax.gridlines(alpha=0.5, linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 7, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 8,
projection=ccrs.Robinson(central_longitude=135))
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), alpha=0.5, linewidth=1.5, linestyle='-')
delta = 1.5e-2
plt.subplots_adjust(left=0 + delta, right=1 - delta,
top=1 - delta, bottom=0 + delta)
def test_gridliner_specified_lines():
xs = [0, 60, 120, 180, 240, 360]
ys = [-90, -60, -30, 0, 30, 60, 90]
ax = mock.Mock(_gridliners=[], spec=GeoAxes)
gl = GeoAxes.gridlines(ax, xlocs=xs, ylocs=ys)
assert isinstance(gl.xlocator, mticker.FixedLocator)
assert isinstance(gl.ylocator, mticker.FixedLocator)
assert gl.xlocator.tick_values(None, None).tolist() == xs
assert gl.ylocator.tick_values(None, None).tolist() == ys
# The tolerance on this test is particularly high because of the high number
# of text objects. A new testing strategy is needed for this kind of test.
if MPL_VERSION >= '2.0':
grid_label_image = 'gridliner_labels'
else:
grid_label_image = 'gridliner_labels_1.5'
@pytest.mark.natural_earth
@ImageTesting([grid_label_image])
def test_grid_labels():
plt.figure(figsize=(8, 10))
crs_pc = ccrs.PlateCarree()
crs_merc = ccrs.Mercator()
crs_osgb = ccrs.OSGB()
ax = plt.subplot(3, 2, 1, projection=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that adding labels to Mercator gridlines gives an error.
# (Currently can only label PlateCarree gridlines.)
ax = plt.subplot(3, 2, 2,
projection=ccrs.PlateCarree(central_longitude=180))
ax.coastlines()
with pytest.raises(TypeError):
ax.gridlines(crs=crs_merc, draw_labels=True)
ax.set_title('Known bug')
gl = ax.gridlines(crs=crs_pc, draw_labels=True)
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlines = False
ax = plt.subplot(3, 2, 3, projection=crs_merc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that labelling the gridlines on an OSGB plot gives an error.
# (Currently can only draw these on PlateCarree or Mercator plots.)
ax = plt.subplot(3, 2, 4, projection=crs_osgb)
ax.coastlines()
with pytest.raises(TypeError):
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 4, projection=crs_pc)
ax.coastlines()
gl = ax.gridlines(
crs=crs_pc, linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_bottom = True
gl.ylabels_right = True
gl.xlines = False
gl.xlocator = mticker.FixedLocator([-180, -45, 45, 180])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 15, 'color': 'gray'}
gl.xlabel_style = {'color': 'red'}
gl.xpadding = 10
gl.ypadding = 15
# trigger a draw at this point and check the appropriate artists are
# populated on the gridliner instance
FigureCanvasAgg(plt.gcf()).draw()
assert len(gl.xlabel_artists) == 4
assert len(gl.ylabel_artists) == 5
assert len(gl.xline_artists) == 0
ax = plt.subplot(3, 2, 5, projection=crs_pc)
ax.set_extent([-20, 10.0, 45.0, 70.0])
ax.coastlines()
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 6, projection=crs_merc)
ax.set_extent([-20, 10.0, 45.0, 70.0], crs=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Increase margins between plots to stop them bumping into one another.
plt.subplots_adjust(wspace=0.25, hspace=0.25)
|
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 8
_modified_time = 1434060690.119963
_enable_loop = True
_template_filename = '/home/cairisuser/CAIRIS-web/cairis/cairis/templates/index.mako'
_template_uri = 'index.mako'
_source_encoding = 'ascii'
_exports = ['innerTree', 'printIcon']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
body = context.get('body', UNDEFINED)
title = context.get('title', UNDEFINED)
def printIcon(nav):
return render_printIcon(context.locals_(__M_locals),nav)
def innerTree(navObjects):
return render_innerTree(context.locals_(__M_locals),navObjects)
navList = context.get('navList', UNDEFINED)
hasattr = context.get('hasattr', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'<!DOCTYPE html>\r\n<html>\r\n<head lang="en">\r\n\r\n <meta charset="UTF-8">\r\n <title>')
# SOURCE LINE 6
__M_writer(unicode(title))
__M_writer(u'</title>\r\n <meta content=\'width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no\' name=\'viewport\'>\r\n <!-- Bootstrap 3.3.2 -->\r\n <link href="bootstrap/css/bootstrap.min.css" rel="stylesheet" type="text/css" />\r\n <!-- Font Awesome Icons -->\r\n <link href="https://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet" type="text/css" />\r\n <!-- Ionicons -->\r\n <link href="http://code.ionicframework.com/ionicons/2.0.0/css/ionicons.min.css" rel="stylesheet" type="text/css" />\r\n <!-- Theme style -->\r\n <link href="dist/css/AdminLTE.min.css" rel="stylesheet" type="text/css" />\r\n <!-- AdminLTE Skins. We have chosen the skin-blue for this starter\r\n page. However, you can choose any other skin. Make sure you\r\n apply the skin class to the body tag so the changes take effect.\r\n -->\r\n <link href="dist/css/skins/skin-blue.min.css" rel="stylesheet" type="text/css" />\r\n\r\n <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->\r\n <!-- WARNING: Respond.js doesn\'t work if you view the page via file:// -->\r\n <!--[if lt IE 9]>\r\n <script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>\r\n <script src="https://oss.maxcdn.com/libs/respond.js/1.3.0/respond.min.js"></script>\r\n <![endif]-->\r\n</head>\r\n<body class="skin-blue">\r\n<div class="wrapper">\r\n\r\n <!-- Main Header -->\r\n <header class="main-header">\r\n\r\n <!-- Logo -->\r\n <a href="#" class="logo">CAIRIS</a>\r\n\r\n <!-- Header Navbar -->\r\n <nav class="navbar navbar-static-top" role="navigation">\r\n <!-- Sidebar toggle button-->\r\n <a href="#" class="sidebar-toggle" data-toggle="offcanvas" role="button">\r\n <span class="sr-only">Toggle navigation</span>\r\n </a>\r\n <!-- Navbar Right Menu -->\r\n <div class="navbar-custom-menu">\r\n <ul class="nav navbar-nav">\r\n <!-- Messages: style can be found in dropdown.less-->\r\n <!-- Tasks Menu -->\r\n <li class="dropdown tasks-menu">\r\n <!-- Menu Toggle Button -->\r\n <a href="#" class="dropdown-toggle" data-toggle="dropdown">\r\n <i class="fa fa-flag-o"></i>\r\n <span class="label label-danger">9</span>\r\n </a>\r\n <ul class="dropdown-menu">\r\n <li class="header">You have 9 tasks</li>\r\n <li>\r\n <!-- Inner menu: contains the tasks -->\r\n <ul class="menu">\r\n <li><!-- Task item -->\r\n <a href="#">\r\n <!-- Task title and progress text -->\r\n <h3>\r\n Design some buttons\r\n <small class="pull-right">20%</small>\r\n </h3>\r\n <!-- The progress bar -->\r\n <div class="progress xs">\r\n <!-- Change the css width attribute to simulate progress -->\r\n <div class="progress-bar progress-bar-aqua" style="width: 20%" role="progressbar" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100">\r\n <span class="sr-only">20% Complete</span>\r\n </div>\r\n </div>\r\n </a>\r\n </li><!-- end task item -->\r\n </ul>\r\n </li>\r\n <li class="footer">\r\n <a href="#">View all tasks</a>\r\n </li>\r\n </ul>\r\n </li>\r\n </ul>\r\n </div>\r\n </nav>\r\n </header>\r\n\r\n <aside class="main-sidebar">\r\n <!-- sidebar - sidebar.less -->\r\n <section class="sidebar">\r\n <div id="sidebar-scrolling">\r\n <ul class="sidebar-menu">\r\n <li class="header">MENU</li>\r\n <!-- Optionally, you can add icons to the links -->\r\n')
# SOURCE LINE 95
for nav in navList:
# SOURCE LINE 96
if hasattr(nav, 'navObjects'):
# SOURCE LINE 97
__M_writer(u' <li class="treeview">\r\n <a href="')
# SOURCE LINE 98
__M_writer(unicode(nav.href))
__M_writer(u'">')
__M_writer(unicode(printIcon(nav)))
__M_writer(u'<span>')
__M_writer(unicode(nav.text))
__M_writer(u'</span><i class="fa fa-angle-left pull-right"></i>\r\n </a>\r\n <ul class="treeview-menu">\r\n ')
# SOURCE LINE 101
__M_writer(unicode(innerTree(navObjects=nav.navObjects)))
__M_writer(u'\r\n </ul>\r\n </li>\r\n')
# SOURCE LINE 104
else:
# SOURCE LINE 105
__M_writer(u' <li><a href="')
__M_writer(unicode(nav.href))
__M_writer(u'">')
__M_writer(unicode(printIcon(nav)))
__M_writer(u'<span>')
__M_writer(unicode(nav.text))
__M_writer(u'</span>\r\n </a></li>\r\n')
pass
pass
# SOURCE LINE 109
__M_writer(u' </ul>\r\n </div>\r\n </section>\r\n </aside>\r\n\r\n <!-- Content Wrapper. Contains page content -->\r\n <div class="content-wrapper">\r\n <!-- Content Header (Page header) -->\r\n <section class="content-header">\r\n <h1>\r\n Page Header\r\n <small>Optional description</small>\r\n </h1>\r\n <ol class="breadcrumb">\r\n <li><a href="#"><i class="fa fa-dashboard"></i> Level</a></li>\r\n <li class="active">Here</li>\r\n </ol>\r\n </section>\r\n <!-- Main content -->\r\n <section class="content">\r\n <!-- Your Page Content Here -->\r\n\r\n ')
# SOURCE LINE 131
__M_writer(unicode(body))
__M_writer(u'\r\n\r\n </section><!-- /.content -->\r\n </div><!-- /.content-wrapper -->\r\n\r\n <!-- rightnav -->\r\n <div id="rightnavGear" class="no-print"\r\n style="position: fixed; top: 100px; right: 0px; border-radius: 5px 0px 0px 5px; padding: 10px 15px; font-size: 16px; z-index: 99999; cursor: pointer; color: rgb(60, 141, 188); box-shadow: rgba(0, 0, 0, 0.0980392) 0px 1px 3px; background: rgb(255, 255, 255);">\r\n <i class="fa fa-gear"></i></div>\r\n <div id="rightnavMenu" class="no-print"\r\n style="padding: 10px; position: fixed; top: 100px; right: -250px; border: 0px solid rgb(221, 221, 221); width: 250px; z-index: 99999; box-shadow: rgba(0, 0, 0, 0.0980392) 0px 1px 3px; background: rgb(255, 255, 255);">\r\n <h4 class="text-light-blue" style="margin: 0 0 5px 0; border-bottom: 1px solid #ddd; padding-bottom: 15px;">\r\n Options</h4>\r\n </div>\r\n <footer class="main-footer">\r\n <!-- To the right -->\r\n <div class="pull-right hidden-xs">\r\n Anything you want\r\n </div>\r\n <!-- Default to the left -->\r\n <strong>Copyright © 2015 <a href="#">Company</a>.</strong> All rights reserved.\r\n </footer>\r\n\r\n</div>\r\n<!-- REQUIRED JS SCRIPTS -->\r\n<!-- jQuery 2.1.3 -->\r\n<script src="plugins/jQuery/jQuery-2.1.3.min.js"></script>\r\n<!-- Bootstrap 3.3.2 JS -->\r\n<script src="bootstrap/js/bootstrap.min.js" type="text/javascript"></script>\r\n<!-- AdminLTE App -->\r\n<script src="dist/js/app.min.js" type="text/javascript"></script>\r\n<!-- Slimscroll App -->\r\n<script src="plugins/slimScroll/jquery.slimscroll.js" type="text/javascript"></script>\r\n<!-- Script for the right nav -->\r\n<script>\r\n $(document).ready(function(){\r\n $(\'#rightnavGear\').click(function(){\r\n var navGear = $(\'#rightnavGear\');\r\n var navMenu = $(\'#rightnavMenu\');\r\n if (!navGear.hasClass("open")) {\r\n navGear.animate({"right": "250px"});\r\n navMenu.animate({"right": "0"});\r\n navGear.addClass("open");\r\n } else {\r\n navGear.animate({"right": "0"});\r\n navMenu.animate({"right": "-250px"});\r\n navGear.removeClass("open");\r\n }\r\n });\r\n });\r\n</script>\r\n<script>\r\n $(window).load(function(){\r\n $(\'#sidebar-scrolling\').slimScroll({\r\n height: $(\'.main-sidebar\').height() - 20\r\n });\r\n });\r\n</script>\r\n</body>\r\n</html>\r\n')
# SOURCE LINE 196
__M_writer(u'\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_innerTree(context,navObjects):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 191
__M_writer(u'\r\n')
# SOURCE LINE 192
for row in navObjects:
# SOURCE LINE 193
__M_writer(u'<li><a href="')
__M_writer(unicode(row.href))
__M_writer(u'">')
__M_writer(unicode(row.text))
__M_writer(u'\r\n</a></li>\r\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_printIcon(context,nav):
__M_caller = context.caller_stack._push_frame()
try:
hasattr = context.get('hasattr', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 197
__M_writer(u'\r\n')
# SOURCE LINE 198
if hasattr(nav, 'icon'):
# SOURCE LINE 199
__M_writer(u"<i class='")
__M_writer(unicode(nav.icon))
__M_writer(u"'></i>\r\n")
pass
return ''
finally:
context.caller_stack._pop_frame()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Huawei
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
###############################################################################
# Documentation
###############################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hwc_vpc_eip
description:
- elastic ip management.
short_description: Creates a resource of Vpc/EIP in Huawei Cloud
version_added: '2.9'
author: Huawei Inc. (@huaweicloud)
requirements:
- keystoneauth1 >= 3.6.0
options:
state:
description:
- Whether the given object should exist in Huawei Cloud.
type: str
choices: ['present', 'absent']
default: 'present'
filters:
description:
- A list of filters to apply when deciding whether existing
resources match and should be altered. The item of filters
is the name of input options.
type: list
required: true
timeouts:
description:
- The timeouts for each operations.
type: dict
suboptions:
create:
description:
- The timeouts for create operation.
type: str
default: '5m'
update:
description:
- The timeouts for update operation.
type: str
default: '5m'
type:
description:
- Specifies the EIP type. The value can be 5_telcom, 5_union,
5_bgp, or 5_sbgp.
- CN Northeast-Dalian is 5_telcom and 5_union.
- CN South-Guangzhou is 5_sbgp.
- CN East-Shanghai2 is 5_sbgp.
- CN North-Beijing1 is 5_bgp and 5_sbgp.
- AP-Hong Kong is 5_bgp.
type: str
required: true
dedicated_bandwidth:
description:
- Specifies the dedicated bandwidth object.
type: complex
required: false
suboptions:
charge_mode:
description:
- Specifies whether the bandwidth is billed by traffic or
by bandwidth size. The value can be bandwidth or traffic.
If this parameter is left blank or is null character
string, default value bandwidth is used. For IPv6
addresses, the default parameter value is bandwidth
outside China and is traffic in China.
type: str
required: true
name:
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
underscores C(_), hyphens (-), and periods (.).
type: str
required: true
size:
description:
- Specifies the bandwidth size. The value ranges from 1
Mbit/s to 2000 Mbit/s by default. (The specific range may
vary depending on the configuration in each region. You
can see the bandwidth range of each region on the
management console.) The minimum unit for bandwidth
adjustment varies depending on the bandwidth range. The
details are as follows.
- The minimum unit is 1 Mbit/s if the allowed bandwidth
size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
included).
- The minimum unit is 50 Mbit/s if the allowed bandwidth
size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
included).
- The minimum unit is 500 Mbit/s if the allowed bandwidth
size is greater than 1000 Mbit/s.
type: int
required: true
enterprise_project_id:
description:
- Specifies the enterprise project ID.
type: str
required: false
ip_version:
description:
- The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
parameter is left blank, an IPv4 address will be assigned.
type: str
required: false
ipv4_address:
description:
- Specifies the obtained IPv4 EIP. The system automatically assigns
an EIP if you do not specify it.
type: str
required: false
port_id:
description:
- Specifies the port ID. This parameter is returned only when a
private IP address is bound with the EIP.
type: str
required: false
shared_bandwidth_id:
description:
- Specifies the ID of shared bandwidth.
type: str
required: false
extends_documentation_fragment: hwc
'''
EXAMPLES = '''
# create an eip and bind it to a port
- name: create vpc
hwc_network_vpc:
cidr: "192.168.100.0/24"
name: "ansible_network_vpc_test"
register: vpc
- name: create subnet
hwc_vpc_subnet:
gateway_ip: "192.168.100.32"
name: "ansible_network_subnet_test"
dhcp_enable: True
vpc_id: "{{ vpc.id }}"
filters:
- "name"
cidr: "192.168.100.0/26"
register: subnet
- name: create a port
hwc_vpc_port:
subnet_id: "{{ subnet.id }}"
ip_address: "192.168.100.33"
filters:
- "name"
- "network_id"
- "ip_address"
register: port
- name: create an eip and bind it to a port
hwc_vpc_eip:
type: "5_bgp"
dedicated_bandwidth:
charge_mode: "traffic"
name: "ansible_test_dedicated_bandwidth"
size: 1
port_id: "{{ port.id }}"
filters:
- "type"
- "dedicated_bandwidth"
'''
RETURN = '''
type:
description:
- Specifies the EIP type. The value can be 5_telcom, 5_union,
5_bgp, or 5_sbgp.
- CN Northeast-Dalian: 5_telcom and 5_union.
- CN South-Guangzhou: 5_sbgp.
- CN East-Shanghai2: 5_sbgp.
- CN North-Beijing1: 5_bgp and 5_sbgp.
- AP-Hong Kong: 5_bgp.
type: str
returned: success
dedicated_bandwidth:
description:
- Specifies the dedicated bandwidth object.
type: complex
returned: success
contains:
charge_mode:
description:
- Specifies whether the bandwidth is billed by traffic or
by bandwidth size. The value can be bandwidth or traffic.
If this parameter is left blank or is null character
string, default value bandwidth is used. For IPv6
addresses, the default parameter value is bandwidth
outside China and is traffic in China.
type: str
returned: success
name:
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
underscores C(_), hyphens (-), and periods (.).
type: str
returned: success
size:
description:
- Specifies the bandwidth size. The value ranges from 1
Mbit/s to 2000 Mbit/s by default. (The specific range may
vary depending on the configuration in each region. You
can see the bandwidth range of each region on the
management console.) The minimum unit for bandwidth
adjustment varies depending on the bandwidth range. The
details are as follows.
- The minimum unit is 1 Mbit/s if the allowed bandwidth
size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
included).
- The minimum unit is 50 Mbit/s if the allowed bandwidth
size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
included).
- The minimum unit is 500 Mbit/s if the allowed bandwidth
size is greater than 1000 Mbit/s.
type: int
returned: success
id:
description:
- Specifies the ID of dedicated bandwidth.
type: str
returned: success
enterprise_project_id:
description:
- Specifies the enterprise project ID.
type: str
returned: success
ip_version:
description:
- The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
parameter is left blank, an IPv4 address will be assigned.
type: int
returned: success
ipv4_address:
description:
- Specifies the obtained IPv4 EIP. The system automatically assigns
an EIP if you do not specify it.
type: str
returned: success
port_id:
description:
- Specifies the port ID. This parameter is returned only when a
private IP address is bound with the EIP.
type: str
returned: success
shared_bandwidth_id:
description:
- Specifies the ID of shared bandwidth.
type: str
returned: success
create_time:
description:
- Specifies the time (UTC time) when the EIP was assigned.
type: str
returned: success
ipv6_address:
description:
- Specifies the obtained IPv6 EIP.
type: str
returned: success
private_ip_address:
description:
- Specifies the private IP address bound with the EIP. This
parameter is returned only when a private IP address is bound
with the EIP.
type: str
returned: success
'''
from ansible.module_utils.hwc_utils import (
Config, HwcClientException, HwcClientException404, HwcModule,
are_different_dicts, build_path, get_region, is_empty_value,
navigate_value, wait_to_finish)
def build_module():
return HwcModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'],
type='str'),
filters=dict(required=True, type='list', elements='str'),
timeouts=dict(type='dict', options=dict(
create=dict(default='5m', type='str'),
update=dict(default='5m', type='str'),
), default=dict()),
type=dict(type='str', required=True),
dedicated_bandwidth=dict(type='dict', options=dict(
charge_mode=dict(type='str', required=True),
name=dict(type='str', required=True),
size=dict(type='int', required=True)
)),
enterprise_project_id=dict(type='str'),
ip_version=dict(type='int'),
ipv4_address=dict(type='str'),
port_id=dict(type='str'),
shared_bandwidth_id=dict(type='str')
),
supports_check_mode=True,
)
def main():
"""Main function"""
module = build_module()
config = Config(module, "vpc")
try:
resource = None
if module.params['id']:
resource = True
else:
v = search_resource(config)
if len(v) > 1:
raise Exception("find more than one resources(%s)" % ", ".join([
navigate_value(i, ["id"]) for i in v]))
if len(v) == 1:
resource = v[0]
module.params['id'] = navigate_value(resource, ["id"])
result = {}
changed = False
if module.params['state'] == 'present':
if resource is None:
if not module.check_mode:
create(config)
changed = True
current = read_resource(config, exclude_output=True)
expect = user_input_parameters(module)
if are_different_dicts(expect, current):
if not module.check_mode:
update(config)
changed = True
result = read_resource(config)
result['id'] = module.params.get('id')
else:
if resource:
if not module.check_mode:
delete(config)
changed = True
except Exception as ex:
module.fail_json(msg=str(ex))
else:
result['changed'] = changed
module.exit_json(**result)
def user_input_parameters(module):
return {
"dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
"enterprise_project_id": module.params.get("enterprise_project_id"),
"ip_version": module.params.get("ip_version"),
"ipv4_address": module.params.get("ipv4_address"),
"port_id": module.params.get("port_id"),
"shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
"type": module.params.get("type"),
}
def create(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
opts = user_input_parameters(module)
params = build_create_parameters(opts)
r = send_create_request(module, params, client)
obj = async_wait_create(config, r, client, timeout)
module.params['id'] = navigate_value(obj, ["publicip", "id"])
def update(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
opts = user_input_parameters(module)
params = build_update_parameters(opts)
if params:
r = send_update_request(module, params, client)
async_wait_update(config, r, client, timeout)
def delete(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
if module.params["port_id"]:
module.params["port_id"] = ""
update(config)
send_delete_request(module, None, client)
url = build_path(module, "publicips/{id}")
def _refresh_status():
try:
client.get(url)
except HwcClientException404:
return True, "Done"
except Exception:
return None, ""
return True, "Pending"
timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
try:
wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(delete) to "
"be done, error= %s" % str(ex))
def read_resource(config, exclude_output=False):
module = config.module
client = config.client(get_region(module), "vpc", "project")
res = {}
r = send_read_request(module, client)
res["read"] = fill_read_resp_body(r)
return update_properties(module, res, None, exclude_output)
def _build_query_link(opts):
query_params = []
v = navigate_value(opts, ["ip_version"])
if v:
query_params.append("ip_version=" + str(v))
v = navigate_value(opts, ["enterprise_project_id"])
if v:
query_params.append("enterprise_project_id=" + str(v))
query_link = "?marker={marker}&limit=10"
if query_params:
query_link += "&" + "&".join(query_params)
return query_link
def search_resource(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
opts = user_input_parameters(module)
identity_obj = _build_identity_object(module, opts)
query_link = _build_query_link(opts)
link = "publicips" + query_link
result = []
p = {'marker': ''}
while True:
url = link.format(**p)
r = send_list_request(module, client, url)
if not r:
break
for item in r:
item = fill_list_resp_body(item)
if not are_different_dicts(identity_obj, item):
result.append(item)
if len(result) > 1:
break
p['marker'] = r[-1].get('id')
return result
def build_create_parameters(opts):
params = dict()
v = expand_create_bandwidth(opts, None)
if not is_empty_value(v):
params["bandwidth"] = v
v = navigate_value(opts, ["enterprise_project_id"], None)
if not is_empty_value(v):
params["enterprise_project_id"] = v
v = expand_create_publicip(opts, None)
if not is_empty_value(v):
params["publicip"] = v
return params
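# Exactly one of dedicated_bandwidth and shared_bandwidth_id is allowed:
# shared maps to share_type WHOLE, dedicated to share_type PER.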
def expand_create_bandwidth(d, array_index):
v = navigate_value(d, ["dedicated_bandwidth"], array_index)
sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
if v and sbwid:
raise Exception("don't input shared_bandwidth_id and "
"dedicated_bandwidth at same time")
if not (v or sbwid):
raise Exception("must input shared_bandwidth_id or "
"dedicated_bandwidth")
if sbwid:
return {
"id": sbwid,
"share_type": "WHOLE"}
return {
"charge_mode": v["charge_mode"],
"name": v["name"],
"share_type": "PER",
"size": v["size"]}
def expand_create_publicip(d, array_index):
r = dict()
v = navigate_value(d, ["ipv4_address"], array_index)
if not is_empty_value(v):
r["ip_address"] = v
v = navigate_value(d, ["ip_version"], array_index)
if not is_empty_value(v):
r["ip_version"] = v
v = navigate_value(d, ["type"], array_index)
if not is_empty_value(v):
r["type"] = v
return r
def send_create_request(module, params, client):
url = "publicips"
try:
r = client.post(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(create), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
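# Poll the newly created EIP until its status reaches ACTIVE or DOWN, or the timeout expires.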
def async_wait_create(config, result, client, timeout):
module = config.module
path_parameters = {
"publicip_id": ["publicip", "id"],
}
data = {
key: navigate_value(result, path)
for key, path in path_parameters.items()
}
url = build_path(module, "publicips/{publicip_id}", data)
def _query_status():
r = None
try:
r = client.get(url, timeout=timeout)
except HwcClientException:
return None, ""
try:
s = navigate_value(r, ["publicip", "status"])
return r, s
except Exception:
return None, ""
try:
return wait_to_finish(
["ACTIVE", "DOWN"],
None,
_query_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(create) to "
"be done, error= %s" % str(ex))
def build_update_parameters(opts):
params = dict()
v = navigate_value(opts, ["ip_version"], None)
if not is_empty_value(v):
params["ip_version"] = v
v = navigate_value(opts, ["port_id"], None)
if v is not None:
params["port_id"] = v
if not params:
return params
params = {"publicip": params}
return params
def send_update_request(module, params, client):
url = build_path(module, "publicips/{id}")
try:
r = client.put(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(update), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
def async_wait_update(config, result, client, timeout):
module = config.module
url = build_path(module, "publicips/{id}")
def _query_status():
r = None
try:
r = client.get(url, timeout=timeout)
except HwcClientException:
return None, ""
try:
s = navigate_value(r, ["publicip", "status"])
return r, s
except Exception:
return None, ""
try:
return wait_to_finish(
["ACTIVE", "DOWN"],
None,
_query_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(update) to "
"be done, error= %s" % str(ex))
def send_delete_request(module, params, client):
url = build_path(module, "publicips/{id}")
try:
r = client.delete(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(delete), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
def send_read_request(module, client):
url = build_path(module, "publicips/{id}")
r = None
try:
r = client.get(url)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(read), error: %s" % str(ex))
module.fail_json(msg=msg)
return navigate_value(r, ["publicip"], None)
def fill_read_resp_body(body):
result = dict()
result["bandwidth_id"] = body.get("bandwidth_id")
result["bandwidth_name"] = body.get("bandwidth_name")
result["bandwidth_share_type"] = body.get("bandwidth_share_type")
result["bandwidth_size"] = body.get("bandwidth_size")
result["create_time"] = body.get("create_time")
result["enterprise_project_id"] = body.get("enterprise_project_id")
result["id"] = body.get("id")
result["ip_version"] = body.get("ip_version")
result["port_id"] = body.get("port_id")
result["private_ip_address"] = body.get("private_ip_address")
result["public_ip_address"] = body.get("public_ip_address")
result["public_ipv6_address"] = body.get("public_ipv6_address")
result["status"] = body.get("status")
result["tenant_id"] = body.get("tenant_id")
result["type"] = body.get("type")
return result
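# Merge the values read from the API into the user parameters, adding
# output-only fields unless exclude_output is set.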
def update_properties(module, response, array_index, exclude_output=False):
r = user_input_parameters(module)
if not exclude_output:
v = navigate_value(response, ["read", "create_time"], array_index)
r["create_time"] = v
v = r.get("dedicated_bandwidth")
v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
r["dedicated_bandwidth"] = v
v = navigate_value(response, ["read", "enterprise_project_id"],
array_index)
r["enterprise_project_id"] = v
v = navigate_value(response, ["read", "ip_version"], array_index)
r["ip_version"] = v
v = navigate_value(response, ["read", "public_ip_address"], array_index)
r["ipv4_address"] = v
if not exclude_output:
v = navigate_value(response, ["read", "public_ipv6_address"],
array_index)
r["ipv6_address"] = v
v = navigate_value(response, ["read", "port_id"], array_index)
r["port_id"] = v
if not exclude_output:
v = navigate_value(response, ["read", "private_ip_address"],
array_index)
r["private_ip_address"] = v
v = r.get("shared_bandwidth_id")
v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
r["shared_bandwidth_id"] = v
v = navigate_value(response, ["read", "type"], array_index)
r["type"] = v
return r
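# Rebuild the dedicated_bandwidth sub-object from the read response when the
# bandwidth has share_type PER.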
def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
if not (v and v == "PER"):
return current_value
result = current_value
if not result:
result = dict()
if not exclude_output:
v = navigate_value(d, ["read", "bandwidth_id"], array_index)
if v is not None:
result["id"] = v
v = navigate_value(d, ["read", "bandwidth_name"], array_index)
if v is not None:
result["name"] = v
v = navigate_value(d, ["read", "bandwidth_size"], array_index)
if v is not None:
result["size"] = v
return result if result else current_value
def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
v = navigate_value(d, ["read", "bandwidth_id"], array_index)
v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
return v if (v1 and v1 == "WHOLE") else current_value
def send_list_request(module, client, url):
r = None
try:
r = client.get(url)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(list), error: %s" % str(ex))
module.fail_json(msg=msg)
return navigate_value(r, ["publicips"], None)
def _build_identity_object(module, all_opts):
filters = module.params.get("filters")
opts = dict()
for k, v in all_opts.items():
opts[k] = v if k in filters else None
result = dict()
v = expand_list_bandwidth_id(opts, None)
result["bandwidth_id"] = v
v = navigate_value(opts, ["dedicated_bandwidth", "name"], None)
result["bandwidth_name"] = v
result["bandwidth_share_type"] = None
v = navigate_value(opts, ["dedicated_bandwidth", "size"], None)
result["bandwidth_size"] = v
result["create_time"] = None
v = navigate_value(opts, ["enterprise_project_id"], None)
result["enterprise_project_id"] = v
result["id"] = None
v = navigate_value(opts, ["ip_version"], None)
result["ip_version"] = v
v = navigate_value(opts, ["port_id"], None)
result["port_id"] = v
result["private_ip_address"] = None
v = navigate_value(opts, ["ipv4_address"], None)
result["public_ip_address"] = v
result["public_ipv6_address"] = None
result["status"] = None
result["tenant_id"] = None
v = navigate_value(opts, ["type"], None)
result["type"] = v
return result
def expand_list_bandwidth_id(d, array_index):
v = navigate_value(d, ["dedicated_bandwidth"], array_index)
sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
if v and sbwid:
raise Exception("don't input shared_bandwidth_id and "
"dedicated_bandwidth at same time")
return sbwid
def fill_list_resp_body(body):
result = dict()
result["bandwidth_id"] = body.get("bandwidth_id")
result["bandwidth_name"] = body.get("bandwidth_name")
result["bandwidth_share_type"] = body.get("bandwidth_share_type")
result["bandwidth_size"] = body.get("bandwidth_size")
result["create_time"] = body.get("create_time")
result["enterprise_project_id"] = body.get("enterprise_project_id")
result["id"] = body.get("id")
result["ip_version"] = body.get("ip_version")
result["port_id"] = body.get("port_id")
result["private_ip_address"] = body.get("private_ip_address")
result["public_ip_address"] = body.get("public_ip_address")
result["public_ipv6_address"] = body.get("public_ipv6_address")
result["status"] = body.get("status")
result["tenant_id"] = body.get("tenant_id")
result["type"] = body.get("type")
return result
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ImportedObjects',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(verbose_name='date created', auto_now_add=True)),
                ('modified', models.DateTimeField(verbose_name='last modified', auto_now=True)),
('object_pk', models.IntegerField(db_index=True)),
('old_object_pk', models.CharField(db_index=True, max_length=255)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'abstract': False,
'ordering': ('-modified', '-created'),
},
),
]
|