Dataset columns:
hexsha: string (length 40) | size: int64 (3 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (length 3 to 972) | max_stars_repo_name: string (length 6 to 130) | max_stars_repo_head_hexsha: string (length 40 to 78) | max_stars_repo_licenses: list (length 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (length 24, nullable) | max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 972) | max_issues_repo_name: string (length 6 to 130) | max_issues_repo_head_hexsha: string (length 40 to 78) | max_issues_repo_licenses: list (length 1 to 10) | max_issues_count: int64 (1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (length 24, nullable) | max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 972) | max_forks_repo_name: string (length 6 to 130) | max_forks_repo_head_hexsha: string (length 40 to 78) | max_forks_repo_licenses: list (length 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (length 24, nullable) | max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 3 to 1.03M) | avg_line_length: float64 (1.13 to 941k) | max_line_length: int64 (2 to 941k) | alphanum_fraction: float64 (0 to 1)
hexsha: b91e43c53b9635fa2ef0dc770610e64bf82e300e | size: 65,476 | ext: py | lang: Python
max_stars_repo_path: python/paddle/fluid/tests/unittests/dist_transformer.py | max_stars_repo_name: L-Net-1992/Paddle | max_stars_repo_head_hexsha: 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 11 | max_stars_repo_stars_event_min_datetime: 2016-08-29T07:43:26.000Z | max_stars_repo_stars_event_max_datetime: 2016-08-29T07:51:24.000Z
max_issues_repo_path: python/paddle/fluid/tests/unittests/dist_transformer.py | max_issues_repo_name: L-Net-1992/Paddle | max_issues_repo_head_hexsha: 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/paddle/fluid/tests/unittests/dist_transformer.py | max_forks_repo_name: L-Net-1992/Paddle | max_forks_repo_head_hexsha: 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-09-24T11:23:36.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-24T11:23:36.000Z
content:
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import os
import sys
import six
import ast
import multiprocessing
from functools import partial
from os.path import expanduser
import glob
import random
import tarfile
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
import paddle.compat as cpt
from paddle.compat import long_type
import hashlib
const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001))
const_bias_attr = const_para_attr
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
#from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list
class TrainTaskConfig(object):
# only support GPU currently
use_gpu = True
# the epoch number to train.
pass_num = 1
# the number of sequences contained in a mini-batch.
# deprecated, set batch_size in args.
batch_size = 20
# the hyper parameters for Adam optimizer.
    # This static learning_rate will be multiplied by the LearningRateScheduler-
    # derived learning rate to get the final learning rate.
learning_rate = 1
beta1 = 0.9
beta2 = 0.98
eps = 1e-9
# the parameters for learning rate scheduling.
warmup_steps = 4000
# the weight used to mix up the ground-truth distribution and the fixed
# uniform distribution in label smoothing when training.
# Set this as zero if label smoothing is not wanted.
label_smooth_eps = 0.1
# the directory for saving trained models.
model_dir = "trained_models"
# the directory for saving checkpoints.
ckpt_dir = "trained_ckpts"
# the directory for loading checkpoint.
# If provided, continue training from the checkpoint.
ckpt_path = None
# the parameter to initialize the learning rate scheduler.
    # It should be provided if checkpoints are used, since the checkpoint
    # doesn't currently include the training step counter.
start_step = 0
check_acc = True
data_path = expanduser("~") + (
"/.cache/paddle/dataset/test_dist_transformer/")
src_vocab_fpath = data_path + "vocab.bpe.32000"
trg_vocab_fpath = data_path + "vocab.bpe.32000"
train_file_pattern = data_path + "train.tok.clean.bpe.32000.en-de"
val_file_pattern = data_path + "newstest2013.tok.bpe.32000.en-de.cut"
pool_size = 2000
sort_type = None
local = True
shuffle = False
shuffle_batch = False
special_token = ['<s>', '<e>', '<unk>']
token_delimiter = ' '
use_token_batch = False
class InferTaskConfig(object):
use_gpu = True
# the number of examples in one run for sequence generation.
batch_size = 10
# the parameters for beam search.
beam_size = 5
max_out_len = 256
# the number of decoded sentences to output.
n_best = 1
# the flags indicating whether to output the special tokens.
output_bos = False
output_eos = False
output_unk = True
# the directory for loading the trained model.
model_path = "trained_models/pass_1.infer.model"
class ModelHyperParams(object):
# These following five vocabularies related configurations will be set
# automatically according to the passed vocabulary path and special tokens.
# size of source word dictionary.
src_vocab_size = 10000
    # size of target word dictionary
trg_vocab_size = 10000
# index for <bos> token
bos_idx = 0
# index for <eos> token
eos_idx = 1
# index for <unk> token
unk_idx = 2
# max length of sequences deciding the size of position encoding table.
# Start from 1 and count start and end tokens in.
max_length = 256
# the dimension for word embeddings, which is also the last dimension of
# the input and output of multi-head attention, position-wise feed-forward
# networks, encoder and decoder.
d_model = 512
# size of the hidden layer in position-wise feed-forward networks.
d_inner_hid = 2048
# the dimension that keys are projected to for dot-product attention.
d_key = 64
# the dimension that values are projected to for dot-product attention.
d_value = 64
# number of head used in multi-head attention.
n_head = 8
# number of sub-layers to be stacked in the encoder and decoder.
n_layer = 6
# dropout rate used by all dropout layers.
dropout = 0.0 # no random
# random seed used in dropout for CE.
dropout_seed = None
# the flag indicating whether to share embedding and softmax weights.
# vocabularies in source and target should be same for weight sharing.
weight_sharing = True
def merge_cfg_from_list(cfg_list, g_cfgs):
"""
Set the above global configurations using the cfg_list.
"""
assert len(cfg_list) % 2 == 0
for key, value in zip(cfg_list[0::2], cfg_list[1::2]):
for g_cfg in g_cfgs:
if hasattr(g_cfg, key):
try:
value = eval(value)
except Exception: # for file path
pass
setattr(g_cfg, key, value)
break
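# A hedged example of how the helper above behaves (values are hypothetical,
# not from this test): numeric strings are eval'ed, anything that fails eval
# (e.g. a file path) is kept as a plain string, and the attribute is set on the
# first config class that defines it:
#
#   merge_cfg_from_list(["batch_size", "32", "model_dir", "my_models"],
#                       [TrainTaskConfig, ModelHyperParams])
#   # TrainTaskConfig.batch_size -> 32 (int)
#   # TrainTaskConfig.model_dir  -> "my_models" (eval fails, string kept)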
# The placeholder for batch_size in compile time. Must be -1 currently to be
# consistent with some ops' infer-shape output in compile time, such as the
# sequence_expand op used in beamsearch decoder.
batch_size = -1
# The placeholder for sequence length in compile time.
seq_len = ModelHyperParams.max_length
# Here list the data shapes and data types of all inputs.
# The shapes here act as placeholder and are set to pass the infer-shape in
# compile time.
input_descs = {
# The actual data shape of src_word is:
# [batch_size * max_src_len_in_batch, 1]
"src_word": [(batch_size, seq_len, long_type(1)), "int64", 2],
# The actual data shape of src_pos is:
# [batch_size * max_src_len_in_batch, 1]
"src_pos": [(batch_size, seq_len, long_type(1)), "int64"],
# This input is used to remove attention weights on paddings in the
# encoder.
# The actual data shape of src_slf_attn_bias is:
# [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch]
"src_slf_attn_bias":
[(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"],
# The actual data shape of trg_word is:
# [batch_size * max_trg_len_in_batch, 1]
"trg_word": [(batch_size, seq_len, long_type(1)), "int64",
2], # lod_level is only used in fast decoder.
# The actual data shape of trg_pos is:
# [batch_size * max_trg_len_in_batch, 1]
"trg_pos": [(batch_size, seq_len, long_type(1)), "int64"],
# This input is used to remove attention weights on paddings and
# subsequent words in the decoder.
# The actual data shape of trg_slf_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch]
"trg_slf_attn_bias":
[(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"],
# This input is used to remove attention weights on paddings of the source
# input in the encoder-decoder attention.
# The actual data shape of trg_src_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch]
"trg_src_attn_bias":
[(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"],
# This input is used in independent decoder program for inference.
# The actual data shape of enc_output is:
# [batch_size, max_src_len_in_batch, d_model]
"enc_output": [(batch_size, seq_len, ModelHyperParams.d_model), "float32"],
# The actual data shape of label_word is:
# [batch_size * max_trg_len_in_batch, 1]
"lbl_word": [(batch_size * seq_len, long_type(1)), "int64"],
# This input is used to mask out the loss of padding tokens.
# The actual data shape of label_weight is:
# [batch_size * max_trg_len_in_batch, 1]
"lbl_weight": [(batch_size * seq_len, long_type(1)), "float32"],
# These inputs are used to change the shape tensor in beam-search decoder.
"trg_slf_attn_pre_softmax_shape_delta": [(long_type(2), ), "int32"],
"trg_slf_attn_post_softmax_shape_delta": [(long_type(4), ), "int32"],
"init_score": [(batch_size, long_type(1)), "float32"],
}
# Names of word embedding table which might be reused for weight sharing.
word_emb_param_names = (
"src_word_emb_table",
"trg_word_emb_table",
)
# Names of position encoding table which will be initialized externally.
pos_enc_param_names = (
"src_pos_enc_table",
"trg_pos_enc_table",
)
# separated inputs for different usages.
encoder_data_input_fields = (
"src_word",
"src_pos",
"src_slf_attn_bias",
)
decoder_data_input_fields = (
"trg_word",
"trg_pos",
"trg_slf_attn_bias",
"trg_src_attn_bias",
"enc_output",
)
label_data_input_fields = (
"lbl_word",
"lbl_weight",
)
# In fast decoder, trg_pos (only containing the current time step) is generated
# by ops and trg_slf_attn_bias is not needed.
fast_decoder_data_input_fields = (
"trg_word",
"init_score",
"trg_src_attn_bias",
)
# fast_decoder_util_input_fields = (
# "trg_slf_attn_pre_softmax_shape_delta",
# "trg_slf_attn_post_softmax_shape_delta", )
#from optim import LearningRateScheduler
class LearningRateScheduler(object):
"""
Wrapper for learning rate scheduling as described in the Transformer paper.
LearningRateScheduler adapts the learning rate externally and the adapted
learning rate will be fed into the main_program as input data.
"""
def __init__(self,
d_model,
warmup_steps,
learning_rate=0.001,
current_steps=0,
name="learning_rate"):
self.current_steps = current_steps
self.warmup_steps = warmup_steps
self.d_model = d_model
self.static_lr = learning_rate
self.learning_rate = layers.create_global_var(
name=name,
shape=[1],
value=float(learning_rate),
dtype="float32",
persistable=True)
def update_learning_rate(self):
self.current_steps += 1
lr_value = np.power(self.d_model, -0.5) * np.min([
np.power(self.current_steps, -0.5),
np.power(self.warmup_steps, -1.5) * self.current_steps
]) * self.static_lr
return np.array([lr_value], dtype="float32")
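# A rough worked example of the schedule above (the Noam/Transformer decay),
# assuming the defaults used in this file (d_model=512, warmup_steps=4000,
# static_lr=1):
#   lr(step)  = d_model**-0.5 * min(step**-0.5, warmup_steps**-1.5 * step)
#   lr(1)     ~= 0.0442 * 4000**-1.5 * 1   ~= 1.7e-7   (linear warmup)
#   lr(4000)  ~= 0.0442 * 4000**-0.5       ~= 7.0e-4   (peak at end of warmup)
#   lr(16000) ~= 0.0442 * 16000**-0.5      ~= 3.5e-4   (inverse-sqrt decay)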
#from transformer_train import train_loop
def pad_batch_data(insts,
pad_idx,
n_head,
is_target=False,
is_label=False,
return_attn_bias=True,
return_max_len=True,
return_num_token=False):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and attention bias.
"""
return_list = []
max_len = max(len(inst) for inst in insts)
num_token = six.moves.reduce(lambda x, y: x + y,
[len(inst)
for inst in insts]) if return_num_token else 0
    # Any token included in the dict can be used for padding, since the
    # paddings' loss will be masked out by weights and has no effect on
    # parameter gradients.
inst_data = np.array(
[inst + [pad_idx] * (max_len - len(inst)) for inst in insts])
return_list += [inst_data.astype("int64").reshape([-1, 1])]
if is_label: # label weight
inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst))
for inst in insts])
return_list += [inst_weight.astype("float32").reshape([-1, 1])]
else: # position data
inst_pos = np.array([
list(range(1,
len(inst) + 1)) + [0] * (max_len - len(inst))
for inst in insts
])
return_list += [inst_pos.astype("int64").reshape([-1, 1])]
if return_attn_bias:
if is_target:
# This is used to avoid attention on paddings and subsequent
# words.
slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len))
slf_attn_bias_data = np.triu(slf_attn_bias_data,
1).reshape([-1, 1, max_len, max_len])
slf_attn_bias_data = np.tile(slf_attn_bias_data,
[1, n_head, 1, 1]) * [-1e9]
else:
# This is used to avoid attention on paddings.
slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *
(max_len - len(inst))
for inst in insts])
slf_attn_bias_data = np.tile(
slf_attn_bias_data.reshape([-1, 1, 1, max_len]),
[1, n_head, max_len, 1])
return_list += [slf_attn_bias_data.astype("float32")]
if return_max_len:
return_list += [max_len]
if return_num_token:
return_list += [num_token]
return return_list if len(return_list) > 1 else return_list[0]
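# A small sketch of what pad_batch_data returns for a hypothetical batch
# (insts=[[3, 5], [7]], pad_idx=1, n_head=2, is_target=False, defaults kept):
#   word ids   -> shape (4, 1) int64:  [[3], [5], [7], [1]]   (padded with 1)
#   positions  -> shape (4, 1) int64:  [[1], [2], [1], [0]]   (0 marks padding)
#   attn bias  -> shape (2, 2, 2, 2) float32, -1e9 at padded key positions
#   max_len    -> 2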
def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx,
n_head, d_model):
"""
Put all padded data needed by training into a dict.
"""
src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(
[inst[0] for inst in insts], src_pad_idx, n_head, is_target=False)
src_word = src_word.reshape(-1, src_max_len, 1)
src_pos = src_pos.reshape(-1, src_max_len, 1)
trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(
[inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True)
trg_word = trg_word.reshape(-1, trg_max_len, 1)
trg_pos = trg_pos.reshape(-1, trg_max_len, 1)
trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],
[1, 1, trg_max_len, 1]).astype("float32")
lbl_word, lbl_weight, num_token = pad_batch_data(
[inst[2] for inst in insts],
trg_pad_idx,
n_head,
is_target=False,
is_label=True,
return_attn_bias=False,
return_max_len=False,
return_num_token=True)
data_input_dict = dict(
list(
zip(data_input_names, [
src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
])))
return data_input_dict, np.asarray([num_token], dtype="float32")
def read_multiple(reader, count, clip_last=True):
"""
Stack data from reader for multi-devices.
"""
def __impl__():
res = []
for item in reader():
res.append(item)
if len(res) == count:
yield res
res = []
if len(res) == count:
yield res
elif not clip_last:
data = []
for item in res:
data += item
if len(data) > count:
inst_num_per_part = len(data) // count
yield [
data[inst_num_per_part * i:inst_num_per_part * (i + 1)]
for i in range(count)
]
return __impl__
def split_data(data, num_part):
"""
Split data for each device.
"""
if len(data) == num_part:
return data
data = data[0]
inst_num_per_part = len(data) // num_part
return [
data[inst_num_per_part * i:inst_num_per_part * (i + 1)]
for i in range(num_part)
]
def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names,
sum_cost, token_num):
val_data = DataReader(
src_vocab_fpath=TrainTaskConfig.src_vocab_fpath,
trg_vocab_fpath=TrainTaskConfig.trg_vocab_fpath,
fpattern=TrainTaskConfig.val_file_pattern,
token_delimiter=TrainTaskConfig.token_delimiter,
use_token_batch=TrainTaskConfig.use_token_batch,
batch_size=TrainTaskConfig.batch_size *
(1 if TrainTaskConfig.use_token_batch else dev_count),
pool_size=TrainTaskConfig.pool_size,
sort_type=TrainTaskConfig.sort_type,
start_mark=TrainTaskConfig.special_token[0],
end_mark=TrainTaskConfig.special_token[1],
unk_mark=TrainTaskConfig.special_token[2],
# count start and end tokens out
max_length=ModelHyperParams.max_length - 2,
clip_last_batch=False,
shuffle=False,
shuffle_batch=False)
build_strategy = fluid.BuildStrategy()
strategy = fluid.ExecutionStrategy()
strategy.num_threads = 1
test_exe = fluid.ParallelExecutor(use_cuda=TrainTaskConfig.use_gpu,
main_program=test_program,
share_vars_from=train_exe,
build_strategy=build_strategy,
exec_strategy=strategy)
def test(exe=test_exe):
test_total_cost = 0
test_total_token = 0
test_data = read_multiple(
reader=val_data.batch_generator,
count=dev_count if TrainTaskConfig.use_token_batch else 1)
for batch_id, data in enumerate(test_data()):
feed_list = []
for place_id, data_buffer in enumerate(
split_data(data, num_part=dev_count)):
data_input_dict, _ = prepare_batch_input(
data_buffer, data_input_names, ModelHyperParams.eos_idx,
ModelHyperParams.eos_idx, ModelHyperParams.n_head,
ModelHyperParams.d_model)
feed_list.append(data_input_dict)
outs = exe.run(feed=feed_list,
fetch_list=[sum_cost.name, token_num.name])
sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1])
test_total_cost += sum_cost_val.sum()
test_total_token += token_num_val.sum()
test_avg_cost = test_total_cost / test_total_token
test_ppl = np.exp([min(test_avg_cost, 100)])
return test_avg_cost, test_ppl
return test
def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
token_num, predict, test_program):
# Initialize the parameters.
if TrainTaskConfig.ckpt_path:
lr_scheduler.current_steps = TrainTaskConfig.start_step
else:
exe.run(fluid.framework.default_startup_program())
train_data = DataReader(
src_vocab_fpath=TrainTaskConfig.src_vocab_fpath,
trg_vocab_fpath=TrainTaskConfig.trg_vocab_fpath,
fpattern=TrainTaskConfig.train_file_pattern,
token_delimiter=TrainTaskConfig.token_delimiter,
use_token_batch=TrainTaskConfig.use_token_batch,
batch_size=TrainTaskConfig.batch_size *
(1 if TrainTaskConfig.use_token_batch else dev_count),
pool_size=TrainTaskConfig.pool_size,
sort_type=TrainTaskConfig.sort_type,
shuffle=TrainTaskConfig.shuffle,
shuffle_batch=TrainTaskConfig.shuffle_batch,
start_mark=TrainTaskConfig.special_token[0],
end_mark=TrainTaskConfig.special_token[1],
unk_mark=TrainTaskConfig.special_token[2],
# count start and end tokens out
max_length=ModelHyperParams.max_length - 2,
clip_last_batch=False)
train_data = read_multiple(
reader=train_data.batch_generator,
count=dev_count if TrainTaskConfig.use_token_batch else 1)
build_strategy = fluid.BuildStrategy()
    # Since the token number differs among devices, customize the gradient
    # scale to use the token-averaged cost across devices: the gradient scale
    # is `1 / token_number` for the average cost.
build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized
strategy = fluid.ExecutionStrategy()
strategy.num_threads = 1
train_exe = fluid.ParallelExecutor(use_cuda=TrainTaskConfig.use_gpu,
loss_name=sum_cost.name,
main_program=train_progm,
build_strategy=build_strategy,
exec_strategy=strategy)
data_input_names = encoder_data_input_fields + decoder_data_input_fields[:
-1] + label_data_input_fields
if TrainTaskConfig.val_file_pattern is not None:
test = test_context(test_program, avg_cost, train_exe, dev_count,
data_input_names, sum_cost, token_num)
# the best cross-entropy value with label smoothing
loss_normalizer = -((1. - TrainTaskConfig.label_smooth_eps) * np.log(
(1. - TrainTaskConfig.label_smooth_eps)) +
TrainTaskConfig.label_smooth_eps *
np.log(TrainTaskConfig.label_smooth_eps /
(ModelHyperParams.trg_vocab_size - 1) + 1e-20))
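    # Note (descriptive, not original code): loss_normalizer is the entropy of
    # the smoothed target distribution (probability 1 - eps on the gold token
    # and eps spread over the remaining trg_vocab_size - 1 tokens), i.e. the
    # lowest cross-entropy reachable under label smoothing. With eps=0.1 and a
    # 10k vocabulary it is roughly -(0.9*ln(0.9) + 0.1*ln(0.1/9999)) ~= 1.25.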
init = False
for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
pass_start_time = time.time()
for batch_id, data in enumerate(train_data()):
if batch_id >= RUN_STEP:
break
feed_list = []
total_num_token = 0
if TrainTaskConfig.local:
lr_rate = lr_scheduler.update_learning_rate()
for place_id, data_buffer in enumerate(
split_data(data, num_part=dev_count)):
data_input_dict, num_token = prepare_batch_input(
data_buffer, data_input_names, ModelHyperParams.eos_idx,
ModelHyperParams.eos_idx, ModelHyperParams.n_head,
ModelHyperParams.d_model)
total_num_token += num_token
feed_kv_pairs = list(data_input_dict.items())
if TrainTaskConfig.local:
feed_kv_pairs += list(
{lr_scheduler.learning_rate.name: lr_rate}.items())
feed_list.append(dict(feed_kv_pairs))
if not init:
for pos_enc_param_name in pos_enc_param_names:
pos_enc = position_encoding_init(
ModelHyperParams.max_length + 1,
ModelHyperParams.d_model)
feed_list[place_id][pos_enc_param_name] = pos_enc
if not TrainTaskConfig.check_acc:
for feed_dict in feed_list:
feed_dict[sum_cost.name + "@GRAD"] = 1. / total_num_token
else:
b = 100 * TrainTaskConfig.batch_size
a = np.asarray([b], dtype="float32")
for feed_dict in feed_list:
feed_dict[sum_cost.name + "@GRAD"] = 1. / a
outs = train_exe.run(fetch_list=[sum_cost.name, token_num.name],
feed=feed_list)
sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1])
total_sum_cost = sum_cost_val.sum()
total_token_num = token_num_val.sum()
total_avg_cost = total_sum_cost / total_token_num
init = True
# Validate and save the model for inference.
if TrainTaskConfig.val_file_pattern is not None:
val_avg_cost, val_ppl = test()
print("[%f]" % val_avg_cost)
else:
assert (False)
#import transformer_reader as reader
class SortType(object):
GLOBAL = 'global'
POOL = 'pool'
NONE = "none"
class Converter(object):
def __init__(self, vocab, beg, end, unk, delimiter):
self._vocab = vocab
self._beg = beg
self._end = end
self._unk = unk
self._delimiter = delimiter
def __call__(self, sentence):
return [self._beg] + [
self._vocab.get(w, self._unk)
for w in sentence.split(self._delimiter)
] + [self._end]
class ComposedConverter(object):
def __init__(self, converters):
self._converters = converters
def __call__(self, parallel_sentence):
return [
self._converters[i](parallel_sentence[i])
for i in range(len(self._converters))
]
class SentenceBatchCreator(object):
def __init__(self, batch_size):
self.batch = []
self._batch_size = batch_size
def append(self, info):
self.batch.append(info)
if len(self.batch) == self._batch_size:
tmp = self.batch
self.batch = []
return tmp
class TokenBatchCreator(object):
def __init__(self, batch_size):
self.batch = []
self.max_len = -1
self._batch_size = batch_size
def append(self, info):
cur_len = info.max_len
max_len = max(self.max_len, cur_len)
if max_len * (len(self.batch) + 1) > self._batch_size:
result = self.batch
self.batch = [info]
self.max_len = cur_len
return result
else:
self.max_len = max_len
self.batch.append(info)
class SampleInfo(object):
def __init__(self, i, max_len, min_len):
self.i = i
self.min_len = min_len
self.max_len = max_len
class MinMaxFilter(object):
def __init__(self, max_len, min_len, underlying_creator):
self._min_len = min_len
self._max_len = max_len
self._creator = underlying_creator
def append(self, info):
if info.max_len > self._max_len or info.min_len < self._min_len:
return
else:
return self._creator.append(info)
@property
def batch(self):
return self._creator.batch
class DataReader(object):
"""
    The data reader loads all data from files and produces batches of data
    according to the given settings.
An example of returning a generator producing data batches whose data
is shuffled in each pass and sorted in each pool:
```
train_data = DataReader(
src_vocab_fpath='data/src_vocab_file',
trg_vocab_fpath='data/trg_vocab_file',
fpattern='data/part-*',
use_token_batch=True,
batch_size=2000,
pool_size=10000,
sort_type=SortType.POOL,
shuffle=True,
shuffle_batch=True,
start_mark='<s>',
end_mark='<e>',
unk_mark='<unk>',
clip_last_batch=False).batch_generator
```
:param src_vocab_fpath: The path of vocabulary file of source language.
:type src_vocab_fpath: basestring
:param trg_vocab_fpath: The path of vocabulary file of target language.
:type trg_vocab_fpath: basestring
:param fpattern: The pattern to match data files.
:type fpattern: basestring
:param batch_size: The number of sequences contained in a mini-batch.
or the maximum number of tokens (include paddings) contained in a
mini-batch.
:type batch_size: int
:param pool_size: The size of pool buffer.
:type pool_size: int
:param sort_type: The grain to sort by length: 'global' for all
instances; 'pool' for instances in pool; 'none' for no sort.
:type sort_type: basestring
:param clip_last_batch: Whether to clip the last uncompleted batch.
:type clip_last_batch: bool
:param tar_fname: The data file in tar if fpattern matches a tar file.
:type tar_fname: basestring
    :param min_length: The minimum length used to filter sequences.
    :type min_length: int
    :param max_length: The maximum length used to filter sequences.
    :type max_length: int
:param shuffle: Whether to shuffle all instances.
:type shuffle: bool
:param shuffle_batch: Whether to shuffle the generated batches.
:type shuffle_batch: bool
:param use_token_batch: Whether to produce batch data according to
token number.
:type use_token_batch: bool
:param field_delimiter: The delimiter used to split source and target in
each line of data file.
:type field_delimiter: basestring
:param token_delimiter: The delimiter used to split tokens in source or
target sentences.
:type token_delimiter: basestring
    :param start_mark: The token representing the beginning of
        sentences in the dictionary.
    :type start_mark: basestring
    :param end_mark: The token representing the end of sentences
        in the dictionary.
    :type end_mark: basestring
    :param unk_mark: The token representing an unknown word in the dictionary.
:type unk_mark: basestring
:param seed: The seed for random.
:type seed: int
"""
def __init__(self,
src_vocab_fpath,
trg_vocab_fpath,
fpattern,
batch_size,
pool_size,
sort_type=SortType.GLOBAL,
clip_last_batch=True,
tar_fname=None,
min_length=0,
max_length=100,
shuffle=True,
shuffle_batch=False,
use_token_batch=False,
field_delimiter="\t",
token_delimiter=" ",
start_mark="<s>",
end_mark="<e>",
unk_mark="<unk>",
seed=0):
self._src_vocab = self.load_dict(src_vocab_fpath)
self._only_src = True
if trg_vocab_fpath is not None:
self._trg_vocab = self.load_dict(trg_vocab_fpath)
self._only_src = False
self._pool_size = pool_size
self._batch_size = batch_size
self._use_token_batch = use_token_batch
self._sort_type = sort_type
self._clip_last_batch = clip_last_batch
self._shuffle = shuffle
self._shuffle_batch = shuffle_batch
self._min_length = min_length
self._max_length = max_length
self._field_delimiter = field_delimiter
self._token_delimiter = token_delimiter
self.load_src_trg_ids(end_mark, fpattern, start_mark, tar_fname,
unk_mark)
self._random = random.Random(x=seed)
def load_src_trg_ids(self, end_mark, fpattern, start_mark, tar_fname,
unk_mark):
converters = [
Converter(vocab=self._src_vocab,
beg=self._src_vocab[start_mark],
end=self._src_vocab[end_mark],
unk=self._src_vocab[unk_mark],
delimiter=self._token_delimiter)
]
if not self._only_src:
converters.append(
Converter(vocab=self._trg_vocab,
beg=self._trg_vocab[start_mark],
end=self._trg_vocab[end_mark],
unk=self._trg_vocab[unk_mark],
delimiter=self._token_delimiter))
converters = ComposedConverter(converters)
self._src_seq_ids = []
self._trg_seq_ids = None if self._only_src else []
self._sample_infos = []
for i, line in enumerate(self._load_lines(fpattern, tar_fname)):
src_trg_ids = converters(line)
self._src_seq_ids.append(src_trg_ids[0])
lens = [len(src_trg_ids[0])]
if not self._only_src:
self._trg_seq_ids.append(src_trg_ids[1])
lens.append(len(src_trg_ids[1]))
self._sample_infos.append(SampleInfo(i, max(lens), min(lens)))
def _load_lines(self, fpattern, tar_fname):
fpaths = glob.glob(fpattern)
if len(fpaths) == 1 and tarfile.is_tarfile(fpaths[0]):
if tar_fname is None:
raise Exception("If tar file provided, please set tar_fname.")
f = tarfile.open(fpaths[0], "r")
for line in f.extractfile(tar_fname):
line = cpt.to_text(line)
fields = line.strip("\n").split(self._field_delimiter)
if (not self._only_src
and len(fields) == 2) or (self._only_src
and len(fields) == 1):
yield fields
else:
for fpath in fpaths:
if not os.path.isfile(fpath):
raise IOError("Invalid file: %s" % fpath)
with open(fpath, "rb") as f:
for line in f:
line = cpt.to_text(line)
fields = line.strip("\n").split(self._field_delimiter)
if (not self._only_src
and len(fields) == 2) or (self._only_src
and len(fields) == 1):
yield fields
@staticmethod
def load_dict(dict_path, reverse=False):
word_dict = {}
with open(dict_path, "rb") as fdict:
for idx, line in enumerate(fdict):
line = cpt.to_text(line)
if reverse:
word_dict[idx] = line.strip("\n")
else:
word_dict[line.strip("\n")] = idx
return word_dict
def batch_generator(self):
# global sort or global shuffle
if self._sort_type == SortType.GLOBAL:
infos = sorted(self._sample_infos,
key=lambda x: x.max_len,
reverse=True)
else:
if self._shuffle:
infos = self._sample_infos
self._random.shuffle(infos)
else:
infos = self._sample_infos
if self._sort_type == SortType.POOL:
for i in range(0, len(infos), self._pool_size):
infos[i:i + self._pool_size] = sorted(
infos[i:i + self._pool_size], key=lambda x: x.max_len)
# concat batch
batches = []
batch_creator = TokenBatchCreator(
self._batch_size
) if self._use_token_batch else SentenceBatchCreator(self._batch_size)
batch_creator = MinMaxFilter(self._max_length, self._min_length,
batch_creator)
for info in infos:
batch = batch_creator.append(info)
if batch is not None:
batches.append(batch)
if not self._clip_last_batch and len(batch_creator.batch) != 0:
batches.append(batch_creator.batch)
if self._shuffle_batch:
self._random.shuffle(batches)
for batch in batches:
batch_ids = [info.i for info in batch]
if self._only_src:
yield [[self._src_seq_ids[idx]] for idx in batch_ids]
else:
yield [(self._src_seq_ids[idx], self._trg_seq_ids[idx][:-1],
self._trg_seq_ids[idx][1:]) for idx in batch_ids]
#from transformer_model import transformer
def position_encoding_init(n_position, d_pos_vec):
"""
Generate the initial values for the sinusoid position encoding table.
"""
position_enc = np.array([[
pos / np.power(10000, 2 * (j // 2) / d_pos_vec)
for j in range(d_pos_vec)
] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc.astype("float32")
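# A quick sanity sketch of the table built above (illustrative, not part of
# the original test):
#   pe = position_encoding_init(n_position=5, d_pos_vec=4)
#   pe.shape -> (5, 4)
#   pe[0]    -> all zeros (row 0 is reserved for the padding position)
#   pe[1]    -> [sin(1), cos(1), sin(1/100), cos(1/100)]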
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
cache=None):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
    computing the softmax activation to mask certain selected positions so
    that they will not be considered in the attention weights.
"""
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
raise ValueError(
"Inputs: queries, keys and values should all be 3-D tensors.")
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
k = layers.fc(input=keys,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
v = layers.fc(input=values,
size=d_value * n_head,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
return q, k, v
def __split_heads(x, n_head):
"""
Reshape the last dimension of input tensor x so that it becomes two
dimensions and then transpose. Specifically, input a tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] then output a tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
if n_head == 1:
return x
hidden_size = x.shape[-1]
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped = layers.reshape(x=x,
shape=[0, 0, n_head, hidden_size // n_head])
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
def __combine_heads(x):
"""
Transpose and then reshape the last two dimensions of input tensor x
        so that it becomes one dimension, which is the reverse of __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])))
def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
"""
Scaled Dot-Product Attention
"""
scaled_q = layers.scale(x=q, scale=d_model**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
if attn_bias:
product += attn_bias
weights = layers.softmax(product)
if dropout_rate:
weights = layers.dropout(weights,
dropout_prob=dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
if cache is not None: # use cache and concat time steps
k = cache["k"] = layers.concat([cache["k"], k], axis=1)
v = cache["v"] = layers.concat([cache["v"], v], axis=1)
q = __split_heads(q, n_head)
k = __split_heads(k, n_head)
v = __split_heads(v, n_head)
ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu",
param_attr=const_para_attr,
bias_attr=const_bias_attr)
out = layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout_rate:
out = layers.dropout(out,
dropout_prob=dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder(src_word,
src_pos,
src_vocab_size,
src_emb_dim,
src_max_len,
dropout_rate=0.,
word_emb_param_name=None,
pos_enc_param_name=None):
"""Add word embeddings and position encodings.
The output tensor has a shape of:
[batch_size, max_src_length_in_batch, d_model].
This module is used at the bottom of the encoder stacks.
"""
if TrainTaskConfig.check_acc:
src_word_emb = layers.embedding(
src_word,
size=[src_vocab_size, src_emb_dim],
param_attr=fluid.ParamAttr(
name=word_emb_param_name,
initializer=fluid.initializer.ConstantInitializer(0.001)))
else:
src_word_emb = layers.embedding(
src_word,
size=[src_vocab_size, src_emb_dim],
param_attr=fluid.ParamAttr(name=word_emb_param_name,
initializer=fluid.initializer.Normal(
0., src_emb_dim**-0.5)))
src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5)
src_pos_enc = layers.embedding(
src_pos,
size=[src_max_len, src_emb_dim],
param_attr=fluid.ParamAttr(
name=pos_enc_param_name,
trainable=False,
initializer=fluid.initializer.ConstantInitializer(0.001)))
src_pos_enc.stop_gradient = True
enc_input = src_word_emb + src_pos_enc
return layers.dropout(enc_input,
dropout_prob=dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False) if dropout_rate else enc_input
prepare_encoder = partial(prepare_encoder,
pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(prepare_encoder,
pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention sub-layer followed by
    position-wise feed-forward networks, with both components accompanied by
    post_process_layer to add residual connection, layer normalization
    and dropout.
"""
attn_output = multi_head_attention(enc_input, enc_input, enc_input,
attn_bias, d_key, d_value, d_model,
n_head, dropout_rate)
attn_output = post_process_layer(enc_input, attn_output, "dan",
dropout_rate)
ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value,
d_model, d_inner_hid, dropout_rate)
enc_input = enc_output
return enc_output
def decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.,
cache=None):
""" The layer to be stacked in decoder part.
    The structure of this module is similar to that in the encoder part, except
    that an extra multi-head attention sub-layer is added to implement
    encoder-decoder attention.
"""
slf_attn_output = multi_head_attention(
dec_input,
dec_input,
dec_input,
slf_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate,
cache,
)
slf_attn_output = post_process_layer(
dec_input,
slf_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate,
)
enc_attn_output = multi_head_attention(
slf_attn_output,
enc_output,
enc_output,
dec_enc_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate,
)
enc_attn_output = post_process_layer(
slf_attn_output,
enc_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate,
)
ffd_output = positionwise_feed_forward(
enc_attn_output,
d_inner_hid,
d_model,
)
dec_output = post_process_layer(
enc_attn_output,
ffd_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate,
)
return dec_output
def decoder(dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.,
caches=None):
"""
The decoder is composed of a stack of identical decoder_layer layers.
"""
for i in range(n_layer):
cache = None
if caches is not None:
cache = caches[i]
dec_output = decoder_layer(dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
cache=cache)
dec_input = dec_output
return dec_output
def make_all_inputs(input_fields):
"""
Define the input data layers for the transformer model.
"""
inputs = []
for input_field in input_fields:
input_var = layers.data(name=input_field,
shape=input_descs[input_field][0],
dtype=input_descs[input_field][1],
lod_level=input_descs[input_field][2]
if len(input_descs[input_field]) == 3 else 0,
append_batch_size=False)
inputs.append(input_var)
return inputs
def transformer(
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
label_smooth_eps,
):
if weight_sharing:
        assert src_vocab_size == trg_vocab_size, (
"Vocabularies in source and target should be same for weight sharing."
)
enc_inputs = make_all_inputs(encoder_data_input_fields)
enc_output = wrap_encoder(
src_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
enc_inputs,
)
dec_inputs = make_all_inputs(decoder_data_input_fields[:-1])
predict = wrap_decoder(
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
dec_inputs,
enc_output,
)
    # Padding indices do not contribute to the total loss. The weights are
    # used to mask out padding indices when calculating the loss.
label, weights = make_all_inputs(label_data_input_fields)
if label_smooth_eps:
label = layers.label_smooth(label=layers.one_hot(input=label,
depth=trg_vocab_size),
epsilon=label_smooth_eps)
cost = layers.softmax_with_cross_entropy(
logits=layers.reshape(predict, shape=[-1, trg_vocab_size]),
label=label,
soft_label=True if label_smooth_eps else False)
weighted_cost = cost * weights
sum_cost = layers.reduce_sum(weighted_cost)
token_num = layers.reduce_sum(weights)
avg_cost = sum_cost / token_num
avg_cost.stop_gradient = True
return sum_cost, avg_cost, predict, token_num
def wrap_encoder(src_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
enc_inputs=None):
"""
The wrapper assembles together all needed layers for the encoder.
"""
if enc_inputs is None:
# This is used to implement independent encoder program in inference.
src_word, src_pos, src_slf_attn_bias = \
make_all_inputs(encoder_data_input_fields)
else:
src_word, src_pos, src_slf_attn_bias = \
enc_inputs
enc_input = prepare_encoder(src_word,
src_pos,
src_vocab_size,
d_model,
max_length,
dropout_rate,
word_emb_param_name=word_emb_param_names[0])
enc_output = encoder(enc_input, src_slf_attn_bias, n_layer, n_head, d_key,
d_value, d_model, d_inner_hid, dropout_rate)
return enc_output
def wrap_decoder(trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
dec_inputs=None,
enc_output=None,
caches=None):
"""
The wrapper assembles together all needed layers for the decoder.
"""
if dec_inputs is None:
# This is used to implement independent decoder program in inference.
trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, \
enc_output = make_all_inputs(
decoder_data_input_fields)
else:
trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs
dec_input = prepare_decoder(trg_word,
trg_pos,
trg_vocab_size,
d_model,
max_length,
dropout_rate,
word_emb_param_name=word_emb_param_names[0]
if weight_sharing else word_emb_param_names[1])
dec_output = decoder(dec_input,
enc_output,
trg_slf_attn_bias,
trg_src_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
caches=caches)
# Return logits for training and probs for inference.
if weight_sharing:
predict = layers.matmul(x=dec_output,
y=fluid.framework._get_var(
word_emb_param_names[0]),
transpose_y=True)
else:
predict = layers.fc(input=dec_output,
size=trg_vocab_size,
num_flatten_dims=2,
param_attr=const_para_attr,
bias_attr=const_bias_attr)
if dec_inputs is None:
predict = layers.softmax(predict)
return predict
def fast_decode(
src_vocab_size,
trg_vocab_size,
max_in_len,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
beam_size,
max_out_len,
eos_idx,
):
"""
Use beam search to decode. Caches will be used to store states of history
steps which can make the decoding faster.
"""
enc_output = wrap_encoder(src_vocab_size, max_in_len, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid,
dropout_rate, weight_sharing)
start_tokens, init_scores, trg_src_attn_bias = \
make_all_inputs(fast_decoder_data_input_fields )
def beam_search():
max_len = layers.fill_constant(shape=[1],
dtype=start_tokens.dtype,
value=max_out_len)
step_idx = layers.fill_constant(shape=[1],
dtype=start_tokens.dtype,
value=0)
cond = layers.less_than(x=step_idx, y=max_len)
while_op = layers.While(cond)
# array states will be stored for each step.
ids = layers.array_write(layers.reshape(start_tokens, (-1, 1)),
step_idx)
scores = layers.array_write(init_scores, step_idx)
        # cell states will be overwritten at each step.
# caches contains states of history steps to reduce redundant
# computation in decoder.
caches = [{
"k":
layers.fill_constant_batch_size_like(input=start_tokens,
shape=[-1, 0, d_model],
dtype=enc_output.dtype,
value=0),
"v":
layers.fill_constant_batch_size_like(input=start_tokens,
shape=[-1, 0, d_model],
dtype=enc_output.dtype,
value=0)
} for i in range(n_layer)]
with while_op.block():
pre_ids = layers.array_read(array=ids, i=step_idx)
pre_ids = layers.reshape(pre_ids, (-1, 1, 1))
pre_scores = layers.array_read(array=scores, i=step_idx)
# sequence_expand can gather sequences according to lod thus can be
# used in beam search to sift states corresponding to selected ids.
pre_src_attn_bias = layers.sequence_expand(x=trg_src_attn_bias,
y=pre_scores)
pre_enc_output = layers.sequence_expand(x=enc_output, y=pre_scores)
pre_caches = [{
"k":
layers.sequence_expand(x=cache["k"], y=pre_scores),
"v":
layers.sequence_expand(x=cache["v"], y=pre_scores),
} for cache in caches]
pre_pos = layers.elementwise_mul(
x=layers.fill_constant_batch_size_like(
input=
pre_enc_output, # can't use pre_ids here since it has lod
value=1,
shape=[-1, 1, 1],
dtype=pre_ids.dtype),
y=layers.increment(x=step_idx, value=1.0, in_place=False),
axis=0)
logits = wrap_decoder(trg_vocab_size,
max_in_len,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
weight_sharing,
dec_inputs=(pre_ids, pre_pos, None,
pre_src_attn_bias),
enc_output=pre_enc_output,
caches=pre_caches)
logits = layers.reshape(logits, (-1, trg_vocab_size))
topk_scores, topk_indices = layers.topk(
input=layers.softmax(logits), k=beam_size)
accu_scores = layers.elementwise_add(x=layers.log(topk_scores),
y=layers.reshape(pre_scores,
shape=[-1]),
axis=0)
# beam_search op uses lod to distinguish branches.
topk_indices = layers.lod_reset(topk_indices, pre_ids)
selected_ids, selected_scores = layers.beam_search(
pre_ids=pre_ids,
pre_scores=pre_scores,
ids=topk_indices,
scores=accu_scores,
beam_size=beam_size,
end_id=eos_idx)
layers.increment(x=step_idx, value=1.0, in_place=True)
# update states
layers.array_write(selected_ids, i=step_idx, array=ids)
layers.array_write(selected_scores, i=step_idx, array=scores)
layers.assign(pre_src_attn_bias, trg_src_attn_bias)
layers.assign(pre_enc_output, enc_output)
for i in range(n_layer):
layers.assign(pre_caches[i]["k"], caches[i]["k"])
layers.assign(pre_caches[i]["v"], caches[i]["v"])
length_cond = layers.less_than(x=step_idx, y=max_len)
finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))
layers.logical_and(x=length_cond, y=finish_cond, out=cond)
finished_ids, finished_scores = layers.beam_search_decode(
ids, scores, beam_size=beam_size, end_id=eos_idx)
return finished_ids, finished_scores
finished_ids, finished_scores = beam_search()
return finished_ids, finished_scores
def get_model(is_dist, is_async):
sum_cost, avg_cost, predict, token_num = transformer(
ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size,
ModelHyperParams.max_length + 1, ModelHyperParams.n_layer,
ModelHyperParams.n_head, ModelHyperParams.d_key,
ModelHyperParams.d_value, ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid, ModelHyperParams.dropout,
ModelHyperParams.weight_sharing, TrainTaskConfig.label_smooth_eps)
local_lr_scheduler = LearningRateScheduler(ModelHyperParams.d_model,
TrainTaskConfig.warmup_steps,
TrainTaskConfig.learning_rate)
# Context to do validation.
test_program = fluid.default_main_program().clone(for_test=True)
if not is_dist:
optimizer = fluid.optimizer.Adam(
learning_rate=local_lr_scheduler.learning_rate,
beta1=TrainTaskConfig.beta1,
beta2=TrainTaskConfig.beta2,
epsilon=TrainTaskConfig.eps)
optimizer.minimize(sum_cost)
elif is_async:
optimizer = fluid.optimizer.SGD(0.003)
optimizer.minimize(sum_cost)
else:
lr_decay = fluid.layers\
.learning_rate_scheduler\
.noam_decay(ModelHyperParams.d_model,
TrainTaskConfig.warmup_steps)
optimizer = fluid.optimizer.Adam(learning_rate=lr_decay,
beta1=TrainTaskConfig.beta1,
beta2=TrainTaskConfig.beta2,
epsilon=TrainTaskConfig.eps)
optimizer.minimize(sum_cost)
return sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program
def update_args():
src_dict = DataReader.load_dict(TrainTaskConfig.src_vocab_fpath)
trg_dict = DataReader.load_dict(TrainTaskConfig.trg_vocab_fpath)
dict_args = [
"src_vocab_size",
str(len(src_dict)), "trg_vocab_size",
str(len(trg_dict)), "bos_idx",
str(src_dict[TrainTaskConfig.special_token[0]]), "eos_idx",
str(src_dict[TrainTaskConfig.special_token[1]]), "unk_idx",
str(src_dict[TrainTaskConfig.special_token[2]])
]
merge_cfg_from_list(dict_args, [TrainTaskConfig, ModelHyperParams])
class DistTransformer2x2(TestDistRunnerBase):
def run_pserver(self, args):
get_model(True, not args.sync_mode)
t = self.get_transpiler(args.trainer_id, fluid.default_main_program(),
args.endpoints, args.trainers, args.sync_mode)
pserver_prog = t.get_pserver_program(args.current_endpoint)
startup_prog = t.get_startup_program(args.current_endpoint,
pserver_prog)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
exe.run(pserver_prog)
def run_trainer(self, args):
TrainTaskConfig.use_gpu = args.use_cuda
sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program = get_model(
args.is_dist, not args.sync_mode)
if args.is_dist:
t = self.get_transpiler(args.trainer_id,
fluid.default_main_program(),
args.endpoints, args.trainers,
args.sync_mode)
trainer_prog = t.get_trainer_program()
TrainTaskConfig.batch_size = 10
TrainTaskConfig.train_file_pattern = TrainTaskConfig.data_path + "train.tok.clean.bpe.32000.en-de.train_{}".format(
args.trainer_id)
else:
TrainTaskConfig.batch_size = 20
trainer_prog = fluid.default_main_program()
if args.use_cuda:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
startup_exe = fluid.Executor(place)
TrainTaskConfig.local = not args.is_dist
train_loop(startup_exe, trainer_prog, 1, sum_cost, avg_cost,
local_lr_scheduler, token_num, predict, test_program)
if __name__ == "__main__":
update_args()
runtime_main(DistTransformer2x2)
avg_line_length: 37.694876 | max_line_length: 127 | alphanum_fraction: 0.588246

hexsha: 225e732b84035265a317ecc34da2ac703e550fe0 | size: 1,185 | ext: py | lang: Python
max_stars_repo_path: main.py | max_stars_repo_name: Deepanjalli/job_portal6 | max_stars_repo_head_hexsha: 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: main.py | max_issues_repo_name: Deepanjalli/job_portal6 | max_issues_repo_head_hexsha: 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-06-06T01:42:22.000Z | max_issues_repo_issues_event_max_datetime: 2021-09-08T01:50:57.000Z
max_forks_repo_path: main.py | max_forks_repo_name: Deepanjalli/job_portal6 | max_forks_repo_head_hexsha: 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import os
#import magic
import urllib.request
from app import app
from flask import Flask, flash, request, redirect, render_template
from werkzeug.utils import secure_filename
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
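# Illustrative behaviour of the check above (not part of the original app):
#   allowed_file('resume.PDF')  -> True   ('pdf' is whitelisted, case-insensitive)
#   allowed_file('script.exe')  -> False  (extension not in ALLOWED_EXTENSIONS)
#   allowed_file('noextension') -> False  (no '.' in the filename)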
@app.route('/')
def upload_form():
return render_template('register.html')
@app.route('/', methods=['POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
flash('File successfully uploaded')
return redirect('/')
else:
flash('Allowed file types are txt, pdf, png, jpg, jpeg, gif')
return redirect(request.url)
if __name__ == "__main__":
app.debug = True
app.run()
avg_line_length: 29.625 | max_line_length: 84 | alphanum_fraction: 0.699578

hexsha: 3244dece2f3802a3633f68949939a2369ca8a76c | size: 977 | ext: py | lang: Python
max_stars_repo_path: api/medicine_api/management/commands/link_database_populator.py | max_stars_repo_name: maxwelld90/medicine-for-ukraine | max_stars_repo_head_hexsha: f10152929a3827b9de055d993103b352959f3b33 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2022-03-21T14:00:27.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-21T17:17:51.000Z
max_issues_repo_path: api/medicine_api/management/commands/link_database_populator.py | max_issues_repo_name: maxwelld90/medicine-for-ukraine | max_issues_repo_head_hexsha: f10152929a3827b9de055d993103b352959f3b33 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 19 | max_issues_repo_issues_event_min_datetime: 2022-03-21T14:14:26.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T07:49:40.000Z
max_forks_repo_path: api/medicine_api/management/commands/link_database_populator.py | max_forks_repo_name: maxwelld90/medicine-for-ukraine | max_forks_repo_head_hexsha: f10152929a3827b9de055d993103b352959f3b33 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2022-03-02T13:56:30.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-22T10:49:00.000Z
content:
import os
import logging
from medicine_api import models
from medicine_api.readers.price_reader import PriceReader
from django.core.management.base import BaseCommand
logger = logging.getLogger('medicine_api.readers.link_database_populator')
class Command(BaseCommand):
"""
Runs the CRON job to (re)populate the LinkMetadata model with refreshed information from the spreadsheet source.
"""
help = '(Re)populates the Medicine for Ukraine links database table with updated information.'
def handle(self, *args, **options):
logger.info('Link Database Checks Starting')
price_reader = PriceReader()
data = price_reader.get_link_data()
for item in data:
metadata_object = models.LinkMetadata()
metadata_object.set_from_link_data(item)
logger.info('Link Database Checks Complete')
self.stdout.write(self.style.SUCCESS('Link checking successfully completed.'))
avg_line_length: 36.185185 | max_line_length: 116 | alphanum_fraction: 0.714432

hexsha: 22e631daa8890fc280d5abef1e78f6c9eed5c87c | size: 481 | ext: py | lang: Python
max_stars_repo_path: src/queries/hacker_news_comments.py | max_stars_repo_name: llazarmk/hacker_news_comments_processing | max_stars_repo_head_hexsha: 8a3b8206e7bfaa7847a85deb05bf721f0f64772a | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/queries/hacker_news_comments.py | max_issues_repo_name: llazarmk/hacker_news_comments_processing | max_issues_repo_head_hexsha: 8a3b8206e7bfaa7847a85deb05bf721f0f64772a | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/queries/hacker_news_comments.py | max_forks_repo_name: llazarmk/hacker_news_comments_processing | max_forks_repo_head_hexsha: 8a3b8206e7bfaa7847a85deb05bf721f0f64772a | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
def get_hacker_news_comments_query(table):
query = f"""SELECT comments.id AS comment_id,
comments.parent AS comment_story_id,
comments.text AS comment_text,
comments.by AS comment_author,
DATE(comments.timestamp) AS comment_date
FROM `{table}` as comments
WHERE comments.text IS NOT NULL AND RAND() <= 0.1
"""
return query
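# Hypothetical usage sketch (the table name and client call below are
# assumptions, not taken from this repository): the formatted SQL string can be
# handed to a BigQuery client, e.g.
#   query = get_hacker_news_comments_query(
#       "bigquery-public-data.hacker_news.comments")
#   # rows = google.cloud.bigquery.Client().query(query).result()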
avg_line_length: 40.083333 | max_line_length: 65 | alphanum_fraction: 0.532225

hexsha: 22aa86c670e26451dc9ec0d740610b8bb2d5aa6b | size: 370 | ext: py | lang: Python
max_stars_repo_path: parking_lot/entities/merchant.py | max_stars_repo_name: kuntalchandra/parking_lot | max_stars_repo_head_hexsha: 7990162c15b7f6bd33e5c0e99544b9abe1c76940 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: parking_lot/entities/merchant.py | max_issues_repo_name: kuntalchandra/parking_lot | max_issues_repo_head_hexsha: 7990162c15b7f6bd33e5c0e99544b9abe1c76940 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: parking_lot/entities/merchant.py | max_forks_repo_name: kuntalchandra/parking_lot | max_forks_repo_head_hexsha: 7990162c15b7f6bd33e5c0e99544b9abe1c76940 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from datetime import datetime
class Merchant:
def __init__(self, id: int, name: str, registered_at: datetime):
self.id = id
self.name = name
self.registered_at = registered_at
def get_name(self):
return self.name
def get_id(self):
return self.id
def get_registered_at(self):
return self.registered_at
avg_line_length: 20.555556 | max_line_length: 68 | alphanum_fraction: 0.643243

hexsha: 377d7f373cd3505587ad6b5d0f8343402185bea2 | size: 17,215 | ext: py | lang: Python
max_stars_repo_path: IPython/utils/_tokenize_py2.py | max_stars_repo_name: pyarnold/ipython | max_stars_repo_head_hexsha: c4797f7f069d0a974ddfa1e4251c7550c809dba0 | max_stars_repo_licenses: ["BSD-3-Clause-Clear"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-12-18T01:07:55.000Z | max_stars_repo_stars_event_max_datetime: 2020-12-18T01:07:55.000Z
max_issues_repo_path: IPython/utils/_tokenize_py2.py | max_issues_repo_name: pyarnold/ipython | max_issues_repo_head_hexsha: c4797f7f069d0a974ddfa1e4251c7550c809dba0 | max_issues_repo_licenses: ["BSD-3-Clause-Clear"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: IPython/utils/_tokenize_py2.py | max_forks_repo_name: pyarnold/ipython | max_forks_repo_head_hexsha: c4797f7f069d0a974ddfa1e4251c7550c809dba0 | max_forks_repo_licenses: ["BSD-3-Clause-Clear"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""Patched version of standard library tokenize, to deal with various bugs.
Patches
- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
manually applied.
- Newlines in comments and blank lines should be either NL or NEWLINE, depending
on whether they are in a multi-line statement. Filed as Python issue #17061.
-------------------------------------------------------------------------------
Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
from __future__ import print_function
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger')
import string
import re
from token import *
import token
__all__ = [x for x in dir(token) if not x.startswith("_")]
__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
del x
del token
__all__ += ["TokenError"]
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices):
return '(' + '|'.join(choices) + ')'
def any(*choices):
return group(*choices) + '*'
def maybe(*choices):
return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"'):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception):
pass
class StopTokenizing(Exception):
pass
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
srow, scol = srow_scol
erow, ecol = erow_ecol
print("%d,%d-%d,%d:\t%s\t%s" %
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
    def add_whitespace(self, tok_type, start):
row, col = start
assert row >= self.prev_row
col_offset = col - self.prev_col
if col_offset > 0:
self.tokens.append(" " * col_offset)
elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
# Line was backslash-continued
self.tokens.append(" ")
def untokenize(self, tokens):
iterable = iter(tokens)
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end = t[:4]
            self.add_whitespace(tok_type, start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
# This import is here to avoid problems when the itertools
# module is not built yet and tokenize is imported.
from itertools import chain
startline = False
prevstring = False
indents = []
toks_append = self.tokens.append
for tok in chain([token], iterable):
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
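# Added round-trip sketch: it demonstrates the limited-input invariant from the
# docstring above using only this module's own functions (generate_tokens is
# defined further down and is only looked up when the helper is called). It
# assumes a unicode source string.
def _untokenize_roundtrip_example(source):
    import io
    pairs = [tok[:2] for tok in generate_tokens(io.StringIO(source).readline)]
    regenerated = untokenize(pairs)
    again = [tok[:2] for tok in generate_tokens(io.StringIO(regenerated).readline)]
    # The regenerated text tokenizes back to the same (type, string) pairs.
    return again == pairs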
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum += 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line:
break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column // tabsize + 1) * tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NEWLINE, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield (NEWLINE, line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (NL if parenlev > 0 else NEWLINE,
token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos + 1), line)
pos += 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1:
tokenize(open(sys.argv[1]).readline)
else:
tokenize(sys.stdin.readline)
| 37.342733
| 80
| 0.512809
|
a92d9dde939f0ffa8c8b7906ce4cf432a94739f7
| 448
|
py
|
Python
|
微信聊天记录/utils/string_utils.py
|
xingag/spider_python
|
80668005f1416dab04c25569b35b679a2a6b2e5d
|
[
"Apache-2.0"
] | 762
|
2018-11-22T04:40:15.000Z
|
2022-03-31T03:53:33.000Z
|
微信聊天记录/utils/string_utils.py
|
xingag/spider_python
|
80668005f1416dab04c25569b35b679a2a6b2e5d
|
[
"Apache-2.0"
] | 15
|
2019-07-22T17:57:58.000Z
|
2022-03-11T23:36:46.000Z
|
微信聊天记录/utils/string_utils.py
|
xingag/spider_python
|
80668005f1416dab04c25569b35b679a2a6b2e5d
|
[
"Apache-2.0"
] | 414
|
2018-12-18T00:33:18.000Z
|
2022-03-31T15:02:04.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: StringUtils.py
@time: 2020-04-11 18:39
@description:TODO
"""
import re
def get_ava_string(str):
"""
    Strip special symbols, keeping only normal content (spaces, CJK characters, digits and ASCII letters)
:param str:
:return:
"""
return re.sub(u"([^ \u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])", "", str)
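# Quick illustrative check (added; not in the original utility): the pattern
# above keeps spaces, CJK characters, digits and ASCII letters and removes
# everything else.
if __name__ == '__main__':
    assert get_ava_string("Hello, 世界! 2024") == "Hello 世界 2024"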
| 17.92
| 89
| 0.629464
|
bdfe54dc9eefb56d8774f768df93322b94474a77
| 4,714
|
py
|
Python
|
models/gat.py
|
anshu0612/acmmm21_human_attributes_prediction_in_privacy_conditions
|
98cfc1820952e7d65520b029a53043fdf2d6c005
|
[
"MIT"
] | 6
|
2021-09-01T07:44:59.000Z
|
2021-12-11T11:32:59.000Z
|
models/gat.py
|
anshu0612/human_attributes_prediction_in_privacy_conditions
|
98cfc1820952e7d65520b029a53043fdf2d6c005
|
[
"MIT"
] | null | null | null |
models/gat.py
|
anshu0612/human_attributes_prediction_in_privacy_conditions
|
98cfc1820952e7d65520b029a53043fdf2d6c005
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class _GraphAttentionLayer(nn.Module):
'''
Part of code borrows from https://github.com/Diego999/pyGAT/blob/master/layers.py
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
'''
def __init__(self, in_feat, out_feat, dropout, alpha, concat=True):
super(_GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_feat = in_feat
self.out_feat = out_feat
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_feat, out_feat)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2*out_feat, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
N = h.size()[1]
batch = h.size()[0]
a_input = torch.cat([h.repeat(1, 1, N).view(
batch, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch, N, -1, 2 * self.out_feat)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=2)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_feat) + ' -> ' + str(self.out_feat) + ')'
class _GAT(nn.Module):
'''
Dense version of GAT.
'''
def __init__(self, num_feat, num_hidd, num_class, dropout, alpha, num_heads):
super(_GAT, self).__init__()
self.dropout = dropout
self.attentions = [_GraphAttentionLayer(
num_feat, num_hidd, dropout=dropout, alpha=alpha, concat=True) for _ in range(num_heads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = _GraphAttentionLayer(
num_hidd * num_heads, num_class, dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=2)
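# Shape-check sketch (added for clarity). The hyper-parameters below are
# arbitrary and only illustrate the expected tensor shapes; they are not the
# values used by VisualRelationshipStream.
def _gat_shape_example():
    batch, nodes, in_feat = 2, 5, 16
    gat = _GAT(num_feat=in_feat, num_hidd=8, num_class=4,
               dropout=0.5, alpha=0.2, num_heads=3)
    x = torch.randn(batch, nodes, in_feat)
    adj = torch.ones(nodes, nodes)  # fully connected graph
    out = gat(x, adj)  # per-node log-probabilities
    assert out.shape == (batch, nodes, 4)
    return out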
class VisualRelationshipStream(nn.Module):
def __init__(self, device):
super(VisualRelationshipStream, self).__init__()
_context_reshape_size = 180
self.context_pool = nn.Sequential(
nn.AdaptiveMaxPool2d(_context_reshape_size),
nn.Conv2d(3, 1, 1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True)
)
self.target_feat = nn.Sequential(
nn.AdaptiveMaxPool2d((60, 15)),
nn.Conv2d(3, 1, 1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True)
)
self._patch_size = 30
_n_patches = (_context_reshape_size // self._patch_size)
_nodes = (_n_patches * _n_patches) + 1 # extra node for the target
_gat_in_feat = self._patch_size * self._patch_size
self.gat = _GAT(num_feat=_gat_in_feat,
num_hidd=128,
num_class=256,
dropout=0.5,
num_heads=3,
alpha=0.2)
self.adj = torch.ones([_nodes, _nodes], dtype=torch.int32).to(device)
    def _generate_image_patches(self, size, images):
_channel_dim = images.size()[1]
patches = images.unfold(1, _channel_dim, _channel_dim).unfold(
2, size, size).unfold(3, size, size)
s_ = patches.size()
        # flatten to (batch_size, num_patches, patch_size * patch_size), e.g. 36 nodes of 900 features each for a 180x180 context
patches = patches.reshape(s_[0], s_[1]*s_[2]*s_[3], s_[4]*s_[5]*s_[6])
return patches
def forward(self, context, target):
_batch = context.size()[0]
context = self.context_pool(context)
        patched_context = self._generate_image_patches(
self._patch_size, context)
target = self.target_feat(target)
target = target.view(_batch, -1)
target = target.unsqueeze(1)
patched_context = torch.cat((patched_context, target), 1)
patched_context = self.gat(patched_context, self.adj)
patched_context = torch.mean(patched_context, 1)
return patched_context
| 35.443609
| 101
| 0.604794
|
ac682a26d10895b705fcc00eb7a4ae7d356bb1dc
| 94
|
py
|
Python
|
skypy/cluster/__init__.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-28T18:00:24.000Z
|
2020-12-28T18:00:24.000Z
|
skypy/cluster/__init__.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | 2
|
2020-12-28T20:14:40.000Z
|
2020-12-28T21:49:27.000Z
|
skypy/cluster/__init__.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module contains methods that model the properties of galaxy cluster
populations.
"""
| 18.8
| 72
| 0.776596
|
b0e7cff87d8a247f2001cc1033118817f6c37096
| 948
|
py
|
Python
|
rulesets/onboarding/on-gcs-csv-upload/__deploy__.py
|
airspot-dev/iot-demo
|
5f8c1877192043f4118b102ad1f71326d40de858
|
[
"Apache-2.0"
] | 1
|
2021-06-22T10:26:54.000Z
|
2021-06-22T10:26:54.000Z
|
rulesets/onboarding/on-gcs-csv-upload/__deploy__.py
|
airspot-dev/iot-demo
|
5f8c1877192043f4118b102ad1f71326d40de858
|
[
"Apache-2.0"
] | null | null | null |
rulesets/onboarding/on-gcs-csv-upload/__deploy__.py
|
airspot-dev/iot-demo
|
5f8c1877192043f4118b102ad1f71326d40de858
|
[
"Apache-2.0"
] | 1
|
2021-09-20T11:56:50.000Z
|
2021-09-20T11:56:50.000Z
|
name = "on-gcs-csv-upload"
add_files = (
"ruleset.py",
)
add_modules = True # find modules in directory (folders having __init__.py file) and add them to container
extra_commands = (
("RUN", "pip install google-cloud-storage==1.20.0"),
("RUN", "pip install cloudstorage==0.10.0")
)
labels = {
"networking.knative.dev/visibility": "cluster-local",
"krules.airspot.dev/type": "ruleset",
"krules.airspot.dev/ruleset": name,
"configs.krules.airspot.dev/google-cloud": "inject"
}
template_annotations = {
"autoscaling.knative.dev/minScale": "1",
}
#service_account = "my-service-account"
triggers = (
{
"name": "on-gcs-csv-upload-errors",
"broker": "default",
"filter": {
"attributes": {
"type": "on-gcs-csv-upload-errors"
}
}
},
)
triggers_default_broker = "default"
ksvc_sink = "broker:default"
ksvc_procevents_sink = "broker:procevents"
| 21.066667
| 107
| 0.627637
|
1fa5af8e9ca21cc243249470b10ea5d9ae849b01
| 4,779
|
py
|
Python
|
test/functional/tiertwo_masternode_ping.py
|
Simple-Software-Solutions/RBX-Core
|
8cf0dfda708233e080e8729cec0b5014218386e3
|
[
"MIT"
] | null | null | null |
test/functional/tiertwo_masternode_ping.py
|
Simple-Software-Solutions/RBX-Core
|
8cf0dfda708233e080e8729cec0b5014218386e3
|
[
"MIT"
] | null | null | null |
test/functional/tiertwo_masternode_ping.py
|
Simple-Software-Solutions/RBX-Core
|
8cf0dfda708233e080e8729cec0b5014218386e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The PIVX Developers
# Copyright (c) 2020 The Rubus Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import RbxTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
Decimal,
p2p_port,
)
import os
import time
"""
Test checking masternode ping thread
Does not use functions of RbxTier2TestFramework as we don't want to send
pings on demand. Here, instead, mocktime is disabled, and we just wait with
time.sleep to verify that masternodes send pings correctly.
"""
class MasternodePingTest(RbxTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# 0=miner 1=mn_owner 2=mn_remote
self.num_nodes = 3
def run_test(self):
miner = self.nodes[0]
owner = self.nodes[1]
remote = self.nodes[2]
mnPrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"
self.log.info("generating 141 blocks...")
miner.generate(141)
self.sync_blocks()
# Create collateral
self.log.info("funding masternode controller...")
masternodeAlias = "mnode"
mnAddress = owner.getnewaddress(masternodeAlias)
collateralTxId = miner.sendtoaddress(mnAddress, Decimal('10000'))
miner.generate(2)
self.sync_blocks()
time.sleep(1)
collateral_rawTx = owner.getrawtransaction(collateralTxId, 1)
assert_equal(owner.getbalance(), Decimal('10000'))
assert_greater_than(collateral_rawTx["confirmations"], 0)
# Block time can be up to median time past +1. We might need to wait...
wait_time = collateral_rawTx["time"] - int(time.time())
if wait_time > 0:
self.log.info("Sleep %d seconds to catch up with the chain..." % wait_time)
time.sleep(wait_time)
# Setup controller
self.log.info("controller setup...")
o = owner.getmasternodeoutputs()
assert_equal(len(o), 1)
assert_equal(o[0]["txhash"], collateralTxId)
vout = o[0]["outputidx"]
self.log.info("collateral accepted for "+ masternodeAlias +". Updating masternode.conf...")
confData = masternodeAlias + " 127.0.0.1:" + str(p2p_port(2)) + " " + \
str(mnPrivkey) + " " + str(collateralTxId) + " " + str(vout)
destPath = os.path.join(self.options.tmpdir, "node1", "regtest", "masternode.conf")
with open(destPath, "a+") as file_object:
file_object.write("\n")
file_object.write(confData)
# Init remote
self.log.info("initializing remote masternode...")
remote.initmasternode(mnPrivkey, "127.0.0.1:" + str(p2p_port(2)))
# sanity check, verify that we are not in IBD
for i in range(0, len(self.nodes)):
node = self.nodes[i]
if (node.getblockchaininfo()['initial_block_downloading']):
raise AssertionError("Error, node(%s) shouldn't be in IBD." % str(i))
# Wait until mnsync is complete (max 120 seconds)
self.log.info("waiting to complete mnsync...")
start_time = time.time()
self.wait_until_mnsync_finished()
self.log.info("MnSync completed in %d seconds" % (time.time() - start_time))
miner.generate(1)
self.sync_blocks()
time.sleep(1)
# Send Start message
self.log.info("sending masternode broadcast...")
self.controller_start_masternode(owner, masternodeAlias)
miner.generate(1)
self.sync_blocks()
time.sleep(1)
# Wait until masternode is enabled everywhere (max 180 secs)
self.log.info("waiting till masternode gets enabled...")
start_time = time.time()
time.sleep(5)
self.wait_until_mn_enabled(collateralTxId, 180)
self.log.info("Masternode enabled in %d seconds" % (time.time() - start_time))
self.log.info("Good. Masternode enabled")
miner.generate(1)
self.sync_blocks()
time.sleep(1)
last_seen = [self.get_mn_lastseen(node, collateralTxId) for node in self.nodes]
self.log.info("Current lastseen: %s" % str(last_seen))
self.log.info("Waiting 2 * 25 seconds and check new lastseen...")
time.sleep(50)
new_last_seen = [self.get_mn_lastseen(node, collateralTxId) for node in self.nodes]
self.log.info("New lastseen: %s" % str(new_last_seen))
for i in range(self.num_nodes):
assert_greater_than(new_last_seen[i], last_seen[i])
self.log.info("All good.")
if __name__ == '__main__':
MasternodePingTest().main()
| 37.928571
| 99
| 0.644068
|
630ac008f07d95486834762ae3d04700f6629255
| 928
|
py
|
Python
|
listings/kafka_utils.py
|
fabiofumarola/scraper
|
d848f71c6c1b68c116fdbe63f9f9b2b3def87441
|
[
"Apache-2.0"
] | null | null | null |
listings/kafka_utils.py
|
fabiofumarola/scraper
|
d848f71c6c1b68c116fdbe63f9f9b2b3def87441
|
[
"Apache-2.0"
] | 3
|
2016-07-13T19:42:42.000Z
|
2016-08-17T13:15:55.000Z
|
listings/kafka_utils.py
|
fabiofumarola/scraper
|
d848f71c6c1b68c116fdbe63f9f9b2b3def87441
|
[
"Apache-2.0"
] | null | null | null |
from kafka import KafkaConsumer, KafkaProducer
import json
class KafkaListing(object):
def __init__(self, bootstrap_servers):
        # use the constructor argument rather than a hard-coded address
        self.bootstrap_servers = bootstrap_servers or '127.0.0.1:9092'
        self.topic = 'raw_home_listings'
def producer(self):
return Producer(self.bootstrap_servers, self.topic)
def consumer(self):
consumer = KafkaConsumer(bootstrap_servers=self.bootstrap_servers)
consumer.subscribe([self.topic])
return consumer
class Producer(object):
def __init__(self, bootstrap_servers, topic):
        # honour the constructor arguments instead of hard-coded defaults
        self.bootstrap_servers = bootstrap_servers or '127.0.0.1:9092'
        self.topic = topic or 'raw_home_listings'
        self.producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers)
def send(self, record):
key = bytes(record['src'] + ":" + record['id'], 'utf-8')
value = json.dumps(record).encode('utf-8')
return self.producer.send(self.topic, key=key, value=value)
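# Usage sketch (added). The broker address, topic and record fields below are
# assumptions based on the defaults in this module, not a verified deployment.
if __name__ == '__main__':
    listing = KafkaListing(bootstrap_servers='127.0.0.1:9092')
    producer = listing.producer()
    future = producer.send({'src': 'example', 'id': '42', 'price': 100000})
    metadata = future.get(timeout=10)  # kafka-python FutureRecordMetadata
    print(metadata.topic, metadata.offset)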
| 32
| 74
| 0.679957
|
e48f21f1a166d8e46d0f930b40380f4a41595b3c
| 1,527
|
py
|
Python
|
appdev/package.py
|
timshannon/freehold
|
3d054738f9d2dcb91543ce2a369af647b910a621
|
[
"MIT"
] | 25
|
2016-11-25T16:49:56.000Z
|
2022-02-20T20:13:31.000Z
|
appdev/package.py
|
fidget77/freehold
|
3d054738f9d2dcb91543ce2a369af647b910a621
|
[
"MIT"
] | 2
|
2018-09-21T10:53:24.000Z
|
2019-11-25T14:09:43.000Z
|
appdev/package.py
|
fidget77/freehold
|
3d054738f9d2dcb91543ce2a369af647b910a621
|
[
"MIT"
] | 7
|
2017-02-26T11:00:09.000Z
|
2021-01-08T10:46:19.000Z
|
#!/usr/bin/env python
import sys
import subprocess
import os
import shutil
def package(app):
if not isApp(app):
return
print "packaging " + app
tmpDir = "."+app+"_tmp"
#copy to temp folder
shutil.copytree(app, tmpDir)
os.chdir(tmpDir)
if not makeFolder("./"):
#minify
minifyFolder("./")
#zip
subprocess.call(["zip", "-r", "../../application/available/"+app+".zip", ".", "-x", ".*"])
os.chdir("..")
#cleanup temp folder
shutil.rmtree(tmpDir)
def isApp(folder):
for path, _, files in os.walk(folder):
for f in files:
if f == "app.json":
return True
return False
def makeFolder(folder):
for path, _, files in os.walk(folder):
for f in files:
if f == "makefile":
subprocess.call(["make"])
return True
def minifyFolder(folder):
for path, _, files in os.walk(folder):
for f in files:
_, ext = os.path.splitext(f)
if ext == ".js":
minify(os.path.join(path, f))
def minify(filename):
print "minify file " + filename
subprocess.call(["uglify", filename, "-c", "-o", filename])
if len(sys.argv) > 1:
#package passed in app
app = sys.argv[1]
if app.endswith("/"):
app = app.rstrip("/")
package(app)
else:
#package all apps in appdev dir
apps = [f for f in os.listdir("./") if not os.path.isfile(f)]
for app in apps:
package(app)
| 21.507042
| 94
| 0.54093
|
0b9b3f881fdce1c6dd12e738e0a95d2ddd264610
| 929
|
py
|
Python
|
nipype/interfaces/nipy/tests/test_auto_Trim.py
|
nicholsn/nipype
|
6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3
|
[
"BSD-3-Clause"
] | 1
|
2018-04-18T12:13:37.000Z
|
2018-04-18T12:13:37.000Z
|
nipype/interfaces/nipy/tests/test_auto_Trim.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/nipy/tests/test_auto_Trim.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T14:31:47.000Z
|
2021-09-08T14:31:47.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.nipy.preprocess import Trim
def test_Trim_inputs():
input_map = dict(begin_index=dict(usedefault=True,
),
end_index=dict(usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(mandatory=True,
),
out_file=dict(),
suffix=dict(usedefault=True,
),
)
inputs = Trim.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Trim_outputs():
output_map = dict(out_file=dict(),
)
outputs = Trim.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 27.323529
| 78
| 0.67169
|
9fb3d2a9aa0ee1d4a8260205d6660497cd9a1f92
| 517
|
py
|
Python
|
Bit Manipulation/power_of_two.py
|
lim1202/LeetCode
|
931c8d5a8d80206fb329dc7792416d45804d2ba3
|
[
"MIT"
] | null | null | null |
Bit Manipulation/power_of_two.py
|
lim1202/LeetCode
|
931c8d5a8d80206fb329dc7792416d45804d2ba3
|
[
"MIT"
] | null | null | null |
Bit Manipulation/power_of_two.py
|
lim1202/LeetCode
|
931c8d5a8d80206fb329dc7792416d45804d2ba3
|
[
"MIT"
] | null | null | null |
# Given an integer, write a function to determine if it is a power of two.
def is_power_of_two(num):
if num < 0:
return False
hasOne = False
while num > 0:
if num & 1:
if hasOne:
return False
else:
hasOne = True
num >>= 1
return hasOne
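# Added note: a positive power of two has exactly one set bit, so the same
# check can be done in constant time with n & (n - 1).
def is_power_of_two_bitwise(num):
    return num > 0 and (num & (num - 1)) == 0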
if __name__ == '__main__':
nums = [2, 3, 4, 8, 12, 16, 24, 32]
for num in nums:
print('Is', num, 'a power of two?', is_power_of_two(num))
| 20.68
| 75
| 0.491296
|
8583d925794a025ba0a55447f501f935910e8319
| 6,484
|
py
|
Python
|
google/cloud/container_v1beta1/types/__init__.py
|
donmccasland/python-container
|
5053428f63792fa822ae28f34c0e35150794d153
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/container_v1beta1/types/__init__.py
|
donmccasland/python-container
|
5053428f63792fa822ae28f34c0e35150794d153
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/container_v1beta1/types/__init__.py
|
donmccasland/python-container
|
5053428f63792fa822ae28f34c0e35150794d153
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .cluster_service import (
AcceleratorConfig,
AddonsConfig,
AuthenticatorGroupsConfig,
AutoprovisioningNodePoolDefaults,
AutoUpgradeOptions,
BinaryAuthorization,
CancelOperationRequest,
ClientCertificateConfig,
CloudRunConfig,
Cluster,
ClusterAutoscaling,
ClusterTelemetry,
ClusterUpdate,
CompleteIPRotationRequest,
ConfidentialNodes,
ConfigConnectorConfig,
CreateClusterRequest,
CreateNodePoolRequest,
DailyMaintenanceWindow,
DatabaseEncryption,
DefaultSnatStatus,
DeleteClusterRequest,
DeleteNodePoolRequest,
DnsCacheConfig,
EphemeralStorageConfig,
GcePersistentDiskCsiDriverConfig,
GetClusterRequest,
GetJSONWebKeysRequest,
GetJSONWebKeysResponse,
GetNodePoolRequest,
GetOpenIDConfigRequest,
GetOpenIDConfigResponse,
GetOperationRequest,
GetServerConfigRequest,
HorizontalPodAutoscaling,
HttpLoadBalancing,
IntraNodeVisibilityConfig,
IPAllocationPolicy,
IstioConfig,
Jwk,
KalmConfig,
KubernetesDashboard,
LegacyAbac,
LinuxNodeConfig,
ListClustersRequest,
ListClustersResponse,
ListLocationsRequest,
ListLocationsResponse,
ListNodePoolsRequest,
ListNodePoolsResponse,
ListOperationsRequest,
ListOperationsResponse,
ListUsableSubnetworksRequest,
ListUsableSubnetworksResponse,
Location,
MaintenancePolicy,
MaintenanceWindow,
Master,
MasterAuth,
MasterAuthorizedNetworksConfig,
MaxPodsConstraint,
NetworkConfig,
NetworkPolicy,
NetworkPolicyConfig,
NodeConfig,
NodeKubeletConfig,
NodeManagement,
NodePool,
NodePoolAutoscaling,
NodeTaint,
NotificationConfig,
Operation,
OperationProgress,
PodSecurityPolicyConfig,
PrivateClusterConfig,
PrivateClusterMasterGlobalAccessConfig,
RecurringTimeWindow,
ReleaseChannel,
ReservationAffinity,
ResourceLimit,
ResourceUsageExportConfig,
RollbackNodePoolUpgradeRequest,
SandboxConfig,
ServerConfig,
SetAddonsConfigRequest,
SetLabelsRequest,
SetLegacyAbacRequest,
SetLocationsRequest,
SetLoggingServiceRequest,
SetMaintenancePolicyRequest,
SetMasterAuthRequest,
SetMonitoringServiceRequest,
SetNetworkPolicyRequest,
SetNodePoolAutoscalingRequest,
SetNodePoolManagementRequest,
SetNodePoolSizeRequest,
ShieldedInstanceConfig,
ShieldedNodes,
StartIPRotationRequest,
StatusCondition,
TimeWindow,
TpuConfig,
UpdateClusterRequest,
UpdateMasterRequest,
UpdateNodePoolRequest,
UpgradeEvent,
UsableSubnetwork,
UsableSubnetworkSecondaryRange,
VerticalPodAutoscaling,
WorkloadIdentityConfig,
WorkloadMetadataConfig,
DatapathProvider,
UpgradeResourceType,
)
__all__ = (
"AcceleratorConfig",
"AddonsConfig",
"AuthenticatorGroupsConfig",
"AutoprovisioningNodePoolDefaults",
"AutoUpgradeOptions",
"BinaryAuthorization",
"CancelOperationRequest",
"ClientCertificateConfig",
"CloudRunConfig",
"Cluster",
"ClusterAutoscaling",
"ClusterTelemetry",
"ClusterUpdate",
"CompleteIPRotationRequest",
"ConfidentialNodes",
"ConfigConnectorConfig",
"CreateClusterRequest",
"CreateNodePoolRequest",
"DailyMaintenanceWindow",
"DatabaseEncryption",
"DefaultSnatStatus",
"DeleteClusterRequest",
"DeleteNodePoolRequest",
"DnsCacheConfig",
"EphemeralStorageConfig",
"GcePersistentDiskCsiDriverConfig",
"GetClusterRequest",
"GetJSONWebKeysRequest",
"GetJSONWebKeysResponse",
"GetNodePoolRequest",
"GetOpenIDConfigRequest",
"GetOpenIDConfigResponse",
"GetOperationRequest",
"GetServerConfigRequest",
"HorizontalPodAutoscaling",
"HttpLoadBalancing",
"IntraNodeVisibilityConfig",
"IPAllocationPolicy",
"IstioConfig",
"Jwk",
"KalmConfig",
"KubernetesDashboard",
"LegacyAbac",
"LinuxNodeConfig",
"ListClustersRequest",
"ListClustersResponse",
"ListLocationsRequest",
"ListLocationsResponse",
"ListNodePoolsRequest",
"ListNodePoolsResponse",
"ListOperationsRequest",
"ListOperationsResponse",
"ListUsableSubnetworksRequest",
"ListUsableSubnetworksResponse",
"Location",
"MaintenancePolicy",
"MaintenanceWindow",
"Master",
"MasterAuth",
"MasterAuthorizedNetworksConfig",
"MaxPodsConstraint",
"NetworkConfig",
"NetworkPolicy",
"NetworkPolicyConfig",
"NodeConfig",
"NodeKubeletConfig",
"NodeManagement",
"NodePool",
"NodePoolAutoscaling",
"NodeTaint",
"NotificationConfig",
"Operation",
"OperationProgress",
"PodSecurityPolicyConfig",
"PrivateClusterConfig",
"PrivateClusterMasterGlobalAccessConfig",
"RecurringTimeWindow",
"ReleaseChannel",
"ReservationAffinity",
"ResourceLimit",
"ResourceUsageExportConfig",
"RollbackNodePoolUpgradeRequest",
"SandboxConfig",
"ServerConfig",
"SetAddonsConfigRequest",
"SetLabelsRequest",
"SetLegacyAbacRequest",
"SetLocationsRequest",
"SetLoggingServiceRequest",
"SetMaintenancePolicyRequest",
"SetMasterAuthRequest",
"SetMonitoringServiceRequest",
"SetNetworkPolicyRequest",
"SetNodePoolAutoscalingRequest",
"SetNodePoolManagementRequest",
"SetNodePoolSizeRequest",
"ShieldedInstanceConfig",
"ShieldedNodes",
"StartIPRotationRequest",
"StatusCondition",
"TimeWindow",
"TpuConfig",
"UpdateClusterRequest",
"UpdateMasterRequest",
"UpdateNodePoolRequest",
"UpgradeEvent",
"UsableSubnetwork",
"UsableSubnetworkSecondaryRange",
"VerticalPodAutoscaling",
"WorkloadIdentityConfig",
"WorkloadMetadataConfig",
"DatapathProvider",
"UpgradeResourceType",
)
| 26.251012
| 74
| 0.730105
|
4b2c09b10701679df9e5486b7df55a879935b63f
| 2,595
|
py
|
Python
|
pychron/tx/factories.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/tx/factories.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/tx/factories.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
import io
import os
from twisted.internet.protocol import Factory
from twisted.logger import Logger
from twisted.logger import jsonFileLogObserver
from pychron.tx.protocols.furnace import FurnaceProtocol
from pychron.tx.protocols.laser import LaserProtocol
from pychron.tx.protocols.valve import ValveProtocol
class LaserFactory(Factory):
_name = None
def __init__(self, application):
self._app = application
def buildProtocol(self, addr):
if self._name is None:
raise NotImplementedError
return LaserProtocol(self._app, self._name, addr, None)
class FusionsCO2Factory(LaserFactory):
_name = "FusionsCO2"
class FusionsDiodeFactory(LaserFactory):
_name = "FusionsDiode"
class FusionsUVFactory(LaserFactory):
_name = "FusionsUV"
class OsTechDiodeFactory(LaserFactory):
_name = "OsTechDiode"
from pychron.paths import paths
path = os.path.join(paths.log_dir, "pps.log.json")
logger = Logger(observer=jsonFileLogObserver(io.open(path, "w")))
class BaseFactory(Factory):
protocol_klass = None
def __init__(self, application=None):
self._app = application
def buildProtocol(self, addr):
if self.protocol_klass is None:
raise NotImplementedError
return self.protocol_klass(self._app, addr, logger)
class ValveFactory(BaseFactory):
protocol_klass = ValveProtocol
class FurnaceFactory(BaseFactory):
protocol_klass = FurnaceProtocol
# ============= EOF =============================================
| 27.315789
| 81
| 0.663584
|
696e2cdc85bab58e2e82c4ea0107a83ab9a710e5
| 325
|
py
|
Python
|
sorting/selectionSort.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 4
|
2021-10-01T13:22:20.000Z
|
2021-10-04T11:39:25.000Z
|
sorting/selectionSort.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 2
|
2021-10-11T16:56:03.000Z
|
2021-10-30T14:25:25.000Z
|
sorting/selectionSort.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 10
|
2021-10-11T12:28:48.000Z
|
2021-10-31T16:37:02.000Z
|
import sys
n = int(input("Enter the size of array : "))
A = list(map(int, input("Enter the array elements :\n").strip().split()))[:n]
for i in range(len(A)):
min_idx = i
for j in range(i+1, len(A)):
if A[min_idx] > A[j]:
min_idx = j
A[i], A[min_idx] = A[min_idx], A[i]
print ("Sorted array")
print(A)
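# Reusable form of the same algorithm (added sketch) for inputs that do not
# come from stdin; O(n^2) comparisons, O(1) extra space.
def selection_sort(arr):
    result = list(arr)
    for i in range(len(result)):
        min_idx = i
        for j in range(i + 1, len(result)):
            if result[j] < result[min_idx]:
                min_idx = j
        result[i], result[min_idx] = result[min_idx], result[i]
    return result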
| 14.130435
| 77
| 0.578462
|
26e9972bb1654c6e9cbd73a776be08b3044e2b7c
| 5,594
|
py
|
Python
|
bamboo/unit_tests/test_unit_layer_softsign.py
|
jonesholger/lbann
|
3214f189a1438565d695542e076c4fa8e7332d34
|
[
"Apache-2.0"
] | 194
|
2016-07-19T15:40:21.000Z
|
2022-03-19T08:06:10.000Z
|
bamboo/unit_tests/test_unit_layer_softsign.py
|
jonesholger/lbann
|
3214f189a1438565d695542e076c4fa8e7332d34
|
[
"Apache-2.0"
] | 1,021
|
2016-07-19T12:56:31.000Z
|
2022-03-29T00:41:47.000Z
|
bamboo/unit_tests/test_unit_layer_softsign.py
|
jonesholger/lbann
|
3214f189a1438565d695542e076c4fa8e7332d34
|
[
"Apache-2.0"
] | 74
|
2016-07-28T18:24:00.000Z
|
2022-01-24T19:41:04.000Z
|
import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(2019102414)
_num_samples = 11
_sample_size = 7
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
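# Added sanity-check sketch: the Python data reader described above only needs
# the three callables defined here, so they can be exercised directly.
def _check_sample_access():
    assert get_sample(0).shape == sample_dims()
    assert num_samples() == _num_samples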
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
dims=tools.str_list(_sample_size)),
lbann.WeightsLayer(weights=x_weights,
dims=tools.str_list(_sample_size)))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Softsign(x, data_layout='data_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
y = x / (1 + np.abs(x))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Model-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Softsign(x, data_layout='model_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='model-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
y = x / (1 + np.abs(x))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
| 29.287958
| 80
| 0.555953
|
ffe182532e59429cdd3fa4496f6bb231ea9a05bf
| 18,092
|
py
|
Python
|
data/rainbow/script1/20210217-121832/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | 1
|
2020-06-14T13:50:28.000Z
|
2020-06-14T13:50:28.000Z
|
data/rainbow/script1/20210217-121832/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
data/rainbow/script1/20210217-121832/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
# Similar to script .
# Uses CompleteEnvironment10dB
# Centralized Learning-Distributed Execution
# Simulates many times, for different number of agents, and take the averages.
# There are different channels to the BS and to the devices.
# Multiple episodes convergence. Everything is in dB.
# One NN is trained and copied to each agent.
from shutil import copyfile
from sys_simulator.general import make_dir_timestamp, save_with_pickle
import matplotlib.pyplot as plt
from sys_simulator.plots import plot_positions_actions_pie
from time import time
from sys_simulator.general import db_to_power, power_to_db
from sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel
from sys_simulator import general as gen
from sys_simulator.q_learning.environments.completeEnvironment10dB \
import CompleteEnvironment10dB
from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent
from sys_simulator.dqn.externalDQNFramework \
import ExternalDQNFramework, RainbowFramework
from sys_simulator.parameters.parameters import \
EnvironmentParameters, TrainingParameters, DQNAgentParameters
from sys_simulator.q_learning.rewards import dis_reward_tensor_db
from copy import deepcopy
import torch
import numpy as np
import pickle
n_mues = 1 # number of mues
n_d2d = 2 # number of d2d pairs
n_rb = n_mues # number of RBs
carrier_frequency = 2.4 # carrier frequency in GHz
bs_radius = 500 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 50 # d2d pair distance in m
device_height = 1.5 # mobile devices height in m
bs_height = 25 # BS antenna height in m
p_max = 40 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_train = 6 # mue sinr threshold in dB for training
mue_margin = 200 # mue margin in dB
# conversions from dBm to dB
p_max = p_max - 30
noise_power = noise_power - 30
# channel parameters
CHANNEL_RND = True
# q-learning parameters
# training
NUMBER = 1
REWARD_FUNCTION = 'classic'
# exec params
STEPS_PER_EPISODE = 25
TEST_STEPS_PER_EPISODE = 25
MAX_NUM_EPISODES = 1000 # medium training
ITERATIONS_PER_NUM_AGENTS = 100
EVAL_EVERY = 150
EVAL_NUM_EPISODES = 100
EVAL_STEPS_PER_EPISODE = 5
# debug params
# STEPS_PER_EPISODE = 2
# TEST_STEPS_PER_EPISODE = 2
# MAX_NUM_EPISODES = 10
# ITERATIONS_PER_NUM_AGENTS = 10
# EVAL_EVERY = 1000
# EVAL_NUM_EPISODES = 2
# EVAL_STEPS_PER_EPISODE = 2
# common
EPSILON_INITIAL = 1
EPSILON_MIN = .05
# EPSILON_DECAY = .9*1e-4 # medium training
EPSILON_DECAY = 1.3/(MAX_NUM_EPISODES*STEPS_PER_EPISODE) # medium training
PRIO_BETA_ITS = int(.8*MAX_NUM_EPISODES*STEPS_PER_EPISODE)
GAMMA = 0.9 # Discount factor
C = 8 # C constant for the improved reward function
TARGET_UPDATE = 20
REPLAY_MEMORY_SIZE = 100000
BATCH_SIZE = 512
HIDDEN_SIZE = 128
NUM_HIDDEN_LAYERS = 1
LEARNING_RATE = 1e-2
REWARD_PENALTY = 1.5
ENVIRONMENT_MEMORY = 10
MAX_NUMBER_OF_AGENTS = 5
max_d2d = MAX_NUMBER_OF_AGENTS
range_n_d2d = range(1, max_d2d + 1, 1)
# more parameters
# linear discretization
# actions = power_to_db(np.linspace(
# db_to_power(p_max-20), db_to_power(p_max-10), 10
# ))
# db discretization
actions = power_to_db(
np.linspace(
1e-6, db_to_power(p_max-10), 10
)
)
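# Added note: with p_max = 10 dB (40 dBm - 30), db_to_power(p_max - 10) is 1,
# so the grid above spans roughly -60 dB to 0 dB, assuming power_to_db is the
# usual 10*log10 conversion; exact values depend on sys_simulator.general.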
env_params = EnvironmentParameters(
rb_bandwidth, d2d_pair_distance, p_max, noise_power,
bs_gain, user_gain, sinr_threshold_train,
n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
)
params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(
EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,
BATCH_SIZE, GAMMA
)
reward_function = dis_reward_tensor_db
channel_to_devices = BANChannel(rnd=CHANNEL_RND)
channel_to_bs = UrbanMacroNLOSWinnerChannel(
rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height
)
ref_env = CompleteEnvironment10dB(
env_params,
channel_to_bs,
channel_to_devices,
reward_penalty=REWARD_PENALTY,
memory=ENVIRONMENT_MEMORY,
bs_height=bs_height,
reward_function=REWARD_FUNCTION
)
# foo env and foo agents stuff
foo_env = deepcopy(ref_env)
foo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]
foo_env.build_scenario(foo_agents)
_, _ = foo_env.step(foo_agents)
env_state_size = foo_env.get_state_size(foo_agents[0])
def train(start):
global actions
framework = RainbowFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
PRIO_BETA_ITS,
NUM_HIDDEN_LAYERS,
LEARNING_RATE,
)
best_reward = float('-inf')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
# aux_range = range(max_d2d+1)[1:]
epsilon = agent_params.start_epsilon
for episode in range(MAX_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
now = (time() - start) / 60
print(
'Training. ' +
f'Number of agents: {n_agents}. ' +
f'Episode: {episode}/{MAX_NUM_EPISODES-1}. ' +
f'Epsilon: {epsilon}. ' +
f'Prio_Beta: {framework.replay_memory._beta}. ' +
f'Elapsed time: {now} minutes.'
)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
for a in agents:
a.set_epsilon(epsilon)
env.build_scenario(agents)
obs, _ = env.step(agents)
total_reward = 0.0
i = 0
bag = list()
while True:
if i >= params.steps_per_episode:
break
else:
past_actions = torch.zeros([len(agents)], device=device)
for j, agent in enumerate(agents):
agent.get_action(framework, obs[j].float())
past_actions[j] = agent.action_index
# # debugging
# if len(agents) == 2:
# print('debugging')
# aux1 = agents[0].action_index == 9
# aux2 = agents[1].action_index == 5
# aux = [aux1, aux2]
# if np.mean(aux) == 1:
# print('debugging')
next_obs, rewards = env.step(agents)
i += 1
for j, agent in enumerate(agents):
framework.replay_memory.push(
obs[j].cpu(), past_actions[j].cpu(),
rewards[j], next_obs[j].cpu(), 0
)
framework.learn()
total_reward = np.sum(rewards)
bag.append(total_reward.item())
obs = next_obs
if i % TARGET_UPDATE == 0:
framework.target_net.load_state_dict(
framework.policy_net.state_dict()
)
if total_reward > best_reward:
best_reward = total_reward
epsilon = agents[0].epsilon
if episode % EVAL_EVERY == 0:
r, d_speff, m_speff = in_training_test(framework)
rewards_bag.append(r)
# average d2d spectral eff
d2d_spectral_eff_bag.append(d_speff)
# mue spectral eff
mue_spectral_eff_bag.append(m_speff)
# save stuff
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
torch.save(framework.policy_net.state_dict(), data_path)
# Return the trained policy
return framework, rewards_bag, d2d_spectral_eff_bag, mue_spectral_eff_bag, epsilon # noqa
def test(n_agents, test_env, framework):
framework.policy_net.eval()
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# jain_index = [list() for _ in range(max_d2d+1)]
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.build_scenario(agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards
def in_training_test(framework: ExternalDQNFramework):
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
for _ in range(EVAL_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
env.build_scenario(agents)
obs, _ = env.step(agents)
for _ in range(EVAL_STEPS_PER_EPISODE):
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
next_obs, _ = env.step(agents)
obs = next_obs
# mue spectral eff
mue_spectral_eff_bag.append(env.mue_spectral_eff)
# average d2d spectral eff
d2d_spectral_eff_bag.append(env.d2d_spectral_eff)
rewards_bag.append(env.reward)
mean_mue_speff = np.mean(mue_spectral_eff_bag)
mean_d2d_speff = np.mean(d2d_spectral_eff_bag)
mean_reward = np.mean(rewards_bag)
return mean_reward, mean_d2d_speff, mean_mue_speff
def run(framework=None):
mue_sucess_rate_total = []
mue_spectral_effs_total = []
d2d_spectral_effs_total = []
rewards_total = []
start = time()
r, d_speffs, m_speffs, epsilon = 0, 0, 0, 1
if framework is None:
framework, r, d_speffs, m_speffs, epsilon = train(start)
for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):
mue_suc_rates = []
mue_speff_rates = []
d2d_speff_rates = []
rews = []
for it in range(ITERATIONS_PER_NUM_AGENTS):
now = (time() - start) / 60
print(
'Testing. ' +
f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +
f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. ' +
f'Elapsed time: {now} minutes.'
)
test_env = deepcopy(ref_env)
mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \
test(n, test_env, framework)
mue_suc_rates.append(mue_success_rate)
mue_speff_rates.append(mue_spectral_effs)
d2d_speff_rates.append(d2d_spectral_effs)
rews.append(rewards)
mue_sucess_rate_total.append(mue_suc_rates)
mue_spectral_effs_total.append(mue_speff_rates)
d2d_spectral_effs_total.append(d2d_speff_rates)
rewards_total.append(rews)
# save stuff
now = (time() - start) / 60
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/rainbow/{filename}'
data_path = make_dir_timestamp(dir_path)
data_file_path = f'{data_path}/log.pickle'
data = {
'mue_success_rate': mue_sucess_rate_total,
'd2d_speffs': d2d_spectral_effs_total,
'mue_speffs': mue_spectral_effs_total,
'rewards': rewards_total,
'mue_sinr_threshold': sinr_threshold_train,
'elapsed_time': now,
'training_rewards': r,
'training_d2d_speffs': d_speffs,
'training_mue_speffs': m_speffs,
'eval_every': EVAL_EVERY,
'final_epsilon': epsilon,
}
save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
def run_test():
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
framework = torch.load(data_path)
run(framework)
def test_exec():
# environment
test_env = deepcopy(ref_env)
# load framework
framework = ExternalDQNFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
NUM_HIDDEN_LAYERS,
LEARNING_RATE
)
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
state_dict = torch.load(data_path)
framework.policy_net.load_state_dict(state_dict)
framework.policy_net.eval()
# simulation stuff
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# devices positions
pairs_positions = [
((-400, 0, device_height), (-450, 0, device_height)),
((100, 0, device_height), (150, 0, device_height)),
((225, 225, device_height), (275, 225, device_height)),
((55, -55, device_height), (55, -5, device_height)),
]
mue_position = (0, 200, device_height)
# jain_index = [list() for _ in range(max_d2d+1)]
n_agents = len(pairs_positions)
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.set_scenario(pairs_positions, mue_position, agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
d2d_txs, d2d_rxs = zip(*test_env.d2d_pairs)
# D2D interference on the MUE, in dB
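# (converted to linear scale with db_to_power before summing, so that each
# pair's share of the total interference can be computed below)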
d2d_interferences = np.array([
d.caused_mue_interference for d in d2d_txs
])
d2d_interferences_mag = db_to_power(d2d_interferences)
d2d_total_interference = np.sum(d2d_interferences_mag)
percentage_interferences = d2d_interferences_mag / d2d_total_interference
interferences, tx_labels, rx_labels = calculate_interferences(test_env)
if d2d_total_interference != 0:
plot_positions_actions_pie(
test_env.bs, test_env.mue, d2d_txs, d2d_rxs,
actions_index, percentage_interferences,
test_env.mue.sinr > sinr_threshold_train, sinr_threshold_train,
test_env.reward, interferences, tx_labels, rx_labels
)
# jain_index[n_agents].append(gen.jain_index(test_env.sinr_d2ds))
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'data/rainbow/{filename}_exec.pickle'
data = {
'd2d_speffs_avg_total': d2d_spectral_effs,
'mue_success_rate': mue_success_rate,
'chosen_actions': bag,
'd2d_speffs': d2d_spectral_effs,
'mue_speffs': mue_spectral_effs,
'rewards': rewards_bag,
'mue_sinr_threshold': sinr_threshold_train,
}
with open(data_path, 'wb') as file:
pickle.dump(data, file)
# plot
print_stuff(actions, test_env)
plt.show()
def calculate_interferences(env: CompleteEnvironment10dB):
bs = env.bs
mue = env.mue
d2d_pairs = env.d2d_pairs
txs = [mue]
txs += [p[0] for p in d2d_pairs]
rxs = [bs]
rxs += [p[1] for p in d2d_pairs]
interferences = np.zeros((len(txs), len(rxs)))
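# interferences[i][j] holds, in dB, the power that transmitter i contributes
# at receiver j (the BS for the MUE link, the D2D RX for each D2D pair); when
# i and j belong to the same link this is the desired received power.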
for i, tx in enumerate(txs):
for j, (rx, interfered) in enumerate(zip(rxs, txs)):
if tx == interfered:
interf = tx.power_at_receiver
elif tx == mue:
interf = interfered.received_mue_interference
elif rx == bs:
interf = tx.caused_mue_interference
else:
interf = [
power_to_db(i[1]) for i in interfered.interferences
if i[0] == tx.id
][0]
interferences[i][j] = interf
tx_labels = [d.id for d in txs]
rx_labels = [d.id for d in rxs]
return interferences, tx_labels, rx_labels
def print_stuff(actions, env: CompleteEnvironment10dB):
actions = [f'{i:.2f}' for i in actions]
sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]
print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')
print(f'D2D Power levels [dBW]: {actions}')
print(f'D2D SINR [dB]: {sinr_d2ds}')
print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')
if __name__ == '__main__':
run()
| 36.184 | 94 | 0.644097 |
f50031cd1c35db6c3a55a9851b43fb57241070cc | 69,583 | py | Python | tensorflow/python/keras/layers/recurrent_v2.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | ["Apache-2.0"] | 50 | 2020-03-15T01:04:36.000Z | 2021-11-21T23:25:44.000Z | tensorflow/python/keras/layers/recurrent_v2.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | ["Apache-2.0"] | 47 | 2020-05-15T11:30:04.000Z | 2021-08-11T16:51:08.000Z | tensorflow/python/keras/layers/recurrent_v2.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | ["Apache-2.0"] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers for TF 2.0.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import build_info
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# The following string constants are used by Defun approach for unified backend
# of LSTM and GRU.
_FUNCTION_API_NAME_ATTRIBUTE = 'api_implements'
_FUNCTION_DEVICE_ATTRIBUTE = 'api_preferred_device'
_CPU_DEVICE_NAME = 'CPU'
_GPU_DEVICE_NAME = 'GPU'
# The following number constants are used to represent the runtime of the defun
# backend function. Since the CPU/GPU implementations are mathematically the
# same, we need some signal for the function to indicate which one was
# executed. This is for testing purposes, to verify the correctness of the
# backend-function swapping.
_RUNTIME_UNKNOWN = 0
_RUNTIME_CPU = 1
_RUNTIME_GPU = 2
_CUDNN_AVAILABLE_MSG = 'Layer %s will use cuDNN kernel when run on GPU.'
_CUDNN_NOT_AVAILABLE_MSG = ('Layer %s will not use cuDNN kernel since it '
'doesn\'t meet the cuDNN kernel criteria. It will '
'use a generic GPU kernel as fallback when running '
'on GPU')
@keras_export('keras.layers.GRUCell', v1=[])
class GRUCell(recurrent.GRUCell):
"""Cell class for the GRU layer.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
`tf.keras.layers.GRU` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.GRUCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 (default) will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications. Default: 2.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: A 2D tensor with shape of `[batch, units]`, which is the state from
the previous time step. For timestep 0, the initial state provided by the user
will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
reset_after=True,
**kwargs):
super(GRUCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after,
**kwargs)
@keras_export('keras.layers.GRU', v1=[])
class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU):
"""Gated Recurrent Unit - Cho et al. 2014.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. `reset_after` is `True`
7. Inputs, if use masking, are strictly right-padded.
There are two variants of the GRU implementation. The default one is based on
[v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden
state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> gru = tf.keras.layers.GRU(4)
>>> output = gru(inputs)
>>> print(output.shape)
(32, 4)
>>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
>>> whole_sequence_output, final_state = gru(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications. Default: 2.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[samples, timesteps]` indicating whether
a given timestep should be masked (optional, defaults to `None`).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional, defaults to `None`).
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, defaults to `None` which causes creation
of zero-filled initial state tensors).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
reset_after=True,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self._return_runtime = kwargs.pop('return_runtime', False)
super(GRU, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
time_major=time_major,
reset_after=reset_after,
**kwargs)
# GPU kernel uses following setting by default and not configurable.
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, nn.tanh) and
self.recurrent_activation in (activations.sigmoid, nn.sigmoid) and
recurrent_dropout == 0 and not unroll and use_bias and
reset_after and ops.executing_eagerly_outside_functions())
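# These conditions mirror the cuDNN requirements listed in the class
# docstring: tanh/sigmoid activations, no recurrent dropout, no unrolling,
# bias enabled and reset_after=True.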
if context.num_gpus() > 0:
# Only show the message when there is GPU available, user will not care
# about the cuDNN if there isn't any GPU.
if self._could_use_gpu_kernel:
logging.debug(_CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warn(_CUDNN_NOT_AVAILABLE_MSG % self.name)
def build(self, input_shape):
super(GRU, self).build(input_shape)
if not all(isinstance(v, resource_variable_ops.ResourceVariable)
for v in self.weights):
# Non-resource variables, such as DistributedVariables and
# AutoCastVariables, do not work properly with the implementation
# selector, which is used when cuDNN is used. However, by chance, such
# variables happen to work in LSTM, so this check is only needed for GRU.
# TODO(b/136512020): Make non-resource variables work with the
# implementation selector.
self._could_use_gpu_kernel = False
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = K.convert_inputs_if_ragged(inputs)
is_ragged_input = (row_lengths is not None)
self._validate_args_if_ragged(is_ragged_input, mask)
# GRU does not support constants. Ignore it during process.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self._could_use_gpu_kernel:
kwargs = {'training': training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(cell_inputs, cell_states):
return self.cell(cell_inputs, cell_states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
# This is a dummy tensor for testing purpose.
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
last_output, outputs, runtime, states = self._defun_gru_call(
inputs, initial_state, training, mask, row_lengths)
if self.stateful:
updates = [state_ops.assign(self.states[0], states[0])]
self.add_update(updates)
if self.return_sequences:
output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self._return_runtime:
return output, runtime
else:
return output
def _defun_gru_call(self, inputs, initial_state, training, mask,
sequence_lengths):
# Use the new defun approach for backend implementation swap.
# Note that different implementations need to have the same function
# signature, e.g. the tensor parameters need to have the same shapes and
# dtypes.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
gpu_gru_kwargs = {
'inputs': inputs,
'init_h': _read_variable_value(initial_state[0]),
'kernel': _read_variable_value(self.cell.kernel),
'recurrent_kernel': _read_variable_value(self.cell.recurrent_kernel),
'bias': _read_variable_value(self.cell.bias),
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards,
'sequence_lengths': sequence_lengths
}
normal_gru_kwargs = gpu_gru_kwargs.copy()
normal_gru_kwargs.update({
'zero_output_for_mask': self.zero_output_for_mask,
})
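# `zero_output_for_mask` is only accepted by the generic (standard_gru)
# kernel, so it is added here and left out of the cuDNN kwargs above.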
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME
or (device_type is None and context.num_gpus() > 0))
and
(mask is None or is_sequence_right_padded(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, runtime = gpu_gru(**gpu_gru_kwargs)
else:
last_output, outputs, new_h, runtime = standard_gru(**normal_gru_kwargs)
else:
last_output, outputs, new_h, runtime = gru_with_backend_selection(
**normal_gru_kwargs)
states = [new_h]
return last_output, outputs, runtime, states
def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""GRU with standard kernel implementation.
This implementation can be run on all types of hardware.
This implementation lifts out all the layer weights and makes them function
parameters. It has the same number of tensor input params as the CuDNN
counterpart. The RNN step logic has been simplified, e.g. dropout and mask are
removed since the CuDNN implementation does not support them.
Arguments:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. The bias contains the
combined input_bias and recurrent_bias.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has same shape as init_h.
runtime: constant string tensor which indicates the real runtime hardware. This
value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
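# With reset_after=True the bias has shape (2, 3 * units): row 0 is the input
# bias and row 1 the recurrent bias, matching the cuDNN parameter layout.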
input_bias, recurrent_bias = array_ops.unstack(bias)
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(cell_inputs, kernel)
matrix_x = K.bias_add(matrix_x, input_bias)
x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1)
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, recurrent_kernel)
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner, 3,
axis=1)
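# z: update gate, r: reset gate, hh: candidate state. The reset gate is
# applied to the projected recurrent state (reset_after=True convention).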
z = nn.sigmoid(x_z + recurrent_z)
r = nn.sigmoid(x_r + recurrent_r)
hh = nn.tanh(x_h + r * recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=sequence_lengths
if sequence_lengths is not None else timesteps,
zero_output_for_mask=zero_output_for_mask)
return last_output, outputs, new_states[0], _runtime(_RUNTIME_CPU)
def gpu_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths):
"""GRU with CuDNN implementation which is only available for GPU."""
if not time_major and mask is None:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
# For init_h, cuDNN expects one more dim of num_layers before or after batch
# dim for time major or batch major inputs respectively
init_h = array_ops.expand_dims(init_h, axis=seq_axis)
weights = array_ops.split(kernel, 3, axis=1)
weights += array_ops.split(recurrent_kernel, 3, axis=1)
# Note that the bias was initialized as shape (2, 3 * units); flatten it into
# (6 * units).
bias = array_ops.split(K.flatten(bias), 6)
if build_info.is_cuda_build:
# Note that the gate order for CuDNN is different from the canonical format.
# The canonical format is [z, r, h], whereas CuDNN is [r, z, h]. The swap needs
# to be done for kernel, recurrent_kernel, input_bias and recurrent_bias.
# z is the update gate weights.
# r is the reset gate weights.
# h is the candidate ("new") gate weights.
weights[0], weights[1] = weights[1], weights[0]
weights[3], weights[4] = weights[4], weights[3]
bias[0], bias[1] = bias[1], bias[0]
bias[3], bias[4] = bias[4], bias[3]
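# Flatten the per-gate weights and biases into the single opaque parameter
# blob expected by the cuDNN op (weights are transposed to the cuDNN layout).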
params = _canonical_to_params(
weights=weights,
biases=bias,
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_lengths = calculate_sequence_by_mask(mask, time_major)
if sequence_lengths is not None:
if go_backwards:
# Three reversals are required. E.g.,
# normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
# reversed_input_to_cudnn = [3, 2, 1, 0, 0]
# output_from_cudnn = [6, 5, 4, 0, 0]
# expected_output = [0, 0, 6, 5 ,4]
inputs = array_ops.reverse_sequence_v2(
inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs,
input_h=init_h,
input_c=0,
params=params,
is_training=True,
rnn_mode='gru',
sequence_lengths=sequence_lengths,
time_major=time_major)
if go_backwards:
outputs = array_ops.reverse_sequence_v2(
outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs = array_ops.reverse(outputs, axis=[seq_axis])
else:
if go_backwards:
# Reverse axis 0 since the input is already converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=0, params=params, is_training=True,
rnn_mode='gru')
last_output = outputs[-1]
if not time_major and mask is None:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = array_ops.squeeze(h, axis=seq_axis)
# In the case of variable-length input, the cuDNN kernel fills the output with
# zeros past the sequence end, whereas the default Keras behavior is to carry
# over the output from t-1, so that in the return_sequences=False case the user
# gets the final effective output instead of just 0s at the last timestep.
# In order to mimic the default Keras behavior, we copy the final h state as
# the last_output, since it is numerically the same as the output.
if mask is not None:
last_output = h
return last_output, outputs, h, _runtime(_RUNTIME_GPU)
def gru_with_backend_selection(inputs, init_h, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the GRU with optimized backend kernel selection.
Under the hood, this function will create two TF functions: one with the most
generic kernel, which can run under all device conditions, and a second one
with the CuDNN-specific kernel, which can only run on GPU.
The first function will be called with the normal GRU params, while the second
function is not called, but only registered in the graph. Grappler will then
do the proper graph rewrite and swap in the optimized TF function based on the
device placement.
Args:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_gru.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_gru_with_fallback(inputs, init_h, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""Use CuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def input_right_padded():
return gpu_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def input_not_right_padded():
return standard_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return control_flow_ops.cond(
is_sequence_right_padded(mask, time_major),
true_fn=input_right_padded,
false_fn=input_not_right_padded)
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple GRU layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'gru_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_gru = _generate_defun_backend(
api_name, _CPU_DEVICE_NAME, standard_gru, supportive_attribute)
defun_gpu_gru = _generate_defun_backend(
api_name, _GPU_DEVICE_NAME, gpu_gru_with_fallback, supportive_attribute)
# Call the normal GRU impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, runtime = defun_standard_gru(**params)
function.register(defun_gpu_gru, **params)
return last_output, outputs, new_h, runtime
@keras_export('keras.layers.LSTMCell', v1=[])
class LSTMCell(recurrent.LSTMCell):
"""Cell class for the LSTM layer.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
`tf.keras.layers.LSTM` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.LSTMCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass `None`, no activation is applied (ie. "linear"
activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of smaller dot
products and additions, whereas mode 2 (default) will batch them into
fewer, larger operations. These modes will have different performance
profiles on different hardware and for different applications. Default: 2.
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: List of 2 tensors that correspond to the cell's units. Both of
them have shape `[batch, units]`; the first tensor is the memory state
from the previous time step, the second tensor is the carry state from
the previous time step. For timestep 0, the initial state provided by the user
will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
**kwargs):
super(LSTMCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
**kwargs)
@keras_export('keras.layers.LSTM', v1=[])
class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM):
"""Long Short-Term Memory layer - Hochreiter 1997.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. Inputs, if use masking, are strictly right-padded.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> lstm = tf.keras.layers.LSTM(4)
>>> output = lstm(inputs)
>>> print(output.shape)
(32, 4)
>>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
>>> print(whole_seq_output.shape)
(32, 10, 4)
>>> print(final_memory_state.shape)
(32, 4)
>>> print(final_carry_state.shape)
(32, 4)
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
is applied (ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs. Default: `glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state. Default: 0.
implementation: Implementation mode, either 1 or 2. Mode 1 will structure
its operations as a larger number of smaller dot products and additions,
whereas mode 2 will batch them into fewer, larger operations. These modes
will have different performance profiles on different hardware and for
different applications. Default: 2.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default `False`). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
unroll: Boolean (default `False`). If True, the network will be unrolled,
else a symbolic loop will be used. Unrolling can speed-up a RNN, although
it tends to be more memory-intensive. Unrolling is only suitable for short
sequences.
Call arguments:
inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked (optional, defaults to `None`).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional, defaults to `None`).
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, defaults to `None` which causes creation
of zero-filled initial state tensors).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
unroll=False,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False)
super(LSTM, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
time_major=time_major,
unroll=unroll,
**kwargs)
self.state_spec = [
InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
]
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, nn.tanh) and
self.recurrent_activation in (activations.sigmoid, nn.sigmoid) and
recurrent_dropout == 0 and not unroll and use_bias and
ops.executing_eagerly_outside_functions())
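# Same criteria as for the GRU layer above, except that the LSTM cuDNN kernel
# has no reset_after requirement.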
if context.num_gpus() > 0:
# Only show the message when there is GPU available, user will not care
# about the cuDNN if there isn't any GPU.
if self._could_use_gpu_kernel:
logging.debug(_CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warn(_CUDNN_NOT_AVAILABLE_MSG % self.name)
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = K.convert_inputs_if_ragged(inputs)
is_ragged_input = (row_lengths is not None)
self._validate_args_if_ragged(is_ragged_input, mask)
# LSTM does not support constants. Ignore it during process.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self._could_use_gpu_kernel:
# Fall back to use the normal LSTM.
kwargs = {'training': training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(inputs, states):
return self.cell(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
# Use the new defun approach for backend implementation swap.
# Note that different implementations need to have the same function
# signature, e.g. the tensor parameters need to have the same shapes and
# dtypes. Since CuDNN has an extra set of biases, those biases will be passed
# to both the normal and CuDNN implementations.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
gpu_lstm_kwargs = {
'inputs': inputs,
'init_h': _read_variable_value(initial_state[0]),
'init_c': _read_variable_value(initial_state[1]),
'kernel': _read_variable_value(self.cell.kernel),
'recurrent_kernel': _read_variable_value(self.cell.recurrent_kernel),
'bias': _read_variable_value(self.cell.bias),
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards,
'sequence_lengths': row_lengths
}
normal_lstm_kwargs = gpu_lstm_kwargs.copy()
normal_lstm_kwargs.update({
'zero_output_for_mask': self.zero_output_for_mask,
})
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME
or (device_type is None and context.num_gpus() > 0))
and
(mask is None or is_sequence_right_padded(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, new_c, runtime = gpu_lstm(
**gpu_lstm_kwargs)
else:
last_output, outputs, new_h, new_c, runtime = standard_lstm(
**normal_lstm_kwargs)
else:
(last_output, outputs, new_h, new_c,
runtime) = lstm_with_backend_selection(**normal_lstm_kwargs)
states = [new_h, new_c]
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates)
if self.return_sequences:
output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self.return_runtime:
return output, runtime
else:
return output
def _canonical_to_params(weights, biases, shape, transpose_weights=False):
"""Utility function convert variable to CuDNN compatible parameter.
Note that Keras weights for kernels are different from the CuDNN format. Eg.:
```
Keras CuDNN
[[0, 1, 2], <---> [[0, 2, 4],
[3, 4, 5]] [1, 3, 5]]
```
If the input weights need to be in a unified format, then set
`transpose_weights=True` to convert the weights.
Args:
weights: list of weights for the individual kernels and recurrent kernels.
biases: list of biases for individual gate.
shape: the shape for the converted variables that will be fed to CuDNN.
transpose_weights: boolean, whether to transpose the weights.
Returns:
The converted weights that can be fed to CuDNN ops as the param.
"""
def convert(w):
return array_ops.transpose(w) if transpose_weights else w
weights = [array_ops.reshape(convert(x), shape) for x in weights]
biases = [array_ops.reshape(x, shape) for x in biases]
return array_ops.concat(weights + biases, axis=0)
def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias,
mask, time_major, go_backwards, sequence_lengths,
zero_output_for_mask):
"""LSTM with standard kernel implementation.
This implementation can be run on all types of hardware.
This implementation lifts out all the layer weights and makes them function
parameters. It has the same number of tensor input params as the CuDNN
counterpart. The RNN step logic has been simplified, e.g. dropout and mask are
removed since the CuDNN implementation does not support them.
Note that the first half of the bias tensor should be ignored by this impl.
The CuDNN impl needs an extra set of input gate bias. In order to make both
functions take the same shape of parameters, that extra set of bias is also
fed here.
Args:
inputs: input tensor of LSTM layer.
init_h: initial state tensor for the cell output.
init_c: initial state tensor for the cell hidden state.
kernel: weights for cell kernel.
recurrent_kernel: weights for cell recurrent kernel.
bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
time_major: boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has same shape as init_h.
state_1: the cell hidden state, which has same shape as init_c.
runtime: constant string tensor which indicates the real runtime hardware. This
value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = K.dot(cell_inputs, kernel)
z += K.dot(h_tm1, recurrent_kernel)
z = K.bias_add(z, bias)
z0, z1, z2, z3 = array_ops.split(z, 4, axis=1)
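# z0..z3 are the pre-activations of the input gate, forget gate, cell
# candidate and output gate, respectively.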
i = nn.sigmoid(z0)
f = nn.sigmoid(z1)
c = f * c_tm1 + i * nn.tanh(z2)
o = nn.sigmoid(z3)
h = o * nn.tanh(c)
return h, [h, c]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h, init_c],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=(sequence_lengths
if sequence_lengths is not None else timesteps),
zero_output_for_mask=zero_output_for_mask)
return (last_output, outputs, new_states[0], new_states[1],
_runtime(_RUNTIME_CPU))
def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards, sequence_lengths):
"""LSTM with either CuDNN or ROCm implementation which is only available for GPU.
Note that currently only right padded data is supported, or the result will be
polluted by the unmasked data which should be filtered.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
time_major: Boolean, whether the inputs are in the format of [time, batch,
feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
Returns:
last_output: Output tensor for the last timestep, which has shape
[batch, units].
outputs: Output tensor for all timesteps, which has shape
[batch, time, units].
state_0: The cell output, which has same shape as init_h.
state_1: The cell hidden state, which has same shape as init_c.
runtime: Constant string tensor which indicates the real runtime hardware. This
value is for testing purposes and should not be used by users.
"""
if not time_major and mask is None:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
# For init_h and init_c, cuDNN expects one more dim of num_layers before or
# after batch dim for time major or batch major inputs respectively
init_h = array_ops.expand_dims(init_h, axis=seq_axis)
init_c = array_ops.expand_dims(init_c, axis=seq_axis)
weights = array_ops.split(kernel, 4, axis=1)
weights += array_ops.split(recurrent_kernel, 4, axis=1)
# CuDNN has an extra set of bias for inputs, we disable them (setting to 0),
# so that mathematically it is same as the canonical LSTM implementation.
full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)
if build_info.is_rocm_build:
# ROCm MIOpen's weight sequence for LSTM is different from both canonical
# and Cudnn format
# MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
# i is input gate weights.
# f is forget gate weights.
# o is output gate weights.
# c is cell gate weights.
weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
# full_bias is a tensor of shape (8*n,)
full_bias = array_ops.split(full_bias, 8, axis=0)
full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
params = _canonical_to_params(
weights=weights,
biases=array_ops.split(full_bias, 8),
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_lengths = calculate_sequence_by_mask(mask, time_major)
if sequence_lengths is not None:
if go_backwards:
# Three reversals are required. E.g.,
# normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
# reversed_input_to_cudnn = [3, 2, 1, 0, 0]
# output_from_cudnn = [6, 5, 4, 0, 0]
# expected_output = [0, 0, 6, 5 ,4]
inputs = array_ops.reverse_sequence_v2(
inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs,
input_h=init_h,
input_c=init_c,
params=params,
is_training=True,
rnn_mode='lstm',
sequence_lengths=sequence_lengths,
time_major=time_major)
if go_backwards:
outputs = array_ops.reverse_sequence_v2(
outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
outputs = array_ops.reverse(outputs, axis=[seq_axis])
else:
# # Fill the array with shape [batch] with value of max timesteps.
# sequence_length = array_ops.fill([array_ops.shape(inputs)[1]],
# array_ops.shape(inputs)[0])
if go_backwards:
      # Reverse axis 0 since the input has already been converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=init_c, params=params, is_training=True,
rnn_mode='lstm')
last_output = outputs[-1]
if not time_major and mask is None:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = array_ops.squeeze(h, axis=seq_axis)
c = array_ops.squeeze(c, axis=seq_axis)
# In the case of variable length input, the cudnn kernel will fill zeros for
# the output, whereas the default keras behavior is to bring over the previous
# output from t-1, so that in the return_sequence=False case, the user can quickly
# get the final effective output instead of just 0s at the last timestep.
# In order to mimic the default keras behavior, we copy the final h state as
# the last_output, since it is numerically same as the output.
if mask is not None:
last_output = h
return last_output, outputs, h, c, _runtime(_RUNTIME_GPU)
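# A minimal illustrative sketch (not part of the original module): the
# "three reversals" described in the comments above can be reproduced with
# plain NumPy. The helper name below is hypothetical and the numbers mirror
# the worked example (per-step outputs [6, 5, 4, 0, 0] produced for the
# reversed, right-padded input of true length 3).
def _example_three_reversals():
  import numpy as np

  def reverse_sequence(x, length):
    # Reverse only the first `length` valid steps of a 1-D array.
    out = x.copy()
    out[:length] = x[:length][::-1]
    return out

  output_from_cudnn = np.array([6, 5, 4, 0, 0])
  outputs = reverse_sequence(output_from_cudnn, 3)  # -> [4, 5, 6, 0, 0]
  outputs = outputs[::-1]                           # -> [0, 0, 6, 5, 4]
  assert outputs.tolist() == [0, 0, 6, 5, 4]
  return outputs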
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the most
  generic kernel, which can run under any device condition, and a second one with
  the CuDNN-specific kernel, which can only run on GPU.
The first function will be called with normal_lstm_params, while the second
function is not called, but only registered in the graph. The Grappler will
do the proper graph rewrite and swap the optimized TF function based on the
device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor used to mask out the steps within the sequence.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use CuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def input_right_padded():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def input_not_right_padded():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return control_flow_ops.cond(
is_sequence_right_padded(mask, time_major),
true_fn=input_right_padded,
false_fn=input_not_right_padded)
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple LSTM layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = _generate_defun_backend(
api_name, _CPU_DEVICE_NAME, standard_lstm, supportive_attribute)
defun_gpu_lstm = _generate_defun_backend(
api_name, _GPU_DEVICE_NAME, gpu_lstm_with_fallback, supportive_attribute)
# Call the normal LSTM impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
**params)
function.register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
def is_sequence_right_padded(mask, time_major):
"""Check the mask tensor and see if it right padded.
For CuDNN kernel, it uses the sequence length param to skip the tailing
timestep. If the data is left padded, or not a strict right padding (has
masked value in the middle of the sequence), then CuDNN kernel won't be work
properly in those cases.
Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]].
Mixture of mask/unmasked data: [[True, False, True, False, False]].
  Note that for the mixed data example above, the actual data the RNN should see
  is the two True steps (index 0 and 2); the False at index 1 should be ignored and
  must not pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep] or [timestep, batch]
when time_major is True.
time_major: Boolean, whether the input mask is time major or batch major.
Returns:
boolean scalar tensor, whether the mask is strictly right padded.
"""
if time_major:
mask = array_ops.transpose(mask)
max_seq_length = array_ops.shape(mask)[1]
count_of_true = math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=1)
right_padded_mask = array_ops.sequence_mask(
count_of_true, maxlen=max_seq_length)
return math_ops.reduce_all(math_ops.equal(mask, right_padded_mask))
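# A minimal illustrative sketch (not part of the original module): the check
# above is equivalent to "turning the per-row count of True steps back into a
# right-padded mask reproduces the original mask". A NumPy mirror using the
# docstring examples:
def _example_is_right_padded():
  import numpy as np

  def is_right_padded(mask):
    mask = np.asarray(mask, dtype=bool)
    count_of_true = mask.sum(axis=1)
    rebuilt = np.arange(mask.shape[1])[None, :] < count_of_true[:, None]
    return bool(np.all(mask == rebuilt))

  assert is_right_padded([[True, True, True, False, False]])       # right padded
  assert not is_right_padded([[False, False, True, True, True]])   # left padded
  assert not is_right_padded([[True, False, True, False, False]])  # mixed
  return True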
def calculate_sequence_by_mask(mask, time_major):
"""Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
Consider the following example:
a = [[True, True, False, False],
[True, True, True, False]]
  It is a (2, 4) tensor, and the corresponding sequence length result should be a
  1D tensor with values [2, 3]. Note that the masking tensor must be right
  padded, which can be checked by, e.g., `is_sequence_right_padded()`.
Args:
mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
time_major=True.
time_major: Boolean, which indicates whether the mask is time major or batch
major.
Returns:
sequence_length: 1D int32 tensor.
"""
timestep_index = 0 if time_major else 1
return math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32),
axis=timestep_index)
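# A minimal illustrative sketch (not part of the original module): for the
# docstring example the reduction simply counts the True steps per batch row.
def _example_sequence_length_from_mask():
  import numpy as np
  mask = np.array([[True, True, False, False],
                   [True, True, True, False]])
  lengths = mask.astype(np.int32).sum(axis=1)  # batch major: timestep axis is 1
  assert lengths.tolist() == [2, 3]
  return lengths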
def _generate_defun_backend(unique_api_name, preferred_device, func,
supportive_attributes):
function_attributes = {
_FUNCTION_API_NAME_ATTRIBUTE: unique_api_name,
_FUNCTION_DEVICE_ATTRIBUTE: preferred_device,
}
function_attributes.update(supportive_attributes)
return function.defun_with_attributes(func=func,
attributes=function_attributes,
autograph=False)
def _get_context_device_type():
"""Parse the current context and return the device type, eg CPU/GPU."""
current_device = context.context().device_name
if current_device is None:
return None
return device.DeviceSpec.from_string(current_device).device_type
def _runtime(runtime_name):
with ops.device('/cpu:0'):
return constant_op.constant(
runtime_name, dtype=dtypes.float32, name='runtime')
def _read_variable_value(v):
"""Read the value of a resource variable if it is variable."""
if resource_variable_ops.is_resource_variable(v):
return v.read_value()
return v
| 42.273998
| 83
| 0.693388
|
275d0891a8dd15a24a11627bae5896e719816219
| 4,898
|
py
|
Python
|
ample/util/mrbump_cmd.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 6
|
2017-03-17T14:43:14.000Z
|
2021-08-06T07:07:14.000Z
|
ample/util/mrbump_cmd.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 47
|
2017-03-17T14:37:09.000Z
|
2021-01-28T10:22:15.000Z
|
ample/util/mrbump_cmd.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 6
|
2017-09-26T08:45:09.000Z
|
2020-03-19T14:26:49.000Z
|
"""
Created on 28 Feb 2013
@author: jmht
"""
import os
import sys
from ample.util import ample_util
def mrbump_cmd(name, mtz, mr_sequence, keyword_file):
"""Return the command to run mrbump"""
if sys.platform.startswith("win"):
mrbump = os.path.join(os.environ["CCP4"], "bin", "mrbump" + ample_util.SCRIPT_EXT)
else:
mrbump = os.path.join(os.environ["CCP4"], "bin", "mrbump")
cmd = [
mrbump,
"KEYIN",
"{0}".format(keyword_file),
"HKLIN",
"{0}".format(mtz),
"SEQIN",
"{0}".format(mr_sequence),
"HKLOUT",
"{0}.mtz".format(name),
"XYZOUT",
"{0}.pdb".format(name),
]
return " ".join(cmd)
def keyword_dict(ensemble_pdb, name, amoptd, extra_options={}):
"""Extract the mrbump keywords from the main ample dictionary and add/change any from
the extra_options dict"""
keywords = [
'arpwarp_cycles',
'buccaneer_cycles',
'debug',
'existing_mr_solution',
'F',
'FREE',
'mr_keys',
'mr_sg_all',
'mrbump_programs',
'native_pdb',
'nmasu',
'phaser_kill',
'phaser_rms',
'shelx_cycles',
'shelxe_exe',
'shelxe_rebuild_arpwarp',
'shelxe_rebuild_buccaneer',
'SIGF',
'refine_rebuild_arpwarp',
'refine_rebuild_buccaneer',
'use_shelxe',
]
# Pull out all mrbump options from the main ample dict
if sys.version_info.major == 3:
key_dict = dict((k, v) for k, v in amoptd.items() if k in keywords)
extra_options_d = extra_options.items()
else:
key_dict = dict((k, v) for k, v in amoptd.iteritems() if k in keywords)
extra_options_d = extra_options.iteritems()
# Change any/add options for this ensemble
for k, v in extra_options_d:
key_dict[k] = v
# Add ensemble_pdb and name
key_dict['name'] = name
key_dict['ensemble_pdb'] = ensemble_pdb
return key_dict
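# A minimal usage sketch (not from the original source; all values below are
# hypothetical): keyword_dict() filters the main AMPLE options down to the
# MRBUMP-relevant keys, layers extra_options on top, then records the
# ensemble name and path.
def _example_keyword_dict():
    amoptd = {'debug': False, 'F': 'FP', 'SIGF': 'SIGFP', 'unrelated_key': 1}
    d = keyword_dict('ensemble_1.pdb', 'ens1', amoptd, extra_options={'debug': True})
    # d == {'debug': True, 'F': 'FP', 'SIGF': 'SIGFP',
    #       'name': 'ens1', 'ensemble_pdb': 'ensemble_1.pdb'}
    return d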
def mrbump_keyword_file(odict, fixed_iden=0.6):
"""
    Create the MRBUMP keyword text.
    Args:
    odict -- dictionary of options
    fixed_iden -- identity used with FIXED_XYZIN (defaults to 0.6 if not specified)
"""
mrs = 'LABIN SIGF={0} F={1} FreeR_flag={2}\n'.format(odict['SIGF'], odict['F'], odict['FREE'])
mrs += 'JOBID {0}_mrbump\n'.format(odict['name'])
mrs += 'MRPROGRAM {0}\n'.format(" ".join(odict['mrbump_programs']))
mrs += 'LOCALFILE {0} CHAIN ALL RMS {1}'.format((odict['ensemble_pdb']), odict['phaser_rms'])
if 'ncopies' in odict and odict['ncopies'] > 0:
mrs += ' COPIES {0}'.format(odict['ncopies'])
mrs += '\n'
# Don't do any of the searches as we are providing a local file
mrs += 'SCOPSEARCH False\n'
mrs += 'PQSSEARCH False\n'
mrs += 'SSMSEARCH False\n'
mrs += 'DOFASTA False\n'
mrs += 'DOPHMMER False\n'
mrs += 'DOHHPRED False\n'
mrs += 'FAST False\n'
mrs += 'MDLD False\n'
mrs += 'MDLC False\n'
mrs += 'MDLM False\n'
mrs += 'MDLP False\n'
mrs += 'MDLS False\n'
mrs += 'MDLU True\n'
mrs += 'UPDATE False\n'
mrs += 'BUCC {0}\n'.format(odict['refine_rebuild_buccaneer'])
mrs += 'BCYCLES {0}\n'.format(odict['buccaneer_cycles'])
mrs += 'ARPWARP {0}\n'.format(odict['refine_rebuild_arpwarp'])
mrs += 'ACYCLES {0}\n'.format(odict['arpwarp_cycles'])
mrs += 'SHELXE {0}\n'.format(odict['use_shelxe'])
mrs += 'SHLXEXE {0}\n'.format(odict['shelxe_exe'])
mrs += 'SCYCLES {0}\n'.format(odict['shelx_cycles'])
mrs += 'FIXSG True\n'
mrs += 'PJOBS 1\n'
mrs += 'CHECK False\n'
mrs += 'LITE True\n'
mrs += 'PICKLE False\n'
mrs += 'TRYALL True\n'
mrs += 'USEACORN False\n'
mrs += 'USEENSEM False\n'
mrs += 'CLEAN False\n'
mrs += 'DEBUG {0}\n'.format(odict['debug'])
if odict['shelxe_rebuild_arpwarp'] or odict['shelxe_rebuild_buccaneer']:
# Rebuild SHELXE trace with both Buccaneer and ArpWarp
mrs += 'SXREBUILD True\n'
if odict['shelxe_rebuild_buccaneer']:
mrs += 'SXRBUCC True\n'
if odict['shelxe_rebuild_arpwarp']:
mrs += 'SXRARPW True\n'
if odict['nmasu'] > 0:
mrs += 'NMASU {0}\n'.format(odict['nmasu'])
if odict['existing_mr_solution']:
mrs += 'FIXED_XYZIN {0} IDEN {1}\n'.format(odict['existing_mr_solution'], fixed_iden)
if odict['native_pdb']:
mrs += 'PDBNATIVE {0}\n'.format(odict['native_pdb'])
if odict['phaser_kill'] > 0:
mrs += 'PKEY KILL TIME {0}\n'.format(odict['phaser_kill'])
if odict['mr_sg_all']:
mrs += 'PKEY SGALTERNATIVE SELECT ALL\n'
# Extra keywords
# This assumes everything in mr_keys is a list of [ KEYWORD, VALUE0, VALUE1, ...]
if odict['mr_keys']:
for l in odict['mr_keys']:
mrs += " ".join(l) + "\n"
mrs += 'END\n'
return mrs
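# Illustrative note (not from the original source; values are hypothetical):
# with SIGF='SIGFP', F='FP', FREE='FreeR_flag', name='ens1',
# mrbump_programs=['phaser'], ensemble_pdb='ensemble_1.pdb' and
# phaser_rms=0.1, the generated keyword text begins
#   LABIN SIGF=SIGFP F=FP FreeR_flag=FreeR_flag
#   JOBID ens1_mrbump
#   MRPROGRAM phaser
#   LOCALFILE ensemble_1.pdb CHAIN ALL RMS 0.1
# followed by the fixed search/rebuild switches and a final END line.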
| 32.437086
| 98
| 0.584116
|
2edc71e479412a68160442fbace3e04789a2bd59
| 11,642
|
py
|
Python
|
fairseq/data/seq2sql_dataset.py
|
nikitacs16/fairseq
|
4db7d93e03bae242bb845dd4b6a193e999a9af99
|
[
"MIT"
] | null | null | null |
fairseq/data/seq2sql_dataset.py
|
nikitacs16/fairseq
|
4db7d93e03bae242bb845dd4b6a193e999a9af99
|
[
"MIT"
] | null | null | null |
fairseq/data/seq2sql_dataset.py
|
nikitacs16/fairseq
|
4db7d93e03bae242bb845dd4b6a193e999a9af99
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
import copy
import torch.nn as nn
from . import data_utils, FairseqDataset, dictionary
from collections import Counter
from fairseq import metrics, options, utils
def load_random_embedding(fname):
fname = open(fname,'r')
embed_tokens = {}
for i, line in enumerate(fname.readlines()):
pieces = line.strip().split(" ")
embed_tokens[i] = torch.Tensor([float(weight) for weight in pieces])
return embed_tokens
def copy_prev_embedding(embed_path, dictionary, embed_dim, prev_embedded_tokens_path, prev_dict):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx)
prev_embedded_tokens = load_random_embedding(prev_embedded_tokens_path)
for i in range(5, num_embeddings):
        if prev_dict.index(dictionary.symbols[i]) != prev_dict.unk() and i != dictionary.unk():
embed_tokens.weight.data[i] = prev_embedded_tokens[prev_dict.index(dictionary.symbols[i])]
#embed_tokens.weight = nn.Parameter(prev_embedded_tokens)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
logger = logging.getLogger(__name__)
def get_valid_indices(sequence,mapping_dict,len_sql_dict,unk_idx, src_dict, sql_dict):
valid_indices = list(np.arange(len_sql_dict))
valid_indices.remove(unk_idx)
for i in set(sequence):
valid_indices.append(mapping_dict[i])
return sorted(valid_indices)
def collate(
samples, src_embedding, tgt_embedding, src_dict, sql_dict, pad_idx, eos_idx, unk_idx, left_pad_source=False, left_pad_target=False,
input_feeding=True, eot_symbol=4, mapping_dict=None, len_sql_dict=53
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
)
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge('source', left_pad=left_pad_source)
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
flatten_source = [s['source'].flatten().tolist() for s in samples]
col_lengths_unordered = [s.index(eot_symbol) for s in flatten_source]
col_lengths = torch.LongTensor(col_lengths_unordered).index_select(0,sort_order)
valid_indices = [get_valid_indices(flatten_source[s][:col_lengths_unordered[s]],mapping_dict,len_sql_dict, unk_idx, src_dict, sql_dict) for s in sort_order.flatten().tolist()]
prev_output_tokens = None
target = None
if samples[0].get('target', None) is not None:
target = merge('target', left_pad=left_pad_target)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor([s['target'].numel() for s in samples]).index_select(0, sort_order)
ntokens = sum(len(s['target']) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
'target',
left_pad=left_pad_target,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s['source']) for s in samples)
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': ntokens,
'net_input': {
#'src_dict': src_dict,
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'col_lengths': col_lengths,
'src_embedding': src_embedding,
'tgt_embedding': tgt_embedding,
'valid_indices': valid_indices,
},
'target': target,
#'sql_dict': target_dict,
}
if prev_output_tokens is not None:
batch['net_input']['prev_output_tokens'] = prev_output_tokens
return batch
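# A minimal illustrative sketch (not from the original source; token ids are
# made up): with input feeding enabled, the collater above asks collate_tokens
# to move EOS to the beginning, so a target that ends with EOS becomes the
# teacher-forcing decoder input shifted right by one position.
def _example_teacher_forcing_shift():
    eos = 2
    target = torch.tensor([11, 12, 13, eos])
    prev_output_tokens = torch.cat([target[-1:], target[:-1]])
    assert prev_output_tokens.tolist() == [eos, 11, 12, 13]
    return prev_output_tokens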
class Seq2SqlPairDataSet(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
src_dict (~fairseq.data.Dictionary): source vocabulary
src_column_sizes (List[int]): column lengths
sql (torch.utils.data.Dataset, optional): target dataset to wrap
sql_sizes (List[int], optional): target sentence lengths
sql_dict (~fairseq.data.Dictionary): sql vocabulary
left_pad_source (bool, optional): pad source tensors on the left side
(default: True).
left_pad_target (bool, optional): pad target tensors on the left side
(default: False).
max_source_positions (int, optional): max number of tokens in the
source sentence (default: 1024).
max_target_positions (int, optional): max number of tokens in the
target sentence (default: 1024).
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
input_feeding (bool, optional): create a shifted version of the targets
to be passed into the model for teacher forcing (default: True).
remove_eos_from_source (bool, optional): if set, removes eos from end
of source if it's present (default: False).
append_eos_to_target (bool, optional): if set, appends eos to end of
target if it's absent (default: False).
append_bos (bool, optional): if set, appends bos to the beginning of
source/target sentence.
"""
def __init__(
self, src, src_sizes, src_dict, prev_src_dict,
sql, sql_sizes, sql_dict, prev_sql_dict,
encoder_embed_path, encoder_embed_dim,
decoder_embed_path, decoder_embed_dim,
encoder_random_embedding_path,
decoder_random_embedding_path,
left_pad_source=False, left_pad_target=False,
max_source_positions=1500, max_target_positions=1024,
shuffle=True, input_feeding=True,
remove_eos_from_source=False, append_eos_to_target=False,
append_bos = False
):
if sql_dict is not None:
assert src_dict.pad() == sql_dict.pad()
assert src_dict.eos() == sql_dict.eos()
assert src_dict.unk() == sql_dict.unk()
self.src = src
self.sql = sql
self.src_dict = src_dict
self.sql_dict = sql_dict
self.src_sizes = np.array(src_sizes)
self.eot_symbol = self.src_dict.index('<EOT>')
self.eov_symbol = self.src_dict.index('<EOV>')
#print()
self.sql_sizes = np.array(sql_sizes)
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.append_bos = append_bos
self.mapping_dict = None
self.create_mapper(src_dict, sql_dict)
self.src_embedding = copy_prev_embedding(encoder_embed_path, src_dict, encoder_embed_dim, encoder_random_embedding_path, prev_src_dict)
self.tgt_embedding = copy_prev_embedding(decoder_embed_path, sql_dict, decoder_embed_dim, decoder_random_embedding_path, prev_sql_dict)
#print(self.tgt_embedding)
def __getitem__(self, index):
sql_item = self.sql[index]
src_item = self.src[index]
        # Append EOS to the end of the sql sentence if it does not have one, and remove
        # EOS from the end of the src sentence if it exists. This is useful when we
        # use existing datasets for opposite directions, i.e., when we want to
        # use sql_dataset as src_dataset and vice versa.
if self.append_eos_to_target:
eos = self.sql_dict.eos() if self.sql_dict else self.src_dict.eos()
if self.sql and self.sql[index][-1] != eos:
sql_item = torch.cat([self.sql[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.sql_dict.bos() if self.sql_dict else self.src_dict.bos()
if self.sql and self.sql[index][0] != bos:
sql_item = torch.cat([torch.LongTensor([bos]), self.sql[index]])
bos = self.src_dict.bos()
            if self.src[index][0] != bos:
                src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
#src_item = SrcObject(index, src_item, self.src_dict, col_item)
#sql_item = SqlObject(index, sql_item, self.sql_dict, src_item.col_dict)
example = {
'id': index,
'source': src_item,
'target': sql_item,
}
return example
def __len__(self):
return len(self.src)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, sql_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, sql_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
"""
return collate(
samples, self.src_embedding, self.tgt_embedding, self.src_dict, self.sql_dict,
pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
unk_idx=self.src_dict.unk(),
left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding, eot_symbol=self.eot_symbol, mapping_dict=self.mapping_dict,
len_sql_dict=self.eov_symbol + 1
)
def create_mapper(self,src_dict,sql_dict):
new_dict = {}
src_tokens = src_dict.symbols
sql_tokens = sql_dict.symbols
common_symbols = set(src_tokens).intersection(sql_tokens)
for c in common_symbols:
new_dict[src_dict.index(c)] = sql_dict.index(c)
self.mapping_dict = new_dict
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(self.src_sizes[index], self.sql_sizes[index])
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (self.src_sizes[index], self.sql_sizes[index])
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
if self.sql_sizes is not None:
indices = indices[np.argsort(self.sql_sizes[indices], kind='mergesort')]
return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]
@property
def supports_prefetch(self):
return (
getattr(self.src, 'supports_prefetch', False)
and (getattr(self.sql, 'supports_prefetch', False))
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.sql is not None:
self.sql.prefetch(indices)
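# A minimal illustrative sketch (not from the original source; lengths below
# are made up): the two stable mergesorts in Seq2SqlPairDataSet.ordered_indices()
# order examples primarily by source length, with target (sql) length breaking ties.
def _example_two_key_length_sort():
    src_sizes = np.array([5, 3, 5, 3])
    sql_sizes = np.array([9, 2, 1, 7])
    indices = np.arange(4)
    indices = indices[np.argsort(sql_sizes[indices], kind='mergesort')]
    indices = indices[np.argsort(src_sizes[indices], kind='mergesort')]
    # (src, sql) pairs in returned order: (3, 2), (3, 7), (5, 1), (5, 9)
    assert indices.tolist() == [1, 3, 2, 0]
    return indices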
| 37.076433
| 176
| 0.738705
|
1acb9f25c0c8c3713d2e173dc12825a5c32288a5
| 5,864
|
py
|
Python
|
homeassistant/components/mobile_app/notify.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 5
|
2018-10-23T14:15:05.000Z
|
2021-11-26T06:38:44.000Z
|
homeassistant/components/mobile_app/notify.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
homeassistant/components/mobile_app/notify.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 3
|
2022-01-17T20:10:54.000Z
|
2022-01-17T20:17:22.000Z
|
"""Support for mobile_app push notifications."""
import asyncio
import logging
import aiohttp
import async_timeout
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import (
HTTP_ACCEPTED,
HTTP_CREATED,
HTTP_OK,
HTTP_TOO_MANY_REQUESTS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.util.dt as dt_util
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_VERSION,
ATTR_DEVICE_NAME,
ATTR_OS_VERSION,
ATTR_PUSH_RATE_LIMITS,
ATTR_PUSH_RATE_LIMITS_ERRORS,
ATTR_PUSH_RATE_LIMITS_MAXIMUM,
ATTR_PUSH_RATE_LIMITS_RESETS_AT,
ATTR_PUSH_RATE_LIMITS_SUCCESSFUL,
ATTR_PUSH_TOKEN,
ATTR_PUSH_URL,
DATA_CONFIG_ENTRIES,
DATA_NOTIFY,
DATA_PUSH_CHANNEL,
DOMAIN,
)
from .util import supports_push
_LOGGER = logging.getLogger(__name__)
def push_registrations(hass):
"""Return a dictionary of push enabled registrations."""
targets = {}
for webhook_id, entry in hass.data[DOMAIN][DATA_CONFIG_ENTRIES].items():
if not supports_push(hass, webhook_id):
continue
targets[entry.data[ATTR_DEVICE_NAME]] = webhook_id
return targets
# pylint: disable=invalid-name
def log_rate_limits(hass, device_name, resp, level=logging.INFO):
"""Output rate limit log line at given level."""
if ATTR_PUSH_RATE_LIMITS not in resp:
return
rate_limits = resp[ATTR_PUSH_RATE_LIMITS]
resetsAt = rate_limits[ATTR_PUSH_RATE_LIMITS_RESETS_AT]
resetsAtTime = dt_util.parse_datetime(resetsAt) - dt_util.utcnow()
rate_limit_msg = (
"mobile_app push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s"
)
_LOGGER.log(
level,
rate_limit_msg,
device_name,
rate_limits[ATTR_PUSH_RATE_LIMITS_SUCCESSFUL],
rate_limits[ATTR_PUSH_RATE_LIMITS_MAXIMUM],
rate_limits[ATTR_PUSH_RATE_LIMITS_ERRORS],
str(resetsAtTime).split(".")[0],
)
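# A minimal illustrative sketch (not from the original source; the device name
# and numbers are made up): rendered with example values, the rate-limit log
# line built above looks like the string returned here.
def _example_rate_limit_log_line():
    rate_limit_msg = (
        "mobile_app push notification rate limits for %s: "
        "%d sent, %d allowed, %d errors, "
        "resets in %s"
    )
    return rate_limit_msg % ("pixel_6", 5, 150, 0, "2:13:05")
    # -> 'mobile_app push notification rate limits for pixel_6:
    #     5 sent, 150 allowed, 0 errors, resets in 2:13:05'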
async def async_get_service(hass, config, discovery_info=None):
"""Get the mobile_app notification service."""
service = hass.data[DOMAIN][DATA_NOTIFY] = MobileAppNotificationService(hass)
return service
class MobileAppNotificationService(BaseNotificationService):
"""Implement the notification service for mobile_app."""
def __init__(self, hass):
"""Initialize the service."""
self._hass = hass
@property
def targets(self):
"""Return a dictionary of registered targets."""
return push_registrations(self.hass)
async def async_send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
# Remove default title from notifications.
if (
kwargs.get(ATTR_TITLE) is not None
and kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT
):
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = push_registrations(self.hass).values()
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
local_push_channels = self.hass.data[DOMAIN][DATA_PUSH_CHANNEL]
for target in targets:
if target in local_push_channels:
local_push_channels[target](data)
continue
entry = self.hass.data[DOMAIN][DATA_CONFIG_ENTRIES][target]
entry_data = entry.data
app_data = entry_data[ATTR_APP_DATA]
push_token = app_data[ATTR_PUSH_TOKEN]
push_url = app_data[ATTR_PUSH_URL]
target_data = dict(data)
target_data[ATTR_PUSH_TOKEN] = push_token
reg_info = {
ATTR_APP_ID: entry_data[ATTR_APP_ID],
ATTR_APP_VERSION: entry_data[ATTR_APP_VERSION],
}
if ATTR_OS_VERSION in entry_data:
reg_info[ATTR_OS_VERSION] = entry_data[ATTR_OS_VERSION]
target_data["registration_info"] = reg_info
try:
with async_timeout.timeout(10):
response = await async_get_clientsession(self._hass).post(
push_url, json=target_data
)
result = await response.json()
if response.status in [HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED]:
log_rate_limits(self.hass, entry_data[ATTR_DEVICE_NAME], result)
continue
fallback_error = result.get("errorMessage", "Unknown error")
fallback_message = (
f"Internal server error, please try again later: {fallback_error}"
)
message = result.get("message", fallback_message)
if "message" in result:
if message[-1] not in [".", "?", "!"]:
message += "."
message += (
" This message is generated externally to Home Assistant."
)
if response.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(message)
log_rate_limits(
self.hass, entry_data[ATTR_DEVICE_NAME], result, logging.WARNING
)
else:
_LOGGER.error(message)
except asyncio.TimeoutError:
_LOGGER.error("Timeout sending notification to %s", push_url)
except aiohttp.ClientError as err:
_LOGGER.error("Error sending notification to %s: %r", push_url, err)
| 31.697297
| 88
| 0.625853
|
715d5664c93f1bf62f6077e117761199b0dbf1aa
| 541
|
py
|
Python
|
launch/minimal.launch.py
|
mlherd/turtlebot2_ros2
|
0ecf31cd81bed1a269916e963b2a788221bd845e
|
[
"Apache-2.0"
] | null | null | null |
launch/minimal.launch.py
|
mlherd/turtlebot2_ros2
|
0ecf31cd81bed1a269916e963b2a788221bd845e
|
[
"Apache-2.0"
] | null | null | null |
launch/minimal.launch.py
|
mlherd/turtlebot2_ros2
|
0ecf31cd81bed1a269916e963b2a788221bd845e
|
[
"Apache-2.0"
] | null | null | null |
from launch import LaunchDescription
import launch.actions
import launch_ros.actions
def generate_launch_description():
return LaunchDescription([
launch.actions.LogInfo(
msg="ROS2 start turtlebot_bringup minimal node."
),
launch_ros.actions.Node(
package="ydlidar",
node_executable="ydlidar_node",
output="screen",
),
launch_ros.actions.Node(
package="turtlebot_bringup",
node_executable="turtlebot2",
output="screen",
),
])
| 21.64
| 57
| 0.63586
|
4df6b79d25cf2380b55fe9d3c5a3b8c00d828c1d
| 962
|
py
|
Python
|
friends/migrations/0003_friendrequest.py
|
DK-Nguyen/Django_Social_Network
|
6061e28b7574a612a71ba2661eabf6d024b930cd
|
[
"MIT"
] | 14
|
2020-12-05T08:20:21.000Z
|
2022-03-07T12:18:40.000Z
|
friends/migrations/0003_friendrequest.py
|
DK-Nguyen/Django_Social_Network
|
6061e28b7574a612a71ba2661eabf6d024b930cd
|
[
"MIT"
] | 1
|
2021-02-22T17:48:10.000Z
|
2021-02-22T17:48:10.000Z
|
friends/migrations/0003_friendrequest.py
|
DK-Nguyen/Django_Social_Network
|
6061e28b7574a612a71ba2661eabf6d024b930cd
|
[
"MIT"
] | 13
|
2020-10-20T09:32:46.000Z
|
2022-01-02T00:27:51.000Z
|
# Generated by Django 2.1.2 on 2018-11-11 12:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('friends', '0002_auto_20181111_1249'),
]
operations = [
migrations.CreateModel(
name='FriendRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='from_user', to=settings.AUTH_USER_MODEL)),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='to_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| 37
| 149
| 0.665281
|
1a1e30b443a70d9c9d91e163587ba717da36c5bf
| 1,690
|
py
|
Python
|
tempest/stress/actions/server_create_destroy.py
|
citrix-openstack-build/tempest
|
385f0b116e8f02d24338e0f11f4ae3ccf2edd661
|
[
"Apache-2.0"
] | null | null | null |
tempest/stress/actions/server_create_destroy.py
|
citrix-openstack-build/tempest
|
385f0b116e8f02d24338e0f11f4ae3ccf2edd661
|
[
"Apache-2.0"
] | null | null | null |
tempest/stress/actions/server_create_destroy.py
|
citrix-openstack-build/tempest
|
385f0b116e8f02d24338e0f11f4ae3ccf2edd661
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils.data_utils import rand_name
import tempest.stress.stressaction as stressaction
class ServerCreateDestroyTest(stressaction.StressAction):
def setUp(self, **kwargs):
self.image = self.manager.config.compute.image_ref
self.flavor = self.manager.config.compute.flavor_ref
def run(self):
name = rand_name("instance")
self.logger.info("creating %s" % name)
resp, server = self.manager.servers_client.create_server(
name, self.image, self.flavor)
server_id = server['id']
assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id,
'ACTIVE')
self.logger.info("created %s" % server_id)
self.logger.info("deleting %s" % name)
resp, _ = self.manager.servers_client.delete_server(server_id)
assert(resp.status == 204)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted %s" % server_id)
| 42.25
| 77
| 0.681065
|
798803e076d2e5ca04b9d22a04051df99ba64bc5
| 82
|
py
|
Python
|
bot/error_classes/__init__.py
|
NMisko/monkalot
|
965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b
|
[
"MIT"
] | 20
|
2017-09-08T21:13:38.000Z
|
2022-01-29T03:24:13.000Z
|
bot/error_classes/__init__.py
|
NMisko/monkalot
|
965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b
|
[
"MIT"
] | 32
|
2017-08-20T17:46:14.000Z
|
2021-11-18T22:54:59.000Z
|
bot/error_classes/__init__.py
|
NMisko/monkalot
|
965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b
|
[
"MIT"
] | 10
|
2017-08-19T01:13:41.000Z
|
2021-08-07T08:45:30.000Z
|
""" Package for custom classes."""
from .error_classes import UserNotFoundError
| 16.4
| 44
| 0.768293
|
381731d57d1bd9397e2803e2fa8594ea4771e1ca
| 1,027
|
py
|
Python
|
analyze.py
|
Term-inator/Brain-Tumor-Detection
|
b59715092cca7a17b589b5d906983eb42ee4ad87
|
[
"MIT"
] | null | null | null |
analyze.py
|
Term-inator/Brain-Tumor-Detection
|
b59715092cca7a17b589b5d906983eb42ee4ad87
|
[
"MIT"
] | null | null | null |
analyze.py
|
Term-inator/Brain-Tumor-Detection
|
b59715092cca7a17b589b5d906983eb42ee4ad87
|
[
"MIT"
] | null | null | null |
import numpy as np
seed_list = [31, 37, 41, 42, 43, 47, 53]
type_list = ['tumor', 'T1SS', 'T2SS', 'T1', '2label', 'randT1', 'randTumor']
def get_test_result(test_scores):
print(f'{np.round(np.mean(test_scores, axis=0), decimals=4)}')
for type in type_list:
for epochs in range(10, 61, 10):
all_scores = []
for seed in seed_list:
directory = f'../output_V100/seed{seed}/{type}/{type}_seed{seed}-ep{epochs}/'
filename = 'train.log'
with open(directory + filename, 'r') as f:
line = f.readlines()[-3]
l = line.find('[')
r = line.find(']')
scores = []
for score in line[l + 1: r].strip().split(' '):
if score != '':
scores.append(score)
# print(scores)
all_scores.append(np.array(scores).astype('float64'))
print(f'{type} {epochs}: ', end='')
get_test_result(np.array(all_scores))
print()
| 33.129032
| 89
| 0.508277
|
e5205b6f20a239e93a1300c5f360763227e6d7fe
| 3,883
|
py
|
Python
|
tests/test_extraction_synthesis.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 121
|
2020-09-01T21:55:22.000Z
|
2022-03-28T11:38:43.000Z
|
tests/test_extraction_synthesis.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 49
|
2020-09-08T19:02:16.000Z
|
2022-02-11T20:52:40.000Z
|
tests/test_extraction_synthesis.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 16
|
2020-09-09T00:45:30.000Z
|
2022-02-25T01:53:53.000Z
|
# This test code was modified from code written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
import numpy as np
import pytest
import torch
from art.estimators.classification import BlackBoxClassifier
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import privacyraven.extraction.synthesis
import privacyraven.utils.query
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils import model_creation, query
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target
"""
The synthesis tests rely on sampling data from a model.
We train one and return a query function here, rather than inside a
separate function, in order to minimize the cycles dedicated to
training this model.
"""
device = torch.device("cpu")
model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
art_model = BlackBoxClassifier(
predict=query,
input_shape=(1, 28, 28, 1),
nb_classes=10,
clip_values=None, # (0, 255),
preprocessing_defences=None,
postprocessing_defences=None,
preprocessing=(0, 1), # None,
)
def query_mnist(input_data):
return privacyraven.utils.query.get_target(model, input_data, (1, 28, 28, 1))
def valid_query():
return st.just(query_mnist)
def valid_data():
return arrays(np.float64, (10, 28, 28, 1), st.floats())
@settings(deadline=None)
@given(
data=valid_data(),
query=st.just(query_mnist),
query_limit=st.integers(10, 25),
art_model=st.just(art_model),
victim_input_shape=st.just((1, 28, 28, 1)),
substitute_input_shape=st.just((3, 1, 28, 28)),
victim_output_targets=st.just(10),
)
def test_copycat_preserves_shapes(
data,
query,
query_limit,
art_model,
victim_input_shape,
substitute_input_shape,
victim_output_targets,
):
# data = torch.from_numpy(data).detach().clone().float()
data = privacyraven.extraction.synthesis.process_data(data, query_limit)
x_data, y_data = privacyraven.extraction.synthesis.copycat(
data=data,
query=query,
query_limit=query_limit,
art_model=art_model,
victim_input_shape=victim_input_shape,
substitute_input_shape=substitute_input_shape,
victim_output_targets=victim_output_targets,
)
x_1 = x_data.size()
y_1 = y_data.size()
assert x_1 == torch.Size([10, 1, 28, 28])
assert y_1 == torch.Size([10])
@given(data=valid_data(), query_limit=st.integers(10, 25))
def process_data_preserves_shape_and_type(data, query_limit):
processed_data = privacyraven.extraction.synthesis.process_data(
data=data, query_limit=query_limit
)
(x, y) = processed_data
assert x.size() == torch.Size([10, 28, 28, 1])
assert x.type() == torch.FloatTensor
"""
This is error-prone, but should be fixed eventually
@given(
data=valid_data(),
query=st.just(query_mnist),
query_limit=st.integers(10, 25),
victim_input_shape=st.just((1, 28, 28, 1)),
substitute_input_shape=st.just((1, 3, 28, 28)),
victim_input_targets=st.just(10),
)
def test_fuzz_hopskipjump(
data,
query,
query_limit,
victim_input_shape,
substitute_input_shape,
victim_input_targets,
):
data = torch.from_numpy(data).detach().clone().float()
data = privacyraven.extraction.synthesis.process_data(data, query_limit)
x_data, y_data = privacyraven.extraction.synthesis.hopskipjump(
data=data,
query=query,
query_limit=query_limit,
victim_input_shape=victim_input_shape,
substitute_input_shape=substitute_input_shape,
victim_input_targets=victim_input_targets,
)
print(x_data.size())
print(y_data.size())
"""
| 29.641221
| 92
| 0.726243
|
c2763c7a85f3334b1c58ee999328017be0957ac5
| 2,708
|
py
|
Python
|
training/dataloader_lightning.py
|
denix56/stylegan2-ada-pytorch
|
9df6a704a5dd1db779966307422b70c293b393fc
|
[
"BSD-Source-Code"
] | 1
|
2021-02-24T19:24:04.000Z
|
2021-02-24T19:24:04.000Z
|
training/dataloader_lightning.py
|
denix56/stylegan2-ada-pytorch
|
9df6a704a5dd1db779966307422b70c293b393fc
|
[
"BSD-Source-Code"
] | null | null | null |
training/dataloader_lightning.py
|
denix56/stylegan2-ada-pytorch
|
9df6a704a5dd1db779966307422b70c293b393fc
|
[
"BSD-Source-Code"
] | null | null | null |
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import dnnlib
import numpy as np
from .dataset import worker_init_fn
class StyleGANDataModule(pl.LightningDataModule):
def __init__(self, batch_size_per_unit, training_set_kwargs, data_loader_kwargs):
super().__init__()
self._batch_size_per_unit = batch_size_per_unit
self._data_loader_kwargs = data_loader_kwargs
shuffle = training_set_kwargs.pop('shuffle', False)
window_size = training_set_kwargs.pop('window_size', 0.5)
self.training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs)
self.worker_init_fn = lambda id: worker_init_fn(id, shuffle, window_size)
self.n_phases = None
self.z_dim = None
def setup_noise_params(self, n_phases, z_dim):
self.n_phases = n_phases
self.z_dim = z_dim
@property
def name(self):
return self.training_set.name
def _get_noise(self, batch: torch.Tensor) -> (torch.Tensor, torch.Tensor):
assert self.n_phases
assert self.z_dim
imgs, _ = batch
batch_size = imgs.shape[0]
all_gen_z = torch.randn([self.n_phases, batch_size, self.z_dim], device=imgs.device)
all_gen_c = self.training_set.get_label(np.random.randint(self.training_set.get_len(),
size=self.n_phases*batch_size))
if len(all_gen_c.shape) == 1:
all_gen_c = all_gen_c.reshape((self.n_phases, batch_size))
else:
all_gen_c = all_gen_c.reshape((self.n_phases, batch_size) + all_gen_c.shape[1:])
all_gen_c = torch.tensor(all_gen_c, device=imgs.device)
return all_gen_z, all_gen_c
def train_dataloader(self):
return DataLoader(dataset=self.training_set, batch_size=self._batch_size_per_unit, worker_init_fn=self.worker_init_fn,
persistent_workers=True, **self._data_loader_kwargs)
def val_dataloader(self):
return DataLoader(dataset=self.training_set, batch_size=self._batch_size_per_unit, worker_init_fn=self.worker_init_fn,
persistent_workers=True, **self._data_loader_kwargs)
def test_dataloader(self):
return DataLoader(dataset=self.training_set, batch_size=self._batch_size_per_unit, worker_init_fn=self.worker_init_fn,
persistent_workers=True, **self._data_loader_kwargs)
def on_after_batch_transfer(self, batch, dataloader_idx):
batch[0] = batch[0].to(torch.float32) / 127.5 - 1
all_gen_z, all_gen_c = self._get_noise(batch)
batch = tuple(batch) + (all_gen_z, all_gen_c)
return batch
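# A minimal illustrative sketch (not from the original source): the
# on_after_batch_transfer hook above rescales uint8 images from [0, 255] to
# [-1, 1], e.g. 0 -> -1.0, 51 -> -0.6, 255 -> 1.0.
def _example_image_rescale():
    imgs = torch.tensor([0, 51, 255], dtype=torch.uint8)
    rescaled = imgs.to(torch.float32) / 127.5 - 1
    assert torch.allclose(rescaled, torch.tensor([-1.0, -0.6, 1.0]))
    return rescaled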
| 42.3125
| 126
| 0.688331
|
cf8d36174783f3deb1919c0c05c6091b6805374c
| 8,890
|
py
|
Python
|
droid_slam/visualization.py
|
TruongKhang/DROID-SLAM
|
7a2aa9a2b4b04952dd095bdb562f140c899ca8a7
|
[
"BSD-3-Clause"
] | null | null | null |
droid_slam/visualization.py
|
TruongKhang/DROID-SLAM
|
7a2aa9a2b4b04952dd095bdb562f140c899ca8a7
|
[
"BSD-3-Clause"
] | null | null | null |
droid_slam/visualization.py
|
TruongKhang/DROID-SLAM
|
7a2aa9a2b4b04952dd095bdb562f140c899ca8a7
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn.functional as F
import cv2
import lietorch
import droid_backends
import time
import argparse
import numpy as np
import open3d as o3d
from lietorch import SE3
import geom.projective_ops as pops
import depth_fusion
CAM_POINTS = np.array([
[ 0, 0, 0],
[-1, -1, 1.5],
[ 1, -1, 1.5],
[ 1, 1, 1.5],
[-1, 1, 1.5],
[-0.5, 1, 1.5],
[ 0.5, 1, 1.5],
[ 0, 1.2, 1.5]]) / 3
CAM_LINES = np.array([
[1,2], [2,3], [3,4], [4,1], [1,0], [0,2], [3,0], [0,4], [5,7], [7,6]])
def white_balance(img):
# from https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption
result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
avg_a = np.average(result[:, :, 1])
avg_b = np.average(result[:, :, 2])
result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)
return result
def create_camera_actor(g, scale=0.05):
""" build open3d camera polydata """
camera_actor = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(scale * CAM_POINTS),
lines=o3d.utility.Vector2iVector(CAM_LINES))
color = (g * 1.0, 0.5 * (1-g), 0.9 * (1-g))
camera_actor.paint_uniform_color(color)
return camera_actor
def create_point_actor(points, colors):
""" open3d point cloud from numpy array """
point_cloud = o3d.geometry.PointCloud()
point_cloud.points = o3d.utility.Vector3dVector(points)
point_cloud.colors = o3d.utility.Vector3dVector(colors)
return point_cloud
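# A minimal usage sketch (not from the original source; the random data is
# made up): both helpers above return Open3D geometries that can be handed
# straight to a visualizer.
def _example_point_and_camera_actors():
    pts = np.random.rand(100, 3)
    clr = np.random.rand(100, 3)
    cloud = create_point_actor(pts, clr)
    cam = create_camera_actor(g=1.0)
    # o3d.visualization.draw_geometries([cloud, cam])  # uncomment to view
    return cloud, cam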
def droid_visualization(video, device="cuda:0"):
""" DROID visualization frontend """
torch.cuda.set_device(device)
droid_visualization.video = video
droid_visualization.cameras = {}
droid_visualization.points = {}
droid_visualization.warmup = 8
droid_visualization.scale = 1.0
droid_visualization.ix = 0
droid_visualization.filter_thresh = 0.01
droid_visualization.thresh_view = 3
droid_visualization.thresh_disp = 1.0
intr_params = video.intrinsics[0] * 16
fx, fy, cx, cy = intr_params.cpu().numpy()
intr_mat = torch.tensor([[fx, 0, cx, 0], [0, fy, cy, 0],
[0, 0, 1, 0], [0, 0, 0, 1]], dtype=torch.float32, device=device)
def increase_filter(vis):
droid_visualization.filter_thresh *= 2
with droid_visualization.video.get_lock():
droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
def decrease_filter(vis):
droid_visualization.filter_thresh *= 0.5
with droid_visualization.video.get_lock():
droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
def animation_callback(vis):
cam = vis.get_view_control().convert_to_pinhole_camera_parameters()
with torch.no_grad():
with video.get_lock():
t = video.counter.value
dirty_index, = torch.where(video.dirty.clone())
dirty_index = dirty_index
if (len(dirty_index) == 0) or (intr_mat is None):
return
video.dirty[dirty_index] = False
# convert poses to 4x4 matrix
poses = torch.index_select(video.poses, 0, dirty_index)
poses = SE3(poses).matrix() # [N, 4, 4]
# disps = torch.index_select(video.disps, 0, dirty_index)
"""disps = torch.index_select(video.disps_up, 0, dirty_index).clone()
Ps = SE3(poses).inv().matrix().cpu().numpy()
images = torch.index_select(video.images, 0, dirty_index).clone()
# images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
# images = F.interpolate(images, scale_factor=0.5)
images = images.cpu()[:, [2, 1, 0]].permute(0, 2, 3, 1) / 255.0
points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]*8).cpu()
thresh = droid_visualization.filter_thresh * torch.ones_like(disps.mean(dim=[1,2]))
count = droid_backends.depth_filter(
video.poses, video.disps_up, video.intrinsics[0]*8, dirty_index, thresh)
count = count.cpu()
disps = disps.cpu()
masks = ((count >= 2) & (disps > .5*disps.mean(dim=[1,2], keepdim=True)))"""
depths = torch.index_select(video.disps_up, 0, dirty_index).unsqueeze(1).unsqueeze(0)
intr_matrices = intr_mat.unsqueeze(0).repeat(poses.size(0), 1, 1)
proj_matrices = torch.stack((poses, intr_matrices), dim=1).unsqueeze(0) # [N, 2, 4, 4]
ref_depth, src_depths = depths[:, -1, ...], depths[:, :-1, ...]
ref_cam, src_cams = proj_matrices[:, -1, ...], proj_matrices[:, :-1, ...]
reproj_xyd, in_range = depth_fusion.get_reproj(ref_depth, src_depths, ref_cam, src_cams)
vis_masks, vis_mask = depth_fusion.vis_filter(ref_depth, reproj_xyd, in_range,
droid_visualization.thresh_disp,
droid_visualization.filter_thresh, droid_visualization.thresh_view)
ref_depth_avg = depth_fusion.ave_fusion(ref_depth, reproj_xyd, vis_masks)
# ref_image = video.images[dirty_index[-1]]
ref_image = video.ref_image[0].permute(1, 2, 0) #/ 255.0
idx_img = depth_fusion.get_pixel_grids(*ref_depth_avg.size()[-2:]).unsqueeze(0)
idx_cam = depth_fusion.idx_img2cam(idx_img, ref_depth_avg, ref_cam)
points = depth_fusion.idx_cam2world(idx_cam, ref_cam)[..., :3, 0]
ref_pose = torch.inverse(poses[-1]).cpu().numpy()
ix = dirty_index[-1].item()
if len(droid_visualization.cameras.keys()) > 1:
if ix in droid_visualization.cameras:
vis.remove_geometry(droid_visualization.cameras[ix])
del droid_visualization.cameras[ix]
if ix in droid_visualization.points:
vis.remove_geometry(droid_visualization.points[ix])
del droid_visualization.points[ix]
### add camera actor ###
cam_actor = create_camera_actor(True)
cam_actor.transform(ref_pose)
vis.add_geometry(cam_actor)
droid_visualization.cameras[ix] = cam_actor
mask = vis_mask.reshape(-1)
pts = points.reshape(-1, 3)[mask].cpu().numpy()
clr = ref_image.reshape(-1, 3)[mask].cpu().numpy()
## add point actor ###
point_actor = create_point_actor(pts, clr)
vis.add_geometry(point_actor)
droid_visualization.points[ix] = point_actor
# for i in range(len(dirty_index)):
# pose = Ps[i]
# ix = dirty_index[i].item()
#
# if ix in droid_visualization.cameras:
# vis.remove_geometry(droid_visualization.cameras[ix])
# del droid_visualization.cameras[ix]
#
# if ix in droid_visualization.points:
# vis.remove_geometry(droid_visualization.points[ix])
# del droid_visualization.points[ix]
#
# ### add camera actor ###
# cam_actor = create_camera_actor(True)
# cam_actor.transform(pose)
# vis.add_geometry(cam_actor)
# droid_visualization.cameras[ix] = cam_actor
#
# mask = masks[i].reshape(-1)
# pts = points[i].reshape(-1, 3)[mask].numpy()
# clr = images[i].reshape(-1, 3)[mask].numpy()
#
# ## add point actor ###
# point_actor = create_point_actor(pts, clr)
# vis.add_geometry(point_actor)
# droid_visualization.points[ix] = point_actor
# hack to allow interacting with vizualization during inference
if len(droid_visualization.cameras) >= droid_visualization.warmup:
cam = vis.get_view_control().convert_from_pinhole_camera_parameters(cam)
droid_visualization.ix += 1
vis.poll_events()
vis.update_renderer()
### create Open3D visualization ###
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.register_animation_callback(animation_callback)
vis.register_key_callback(ord("S"), increase_filter)
vis.register_key_callback(ord("A"), decrease_filter)
vis.create_window(height=540, width=960)
vis.get_render_option().load_from_json("misc/renderoption.json")
vis.run()
vis.destroy_window()
| 42.740385
| 125
| 0.593476
|
69b4bd44be9a0776a4d080958f11e76ebdf4f20c
| 42,284
|
py
|
Python
|
src/sentry/event_manager.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 1
|
2021-04-04T07:26:13.000Z
|
2021-04-04T07:26:13.000Z
|
src/sentry/event_manager.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/event_manager.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from collections import OrderedDict
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text
from hashlib import md5
from uuid import uuid4
from sentry import eventtypes, features, buffer
# we need a bunch of unexposed functions from tsdb
from sentry.tsdb import backend as tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, LOG_LEVELS_MAP, DEFAULT_LOG_LEVEL,
DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH, VALID_PLATFORMS
)
from sentry.interfaces.base import get_interface, InterfaceValidationError
from sentry.interfaces.schemas import validate_and_default_interface
from sentry.models import (
Activity, Environment, Event, EventError, EventMapping, EventUser, Group,
GroupEnvironment, GroupHash, GroupRelease, GroupResolution, GroupStatus,
Project, Release, ReleaseEnvironment, ReleaseProject, UserReport
)
from sentry.plugins import plugins
from sentry.signals import event_discarded, event_saved, first_event_received, regression_signal
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils import metrics
from sentry.utils.cache import default_cache
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict, get_path
from sentry.utils.strings import truncatechars
from sentry.utils.validators import is_float
from sentry.stacktraces import normalize_in_app
DEFAULT_FINGERPRINT_VALUES = frozenset(['{{ default }}', '{{default}}'])
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
def md5_from_hash(hash_bits):
result = md5()
for bit in hash_bits:
result.update(force_bytes(bit, errors='replace'))
return result.hexdigest()
def get_fingerprint_for_event(event):
fingerprint = event.data.get('fingerprint')
if fingerprint is None:
return ['{{ default }}']
if isinstance(fingerprint, six.string_types):
return [fingerprint]
return fingerprint
def get_hashes_for_event(event):
return get_hashes_for_event_with_reason(event)[1]
def get_hashes_for_event_with_reason(event):
interfaces = event.get_interfaces()
for interface in six.itervalues(interfaces):
result = interface.compute_hashes(event.platform)
if not result:
continue
return (interface.get_path(), result)
return ('no_interfaces', [''])
def get_grouping_behavior(event):
data = event.data
if 'checksum' in data:
return ('checksum', data['checksum'])
fingerprint = get_fingerprint_for_event(event)
return ('fingerprint', get_hashes_from_fingerprint_with_reason(event, fingerprint))
def get_hashes_from_fingerprint(event, fingerprint):
if any(d in fingerprint for d in DEFAULT_FINGERPRINT_VALUES):
default_hashes = get_hashes_for_event(event)
hash_count = len(default_hashes)
else:
hash_count = 1
hashes = []
for idx in range(hash_count):
result = []
for bit in fingerprint:
if bit in DEFAULT_FINGERPRINT_VALUES:
result.extend(default_hashes[idx])
else:
result.append(bit)
hashes.append(result)
return hashes
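# A minimal worked example (not from the original source; the hash bits are
# hypothetical): a fingerprint mixing the default marker with custom values
# expands each default hash variant in place, appending the custom bits.
def _example_fingerprint_expansion():
    default_hashes = [['frame_a', 'frame_b']]
    fingerprint = ['{{ default }}', 'database-error']
    hashes = []
    for idx in range(len(default_hashes)):
        result = []
        for bit in fingerprint:
            if bit in DEFAULT_FINGERPRINT_VALUES:
                result.extend(default_hashes[idx])
            else:
                result.append(bit)
        hashes.append(result)
    assert hashes == [['frame_a', 'frame_b', 'database-error']]
    return hashes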
def get_hashes_from_fingerprint_with_reason(event, fingerprint):
if any(d in fingerprint for d in DEFAULT_FINGERPRINT_VALUES):
default_hashes = get_hashes_for_event_with_reason(event)
hash_count = len(default_hashes[1])
else:
hash_count = 1
hashes = OrderedDict((bit, []) for bit in fingerprint)
for idx in range(hash_count):
for bit in fingerprint:
if bit in DEFAULT_FINGERPRINT_VALUES:
hashes[bit].append(default_hashes)
else:
hashes[bit] = bit
return list(hashes.items())
if not settings.SENTRY_SAMPLE_DATA:
def should_sample(current_datetime, last_seen, times_seen):
return False
else:
def should_sample(current_datetime, last_seen, times_seen):
silence = current_datetime - last_seen
if times_seen % count_limit(times_seen) == 0:
return False
if times_seen % time_limit(silence) == 0:
return False
return True
def generate_culprit(data, platform=None):
culprit = ''
try:
stacktraces = [
e['stacktrace'] for e in data['sentry.interfaces.Exception']['values']
if e.get('stacktrace')
]
except KeyError:
stacktrace = data.get('sentry.interfaces.Stacktrace')
if stacktrace:
stacktraces = [stacktrace]
else:
stacktraces = None
if not stacktraces:
if 'sentry.interfaces.Http' in data:
culprit = data['sentry.interfaces.Http'].get('url', '')
else:
from sentry.interfaces.stacktrace import Stacktrace
culprit = Stacktrace.to_python(stacktraces[-1]).get_culprit_string(
platform=platform,
)
return truncatechars(culprit, MAX_CULPRIT_LENGTH)
def plugin_is_regression(group, event):
project = event.project
for plugin in plugins.for_project(project):
result = safe_execute(
plugin.is_regression, group, event, version=1, _with_transaction=False
)
if result is not None:
return result
return True
def process_timestamp(value, current_datetime=None):
if is_float(value):
try:
value = datetime.fromtimestamp(float(value))
except Exception:
raise InvalidTimestamp(
'Invalid value for timestamp: %r' % value)
elif not isinstance(value, datetime):
# all timestamps are in UTC, but the marker is optional
if value.endswith('Z'):
value = value[:-1]
if '.' in value:
# Python doesn't support long microsecond values
# https://github.com/getsentry/sentry/issues/1610
ts_bits = value.split('.', 1)
value = '%s.%s' % (ts_bits[0], ts_bits[1][:2])
fmt = '%Y-%m-%dT%H:%M:%S.%f'
else:
fmt = '%Y-%m-%dT%H:%M:%S'
try:
value = datetime.strptime(value, fmt)
except Exception:
raise InvalidTimestamp(
'Invalid value for timestamp: %r' % value)
if current_datetime is None:
current_datetime = datetime.now()
if value > current_datetime + timedelta(minutes=1):
raise InvalidTimestamp(
'Invalid value for timestamp (in future): %r' % value)
if value < current_datetime - timedelta(days=30):
raise InvalidTimestamp(
'Invalid value for timestamp (too old): %r' % value)
return float(value.strftime('%s'))
class HashDiscarded(Exception):
pass
class ScoreClause(object):
def __init__(self, group):
self.group = group
def __int__(self):
# Calculate the score manually when coercing to an int.
# This is used within create_or_update and friends
return self.group.get_score()
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
# XXX: if we cant do it atomically let's do it the best we can
sql = int(self)
return (sql, [])
@classmethod
def calculate(cls, times_seen, last_seen):
return math.log(times_seen) * 600 + float(last_seen.strftime('%s'))
class InvalidTimestamp(Exception):
pass
class EventManager(object):
logger = logging.getLogger('sentry.events')
def __init__(self, data, version='5'):
self.data = data
self.version = version
def normalize(self, request_env=None):
request_env = request_env or {}
data = self.data
errors = data['errors'] = []
# Before validating with a schema, attempt to cast values to their desired types
# so that the schema doesn't have to take every type variation into account.
text = six.text_type
fp_types = six.string_types + six.integer_types + (float, )
def to_values(v):
return {'values': v} if v and isinstance(v, (tuple, list)) else v
casts = {
'environment': lambda v: text(v) if v is not None else v,
'fingerprint': lambda v: list(map(text, v)) if isinstance(v, list) and all(isinstance(f, fp_types) for f in v) else v,
'release': lambda v: text(v) if v is not None else v,
'dist': lambda v: text(v).strip() if v is not None else v,
'time_spent': lambda v: int(v) if v is not None else v,
'tags': lambda v: [(text(v_k).replace(' ', '-').strip(), text(v_v).strip()) for (v_k, v_v) in dict(v).items()],
'timestamp': lambda v: process_timestamp(v),
'platform': lambda v: v if v in VALID_PLATFORMS else 'other',
'sentry.interfaces.Message': lambda v: v if isinstance(v, dict) else {'message': v},
# These can be sent as lists and need to be converted to {'values': [...]}
'exception': to_values,
'sentry.interfaces.Exception': to_values,
'breadcrumbs': to_values,
'sentry.interfaces.Breadcrumbs': to_values,
'threads': to_values,
'sentry.interfaces.Threads': to_values,
}
for c in casts:
if c in data:
try:
data[c] = casts[c](data[c])
except Exception as e:
errors.append({'type': EventError.INVALID_DATA, 'name': c, 'value': data[c]})
del data[c]
# raw 'message' is coerced to the Message interface, as its used for pure index of
# searchable strings. If both a raw 'message' and a Message interface exist, try and
# add the former as the 'formatted' attribute of the latter.
# See GH-3248
msg_str = data.pop('message', None)
if msg_str:
msg_if = data.setdefault('sentry.interfaces.Message', {'message': msg_str})
if msg_if.get('message') != msg_str:
msg_if.setdefault('formatted', msg_str)
# Fill in ip addresses marked as {{auto}}
client_ip = request_env.get('client_ip')
if client_ip:
if get_path(data, ['sentry.interfaces.Http', 'env', 'REMOTE_ADDR']) == '{{auto}}':
data['sentry.interfaces.Http']['env']['REMOTE_ADDR'] = client_ip
if get_path(data, ['request', 'env', 'REMOTE_ADDR']) == '{{auto}}':
data['request']['env']['REMOTE_ADDR'] = client_ip
if get_path(data, ['sentry.interfaces.User', 'ip_address']) == '{{auto}}':
data['sentry.interfaces.User']['ip_address'] = client_ip
if get_path(data, ['user', 'ip_address']) == '{{auto}}':
data['user']['ip_address'] = client_ip
# Validate main event body and tags against schema
is_valid, event_errors = validate_and_default_interface(data, 'event')
errors.extend(event_errors)
if 'tags' in data:
is_valid, tag_errors = validate_and_default_interface(data['tags'], 'tags', name='tags')
errors.extend(tag_errors)
# Validate interfaces
for k in list(iter(data)):
if k in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(k)
if not value:
self.logger.debug('Ignored empty interface value: %s', k)
continue
try:
interface = get_interface(k)
except ValueError:
self.logger.debug('Ignored unknown attribute: %s', k)
errors.append({'type': EventError.INVALID_ATTRIBUTE, 'name': k})
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception as e:
log = self.logger.debug if isinstance(
e, InterfaceValidationError) else self.logger.error
log('Discarded invalid value for interface: %s (%r)', k, value, exc_info=True)
errors.append({'type': EventError.INVALID_DATA, 'name': k, 'value': value})
# Additional data coercion and defaulting
level = data.get('level') or DEFAULT_LOG_LEVEL
if isinstance(level, int) or (isinstance(level, six.string_types) and level.isdigit()):
level = LOG_LEVELS.get(int(level), DEFAULT_LOG_LEVEL)
data['level'] = LOG_LEVELS_MAP.get(level, LOG_LEVELS_MAP[DEFAULT_LOG_LEVEL])
if data.get('dist') and not data.get('release'):
data['dist'] = None
timestamp = data.get('timestamp')
if not timestamp:
timestamp = timezone.now()
# TODO (alex) can this all be replaced by utcnow?
# it looks like the only time that this would even be hit is when timestamp
# is not defined, as the earlier process_timestamp already converts existing
# timestamps to floats.
if isinstance(timestamp, datetime):
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
timestamp = float(timestamp.strftime('%s'))
data['timestamp'] = timestamp
data['received'] = float(timezone.now().strftime('%s'))
data.setdefault('checksum', None)
data.setdefault('culprit', None)
data.setdefault('dist', None)
data.setdefault('environment', None)
data.setdefault('extra', {})
data.setdefault('fingerprint', None)
data.setdefault('logger', DEFAULT_LOGGER_NAME)
data.setdefault('platform', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('tags', [])
data.setdefault('transaction', None)
# Fix case where legacy apps pass 'environment' as a tag
# instead of a top level key.
# TODO (alex) save() just reinserts the environment into the tags
if not data.get('environment'):
tagsdict = dict(data['tags'])
if 'environment' in tagsdict:
data['environment'] = tagsdict['environment']
del tagsdict['environment']
data['tags'] = tagsdict.items()
# the SDKs currently do not describe event types, and we must infer
# them from available attributes
data['type'] = eventtypes.infer(data).key
data['version'] = self.version
exception = data.get('sentry.interfaces.Exception')
stacktrace = data.get('sentry.interfaces.Stacktrace')
if exception and len(exception['values']) == 1 and stacktrace:
exception['values'][0]['stacktrace'] = stacktrace
del data['sentry.interfaces.Stacktrace']
        # If there is no User ip_address, update it either from the Http interface
# or the client_ip of the request.
auth = request_env.get('auth')
is_public = auth and auth.is_public
add_ip_platforms = ('javascript', 'cocoa', 'objc')
http_ip = data.get('sentry.interfaces.Http', {}).get('env', {}).get('REMOTE_ADDR')
if http_ip:
data.setdefault('sentry.interfaces.User', {}).setdefault('ip_address', http_ip)
elif client_ip and (is_public or data.get('platform') in add_ip_platforms):
data.setdefault('sentry.interfaces.User', {}).setdefault('ip_address', client_ip)
if client_ip and data.get('sdk'):
data['sdk']['client_ip'] = client_ip
# Trim values
data['logger'] = trim(data['logger'].strip(), 64)
trim_dict(data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
if data['culprit']:
data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
if data['transaction']:
data['transaction'] = trim(data['transaction'], MAX_CULPRIT_LENGTH)
return data
def save(self, project, raw=False):
from sentry.tasks.post_process import index_event_tags
project = Project.objects.get_from_cache(id=project)
data = self.data.copy()
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
level = data.pop('level')
culprit = data.pop('transaction', None)
if not culprit:
culprit = data.pop('culprit', None)
logger_name = data.pop('logger', None)
server_name = data.pop('server_name', None)
site = data.pop('site', None)
checksum = data.pop('checksum', None)
fingerprint = data.pop('fingerprint', None)
platform = data.pop('platform', None)
release = data.pop('release', None)
dist = data.pop('dist', None)
environment = data.pop('environment', None)
# unused
time_spent = data.pop('time_spent', None)
message = data.pop('message', '')
if not culprit:
# if we generate an implicit culprit, lets not call it a
# transaction
transaction_name = None
culprit = generate_culprit(data, platform=platform)
else:
transaction_name = culprit
recorded_timestamp = data.pop('timestamp')
date = datetime.fromtimestamp(recorded_timestamp)
date = date.replace(tzinfo=timezone.utc)
kwargs = {
'platform': platform,
}
event = Event(
project_id=project.id,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
event._project_cache = project
# convert this to a dict to ensure we're only storing one value per key
# as most parts of Sentry dont currently play well with multiple values
tags = dict(data.get('tags') or [])
tags['level'] = LOG_LEVELS[level]
if logger_name:
tags['logger'] = logger_name
if server_name:
tags['server_name'] = server_name
if site:
tags['site'] = site
if environment:
tags['environment'] = environment
if transaction_name:
tags['transaction'] = transaction_name
if release:
# dont allow a conflicting 'release' tag
if 'release' in tags:
del tags['release']
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
tags['sentry:release'] = release.version
if dist and release:
dist = release.add_dist(dist, date)
tags['sentry:dist'] = dist.name
else:
dist = None
event_user = self._get_event_user(project, data)
if event_user:
# dont allow a conflicting 'user' tag
if 'user' in tags:
del tags['user']
tags['sentry:user'] = event_user.tag_value
# At this point we want to normalize the in_app values in case the
# clients did not set this appropriately so far.
normalize_in_app(data)
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
if added_tags:
# plugins should not override user provided tags
for key, value in added_tags:
tags.setdefault(key, value)
# tags are stored as a tuple
tags = tags.items()
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
data['fingerprint'] = fingerprint or ['{{ default }}']
for path, iface in six.iteritems(event.interfaces):
data['tags'].extend(iface.iter_tags())
# Get rid of ephemeral interface data
if iface.ephemeral:
data.pop(iface.get_path(), None)
# prioritize fingerprint over checksum as its likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = [md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint)]
elif checksum:
hashes = [checksum]
data['checksum'] = checksum
else:
hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]
# TODO(dcramer): temp workaround for complexity
data['message'] = message
event_type = eventtypes.get(data.get('type', 'default'))(data)
event_metadata = event_type.get_metadata()
# TODO(dcramer): temp workaround for complexity
del data['message']
data['type'] = event_type.key
data['metadata'] = event_metadata
# index components into ``Event.message``
# See GH-3248
if event_type.key != 'default':
if 'sentry.interfaces.Message' in data and \
data['sentry.interfaces.Message']['message'] != message:
message = u'{} {}'.format(
message,
data['sentry.interfaces.Message']['message'],
)
if not message:
message = ''
elif not isinstance(message, six.string_types):
message = force_text(message)
for value in six.itervalues(event_metadata):
value_u = force_text(value, errors='replace')
if value_u not in message:
message = u'{} {}'.format(message, value_u)
if culprit and culprit not in message:
culprit_u = force_text(culprit, errors='replace')
message = u'{} {}'.format(message, culprit_u)
message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)
event.message = message
kwargs['message'] = message
received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))
group_kwargs = kwargs.copy()
group_kwargs.update(
{
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'active_at': date,
'data': {
'last_received': received_timestamp,
'type':
event_type.key,
# we cache the events metadata on the group to ensure its
# accessible in the stream
'metadata':
event_metadata,
},
}
)
if release:
group_kwargs['first_release'] = release
try:
group, is_new, is_regression, is_sample = self._save_aggregate(
event=event, hashes=hashes, release=release, **group_kwargs
)
except HashDiscarded:
event_discarded.send_robust(
project=project,
sender=EventManager,
)
metrics.incr(
'events.discarded',
skip_internal=True,
tags={
'organization_id': project.organization_id,
'platform': platform,
},
)
raise
else:
event_saved.send_robust(
project=project,
sender=EventManager,
)
event.group = group
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
# When an event was sampled, the canonical source of truth
# is the EventMapping table since we aren't going to be writing out an actual
# Event row. Otherwise, if the Event isn't being sampled, we can safely
# rely on the Event table itself as the source of truth and ignore
# EventMapping since it's redundant information.
if is_sample:
try:
with transaction.atomic(using=router.db_for_write(EventMapping)):
EventMapping.objects.create(project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info(
'duplicate.found',
exc_info=True,
extra={
'event_uuid': event_id,
'project_id': project.id,
'group_id': group.id,
'model': EventMapping.__name__,
}
)
return event
# We now always need to check the Event table for dupes
# since EventMapping isn't exactly the canonical source of truth.
if Event.objects.filter(
project_id=project.id,
event_id=event_id,
).exists():
self.logger.info(
'duplicate.found',
exc_info=True,
extra={
'event_uuid': event_id,
'project_id': project.id,
'group_id': group.id,
'model': Event.__name__,
}
)
return event
environment = Environment.get_or_create(
project=project,
name=environment,
)
group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
group_id=group.id,
environment_id=environment.id,
)
if release:
ReleaseEnvironment.get_or_create(
project=project,
release=release,
environment=environment,
datetime=date,
)
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=environment,
datetime=date,
)
counters = [
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
]
if release:
counters.append((tsdb.models.release, release.id))
tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)
frequencies = [
# (tsdb.models.frequent_projects_by_organization, {
# project.organization_id: {
# project.id: 1,
# },
# }),
# (tsdb.models.frequent_issues_by_project, {
# project.id: {
# group.id: 1,
# },
# })
(tsdb.models.frequent_environments_by_group, {
group.id: {
environment.id: 1,
},
})
]
if release:
frequencies.append(
(tsdb.models.frequent_releases_by_group, {
group.id: {
grouprelease.id: 1,
},
})
)
tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)
UserReport.objects.filter(
project=project,
event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic(using=router.db_for_write(Event)):
event.save()
except IntegrityError:
self.logger.info(
'duplicate.found',
exc_info=True,
extra={
'event_uuid': event_id,
'project_id': project.id,
'group_id': group.id,
'model': Event.__name__,
}
)
return event
index_event_tags.delay(
organization_id=project.organization_id,
project_id=project.id,
group_id=group.id,
environment_id=environment.id,
event_id=event.id,
tags=tags,
)
if event_user:
tsdb.record_multi(
(
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
),
timestamp=event.datetime,
environment_id=environment.id,
)
if is_new and release:
buffer.incr(
ReleaseProject, {'new_groups': 1}, {
'release_id': release.id,
'project_id': project.id,
}
)
safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
first_event_received.send(project=project, group=group, sender=Project)
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
is_new_group_environment=is_new_group_environment,
)
else:
self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
metrics.timing(
'events.latency',
received_timestamp - recorded_timestamp,
tags={
'project_id': project.id,
},
)
return event
def _get_event_user(self, project, data):
user_data = data.get('sentry.interfaces.User')
if not user_data:
return
euser = EventUser(
project_id=project.id,
ident=user_data.get('id'),
email=user_data.get('email'),
username=user_data.get('username'),
ip_address=user_data.get('ip_address'),
name=user_data.get('name'),
)
euser.set_hash()
if not euser.hash:
return
cache_key = 'euserid:1:{}:{}'.format(
project.id,
euser.hash,
)
euser_id = default_cache.get(cache_key)
if euser_id is None:
try:
with transaction.atomic(using=router.db_for_write(EventUser)):
euser.save()
except IntegrityError:
try:
euser = EventUser.objects.get(
project_id=project.id,
hash=euser.hash,
)
except EventUser.DoesNotExist:
# why???
e_userid = -1
else:
if euser.name != (user_data.get('name') or euser.name):
euser.update(
name=user_data['name'],
)
e_userid = euser.id
default_cache.set(cache_key, e_userid, 3600)
return euser
def _find_hashes(self, project, hash_list):
return map(
lambda hash: GroupHash.objects.get_or_create(
project=project,
hash=hash,
)[0],
hash_list,
)
def _ensure_hashes_merged(self, group, hash_list):
# TODO(dcramer): there is a race condition with selecting/updating
# in that another group could take ownership of the hash
# XXX: This function is currently unused, and hasn't been updated to
# take `GroupHash.state` into account.
bad_hashes = GroupHash.objects.filter(
id__in=[h.id for h in hash_list],
).exclude(
group=group,
)
if not bad_hashes:
return
for hash in bad_hashes:
if hash.group_id:
merge_group.delay(
from_object_id=hash.group_id,
to_object_id=group.id,
transaction_id=uuid4().hex,
)
return GroupHash.objects.filter(
project=group.project,
hash__in=[h.hash for h in bad_hashes],
).update(
group=group,
)
def _save_aggregate(self, event, hashes, release, **kwargs):
project = event.project
# attempt to find a matching hash
all_hashes = self._find_hashes(project, hashes)
existing_group_id = None
for h in all_hashes:
if h.group_id is not None:
existing_group_id = h.group_id
break
if h.group_tombstone_id is not None:
raise HashDiscarded('Matches group tombstone %s' % h.group_tombstone_id)
# XXX(dcramer): this has the opportunity to create duplicate groups
# it should be resolved by the hash merging function later but this
# should be better tested/reviewed
if existing_group_id is None:
kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
# it's possible the release was deleted between
# when we queried for the release and now, so
# make sure it still exists
first_release = kwargs.pop('first_release', None)
with transaction.atomic():
short_id = project.next_short_id()
group, group_is_new = Group.objects.create(
project=project,
short_id=short_id,
first_release_id=Release.objects.filter(
id=first_release.id,
).values_list('id', flat=True).first() if first_release else None,
**kwargs
), True
metrics.incr(
'group.created',
skip_internal=True,
tags={'platform': event.platform or 'unknown'}
)
else:
group = Group.objects.get(id=existing_group_id)
group_is_new = False
# If all hashes are brand new we treat this event as new
is_new = False
new_hashes = [h for h in all_hashes if h.group_id is None]
if new_hashes:
# XXX: There is a race condition here wherein another process could
# create a new group that is associated with one of the new hashes,
# add some event(s) to it, and then subsequently have the hash
# "stolen" by this process. This then "orphans" those events from
# their "siblings" in the group we've created here. We don't have a
# way to fix this, since we can't call `_ensure_hashes_merged`
# without filtering on `group_id` (which we can't do due to query
# planner weirdness.) For more context, see 84c6f75a and d0e22787,
# as well as GH-5085.
GroupHash.objects.filter(
id__in=[h.id for h in new_hashes],
).exclude(
state=GroupHash.State.LOCKED_IN_MIGRATION,
).update(group=group)
if group_is_new and len(new_hashes) == len(all_hashes):
is_new = True
# XXX(dcramer): it's important this gets called **before** the aggregate
# is processed as otherwise values like last_seen will get mutated
can_sample = (
features.has('projects:sample-events', project=project) and should_sample(
event.data.get('received') or float(event.datetime.strftime('%s')),
group.data.get('last_received') or float(group.last_seen.strftime('%s')),
group.times_seen,
)
)
if not is_new:
is_regression = self._process_existing_aggregate(
group=group,
event=event,
data=kwargs,
release=release,
)
else:
is_regression = False
# Determine if we've sampled enough data to store this event
if is_new or is_regression:
is_sample = False
else:
is_sample = can_sample
if not is_sample:
GroupHash.record_last_processed_event_id(
all_hashes[0].id,
event.event_id,
)
return group, is_new, is_regression, is_sample
def _handle_regression(self, group, event, release):
if not group.is_resolved():
return
# we only mark it as a regression if the event's release is newer than
# the release which we originally marked this as resolved
elif GroupResolution.has_resolution(group, release):
return
if not plugin_is_regression(group, event):
return
# we now think its a regression, rely on the database to validate that
# no one beat us to this
date = max(event.datetime, group.last_seen)
is_regression = bool(
Group.objects.filter(
id=group.id,
# ensure we cant update things if the status has been set to
# ignored
status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
).exclude(
# add to the regression window to account for races here
active_at__gte=date - timedelta(seconds=5),
).update(
active_at=date,
# explicitly set last_seen here as ``is_resolved()`` looks
# at the value
last_seen=date,
status=GroupStatus.UNRESOLVED
)
)
group.active_at = date
group.status = GroupStatus.UNRESOLVED
if is_regression and release:
# resolutions are only valid if the state of the group is still
# resolved -- if it were to change the resolution should get removed
try:
resolution = GroupResolution.objects.get(
group=group,
)
except GroupResolution.DoesNotExist:
affected = False
else:
cursor = connection.cursor()
# delete() API does not return affected rows
cursor.execute("DELETE FROM sentry_groupresolution WHERE id = %s", [resolution.id])
affected = cursor.rowcount > 0
if affected:
                # if we had to remove the GroupResolution (i.e. we beat the
                # queue to handling this) then we need to also record
# the corresponding event
try:
activity = Activity.objects.filter(
group=group,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
).order_by('-datetime')[0]
except IndexError:
# XXX: handle missing data, as its not overly important
pass
else:
activity.update(data={
'version': release.version,
})
if is_regression:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_REGRESSION,
data={
'version': release.version if release else '',
}
)
activity.send_notification()
return is_regression
def _process_existing_aggregate(self, group, event, data, release):
date = max(event.datetime, group.last_seen)
extra = {
'last_seen': date,
'score': ScoreClause(group),
'data': data['data'],
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != data['level']:
extra['level'] = data['level']
if group.culprit != data['culprit']:
extra['culprit'] = data['culprit']
is_regression = self._handle_regression(group, event, release)
group.last_seen = extra['last_seen']
update_kwargs = {
'times_seen': 1,
}
buffer.incr(Group, update_kwargs, {
'id': group.id,
}, extra)
return is_regression
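# Illustrative call sequence for the manager above (a hedged sketch, not executable on
# its own: `raw_event_data` and `project_id` are placeholders supplied by the ingest
# pipeline, and a configured Django/Sentry environment is assumed):
#
#     manager = EventManager(raw_event_data)
#     manager.normalize()               # coerce types, validate interfaces, apply defaults
#     event = manager.save(project_id)  # group, sample, persist and queue post-processing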
| 35.894737
| 130
| 0.570831
|
e15f588c2d1d95f5fe6f2a5b6474fe7cbba9ef85
| 9,902
|
py
|
Python
|
python/http_client/v1/polyaxon_sdk/models/v1_project_settings.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_project_settings.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_project_settings.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | 1
|
2021-12-03T07:12:03.000Z
|
2021-12-03T07:12:03.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.18.2
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ProjectSettings(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'connections': 'list[str]',
'preset': 'str',
'presets': 'list[str]',
'queue': 'str',
'queues': 'list[str]',
'agents': 'list[str]',
'user_accesses': 'list[V1ProjectUserAccess]',
'teams': 'list[str]',
'projects': 'list[str]'
}
attribute_map = {
'connections': 'connections',
'preset': 'preset',
'presets': 'presets',
'queue': 'queue',
'queues': 'queues',
'agents': 'agents',
'user_accesses': 'user_accesses',
'teams': 'teams',
'projects': 'projects'
}
def __init__(self, connections=None, preset=None, presets=None, queue=None, queues=None, agents=None, user_accesses=None, teams=None, projects=None, local_vars_configuration=None): # noqa: E501
"""V1ProjectSettings - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._connections = None
self._preset = None
self._presets = None
self._queue = None
self._queues = None
self._agents = None
self._user_accesses = None
self._teams = None
self._projects = None
self.discriminator = None
if connections is not None:
self.connections = connections
if preset is not None:
self.preset = preset
if presets is not None:
self.presets = presets
if queue is not None:
self.queue = queue
if queues is not None:
self.queues = queues
if agents is not None:
self.agents = agents
if user_accesses is not None:
self.user_accesses = user_accesses
if teams is not None:
self.teams = teams
if projects is not None:
self.projects = projects
@property
def connections(self):
"""Gets the connections of this V1ProjectSettings. # noqa: E501
:return: The connections of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._connections
@connections.setter
def connections(self, connections):
"""Sets the connections of this V1ProjectSettings.
:param connections: The connections of this V1ProjectSettings. # noqa: E501
:type connections: list[str]
"""
self._connections = connections
@property
def preset(self):
"""Gets the preset of this V1ProjectSettings. # noqa: E501
:return: The preset of this V1ProjectSettings. # noqa: E501
:rtype: str
"""
return self._preset
@preset.setter
def preset(self, preset):
"""Sets the preset of this V1ProjectSettings.
:param preset: The preset of this V1ProjectSettings. # noqa: E501
:type preset: str
"""
self._preset = preset
@property
def presets(self):
"""Gets the presets of this V1ProjectSettings. # noqa: E501
:return: The presets of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._presets
@presets.setter
def presets(self, presets):
"""Sets the presets of this V1ProjectSettings.
:param presets: The presets of this V1ProjectSettings. # noqa: E501
:type presets: list[str]
"""
self._presets = presets
@property
def queue(self):
"""Gets the queue of this V1ProjectSettings. # noqa: E501
:return: The queue of this V1ProjectSettings. # noqa: E501
:rtype: str
"""
return self._queue
@queue.setter
def queue(self, queue):
"""Sets the queue of this V1ProjectSettings.
:param queue: The queue of this V1ProjectSettings. # noqa: E501
:type queue: str
"""
self._queue = queue
@property
def queues(self):
"""Gets the queues of this V1ProjectSettings. # noqa: E501
:return: The queues of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._queues
@queues.setter
def queues(self, queues):
"""Sets the queues of this V1ProjectSettings.
:param queues: The queues of this V1ProjectSettings. # noqa: E501
:type queues: list[str]
"""
self._queues = queues
@property
def agents(self):
"""Gets the agents of this V1ProjectSettings. # noqa: E501
:return: The agents of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._agents
@agents.setter
def agents(self, agents):
"""Sets the agents of this V1ProjectSettings.
:param agents: The agents of this V1ProjectSettings. # noqa: E501
:type agents: list[str]
"""
self._agents = agents
@property
def user_accesses(self):
"""Gets the user_accesses of this V1ProjectSettings. # noqa: E501
:return: The user_accesses of this V1ProjectSettings. # noqa: E501
:rtype: list[V1ProjectUserAccess]
"""
return self._user_accesses
@user_accesses.setter
def user_accesses(self, user_accesses):
"""Sets the user_accesses of this V1ProjectSettings.
:param user_accesses: The user_accesses of this V1ProjectSettings. # noqa: E501
:type user_accesses: list[V1ProjectUserAccess]
"""
self._user_accesses = user_accesses
@property
def teams(self):
"""Gets the teams of this V1ProjectSettings. # noqa: E501
:return: The teams of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._teams
@teams.setter
def teams(self, teams):
"""Sets the teams of this V1ProjectSettings.
:param teams: The teams of this V1ProjectSettings. # noqa: E501
:type teams: list[str]
"""
self._teams = teams
@property
def projects(self):
"""Gets the projects of this V1ProjectSettings. # noqa: E501
:return: The projects of this V1ProjectSettings. # noqa: E501
:rtype: list[str]
"""
return self._projects
@projects.setter
def projects(self, projects):
"""Sets the projects of this V1ProjectSettings.
:param projects: The projects of this V1ProjectSettings. # noqa: E501
:type projects: list[str]
"""
self._projects = projects
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ProjectSettings):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ProjectSettings):
return True
return self.to_dict() != other.to_dict()
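# Illustrative usage (a hedged example; the field values below are placeholders):
#
#     from polyaxon_sdk.models.v1_project_settings import V1ProjectSettings
#
#     settings = V1ProjectSettings(queue="default", presets=["gpu"], teams=["ml"])
#     settings.to_dict()   # plain dict of all declared fields (unset ones are None)
#     settings.to_str()    # pretty-printed form of the same dict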
| 27.736695
| 198
| 0.595738
|
7c3a625a990734f9f1da72b5e7ea1566d8136165
| 2,576
|
py
|
Python
|
pyquil_decomp.py
|
dumkar/learning-to-learn-qnn
|
289518ef1b04acd834055491752d4e3351ffbb53
|
[
"MIT"
] | 37
|
2019-07-19T18:51:36.000Z
|
2021-11-03T14:58:27.000Z
|
pyquil_decomp.py
|
dumkar/learning-to-learn-qnn
|
289518ef1b04acd834055491752d4e3351ffbb53
|
[
"MIT"
] | 1
|
2019-07-23T21:32:13.000Z
|
2019-07-23T21:32:13.000Z
|
pyquil_decomp.py
|
dumkar/learning-to-learn-qnn
|
289518ef1b04acd834055491752d4e3351ffbb53
|
[
"MIT"
] | 4
|
2020-01-06T10:49:10.000Z
|
2021-04-21T07:24:02.000Z
|
import itertools
import numpy as np
from functools import partial
from pyquil import Program, api
from pyquil.paulis import PauliSum, PauliTerm, exponential_map, sZ
from pyquil.gates import *
from scipy.optimize import minimize
import pennylane as qml
from pennylane import numpy as np
np.set_printoptions(precision=3, suppress=True)
import re
def create_circuit(beta, gamma,initial_state,exp_Hm,exp_Hc):
circuit = Program()
circuit += initial_state
for i in range(p):
for term_exp_Hc in exp_Hc:
circuit += term_exp_Hc(-beta[i])
for term_exp_Hm in exp_Hm:
circuit += term_exp_Hm(-gamma[i])
return circuit
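# Added illustration (a hedged sketch, not used by the routines below): shows how
# exponential_map turns a PauliTerm into a parametric circuit factory, which is exactly
# what create_circuit consumes through exp_Hm / exp_Hc.
def _demo_exponential_map(angle=0.5):
    zz_term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)  # cost-style ZZ coupling
    exp_zz = exponential_map(zz_term)      # callable: angle -> pyquil Program
    demo = Program(H(0), H(1))             # start both qubits in |+>
    demo += exp_zz(angle)                  # append exp(-i * angle * Z0 Z1)
    return demo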
# set p beforehand
p = 2
def QAOA_circ(parameters):  # e.g. parameters = np.random.uniform(0, np.pi*2, 2*p)
    beta = parameters[:p]
    gamma = parameters[p:]
    n_qubits = 2  # must match the n_qubits default passed to set_up_QAOA_in_pyquil below
    def set_up_QAOA_in_pyquil(beta, gamma, p, n_qubits=2, J=np.array([[0, 1], [0, 0]])):
Hm = [PauliTerm("X", i, 1.0) for i in range(n_qubits)]
Hc = []
        # Prepare the cost Hamiltonian (matrix form) used for the final expectation value.
        # NOTE: prepare_qaoa_hamiltonian is not defined in this file; it is expected to be
        # provided elsewhere and to return the Hamiltonian matrix built from J.
        Hamilton = prepare_qaoa_hamiltonian(J, n_qubits)
initial_state = Program()
for i in range(n_qubits):
initial_state += H(i)
for i in range(n_qubits):
for j in range(n_qubits):
Hc.append(PauliTerm("Z", i, -J[i, j]) * PauliTerm("Z", j, 1.0))
exp_Hm = []
exp_Hc = []
for term in Hm:
exp_Hm.append(exponential_map(term))
for term in Hc:
exp_Hc.append(exponential_map(term))
qaoa_circuit = create_circuit(beta, gamma,initial_state,exp_Hm,exp_Hc)
return Hamilton,qaoa_circuit
Hamilton,pyquil_circ=set_up_QAOA_in_pyquil(beta, gamma, p)
pyquil_circ_list=str(pyquil_circ).split('\n')
for item in pyquil_circ_list:
u_p_1=None
q_1=None
q_2=None
u_p_2=None
u_p_3=None
        if 'H' in item:
            q_1 = int(item[item.find('H') + 2])   # wire index follows "H "
            qml.Hadamard(wires=q_1)
        elif 'RZ(' in item:
            temp = item.replace('RZ(', '')
            u_p_1 = temp[:temp.find(')')]          # rotation angle inside RZ(...)
            q_1 = int(temp[temp.find(')') + 2])    # wire index after the closing ")"
            qml.RZ(float(u_p_1), wires=q_1)
        elif 'RX' in item:
            pass
        elif 'CNOT' in item:
            temp = item.replace('CNOT ', '')
            q_1 = int(temp[0])                     # control wire
            q_2 = int(temp[2])                     # target wire
            qml.CNOT(wires=[q_1, q_2])
wiress=[i for i in range(n_qubits)]
return qml.expval.Hermitian(Hamilton,wires=wiress)
| 31.414634
| 90
| 0.578028
|
349fd91b966fb95f2ac69f5938d688922861a939
| 21,311
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/v20160801/web_app.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20160801/web_app.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20160801/web_app.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebApp']
class WebApp(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_affinity_enabled: Optional[pulumi.Input[bool]] = None,
client_cert_enabled: Optional[pulumi.Input[bool]] = None,
cloning_info: Optional[pulumi.Input[pulumi.InputType['CloningInfoArgs']]] = None,
container_size: Optional[pulumi.Input[int]] = None,
daily_memory_time_quota: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
host_name_ssl_states: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostNameSslStateArgs']]]]] = None,
host_names_disabled: Optional[pulumi.Input[bool]] = None,
hosting_environment_profile: Optional[pulumi.Input[pulumi.InputType['HostingEnvironmentProfileArgs']]] = None,
https_only: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
reserved: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scm_site_also_stopped: Optional[pulumi.Input[bool]] = None,
server_farm_id: Optional[pulumi.Input[str]] = None,
site_config: Optional[pulumi.Input[pulumi.InputType['SiteConfigArgs']]] = None,
snapshot_info: Optional[pulumi.Input[pulumi.InputType['SnapshotRecoveryRequestArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A web app, a mobile app backend, or an API app.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] client_affinity_enabled: <code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
:param pulumi.Input[bool] client_cert_enabled: <code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
:param pulumi.Input[pulumi.InputType['CloningInfoArgs']] cloning_info: If specified during app creation, the app is cloned from a source app.
:param pulumi.Input[int] container_size: Size of the function container.
:param pulumi.Input[int] daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic apps only).
:param pulumi.Input[bool] enabled: <code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostNameSslStateArgs']]]] host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's hostnames.
:param pulumi.Input[bool] host_names_disabled: <code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
:param pulumi.Input[pulumi.InputType['HostingEnvironmentProfileArgs']] hosting_environment_profile: App Service Environment to use for the app.
:param pulumi.Input[bool] https_only: HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Managed service identity.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
:param pulumi.Input[bool] reserved: <code>true</code> if reserved; otherwise, <code>false</code>.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[bool] scm_site_also_stopped: <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
:param pulumi.Input[str] server_farm_id: Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:param pulumi.Input[pulumi.InputType['SiteConfigArgs']] site_config: Configuration of the app.
:param pulumi.Input[pulumi.InputType['SnapshotRecoveryRequestArgs']] snapshot_info: If specified during app creation, the app is created from a previous snapshot.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['client_affinity_enabled'] = client_affinity_enabled
__props__['client_cert_enabled'] = client_cert_enabled
__props__['cloning_info'] = cloning_info
__props__['container_size'] = container_size
__props__['daily_memory_time_quota'] = daily_memory_time_quota
__props__['enabled'] = enabled
__props__['host_name_ssl_states'] = host_name_ssl_states
__props__['host_names_disabled'] = host_names_disabled
__props__['hosting_environment_profile'] = hosting_environment_profile
__props__['https_only'] = https_only
__props__['identity'] = identity
__props__['kind'] = kind
__props__['location'] = location
__props__['name'] = name
if reserved is None:
reserved = False
__props__['reserved'] = reserved
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if scm_site_also_stopped is None:
scm_site_also_stopped = False
__props__['scm_site_also_stopped'] = scm_site_also_stopped
__props__['server_farm_id'] = server_farm_id
__props__['site_config'] = site_config
__props__['snapshot_info'] = snapshot_info
__props__['tags'] = tags
__props__['availability_state'] = None
__props__['default_host_name'] = None
__props__['enabled_host_names'] = None
__props__['host_names'] = None
__props__['is_default_container'] = None
__props__['last_modified_time_utc'] = None
__props__['max_number_of_workers'] = None
__props__['outbound_ip_addresses'] = None
__props__['possible_outbound_ip_addresses'] = None
__props__['repository_site_name'] = None
__props__['resource_group'] = None
__props__['slot_swap_status'] = None
__props__['state'] = None
__props__['suspended_till'] = None
__props__['target_swap_slot'] = None
__props__['traffic_manager_host_names'] = None
__props__['type'] = None
__props__['usage_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web:WebApp"), pulumi.Alias(type_="azure-nextgen:web/latest:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebApp"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebApp")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebApp, __self__).__init__(
'azure-nextgen:web/v20160801:WebApp',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebApp':
"""
Get an existing WebApp resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebApp(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="availabilityState")
def availability_state(self) -> pulumi.Output[str]:
"""
Management information availability state for the app.
"""
return pulumi.get(self, "availability_state")
@property
@pulumi.getter(name="clientAffinityEnabled")
def client_affinity_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
"""
return pulumi.get(self, "client_affinity_enabled")
@property
@pulumi.getter(name="clientCertEnabled")
def client_cert_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
"""
return pulumi.get(self, "client_cert_enabled")
@property
@pulumi.getter(name="cloningInfo")
def cloning_info(self) -> pulumi.Output[Optional['outputs.CloningInfoResponse']]:
"""
If specified during app creation, the app is cloned from a source app.
"""
return pulumi.get(self, "cloning_info")
@property
@pulumi.getter(name="containerSize")
def container_size(self) -> pulumi.Output[Optional[int]]:
"""
Size of the function container.
"""
return pulumi.get(self, "container_size")
@property
@pulumi.getter(name="dailyMemoryTimeQuota")
def daily_memory_time_quota(self) -> pulumi.Output[Optional[int]]:
"""
Maximum allowed daily memory-time quota (applicable on dynamic apps only).
"""
return pulumi.get(self, "daily_memory_time_quota")
@property
@pulumi.getter(name="defaultHostName")
def default_host_name(self) -> pulumi.Output[str]:
"""
Default hostname of the app. Read-only.
"""
return pulumi.get(self, "default_host_name")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="enabledHostNames")
def enabled_host_names(self) -> pulumi.Output[Sequence[str]]:
"""
Enabled hostnames for the app.Hostnames need to be assigned (see HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
"""
return pulumi.get(self, "enabled_host_names")
@property
@pulumi.getter(name="hostNameSslStates")
def host_name_ssl_states(self) -> pulumi.Output[Optional[Sequence['outputs.HostNameSslStateResponse']]]:
"""
Hostname SSL states are used to manage the SSL bindings for app's hostnames.
"""
return pulumi.get(self, "host_name_ssl_states")
@property
@pulumi.getter(name="hostNames")
def host_names(self) -> pulumi.Output[Sequence[str]]:
"""
Hostnames associated with the app.
"""
return pulumi.get(self, "host_names")
@property
@pulumi.getter(name="hostNamesDisabled")
def host_names_disabled(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
"""
return pulumi.get(self, "host_names_disabled")
@property
@pulumi.getter(name="hostingEnvironmentProfile")
def hosting_environment_profile(self) -> pulumi.Output[Optional['outputs.HostingEnvironmentProfileResponse']]:
"""
App Service Environment to use for the app.
"""
return pulumi.get(self, "hosting_environment_profile")
@property
@pulumi.getter(name="httpsOnly")
def https_only(self) -> pulumi.Output[Optional[bool]]:
"""
HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
"""
return pulumi.get(self, "https_only")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="isDefaultContainer")
def is_default_container(self) -> pulumi.Output[bool]:
"""
<code>true</code> if the app is a default container; otherwise, <code>false</code>.
"""
return pulumi.get(self, "is_default_container")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastModifiedTimeUtc")
def last_modified_time_utc(self) -> pulumi.Output[str]:
"""
Last time the app was modified, in UTC. Read-only.
"""
return pulumi.get(self, "last_modified_time_utc")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfWorkers")
def max_number_of_workers(self) -> pulumi.Output[int]:
"""
Maximum number of workers.
This only applies to Functions container.
"""
return pulumi.get(self, "max_number_of_workers")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> pulumi.Output[str]:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from tenants that site can be hosted with current settings. Read-only.
"""
return pulumi.get(self, "outbound_ip_addresses")
@property
@pulumi.getter(name="possibleOutboundIpAddresses")
def possible_outbound_ip_addresses(self) -> pulumi.Output[str]:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from all tenants. Read-only.
"""
return pulumi.get(self, "possible_outbound_ip_addresses")
@property
@pulumi.getter(name="repositorySiteName")
def repository_site_name(self) -> pulumi.Output[str]:
"""
Name of the repository site.
"""
return pulumi.get(self, "repository_site_name")
@property
@pulumi.getter
def reserved(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> if reserved; otherwise, <code>false</code>.
"""
return pulumi.get(self, "reserved")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> pulumi.Output[str]:
"""
Name of the resource group the app belongs to. Read-only.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="scmSiteAlsoStopped")
def scm_site_also_stopped(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
"""
return pulumi.get(self, "scm_site_also_stopped")
@property
@pulumi.getter(name="serverFarmId")
def server_farm_id(self) -> pulumi.Output[Optional[str]]:
"""
Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
"""
return pulumi.get(self, "server_farm_id")
@property
@pulumi.getter(name="siteConfig")
def site_config(self) -> pulumi.Output[Optional['outputs.SiteConfigResponse']]:
"""
Configuration of the app.
"""
return pulumi.get(self, "site_config")
@property
@pulumi.getter(name="slotSwapStatus")
def slot_swap_status(self) -> pulumi.Output['outputs.SlotSwapStatusResponse']:
"""
Status of the last deployment slot swap operation.
"""
return pulumi.get(self, "slot_swap_status")
@property
@pulumi.getter(name="snapshotInfo")
def snapshot_info(self) -> pulumi.Output[Optional['outputs.SnapshotRecoveryRequestResponse']]:
"""
If specified during app creation, the app is created from a previous snapshot.
"""
return pulumi.get(self, "snapshot_info")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Current state of the app.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="suspendedTill")
def suspended_till(self) -> pulumi.Output[str]:
"""
App suspended till in case memory-time quota is exceeded.
"""
return pulumi.get(self, "suspended_till")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetSwapSlot")
def target_swap_slot(self) -> pulumi.Output[str]:
"""
Specifies which deployment slot this app will swap into. Read-only.
"""
return pulumi.get(self, "target_swap_slot")
@property
@pulumi.getter(name="trafficManagerHostNames")
def traffic_manager_host_names(self) -> pulumi.Output[Sequence[str]]:
"""
Azure Traffic Manager hostnames associated with the app. Read-only.
"""
return pulumi.get(self, "traffic_manager_host_names")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="usageState")
def usage_state(self) -> pulumi.Output[str]:
"""
State indicating whether the app has exceeded its quota usage. Read-only.
"""
return pulumi.get(self, "usage_state")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
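# Illustrative Pulumi program using this resource (a hedged sketch; the resource names,
# resource group, and App Service plan ID below are placeholders):
#
#     import pulumi
#     from pulumi_azure_nextgen.web.v20160801 import WebApp
#
#     site = WebApp(
#         "my-site",
#         name="my-site",
#         resource_group_name="my-rg",
#         location="WestEurope",
#         server_farm_id="/subscriptions/<sub-id>/resourceGroups/my-rg/providers/Microsoft.Web/serverfarms/my-plan",
#         https_only=True,
#     )
#     pulumi.export("hostname", site.default_host_name)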
| 44.771008
| 562
| 0.654169
|
733a5d837d60f942bc4a41d728c76187694ff738
| 644
|
py
|
Python
|
GraphClient.py
|
MCSainz/Data_Structures_Algorithms_in_Python
|
c09439bdadf4c872a01e115ffb596b840bcb64c2
|
[
"MIT"
] | null | null | null |
GraphClient.py
|
MCSainz/Data_Structures_Algorithms_in_Python
|
c09439bdadf4c872a01e115ffb596b840bcb64c2
|
[
"MIT"
] | null | null | null |
GraphClient.py
|
MCSainz/Data_Structures_Algorithms_in_Python
|
c09439bdadf4c872a01e115ffb596b840bcb64c2
|
[
"MIT"
] | null | null | null |
# Client test program for graphs.
from argparse import ArgumentParser
from Graph import Graph
from DepthFirstSearch import DepthFirstSearch as DFS
def main():
ap = ArgumentParser()
ap.add_argument('-i', '--input-file-path', required=True,
help='Input graph file path.')
args = vars(ap.parse_args())
graph_file_path = args['input_file_path']  # argparse stores '--input-file-path' under the key 'input_file_path'
# 'Graph' class client test.
graph = Graph(graph_file_path)
# 'DepthFirstSearch' class client test.
dfs = DFS()
dfs.DepthFirstSearch(graph, '0')
print(dfs.count)
if __name__ == '__main__':
main()
| 18.941176
| 61
| 0.627329
|
8b1f729eb34350994cd53655ce64f45feb031eeb
| 1,545
|
py
|
Python
|
__init__.py
|
ashelto6/unJumble
|
cf557668133186e7ea419f6f08ccadef4cad89a1
|
[
"MIT"
] | null | null | null |
__init__.py
|
ashelto6/unJumble
|
cf557668133186e7ea419f6f08ccadef4cad89a1
|
[
"MIT"
] | 7
|
2021-02-26T07:31:12.000Z
|
2021-04-25T03:21:35.000Z
|
__init__.py
|
ashelto6/unJumble
|
cf557668133186e7ea419f6f08ccadef4cad89a1
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_login import LoginManager
from flask_mail import Mail, Message
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
from td.client import TDClient
import os
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
TDSession = TDClient(
client_id=os.environ.get('CLIENT_ID'),
redirect_uri=os.environ.get('REDIRECT_URI'),
credentials_path=os.environ.get('CREDENTIALS_PATH')
)
TDSession.login()
db = SQLAlchemy()
mail = Mail()
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['MAIL_SERVER'] = os.environ.get('MAIL_SERVER')
app.config['MAIL_PORT'] = os.environ.get('MAIL_PORT')
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_DEFAULT_SENDER'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
mail.init_app(app)
db.init_app(app)
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
from .models import User
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .dt import dt as dt_blueprint
app.register_blueprint(dt_blueprint)
return app
| 28.090909
| 68
| 0.771521
|
220ad6d3ef03aec2305ab6da2aab60262db98469
| 469
|
py
|
Python
|
mite/web/__init__.py
|
markgreene74/mite
|
339bdfc39be30534ea2169d8257469bd0ff535fb
|
[
"MIT"
] | 17
|
2019-11-14T22:32:56.000Z
|
2022-02-01T15:38:03.000Z
|
mite/web/__init__.py
|
markgreene74/mite
|
339bdfc39be30534ea2169d8257469bd0ff535fb
|
[
"MIT"
] | 35
|
2020-01-08T10:50:31.000Z
|
2022-02-17T17:00:34.000Z
|
mite/web/__init__.py
|
markgreene74/mite
|
339bdfc39be30534ea2169d8257469bd0ff535fb
|
[
"MIT"
] | 4
|
2019-11-14T14:48:18.000Z
|
2020-05-06T22:09:25.000Z
|
import logging
from flask import Flask, Response
from .prometheus import PrometheusMetrics
app = Flask(__name__)
# FIXME: the goal of this line is to cut down on the once-per-second server
# log line from the prometheus scrape. But I don't think it works...
app.logger.setLevel(logging.WARNING)
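# Added note (illustrative): the per-request access-log lines from the Flask
# development server come from the 'werkzeug' logger rather than app.logger,
# so silencing the scrape noise would more plausibly look like:
#   logging.getLogger('werkzeug').setLevel(logging.WARNING)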
prometheus_metrics = PrometheusMetrics()
@app.route('/metrics')
def metrics():
text = prometheus_metrics.format()
return Response(text, mimetype='text/plain')
| 24.684211
| 75
| 0.75693
|
9ce667027a207276aabb8a3cc7c62728c1dd08ef
| 1,128
|
py
|
Python
|
lib/python/plow/client/conf.py
|
Br3nda/plow
|
eea4468078df58d6798ceabe14c0d3e83f2c3a8d
|
[
"Apache-2.0"
] | 36
|
2015-01-02T21:02:04.000Z
|
2021-09-07T12:01:06.000Z
|
lib/python/plow/client/conf.py
|
Br3nda/plow
|
eea4468078df58d6798ceabe14c0d3e83f2c3a8d
|
[
"Apache-2.0"
] | null | null | null |
lib/python/plow/client/conf.py
|
Br3nda/plow
|
eea4468078df58d6798ceabe14c0d3e83f2c3a8d
|
[
"Apache-2.0"
] | 11
|
2015-04-01T21:31:40.000Z
|
2022-03-30T17:55:27.000Z
|
import sys
import os
import ConfigParser
_Config = ConfigParser.RawConfigParser()
_Args = { }
def _init():
"""
Parse and initialize the Config object
"""
mod = sys.modules[__name__]
if os.environ.get("PLOW_CFG"):
parsed = _Config.read([os.environ["PLOW_CFG"]])
else:
parsed = _Config.read([
os.path.join(os.environ.get("PLOW_ROOT", "/usr/local"), "etc/plow/plow.cfg"),
os.path.expanduser("~/.plow/plow.cfg")])
# If we parsed a config file, make sure its valid
if parsed:
assert _Config.has_section("plow"), ("Configuration is missing a 'plow' section: %r" % parsed)
host_list = [h.strip() for h in get('plow', 'hosts', '').strip(',').split(',') if h]
setattr(mod, 'PLOW_HOSTS', host_list or ["localhost:11336"])
def get(section, key, default=None):
"""
Return the specified configuration option.
"""
try:
return _Config.get(section, key)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
return default
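# Example (added for illustration): read a value with a fallback, using the
# same section/key that _init() consumes above.
#   hosts = get('plow', 'hosts', 'localhost:11336')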
# run as a function to avoid polluting module with temp variables
_init()
| 25.636364
| 102
| 0.635638
|
9042181927827c36d8a7a499a084ea19d4a4f228
| 4,434
|
py
|
Python
|
env/Lib/site-packages/OpenGL/GLES1/OES/texture_cube_map.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
env/Lib/site-packages/OpenGL/GLES1/OES/texture_cube_map.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
env/Lib/site-packages/OpenGL/GLES1/OES/texture_cube_map.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension OES.texture_cube_map
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.texture_cube_map to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a new texture generation scheme for cube
map textures. Instead of the current texture providing a 1D, 2D,
or 3D lookup into a 1D, 2D, or 3D texture image, the texture is a
set of six 2D images representing the faces of a cube. The (s,t,r)
texture coordinates are treated as a direction vector emanating from
the center of a cube. At texture generation time, the interpolated
per-fragment (s,t,r) selects one cube face 2D image based on the
largest magnitude coordinate (the major axis). A new 2D (s,t) is
calculated by dividing the two other coordinates (the minor axes
values) by the major axis value. Then the new (s,t) is used to
lookup into the selected 2D texture image face of the cube map.
Unlike a standard 1D, 2D, or 3D texture that have just one target,
a cube map texture has six targets, one for each of its six 2D texture
image cube faces. All these targets must be consistent, complete,
and have equal width and height (ie, square dimensions).
This extension also provides two new texture coordinate generation modes
for use in conjunction with cube map texturing. The reflection map
mode generates texture coordinates (s,t,r) matching the vertex's
eye-space reflection vector. The reflection map mode
is useful for environment mapping without the singularity inherent
in sphere mapping. The normal map mode generates texture coordinates
(s,t,r) matching the vertex's transformed eye-space
normal. The normal map mode is useful for sophisticated cube
map texturing-based diffuse lighting models.
The intent of the new texgen functionality is that an application using
cube map texturing can use the new texgen modes to automatically
generate the reflection or normal vectors used to look up into the
cube map texture.
An application note: When using cube mapping with dynamic cube
maps (meaning the cube map texture is re-rendered every frame),
by keeping the cube map's orientation pointing at the eye position,
the texgen-computed reflection or normal vector texture coordinates
can be always properly oriented for the cube map. However if the
cube map is static (meaning that when view changes, the cube map
texture is not updated), the texture matrix must be used to rotate
the texgen-computed reflection or normal vector texture coordinates
to match the orientation of the cube map. The rotation can be
computed based on two vectors: 1) the direction vector from the cube
map center to the eye position (both in world coordinates), and 2)
the cube map orientation in world coordinates. The axis of rotation
is the cross product of these two vectors; the angle of rotation is
the arcsin of the dot product of these two vectors.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/texture_cube_map.txt
'''
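# --- Illustrative sketch (added; not part of the OES_texture_cube_map API) ---
# A minimal Python rendition of the face-selection rule described in the
# overview above: the coordinate with the largest magnitude is the major axis,
# its sign picks one of the six faces, and the two minor coordinates divided by
# the major-axis magnitude become the new 2D lookup coordinates. The per-face
# (s, t) sign/orientation conventions from the spec's table are omitted here.
def _cube_face_lookup(s, t, r):
    ax, ay, az = abs(s), abs(t), abs(r)
    if ax >= ay and ax >= az:          # X is the major axis
        face = '+X' if s >= 0 else '-X'
        major, minor_a, minor_b = ax, t, r
    elif ay >= az:                     # Y is the major axis
        face = '+Y' if t >= 0 else '-Y'
        major, minor_a, minor_b = ay, s, r
    else:                              # Z is the major axis
        face = '+Z' if r >= 0 else '-Z'
        major, minor_a, minor_b = az, s, t
    # New 2D coordinates: minor-axis values divided by the major-axis value.
    return face, minor_a / major, minor_b / major
# ------------------------------------------------------------------------------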
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.texture_cube_map import *
from OpenGL.raw.GLES1.OES.texture_cube_map import _EXTENSION_NAME
def glInitTextureCubeMapOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glTexGenfvOES.params size not checked against 'pname'
glTexGenfvOES=wrapper.wrapper(glTexGenfvOES).setInputArraySize(
'params', None
)
# INPUT glTexGenivOES.params size not checked against 'pname'
glTexGenivOES=wrapper.wrapper(glTexGenivOES).setInputArraySize(
'params', None
)
# INPUT glTexGenxvOES.params size not checked against 'pname'
glTexGenxvOES=wrapper.wrapper(glTexGenxvOES).setInputArraySize(
'params', None
)
# INPUT glGetTexGenfvOES.params size not checked against 'pname'
glGetTexGenfvOES=wrapper.wrapper(glGetTexGenfvOES).setInputArraySize(
'params', None
)
# INPUT glGetTexGenivOES.params size not checked against 'pname'
glGetTexGenivOES=wrapper.wrapper(glGetTexGenivOES).setInputArraySize(
'params', None
)
glGetTexGenxvOES=wrapper.wrapper(glGetTexGenxvOES).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
| 47.170213
| 76
| 0.792512
|
15c782543995d6b640c620a364a9f54dd5cc37b7
| 24,761
|
py
|
Python
|
tests/core_tests_chain_collection.py
|
gf712/AbPyTools
|
9ff0d4346ad80487d43875bc77d99fbe76170db4
|
[
"MIT"
] | 13
|
2017-06-13T12:31:47.000Z
|
2022-03-23T02:14:01.000Z
|
tests/core_tests_chain_collection.py
|
gf712/AbPyTools
|
9ff0d4346ad80487d43875bc77d99fbe76170db4
|
[
"MIT"
] | 8
|
2018-02-21T22:15:35.000Z
|
2022-02-01T12:27:58.000Z
|
tests/core_tests_chain_collection.py
|
gf712/AbPyTools
|
9ff0d4346ad80487d43875bc77d99fbe76170db4
|
[
"MIT"
] | 3
|
2018-04-10T08:01:39.000Z
|
2021-10-10T14:37:43.000Z
|
import unittest
from abpytools import ChainCollection, Chain
import operator
import os
from glob import glob
from . import read_sequence, check_connection, ABNUM_URL, IGBLAST_URL
class ChainCollectionCore(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.antibody_collection_1_name = 'test'
cls.chain_test_sequence = read_sequence('./tests/Data/chain_collection_fasta_test.fasta')
def test_ChainCollection_length_0(self):
antibody_collection = ChainCollection()
self.assertEqual(len(antibody_collection), 0)
def test_ChainCollection_input_exception_1(self):
# when ChainCollection.object_list is instantiated with
# something other than a list it throws an error
self.assertRaises(ValueError, ChainCollection, 0)
def test_ChainCollection_input_exception_2(self):
# when ChainCollection.object_list is instantiated with
# a list containing non-Chain objects it throws an error
self.assertRaises(ValueError, ChainCollection, [Chain(sequence=""), 0])
def test_ChainCollection_input_exception_3(self):
# when ChainCollection is instantiated with static method
# .load_from_fasta with an invalid file path it throws an error
self.assertRaises(ValueError, ChainCollection.load_from_fasta, './tests/Data/NonExistentFile.fasta')
def test_ChainCollection_input_exception_4(self):
# when ChainCollection is instantiated with .load_from_file with
# path to a file that does not have a .fasta or .json extension
# it throws an error
self.assertRaises(ValueError, ChainCollection.load_from_file, './tests/Data/__init__.py')
def test_ChainCollection_input_1(self):
# instantiate ChainCollection with a Chain (empty) object
# this doesn't make any sense and raises an error
self.assertRaises(ValueError, ChainCollection, [Chain(sequence="")])
def test_ChainCollection_input_2(self):
# instantiate ChainCollection with a loaded Chain object
test_chain = Chain(sequence=self.chain_test_sequence)
test_collection = ChainCollection(antibody_objects=[test_chain])
self.assertIsInstance(test_collection, ChainCollection)
def test_ChainCollection_load_fasta_exception(self):
# throws an error when reading a file with a fasta extension,
# but with the wrong format
self.assertRaises(ValueError, ChainCollection.load_from_file, './tests/Data/NotAFASTAFile.fasta')
def test_ChainCollection_chain(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json')
self.assertEqual(antibody_collection_1.chain, 'heavy')
@unittest.skipUnless(check_connection(URL=IGBLAST_URL), 'No internet connection, skipping test.')
def test_ChainCollection_chain_2(self):
# checks if the chain type is read properly from a Chain object
test_chain = Chain(sequence=self.chain_test_sequence)
test_chain.load()
test_collection = ChainCollection(antibody_objects=[test_chain])
self.assertEqual(test_collection.chain, 'heavy')
def test_ChainCollection_proto_io_1(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.save(file_format='pb2', path='./tests/chain_collection_1_heavy')
self.assertTrue(os.path.isfile('./tests/chain_collection_1_heavy.pb2'))
def test_ChainCollection_proto_io_2(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/chain_collection_1_heavy.pb2',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.names[0], 'test')
def test_ChainCollection_n_ab(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.n_ab, 1)
def test_ChainCollection_numbering_scheme(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_scheme, 'chothia')
def test_ChainCollection_numbering_scheme_kabat(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.set_numbering_scheme('kabat', realign=False)
self.assertEqual(antibody_collection_1.numbering_scheme, 'kabat')
def test_ChainCollection_Hmatrix_shape(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
# if this fails it means that abysis has been updated
self.assertEqual(antibody_collection_1.hydrophobicity_matrix().shape, (1, 158))
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_ChainCollection_Hmatrix_calculation(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='chothia')
self.assertEqual(antibody_collection_1.hydrophobicity_matrix().shape, (1, 158))
def test_ChainCollection_sequence_length(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(len(antibody_collection_1.sequences), 1)
def test_ChainCollection_obj_length(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(len(antibody_collection_1), 1)
def test_ChainCollection_slicing_1_obj(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
# if returning a single chain abpytools automatically creates a new Chain object
self.assertIsInstance(antibody_collection_1[0], Chain)
def test_ChainCollection_slicing_2_obj(self):
antibody_collection_1 = ChainCollection.load_from_file(
path='./tests/Data/chain_collection_heavy_2_sequences.json', show_progressbar=False, verbose=False)
# slicing multiple sequences returns a ChainCollection object
self.assertIsInstance(antibody_collection_1[[0, 1]], ChainCollection)
def test_ChainCollection_cdr_regions_part1(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertCountEqual(antibody_collection_1.ab_region_index().keys(),
[self.antibody_collection_1_name])
def test_ChainCollection_cdr_regions_part2(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertCountEqual(antibody_collection_1.ab_region_index()[self.antibody_collection_1_name],
['CDR', 'FR'])
def test_ChainCollection_cdr_regions_part3_cdr(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertCountEqual(antibody_collection_1.ab_region_index()[self.antibody_collection_1_name]['CDR'],
['CDR1', 'CDR2', 'CDR3'])
def test_ChainCollection_cdr_regions_part3_fr(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertCountEqual(antibody_collection_1.ab_region_index()[self.antibody_collection_1_name]['FR'],
['FR1', 'FR2', 'FR3', 'FR4'])
def test_ChainCollection_total_charge(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.total_charge[self.antibody_collection_1_name], 1.3278508)
def test_ChainCollection_igblast_parser_germline(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.igblast_local_query('tests/Data/chain_collection_1_igblast.html')
self.assertEqual(antibody_collection_1.germline[self.antibody_collection_1_name][0], 'IGHV4-34*01')
def test_ChainCollection_igblast_parser_germline_score(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.igblast_local_query('tests/Data/chain_collection_1_igblast.html')
self.assertAlmostEqual(antibody_collection_1.germline[self.antibody_collection_1_name][1], 9.11e-69,
delta=10e-9)
@unittest.skipUnless(check_connection(URL=IGBLAST_URL), 'No internet connection, skipping test.')
def test_ChainCollection_igblast_server_query_germline(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.igblast_server_query(show_progressbar=False)
self.assertEqual(antibody_collection_1.germline[self.antibody_collection_1_name][0], 'IGHV4-34*01')
@unittest.skipUnless(check_connection(URL=IGBLAST_URL), 'No internet connection, skipping test.')
def test_ChainCollection_igblast_server_query_score(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.igblast_server_query(show_progressbar=False)
self.assertAlmostEqual(antibody_collection_1.germline[self.antibody_collection_1_name][1], 9.11e-69,
delta=10e-9)
@unittest.skipUnless(check_connection(URL=IGBLAST_URL), 'No internet connection, skipping test.')
def test_ChainCollection_igblast_server_query_identity(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.igblast_server_query(show_progressbar=False)
self.assertEqual(antibody_collection_1.germline_identity[self.antibody_collection_1_name]['Total'], 96.9)
def test_ChainCollection_slicing(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertIsInstance(antibody_collection_1.get_object('test'), Chain)
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_Chain_abysis_parser(self):
antibody = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='chothia', verbose=False, show_progressbar=False)
self.assertEqual(antibody.chain, 'heavy')
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_Chain_abysis_parser_chothia(self):
antibody = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='chothia', verbose=False, show_progressbar=False)
self.assertEqual(antibody.numbering_table(as_array=True)[0][-1], '-')
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_Chain_abysis_parser_kabat(self):
antibody = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='kabat', verbose=False, show_progressbar=False)
self.assertEqual(antibody.numbering_table(as_array=True)[0][-1], '-')
def test_ChainCollection_numbering_tableDataFrame(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_table(as_array=False)['CDR1']['H32'].values[0], 'Y')
def test_ChainCollection_numbering_table_shape_np(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_table(as_array=True).shape, (1, 158))
def test_ChainCollection_numbering_table_shape_pd(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_table(as_array=False).shape, (1, 158))
def test_ChainCollection_numbering_table_region_pd(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(
antibody_collection_1.numbering_table(region='CDR1').loc[self.antibody_collection_1_name].values[-1], 'Y')
def test_ChainCollection_numbering_table_region_np(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_table(as_array=True, region='CDR1')[0][-1], 'Y')
def test_ChainCollection_numbering_table_fr_region(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.numbering_table(region='FR1').loc['test'].values[0], 'Q')
def test_ChainCollection_molecular_weight(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.molecular_weights(monoisotopic=False)[0], 20029.85217699999)
def test_ChainCollection_molecular_weight_monoisotopic(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.molecular_weights(monoisotopic=True)[0], 20042.1121)
def test_ChainCollection_ec(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.extinction_coefficients(reduced=False)[0], 52410.0)
def test_ChainCollection_ec_reduced(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.extinction_coefficients(reduced=True)[0], 52160.0)
def test_ChainCollection_charge(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertAlmostEqual(antibody_collection_1.charge.sum(), 1.7497642167513607)
def test_ChainCollection_get_object_exception(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertRaises(ValueError, antibody_collection_1.get_object, 'foo')
def test_ChainCollection_get_object_1(self):
# check if get_object returns a Chain object
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertIsInstance(antibody_collection_1.get_object('test'), Chain)
def test_ChainCollection_get_object_2(self):
# check if get_object returns a Chain object and keeps the information (i.e. name)
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.get_object('test').name, 'test')
def test_ChainCollection_add(self):
# check if adding two ChainCollection objects with one sequence each
# results in a ChainCollection object with two sequences
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_2 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_2_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_3 = antibody_collection_1 + antibody_collection_2
self.assertEqual(antibody_collection_3.n_ab, 2)
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_ChainCollection_add_exception_1(self):
# adding two ChainCollection objects with different numbering schemes
# (chothia vs kabat) raises a ValueError
antibody_chothia = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='chothia',
show_progressbar=False, verbose=False)
antibody_kabat = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='kabat',
show_progressbar=False, verbose=False)
self.assertRaises(ValueError, operator.add, antibody_chothia, antibody_kabat)
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_ChainCollection_add_exception_2(self):
antibody_chothia = ChainCollection.load_from_file(path='./tests/Data/chain_collection_fasta_test.fasta',
numbering_scheme='chothia', show_progressbar=False,
verbose=False)
antibody_kabat = Chain(sequence=read_sequence('./tests/Data/chain_collection_fasta_test.fasta'),
numbering_scheme='kabat')
antibody_kabat.load()
self.assertRaises(ValueError, operator.add, antibody_chothia, antibody_kabat)
def test_ChainCollection_add_exception_3(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
self.assertRaises(ValueError, operator.add, antibody_collection_1, 0)
@unittest.skipUnless(check_connection(URL=ABNUM_URL), 'No internet connection, skipping test.')
def test_ChainCollection_fasta(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.save(file_format='fasta', path='./tests/SaveTest')
antibody_collection_2 = ChainCollection.load_from_file(path='./tests/SaveTest.fasta',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.sequences[0], antibody_collection_2.sequences[0])
def test_ChainCollection_json(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.save(file_format='json', path='./tests/SaveTest')
antibody_collection_2 = ChainCollection.load_from_file(path='./tests/SaveTest.json',
show_progressbar=False, verbose=False)
self.assertEqual(antibody_collection_1.sequences[0], antibody_collection_2.sequences[0])
def test_ChainCollection_append_1(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_2 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_2_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.append(antibody_collection_2)
self.assertEqual(antibody_collection_1.n_ab, 2)
def test_ChainCollection_append_2(self):
antibody_collection_1 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_2 = ChainCollection.load_from_file(path='./tests/Data/chain_collection_2_heavy.json',
show_progressbar=False, verbose=False)
antibody_collection_1.append(antibody_collection_2)
self.assertEqual(antibody_collection_1.hydrophobicity_matrix().shape, (2, 158))
@classmethod
def tearDownClass(cls):
for name in glob('./tests/*'):
if name.split('.')[-1] != 'py' and os.path.isfile(name):
os.remove(name)
| 67.838356
| 118
| 0.667259
|
fca40bd6afa91ed2eaf529bafdf14a7091138930
| 2,711
|
py
|
Python
|
chat_assistant/chat_assistant_api/views.py
|
mrhegemon/Rasa_zero_rpc_XR_bot
|
a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b
|
[
"MIT"
] | 1
|
2021-06-21T10:44:51.000Z
|
2021-06-21T10:44:51.000Z
|
chat_assistant/chat_assistant_api/views.py
|
mrhegemon/Rasa_zero_rpc_XR_bot
|
a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b
|
[
"MIT"
] | null | null | null |
chat_assistant/chat_assistant_api/views.py
|
mrhegemon/Rasa_zero_rpc_XR_bot
|
a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b
|
[
"MIT"
] | 1
|
2021-06-07T23:09:30.000Z
|
2021-06-07T23:09:30.000Z
|
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.renderers import (
HTMLFormRenderer,
JSONRenderer,
BrowsableAPIRenderer,
)
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from celery.result import AsyncResult
from django.contrib import auth
from rasa.core.agent import Agent
from rasa.utils.endpoints import EndpointConfig
from rasa import train
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import asyncio
from .tasks import train_assistant_task
from . import serializers
from . import models
import logging
logging.basicConfig(level="DEBUG")
class TokenViewSet(viewsets.ViewSet):
"""Checks username and password and returns an auth token."""
serializer_class = AuthTokenSerializer
def create(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({
'token': token.key,
})
class SessionViewSet(viewsets.ViewSet):
"""Checks username and password and creates a user session."""
serializer_class = serializers.SessionSerializer
def create(self, request):
"""Use the ObtainAuthToken APIView to validate and create a token."""
serializer = serializers.SessionSerializer(data=request.data)
if serializer.is_valid():
user_name = serializer.data.get('username')
user_password = serializer.data.get('password')
user = auth.authenticate(request, username=user_name, password=user_password)
if user:
if user.is_active:
auth.login(request, user)
return Response({'response': 'Logged in'})
return Response({'response':'Not Active User'})
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 39.289855
| 89
| 0.689045
|
73e1a6c400533ce1fbdae96317cb0299357bf67f
| 3,403
|
py
|
Python
|
brno_process_data.py
|
shelltitan/CardioML
|
50b977807e2417bd422022168e1c48f8f23491db
|
[
"MIT"
] | null | null | null |
brno_process_data.py
|
shelltitan/CardioML
|
50b977807e2417bd422022168e1c48f8f23491db
|
[
"MIT"
] | null | null | null |
brno_process_data.py
|
shelltitan/CardioML
|
50b977807e2417bd422022168e1c48f8f23491db
|
[
"MIT"
] | null | null | null |
from os import listdir
from os.path import isfile, join
import os
import wfdb
import matplotlib.pyplot as plt
import numpy as np
import pywt
from pathlib import Path
target_dir = "D:/Arythmia PPG-ECG/processed_data/brno"
# Note: brno_test performs essentially this same processing, but for a single file
def ProcessData(filenames, directory, target):
target = os.path.join(target, filenames[0][:-4])
Path(target).mkdir(parents=True, exist_ok=True)
ecg_record = wfdb.rdrecord(os.path.join(directory, filenames[0]))
ppg_record = wfdb.rdrecord(os.path.join(directory, filenames[1]))
ecg_dictionary = ecg_record.__dict__
ppg_dictionary = ppg_record.__dict__
ecg_signal = ecg_dictionary['p_signal'][0]
time = np.arange(ecg_signal.size)
ecg_fig, ecg_ax = plt.subplots()
ecg_ax.plot(time, ecg_signal)
ecg_ax.set_title("ECG Signal")
ecg_ax.set_ylabel('uV')
ecg_ax.set_xlabel('time (ms)')
ecg_fig.savefig(os.path.join(target, filenames[0] + '.png'))
ppg_signal = ppg_dictionary['p_signal'][0]
time2 = np.arange(ppg_signal.size)
ppg_fig, ppg_ax = plt.subplots()
ppg_ax.plot(time2, ppg_signal)
ppg_ax.set_title("PPG Signal")
ppg_ax.set_ylabel('Amplitude')
ppg_ax.set_xlabel('time (s/30)')
ppg_fig.savefig(os.path.join(target, filenames[1] + '.png'))
minimum = np.min(ppg_signal)
maximum = np.max(ppg_signal)
ppg_signal_norm = (ppg_signal - minimum) / (maximum - minimum)
ppg_fig_n, ppg_ax_n = plt.subplots()
ppg_ax_n.plot(time2, ppg_signal_norm)
ppg_ax_n.set_title("PPG Signal Normalised")
ppg_ax_n.set_ylabel('Amplitude')
ppg_ax_n.set_xlabel('time (s/30)')
ppg_fig_n.savefig(os.path.join(target, filenames[1] + 'normalised.png'))
coeffs = pywt.wavedec(ecg_signal, 'db8', level=8)
dwt_fig, dwt_axs = plt.subplots(9)
dwt_fig.suptitle('ECG signal decomposition')
for i in range(9):
dwt_axs[i].plot(np.arange(coeffs[i].size), coeffs[i])
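# Zeroing the level-8 approximation coefficients below removes the
# low-frequency baseline wander from the ECG before reconstruction; the
# detail coefficients are left untouched.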
coeffs[0] = np.zeros_like(coeffs[0])
no_bw_signal = pywt.waverec(coeffs, 'db8')
no_bw_fig, no_bw_ax = plt.subplots()
no_bw_ax.plot(time, no_bw_signal)
no_bw_ax.set_title("ECG Signal")
no_bw_ax.set_ylabel('uV')
no_bw_ax.set_xlabel('time (ms)')
dwt_fig.savefig(os.path.join(target, filenames[0] + 'decomposition.png'))
no_bw_fig.savefig(os.path.join(target, filenames[0] + 'corrected.png'))
np.save(os.path.join(target, filenames[0]), no_bw_signal)
np.save(os.path.join(target, filenames[1]), ppg_signal_norm)
coeffs2 = pywt.wavedec(ppg_signal, 'db8', level=8)
dwt2_fig, dwt2_axs = plt.subplots(9)
dwt2_fig.suptitle('PPG signal decomposition')
for i in range(9):
dwt2_axs[i].plot(np.arange(coeffs2[i].size), coeffs2[i])
dwt2_fig.savefig(os.path.join(target, filenames[1] + 'decomposition.png'))
brno = "D:/Arythmia PPG-ECG/brno/brno-university-of-technology-smartphone-ppg-database-but-ppg-1.0.0"
directory = [x[0] for x in os.walk(brno)]
directory = directory[1::]
# Loop over the directories and read the files.
# A map() call would probably be faster than this native Python loop.
# Running this can take a while.
for i in range(len(directory)):
files = [f for f in listdir(directory[i]) if isfile(join(directory[i], f))]
file_names = [files[0][:-4], files[2][:-4]]
ProcessData(file_names, directory[i], target_dir)
| 37.811111
| 101
| 0.698795
|
99512f904ee4a18c8fee10464558c0aba6bf6b3a
| 346
|
py
|
Python
|
jobs/sierras_24.py
|
tylertucker202/tibet_project
|
84278a894f1853d2a560f1552e98508e7ffd1688
|
[
"MIT"
] | 2
|
2018-10-05T15:40:54.000Z
|
2020-06-15T12:26:06.000Z
|
jobs/sierras_24.py
|
tylertucker202/tibet_project
|
84278a894f1853d2a560f1552e98508e7ffd1688
|
[
"MIT"
] | 4
|
2021-02-02T23:01:16.000Z
|
2022-03-12T00:47:35.000Z
|
jobs/sierras_24.py
|
tylertucker202/tibet_project
|
84278a894f1853d2a560f1552e98508e7ffd1688
|
[
"MIT"
] | 1
|
2018-08-28T03:12:47.000Z
|
2018-08-28T03:12:47.000Z
|
from region_parameters import get_sierras_24x24_param
from job_functions import run_job
if __name__ == '__main__':
input_dict = get_sierras_24x24_param()
make_grid = True
make_hdf5 = False
make_time_series_df = False
make_plots = False
run_job(input_dict, make_grid, make_hdf5, make_time_series_df, make_plots)
| 26.615385
| 78
| 0.754335
|
4740f107ab2f8b637fca34863642d97cd69a73cd
| 13,210
|
py
|
Python
|
tools/deployment/pytorch2onnx.py
|
LaudateCorpus1/edgeai-mmdetection
|
43914fe6bf25e7584033d784babb518b28c76b5c
|
[
"BSD-3-Clause"
] | 9
|
2021-09-30T16:02:18.000Z
|
2022-03-09T00:42:21.000Z
|
tools/deployment/pytorch2onnx.py
|
LaudateCorpus1/edgeai-mmdetection
|
43914fe6bf25e7584033d784babb518b28c76b5c
|
[
"BSD-3-Clause"
] | 10
|
2021-10-30T05:34:01.000Z
|
2022-03-09T01:41:26.000Z
|
tools/deployment/pytorch2onnx.py
|
LaudateCorpus1/edgeai-mmdetection
|
43914fe6bf25e7584033d784babb518b28c76b5c
|
[
"BSD-3-Clause"
] | 8
|
2021-10-20T09:39:56.000Z
|
2022-01-28T06:33:29.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import torch
from mmcv import Config, DictAction
import mmdet.utils
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
from mmdet.utils import XMMDetQuantTestModule, save_model_proto, mmdet_load_checkpoint, is_mmdet_quant_module
from .pytorch2proto import *
from torchvision.edgeailite import xnn
def pytorch2onnx(args,
cfg,
model,
input_img,
input_shape,
normalize_cfg,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
test_img=None,
do_simplify=False,
dynamic_export=None,
skip_postprocess=False):
input_config = {
'input_shape': input_shape,
'input_path': input_img,
'normalize_cfg': normalize_cfg
}
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
if skip_postprocess:
warnings.warn('Not all models support export onnx without post '
'process, especially two stage detectors!')
model.forward = model.forward_dummy
torch.onnx.export(
model,
one_img,
output_file,
input_names=['input'],
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model without '
f'post process: {output_file}')
return
# replace original forward function
origin_forward = model.forward
model.forward = partial(
model.forward,
img_metas=img_meta_list,
return_loss=False,
rescale=False)
output_names = ['dets', 'labels']
model_org = model.module if is_mmdet_quant_module(model) else model
if model_org.with_mask:
output_names.append('masks')
input_name = 'input'
dynamic_axes = None
if dynamic_export:
dynamic_axes = {
input_name: {
0: 'batch',
2: 'height',
3: 'width'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
if model_org.with_mask:
dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'}
torch.onnx.export(
model,
img_list,
output_file,
input_names=[input_name],
output_names=output_names,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=show,
opset_version=opset_version,
dynamic_axes=dynamic_axes)
# shape inference is required to support onnx+proto detection models in edgeai-tidl-tools
onnx.shape_inference.infer_shapes_path(output_file, output_file)
output_proto_file = osp.splitext(output_file)[0] + '-proto.onnx'
pytorch2proto(cfg, model, img_list, output_file, output_proto_file, output_names=output_names, opset_version=opset_version)
# shape inference is required to support onnx+proto detection models in edgeai-tidl-tools
onnx.shape_inference.infer_shapes_path(output_proto_file, output_proto_file)
model.forward = origin_forward
# get the custom op path
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
if do_simplify:
import onnxsim
from mmdet import digit_version
min_required_version = '0.3.0'
assert digit_version(onnxsim.__version__) >= digit_version(
min_required_version
), f'Requires to install onnx-simplify>={min_required_version}'
input_dic = {'input': img_list[0].detach().cpu().numpy()}
model_opt, check_ok = onnxsim.simplify(
output_file,
input_data=input_dic,
custom_lib=ort_custom_op_path,
dynamic_input_shape=dynamic_export)
if check_ok:
onnx.save(model_opt, output_file)
print(f'Successfully simplified ONNX model: {output_file}')
else:
warnings.warn('Failed to simplify ONNX model.')
print(f'Successfully exported ONNX model: {output_file}')
# onnx model does not have the quant hooks, so in quant mode the outputs won't match
if verify:
from mmdet.core import get_classes
from mmdet.apis import show_result_pyplot
model.CLASSES = get_classes(args.dataset)
num_classes = len(model.CLASSES)
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# wrap onnx model
onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
if dynamic_export:
# scale up to test dynamic shape
h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
h, w = min(1344, h), min(1344, w)
input_config['input_shape'] = (1, 3, h, w)
if test_img is None:
input_config['input_path'] = input_img
# prepare input once again
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
# get pytorch output
with torch.no_grad():
pytorch_results = model(
img_list,
img_metas=img_meta_list,
return_loss=False,
rescale=True)[0]
img_list = [_.cuda().contiguous() for _ in img_list]
if dynamic_export:
img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
img_meta_list = img_meta_list * 2
# get onnx output
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
# visualize predictions
score_thr = 0.3
if show:
out_file_ort, out_file_pt = None, None
else:
out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
show_img = one_meta['show_img']
model.show_result(
show_img,
pytorch_results,
score_thr=score_thr,
show=True,
win_name='PyTorch',
out_file=out_file_pt)
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
# compare a part of result
if model.with_mask:
compare_pairs = list(zip(onnx_results, pytorch_results))
else:
compare_pairs = [(onnx_results, pytorch_results)]
err_msg = 'The numerical values are different between Pytorch' + \
' and ONNX, but it does not necessarily mean the' + \
' exported ONNX model is problematic.'
# check the numerical value
for onnx_res, pytorch_res in compare_pairs:
for o_res, p_res in zip(onnx_res, pytorch_res):
np.testing.assert_allclose(
o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
assert len(norm_config_li) == 1, '`norm_config` should only have one'
norm_config = norm_config_li[0]
return norm_config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--input-img', type=str, help='Images for input')
parser.add_argument(
'--show',
action='store_true',
help='Show onnx graph and detection outputs')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--test-img', type=str, default=None, help='Images for test')
parser.add_argument(
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be removed \
in future releases.')
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--simplify',
action='store_true',
help='Whether to simplify onnx model.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[800, 1216],
help='input image size')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
help='mean value used to preprocess input data. This argument \
is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
help='variance value used for preprocess input data. '
'This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--dynamic-export',
action='store_true',
help='Whether to export onnx with dynamic axis.')
parser.add_argument(
'--skip-postprocess',
action='store_true',
help='Whether to export model without post process. Experimental '
'option. We do not guarantee the correctness of the exported '
'model.')
args = parser.parse_args()
return args
def main(args):
warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \
parsed directly from config file and are deprecated and \
will be removed in future releases.')
assert args.opset_version == 11, 'MMDet only support opset 11 now'
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=v1.0.4')
register_extra_symbolics(args.opset_version)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.shape is None:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
elif len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
if hasattr(cfg, 'resize_with_scale_factor') and cfg.resize_with_scale_factor:
torch.nn.functional._interpolate_orig = torch.nn.functional.interpolate
torch.nn.functional.interpolate = xnn.layers.resize_with_scale_factor
# build the model and load checkpoint
model = build_model_from_cfg(args.config, args.checkpoint,
args.cfg_options)
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
# convert model to onnx file
pytorch2onnx(
args,
cfg,
model,
args.input_img,
input_shape,
normalize_cfg,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
test_img=args.test_img,
do_simplify=args.simplify,
dynamic_export=args.dynamic_export,
skip_postprocess=args.skip_postprocess)
if __name__ == '__main__':
args = parse_args()
main(args)
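# Example invocation (added for illustration; the config and checkpoint paths
# below are placeholders, not files shipped with this repository):
#   python tools/deployment/pytorch2onnx.py path/to/config.py path/to/checkpoint.pth \
#       --output-file model.onnx --shape 800 1216 --simplify --verify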
| 35.039788
| 127
| 0.619682
|
abe9e7be94479eaa45c7d911cfd045fd2a2df911
| 1,700
|
py
|
Python
|
racecar_control/scripts/labo_brushfire.py
|
gene2302/RacecarS5
|
82ac296a10aeefa56bac0fa8d7ab3a725391b07a
|
[
"MIT"
] | null | null | null |
racecar_control/scripts/labo_brushfire.py
|
gene2302/RacecarS5
|
82ac296a10aeefa56bac0fa8d7ab3a725391b07a
|
[
"MIT"
] | null | null | null |
racecar_control/scripts/labo_brushfire.py
|
gene2302/RacecarS5
|
82ac296a10aeefa56bac0fa8d7ab3a725391b07a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from nav_msgs.srv import GetMap
from libcontrol import *
def main():
rospy.init_node('brushfire')
prefix = "racecar"
rospy.wait_for_service(prefix + '/get_map')
try:
get_map = rospy.ServiceProxy(prefix + '/get_map', GetMap)
response = get_map()
except (rospy.ServiceException) as e:
print "Service call failed: %s"%e
return
rospy.loginfo("Got map=%dx%d resolution=%f", response.map.info.height, response.map.info.width, response.map.info.resolution)
grid = np.reshape(response.map.data, [response.map.info.height, response.map.info.width])
brushfireMap = brushfire(grid)
# Export brushfire map for visualization
# Adjust color: 0 (black) = obstacle, brighter cells (up to 255, white) = farther from obstacles (safer)
maximum = np.amax(brushfireMap)
if maximum > 0:
mask = brushfireMap == 1
brushfireMap = brushfireMap.astype(float) / float(maximum) * 225.0 + 30.0
brushfireMap[mask] = 0
# Flip image to get x->up, y->left (like top view in RVIZ looking towards x-axis)
cv2.imwrite('brushfire.bmp', cv2.transpose(cv2.flip(brushfireMap, -1)))
rospy.loginfo("Exported brushfire.bmp")
else:
rospy.loginfo("brushfire failed! Is brusfire implemented?")
# Example to show grid with same color than RVIZ
grid[grid == -1] = 89
grid[grid == 0] = 178
grid[grid == 100] = 0
# Flip image to get x->up, y->left (like top view in RVIZ looking towards x-axis)
cv2.imwrite('map.bmp', cv2.transpose(cv2.flip(grid, -1)))
rospy.loginfo("Exported map.bmp")
if __name__ == '__main__':
main()
| 35.416667
| 133
| 0.649412
|
f64110d6cf37d6a167e235100293bda745b12e07
| 1,402
|
py
|
Python
|
utils/common.py
|
qtisan/asteria
|
9378b4d6b882ea15c79d6b81b601f4265e55a301
|
[
"MIT"
] | null | null | null |
utils/common.py
|
qtisan/asteria
|
9378b4d6b882ea15c79d6b81b601f4265e55a301
|
[
"MIT"
] | 3
|
2021-06-02T00:45:51.000Z
|
2021-09-08T01:29:09.000Z
|
utils/common.py
|
qtisan/asteria
|
9378b4d6b882ea15c79d6b81b601f4265e55a301
|
[
"MIT"
] | null | null | null |
import logging
from pathlib import Path
root_dir = Path(__file__).parent.parent
def get_logger(app_name, level=logging.DEBUG):
logger = logging.getLogger(app_name)
logger.setLevel(logging.DEBUG)
logfile = logging.FileHandler(root_dir / 'logs/{0}'.format(app_name + '-log.txt'))
logfile.setLevel(level)
logfile.setFormatter(
logging.Formatter(
'[%(name)s][%(levelname)s]%(asctime)s: %(message)s > %(pathname)s(func: [%(funcName)s] at line %(lineno)d)'
))
logger.addHandler(logfile)
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(
logging.Formatter('[%(levelname)s] - %(message)s - %(asctime)s'))
logger.addHandler(console)
return logger
default_logger = get_logger('default')
def debug(msg):
default_logger.debug(msg)
def path_finder(func='apps'):
proot = root_dir / func
def p_path(app_name=None, sub_path=None):
if app_name is None:
return proot
if isinstance(app_name, str):
if sub_path is not None:
return proot / app_name / sub_path
return proot / app_name
raise ValueError('Arguments error!')
return p_path
data_path = path_finder('data')
app_path = path_finder('apps')
def add_sys_path():
import sys
srd = str(root_dir)
if srd not in sys.path:
sys.path.append(srd)
| 23.366667
| 119
| 0.645506
|
e18d153674bc8af964e582ac7a103e51b91e20a5
| 21
|
py
|
Python
|
build/lib/subnet/__init__.py
|
usamaimdadsian/subnet-pkg
|
3d815a22cd82044cce3607e5ab16102f8897324c
|
[
"MIT"
] | null | null | null |
build/lib/subnet/__init__.py
|
usamaimdadsian/subnet-pkg
|
3d815a22cd82044cce3607e5ab16102f8897324c
|
[
"MIT"
] | null | null | null |
build/lib/subnet/__init__.py
|
usamaimdadsian/subnet-pkg
|
3d815a22cd82044cce3607e5ab16102f8897324c
|
[
"MIT"
] | null | null | null |
from .subnet import *
| 21
| 21
| 0.761905
|
2da1c793fe1afa5c590b89254b77ecb813d5cea9
| 15,123
|
py
|
Python
|
commands/base_commands/exchanges.py
|
bopopescu/arxcode
|
3d279ce801e177d2f98671c438c52d39ed6edd41
|
[
"MIT"
] | null | null | null |
commands/base_commands/exchanges.py
|
bopopescu/arxcode
|
3d279ce801e177d2f98671c438c52d39ed6edd41
|
[
"MIT"
] | null | null | null |
commands/base_commands/exchanges.py
|
bopopescu/arxcode
|
3d279ce801e177d2f98671c438c52d39ed6edd41
|
[
"MIT"
] | null | null | null |
from django.db import transaction
from commands.base import ArxCommand
from commands.base_commands.overrides import args_are_currency, check_volume
from world.dominion.models import CraftingMaterials
from server.utils.arx_utils import text_box
class TradeError(Exception):
pass
class PersonalTradeInProgress:
HEADER_MSG = "|w[|nPersonal Trade|w]|n "
FAIL_MSG = "Could not finish the exchange."
def __init__(self, caller, target):
self.caller = caller
self.target = target
self.names = {caller: str(caller), target: str(target)}
caller.ndb.personal_trade_in_progress = self
target.ndb.personal_trade_in_progress = self
self.items = {caller: [], target: []}
self.silver = {caller: 0, target: 0}
self.agreements = {caller: False, target: False}
@classmethod
def create_trade(cls, caller, targ_name):
"""
Creates instance and messages participants if it succeeds.
Args:
caller: character object
targ_name: string
"""
target = caller.search(targ_name)
if not target:
return
if target == caller or not target.player_ob:
raise TradeError(f"You cannot trade with {target}.")
for trader in (caller, target):
if trader.ndb.personal_trade_in_progress:
if trader != caller:
target.msg(f"{cls.HEADER_MSG}{caller} wants to trade, but you have a trade in progress.")
raise TradeError(f"{trader} has a trade already in progress.")
trade = cls(caller, target)
trade.message_participants(
f"{caller} has initiated a trade with {target}. (See |w'help trade'|n.)"
)
def cancel_trade(self):
"Scrubs this instance from traders and messages them."
self.clean_up()
self.message_participants("|yYour trade has been cancelled.|n")
def display_trade(self):
"Returns a string about the overall state of the trade."
msg = self.HEADER_MSG + "\n".join(
(self.assemble_manifest(self.caller), self.assemble_manifest(self.target),
(self.assemble_statement(self.caller) + self.assemble_statement(self.target))))
return text_box(msg)
def assemble_manifest(self, trader):
"Returns a string about what this trader is offering."
money = f"|c{self.silver[trader]}|n silver" if self.silver[trader] else "no money"
txt = f"{self.names[trader]} offers {money} and"
items = [str(ob) for ob in self.items[trader]]
if items:
txt += ":\n + " + "\n + ".join(items)
else:
txt += " no items."
return txt
def assemble_statement(self, trader):
"Returns string about whether a trader has agreed."
bull = self.agreements[trader] # :3 bool luv u
color = "|351" if bull else "|y"
adverb = "" if bull else "not yet "
return f"{self.names[trader]} has {color}{adverb}agreed|n. "
def add_item(self, trader, obj):
"Locates item and adds it to the trade."
if not obj:
raise TradeError("Trade what?")
item = trader.search(obj)
if not item:
return
if item in self.items[trader]:
raise TradeError(f"{item} is already being traded.")
self.items[trader].append(item)
self.reset_agreements(f"{self.names[trader]} offers {item}.")
def add_silver(self, trader, amount):
"Verifies amount and adds silver to the trade."
if self.silver[trader]:
raise TradeError(
f"Already offered |c{self.silver[trader]}|n silver. Cancel trade if amount is incorrect."
)
try:
amount = int(amount)
if amount < 1:
raise ValueError
except (ValueError, TypeError):
raise TradeError("Amount must be a positive number that you can afford.")
self.silver[trader] = amount
self.reset_agreements(f"{self.names[trader]} offers |c{amount}|n silver.")
def finish(self):
"Runs checks before performing the exchange, then scrubs this instance from traders."
self.check_still_trading()
self.check_trader_location()
self.check_can_pay()
self.check_can_move_items()
for obj in self.items[self.caller]:
obj.move_to(self.target)
for obj in self.items[self.target]:
obj.move_to(self.caller)
if self.silver[self.caller]:
self.caller.pay_money(self.silver[self.caller], self.target)
if self.silver[self.target]:
self.target.pay_money(self.silver[self.target], self.caller)
self.clean_up()
self.message_participants("|351Your exchange is complete!|n")
def check_still_trading(self):
"Ensures both traders are still using the same trade object."
caller_trade = self.caller.ndb.personal_trade_in_progress
target_trade = self.target.ndb.personal_trade_in_progress
if caller_trade != target_trade:
self.clean_up()
raise TradeError("Invalid trade; cancelling it. Please restart.")
def check_trader_location(self):
"Ensures traders are together & using same names. Resets agreements if not."
changed_names = False
for trader in (self.caller, self.target):
if str(trader) != self.names[trader]:
changed_names = True
if changed_names or self.caller.location != self.target.location:
self.reset_agreements("Traders must be in the same location.")
raise TradeError(self.FAIL_MSG)
def check_can_pay(self):
"Resets agreements if a trader cannot afford their offered silver."
for trader in (self.caller, self.target):
if self.silver[trader] > trader.currency:
self.reset_agreements(f"{self.names[trader]} does not have enough silver to complete the trade.")
raise TradeError(self.FAIL_MSG)
def check_can_move_items(self):
"Runs both traders through an item check."
for trader in (self.caller, self.target):
recipient = self.target if (trader == self.caller) else self.caller
for obj in self.items[trader]:
if not obj.at_before_move(recipient, caller=trader):
self.reset_agreements(f"{self.names[trader]} cannot trade {obj}.")
raise TradeError(self.FAIL_MSG)
def mark_agreement(self, trader):
"Trader's agreement becomes Truthy, then trade attempts to finish if all parties have agreed."
self.agreements[trader] = True
if all(self.agreements.values()):
self.finish()
else:
self.message_participants(f"{self.names[trader]} has agreed to the trade.")
def reset_agreements(self, msg=""):
"Checks for trade agreements and nullifies them, giving feedback if any were reset."
message = msg
if any(self.agreements.values()):
self.agreements = {key: False for key in self.agreements.keys()}
sep = " " if message else ""
message += f"{sep}|wAgreements have been reset.|n"
if message:
self.message_participants(message)
def message_participants(self, msg):
"Sends a message to traders with a small header attached."
message = f"{self.HEADER_MSG}{msg}"
self.caller.msg(message)
self.target.msg(message)
def clean_up(self):
if self.caller.ndb.personal_trade_in_progress == self:
self.caller.ndb.personal_trade_in_progress = None
if self.target.ndb.personal_trade_in_progress == self:
self.target.ndb.personal_trade_in_progress = None
class CmdTrade(ArxCommand):
"""
Set up a trade transaction with another character.
Usage:
trade [<character>]
trade/item <item>
trade/silver <amount>
trade/agree
trade/cancel
After offering items and silver, both characters must agree in
order to complete the exchange. Display the current offer by
using 'trade' command with no args or switches.
"""
key = "trade"
locks = "cmd:all()"
@transaction.atomic
def func(self):
try:
if self.args and not self.switches:
PersonalTradeInProgress.create_trade(self.caller, self.args)
return
trade = self.caller.ndb.personal_trade_in_progress
if not trade:
raise TradeError("You are not trading with anyone right now.")
elif not self.switches and not self.args:
self.msg(trade.display_trade())
return
elif self.check_switches(("agree", "accept")):
trade.mark_agreement(self.caller)
return
elif self.check_switches(("cancel", "stop", "quit")):
trade.cancel_trade()
return
elif self.check_switches(("item", "items")):
trade.add_item(self.caller, self.args)
return
elif self.check_switches(("silver", "money")):
trade.add_silver(self.caller, self.args)
return
raise TradeError("Incorrect switch. See |w'help trade'|n.")
except TradeError as err:
self.msg(err)
class CmdGive(ArxCommand):
"""
give away things
Usage:
give <inventory obj> = <target>
give <inventory obj> to <target>
give <amount> silver to <target>
give/mats <type>,<amount> to <target>
give/resource <type>,<amount> to <target>
    Gives an item from your inventory to another character,
placing it in their inventory. give/resource does not require
you to be in the same room.
"""
key = "give"
locks = "cmd:all()"
# noinspection PyAttributeOutsideInit
def func(self):
"""Implement give"""
caller = self.caller
to_give = None
if not self.args:
caller.msg("Usage: give <inventory object> = <target>")
return
if not self.rhs:
arglist = self.args.split(" to ")
if len(arglist) < 2:
caller.msg("Usage: give <inventory object> to <target>")
return
self.lhs, self.rhs = arglist[0], arglist[1]
if "resource" in self.switches:
player = caller.player.search(self.rhs)
if not player:
return
target = player.char_ob
else:
target = caller.search(self.rhs)
if not target:
return
if target == caller:
caller.msg("You cannot give things to yourself.")
return
if not target.player_ob:
self.msg("You cannot give anything to them. Use 'put' instead.")
return
if "mats" in self.switches:
lhslist = self.lhs.split(",")
try:
mat = caller.player_ob.Dominion.assets.materials.get(type__name__iexact=lhslist[0])
amount = int(lhslist[1])
if amount < 1:
raise ValueError
except (IndexError, ValueError):
caller.msg("Invalid syntax.")
return
except CraftingMaterials.DoesNotExist:
caller.msg("No materials by that name.")
return
if mat.amount < amount:
caller.msg("Not enough materials.")
return
try:
tmat = target.player_ob.Dominion.assets.materials.get(type=mat.type)
except CraftingMaterials.DoesNotExist:
tmat = target.player_ob.Dominion.assets.materials.create(type=mat.type)
mat.amount -= amount
tmat.amount += amount
mat.save()
tmat.save()
caller.msg("You give %s %s to %s." % (amount, mat.type, target))
target.msg("%s gives %s %s to you." % (caller, amount, mat.type))
return
if "resource" in self.switches:
rtypes = ("economic", "social", "military")
lhslist = self.lhs.split(",")
try:
rtype = lhslist[0].lower()
amount = int(lhslist[1])
if amount < 1:
raise ValueError
except (IndexError, ValueError):
caller.msg("Invalid syntax.")
return
if rtype not in rtypes:
caller.msg("Type must be in %s." % ", ".join(rtypes))
return
cres = getattr(caller.player_ob.Dominion.assets, rtype)
if cres < amount:
caller.msg("You do not have enough %s resources." % rtype)
return
tres = getattr(target.player_ob.Dominion.assets, rtype)
cres -= amount
tres += amount
setattr(target.player_ob.Dominion.assets, rtype, tres)
setattr(caller.player_ob.Dominion.assets, rtype, cres)
target.player_ob.Dominion.assets.save()
caller.player_ob.Dominion.assets.save()
caller.msg("You give %s %s resources to %s." % (amount, rtype, target))
target.player_ob.inform("%s has given %s %s resources to you." % (caller, amount, rtype),
category="Resources")
return
if args_are_currency(self.lhs):
arglist = self.lhs.split()
val = round(float(arglist[0]), 2)
if val <= 0:
self.msg("Amount must be positive.")
return
currency = round(float(caller.db.currency or 0), 2)
if val > currency:
caller.msg("You do not have that much money to give.")
return
caller.pay_money(val, target)
caller.msg("You give coins worth %s silver pieces to %s." % (val, target))
target.msg("%s has given you coins worth %s silver pieces." % (caller, val))
return
# if we didn't find a match in currency that we're giving
if not to_give:
to_give = caller.search(self.lhs)
if not (to_give and target):
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
to_give.at_get(caller)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
if not check_volume(to_give, target, quiet=True):
caller.msg("%s can't hold %s." % (target.name, to_give.name))
return
if not to_give.at_before_move(target, caller=caller):
return
# give object
to_give.move_to(target, quiet=True)
caller.msg("You give %s to %s." % (to_give.key, target))
target.msg("%s gives you %s." % (caller, to_give.key))
to_give.at_get(target)
| 40.328 | 113 | 0.585796 |
b98065586c7dfce53ff9558c48c42c7023232342 | 29,973 | py | Python | uploadr.py | baumrasen/flickr-uploader | 55c73dc15a90705c17031ddaf8ac41351e8f481f | ["MIT"] | 39 | 2017-11-04T10:10:59.000Z | 2021-01-06T22:07:55.000Z | uploadr.py | baumrasen/flickr-uploader | 55c73dc15a90705c17031ddaf8ac41351e8f481f | ["MIT"] | 60 | 2017-11-01T01:28:46.000Z | 2019-09-29T12:26:39.000Z | uploadr.py | baumrasen/flickr-uploader | 55c73dc15a90705c17031ddaf8ac41351e8f481f | ["MIT"] | 9 | 2017-11-09T20:35:12.000Z | 2021-01-10T16:34:32.000Z |
#!/usr/bin/env python
"""
by oPromessa, 2017, 2018
Published on https://github.com/oPromessa/flickr-uploader/
## LICENSE.txt
--------------
* Check usage and licensing notice on LICENSE.txt file.
* PLEASE REVIEW THE SOURCE CODE TO MAKE SURE IT WILL WORK FOR YOUR NEEDS.
## CONTRIBUTIONS ARE WELCOME!
-----------------------------
* Check CONTRIBUTING and TODO files
* FEEDBACK ON ANY TESTING AND FEEDBACK YOU DO IS GREATLY APPRECIATED.
* IF YOU FIND A BUG, PLEASE REPORT IT.
## Recognition
--------------
Inspired by:
* https://github.com/sybrenstuvel/flickrapi
* http://micampe.it/things/flickruploadr
* https://github.com/joelmx/flickrUploadr/
## README.md
------------
* Check README.md file for information including:
### Description
### Features
### Requirements
### Setup on Synology
### Configuration
### Usage/Arguments/Options
### Task Scheduler (cron)
### Recognition
### Final remarks
### Q&A
"""
# =============================================================================
# Import section for Python 2 and 3 compatible code
# from __future__ import absolute_import, division, print_function,
# unicode_literals
from __future__ import division # This way: 3 / 2 == 1.5; 3 // 2 == 1
# =============================================================================
# Import section
import sys
import traceback
import logging
import logging.handlers
import argparse
import os
import os.path
try:
# Use portalocker if available. Required for Windows systems
import portalocker as FileLocker # noqa
FILELOCK = FileLocker.lock
except ImportError:
# Use fcntl
import fcntl as FileLocker
FILELOCK = FileLocker.lockf
import errno
import pprint
# -----------------------------------------------------------------------------
# Helper FlickrUploadr class to upload pics/videos into Flickr.
import lib.FlickrUploadr as FlickrUploadr
# -----------------------------------------------------------------------------
# Helper class and functions for UPLoaDeR Global Constants.
import lib.Konstants as KonstantsClass
# -----------------------------------------------------------------------------
# Helper class and functions to print messages.
import lib.NicePrint as NicePrint
# -----------------------------------------------------------------------------
# Helper class and functions to load, process and verify INI configuration.
import lib.MyConfig as MyConfig
# -----------------------------------------------------------------------------
# Helper class to allow multiprocessing looging into a single file
import lib.multiprocessing_logging as multiprocessing_logging
# =============================================================================
# Logging init code
#
# Getting definitions from Konstants
UPLDR_K = KonstantsClass.Konstants()
# Sets LOGGING_LEVEL to allow logging even if everything else is wrong!
# Parent logger is set to maximum (DEBUG) so that child loggers will log as appropriate
# Produces too much output info on MyConfig. Setting it to WARNING
logging.getLogger().setLevel(logging.DEBUG)
# define a Handler which writes WARNING messages or higher to the sys.stderr
CONSOLE_LOGGING = logging.StreamHandler()
CONSOLE_LOGGING.setLevel(logging.WARNING)
CONSOLE_LOGGING.setFormatter(logging.Formatter(
fmt=UPLDR_K.Pur + '[' + str(UPLDR_K.Run) + ']' +
'[%(asctime)s]:[%(processName)-11s]' + UPLDR_K.Std +
'[%(levelname)-8s]:[%(name)s] %(message)s',
datefmt=UPLDR_K.TimeFormat))
logging.getLogger().addHandler(CONSOLE_LOGGING)
# Inits with default configuration value, namely LOGGING_LEVEL
MY_CFG = MyConfig.MyConfig()
# Update console logging level as per LOGGING_LEVEL from default config
CONSOLE_LOGGING.setLevel(int(MY_CFG.LOGGING_LEVEL))
# -----------------------------------------------------------------------------
# =============================================================================
# Init code
#
# Python version must be greater than 2.7 for this script to run
#
if sys.version_info < (2, 7):
logging.critical('----------- (V%s) Error Init -----------(Log:%s)'
                     ' This script requires Python 2.7 or newer.'
                     ' Current Python version: [%s] '
'Exiting...',
UPLDR_K.Version,
MY_CFG.LOGGING_LEVEL,
sys.version)
sys.exit(1)
else:
logging.warning('----------- (V%s) Init -----------(Log:%s)'
                    ' Python version on this system: [%s]',
UPLDR_K.Version,
MY_CFG.LOGGING_LEVEL,
sys.version)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def my_excepthook(exc_class, exc_value, exc_tb):
""" my_excepthook
Exception handler to be installed over sys.excepthook to allow
traceback reporting information to be reported back to logging file
"""
logging.critical('Exception: %s: %s', exc_class, exc_value)
logging.critical('%s', ''.join(traceback.format_tb(exc_tb)))
# -----------------------------------------------------------------------------
# parse_arguments
#
# This is the main method
#
def parse_arguments():
""" parse_arguments
Parse arguments and return results.
"""
# Parse args --------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Upload files to Flickr. Uses uploadr.ini as config file.',
epilog='by oPromessa, 2017, 2018'
)
# Configuration related options -------------------------------------------
cgrpparser = parser.add_argument_group('Configuration related options')
cgrpparser.add_argument('-C', '--config-file', action='store',
metavar='filename.ini',
type=str,
help='Optional configuration file. '
'Default is:[{!s}]'
.format(UPLDR_K.ini_file))
cgrpparser.add_argument('-a', '--authenticate', action='store_true',
help='Performs/Verifies authentication with '
'Flickr. To be run on initial setup.'
                                 ' Does not run any other option.')
# Verbose related options -------------------------------------------------
vgrpparser = parser.add_argument_group('Verbose and dry-run options')
vgrpparser.add_argument('-v', '--verbose', action='count',
help='Verbose output. Use -vv for more verbosity. '
'See also LOGGING_LEVEL value in INI file.')
vgrpparser.add_argument('-x', '--verbose-progress', action='store_true',
help='Provides progress indicator on each upload. '
'See also LOGGING_LEVEL value in INI file.')
vgrpparser.add_argument('-m', '--mask-sensitive', action='store_true',
help='Masks sensitive data on log files like '
'your pics filenames and set/albums names. '
'(Uses SHA1 hashing algorithm)')
vgrpparser.add_argument('-n', '--dry-run', action='store_true',
help='Dry run. No changes are actually performed.')
# Information related options ---------------------------------------------
igrpparser = parser.add_argument_group('Information options')
igrpparser.add_argument('-i', '--title', action='store',
help='Title for uploaded files. '
'Overwrites title set in INI config file. '
'If not specified and not set in INI file, '
'it uses filename as title (*Recommended).')
igrpparser.add_argument('-e', '--description', action='store',
                            help='Description for uploaded files. '
'Overwrites description set in INI file. ')
igrpparser.add_argument('-t', '--tags', action='store',
help='Space-separated tags for uploaded files. '
'It appends to the tags defined in INI file.')
# used in pics_status function
igrpparser.add_argument('-l', '--list-photos-not-in-set',
metavar='N', type=int,
help='List as many as N photos (with tags) '
'not in set. Maximum listed photos is 500.')
# Processing related options ----------------------------------------------
pgrpparser = parser.add_argument_group('Processing related options')
pgrpparser.add_argument('-r', '--drip-feed', action='store_true',
help='Wait a bit between uploading individual '
'files.')
pgrpparser.add_argument('-p', '--processes',
metavar='P', type=int,
help='Number of photos to upload simultaneously. '
'Number of process to assign pics to sets.')
pgrpparser.add_argument('-u', '--not-is-already-uploaded',
action='store_true',
help='Do not check if file is already uploaded '
'and exists on flickr prior to uploading. '
'Use this option for faster INITIAL upload. '
'Do not use it in subsequent uploads to '
'prevent/recover orphan pics without a set.')
pgrpparser.add_argument('--no-delete-from-flickr',
metavar='nodelete',
type=str,
nargs='?',
const=str(UPLDR_K.no_delete_tag),
                            help='Do not actually delete pics from flickr.com &'
' mark them with tag:[{!s}]'
.format(UPLDR_K.no_delete_tag))
# run in daemon mode uploading every X seconds
pgrpparser.add_argument('-d', '--daemon', action='store_true',
help='Run forever as a daemon.'
                                 ' Uploading every SLEEP_TIME seconds. Please '
'note it only performs '
'upload/raw convert/replace.')
# Bad files related options -----------------------------------------------
# Cater for bad files. files in your Library that flickr does not recognize
bgrpparser = parser.add_argument_group('Handling bad and excluded files')
# -b add files to badfiles table
bgrpparser.add_argument('-b', '--bad-files', action='store_true',
help='Save on database bad files to prevent '
'continuous uploading attempts. Bad files are '
'files in your Library that flickr does not '
'recognize (Error 5) or are too large (Error 8). '
'Check also option -c.')
# -c clears the badfiles table to allow a reset of the list
bgrpparser.add_argument('-c', '--clean-bad-files', action='store_true',
help='Resets the badfiles table/list to allow a '
'new uploading attempt for bad files. Bad files '
'are files in your Library that flickr does not '
'recognize (Error 5) or are too large (Error 8). '
'Check also option -b.')
# -s list the badfiles table
bgrpparser.add_argument('-s', '--list-bad-files', action='store_true',
help='List the badfiles table/list.')
# when you change EXCLUDED_FOLDERS setting
bgrpparser.add_argument('-g', '--remove-excluded',
action='store_true',
help='Remove previously uploaded files, that are '
'now being excluded due to change of the INI '
'file configuration EXCLUDED_FOLDERS.'
                                 ' NOTE: Option --remove-ignored was '
'dropped in favor of --remove-excluded.')
# Migration related options -----------------------------------------------
# 2.7.0 Version will add album/setName as one
agrpparser = parser.add_argument_group('Migrate to v2.7.0')
agrpparser.add_argument('--add-albums-migrate', action='store_true',
help='From v2.7.0 onwards, uploadr adds to Flickr '
'an album tag to each pic. '
'This option adds such tag to previously '
'loaded pics. uploadr v2.7.0 will perform '
'automatically such migration upon first run '
'This option is *only* available to re-run '
'it, should it be necessary.')
return parser.parse_args()
# Parse args --------------------------------------------------------------
# -----------------------------------------------------------------------------
# run_uploadr
#
# This is the main method
#
def run_uploadr(args):
""" run_uploadr
args = parameters
"""
# -------------------------------------------------------------------------
# Local Variables
#
# myflick = Class Uploadr (created in the Main code)
def check_files_dir():
""" check_files_dir
Confirms setting MYCFG.FILES_DIR is defined and a valid folder.
Exits from program otherwise.
"""
NPR.niceprint('FILES_DIR: [{!s}]'
.format(NPR.strunicodeout(MY_CFG.FILES_DIR)),
verbosity=1,
logalso=logging.WARNING)
if MY_CFG.FILES_DIR == "":
NPR.niceerror(
caught=True,
caughtprefix='xxx',
caughtcode='630',
caughtmsg='Configure in INI file [normally uploadr.ini] '
'the folder [FILES_DIR] with media to sync with Flickr.',
useniceprint=True)
sys.exit(8)
elif not os.path.isdir(MY_CFG.FILES_DIR):
NPR.niceerror(
caught=True,
caughtprefix='xxx',
caughtcode='631',
                caughtmsg='FILES_DIR: [{!s}] is not valid. '
                'Use an existing folder in INI file [normally uploadr.ini] '
'with media to sync with Flickr. '
.format(NPR.strunicodeout(MY_CFG.FILES_DIR)),
useniceprint=True)
sys.exit(8)
def check_flickr_key_secret():
""" def check_flickr_key_secret():
Confirms the configuration for api_key and secret is defined.
Exits from program otherwise.
"""
if MY_CFG.FLICKR["api_key"] == "" or MY_CFG.FLICKR["secret"] == "":
NPR.niceerror(
caught=True,
caughtprefix='xxx',
caughtcode='635',
caughtmsg='Please enter an API key and secret in the '
                'configuration file [normally uploadr.ini] (see README).',
useniceprint=True)
sys.exit(9)
def check_token_authenticate():
""" check_token_authenticate
Checks if token is available... if not will authenticate
"""
NPR.niceprint('Checking if token is available... '
'if not will authenticate')
if not myflick.check_token():
# authenticate sys.exits in case of failure
myflick.authenticate()
else:
NPR.niceprint('Token is available.',
logalso=logging.INFO)
# Initial checks
check_files_dir()
check_flickr_key_secret()
# Instantiate class Uploadr. getCachedToken is called on __init__
logging.debug('Instantiating the Main class myflick = Uploadr()')
myflick = FlickrUploadr.Uploadr(MY_CFG, args)
# Setup the database. Clean badfiles entries if asked
myflick.setup_db()
if args.clean_bad_files:
myflick.clean_db_badfiles()
if args.authenticate:
check_token_authenticate()
elif args.daemon:
# Will run in daemon mode every SLEEP_TIME seconds
if myflick.check_token():
            NPR.niceprint('Will run in daemon mode every [{!s}] seconds. '
'Make sure you have previously authenticated!'
.format(MY_CFG.SLEEP_TIME),
logalso=logging.WARNING)
myflick.run()
else:
NPR.niceerror(
caught=True,
caughtprefix='xxx',
caughtcode='641',
caughtmsg='Not able to connect to Flickr.'
                ' Make sure you have previously authenticated!',
useniceprint=True)
sys.exit(8)
else:
check_token_authenticate()
if args.add_albums_migrate:
NPR.niceprint('Preparation migration to 2.7.0',
fname='add_albums_tag')
if myflick.add_albums_tag():
NPR.niceprint('Successfully added album tags to pics '
'on upload.',
fname='add_albums_tag')
else:
NPR.niceerror(
caught=True,
caughtprefix='xxx',
caughtcode='642',
caughtmsg='Failed adding album tags to pics uploaded. '
'Please check logs, correct, and retry.',
useniceprint=True)
sys.exit(10)
elif args.list_bad_files:
myflick.list_bad_files()
else:
myflick.remove_useless_sets_table()
myflick.get_flickr_sets()
myflick.upload()
myflick.remove_deleted_media()
if args.remove_excluded:
myflick.remove_excluded_media()
myflick.create_sets()
myflick.pics_status(KonstantsClass.media_count)
# Run Uploadr -------------------------------------------------------------
# -----------------------------------------------------------------------------
# check_base_ini_file
#
# Check if base_dir folder exists and ini_file exists and is a file
#
def check_base_ini_file(base_dir, ini_file):
""" check_base_ini_file
Returns True if base_dir is a directory and ini_file is a regular file.
base_dir = Folder
ini_file = INI File path
"""
logging.info('check_base_ini_file: base_dir=[%s] ini_file=[%s]',
NPR.strunicodeout(base_dir),
NPR.strunicodeout(ini_file))
result_check = True
try:
if not (base_dir == '' or os.path.isdir(base_dir)):
raise OSError('[Errno 2] No such directory:[{!s}]'
.format(NPR.strunicodeout(base_dir)))
elif not os.path.isfile(ini_file):
raise OSError('[Errno 2] No such file:[{!s}]'
.format(NPR.strunicodeout(ini_file)))
else:
result_check = True
except OSError as err:
result_check = False
logging.error(
'No config folder and/or INI file found: %s', str(err))
logging.debug('check_base_ini_file: result=[%s]', result_check)
return result_check
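# Illustrative behaviour of check_base_ini_file (the paths below are made-up examples):
#   check_base_ini_file('', '/opt/uploadr/uploadr.ini')  -> True when the INI file exists
#   check_base_ini_file('/missing/dir', 'uploadr.ini')   -> False (the miss is logged)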
# -----------------------------------------------------------------------------
# logging_close_handlers
#
# Close logging handlers
#
def logging_close_handlers():
""" logging_close_handlers
Close logging handlers
"""
handlers = logging.getLogger().handlers[:]
for handler in handlers:
handler.close()
logging.getLogger().removeHandler(handler)
# =============================================================================
# Global Variables
#
# -----------------------------------------------------------------------------
# Class UPLDReConstants
#
# base_dir = Base configuration directory location
# ini_file = Configuration file
# media_count = Counter of total files to initially upload
# -----------------------------------------------------------------------------
# Base_dir set to os.path.abspath(os.path.dirname(sys.argv[0]))
# INI file config:
# 1. Use --config-file argument option [after PARSED_ARGS]
# 2. If not, os.path.dirname(sys.argv[0])
# 3. If not, os.path.dirname(sys.argv[0]), '..', 'etc', 'uploadr.ini'
# Set appropriate configuration within INI file for support files
UPLDR_K.base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
UPLDR_K.ini_file = os.path.abspath(os.path.join(UPLDR_K.base_dir,
'uploadr.ini'))
UPLDR_K.etc_ini_file = os.path.abspath(
os.path.join(UPLDR_K.base_dir, os.path.pardir, 'etc', 'uploadr.ini'))
UPLDR_K.media_count = 0
# -----------------------------------------------------------------------------
# Debug folder locations and INI files
logging.debug(' base_dir:[%s]', UPLDR_K.base_dir)
logging.debug(' cwd:[%s]', os.getcwd())
logging.debug('sys.prefix/etc:[%s]', os.path.join(sys.prefix, 'etc'))
logging.debug(' abs(argv[0]):[%s]',
os.path.abspath(os.path.dirname(sys.argv[0])))
logging.debug(' abs(__file__):[%s]',
os.path.abspath(os.path.dirname(__file__)))
logging.debug('argv[0]/../etc:[%s]',
os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
os.path.pardir,
'etc',
'uploadr.ini')))
logging.debug(' ini_file:[%s]', UPLDR_K.ini_file)
logging.debug(' etc_ini_file:[%s]', UPLDR_K.etc_ini_file)
# -----------------------------------------------------------------------------
# =============================================================================
# Functions aliases
#
# NPR = NicePrint.NicePrint
# -----------------------------------------------------------------------------
NPR = NicePrint.NicePrint()
# -----------------------------------------------------------------------------
# =============================================================================
# Main code
#
NPR.niceprint('----------- (V{!s}) Start -----------(Log:{!s})'
.format(UPLDR_K.Version,
MY_CFG.LOGGING_LEVEL),
logalso=logging.WARNING)
# Install exception handler
sys.excepthook = my_excepthook
if __name__ == "__main__":
# Parse the arguments options
PARSED_ARGS = parse_arguments()
if PARSED_ARGS.no_delete_from_flickr:
logging.warning('Option --no-delete-from-flickr enabled=[%s]',
PARSED_ARGS.no_delete_from_flickr)
# Print/show arguments
NPR.niceprint('Output for arguments(args):\n{!s}'
.format(pprint.pformat(PARSED_ARGS)),
verbosity=3,
logalso=logging.INFO)
# Debug: upload_sleep: Seconds to sleep prior to reattempt a failed upload
logging.info('Upload sleep setting:[%s] seconds', UPLDR_K.upload_sleep)
# Set verbosity level as per -v count
NPR.set_verbosity(PARSED_ARGS.verbose)
NPR.set_mask_sensitivity(PARSED_ARGS.mask_sensitive)
# INI file config (1/3)
# 1. Use --config-file argument option [after PARSED_ARGS]
if PARSED_ARGS.config_file:
if not check_base_ini_file(UPLDR_K.base_dir, PARSED_ARGS.config_file):
NPR.niceerror(caught=True,
caughtprefix='+++',
caughtcode='661',
caughtmsg='Invalid -C argument INI file [{!s}]. '
'Exiting...'.format(UPLDR_K.ini_file),
useniceprint=True)
sys.exit(2)
else:
UPLDR_K.ini_file = PARSED_ARGS.config_file
# INI file config (2/3)
# 2. If not, os.path.dirname(sys.argv[0])
elif not check_base_ini_file(UPLDR_K.base_dir, UPLDR_K.ini_file):
NPR.niceerror(caught=True,
caughtprefix='+++',
caughtcode='662',
caughtmsg='Invalid sys.argv INI file [{!s}].'
' Continuing...'.format(UPLDR_K.ini_file),
useniceprint=True)
# INI file config (3/3)
# 3. If not, os.path.dirname(sys.argv[0]), '../etc/uploadr.ini'
if not check_base_ini_file(UPLDR_K.base_dir, UPLDR_K.etc_ini_file):
NPR.niceerror(caught=True,
caughtprefix='+++',
caughtcode='663',
caughtmsg='Invalid sys.argv/etc INI file [{!s}].'
' Exiting...'.format(UPLDR_K.etc_ini_file),
useniceprint=True)
sys.exit(2)
else:
UPLDR_K.ini_file = UPLDR_K.etc_ini_file
# Source configuration from ini_file
logging.warning('FINAL ini_file:[%s]', UPLDR_K.ini_file)
MY_CFG.readconfig(UPLDR_K.ini_file, ['Config'])
if not (MY_CFG.processconfig() and MY_CFG.verifyconfig()):
NPR.niceerror(caught=True,
caughtprefix='+++',
caughtcode='664',
caughtmsg='Incorrect config INI file [{!s}].'
' Exiting...'.format(UPLDR_K.ini_file),
useniceprint=True)
sys.exit(8)
# Update console logging level as per LOGGING_LEVEL from INI file
CONSOLE_LOGGING.setLevel(MY_CFG.LOGGING_LEVEL)
logging.warning('\n\tCONSOLE_LOGGING.setLevel=[%s] '
'\n\tROTATING_LOGGING.setLevel/enabled?=[%s/%s] '
'\n\tMY_CFG.LOGGING_LEVEL=[%s]',
MY_CFG.LOGGING_LEVEL,
MY_CFG.ROTATING_LOGGING_LEVEL,
MY_CFG.ROTATING_LOGGING,
MY_CFG.LOGGING_LEVEL)
# Rotating LOGGING level to err_file
if MY_CFG.ROTATING_LOGGING:
ROTATING_LOGGING = None
if not os.path.isdir(os.path.dirname(MY_CFG.ROTATING_LOGGING_PATH)):
NPR.niceerror(caught=True,
caughtprefix='+++',
caughtcode='665',
caughtmsg='Invalid ROTATING_LOGGING_PATH config.',
useniceprint=True)
else:
# Define a rotating file Handler which writes DEBUG messages
# or higher to err_file
ROTATING_LOGGING = logging.handlers.RotatingFileHandler(
MY_CFG.ROTATING_LOGGING_PATH,
maxBytes=MY_CFG.ROTATING_LOGGING_FILE_SIZE,
backupCount=MY_CFG.ROTATING_LOGGING_FILE_COUNT)
# Update rotating logging level as per LOGGING_LEVEL from INI file
ROTATING_LOGGING.setLevel(MY_CFG.ROTATING_LOGGING_LEVEL)
ROTATING_LOGGING.setFormatter(logging.Formatter(
fmt='[' + str(UPLDR_K.Run) + ']' +
'[%(asctime)s]:[%(processName)-11s]' +
'[%(levelname)-8s]:[%(name)s] %(message)s',
datefmt=UPLDR_K.TimeFormat))
logging.getLogger().addHandler(ROTATING_LOGGING)
# Allow multiprocessing rotating logging into a single file
# CODING: may not work on Windows
if PARSED_ARGS.processes and PARSED_ARGS.processes > 0:
logging.debug('multiprocessing logging handler: Activating...')
multiprocessing_logging.install_mp_handler()
logging.info('multiprocessing logging handler: Activated.')
logging.warning('----------- (V%s) Init Rotating '
'-----------(Log:%s)\n'
'Python version on this system: [%s]',
UPLDR_K.Version,
MY_CFG.LOGGING_LEVEL,
sys.version)
# Enables mask sensitive data on log files.
if PARSED_ARGS.mask_sensitive:
NPR.niceprint('Mask-Sensitive Argument enabled!',
logalso=logging.DEBUG)
# Patterns to filter defined on UPLDR_K.MaskPatterns
logging.debug('Setting Masking Logging Formatter')
for hnd in logging.root.handlers:
hnd.setFormatter(NicePrint.RedactingFormatter(
hnd.formatter,
UPLDR_K.MaskPatterns))
logging.debug('Masking Logging Formatter is now set!')
logging.debug('Masking Patterns: %s', UPLDR_K.MaskPatterns)
NPR.niceprint('Output for FLICKR Configuration:\n{!s}'
.format(pprint.pformat(MY_CFG.FLICKR)),
verbosity=3,
logalso=logging.INFO)
# Ensure that only one instance of this script is running
try:
# FileLocker is an alias to portalocker (if available) or fcntl
FILELOCK(open(MY_CFG.LOCK_PATH, 'w'),
FileLocker.LOCK_EX | FileLocker.LOCK_NB)
except IOError as err:
if err.errno == errno.EAGAIN:
logging.critical('Script already running.')
sys.exit(-1)
raise
finally:
pass
# Run uploader
run_uploadr(PARSED_ARGS)
NPR.niceprint('----------- (V{!s}) End -----------(Log:{!s})'
.format(UPLDR_K.Version,
MY_CFG.LOGGING_LEVEL),
logalso=logging.WARNING)
logging_close_handlers()
| 42.575284 | 79 | 0.519434 |
9230187d8ad559ff52e30856f5221d9619ae6d43 | 847 | py | Python | platform/polycommon/polycommon/options/option_subjects.py | denisoliveirac/polyaxon | 21d33e50bbbd4bfbe474683f977eecf0b741ddd4 | ["Apache-2.0"] | null | null | null | platform/polycommon/polycommon/options/option_subjects.py | denisoliveirac/polyaxon | 21d33e50bbbd4bfbe474683f977eecf0b741ddd4 | ["Apache-2.0"] | null | null | null | platform/polycommon/polycommon/options/option_subjects.py | denisoliveirac/polyaxon | 21d33e50bbbd4bfbe474683f977eecf0b741ddd4 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ENABLED = "ENABLED"
URL = "URL"
GLOBAL_COUNTDOWN = "GLOBAL_COUNTDOWN"
GLOBAL_COUNTDOWN_DELAYED = "GLOBAL_COUNTDOWN_DELAYED"
RECONCILE_COUNTDOWN = "RECONCILE_COUNTDOWN"
DEFAULT_PREFIX = "DEFAULT_PREFIX"
TOKEN = "TOKEN"
CONTAINER = "CONTAINER"
| 33.88 | 74 | 0.767414 |
fd75bfd653c61e5840c8a0871e2e840c5904b25e | 462 | py | Python | Aula13.py | juniorsmartins/Aulas-Python | 48cf784c8856a588a4789c3206601a970206fdfe | ["MIT"] | null | null | null | Aula13.py | juniorsmartins/Aulas-Python | 48cf784c8856a588a4789c3206601a970206fdfe | ["MIT"] | null | null | null | Aula13.py | juniorsmartins/Aulas-Python | 48cf784c8856a588a4789c3206601a970206fdfe | ["MIT"] | null | null | null |
print('')
print('-'*20)
print('Curso em Vídeo')
print('Professor: Gustavo Guanabara')
print('Operações Aritméticas')
print('-'*20)
print('')
n1 = int(input('Digite o primeiro valor: '))
n2 = int(input('Digite o segundo valor: '))
s = n1 + n2
m = n1 * n2
d = n1 / n2
di = n1 // n2
e = n1 ** n2
print('A soma é {}, a multiplicação é {} e a divisão é {:.2f}'.format(s, m, d))
print('A divisão inteira é {} e a potência é {}'.format(di, e))
print('-'*40)
print('')
| 23.1 | 79 | 0.608225 |
9611deb88ec4f63cd0f7d05dbe1d62cf0d1a0fb8 | 128 | py | Python | 01_Python_Basico_Intermediario/Aula029/aula29.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | ["MIT"] | 1 | 2021-07-19T12:31:49.000Z | 2021-07-19T12:31:49.000Z | 01_Python_Basico_Intermediario/Aula029/aula29.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | ["MIT"] | null | null | null | 01_Python_Basico_Intermediario/Aula029/aula29.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | ["MIT"] | null | null | null |
"""
Funções def = return
"""
def f(var):
print(var)
def dumb():
return f
# var = dumb()()
dumb()('Colocar valor')
| 8 | 23 | 0.53125 |
cc0e31b79b121c35f0950391a4f25518a22ca6f1 | 5,665 | py | Python | .history/src/data/data_20191021144922.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | ["MIT"] | null | null | null | .history/src/data/data_20191021144922.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | ["MIT"] | null | null | null | .history/src/data/data_20191021144922.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | ["MIT"] | null | null | null |
import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], age_bins=None, drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(
self,
raw_data,
adult_age_threshold_min=13,
age_bins=None,
fare_mode=None,
embarked_mode=None,
Xy_age_estimate=None,
drop_columns=None,
):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if age_bins is None:
age_bins = [0, 10, 20, 30, 40, 50, 60, np.inf]
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.raw = raw_data
self.adult_age_threshold_min = adult_age_threshold_min
self.Xy_age_estimate = Xy_age_estimate
        self.age_bins = age_bins
        self.drop_columns = drop_columns
self.Xy = self.raw.Xy_raw.copy()
if fare_mode is None:
fare_mode = self.Xy["fare"].mode()[0]
if embarked_mode is None:
embarked_mode = self.Xy["embarked"].mode()[0]
self.fare_mode = fare_mode
self.embarked_mode = embarked_mode
self.impute_missing_fare()
self.impute_missing_embarked()
self.extract_title()
# self.extract_last_name()
# self.extract_cabin_number()
# self.extract_cabin_prefix()
# self.estimate_age()
# self.calc_age_bins()
# self.calc_is_child()
# self.calc_is_travelling_alone()
def calc_is_travelling_alone(self):
self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
def calc_is_child(self):
self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
def extract_cabin_number(self):
self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")
def extract_cabin_prefix(self):
self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")
def extract_title(self):
"""Extract title from the name using NameParser.
"""
title = (self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
.replace({"":np.nan})
.fillna(self.Xy['sex'])
.replace({'female':'Mrs', 'male':'Mr'})
)
self.Xy["title"] = title
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def calc_age_bins(self):
self.Xy["age_bin"] = pd.cut(
self.Xy.age, bins=[0, 10, 20, 30, 40, 50, 60, np.inf]
)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)
def estimate_age(self, groupby_columns=["sex", "title"]):
"""[summary]
Keyword Arguments:
groupby {list} -- [description] (default: {['sex','title']})
"""
if self.Xy_age_estimate is None:
self.Xy_age_estimate = (
self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
)
self.Xy_age_estimate = self.Xy_age_estimate.rename(
columns={"age_known": "age_estimate"}
)
out_df = (
self.Xy.reset_index()
.merge(self.Xy_age_estimate, on=groupby_columns)
.set_index("passengerid")
)
out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
self.Xy = out_df
def impute_missing_fare(self):
self.Xy["fare"] = self.Xy["fare"].fillna(self.fare_mode)
def impute_missing_embarked(self):
self.Xy["embarked"] = self.Xy["embarked"].fillna(self.embarked_mode)
| 29.815789 | 114 | 0.556222 |
8eef5c280bc1135d0dab46268e49f2df628e5dd7 | 2,238 | py | Python | squarecloud/Square.py | squarecl/square-python-status | ec29adc9a3ff6575eb2fc669111599745bbe5926 | ["Apache-2.0"] | null | null | null | squarecloud/Square.py | squarecl/square-python-status | ec29adc9a3ff6575eb2fc669111599745bbe5926 | ["Apache-2.0"] | 2 | 2021-12-02T05:04:18.000Z | 2021-12-02T05:15:37.000Z | squarecloud/Square.py | squarecl/square-python-status | ec29adc9a3ff6575eb2fc669111599745bbe5926 | ["Apache-2.0"] | 3 | 2021-11-29T19:20:50.000Z | 2021-12-02T04:51:31.000Z |
# This code was made for SquareCloud (https://squarecloud.app) by Mudinho and NemRela.
# Imports Section
import os
import sys
from warnings import warn
# Bytes converter (RAM Function)
def bytes_to(n: int, formatted: bool = False):
if formatted:
for i in ['B', 'KB', 'MB', 'GB']:
if n < 1024.0:
return f'{n:3.2f}{i}'
n /= 1024.0
return n
return float(f'{n / 1048576:3.2f}')
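# Worked examples (values chosen purely for illustration):
#   bytes_to(1048576)               -> 1.0       (bytes converted to MB as a float)
#   bytes_to(1536, formatted=True)  -> '1.50KB'  (auto-scaled, human-readable string)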
# Get_bytes from a directory
def get_bytes_from(path: str) -> int:
try:
with open(path, 'r') as b:
return int(b.read())
except FileNotFoundError:
return 0
class Square:
if os.name != 'posix':
warn('\n\nAtenção: Esta biblioteca pode não funcionar corretamente no seu sistema operacional.\n')
class ram:
def __new__(cls, formatted: bool = False):
return f'{round(cls.used(raw=True) / 1024 ** 2)}/{cls.total(formatted)}'
# Returns your used ram
@staticmethod
def used(formatted: bool = False, raw: bool = False):
b: int = get_bytes_from('/sys/fs/cgroup/memory/memory.usage_in_bytes')
return b if raw else bytes_to(b, formatted)
# Returns your total ram
@staticmethod
def total(formatted: bool = False, raw: bool = False):
b: int = get_bytes_from('/sys/fs/cgroup/memory/memory.limit_in_bytes')
return b if raw else bytes_to(b, formatted)
class ssd:
def __new__(cls, formatted: bool = False, raw: bool = False):
folder: str = sys.path[0]
b: int = 0
for item in os.listdir(folder):
if os.path.isdir(f'{os.path.join(folder)}/{item}'):
for path, dirs, files in os.walk(f'{os.path.join(folder)}/{item}'):
for f in files:
fp = os.path.join(path, f)
b += float(int(os.path.getsize(fp)))
else:
if not str(item) == 'start.sh':
b += float(int(os.path.getsize(f'{os.path.join(folder)}/{item}')))
return b if raw else bytes_to(b, formatted)
| 36.096774 | 107 | 0.537534 |
375079570429dedb1d17297f30fb7b51109ef5bb | 2,250 | py | Python | python/paddle/fluid/entry_attr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | ["Apache-2.0"] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/entry_attr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | ["Apache-2.0"] | null | null | null | python/paddle/fluid/entry_attr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | ["Apache-2.0"] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
__all__ = ['ProbabilityEntry', 'CountFilterEntry']
class EntryAttr(object):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
"""
def __init__(self):
self._name = None
def _to_attr(self):
"""
Returns the attributes of this parameter.
Returns:
Parameter attributes(map): The attributes of this parameter.
"""
raise NotImplementedError("EntryAttr is base class")
class ProbabilityEntry(EntryAttr):
def __init__(self, probability):
super(ProbabilityEntry, self).__init__()
if not isinstance(probability, float):
raise ValueError("probability must be a float in (0,1)")
if probability <= 0 or probability >= 1:
raise ValueError("probability must be a float in (0,1)")
self._name = "probability_entry"
self._probability = probability
def _to_attr(self):
return ":".join([self._name, str(self._probability)])
class CountFilterEntry(EntryAttr):
def __init__(self, count_filter):
super(CountFilterEntry, self).__init__()
if not isinstance(count_filter, int):
raise ValueError(
"count_filter must be a valid integer greater than 0")
if count_filter < 0:
raise ValueError(
"count_filter must be a valid integer greater or equal than 0")
self._name = "count_filter_entry"
self._count_filter = count_filter
def _to_attr(self):
return ":".join([self._name, str(self._count_filter)])
| 29.220779 | 79 | 0.662667 |
50c293c624738998a432f6ce75b1741eaa87620e | 2,062 | py | Python | pinakes/main/migrations/0009_auto_20211004_2054.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | ["Apache-2.0"] | 2 | 2022-03-17T18:53:58.000Z | 2022-03-17T22:04:22.000Z | pinakes/main/migrations/0009_auto_20211004_2054.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | ["Apache-2.0"] | 9 | 2022-03-18T08:22:57.000Z | 2022-03-30T17:14:49.000Z | pinakes/main/migrations/0009_auto_20211004_2054.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | ["Apache-2.0"] | 7 | 2022-03-17T22:03:08.000Z | 2022-03-28T21:28:34.000Z |
# Generated by Django 3.2.5 on 2021-10-04 20:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("main", "0008_alter_request_parent"),
]
operations = [
migrations.RemoveConstraint(
model_name="catalogserviceplan",
name="main_catalogserviceplan_name_empty",
),
migrations.RemoveConstraint(
model_name="catalogserviceplan",
name="main_catalogserviceplan_name_unique",
),
migrations.RenameField(
model_name="catalogserviceplan",
old_name="base",
new_name="base_schema",
),
migrations.RemoveField(
model_name="catalogserviceplan",
name="description",
),
migrations.AddField(
model_name="catalogserviceplan",
name="create_json_schema",
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="catalogserviceplan",
name="imported",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="catalogserviceplan",
name="modified_schema",
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="catalogserviceplan",
name="service_offering_ref",
field=models.CharField(max_length=64, null=True),
),
migrations.AddField(
model_name="catalogserviceplan",
name="service_plan_ref",
field=models.CharField(max_length=64, null=True),
),
migrations.AlterField(
model_name="catalogserviceplan",
name="modified",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="catalogserviceplan",
name="name",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 31.242424 | 74 | 0.580989 |
ff01b7de93e7b8e385f01c2f9c15f436923d70d3 | 1,198 | py | Python | tanit/filesystem/config.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | 1 | 2020-09-01T15:16:11.000Z | 2020-09-01T15:16:11.000Z | tanit/filesystem/config.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | null | null | null | tanit/filesystem/config.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | null | null | null |
import json
import logging as lg
import os
_logger = lg.getLogger(__name__)
class FileSystemsConfig(object):
config_file = "filesystems.json"
default_conf_dir = os.path.expanduser("/etc/tanit/conf")
def __init__(self, path=None):
self._filesystems = {}
self.conf_dir = path or os.getenv("TANIT_CONF_DIR", self.default_conf_dir)
self.conf_file = os.path.join(self.conf_dir, self.config_file)
if os.path.exists(self.conf_file):
try:
self.config = json.loads(open(self.conf_file).read())
except Exception as e:
                raise TanitConfigurationException(
                    "Exception while loading configuration file %s: %s"
                    % (self.conf_file, e)
                )
_logger.info(
"Instantiated filesystems configuration from %s.", self.conf_file
)
else:
_logger.warning(
"Could not find filesystems configuration file, "
+ "instantiating empty configuration."
)
self.config = None
def get_config(self):
return self.config
class TanitConfigurationException(Exception):
pass
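# Minimal usage sketch (assumes a filesystems.json under TANIT_CONF_DIR or
# /etc/tanit/conf; get_config() returns None when no file is found):
#     fs_config = FileSystemsConfig().get_config()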
| 28.52381 | 87 | 0.606845 |
3d7be0d21a633e4aaefba89c22cb4d2334ee4e7e | 25,813 | py | Python | pyrsss/mag/sm_stations.py | butala/pyrsss | 3f4412cdab36ecd36de72e5607791d2633c658a9 | ["MIT"] | 7 | 2016-12-27T08:00:54.000Z | 2021-12-16T06:55:16.000Z | pyrsss/mag/sm_stations.py | butala/pyrsss | 3f4412cdab36ecd36de72e5607791d2633c658a9 | ["MIT"] | 1 | 2017-01-31T20:36:08.000Z | 2017-01-31T20:36:08.000Z | pyrsss/mag/sm_stations.py | butala/pyrsss | 3f4412cdab36ecd36de72e5607791d2633c658a9 | ["MIT"] | 6 | 2016-04-08T16:25:14.000Z | 2021-05-02T12:05:16.000Z |
from csv import reader
from collections import namedtuple
from cStringIO import StringIO
# MLAT and MLON are from IGRF 2010
STATION_CSV = """\
IAGA,GLON,GLAT,MLON,MLAT,STATION-NAME,OPERATOR-NUM,OPERATORS
SPA,0.00,-90.00,19.56,-74.19,South Pole Station,1,PENGUIN
B23,316.13,-88.03,20.41,-72.34,m88-316,1,BAS
B21,28.41,-87.00,29.34,-73.53,m87-028,1,BAS
B22,68.17,-86.51,31.00,-75.62,m87-069,1,BAS
B19,2.06,-85.36,30.41,-71.39,m85-002,1,BAS
B20,95.98,-85.36,30.38,-77.78,m85-096,1,BAS
PG1,77.20,-85.50,33.68,-76.55,Antartica,1,THEMIS
B18,336.14,-84.35,26.33,-69.45,m84-336,1,BAS
PG2,57.96,-84.42,39.07,-75.24,Antartica,1,THEMIS
PG3,37.62,-84.42,37.95,-73.43,Antartica,1,THEMIS
B17,347.76,-82.90,30.79,-68.84,m83-348,1,BAS
B16,347.06,-82.78,30.73,-68.71,m83-347,1,BAS
PG4,12.20,-83.34,36.38,-70.83,Antartica,1,THEMIS
B14,337.74,-80.89,29.35,-66.70,m81-338,1,BAS
B15,2.97,-81.49,37.07,-68.93,m82-003,1,BAS
B27,3.00,-81.00,37.78,-68.61,m81-003,1,BAS
B13,77.00,-80.00,56.17,-77.91,m80-077,1,BAS
SBA,166.78,-77.85,-31.24,-79.94,Scott Base,1,INTERMAGNET
VOS,106.87,-78.45,54.41,-83.54,Vostok,2,INTERMAGNET,AARI
B12,335.88,-79.08,29.67,-65.15,m79-336,1,BAS
MCM,166.67,-77.85,-31.25,-79.96,McMurdo Station,1,PENGUIN
B11,336.58,-77.51,30.69,-63.96,m78-337,1,BAS
B10,39.71,-77.32,57.15,-71.02,m77-040,1,BAS
DMC,235.83,-75.25,-14.70,-65.48,Dome Concordia,1,INTERMAGNET
B09,42.99,-74.01,65.58,-69.97,m74-043,1,BAS
B08,159.03,-72.77,-65.20,-80.61,m73-159,1,BAS
B07,44.28,-70.70,72.51,-68.45,m70-044,1,BAS
B24,77.77,-71.67,90.77,-76.46,m72-078,1,BAS
NVL,11.83,-70.78,53.55,-63.26,Novolazorevskaya,1,AARI
VNA,351.70,-70.70,41.78,-60.63,Neumayer Station III,1,INTERMAGNET
B06,39.40,-69.67,71.31,-66.95,m70-039,1,BAS
DVS,77.97,-68.58,100.76,-74.97,Davis,1,INTERMAGNET
PRG,76.23,-69.23,97.38,-74.88,Progress,1,AARI
DRV,140.01,-66.67,-122.99,-80.60,Dumont Durville,1,INTERMAGNET
MAW,62.88,-67.61,90.72,-70.67,Mawson,1,INTERMAGNET
B04,41.08,-68.58,74.03,-66.63,m68-041,1,BAS
B05,41.32,-69.29,73.07,-67.09,m69-041,1,BAS
B03,291.88,-67.57,8.27,-53.14,m67-292,1,BAS
CSY,110.53,-66.28,158.16,-80.93,Casey,1,INTERMAGNET
MIR,93.02,-66.55,124.25,-77.56,Mirny,2,INTERMAGNET,AARI
AIA,295.74,-65.25,9.72,-50.99,Faraday Islands,1,INTERMAGNET
B02,294.00,-66.03,8.97,-51.69,m66-294,1,BAS
PAL,295.95,-64.77,9.72,-50.55,Palmer,1,SAMBA
B01,296.52,-64.82,10.05,-50.62,m65-297,1,BAS
LIV,299.61,-62.66,11.31,-48.75,Livingston Island,1,INTERMAGNET
OHI,302.10,-63.32,12.87,-49.52,OHiggins,1,SAMBA
ESC,301.08,-62.18,12.07,-48.41,Escudero,1,SAMBA
ORC,315.26,-60.74,20.47,-48.69,Orcadas,1,INTERMAGNET
KEP,323.51,-54.28,25.94,-45.16,King Edward Point,2,INTERMAGNET,BGS
MCQ,158.95,-54.50,-110.94,-64.39,Macquarie Island,2,210MM,INTERMAGNET
PNT,289.10,-53.20,3.24,-39.23,Puerto Natales,1,SAMBA
PST,302.11,-51.70,11.06,-38.96,Port Stanley,2,BGS,INTERMAGNET
ENP,289.10,-52.13,3.05,-38.19,San Gregorio,1,SAMBA
PAF,70.26,-49.35,123.64,-58.63,Port aux Francias,1,INTERMAGNET
CZT,51.87,-46.43,107.41,-53.23,Port Alfred,1,INTERMAGNET
EYR,172.40,-43.40,-102.74,-49.89,Eyrewell,1,INTERMAGNET
TRW,294.68,-43.25,5.38,-29.98,Trelew,1,INTERMAGNET
LEM,147.50,-42.30,-132.47,-52.95,Lemont,1,210MM
VLD,286.86,-39.48,-0.16,-25.61,Valdivia,1,SAMBA
OSO,286.91,-40.34,-0.03,-26.48,Osorno,1,SAMBA
PAC,289.91,-40.34,1.97,-26.64,Punta Arenas,1,SAMBA
TDC,347.69,-37.10,49.99,-39.80,Tristan de Cunha,4,DTUSpace,INTERMAGNET,TROMSOE,GFZ
AMS,77.57,-37.80,140.23,-48.87,Martin de Vivias,1,INTERMAGNET
CNB,149.36,-35.32,-132.34,-45.03,Canberra,1,INTERMAGNET
CAN,149.00,-35.30,-132.77,-45.05,Canberra,1,210MM
HER,19.23,-34.43,84.09,-42.08,Hermanus,2,INTERMAGNET,EMMA
KAT,117.62,-33.68,-170.10,-45.63,Katanning,1,210MM
ADL,138.65,-34.67,-145.09,-45.42,Adelaide,1,210MM
CER,289.40,-33.45,1.15,-19.71,Los Cerrillos,1,SAMBA
GNA,116.00,-31.80,-171.85,-43.57,Gnangara,1,INTERMAGNET
SUT,20.67,-32.40,86.49,-40.66,Sutherland,1,EMMA
PIL,294.47,-31.40,4.76,-18.39,Pilar,1,INTERMAGNET
GNG,115.71,-31.36,-172.14,-43.08,Gingin,1,INTERMAGNET
SER,288.87,-30.00,0.65,-16.22,La Serena,1,SAMBA
IPM,250.58,-27.20,-32.30,-19.28,Isla de Pascua,1,INTERMAGNET
KMH,18.11,-26.54,86.27,-36.25,Keetmanshoop,2,GFZ,INTERMAGNET
DAL,151.20,-27.18,-132.12,-36.01,Dalby,1,210MM
HBK,27.71,-25.88,96.36,-35.51,Hartebeesthoek,1,INTERMAGNET
BSV,139.21,-25.54,-145.83,-35.31,Birdsville,1,210MM
LMM,40.00,-25.92,108.28,-35.33,Maputo,1,INTERMAGNET
ASP,133.88,-23.77,-151.93,-33.67,Alice Springs,1,INTERMAGNET
VSS,316.35,-22.40,23.72,-18.53,Vassouras,2,INTERMAGNET,GFZ
ANT,289.76,-23.39,1.35,-9.83,Antofagasta,1,SAMBA
LRM,114.10,-22.22,-173.36,-32.83,Learmonth,1,INTERMAGNET
EWA,202.00,-21.32,-81.02,-21.31,Ewa Beach,1,210MM
CTA,146.30,-20.10,-138.73,-28.92,Charters Towers,1,INTERMAGNET
TAN,47.55,-18.92,117.81,-28.38,Antananarivo,1,INTERMAGNET
TSU,17.70,-19.22,88.13,-30.50,Tsumeb,2,INTERMAGNET,EMMA
PUT,290.50,-18.33,2.23,-5.15,Putre,1,SAMBA
A05,17.58,-19.20,88.00,-30.49,Namibia,1,AMBER
VRE,292.38,-17.28,3.97,-4.53,Villa Remedios,1,GFZ
PPT,210.42,-17.57,-73.70,-16.22,Pamatai,1,INTERMAGNET
SHE,354.27,-15.95,63.70,-25.71,St. Helena,1,GFZ
NMP,39.25,-15.09,110.85,-24.90,Nampula,1,INTERMAGNET
API,188.22,-13.80,-96.46,-15.50,Apia,1,INTERMAGNET
ASA,170.70,-14.28,-113.81,-19.02,American Samoa,1,210MM
DRW,130.90,-12.40,-156.02,-21.31,Darwin,1,210MM
WEP,141.88,-12.68,-144.40,-21.33,Weipa,1,210MM
KDU,132.47,-12.69,-154.37,-21.59,Kakadu,1,INTERMAGNET
HUA,284.67,-12.05,-2.75,1.17,Huancayo,1,INTERMAGNET
CKI,96.84,-12.19,168.90,-22.26,Cocos-Keeling Islands,1,INTERMAGNET
A10,319.50,-9.40,31.08,-10.00,Petrolina,1,AMBER
WTK,112.63,-7.56,-174.68,-16.50,Watukosek,1,210MM
ASC,345.62,-7.95,56.97,-16.34,Ascension Island,2,BGS,INTERMAGNET
WEW,143.62,-3.55,-143.40,-11.75,Wewak,1,210MM
BIK,136.05,-1.08,-151.37,-9.35,Biak,1,210MM
A11,311.55,-1.45,26.47,0.44,Belem,1,AMBER
PTN,109.25,-0.05,-178.10,-8.49,Pontianak,1,210MM
KTB,100.32,-0.20,172.96,-9.01,Kototabang,1,210MM
KOU,307.27,2.21,23.49,6.05,Kourou,1,INTERMAGNET
TND,124.95,1.29,-162.62,-6.78,Tondano,1,INTERMAGNET
GAN,73.15,0.69,145.72,-7.77,Gan,1,INTERMAGNET
BNG,18.57,4.33,91.18,-9.54,Bangui,1,INTERMAGNET
A03,11.52,3.87,84.18,-11.10,Cameroon,1,AMBER
A06,355.87,5.33,69.02,-8.23,Abidjan,1,AMBER
KOR,134.50,7.33,-153.20,-0.71,Koror,1,210MM
A08,126.00,7.50,-161.63,-0.34,Davao,1,AMBER
A13,98.35,7.89,171.06,-0.32,Phuket,1,AMBER
AAE,38.77,9.03,111.51,-0.06,Addis Ababa,1,INTERMAGNET
YAP,138.50,9.30,-149.26,1.24,Yap,1,210MM
CRP,275.09,10.44,-12.58,21.63,Chiripa,1,INTERMAGNET
A01,7.55,10.50,80.45,-4.68,Abuja,1,AMBER
A07,346.34,9.58,60.64,-3.02,Conakry,1,AMBER
DLT,108.48,11.94,-178.88,4.30,Dalat,1,INTERMAGNET
GUA,144.87,13.59,-143.10,5.64,Guam,3,INTERMAGNET,USGS,210MM
MBO,343.03,14.38,58.44,1.20,Mbour,1,INTERMAGNET
MUT,121.02,14.37,-166.45,6.90,Muntinlupa,1,210MM
A04,39.46,14.28,112.06,5.78,Ethiopia,1,AMBER
A09,120.00,14.58,-167.46,7.13,Manila,1,AMBER
A12,100.61,14.08,173.36,6.47,Bangkok,1,AMBER
PNL,303.82,16.68,23.75,21.14,Pantanal,1,GFZ
HYB,78.60,17.40,151.87,10.36,Hyderabad,2,GFZ,INTERMAGNET
ABG,72.87,18.62,146.22,11.99,Alibag,2,GFZ,INTERMAGNET
SJG,293.85,18.11,11.85,26.75,San Juan,2,INTERMAGNET,USGS
TEO,260.82,19.75,-29.37,28.80,Teoloyucan,1,INTERMAGNET
HON,202.00,21.32,-89.10,20.88,Honolulu,2,INTERMAGNET,USGS
PHU,105.95,21.03,178.77,14.11,Phuthuy,1,INTERMAGNET
M11,256.90,20.70,-33.86,29.01,Juriquilla,1,McMAC
TAM,5.53,22.79,78.97,8.92,Tamanrasset,1,INTERMAGNET
GZH,113.34,23.09,-173.79,16.29,Guangzhou,1,INTERMAGNET
SON,66.44,25.12,139.81,19.36,Sonmiani,1,INTERMAGNET
LNP,121.17,25.00,-165.89,18.13,Lunping,2,210MM,INTERMAGNET
M10,260.40,24.80,-30.22,33.87,Linares,1,McMAC
CBI,142.30,27.15,-145.40,19.58,Chichijima,1,210MM
M09,262.20,26.40,-28.17,35.80,Lyford,1,McMAC
JAI,75.80,26.92,149.20,21.02,Jaipur,1,INTERMAGNET
GUI,343.57,28.32,60.99,16.06,Guimar,1,INTERMAGNET
MID,182.62,28.21,-108.84,24.27,Midway,1,INTERMAGNET
FIT,279.05,28.07,-6.42,38.57,Bullcreek,1,MEASURE
CDP,256.30,31.00,-35.65,39.45,Chengdu,1,INTERMAGNET
BSL,270.37,30.35,-17.88,40.69,Bay St Louis,2,INTERMAGNET,USGS
DLR,259.08,29.49,-32.17,38.40,Del Rio,1,INTERMAGNET
JAX,278.40,30.35,-7.11,40.78,Jacksonville,1,MEASURE
ELT,34.95,29.67,107.23,22.59,Eilat,1,INTERMAGNET
MLT,30.89,29.52,103.13,21.81,Misallat,1,INTERMAGNET
M08,261.39,29.44,-29.35,38.74,San Antonio,1,McMAC
TUC,249.27,32.17,-43.96,39.32,Tucson,2,INTERMAGNET,USGS
KAG,130.72,31.48,-156.17,24.56,Kagoshima,1,210MM
YMK,130.62,31.19,-156.28,24.26,Yamakawa,1,210MM
KNY,130.88,31.42,-156.01,24.49,Kanoya,1,INTERMAGNET
BGY,35.09,31.72,107.42,24.99,Bar Gyora,1,INTERMAGNET
ONW,141.47,33.43,-145.96,26.14,Onagawa,1,210MM
HTY,139.80,33.12,-147.50,25.88,Hatizyo,1,INTERMAGNET
QSB,35.64,33.87,108.05,27.54,Qsaybeh,1,INTERMAGNET
USC,278.54,33.34,-6.68,43.65,U of South Carolina,1,MEASURE
M07,262.25,32.98,-28.49,42.44,Richardson,1,McMAC
M06,262.60,35.00,-28.16,44.52,Purcell,1,McMAC
T26,242.15,34.20,-52.21,40.03,San Gabriel,1,THEMIS
T27,242.32,34.38,-52.06,40.25,Table Mountain,1,THEMIS
LZH,103.85,36.09,177.25,30.55,Lanzhou,1,INTERMAGNET
FRN,240.30,37.10,-54.87,42.65,Fresno,2,INTERMAGNET,USGS
SMA,334.87,36.99,55.49,30.34,Santa Maria/Azoren,1,GFZ
KAK,140.18,36.23,-147.05,29.12,Kakioka,1,INTERMAGNET
TUL,264.22,35.92,-26.09,45.65,Tulsa,1,Unknown
DSO,278.60,36.25,-6.39,46.47,Darsky,1,MEASURE
SFS,354.06,36.67,70.52,27.11,San Fernando,1,INTERMAGNET
E05,22.95,36.72,95.78,29.37,Velies,1,ENIGMA
CYG,126.85,36.37,-159.65,29.94,Cheongyang,1,INTERMAGNET
A02,2.93,36.85,77.88,27.49,Algeria,1,AMBER
FRD,282.63,38.20,-0.64,48.05,Fredericksburg,2,INTERMAGNET,USGS
ASH,58.10,37.95,131.16,33.36,Ashkabad,1,Unknown
E04,23.93,38.08,96.84,31.18,Dionyssos,1,ENIGMA
PEG,23.90,38.10,96.81,31.21,Pedeli,1,INTERMAGNET
BOU,254.77,40.13,-38.68,48.51,Boulder,2,INTERMAGNET,USGS
APL,283.12,39.17,0.12,48.95,Applied Physics Lab,1,MEASURE
ESA,141.36,39.24,-145.89,32.24,Esashi,1,INTERMAGNET
MIZ,141.20,39.11,-146.04,32.11,Mizusawa,1,INTERMAGNET
M05,263.70,38.50,-26.91,48.18,Americus,1,McMAC
T16,240.20,39.19,-55.49,44.77,Carson City,1,THEMIS
BMT,116.20,40.30,-170.01,34.69,Beijing Ming Tombs,1,INTERMAGNET
SPT,355.65,39.55,72.25,31.31,San Pablo Toledo,1,INTERMAGNET
TOL,355.95,39.88,72.55,31.79,Toledo,1,Unknown
BJI,116.18,40.06,-170.05,34.43,Beijing,1,210MM
ISK,29.06,41.07,102.04,35.32,Kandilli,1,INTERMAGNET
TKT,69.62,41.33,142.86,36.91,Tashkent,1,Unknown
EBR,0.49,40.82,76.44,33.11,Ebro,1,INTERMAGNET
T20,281.62,40.18,-1.88,50.10,Loysburg,1,THEMIS
E02,21.97,39.45,95.14,32.68,Klokotos,1,ENIGMA
E03,24.10,38.63,97.05,31.88,Kimi,1,ENIGMA
M04,263.84,41.36,-26.89,51.07,Bennington,1,McMAC
IZN,29.73,40.50,102.63,34.70,Iznik,1,INTERMAGNET
E01,23.86,41.35,97.13,35.21,Nevrokopi,1,ENIGMA
AQU,13.32,42.38,87.68,35.74,LAquila,3,SEGMA,INTERMAGNET,EMMA
DUR,14.28,41.39,88.38,34.49,Duronia,1,INTERMAGNET
C01,276.10,42.42,-9.58,52.65,Ann Arbor,1,CARISMA
MMB,144.19,43.91,-143.33,37.08,Memambetsu,1,INTERMAGNET
AAA,76.92,43.25,150.22,38.83,Alma Ata,1,INTERMAGNET
PPI,131.73,42.98,-154.70,36.67,Popov Island,1,210MM
RIK,143.76,43.48,-143.70,36.64,Rikubetsu,1,210MM
MSH,288.52,42.60,7.85,51.59,Millstone Hill,1,MEASURE
GTF,288.05,43.62,7.32,52.69,Grafton,1,MEASURE
M03,264.40,43.60,-26.26,53.38,Worthington,1,McMAC
OTT,284.45,45.40,2.52,54.98,Ottawa,2,CANMOS,INTERMAGNET
SUA,26.25,45.32,99.92,40.18,Surlari,2,GFZ,INTERMAGNET
MSR,142.27,44.37,-145.03,37.64,Moshiri,1,210MM
CLK,285.00,44.70,3.23,54.21,Clarkson,1,MEASURE
GCK,20.77,44.63,94.75,39.04,Grocka,1,INTERMAGNET
SBL,299.99,43.93,22.70,50.53,Sable Carrigan,2,BGS,INTERMAGNET
T21,257.41,43.08,-35.69,51.96,Pine Ridge,1,THEMIS
T23,274.86,43.66,-11.31,53.91,Remus,1,THEMIS
NKK,62.12,45.77,135.28,41.74,Novokazalinsk,1,Unknown
ODE,30.88,46.22,104.42,41.46,Odessa,1,GFZ
M02,266.75,45.56,-23.05,55.55,Cambridge,1,McMAC
T24,271.40,44.78,-16.31,55.00,Shawano,1,THEMIS
CNH,124.86,44.08,-161.26,38.25,Changchun,1,INTERMAGNET
WMQ,87.71,43.81,161.14,39.23,Urumqi,1,INTERMAGNET
RNC,12.08,43.97,86.87,37.77,Ranchio,2,SEGMA,EMMA
STJ,307.32,47.60,31.64,52.60,St Johns,2,CANMOS,INTERMAGNET
THY,17.54,46.90,92.25,41.72,Tihany,2,INTERMAGNET,EMMA
M01,263.55,46.87,-27.71,56.59,Glyndon,1,McMAC
C08,264.92,45.87,-25.69,55.72,Osakis,1,CARISMA
T17,287.87,44.95,7.22,54.06,Derby,1,THEMIS
T25,241.07,45.14,-56.04,51.06,Ukiah,1,THEMIS
T15,275.66,46.24,-10.08,56.45,Bay Mills,1,THEMIS
T18,259.32,46.09,-33.52,55.29,Fort Yates,1,THEMIS
CST,11.65,46.05,86.90,40.48,Castello Tesino,2,SEGMA,EMMA
P01,16.66,45.41,91.18,39.82,Lonjsko Polje,1,EMMA
VIC,236.58,48.52,-62.05,53.62,Victoria,2,CANMOS,INTERMAGNET
NEW,242.88,48.27,-54.82,54.65,Newport,2,INTERMAGNET,USGS
CLF,2.27,48.02,79.42,43.11,Chambon la foret,1,INTERMAGNET
FUR,11.28,48.17,87.05,43.20,Furstenfeldbruk,1,INTERMAGNET
HRB,18.19,47.86,93.03,42.92,Hurbanovo,2,INTERMAGNET,EMMA
NCK,16.72,47.63,91.68,42.60,Nagycenk,3,SEGMA,INTERMAGNET,EMMA
YSS,142.72,46.95,-144.67,40.38,Yuzhno Sakhalinsk,1,AARI
BDV,14.02,49.07,89.64,44.35,Budkov,1,INTERMAGNET
VLO,282.24,48.19,-0.38,58.00,Val-dOr,1,AUTUMN
BFO,8.33,48.33,84.57,43.41,Black Forest,1,INTERMAGNET
C10,245.79,47.66,-51.20,54.60,Polson,1,CARISMA
C11,263.64,48.03,-27.69,57.76,Thief River Falls,1,CARISMA
T19,245.33,47.61,-51.74,54.45,Hot Springs,1,THEMIS
PAG,24.18,47.49,98.37,42.66,Panagjurishte,3,GFZ,INTERMAGNET,SEGMA
KHB,134.69,47.61,-151.91,41.44,Khabarovsk,1,INTERMAGNET
VYH,18.84,48.49,93.75,43.71,Vyhne,1,EMMA
WIC,15.52,47.55,90.60,42.48,Conrad Observatorium,1,EMMA
MAB,5.68,50.30,82.86,45.98,Manhay,1,INTERMAGNET
DOU,4.60,50.10,81.91,45.76,Dourbes,1,INTERMAGNET
PIN,263.96,50.20,-27.43,59.96,Pinawa,3,CARISMA,THEMIS,CANOPUS
MZH,117.40,49.60,-168.51,44.53,Manzaoli,1,Unknown
GLN,262.88,49.65,-28.94,59.30,Glenlea,2,CANMOS,INTERMAGNET
LET,247.13,49.64,-50.07,56.88,Lethbridge,1,AUTUMN
T50,287.55,48.65,7.18,57.83,Saint-Fellicien,1,THEMIS
T51,282.22,48.05,-0.43,57.86,Val-dOr,1,THEMIS
KIV,30.30,50.72,104.63,46.56,Kiev,1,INTERMAGNET
KGD,73.08,49.82,146.42,45.92,Karaganda,1,Unknown
LVV,23.75,49.90,98.46,45.49,Lvov,1,INTERMAGNET
VAL,349.75,51.93,70.41,49.13,Valentia,1,INTERMAGNET
HAD,355.52,50.98,74.74,47.37,Hartland,3,BGS,SAMNET,INTERMAGNET
BEL,20.80,51.83,96.27,47.71,Belsk,2,INTERMAGNET,EMMA
ROT,245.87,51.07,-52.01,58.10,Rothney,1,AUTUMN
T03,247.02,50.37,-50.40,57.60,Vulcan,1,CARISMA
C04,251.74,50.06,-44.33,58.14,Gull Lake,1,CARISMA
C12,256.20,49.69,-38.32,58.49,Weyburn,1,CARISMA
T30,285.60,49.80,4.57,59.24,Chibougamau,1,THEMIS
T32,277.70,49.40,-6.97,59.52,Kapuskasing,1,THEMIS
BRD,260.03,49.87,-33.04,59.20,Brandon,1,CANMOS
ZAG,20.58,50.28,95.70,45.89,Zagorzyce,1,EMMA
WHS,264.75,49.80,-26.24,59.63,Whiteshell,1,CANMOS
T49,293.73,50.22,15.73,58.42,Spet-lles,1,THEMIS
NGK,12.68,52.07,89.28,48.03,Niemegk,2,GFZ,INTERMAGNET
IRT,104.45,52.17,178.45,47.79,Irkoutsk,2,INTERMAGNET,210MM
RED,246.16,52.14,-51.96,59.25,Red Deer,1,AUTUMN
C13,239.97,51.88,-59.30,57.77,Wells Gray,1,CARISMA
T43,245.70,50.90,-52.17,57.90,Priddis,1,THEMIS
C05,264.54,52.03,-26.78,61.84,Little Grand Rapids,1,CARISMA
MEA,246.65,54.62,-52.10,61.85,Meanook,2,CANMOS,INTERMAGNET
WNG,9.07,53.75,86.75,50.15,Wingst,2,GFZ,INTERMAGNET
ISL,265.34,53.86,-25.79,63.70,Island Lake,2,CARISMA,CANOPUS
LAN,357.23,54.01,77.12,51.10,Lancaster,1,SAMNET
YOR,358.95,53.95,78.49,50.90,York,1,SAMNET
EDM,246.70,53.30,-51.63,60.53,Edmonton,1,AUTUMN
SAS,253.60,52.80,-42.49,61.22,Saskatoon,1,AUTUMN
MSK,34.52,52.69,108.92,48.82,MSK,1,INTERMAGNET
C06,247.03,53.35,-51.24,60.64,Ministik Lake,1,CARISMA
T28,299.50,53.30,23.61,60.49,Goose Bay,1,THEMIS
PET,158.25,52.97,-132.07,46.73,Paratunka,3,INTERMAGNET,210MM,GFZ
SZC,19.61,52.91,95.51,48.97,Szczechowo,1,EMMA
PBQ,282.26,55.28,0.20,65.01,Poste de la Baleine,2,CANMOS,INTERMAGNET
ESK,356.80,55.32,77.25,52.74,Eskdalemuir,3,BGS,SAMNET,INTERMAGNET
HLP,18.82,54.61,95.32,50.93,Hel,2,INTERMAGNET,EMMA
MNK,26.52,54.10,101.95,50.31,Minsk,1,Unknown
BFE,11.67,55.62,89.56,52.27,Brorfelde,3,DTUSpace,INTERMAGNET,TROMSOE
ROE,8.55,55.17,86.80,51.86,Roemoe,2,DTUSpace,TROMSOE
NVS,82.90,55.03,156.55,51.26,Novosibirsk,1,INTERMAGNET
RSV,12.45,55.85,90.29,52.51,Rude Skov,1,INTERMAGNET
MOS,37.32,55.48,112.09,51.85,Moscow,1,IZMIRAN
C09,264.71,54.93,-26.87,64.70,Oxford House,1,CARISMA
T33,259.10,54.00,-35.04,63.21,Flin Flon/The Pas,1,THEMIS
T36,246.69,54.71,-52.09,61.95,Athabasca,2,THEMIS,AUTUMN
T37,237.20,53.80,-63.14,59.16,Prince George,1,THEMIS
SUW,23.18,54.01,98.95,50.21,Suwalki,1,EMMA
T52,282.38,53.79,0.26,63.54,Radisson,1,THEMIS
T42,254.84,55.15,-41.35,63.76,La Ronge,2,THEMIS,AUTUMN
T45,282.24,55.28,0.17,65.01,Kuujjuarapik,1,THEMIS
T48,293.19,54.80,15.78,63.16,Schefferville,1,THEMIS
SIT,224.67,57.07,-77.84,59.89,Sitka,2,INTERMAGNET,USGS
GIM,265.36,56.38,-26.08,66.16,Gillam,3,CARISMA,THEMIS,CANOPUS
FMC,248.79,56.66,-50.02,64.28,Fort McMurray,2,CARISMA,BGS
NAN,298.30,56.40,22.81,63.90,Nain,2,MACCS,THEMIS
FSJ,239.27,56.23,-61.69,62.05,Fort St. John,1,AUTUMN
KNZ,48.85,55.83,122.98,52.27,Zaymishche,1,INTERMAGNET
SHU,199.54,55.38,-99.74,53.29,Shumagin,2,INTERMAGNET,USGS
T31,280.80,56.50,-1.92,66.31,Sanikiluaq,1,CANMOS
ARS,58.57,56.43,132.43,52.90,Arti,1,INTERMAGNET
BRZ,24.75,56.21,100.97,52.66,Birzai,1,EMMA
BOX,38.97,58.03,114.20,54.54,Barok,2,210MM,INTERMAGNET
BOR,38.33,58.03,113.62,54.54,Borok,1,SAMNET
CRK,357.36,57.09,78.41,54.81,Crooktree,1,SAMNET
GML,356.32,57.16,77.60,54.98,Gleenmore Lodge,1,SAMNET
LOV,17.83,59.35,96.13,56.27,Lovo,1,INTERMAGNET
FCC,265.92,58.76,-25.57,68.50,Fort Churchill,4,CARISMA,CANOPUS,INTERMAGNET,CANMOS
RAL,256.32,58.22,-40.08,67.00,Rabbit Lake,1,CARISMA
FVE,243.98,58.37,-56.85,65.11,Fort Vermilion,1,AUTUMN
C02,265.79,57.71,-25.60,67.48,Back Lake,1,CARISMA
T22,226.84,56.83,-75.54,60.09,Petersburg,1,THEMIS
LER,358.82,60.13,80.96,58.20,Lerwick,3,BGS,SAMNET,INTERMAGNET
SMI,248.05,60.02,-52.29,67.47,Fort Smith,2,CARISMA,THEMIS
KAR,5.24,59.21,85.69,56.70,Karmoey,2,TROMSOE,IMAGE
TAR,26.46,58.26,103.11,54.88,Tartu,2,IMAGE,EMMA
LNN,30.70,59.95,107.38,56.62,Leningrad,1,Unknown
YAK,129.72,60.02,-157.40,54.88,Yakutsk,3,GFZ,210MM,INTERMAGNET
MGD,150.86,59.97,-139.32,54.30,Magadan,3,GFZ,210MM,INTERMAGNET
KVI,17.63,59.50,96.02,56.44,Kvistaberg,1,SAMNET
HOM,209.53,59.70,-93.12,59.60,Homer,1,GIMA
C03,248.89,58.77,-50.65,66.39,Fort Chipewyan,1,CARISMA
T29,291.80,58.30,14.58,66.87,Kuujjuaq,1,THEMIS
T44,281.95,58.47,0.01,68.14,Inukjuak,1,THEMIS
AMU,210.10,61.20,-93.39,61.27,Anchorage,1,THEMIS
NAQ,314.56,61.16,43.19,65.75,Narssarssuaq,2,DTUSpace,INTERMAGNET
EKP,265.95,61.11,-25.96,70.73,Eskimo Point,2,CARISMA,CANOPUS
NUR,24.65,60.50,102.35,57.32,Nurmajarvi,4,IMAGE,EMMA,SAMNET,INTERMAGNET
UPS,17.35,59.90,95.95,56.88,Uppsala,3,IMAGE,SAMNET,INTERMAGNET
T38,224.80,61.00,-79.67,63.93,White Horse,2,THEMIS,AUTUMN
GRK,29.38,60.27,106.34,56.98,Gorkovskaya,1,AARI
T46,282.71,60.05,1.36,69.60,Puvirnituq,1,THEMIS
T53,281.85,60.82,0.08,70.39,Akulivik,1,THEMIS
YKC,245.52,62.48,-56.85,69.42,Yellowknife,2,CANMOS,INTERMAGNET
FSP,238.77,61.76,-64.89,67.47,Fort Simpson,2,CARISMA,THEMIS
MEK,30.97,62.77,108.66,59.57,Mekrijaervi,2,IMAGE,EMMA
FHB,310.32,62.00,39.05,67.41,Paamiut,1,DTUSpace
RAN,267.89,62.82,-23.12,72.45,Rankine Inlet,3,CARISMA,THEMIS,CANOPUS
DOB,9.11,62.07,90.19,59.64,Dombas,2,TROMSOE,IMAGE
SOL,4.84,61.08,86.25,58.82,Solund,2,TROMSOE,IMAGE
HAN,26.65,62.30,104.78,59.17,Hankasalmi,3,IMAGE,EMMA,SAMNET
GAK,214.70,62.30,-89.95,63.27,Gakona,2,GIMA,THEMIS
FAR,352.98,62.05,77.30,60.89,Faroes,1,SAMNET
HLM,210.13,61.24,-93.38,61.32,HLMS,1,GIMA
TLK,209.90,63.30,-94.74,63.43,Talkeetna,1,GIMA
TRP,150.24,62.67,-140.57,57.23,Trapper,1,GIMA
T39,209.80,62.30,-94.24,62.37,Trapper Creek,1,THEMIS
T47,284.35,62.20,4.23,71.51,Salluit,1,THEMIS
BLC,263.97,64.33,-30.09,73.61,Baker Lake,2,CANMOS,INTERMAGNET
LRV,338.30,64.18,66.72,65.01,Leirvogur,1,INTERMAGNET
GHB,308.27,64.17,37.85,69.98,Nuuk,1,DTUSpace
DAW,220.89,64.05,-85.34,66.24,Dawson City,1,CARISMA
IQA,291.48,63.75,15.58,72.21,Iqaluit,2,CANMOS,INTERMAGNET
CHC,276.80,64.10,-8.07,73.72,Coral Harbour,1,MACCS
HLL,339.44,63.77,67.40,64.41,Hella,1,SAMNET
S01,13.36,64.37,94.84,61.87,Nordli,1,SAMNET
T35,249.10,63.60,-52.64,71.10,Snap Lake,1,THEMIS
T40,204.40,63.00,-99.16,62.16,McGrath,1,THEMIS
SKT,307.10,65.42,37.22,71.43,Maniitsoq,1,DTUSpace
AMK,322.37,65.60,53.57,68.99,Tasiilaq,1,DTUSpace
RVK,10.99,64.94,93.27,62.61,Roervik,2,TROMSOE,IMAGE
LYC,18.75,64.61,99.33,61.87,Lycksele,2,IMAGE,INTERMAGNET
OUJ,27.23,64.52,106.27,61.47,Oulujarvi,2,IMAGE,SAMNET
EAG,218.80,64.78,-87.80,66.59,Eagle,1,GIMA
CMO,212.14,64.87,-93.82,65.45,College,3,GIMA,USGS,INTERMAGNET
CGO,212.14,64.87,-93.82,65.46,College Intl Geophys Observatory,2,THEMIS,GIMA
PKR,212.74,65.08,-93.44,65.78,Poker Flat,1,GIMA
ARK,40.50,64.60,117.67,61.29,Arkhangelsk,1,AARI
CDC,283.40,64.20,3.04,73.47,Cape Dorset,1,MACCS
OUL,25.85,65.10,105.42,62.11,Oulu,1,SAMNET
C07,233.31,65.26,-73.47,69.85,Norman Wells,1,CARISMA
T34,249.30,64.70,-53.01,72.17,Ekati,1,THEMIS
MCR,313.71,66.48,45.42,71.35,Magic2 Raven,1,DTUSpace
CNL,248.75,65.75,-54.49,73.08,Contwoyto Lake,1,CARISMA
JCK,16.98,66.40,98.94,63.82,Jaeckvik,2,TROMSOE,IMAGE
DON,12.50,66.11,95.19,63.75,Doenna,2,TROMSOE,IMAGE
ZYK,150.78,65.75,-141.24,60.54,Zyryanka,1,210MM
KOT,197.40,66.88,-107.15,65.18,Kotzebue,2,GIMA,210MM
BET,208.32,66.90,-98.51,66.88,Bettles,1,GIMA
FYU,214.70,66.57,-92.83,67.65,Fort Yukon,2,GIMA,THEMIS
CWE,190.17,66.17,-112.14,63.47,Wellen,1,AARI
ZGN,123.26,66.75,-164.56,62.01,Zhigansk,1,210MM
PGC,294.20,66.10,20.55,74.09,Pangnirtung,1,MACCS
RPB,273.80,66.50,-13.45,75.97,Repulse Bay,1,MACCS
ATU,306.43,67.93,38.19,73.99,Attu,1,DTUSpace
STF,309.28,67.02,40.87,72.64,Kangerlussuaq,1,DTUSpace
SOD,26.63,67.37,107.33,64.41,Sodankyla,3,IMAGE,EMMA,INTERMAGNET
PEL,24.08,66.90,104.97,64.03,Pello,2,IMAGE,EMMA
T41,199.60,67.00,-105.55,65.62,Kiana,1,THEMIS
CBB,255.00,69.10,-47.75,77.04,Cambridge Bay,2,CANMOS,INTERMAGNET
ARC,214.43,68.13,-94.37,69.18,Arctic Village,1,GIMA
INK,226.70,68.25,-83.05,71.50,Inuvik,3,GIMA,AUTUMN,THEMIS
GDH,306.47,69.25,39.39,75.25,Godhavn,2,DTUSpace,INTERMAGNET
AND,16.03,69.30,100.22,66.86,Andenes,2,TROMSOE,IMAGE
KAU,23.05,69.02,105.53,66.23,Kautokeino,1,IMAGE
IVA,27.29,68.56,108.61,65.60,Ivalo,2,IMAGE,EMMA
ABK,18.82,68.35,101.70,65.74,Abisko,2,IMAGE,INTERMAGNET
LEK,13.54,68.13,97.39,65.79,Leknes,1,IMAGE
MUO,23.53,68.02,105.23,65.19,Muonio,2,IMAGE,EMMA
LOZ,35.08,67.97,114.65,64.77,Lovozero,2,IMAGE,AARI
KIR,20.42,67.84,102.62,65.14,Kiruna,1,IMAGE
CPS,180.55,68.88,-120.98,65.37,Cape Schmidt,2,AARI,210MM
CKA,73.60,68.50,149.04,64.77,Cape Kamenniy,1,Unknown
LOP,33.08,68.25,113.15,65.10,Lop,1,Unknown
GHC,264.10,68.60,-31.76,77.51,Gjoa Haven,1,MACCS
IGC,278.20,69.30,-5.39,78.43,Igloolik,1,MACCS
PBC,270.30,68.50,-20.27,77.74,Pelly Bay,1,MACCS
DED,211.21,70.36,-99.27,70.87,Deadhorse,2,INTERMAGNET,USGS
NOK,88.10,69.40,162.74,65.41,NOK,2,RAPIDMAG,GIMA
UMQ,307.87,70.68,42.58,76.38,Uummannaq,1,DTUSpace
SCO,338.03,70.48,71.82,71.63,Ittoqqortoormiit,1,DTUSpace
TAL,266.45,69.54,-27.95,78.51,Taloyoak,1,CARISMA
KAV,216.63,70.07,-94.36,71.48,Kaktovik,2,GIMA,INTERMAGNET
NOR,25.79,71.09,109.28,68.19,Nordkapp,2,TROMSOE,IMAGE
JAN,351.30,70.90,82.78,70.47,Jan Mayen,1,TROMSOE
SOR,22.22,70.54,106.04,67.80,Soeroeya,2,TROMSOE,IMAGE
TRO,18.94,69.66,102.77,67.07,Tromso,2,TROMSOE,IMAGE
ALT,22.96,69.86,106.08,67.08,Alta,1,IMAGE
KEV,27.01,69.76,109.22,66.82,Kevo,2,IMAGE,EMMA
MAS,23.70,69.46,106.36,66.65,Masi,2,IMAGE,EMMA
KIL,20.79,69.02,103.74,66.33,Kilpisjarvi,3,IMAGE,EMMA,SAMNET
CPY,235.30,70.20,-75.95,74.91,Cape Perry,1,GIMA
AMD,61.40,69.50,138.20,65.85,Amderma,3,RAPIDMAG,GIMA,AARI
BRW,203.25,71.30,-106.57,70.60,Barrow,3,GIMA,USGS,INTERMAGNET
JCO,211.20,70.36,-99.27,70.86,Jim Carrigan,1,BGS
TIK,129.00,71.58,-161.29,66.70,Tixie,5,RAPIDMAG,GIMA,210MM,AARI,INTERMAGNET
CHD,147.89,70.62,-145.89,65.67,Chokurdakh,1,210MM
PBK,170.90,70.10,-128.74,65.94,Pebek,2,AARI,GIMA
CY0,291.40,70.50,18.88,78.52,Clyde River,1,MACCS
UPN,303.85,72.78,40.20,78.93,Upernavik,1,DTUSpace
MCE,326.10,72.40,63.93,75.20,Magic1 east,1,DTUSpace
MCW,317.41,72.00,54.95,76.13,Magic1 west,1,DTUSpace
MCG,321.65,72.60,60.05,76.06,Magic2 GISP,1,DTUSpace
SAH,234.77,71.98,-78.98,76.44,Sachs Harbour,1,CARISMA
DIK,80.57,73.55,156.64,69.36,Dixon,3,RAPIDMAG,GIMA,AARI
RES,265.11,74.69,-35.54,82.93,Resolute Bay,2,CANMOS,INTERMAGNET
KUV,302.82,74.57,41.92,80.69,Kullorsuaq,1,DTUSpace
DNB,339.78,74.30,78.39,75.19,Daneborg,1,DTUSpace
MCN,322.38,73.93,62.90,77.20,Magic1 north,1,DTUSpace
BJN,19.20,74.50,107.71,71.89,Bear Island,2,TROMSOE,IMAGE
TAB,291.18,76.54,27.24,83.89,Thule Air Base,1,DTUSpace
SVS,294.90,76.02,32.87,83.00,Savissivik,1,DTUSpace
KTN,137.71,75.94,-157.19,70.98,Kotelnyy,1,210MM
MBC,240.64,76.32,-81.35,81.13,Mould Bay,2,CANMOS,INTERMAGNET
THL,290.77,77.47,29.24,84.72,Qaanag,2,DTUSpace,INTERMAGNET
DMH,341.37,76.77,84.38,77.34,Danmarkshavn,1,DTUSpace
HOP,25.01,76.51,114.59,73.53,Hopen Island,2,TROMSOE,IMAGE
HRN,15.60,77.00,108.72,74.52,Hornsund,2,IMAGE,INTERMAGNET
CCS,104.28,77.72,176.59,72.77,Chelyuskin,3,RAPIDMAG,GIMA,AARI
HOR,15.60,77.00,108.72,74.52,Hornsund,1,IMAGE
NAL,11.95,78.92,109.96,76.57,New Aalesund,2,TROMSOE,IMAGE
LYR,15.83,78.20,111.03,75.64,Longyearbyen,2,TROMSOE,IMAGE
BBG,14.23,78.07,109.70,75.62,Barentsburg,1,AARI
VIZ,76.98,79.48,156.12,74.70,Vieze Island,1,AARI
HIS,58.05,80.62,143.72,76.10,Heiss Island,1,AARI
EUA,274.10,80.00,-20.31,87.88,Eureka,1,CANMOS
ALE,297.50,82.50,86.95,87.14,Alert,2,CANMOS,INTERMAGNET
NRD,343.33,81.60,101.64,81.27,Nord,1,DTUSpace
"""
""" ??? """
class Record(namedtuple('Record', 'iaga glon glat mlon mlat name operators')):
pass
def parse_csv(csv_string=STATION_CSV):
""" ??? """
stringio = StringIO(csv_string)  # use the argument rather than always re-reading the module constant
station_map = {}
for i, cols in enumerate(reader(stringio)):
if i == 0:
continue
# cols[6] holds the operator count; the operator names themselves are cols[7:]
station_map[cols[0]] = Record(cols[0],
float(cols[1]),
float(cols[2]),
float(cols[3]),
float(cols[4]),
cols[5],
cols[7:])
return station_map
""" ??? """
STATION_MAP = parse_csv()
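# Illustrative usage sketch (not part of the original module): look up a station by
# its IAGA code in the parsed map. The expected values follow from the CSV rows above.
if __name__ == "__main__":
    kir = STATION_MAP["KIR"]
    print(kir.name, kir.glon, kir.glat)      # -> Kiruna 20.42 67.84
    print(kir.mlon, kir.mlat, kir.operators) # -> 102.62 65.14 ['IMAGE']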
| 50.025194
| 82
| 0.728625
|
b4a1b041b1f82065670f45b047d956c15cdbd547
| 36,853
|
py
|
Python
|
synapse/storage/databases/main/deviceinbox.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/databases/main/deviceinbox.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | 1
|
2022-03-23T08:03:58.000Z
|
2022-03-23T08:03:58.000Z
|
synapse/storage/databases/main/deviceinbox.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | 1
|
2022-03-31T09:03:27.000Z
|
2022-03-31T09:03:27.000Z
|
# Copyright 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple, cast
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.replication.tcp.streams import ToDeviceStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
StreamIdGenerator,
)
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class DeviceInboxWorkerStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self._instance_name = hs.get_instance_name()
# Map of (user_id, device_id) to the last stream_id that has been
# deleted up to. This is so that we can no-op deletions.
self._last_device_delete_cache: ExpiringCache[
Tuple[str, Optional[str]], int
] = ExpiringCache(
cache_name="last_device_delete_cache",
clock=self._clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
)
if isinstance(database.engine, PostgresEngine):
self._can_write_to_device = (
self._instance_name in hs.config.worker.writers.to_device
)
self._device_inbox_id_gen: AbstractStreamIdGenerator = (
MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="to_device",
instance_name=self._instance_name,
tables=[("device_inbox", "instance_name", "stream_id")],
sequence_name="device_inbox_sequence",
writers=hs.config.worker.writers.to_device,
)
)
else:
self._can_write_to_device = True
self._device_inbox_id_gen = StreamIdGenerator(
db_conn, "device_inbox", "stream_id"
)
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_inbox",
entity_column="user_id",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_inbox_stream_cache = StreamChangeCache(
"DeviceInboxStreamChangeCache",
min_device_inbox_id,
prefilled_cache=device_inbox_prefill,
)
# The federation outbox and the local device inbox use the same
# stream_id generator.
device_outbox_prefill, min_device_outbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_federation_outbox",
entity_column="destination",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_federation_outbox_stream_cache = StreamChangeCache(
"DeviceFederationOutboxStreamChangeCache",
min_device_outbox_id,
prefilled_cache=device_outbox_prefill,
)
def process_replication_rows(self, stream_name, instance_name, token, rows):
if stream_name == ToDeviceStream.NAME:
# If replication is happening then Postgres must be in use.
assert isinstance(self._device_inbox_id_gen, MultiWriterIdGenerator)
self._device_inbox_id_gen.advance(instance_name, token)
for row in rows:
if row.entity.startswith("@"):
self._device_inbox_stream_cache.entity_has_changed(
row.entity, token
)
else:
self._device_federation_outbox_stream_cache.entity_has_changed(
row.entity, token
)
return super().process_replication_rows(stream_name, instance_name, token, rows)
def get_to_device_stream_token(self):
return self._device_inbox_id_gen.get_current_token()
async def get_messages_for_user_devices(
self,
user_ids: Collection[str],
from_stream_id: int,
to_stream_id: int,
) -> Dict[Tuple[str, str], List[JsonDict]]:
"""
Retrieve to-device messages for a given set of users.
Only to-device messages with stream ids between the given boundaries
(from < X <= to) are returned.
Args:
user_ids: The users to retrieve to-device messages for.
from_stream_id: The lower boundary of stream id to filter with (exclusive).
to_stream_id: The upper boundary of stream id to filter with (inclusive).
Returns:
A dictionary of (user id, device id) -> list of to-device messages.
"""
# We expect the stream ID returned by _get_device_messages to always
# be to_stream_id. So, no need to return it from this function.
(
user_id_device_id_to_messages,
last_processed_stream_id,
) = await self._get_device_messages(
user_ids=user_ids,
from_stream_id=from_stream_id,
to_stream_id=to_stream_id,
)
assert (
last_processed_stream_id == to_stream_id
), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
return user_id_device_id_to_messages
async def get_messages_for_device(
self,
user_id: str,
device_id: str,
from_stream_id: int,
to_stream_id: int,
limit: int = 100,
) -> Tuple[List[JsonDict], int]:
"""
Retrieve to-device messages for a single user device.
Only to-device messages with stream ids between the given boundaries
(from < X <= to) are returned.
Args:
user_id: The ID of the user to retrieve messages for.
device_id: The ID of the device to retrieve to-device messages for.
from_stream_id: The lower boundary of stream id to filter with (exclusive).
to_stream_id: The upper boundary of stream id to filter with (inclusive).
limit: A limit on the number of to-device messages returned.
Returns:
A tuple containing:
* A list of to-device messages within the given stream id range intended for
the given user / device combo.
* The last-processed stream ID. Subsequent calls of this function with the
same device should pass this value as 'from_stream_id'.
"""
(
user_id_device_id_to_messages,
last_processed_stream_id,
) = await self._get_device_messages(
user_ids=[user_id],
device_id=device_id,
from_stream_id=from_stream_id,
to_stream_id=to_stream_id,
limit=limit,
)
if not user_id_device_id_to_messages:
# There were no messages!
return [], to_stream_id
# Extract the messages, no need to return the user and device ID again
to_device_messages = user_id_device_id_to_messages.get((user_id, device_id), [])
return to_device_messages, last_processed_stream_id
async def _get_device_messages(
self,
user_ids: Collection[str],
from_stream_id: int,
to_stream_id: int,
device_id: Optional[str] = None,
limit: Optional[int] = None,
) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]:
"""
Retrieve pending to-device messages for a collection of user devices.
Only to-device messages with stream ids between the given boundaries
(from < X <= to) are returned.
Note that a stream ID can be shared by multiple copies of the same message with
different recipient devices. Stream IDs are only unique in the context of a single
user ID / device ID pair. Thus, applying a limit (of messages to return) when working
with a sliding window of stream IDs is only possible when querying messages of a
single user device.
Finally, note that device IDs are not unique across users.
Args:
user_ids: The user IDs to filter device messages by.
from_stream_id: The lower boundary of stream id to filter with (exclusive).
to_stream_id: The upper boundary of stream id to filter with (inclusive).
device_id: A device ID to query to-device messages for. If not provided, to-device
messages from all device IDs for the given user IDs will be queried. May not be
provided if `user_ids` contains more than one entry.
limit: The maximum number of to-device messages to return. Can only be used when
passing a single user ID / device ID tuple.
Returns:
A tuple containing:
* A dict of (user_id, device_id) -> list of to-device messages
* The last-processed stream ID. If this is less than `to_stream_id`, then
there may be more messages to retrieve. If `limit` is not set, then this
is always equal to 'to_stream_id'.
"""
if not user_ids:
logger.warning("No users provided upon querying for device IDs")
return {}, to_stream_id
# Prevent a query for one user's device also retrieving another user's device with
# the same device ID (device IDs are not unique across users).
if len(user_ids) > 1 and device_id is not None:
raise AssertionError(
"Programming error: 'device_id' cannot be supplied to "
"_get_device_messages when >1 user_id has been provided"
)
# A limit can only be applied when querying for a single user ID / device ID tuple.
# See the docstring of this function for more details.
if limit is not None and device_id is None:
raise AssertionError(
"Programming error: _get_device_messages was passed 'limit' "
"without a specific user_id/device_id"
)
user_ids_to_query: Set[str] = set()
device_ids_to_query: Set[str] = set()
# Note that a device ID could be an empty str
if device_id is not None:
# If a device ID was passed, use it to filter results.
# Otherwise, device IDs will be derived from the given collection of user IDs.
device_ids_to_query.add(device_id)
# Determine which users have devices with pending messages
for user_id in user_ids:
if self._device_inbox_stream_cache.has_entity_changed(
user_id, from_stream_id
):
# This user has new messages sent to them. Query messages for them
user_ids_to_query.add(user_id)
if not user_ids_to_query:
return {}, to_stream_id
def get_device_messages_txn(txn: LoggingTransaction):
# Build a query to select messages from any of the given devices that
# are between the given stream id bounds.
# If a list of device IDs was not provided, retrieve all device IDs
# for the given users. We explicitly do not query hidden devices, as
# hidden devices should not receive to-device messages.
# Note that this is more efficient than just dropping `device_id` from the query,
# since device_inbox has an index on `(user_id, device_id, stream_id)`
if not device_ids_to_query:
user_device_dicts = self.db_pool.simple_select_many_txn(
txn,
table="devices",
column="user_id",
iterable=user_ids_to_query,
keyvalues={"user_id": user_id, "hidden": False},
retcols=("device_id",),
)
device_ids_to_query.update(
{row["device_id"] for row in user_device_dicts}
)
if not device_ids_to_query:
# We've ended up with no devices to query.
return {}, to_stream_id
# We include both user IDs and device IDs in this query, as we have an index
# (device_inbox_user_stream_id) for them.
user_id_many_clause_sql, user_id_many_clause_args = make_in_list_sql_clause(
self.database_engine, "user_id", user_ids_to_query
)
(
device_id_many_clause_sql,
device_id_many_clause_args,
) = make_in_list_sql_clause(
self.database_engine, "device_id", device_ids_to_query
)
sql = f"""
SELECT stream_id, user_id, device_id, message_json FROM device_inbox
WHERE {user_id_many_clause_sql}
AND {device_id_many_clause_sql}
AND ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
"""
sql_args = (
*user_id_many_clause_args,
*device_id_many_clause_args,
from_stream_id,
to_stream_id,
)
# If a limit was provided, limit the data retrieved from the database
if limit is not None:
sql += "LIMIT ?"
sql_args += (limit,)
txn.execute(sql, sql_args)
# Create and fill a dictionary of (user ID, device ID) -> list of messages
# intended for each device.
last_processed_stream_pos = to_stream_id
recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {}
rowcount = 0
for row in txn:
rowcount += 1
last_processed_stream_pos = row[0]
recipient_user_id = row[1]
recipient_device_id = row[2]
message_dict = db_to_json(row[3])
# Store the device details
recipient_device_to_messages.setdefault(
(recipient_user_id, recipient_device_id), []
).append(message_dict)
if limit is not None and rowcount == limit:
# We ended up bumping up against the message limit. There may be more messages
# to retrieve. Return what we have, as well as the last stream position that
# was processed.
#
# The caller is expected to set this as the lower (exclusive) bound
# for the next query of this device.
return recipient_device_to_messages, last_processed_stream_pos
# The limit was not reached, thus we know that recipient_device_to_messages
# contains all to-device messages for the given device and stream id range.
#
# We return to_stream_id, which the caller should then provide as the lower
# (exclusive) bound on the next query of this device.
return recipient_device_to_messages, to_stream_id
return await self.db_pool.runInteraction(
"get_device_messages", get_device_messages_txn
)
@trace
async def delete_messages_for_device(
self, user_id: str, device_id: Optional[str], up_to_stream_id: int
) -> int:
"""
Args:
user_id: The recipient user_id.
device_id: The recipient device_id.
up_to_stream_id: Where to delete messages up to.
Returns:
The number of messages deleted.
"""
# If we have cached the last stream id we've deleted up to, we can
# check if there is likely to be anything that needs deleting
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), None
)
set_tag("last_deleted_stream_id", last_deleted_stream_id)
if last_deleted_stream_id:
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_deleted_stream_id
)
if not has_changed:
log_kv({"message": "No changes in cache since last check"})
return 0
def delete_messages_for_device_txn(txn):
sql = (
"DELETE FROM device_inbox"
" WHERE user_id = ? AND device_id = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (user_id, device_id, up_to_stream_id))
return txn.rowcount
count = await self.db_pool.runInteraction(
"delete_messages_for_device", delete_messages_for_device_txn
)
log_kv({"message": f"deleted {count} messages for device", "count": count})
# Update the cache, ensuring that we only ever increase the value
updated_last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), 0
)
self._last_device_delete_cache[(user_id, device_id)] = max(
updated_last_deleted_stream_id, up_to_stream_id
)
return count
@trace
async def get_new_device_msgs_for_remote(
self, destination, last_stream_id, current_stream_id, limit
) -> Tuple[List[dict], int]:
"""
Args:
destination(str): The name of the remote server.
last_stream_id(int|long): The last position of the device message stream
that the server sent up to.
current_stream_id(int|long): The current position of the device
message stream.
limit(int): The maximum number of messages to return.
Returns:
A list of messages for the device and where in the stream the messages got to.
"""
set_tag("destination", destination)
set_tag("last_stream_id", last_stream_id)
set_tag("current_stream_id", current_stream_id)
set_tag("limit", limit)
has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
destination, last_stream_id
)
if not has_changed or last_stream_id == current_stream_id:
log_kv({"message": "No new messages in stream"})
return [], current_stream_id
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
return [], last_stream_id
@trace
def get_new_messages_for_remote_destination_txn(txn):
sql = (
"SELECT stream_id, messages_json FROM device_federation_outbox"
" WHERE destination = ?"
" AND ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC"
" LIMIT ?"
)
txn.execute(sql, (destination, last_stream_id, current_stream_id, limit))
messages = []
stream_pos = current_stream_id
for row in txn:
stream_pos = row[0]
messages.append(db_to_json(row[1]))
# If the limit was not reached we know that there's no more data for this
# user/device pair up to current_stream_id.
if len(messages) < limit:
log_kv({"message": "Set stream position to current position"})
stream_pos = current_stream_id
return messages, stream_pos
return await self.db_pool.runInteraction(
"get_new_device_msgs_for_remote",
get_new_messages_for_remote_destination_txn,
)
@trace
async def delete_device_msgs_for_remote(
self, destination: str, up_to_stream_id: int
) -> None:
"""Used to delete messages when the remote destination acknowledges
their receipt.
Args:
destination: The destination server_name
up_to_stream_id: Where to delete messages up to.
"""
def delete_messages_for_remote_destination_txn(txn):
sql = (
"DELETE FROM device_federation_outbox"
" WHERE destination = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (destination, up_to_stream_id))
await self.db_pool.runInteraction(
"delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn
)
async def get_all_new_device_messages(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, tuple]], int, bool]:
"""Get updates for to device replication stream.
Args:
instance_name: The writer we want to fetch updates from. Unused
here since there is only ever one writer.
last_id: The token to fetch updates from. Exclusive.
current_id: The token to fetch updates up to. Inclusive.
limit: The requested limit for the number of rows to return. The
function may return more or fewer rows.
Returns:
A tuple consisting of: the updates, a token to use to fetch
subsequent updates, and whether we returned fewer rows than exists
between the requested tokens due to the limit.
The token returned can be used in a subsequent call to this
function to get further updates.
The updates are a list of 2-tuples of stream ID and the row data
"""
if last_id == current_id:
return [], current_id, False
def get_all_new_device_messages_txn(txn):
# We limit like this as we might have multiple rows per stream_id, and
# we want to make sure we always get all entries for any stream_id
# we return.
upper_pos = min(current_id, last_id + limit)
sql = (
"SELECT max(stream_id), user_id"
" FROM device_inbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY user_id"
)
txn.execute(sql, (last_id, upper_pos))
updates = [(row[0], row[1:]) for row in txn]
sql = (
"SELECT max(stream_id), destination"
" FROM device_federation_outbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY destination"
)
txn.execute(sql, (last_id, upper_pos))
updates.extend((row[0], row[1:]) for row in txn)
# Order by ascending stream ordering
updates.sort()
limited = False
upto_token = current_id
if len(updates) >= limit:
upto_token = updates[-1][0]
limited = True
return updates, upto_token, limited
return await self.db_pool.runInteraction(
"get_all_new_device_messages", get_all_new_device_messages_txn
)
@trace
async def add_messages_to_device_inbox(
self,
local_messages_by_user_then_device: dict,
remote_messages_by_destination: dict,
) -> int:
"""Used to send messages from this server.
Args:
local_messages_by_user_then_device:
Dictionary of recipient user_id to recipient device_id to message.
remote_messages_by_destination:
Dictionary of destination server_name to the EDU JSON to send.
Returns:
The new stream_id.
"""
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Add the local messages directly to the local inbox.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
# Add the remote messages to the federation outbox.
# We'll send them to a remote server when we next send a
# federation transaction to that destination.
self.db_pool.simple_insert_many_txn(
txn,
table="device_federation_outbox",
keys=(
"destination",
"stream_id",
"queued_ts",
"messages_json",
"instance_name",
),
values=[
(
destination,
stream_id,
now_ms,
json_encoder.encode(edu),
self._instance_name,
)
for destination, edu in remote_messages_by_destination.items()
],
)
if remote_messages_by_destination:
issue9533_logger.debug(
"Queued outgoing to-device messages with stream_id %i for %s",
stream_id,
list(remote_messages_by_destination.keys()),
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
for destination in remote_messages_by_destination.keys():
self._device_federation_outbox_stream_cache.entity_has_changed(
destination, stream_id
)
return self._device_inbox_id_gen.get_current_token()
async def add_messages_from_remote_to_device_inbox(
self, origin: str, message_id: str, local_messages_by_user_then_device: dict
) -> int:
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Check if we've already inserted a matching message_id for that
# origin. This can happen if the origin doesn't receive our
# acknowledgement from the first time we received the message.
already_inserted = self.db_pool.simple_select_one_txn(
txn,
table="device_federation_inbox",
keyvalues={"origin": origin, "message_id": message_id},
retcols=("message_id",),
allow_none=True,
)
if already_inserted is not None:
return
# Add an entry for this message_id so that we know we've processed
# it.
self.db_pool.simple_insert_txn(
txn,
table="device_federation_inbox",
values={
"origin": origin,
"message_id": message_id,
"received_ts": now_ms,
},
)
# Add the messages to the appropriate local device inboxes so that
# they'll be sent to the devices when they next sync.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_from_remote_to_device_inbox",
add_messages_txn,
now_ms,
stream_id,
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
return stream_id
def _add_messages_to_local_device_inbox_txn(
self, txn, stream_id, messages_by_user_then_device
):
assert self._can_write_to_device
local_by_user_then_device = {}
for user_id, messages_by_device in messages_by_user_then_device.items():
messages_json_for_user = {}
devices = list(messages_by_device.keys())
if len(devices) == 1 and devices[0] == "*":
# Handle wildcard device_ids.
# We exclude hidden devices (such as cross-signing keys) here as they are
# not expected to receive to-device messages.
devices = self.db_pool.simple_select_onecol_txn(
txn,
table="devices",
keyvalues={"user_id": user_id, "hidden": False},
retcol="device_id",
)
message_json = json_encoder.encode(messages_by_device["*"])
for device_id in devices:
# Add the message for all devices for this user on this
# server.
messages_json_for_user[device_id] = message_json
else:
if not devices:
continue
# We exclude hidden devices (such as cross-signing keys) here as they are
# not expected to receive to-device messages.
rows = self.db_pool.simple_select_many_txn(
txn,
table="devices",
keyvalues={"user_id": user_id, "hidden": False},
column="device_id",
iterable=devices,
retcols=("device_id",),
)
for row in rows:
# Only insert into the local inbox if the device exists on
# this server
device_id = row["device_id"]
message_json = json_encoder.encode(messages_by_device[device_id])
messages_json_for_user[device_id] = message_json
if messages_json_for_user:
local_by_user_then_device[user_id] = messages_json_for_user
if not local_by_user_then_device:
return
self.db_pool.simple_insert_many_txn(
txn,
table="device_inbox",
keys=("user_id", "device_id", "stream_id", "message_json", "instance_name"),
values=[
(user_id, device_id, stream_id, message_json, self._instance_name)
for user_id, messages_by_device in local_by_user_then_device.items()
for device_id, message_json in messages_by_device.items()
],
)
issue9533_logger.debug(
"Stored to-device messages with stream_id %i for %s",
stream_id,
[
(user_id, device_id)
for (user_id, messages_by_device) in local_by_user_then_device.items()
for device_id in messages_by_device.keys()
],
)
class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
REMOVE_DELETED_DEVICES = "remove_deleted_devices_from_device_inbox"
REMOVE_HIDDEN_DEVICES = "remove_hidden_devices_from_device_inbox"
REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox"
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
"device_inbox_stream_index",
index_name="device_inbox_stream_id_user_id",
table="device_inbox",
columns=["stream_id", "user_id"],
)
self.db_pool.updates.register_background_update_handler(
self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
)
# Used to be a background update that deletes all device_inboxes for deleted
# devices.
self.db_pool.updates.register_noop_background_update(
self.REMOVE_DELETED_DEVICES
)
# Used to be a background update that deletes all device_inboxes for hidden
# devices.
self.db_pool.updates.register_noop_background_update(self.REMOVE_HIDDEN_DEVICES)
self.db_pool.updates.register_background_update_handler(
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
self._remove_dead_devices_from_device_inbox,
)
async def _background_drop_index_device_inbox(self, progress, batch_size):
def reindex_txn(conn):
txn = conn.cursor()
txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id")
txn.close()
await self.db_pool.runWithConnection(reindex_txn)
await self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
return 1
async def _remove_dead_devices_from_device_inbox(
self,
progress: JsonDict,
batch_size: int,
) -> int:
"""A background update to remove devices that were either deleted or hidden from
the device_inbox table.
Args:
progress: The update's progress dict.
batch_size: The batch size for this update.
Returns:
The number of rows deleted.
"""
def _remove_dead_devices_from_device_inbox_txn(
txn: LoggingTransaction,
) -> Tuple[int, bool]:
if "max_stream_id" in progress:
max_stream_id = progress["max_stream_id"]
else:
txn.execute("SELECT max(stream_id) FROM device_inbox")
# There's a type mismatch here between how we want to type the row and
# what fetchone says it returns, but we silence it because we know that
# res can't be None.
res = cast(Tuple[Optional[int]], txn.fetchone())
if res[0] is None:
# this can only happen if the `device_inbox` table is empty, in which
# case we have no work to do.
return 0, True
else:
max_stream_id = res[0]
start = progress.get("stream_id", 0)
stop = start + batch_size
# delete rows in `device_inbox` which do *not* correspond to a known,
# unhidden device.
sql = """
DELETE FROM device_inbox
WHERE
stream_id >= ? AND stream_id < ?
AND NOT EXISTS (
SELECT * FROM devices d
WHERE
d.device_id=device_inbox.device_id
AND d.user_id=device_inbox.user_id
AND NOT hidden
)
"""
txn.execute(sql, (start, stop))
self.db_pool.updates._background_update_progress_txn(
txn,
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
{
"stream_id": stop,
"max_stream_id": max_stream_id,
},
)
return stop > max_stream_id
finished = await self.db_pool.runInteraction(
"_remove_devices_from_device_inbox_txn",
_remove_dead_devices_from_device_inbox_txn,
)
if finished:
await self.db_pool.updates._end_background_update(
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
)
return batch_size
class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore):
pass
| 39.372863
| 97
| 0.597889
|
e6492b66ac275ce3f870b603e41e4e0e037b7589
| 24,515
|
py
|
Python
|
src/encoded/upgrade/file.py
|
caseylitton/encoded
|
ecfb135ba84ef5cad2a71638720d782cbfc4d14a
|
[
"MIT"
] | null | null | null |
src/encoded/upgrade/file.py
|
caseylitton/encoded
|
ecfb135ba84ef5cad2a71638720d782cbfc4d14a
|
[
"MIT"
] | 1
|
2018-12-14T18:00:30.000Z
|
2018-12-14T18:00:30.000Z
|
src/encoded/upgrade/file.py
|
caseylitton/encoded
|
ecfb135ba84ef5cad2a71638720d782cbfc4d14a
|
[
"MIT"
] | null | null | null |
from snovault import upgrade_step
from pyramid.traversal import find_root
from datetime import datetime, time
@upgrade_step('file', '', '2')
def file_0_2(value, system):
# http://redmine.encodedcc.org/issues/1295
# http://redmine.encodedcc.org/issues/1307
if 'status' in value:
value['status'] = value['status'].lower()
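# Illustrative effect of this upgrade step (not part of the original module):
#   value = {'status': 'CURRENT'}
#   file_0_2(value, system=None)   # `system` is unused by this step
#   assert value['status'] == 'current'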
@upgrade_step('file', '2', '3')
def file_2_3(value, system):
# http://redmine.encodedcc.org/issues/1572
file_format = value.get('file_format')
file_name = value['download_path'].rsplit('/', 1)[-1]
file_ext = file_name[file_name.find('.'):]
# REJECTIONS
if file_ext in ['.gtf.bigBed', '.pdf', '.pdf.gz', '.gff.bigBed', '.spikeins']:
value['status'] = 'deleted'
# Find the miscategorized bedMethyls
if file_ext == '.bed.bigBed' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bedMethyl'
if file_ext == '.bed.gz' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bed_bedMethyl'
unknownDict = {'.CEL.gz': 'CEL',
'.bb': 'bedMethyl',
'.bed': 'bed',
'.bed.gz': 'bed',
'.bed.bigBed': 'bigBed',
'.bigBed': 'bigBed',
'.bed9': 'bedMethyl',
'.bed9.gz': 'bed_bedMethyl',
'.bedCluster.bigBed': 'bigBed',
'.bedLogR.bigBed': 'bedLogR',
'.bedRnaElements.bigBed': 'bedRnaElements',
'.bedRrbs.bigBed': 'bedMethyl',
'.broadPeak.gz': 'bed_broadPeak',
'.bigBed': 'bigBed',
'.csfasta.gz': 'csfasta',
'.csqual.gz': 'csqual',
'.fasta.gz': 'fasta',
'.gff.bigBed': 'bigBed',
'.gff.gz': 'gtf',
'.gp.bigBed': 'bigBed',
'.matrix.gz': 'tsv',
'.matrix.tgz': 'tar',
'.narrowPeak': 'bed_narrowPeak',
'.narrowPeak.gz': 'bed_narrowPeak',
'.pdf': 'tsv', # These are going to be obsolete
'.pdf.gz': 'tsv', # These are going to be obsolete
'.peaks.gz': 'tsv',
'.peptideMapping.bigBed': 'bigBed',
'.shortFrags.bigBed': 'bigBed',
'.sorted.bigBed': 'bigBed',
'.tab.gz': 'tsv',
'.tgz': 'tar',
'.txt': 'tsv',
'.xlsx': 'tsv', # These need to be converted to tsv
}
if file_format in ['unknown', 'customTrack']:
value['file_format'] = unknownDict[file_ext]
# http://redmine.encodedcc.org/issues/1429
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
dataset_status = dataset.get('status')
status = value.get('status')
if status == 'current':
if dataset_status == 'released':
value['status'] = 'released'
else:
value['status'] = 'in progress'
if status == 'obsolete':
if dataset_status in ['released', 'revoked']:
value['status'] = 'revoked'
else:
value['status'] = 'deleted'
# http://redmine.encodedcc.org/issues/568
output_type_dict = {
'': 'raw data',
'Alignments': 'alignments',
'bigBed': 'sites',
'bigWig': 'sites',
'Clusters': 'clusters',
'Contigs': 'contigs',
'FastqRd1': 'reads',
'FastqRd2': 'reads',
'forebrain_enhancers': 'enhancers_forebrain',
'heart_enhancers': 'enhancers_heart',
'GreenIdat': 'idat green file',
'hotspot_broad_peaks': 'hotspots',
'hotspot_narrow_peaks': 'hotspots',
'hotspot_signal': 'hotspots',
'Hotspots': 'hotspots',
'Interactions': 'interactions',
'MinusRawSignal': 'raw minus signal',
'PlusRawSignal': 'raw plus signal',
'macs2_dnase_peaks': 'peaks',
'macs2_dnase_signal': 'signal',
'MinusSignal': 'minus signal',
'minusSignal': 'minus signal',
'MultiMinus': 'multi-read minus signal',
'MultiPlus': 'multi-read plus signal',
'MultiSignal': 'multi-read signal',
'MultiUnstranded': 'multi-read signal',
'RawData2': 'reads',
'RedIdat': 'idat red file',
'peak': 'peaks',
'PeakCalls': 'peaks',
'Peaks': 'peaks',
'PlusSignal': 'plus signal',
'plusSignal': 'plus signal',
'predicted_enhancers_heart': 'enhancers_heart',
'RawSignal': 'raw signal',
'RawData': 'raw data',
'rcc': 'raw data',
'Read': 'reads',
'read': 'reads',
'read1': 'reads',
'rejected_reads': 'rejected reads',
'RepPeaks': 'peaks',
'RepSignal': 'signal',
'Signal': 'signal',
'SimpleSignal': 'signal',
'Sites': 'sites',
'Spikeins': 'spike-ins',
'Spikes': 'spike-ins',
'Splices': 'splice junctions',
'uniqueReads': 'unique signal',
'UniqueSignal': 'unique signal',
'uniqueSignal': 'unique signal',
'UniqueMinus': 'unique minus signal',
'uniqueMinusSignal': 'unique minus signal',
'UniquePlus': 'unique plus signal',
'uniquePlusSignal': 'unique plus signal',
'UniqueUnstranded': 'unique signal',
'UnstrandedSignal': 'signal',
'dataset_used': 'enhancers',
'TRAINING_DATA_MOUSE_VISTA': 'enhancers',
'method_description': 'enhancers',
'unknown': 'enhancers',
'Protocol': 'raw data',
}
current_output_type = value['output_type']
if current_output_type in output_type_dict:
value['output_type'] = output_type_dict[current_output_type]
# Help the raw data problem
if value['output_type'] == 'raw data' and value['file_format'] == 'fastq':
value['output_type'] = 'reads'
@upgrade_step('file', '3', '4')
def file_3_4(value, system):
# http://redmine.encodedcc.org/issues/1714
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
if 'download_path' in value:
value.pop('download_path')
value['lab'] = dataset['lab']
value['award'] = dataset['award']
# EDW User
if value.get('submitted_by') == '0e04cd39-006b-4b4a-afb3-b6d76c4182ff':
value['lab'] = 'fb0af3d0-3a4c-4e96-b67a-f273fe527b04'
value['award'] = '8bafd685-aa17-43fe-95aa-37bc1c90074a'
@upgrade_step('file', '4', '5')
def file_4_5(value, system):
# http://redmine.encodedcc.org/issues/2566
# http://redmine.encodedcc.org/issues/2565
# we need to remember bedRnaElements,
bed_files = {
'bed_bedLogR': 'bedLogR',
'bed_bedMethyl': 'bedMethyl',
'bed_broadPeak': 'broadPeak',
'bed_gappedPeak': 'gappedPeak',
'bed_narrowPeak': 'narrowPeak',
'bed_bedRnaElements': 'bedRnaElements'
}
bigBed_files = [
'bedLogR',
'bedMethyl',
'broadPeak',
'narrowPeak',
'gappedPeak',
'bedRnaElements'
]
current = value['file_format']
if current in ['bed', 'bigBed']:
value['file_format_type'] = 'unknown'
# we do not know what those formats were, wranglers will need to investigate
elif current in bigBed_files:
value['file_format_type'] = current
value['file_format'] = 'bigBed'
elif current in bed_files:
value['file_format_type'] = bed_files[current]
value['file_format'] = 'bed'
elif current in ['gff']:
value['file_format_type'] = 'unknown'
# all gffs to date were in gff3, but we wouldn't know without wranglers checking
# classify the peptide stuff
if value['output_type'] in ['mPepMapGcFt', 'mPepMapGcUnFt']:
value['file_format_type'] = 'modPepMap'
elif value['output_type'] in ['pepMapGcFt', 'pepMapGcUnFt']:
value['file_format_type'] = 'pepMap'
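# Illustrative example of the remapping above (not from the original source): a legacy
# record with file_format 'bed_narrowPeak' ends up as file_format='bed' with
# file_format_type='narrowPeak', while a 'narrowPeak' record becomes 'bigBed'/'narrowPeak'.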
# http://redmine.encodedcc.org/issues/2565
output_mapping = {
# Category: Raw data
'idat green file': 'idat green channel',
'idat red file': 'idat red channel',
'reads': 'reads',
'rejected reads': 'rejected reads',
'rcc': 'reporter code counts',
'CEL': 'intensity values',
'raw data': 'raw data',
'alignments': 'alignments',
'transcriptome alignments': 'transcriptome alignments',
'spike-ins': 'spike-in alignments',
'multi-read minus signal': 'minus strand signal of multi-mapped reads',
'multi-read plus signal': 'plus strand signal of multi-mapped reads',
'multi-read signal': 'signal of multi-mapped reads',
'multi-read normalized signal': 'normalized signal of multi-mapped reads',
'raw minus signal': 'raw minus strand signal',
'raw plus signal': 'raw plus strand signal',
'raw signal': 'raw signal',
'raw normalized signal': 'raw normalized signal',
'unique minus signal': 'minus strand signal of unique reads',
'unique plus signal': 'plus strand signal of unique reads',
'unique signal': 'signal of unique reads',
'signal': 'signal',
'minus signal': 'minus strand signal',
'plus signal': 'plus strand signal',
'Base_Overlap_Signal': 'base overlap signal',
'PctSignal': 'percentage normalized signal',
'SumSignal': 'summed densities signal',
'WaveSignal': 'wavelet-smoothed signal',
'signal p-value': 'signal p-value',
'fold change over control': 'fold change over control',
'enrichment': 'enrichment',
'exon quantifications': 'exon quantifications',
'ExonsDeNovo': 'exon quantifications',
'ExonsEnsV65IAcuff': 'exon quantifications',
'ExonsGencV10': 'exon quantifications',
'ExonsGencV3c': 'exon quantifications',
'ExonsGencV7': 'exon quantifications',
'GeneDeNovo': 'gene quantifications',
'GeneEnsV65IAcuff': 'gene quantifications',
'GeneGencV10': 'gene quantifications',
'GeneGencV3c': 'gene quantifications',
'GeneGencV7': 'gene quantifications',
'genome quantifications': 'gene quantifications',
'library_fraction': 'library fraction',
'transcript quantifications': 'transcript quantifications',
'TranscriptDeNovo': 'transcript quantifications',
'TranscriptEnsV65IAcuff': 'transcript quantifications',
'TranscriptGencV10': 'transcript quantifications',
'TranscriptGencV3c': 'transcript quantifications',
'TranscriptGencV7': 'transcript quantifications',
'mPepMapGcFt': 'filtered modified peptide quantification',
'mPepMapGcUnFt': 'unfiltered modified peptide quantification',
'pepMapGcFt': 'filtered peptide quantification',
'pepMapGcUnFt': 'unfiltered peptide quantification',
'clusters': 'clusters',
'CNV': 'copy number variation',
'contigs': 'contigs',
'enhancer validation': 'enhancer validation',
'FiltTransfrags': 'filtered transcribed fragments',
'hotspots': 'hotspots',
'Junctions': 'splice junctions',
'interactions': 'long range chromatin interactions',
'Matrix': 'long range chromatin interactions',
'PrimerPeaks': 'long range chromatin interactions',
'sites': 'methylation state at CpG',
'methyl CG': 'methylation state at CpG',
'methyl CHG': 'methylation state at CHG',
'methyl CHH': 'methylation state at CHH',
'peaks': 'peaks',
'replicated peaks': 'replicated peaks',
'RbpAssocRna': 'RNA-binding protein associated mRNAs',
'splice junctions': 'splice junctions',
'Transfrags': 'transcribed fragments',
'TssGencV3c': 'transcription start sites',
'TssGencV7': 'transcription start sites',
'Valleys': 'valleys',
'Alignability': 'sequence alignability',
'Excludable': 'blacklisted regions',
'Uniqueness': 'sequence uniqueness',
'genome index': 'genome index',
'genome reference': 'genome reference',
'Primer': 'primer sequence',
'spike-in sequence': 'spike-in sequence',
'reference': 'reference',
'enhancers': 'predicted enhancers',
'enhancers_forebrain': 'predicted forebrain enhancers',
'enhancers_heart': 'predicted heart enhancers',
'enhancers_wholebrain': 'predicted whole brain enhancers',
'TssHmm': 'predicted transcription start sites',
'UniformlyProcessedPeakCalls': 'optimal idr thresholded peaks',
'Validation': 'validation',
'HMM': 'HMM predicted chromatin state'
}
old_output_type = value['output_type']
# The peptide mapping files from UCSC all assumed V10 hg19
if old_output_type in ['mPepMapGcFt', 'mPepMapGcUnFt', 'pepMapGcFt', 'pepMapGcUnFt']:
value['genome_annotation'] = 'V10'
value['assembly'] = 'hg19'
elif old_output_type in ['ExonsEnsV65IAcuff', 'GeneEnsV65IAcuff', 'TranscriptEnsV65IAcuff']:
value['genome_annotation'] = 'ENSEMBL V65'
elif old_output_type in ['ExonsGencV3c', 'GeneGencV3c', 'TranscriptGencV3c', 'TssGencV3c']:
value['genome_annotation'] = 'V3c'
elif old_output_type in ['ExonsGencV7', 'GeneGenc7', 'TranscriptGencV7', 'TssGencV7']:
value['genome_annotation'] = 'V7'
elif old_output_type in ['ExonsGencV10', 'GeneGencV10', 'TranscriptGencV10', 'TssGencV10']:
value['genome_annotation'] = 'V10'
elif old_output_type in ['spike-ins'] and value['file_format'] == 'fasta':
old_output_type = 'spike-in sequence'
elif old_output_type in ['raw data'] and value['file_format'] in ['fastq', 'csfasta', 'csqual', 'fasta']:
old_output_type = 'reads'
elif old_output_type in ['raw data'] and value['file_format'] in ['CEL', 'tar']:
old_output_type = 'CEL'
elif old_output_type in ['raw data'] and value['file_format'] in ['rcc']:
old_output_type = 'rcc'
elif old_output_type in ['raw data'] and value['lab'] == '/labs/timothy-hubbard/':
old_output_type = 'reference'
elif old_output_type in ['raw data']:
if 'These are protocol documents' in value.get('notes', ''):
old_output_type = 'reference'
elif old_output_type == 'sites' and value['file_format'] == 'tsv':
old_output_type = 'interactions'
elif old_output_type in ['Validation'] and value['file_format'] == '2bit':
old_output_type = 'genome reference'
value['output_type'] = output_mapping[old_output_type]
# label the lost bedRnaElements files #2940
bedRnaElements_files = [
'transcript quantifications',
'gene quantifications',
'exon quantifications'
]
if (
value['output_type'] in bedRnaElements_files
and value['status'] in ['deleted', 'replaced']
and value['file_format'] == 'bigBed'
and value['file_format_type'] == 'unknown'
):
value['file_format_type'] = 'bedRnaElements'
# Get the replicate information
if value.get('file_format') in ['fastq', 'fasta', 'csfasta']:
context = system['context']
root = find_root(context)
if 'replicate' in value:
replicate = root.get_by_uuid(value['replicate']).upgrade_properties()
if 'read_length' not in value:
value['read_length'] = replicate.get('read_length')
if value['read_length'] is None:
del value['read_length']
run_type_dict = {
True: 'paired-ended',
False: 'single-ended',
None: 'unknown'
}
if 'run_type' not in value:
value['run_type'] = run_type_dict[replicate.get('paired_ended')]
if value.get('paired_end') in ['2']:
value['run_type'] = 'paired-ended'
# Backfill content_md5sum #2683
if 'content_md5sum' not in value:
md5sum_content_md5sum = system['registry'].get('backfill_2683', {})
if value['md5sum'] in md5sum_content_md5sum:
value['content_md5sum'] = md5sum_content_md5sum[value['md5sum']]
@upgrade_step('file', '5', '6')
def file_5_6(value, system):
# http://redmine.encodedcc.org/issues/3019
import re
if value.get('output_type') in [
'minus strand signal of multi-mapped reads',
'plus strand signal of multi-mapped reads',
'signal of multi-mapped reads',
'normalized signal of multi-mapped reads'
]:
value['output_type'] = re.sub('multi-mapped', 'all', value['output_type'])
@upgrade_step('file', '6', '7')
def file_6_7(value, system):
# http://redmine.encodedcc.org/issues/3063
if 'file_format_specifications' in value:
value['file_format_specifications'] = list(set(value['file_format_specifications']))
if 'controlled_by' in value:
value['controlled_by'] = list(set(value['controlled_by']))
if 'derived_from' in value:
value['derived_from'] = list(set(value['derived_from']))
if 'supercedes' in value:
value['supercedes'] = list(set(value['supercedes']))
if 'aliases' in value:
value['aliases'] = list(set(value['aliases']))
@upgrade_step('file', '7', '8')
def file_7_8(value, system):
return
@upgrade_step('file', '8', '9')
def file_8_9(value, system):
# http://redmine.encodedcc.org/issues/4183
if (value['file_format'] == 'fastq') and ('assembly' in value):
value.pop('assembly')
# http://redmine.encodedcc.org/issues/1859
if 'supercedes' in value:
value['supersedes'] = value['supercedes']
value.pop('supercedes', None)
def set_to_midnight(date_string):
release_date = datetime.strptime(date_string, '%Y-%m-%d')
min_pub_date_time = datetime.combine(release_date, time.min)
return '{:%Y-%m-%dT%H:%M:%S.%f+00:00}'.format(min_pub_date_time)
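# For illustration (comment added; not part of the original upgrade code): given
# the format string above, a call such as set_to_midnight('2016-01-01') yields
# '2016-01-01T00:00:00.000000+00:00'.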
@upgrade_step('file', '9', '10')
def file_9_10(value, system):
# http://redmine.encodedcc.org/issues/5021
# http://redmine.encodedcc.org/issues/4929
# http://redmine.encodedcc.org/issues/4927
# http://redmine.encodedcc.org/issues/4903
# http://redmine.encodedcc.org/issues/4904
date_created = value.get('date_created')
if date_created.find('T') == -1:
value['date_created'] = set_to_midnight(date_created)
# http://redmine.encodedcc.org/issues/4748
aliases = []
if 'aliases' in value and value['aliases']:
aliases = value['aliases']
else:
return
aliases_to_remove = []
for i in range(0, len(aliases)):
new_alias = ''
if 'roadmap-epigenomics' in aliases[i]:
if '||' in aliases[i]:
scrub_parts = aliases[i].split('||')
date_split = scrub_parts[1].split(' ')
date = "-".join([date_split[1].strip(),
date_split[2].strip(),
date_split[5].strip()])
scrubbed_list = [scrub_parts[0].strip(), date.strip(), scrub_parts[2].strip()]
if len(scrub_parts) == 4:
scrubbed_list.append(scrub_parts[3].strip())
new_alias = '_'.join(scrubbed_list)
parts = aliases[i].split(':') if not new_alias else new_alias.split(':')
namespace = parts[0]
if namespace in ['ucsc_encode_db', 'UCSC_encode_db', 'versionof']:
# Remove the alias with the bad namespace
aliases_to_remove.append(aliases[i])
namespace = 'encode'
if namespace in ['CGC']:
namespace = namespace.lower()
rest = '_'.join(parts[1:]).strip()
# Remove or substitute bad characters and multiple whitespaces
import re
if '"' or '#' or '@' or '!' or '$' or '^' or '&' or '|' or '~' or ';' or '`' in rest:
rest = re.sub(r'[\"#@!$^&|~;`\/\\]', '', rest)
rest = ' '.join(rest.split())
if '%' in rest:
rest = re.sub(r'%', 'pct', rest)
if '[' or '{' in rest:
rest = re.sub('[\[{]', '(', rest)
if ']' or '}' in rest:
rest = re.sub('[\]}]', ')', rest)
new_alias = ':'.join([namespace, rest])
if new_alias not in aliases:
aliases[i] = new_alias
if aliases_to_remove and aliases:
for a in aliases_to_remove:
if a in aliases:
aliases.remove(a)
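# Illustrative example (hypothetical alias, added for clarity): an alias such as
# 'ucsc_encode_db:K562 RNA-seq #2' has its namespace remapped to 'encode' and its
# bad characters scrubbed, so it is rewritten in place as 'encode:K562 RNA-seq 2'.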
@upgrade_step('file', '10', '11')
def file_10_11(value, system):
# http://redmine.encodedcc.org/issues/5049
# http://redmine.encodedcc.org/issues/5081
# http://redmine.encodedcc.org/issues/4924
if not value.get('no_file_available'):
value['no_file_available'] = False
# The above change also required the files whose values should be set to True
# to also be upgraded or patched. The patch was applied post-release and
# can be found in ./upgrade_data/file_10_to_11_patch.tsv
@upgrade_step('file', '11', '12')
def file_11_12(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-3347
return
@upgrade_step('file', '12', '13')
def file_12_13(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-3809
platform = value.get('platform', None)
if platform and platform in [
"ced61406-dcc6-43c4-bddd-4c977cc676e8",
"c7564b38-ab4f-4c42-a401-3de48689a998"
]:
value.pop('read_length', None)
value.pop('run_type', None)
return
@upgrade_step('file', '13', '14')
def file_13_14(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-4613
output_type = value.get('output_type', None)
if output_type and output_type == 'candidate regulatory elements':
value['output_type'] = 'candidate Cis-Regulatory Elements'
@upgrade_step('file', '14', '15')
def file_14_15(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-4641
output_type = value.get('output_type', None)
if output_type and output_type == 'optimal idr thresholded peaks':
value['output_type'] = 'optimal IDR thresholded peaks'
elif output_type and output_type == 'conservative idr thresholded peaks':
value['output_type'] = 'conservative IDR thresholded peaks'
elif output_type and output_type == 'pseudoreplicated idr thresholded peaks':
value['output_type'] = 'pseudoreplicated IDR thresholded peaks'
@upgrade_step('file', '15', '16')
def file_15_16(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-4921
platform = value.get('platform', None)
if platform == "e2be5728-5744-4da4-8881-cb9526d0389e":
value.pop('read_length', None)
value.pop('run_type', None)
return
@upgrade_step('file', '16', '17')
def file_16_17(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5050
platform = value.get('platform', None)
if platform and platform in [
"6c275b37-018d-4bf8-85f6-6e3b830524a9",
"8f1a9a8c-3392-4032-92a8-5d196c9d7810"
]:
value.pop('read_length', None)
value.pop('run_type', None)
return
| 39.161342
| 109
| 0.572343
|
ca63131a835d1da99ec954b6d49b83db2b6e6e7a
| 4,724
|
py
|
Python
|
google/appengine/tools/cron_xml_parser.py
|
enpi/Test
|
5fb2055c7cfd4cc91ff97471c529b041f21abeb6
|
[
"Apache-2.0"
] | 3
|
2019-01-28T03:57:20.000Z
|
2020-02-20T01:37:33.000Z
|
google/appengine/tools/cron_xml_parser.py
|
enpi/Test
|
5fb2055c7cfd4cc91ff97471c529b041f21abeb6
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/tools/cron_xml_parser.py
|
enpi/Test
|
5fb2055c7cfd4cc91ff97471c529b041f21abeb6
|
[
"Apache-2.0"
] | 3
|
2019-01-18T11:33:56.000Z
|
2020-01-05T10:44:05.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Directly processes text of cron.xml.
CronXmlParser is called with an XML string to produce a CronXml object
containing the data from the XML.
CronXmlParser: converts XML to CronXml object
Cron: describes a single cron specified in cron.xml
"""
from xml.etree import ElementTree
from google.appengine.cron import groc
from google.appengine.cron import groctimespecification
from google.appengine.tools import xml_parser_utils
from google.appengine.tools.app_engine_config_exception import AppEngineConfigException
def GetCronYaml(unused_application, cron_xml_str):
return _MakeCronListIntoYaml(CronXmlParser().ProcessXml(cron_xml_str))
def _MakeCronListIntoYaml(cron_list):
"""Converts list of yaml statements describing cron jobs into a string."""
statements = ['cron:']
for cron in cron_list:
statements += cron.ToYaml()
return '\n'.join(statements) + '\n'
class CronXmlParser(object):
"""Provides logic for walking down XML tree and pulling data."""
def ProcessXml(self, xml_str):
"""Parses XML string and returns object representation of relevant info.
Args:
xml_str: The XML string.
Returns:
A list of Cron objects containing information about cron jobs from the
XML.
Raises:
AppEngineConfigException: In case of malformed XML or illegal inputs.
"""
try:
self.crons = []
self.errors = []
xml_root = ElementTree.fromstring(xml_str)
if xml_root.tag != 'cronentries':
raise AppEngineConfigException('Root tag must be <cronentries>')
for child in xml_root:
self.ProcessCronNode(child)
if self.errors:
raise AppEngineConfigException('\n'.join(self.errors))
return self.crons
except ElementTree.ParseError:
raise AppEngineConfigException('Bad input -- not valid XML')
def ProcessCronNode(self, node):
"""Processes XML <cron> nodes into Cron objects.
The following information is parsed out:
description: The purpose of the cron job.
url: The location of the script.
schedule: Written in groc; the schedule according to which the job is
executed.
timezone: The timezone that the schedule runs in.
target: Which version of the app this applies to.
Args:
node: <cron> XML node in cron.xml.
"""
tag = xml_parser_utils.GetTag(node)
if tag != 'cron':
self.errors.append('Unrecognized node: <%s>' % tag)
return
cron = Cron()
cron.url = xml_parser_utils.GetChildNodeText(node, 'url')
cron.timezone = xml_parser_utils.GetChildNodeText(node, 'timezone')
cron.target = xml_parser_utils.GetChildNodeText(node, 'target')
cron.description = xml_parser_utils.GetChildNodeText(node, 'description')
cron.schedule = xml_parser_utils.GetChildNodeText(node, 'schedule')
validation_error = self._ValidateCronEntry(cron)
if validation_error:
self.errors.append(validation_error)
else:
self.crons.append(cron)
def _ValidateCronEntry(self, cron):
if not cron.url:
return 'No URL for <cron> entry'
if not cron.schedule:
return "No schedule provided for <cron> entry with URL '%s'" % cron.url
try:
groctimespecification.GrocTimeSpecification(cron.schedule)
except groc.GrocException:
return ("Text '%s' in <schedule> node failed to parse,"
' for <cron> entry with url %s.'
% (cron.schedule, cron.url))
class Cron(object):
"""Instances contain information about individual cron entries."""
TZ_GMT = 'UTC'
def ToYaml(self):
"""Returns data from Cron object as a list of Yaml statements."""
statements = [
'- url: %s' % self._SanitizeForYaml(self.url),
' schedule: %s' % self._SanitizeForYaml(self.schedule)]
for optional in ('target', 'timezone', 'description'):
field = getattr(self, optional)
if field:
statements.append(' %s: %s' % (optional, self._SanitizeForYaml(field)))
return statements
def _SanitizeForYaml(self, field):
return "'%s'" % field.replace('\n', ' ').replace("'", "''")
| 33.267606
| 87
| 0.701524
|
73be989993e90d72912fc08516bce82d12deed07
| 3,896
|
py
|
Python
|
src/evaluation/evaluate.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | 4
|
2018-11-09T16:18:28.000Z
|
2019-04-09T11:19:23.000Z
|
src/evaluation/evaluate.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | null | null | null |
src/evaluation/evaluate.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | 1
|
2020-05-28T18:48:17.000Z
|
2020-05-28T18:48:17.000Z
|
import os
from utils.read_exp_utils import read_raw_y, read_results_for_experiment, read_empty_results, read_experiment_result_db, read_raw_y_db
from utils.write_exp_utils import update_results_eval, update_raw_eval
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import *
import itertools
from utils.misc_utils import connect_rds
from psycopg2.extras import Json, DictCursor
#ResultEvaluator:
def filter_time(df_raw_scores, time):
'''Return a copy of df_raw_scores restricted to night hours (20-23 and 0-5) or day hours (7-18); any other value of time returns the unfiltered copy.'''
df=df_raw_scores.copy()
if time == 'night':
df['hour'] = pd.to_datetime(df['time']).apply(lambda x: x.hour)
df = df.loc[(df['hour'] > 19) | (df['hour'] < 6)]
elif time == 'day':
df['hour'] = pd.to_datetime(df['time']).apply(lambda x: x.hour)
df = df.loc[(df['hour'] < 19) & (df['hour'] > 6)]
return df
def tp_group(tp_group_df, k):
"""Function called to find the top k for each time group"""
#take the top k from each group
tp_group_df = tp_group_df.head(k)
#return the total tp in each group
return tp_group_df['y_true'].sum()
def pr_at_k(df, k):
"""
Returns p/r for a specific result at a specific k
df: pandas df with columns 'space', 'time', 'y_true', and 'y_pred'
k: the number of obs you'd like to label 1 at each time
"""
#static traits of df
universe = df['time'].nunique()
p = df['y_true'].sum()
#needs to be sorted by (time, y_pred)
#group by time and find the num tp in the top k
tp = df.groupby('time').pipe(tp_group, k)
fp = (universe*k) - tp
precision = tp/(tp+fp)
recall = tp/p
return precision, recall
def pr_all_k(df):
"""
Returns all p/r for a specific result_id
df: pandas df with columns 'space', 'time', 'y_true' and 'y_pred'
"""
space_universe = df['space'].nunique()
precision_array = []
recall_array = []
for k in range(1, space_universe+1):
precision, recall = pr_at_k(df, k)
precision_array.append(precision)
recall_array.append(recall)
return precision_array, recall_array
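# Illustrative toy example (hypothetical data, added for clarity). With a frame
# already sorted by time and descending y_pred:
#   space  time  y_true  y_pred
#     a     t1     1      0.9
#     b     t1     0      0.4
#     a     t2     0      0.8
#     b     t2     1      0.3
# pr_at_k(df, 1) labels only the top row of each time group, catching 1 of the 2
# true positives across 2 labelled rows, so precision = recall = 0.5.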
class EvaluationResultUpdater:
def __init__(self,experiment_ids):
self.experiment_ids = experiment_ids
print('Evaluation Result Updater is ready.\nCall update_result_tables() to write results to the database.')
def _fill_result_table_for_experiment(self, df, result_id:int):
df_day = filter_time(df, 'day')
df_night = filter_time(df, 'night')
p8, r8 = pr_at_k(df_day, 8)
p1, r1 = pr_at_k(df_night, 1)
update_results_eval('p_8_dayshift', p8, result_id)
update_results_eval('r_8_dayshift', r8, result_id)
update_results_eval('p_1_nightshift', p1, result_id)
update_results_eval('r_1_nightshift', r1, result_id)
def _fill_raw_table_for_experiment(self, df, result_id:int):
df_day = filter_time(df, 'day')
df_night = filter_time(df, 'night')
precision_d, recall_d = pr_all_k(df_day)
precision_n, recall_n = pr_all_k(df_night)
precision_a, recall_a = pr_all_k(df)
pr_dict = Json({'recall_day': recall_d, 'precision_day': precision_d,
'recall_night': recall_n, 'precision_night': precision_n,
'recall_all': recall_a, 'precision_all': precision_a})
update_raw_eval(pr_dict, result_id)
def update_result_tables(self):
for experiment_id in self.experiment_ids:
result_ids = read_results_for_experiment(experiment_id)
for result_id in result_ids:
df = read_raw_y(result_id)
print('updating result tables for experiment:',experiment_id)
self._fill_result_table_for_experiment(df, result_id)
self._fill_raw_table_for_experiment(df, result_id)
| 32.198347
| 134
| 0.652977
|
5beb2c0035542aba0b34e6e62f70b5fc1dfd22e9
| 26,678
|
py
|
Python
|
jax/interpreters/partial_eval.py
|
david-waterworth/jax
|
f6e1d01f94936c992d9e63810eae5db69c06a026
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-02-26T16:05:11.000Z
|
2021-06-05T00:36:37.000Z
|
jax/interpreters/partial_eval.py
|
david-waterworth/jax
|
f6e1d01f94936c992d9e63810eae5db69c06a026
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/interpreters/partial_eval.py
|
david-waterworth/jax
|
f6e1d01f94936c992d9e63810eae5db69c06a026
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-03-03T21:29:13.000Z
|
2020-03-03T21:29:13.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools as it
from collections import namedtuple, Counter, defaultdict
import contextlib
import threading
from weakref import ref
import numpy as onp
from .. import core
from .. import linear_util as lu
from ..abstract_arrays import ShapedArray, ConcreteArray, raise_to_shaped
from ..util import (unzip2, safe_zip, safe_map, toposort, partial, split_list,
wrap_name, cache)
from ..core import (Trace, Tracer, new_master, Jaxpr, Literal, get_aval,
AbstractValue, unit, unitvar, abstract_unit, Primitive,
call_p, TypedJaxpr, new_jaxpr_eqn)
map = safe_map
zip = safe_zip
def identity(x): return x
# A partial value (pval) is modeled as a pair (pv, const), as per
# type PVal = (PV, Const)
# data PV = Known | Unknown AbstractValue
# type Const = MaybeTraced JaxType
# where the Known arm, represented by a None, indicates a known (constant) value
# and the Unknown arm, represented by an AbstractValue instance, indicates an
# unknown value.
# When the pv is an AbstractValue, then the const must be unit.
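# For illustration (comment added, not in the original module): a known Python
# scalar 3.0 is modeled as PartialVal((None, 3.0)), while an unknown float32
# vector of length 3 is modeled as PartialVal((ShapedArray((3,), onp.float32), unit)).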
class JaxprTrace(Trace):
def pure(self, val):
return self.new_const(val)
def lift(self, val):
return self.new_const(val)
def sublift(self, val):
return JaxprTracer(self, val.pval, FreeVar(val))
def new_const(self, val):
if isinstance(val, Tracer) and val._trace.level == self.level:
raise Exception
return JaxprTracer(self, PartialVal((None, val)), unit)
def new_instantiated_literal(self, val):
return JaxprTracer(self, PartialVal((get_aval(val), unit)), Literal(val))
def new_instantiated_const(self, val):
return JaxprTracer(self, PartialVal((get_aval(val), unit)), ConstVar(val))
def new_arg(self, pval):
_, const = pval
return JaxprTracer(self, pval, LambdaBinding())
def instantiate_const(self, tracer):
pv, const = tracer.pval
if isinstance(pv, AbstractValue):
return tracer
elif pv is None:
if type(const) in core.literalable_types and onp.shape(const) == ():
return self.new_instantiated_literal(const)
else:
return self.new_instantiated_const(const)
else:
raise TypeError(pv)
def instantiate_const_abstracted(self, tracer):
pv, const = tracer.pval
if isinstance(pv, AbstractValue):
return tracer
elif pv is None:
aval = raise_to_shaped(get_aval(const), onp.isscalar(const))
return JaxprTracer(self, PartialVal((aval, unit)), ConstVar(const))
else:
raise TypeError(pv)
def process_primitive(self, primitive, tracers, params):
if primitive in custom_partial_eval_rules:
return custom_partial_eval_rules[primitive](self, *tracers, **params)
else:
return self.default_process_primitive(primitive, tracers, params)
def default_process_primitive(self, primitive, tracers, params):
pvs, consts = unzip2(t.pval for t in tracers)
if all(pv is None for pv in pvs):
return primitive.bind(*consts, **params)
tracers = map(self.instantiate_const, tracers)
avals = [t.aval for t in tracers]
out_aval = primitive.abstract_eval(*avals, **params)
if primitive.multiple_results:
out_tracers = [JaxprTracer(self, PartialVal((aval, unit)), None)
for aval in out_aval]
eqn = new_eqn_recipe(tracers, out_tracers, primitive, params)
for t in out_tracers: t.recipe = eqn
return out_tracers
else:
out_tracer = JaxprTracer(self, PartialVal((out_aval, unit)), None)
out_tracer.recipe = new_eqn_recipe(tracers, [out_tracer], primitive, params)
return out_tracer
def process_call(self, call_primitive, f, tracers, params):
name = params.get('name', f.__name__)
if self.master.trace_type is StagingJaxprTrace:
tracers = map(self.instantiate_const_abstracted, tracers)
else:
name = wrap_name(name, 'pe')
params = dict(params, name=name)
if call_primitive in call_partial_eval_rules:
return call_partial_eval_rules[call_primitive](self, f, tracers, params)
if call_primitive in map_primitives:
return self.process_map(call_primitive, f, tracers, params)
in_pvs, in_consts = unzip2([t.pval for t in tracers])
fun, aux = partial_eval(f, self, in_pvs)
out_flat = call_primitive.bind(fun, *in_consts, **params)
out_pvs, jaxpr, env = aux()
env_tracers = map(self.full_raise, env)
out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)])
const_tracers = map(self.new_instantiated_const, consts)
lifted_jaxpr = convert_constvars_jaxpr(jaxpr)
out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None)
for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)]
new_params = dict(params, call_jaxpr=lifted_jaxpr)
# The `jaxpr` already contains the env_vars at start of invars
eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)),
out_tracers, call_primitive, new_params)
for t in out_tracers:
t.recipe = eqn
return out_tracers
def process_map(self, map_primitive, f, tracers, params):
in_pvs, in_consts = unzip2([t.pval for t in tracers])
reduced_pvs = [None if pv is None else _mapped_aval(pv) for pv in in_pvs]
fun, aux = partial_eval(f, self, reduced_pvs)
out_flat = map_primitive.bind(fun, *in_consts, **params)
out_pvs_reduced, jaxpr, env = aux()
out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)])
out_pvs = [None if pv is None else _unmapped_aval(params['axis_size'], pv)
for pv in out_pvs_reduced]
const_tracers = map(self.new_instantiated_const, consts)
env_tracers = map(self.full_raise, env)
lifted_jaxpr = convert_constvars_jaxpr(jaxpr)
out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None)
for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)]
# The `jaxpr` already contains the env_vars at start of invars
new_params = dict(params,
mapped_invars=tuple([True] * len(const_tracers) +
[False] * len(env_tracers) +
[True] * len(tracers)),
call_jaxpr=lifted_jaxpr)
eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)),
out_tracers, map_primitive, new_params)
for t in out_tracers:
t.recipe = eqn
return out_tracers
def post_process_call(self, call_primitive, out_tracers, params):
if call_primitive in map_primitives:
return self.post_process_map(call_primitive, out_tracers, params)
jaxpr, consts, env = tracers_to_jaxpr([], out_tracers)
out_pvs, out_pv_consts = unzip2(t.pval for t in out_tracers)
out = out_pv_consts + consts
del consts, out_pv_consts
master = self.master
def todo(x):
n = len(jaxpr.outvars)
out_pv_consts, consts = x[:n], x[n:]
trace = JaxprTrace(master, core.cur_sublevel())
const_tracers = map(trace.new_instantiated_const, consts)
env_tracers = map(trace.full_raise, env)
lifted_jaxpr = convert_constvars_jaxpr(jaxpr)
out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None)
for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)]
new_params = dict(params, call_jaxpr=lifted_jaxpr)
# The `jaxpr` already contains the env_vars at start of invars
eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers)),
out_tracers, call_primitive, new_params)
for t in out_tracers:
t.recipe = eqn
return out_tracers
return out, todo
def post_process_map(self, map_primitive, out_tracers, params):
jaxpr, consts, env = tracers_to_jaxpr([], out_tracers)
out_pvs_reduced, out_pv_consts = unzip2(t.pval for t in out_tracers)
out_pvs = [None if pv is None else _unmapped_aval(params['axis_size'], pv)
for pv in out_pvs_reduced]
out = out_pv_consts + consts
del consts, out_pv_consts
master = self.master
def todo(x):
n = len(jaxpr.outvars)
out_pv_consts, consts = x[:n], x[n:]
trace = JaxprTrace(master, core.cur_sublevel())
const_tracers = map(trace.new_instantiated_const, consts)
# The `jaxpr` already contains the env_vars at start of invars
lifted_jaxpr = convert_constvars_jaxpr(jaxpr)
out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None)
for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)]
new_params = dict(params,
mapped_invars=tuple([True] * len(const_tracers) +
[False] * len(env)),
call_jaxpr=lifted_jaxpr)
env_tracers = map(trace.full_raise, env)
eqn = new_eqn_recipe(it.chain(const_tracers, env_tracers),
out_tracers, map_primitive, new_params)
for t in out_tracers:
t.recipe = eqn
return out_tracers
return out, todo
# This subclass is used just for its type tag, which switches the behavior of
# process_call to stage out into the jaxpr any call primitives encountered
# (rather than doing partial evaluation into the call).
class StagingJaxprTrace(JaxprTrace):
pass
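# (Clarifying note, added: this subclass is selected via
# trace_to_jaxpr(..., stage_out_calls=True) below, and JaxprTrace.process_call
# detects it through self.master.trace_type.)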
def _mapped_aval(aval):
if aval is core.abstract_unit:
return aval
elif isinstance(aval, ShapedArray):
# might be raising abstraction level from Concrete here
return ShapedArray(aval.shape[1:], aval.dtype)
else:
raise TypeError(aval)
def _unmapped_aval(size, aval):
if aval is core.abstract_unit:
return aval
elif isinstance(aval, ShapedArray):
return ShapedArray((size,) + aval.shape, aval.dtype)
else:
raise TypeError(aval)
map_primitives = set()
custom_partial_eval_rules = {}
call_partial_eval_rules = {}
def partial_eval(f, trace, pvs):
f = trace_to_subjaxpr(f, trace.master, False)
return partial_eval_wrapper(f, tuple(pvs))
@lu.transformation_with_aux
def partial_eval_wrapper(avals, *consts):
py_args = (map(PartialVal, zip(avals, consts)),)
jaxpr, (out_pvals, consts, env) = yield py_args, {}
out_pvs, out_consts = unzip2(out_pvals)
out = tuple(out_consts) + tuple(consts) # TODO: can consts be traced?
yield out, (out_pvs, jaxpr, env)
def abstract_eval_fun(fun, *avals, **params):
pvals_in = [PartialVal((a, unit)) for a in avals]
_, pvals_out, _ = trace_to_jaxpr(lu.wrap_init(fun, params), pvals_in,
instantiate=True)
avals_out, _ = unzip2(pvals_out)
for aval_out in avals_out:
assert isinstance(aval_out, AbstractValue) # instantiate=True
return avals_out
class JaxprTracer(Tracer):
__slots__ = ['pval', 'recipe']
def __init__(self, trace, pval, recipe):
assert isinstance(pval, PartialVal)
pv, const = pval
if isinstance(const, Tracer) and const._trace.level >= trace.level:
trace.escaped_tracer_error(
"Tracer from a higher level: {} in trace {}".format(const, trace))
self._trace = trace
self.pval = pval
self.recipe = recipe
def __repr__(self):
return 'Traced<{}:{}>'.format(self.aval, self._trace)
@property
def aval(self):
pv, const = self.pval
return partial_val_aval(pv, const)
@property
def parents(self):
if isinstance(self.recipe, JaxprEqnRecipe):
return self.recipe.invars
else:
return []
def ispure(self):
pv, _ = self.pval
return pv is None # or pv is core.abstract_unit
def full_lower(self):
if self.ispure():
_, const = self.pval
return core.full_lower(const)
else:
return self
class PartialVal(tuple):
def __new__(cls, xs):
pv, const = xs
if not core.skip_checks:
# type checks
assert isinstance(pv, valid_pv_types), xs
assert isinstance(const, core.Tracer) or core.valid_jaxtype(const), xs
# invariant checks
if isinstance(pv, AbstractValue):
assert const == core.unit, xs
return tuple.__new__(cls, xs)
valid_pv_types = (AbstractValue, type(None))
def merge_pvals(val, pval):
pv, const = pval
if isinstance(pv, AbstractValue):
return val
elif pv is None:
return const
else:
raise TypeError(pv)
def partial_val_aval(pv, const):
if isinstance(pv, AbstractValue):
return pv
elif pv is None:
return get_aval(const)
else:
raise TypeError(pv)
def trace_to_jaxpr(fun, pvals, instantiate=False, stage_out_calls=False):
"""Traces a function, given abstract inputs, to a jaxpr."""
trace_type = StagingJaxprTrace if stage_out_calls else JaxprTrace
with new_master(trace_type) as master:
fun = trace_to_subjaxpr(fun, master, instantiate)
jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals)
assert not env
del master
return jaxpr, out_pvals, consts
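# Minimal usage sketch (added for illustration; mirrors the call pattern used in
# abstract_eval_fun above):
#   pvals = [PartialVal((ShapedArray((), onp.float32), unit))]
#   jaxpr, out_pvals, consts = trace_to_jaxpr(lu.wrap_init(lambda x: x), pvals)
# which traces the identity function into a jaxpr with one invar, one outvar and
# no constants.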
@lu.transformation
def trace_to_subjaxpr(master, instantiate, pvals):
assert all([isinstance(pv, PartialVal) for pv in pvals]), pvals
trace = JaxprTrace(master, core.cur_sublevel())
in_tracers = map(trace.new_arg, pvals)
ans = yield in_tracers, {}
instantiate = [instantiate] * len(ans) if type(instantiate) is bool else instantiate
out_tracers = map(trace.full_raise, map(core.full_lower, ans))
out_tracers = map(partial(instantiate_const_at, trace), instantiate, out_tracers)
jaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracers)
out_pvals = [t.pval for t in out_tracers]
del trace, in_tracers, out_tracers
yield jaxpr, (out_pvals, consts, env)
def instantiate_const_at(trace, instantiate, tracer):
assert type(instantiate) is bool
if instantiate:
return trace.instantiate_const(trace.full_raise(tracer))
else:
return tracer
FreeVar = namedtuple('FreeVar', ['val'])
ConstVar = namedtuple('ConstVar', ['val'])
LambdaBinding = namedtuple('LambdaBinding', [])
JaxprEqnRecipe = namedtuple('JaxprEqnRecipe',
['eqn_id', 'invars', 'outvars', 'primitive', 'params'])
def new_eqn_recipe(invars, outvars, primitive, params):
"""Constructs a new JaxEqnRecipe.
Params:
invars: the tracers for the primitive inputs.
outvars: the tracers for the primitive outputs.
primitive: the primitive.
params: the primitive params
"""
if primitive.call_primitive:
# TODO(necula): move these checks to core.check_jaxpr, and call it
# in more places.
assert "call_jaxpr" in params
return JaxprEqnRecipe(object(), tuple(invars), map(ref, outvars), primitive,
params)
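# Note (comment added for clarity): outvars are stored as weak references, so a
# recipe does not keep its output tracers alive; recipe_to_eqn below dereferences
# them and substitutes a fresh unused var for any tracer already collected.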
def recipe_to_eqn(unused_var, getvar, recipe):
_, in_tracers, out_tracer_refs, primitive, params = recipe
out_tracers = [t_ref() for t_ref in out_tracer_refs]
invars = [getvar(t) for t in in_tracers]
outvars = [unused_var() if t is None else getvar(t) for t in out_tracers]
return new_jaxpr_eqn(invars, outvars, primitive, params)
def tracers_to_jaxpr(in_tracers, out_tracers):
"""Constructs Jaxpr given tracers for inputs and outputs.
Params:
in_tracers: the tracers that were created for the function inputs
out_tracers: the tracers that were output by the function.
Returns: a triple of a `Jaxpr`, a list of constant values corresponding to
the `constvars` in the returned Jaxpr, and a list of environment values.
The vars for the environment values have been pre-pended to the Jaxpr's
`invars`.
"""
newvar = core.gensym('')
t_to_var = defaultdict(newvar)
getvar = lambda t: t_to_var[id(t)]
sorted_tracers = toposort(out_tracers)
invars = map(getvar, in_tracers)
eqns = []
env = {}
consts = {}
const_to_var = defaultdict(newvar)
processed_eqn_ids = set()
for t in sorted_tracers:
recipe = t.recipe
if isinstance(recipe, JaxprEqnRecipe):
if recipe.eqn_id not in processed_eqn_ids:
eqns.append(recipe_to_eqn(newvar, getvar, recipe))
processed_eqn_ids.add(recipe.eqn_id)
elif isinstance(recipe, LambdaBinding):
if not any(t is in_tracer for in_tracer in in_tracers):
t._trace.escaped_tracer_error("Tracer not among input tracers {}".format(t))
assert in_tracers, "Lambda binding with no args"
elif isinstance(recipe, FreeVar):
env[getvar(t)] = recipe.val
elif isinstance(recipe, ConstVar):
v = t_to_var[id(t)] = const_to_var[id(recipe.val)]
consts[v] = recipe.val
elif isinstance(recipe, Literal):
t_to_var[id(t)] = recipe
elif recipe is unit:
t_to_var[id(t)] = unitvar
else:
raise TypeError(recipe)
env_vars, env_vals = unzip2(env.items())
const_vars, const_vals = unzip2(consts.items())
# The env_vars are pre-pended to the invars
jaxpr = Jaxpr(const_vars, list(it.chain(env_vars, invars)), list(map(getvar, out_tracers)), eqns)
core.skip_checks or core.check_jaxpr(jaxpr)
return jaxpr, const_vals, env_vals
@cache()
def convert_constvars_jaxpr(jaxpr):
"""Moves the constvars to the start of invars."""
core.skip_checks or core.check_jaxpr(jaxpr)
lifted_jaxpr = Jaxpr(constvars=(),
invars=jaxpr.constvars + jaxpr.invars,
outvars=jaxpr.outvars, eqns=jaxpr.eqns)
core.skip_checks or core.check_jaxpr(lifted_jaxpr)
return lifted_jaxpr
def partial_eval_jaxpr(jaxpr, unknowns, instantiate):
f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
cell = []
def fun(*vals):
pvals = [PartialVal((aval, unit)) if uk else PartialVal((None, val))
for aval, val, uk in zip(jaxpr.in_avals, vals, unknowns)]
jaxpr_2, out_pvals_2, consts_2 = trace_to_jaxpr(f, pvals, instantiate=instantiate)
out_pvs_2, out_consts_2 = unzip2(out_pvals_2)
cell.append((out_pvs_2, jaxpr_2, len(consts_2)))
return out_consts_2 + consts_2
pvals = [PartialVal((abstract_unit, unit)) if uk else PartialVal((aval, unit))
for aval, uk in zip(jaxpr.in_avals, unknowns)]
jaxpr_1, out_pvals, consts_1 = trace_to_jaxpr(lu.wrap_init(fun), pvals, instantiate=True)
(out_pvs_2, jaxpr_2, num_res), = cell
assert len(jaxpr_2.constvars) == num_res
# jaxpr :: a -> b
# jaxpr_1 :: a1 -> [b1, res]
# jaxpr_2 :: res | a2 -> b2
# jaxpr_2 :: [a2, res] -> b2
jaxpr_2 = convert_constvars_jaxpr(jaxpr_2)
jaxpr_2.invars = jaxpr_2.invars[num_res:] + jaxpr_2.invars[:num_res]
uk_out = [pv is not None for pv in out_pvs_2]
in_avals_1, in_avals_2 = unzip2(map(_split_aval, unknowns, jaxpr.in_avals))
out_avals_1, out_avals_2 = unzip2(map(_split_aval, uk_out, jaxpr.out_avals))
# out_avals_1 and in_avals_2 need the residuals added
out_pvs, _ = unzip2(out_pvals)
res_avals = out_pvs[len(jaxpr.out_avals):]
assert len(res_avals) == num_res
out_avals_1 = out_avals_1 + res_avals
in_avals_2 = in_avals_2 + res_avals
typed_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, out_avals_1)
typed_jaxpr_2 = TypedJaxpr(jaxpr_2, (), in_avals_2, out_avals_2)
return typed_jaxpr_1, typed_jaxpr_2, uk_out
def _split_aval(unknown, aval):
return (abstract_unit, aval) if unknown else (aval, abstract_unit)
remat_call_p = core.Primitive('remat_call')
remat_call_p.call_primitive = True
remat_call = partial(core.call_bind, remat_call_p)
remat_call_p.def_custom_bind(remat_call)
remat_call_p.def_impl(core.call_impl)
remat_call_p.multiple_results = True
def _remat_partial_eval(trace, f, tracers, params):
concrete = params['concrete']
# Unlike JaxprTrace.process_call, we want to form a jaxpr for the entirety of
# the function being called, not just for the unknown parts. To do that, we
# instantiate all the input tracers as constants in the jaxpr being formed.
# Those tracers might have concrete avals, and doing abstract interpretation
# on concrete avals engenders a tradeoff: it allows data-dependent Python
# control flow to work, but it can in some cases lead to redundant FLOPs (done
# both in the `bind` call below and the `core.jaxpr_as_fun` call). We use the
# `concrete` parameter to switch this behavior, and if `concrete` is False
# then we raise the avals to the Shaped level.
if concrete:
instantiated_tracers = map(trace.instantiate_const, tracers)
else:
instantiated_tracers = map(trace.instantiate_const_abstracted, tracers)
# Using the instantiated tracers, run call_bind like JaxprTrace.process_call.
in_pvs, in_consts = unzip2(t.pval for t in instantiated_tracers)
fun, aux = partial_eval(f, trace, in_pvs)
if concrete:
# TODO(mattjj): remove `remat_context` when confident no accidental FLOPs
with remat_context():
out_flat = remat_call_p.bind(fun, *in_consts, **params)
else:
out_flat = remat_call_p.bind(fun, *in_consts, **params)
out_pvs, jaxpr, env = aux()
env = map(trace.full_raise, env)
out_pval_consts1, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)])
out_pvals1 = [PartialVal((pv, const)) for pv, const in zip(out_pvs, out_pval_consts1)]
# Since we traced with everything marked as unknown, but we need to know which
# outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns.
in_avals = ([raise_to_shaped(partial_val_aval(*t.pval)) for t in env]
+ [raise_to_shaped(pv) for pv in in_pvs])
out_avals = [raise_to_shaped(pv if pv is not None
else abstract_unit if var is unitvar
else get_aval(var.val) if type(var) is Literal
else get_aval(const))
for var, pv, const in zip(jaxpr.outvars, out_pvs, out_pval_consts1)]
typed_jaxpr = core.TypedJaxpr(jaxpr, consts, in_avals, out_avals)
in_unknowns = [t.pval[0] is not None for t in it.chain(env, tracers)]
jaxpr_1, jaxpr_2, out_unknowns = partial_eval_jaxpr(typed_jaxpr, in_unknowns, False)
num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals)
# First, we prune the jaxpr to be staged out not to have too many outputs.
typed_jaxpr = _dce_jaxpr(typed_jaxpr, out_unknowns)
# Next, we need values for the outputs that should be known. Since consts
# weren't passed through Python for evaluation, we need to evaluate jaxpr_1,
# minus the residual outputs that we don't need. When `concrete=True`, as an
# optimization we can avoid redoing *some* redundant FLOPs, namely those that
# produced concrete avals at the output, simply by using those as computed
# values. For the use case of reverse-mode ad in op-by-op ("eager mode")
# evaluation, all the primal outputs should be concrete (thus not recomputed).
to_compute = [not uk and type(pv) is not ConcreteArray
for uk, pv in zip(out_unknowns, out_pvs)]
jaxpr_1_primals = _dce_jaxpr(jaxpr_1, to_compute + [False] * num_res)
_, in_consts = unzip2(t.pval for t in it.chain(env, tracers))
out_pval_consts2 = core.jaxpr_as_fun(jaxpr_1_primals)(*in_consts)[:-num_res or None]
out_pvals = map(_reconstruct_pval, out_pvals1, out_pval_consts2, out_unknowns)
# Now that we have out_pvals, the rest is just like JaxprTrace.process_call.
instantiated_tracers = env + instantiated_tracers
const_tracers = map(trace.new_instantiated_const, consts)
lifted_jaxpr = convert_constvars_jaxpr(typed_jaxpr.jaxpr)
out_tracers = [JaxprTracer(trace, out_pval, None) for out_pval in out_pvals]
new_params = dict(params, call_jaxpr=lifted_jaxpr)
eqn = new_eqn_recipe(tuple(it.chain(const_tracers, instantiated_tracers)),
out_tracers, remat_call_p, new_params)
for t in out_tracers: t.recipe = eqn
return out_tracers
call_partial_eval_rules[remat_call_p] = _remat_partial_eval
def _dce_jaxpr(typed_jaxpr, outputs):
# This dead-code elimination is pretty rudimentary, and in particular doesn't
# nontrivially DCE through scan, call, or other higher-order primitives.
# TODO(mattjj): better DCE
jaxpr = typed_jaxpr.jaxpr
outvars, out_avals = jaxpr.outvars, typed_jaxpr.out_avals
out_pairs = [(var, aval) if output else (unitvar, core.abstract_unit)
for var, aval, output in zip(outvars, out_avals, outputs)]
new_outvars, new_out_avals = unzip2(out_pairs)
needed_vars = {v for v in new_outvars if type(v) is not Literal}
new_eqns = []
for eqn in jaxpr.eqns[::-1]:
if set(eqn.outvars) & needed_vars:
new_eqns.append(eqn)
needed_vars.update(v for v in eqn.invars if type(v) is not Literal)
new_eqns = new_eqns[::-1]
new_jaxpr = core.Jaxpr(jaxpr.constvars, jaxpr.invars,
new_outvars, new_eqns)
return core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, typed_jaxpr.in_avals,
new_out_avals)
def _reconstruct_pval(pval1, const2, unknown):
pv1, const1 = pval1
if unknown or pv1 is None:
return pval1
else:
if type(pv1) is ConcreteArray:
return PartialVal((None, pv1.val))
else:
return PartialVal((None, const2))
# TODO(mattjj): for https://github.com/google/jax/pull/1749 we allowed
# standard_abstract_eval to perform concrete evaluation (i.e. FLOPs), but we
# don't think it should happen except for in a remat context
@contextlib.contextmanager
def remat_context():
try:
prev_state = _thread_local_state.remat
_thread_local_state.remat = True
yield
finally:
_thread_local_state.remat = prev_state
class _ThreadLocalState(threading.local):
def __init__(self):
self.remat = False
_thread_local_state = _ThreadLocalState()
def move_binders_to_front(typed_jaxpr, to_move):
assert not typed_jaxpr.jaxpr.constvars
assert len(typed_jaxpr.in_avals) == len(to_move)
new_invars = _move_to_front(typed_jaxpr.jaxpr.invars, to_move)
new_jaxpr = core.Jaxpr((), new_invars, typed_jaxpr.jaxpr.outvars,
typed_jaxpr.jaxpr.eqns)
new_in_avals = _move_to_front(typed_jaxpr.in_avals, to_move)
new_typed_jaxpr = core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals,
new_in_avals, typed_jaxpr.out_avals)
return new_typed_jaxpr
def _move_to_front(lst, to_move):
return ([elt for elt, move in zip(lst, to_move) if move] +
[elt for elt, move in zip(lst, to_move) if not move])
| 40.238311
| 99
| 0.703688
|
a65c2015bee745f6c52b2667f26dc17e5598f3af
| 21,311
|
py
|
Python
|
keras/utils/layer_utils_test.py
|
mcx/keras
|
3613c3defc39c236fb1592c4f7ba1a9cc887343a
|
[
"Apache-2.0"
] | null | null | null |
keras/utils/layer_utils_test.py
|
mcx/keras
|
3613c3defc39c236fb1592c4f7ba1a9cc887343a
|
[
"Apache-2.0"
] | null | null | null |
keras/utils/layer_utils_test.py
|
mcx/keras
|
3613c3defc39c236fb1592c4f7ba1a9cc887343a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer_utils."""
import collections
import contextlib
import multiprocessing.dummy
import os
import pickle
import shutil
import sys
import time
import timeit
import numpy as np
import tensorflow.compat.v2 as tf
import keras
from keras.utils import io_utils
from keras.utils import layer_utils
_PICKLEABLE_CALL_COUNT = collections.Counter()
class MyPickleableObject(tf.__internal__.tracking.AutoTrackable):
"""Needed for InterfaceTests.test_property_cache_serialization.
This class must be at the top level. This is a constraint of pickle,
unrelated to `cached_per_instance`.
"""
@property
@layer_utils.cached_per_instance
def my_id(self):
_PICKLEABLE_CALL_COUNT[self] += 1
return id(self)
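# Clarifying note (added): layer_utils.cached_per_instance memoizes per object,
# so repeated reads of .my_id on the same instance hit the cache and
# _PICKLEABLE_CALL_COUNT for that instance stays at 1, which the serialization
# test below relies on.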
class LayerUtilsTest(tf.test.TestCase):
def test_print_summary(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2,
kernel_size=(2, 3),
input_shape=(3, 5, 5),
name="conv",
)
)
model.add(keras.layers.Flatten(name="flat"))
model.add(keras.layers.Dense(5, name="dense"))
file_name = "model_1.txt"
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, "w")
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(model, print_fn=print_to_file)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, "r")
lines = reader.readlines()
reader.close()
self.assertEqual(len(lines), 15)
except ImportError:
pass
def test_print_summary_without_print_fn(self):
model = keras.Sequential(
[keras.layers.Dense(5, input_shape=(10,), name="dense")]
)
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
layer_utils.print_summary(model)
self.assertIn("dense (Dense)", printed.contents())
def test_print_summary_expand_nested(self):
shape = (None, None, 3)
def make_model():
x = inputs = keras.Input(shape)
x = keras.layers.Conv2D(3, 1)(x)
x = keras.layers.BatchNormalization()(x)
return keras.Model(inputs, x)
x = inner_inputs = keras.Input(shape)
x = make_model()(x)
inner_model = keras.Model(inner_inputs, x)
inputs = keras.Input(shape)
model = keras.Model(inputs, inner_model(inputs))
file_name = "model_2.txt"
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, "w")
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model, print_fn=print_to_file, expand_nested=True
)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, "r")
lines = reader.readlines()
reader.close()
check_str = (
'Model: "model_2"\n'
"_________________________________________________________________\n" # noqa: E501
" Layer (type) Output Shape Param # \n" # noqa: E501
"=================================================================\n" # noqa: E501
" input_3 (InputLayer) [(None, None, None, 3)] 0 \n" # noqa: E501
" \n" # noqa: E501
" model_1 (Functional) (None, None, None, 3) 24 \n" # noqa: E501
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n" # noqa: E501
"| input_1 (InputLayer) [(None, None, None, 3)] 0 |\n" # noqa: E501
"| |\n" # noqa: E501
"| model (Functional) (None, None, None, 3) 24 |\n" # noqa: E501
"||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n" # noqa: E501
"|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\n" # noqa: E501
"|| ||\n" # noqa: E501
"|| conv2d (Conv2D) (None, None, None, 3) 12 ||\n" # noqa: E501
"|| ||\n" # noqa: E501
"|| batch_normalization (BatchN (None, None, None, 3) 12 ||\n" # noqa: E501
"|| ormalization) ||\n" # noqa: E501
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n" # noqa: E501
"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n" # noqa: E501
"=================================================================\n" # noqa: E501
"Total params: 24\n"
"Trainable params: 18\n"
"Non-trainable params: 6\n"
"_________________________________________________________________\n" # noqa: E501
)
fin_str = ""
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 25)
except ImportError:
pass
def test_summary_subclass_model_expand_nested(self):
class Sequential(keras.Model):
def __init__(self, *args):
super().__init__()
self.module_list = list(args) if args else []
def call(self, x):
for module in self.module_list:
x = module(x)
return x
class Block(keras.Model):
def __init__(self):
super().__init__()
self.module = Sequential(
keras.layers.Dense(10),
keras.layers.Dense(10),
)
def call(self, input_tensor):
x = self.module(input_tensor)
return x
class Base(keras.Model):
def __init__(self):
super().__init__()
self.module = Sequential(Block(), Block())
def call(self, input_tensor):
x = self.module(input_tensor)
y = self.module(x)
return x, y
class Network(keras.Model):
def __init__(self):
super().__init__()
self.child = Base()
def call(self, inputs):
return self.child(inputs)
net = Network()
inputs = keras.Input(shape=(10,))
outputs = net(inputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
file_name = "model_3.txt"
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, "w")
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model,
line_length=120,
print_fn=print_to_file,
expand_nested=True,
)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, "r")
lines = reader.readlines()
reader.close()
# The output content is slightly different for the input shapes
# between v1 and v2.
if tf.__internal__.tf2.enabled():
self.assertEqual(len(lines), 39)
else:
self.assertEqual(len(lines), 40)
except ImportError:
pass
def test_print_summary_show_trainable(self):
model = keras.Sequential(name="trainable")
untrained = keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name="conv"
)
model.add(untrained)
model.add(keras.layers.Flatten(name="flat"))
model.add(keras.layers.Dense(5, name="dense"))
untrained.trainable = False
file_name = "model_4.txt"
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, "w")
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model, print_fn=print_to_file, show_trainable=True
)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, "r")
lines = reader.readlines()
reader.close()
check_str = (
"Model: "
'"trainable"\n____________________________________________________________________________\n' # noqa: E501
" Layer (type) Output Shape Param # " # noqa: E501
"Trainable "
"\n============================================================================\n" # noqa: E501
" conv (Conv2D) (None, 2, 3, 2) 62 N" # noqa: E501
" \n"
" " # noqa: E501
"\n flat (Flatten) (None, 12) 0 " # noqa: E501
"Y \n"
" " # noqa: E501
"\n dense (Dense) (None, 5) 65 " # noqa: E501
"Y \n"
" " # noqa: E501
"\n============================================================================\nTotal" # noqa: E501
" params: 127\nTrainable params: 65\nNon-trainable params: "
"62\n____________________________________________________________________________\n" # noqa: E501
"____________________________________________________________________________\n" # noqa: E501
)
fin_str = ""
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 15)
except ImportError:
pass
def test_print_summary_expand_nested_show_trainable(self):
shape = (None, None, 3)
def make_model():
x = inputs = keras.Input(shape, name="input2")
untrainable = keras.layers.Conv2D(3, 1)
untrainable.trainable = False
x = untrainable(x)
x = keras.layers.BatchNormalization()(x)
return keras.Model(inputs, x)
x = inner_inputs = keras.Input(shape, name="input1")
x = make_model()(x)
inner_model = keras.Model(inner_inputs, x)
inputs = keras.Input(shape, name="input3")
model = keras.Model(inputs, inner_model(inputs))
file_name = "model_6.txt"
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
fpath = os.path.join(temp_dir, file_name)
writer = open(fpath, "w")
def print_to_file(text):
print(text, file=writer)
try:
layer_utils.print_summary(
model,
print_fn=print_to_file,
expand_nested=True,
show_trainable=True,
)
self.assertTrue(tf.io.gfile.exists(fpath))
writer.close()
reader = open(fpath, "r")
lines = reader.readlines()
reader.close()
check_str = (
"Model: "
'"model_2"\n____________________________________________________________________________\n' # noqa: E501
" Layer (type) Output Shape Param # " # noqa: E501
"Trainable "
"\n============================================================================\n" # noqa: E501
" input3 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" \n"
" " # noqa: E501
"\n model_1 (Functional) (None, None, None, 3) 24 " # noqa: E501
"Y "
"\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n|" # noqa: E501
" input1 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" |\n|"
" " # noqa: E501
"|\n| model (Functional) (None, None, None, 3) 24 " # noqa: E501
"Y "
"|\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n||" # noqa: E501
" input2 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" ||\n||"
" " # noqa: E501
"||\n|| conv2d (Conv2D) (None, None, None, 3) 12 " # noqa: E501
"N ||\n||"
" " # noqa: E501
"||\n|| batch_normalization (BatchN (None, None, None, 3) 12 " # noqa: E501
"Y ||\n|| ormalization)"
" "
"||\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n============================================================================\nTotal" # noqa: E501
" params: 24\nTrainable params: 6\nNon-trainable params: "
"18\n____________________________________________________________________________\n" # noqa: E501
"____________________________________________________________________________\n" # noqa: E501
)
fin_str = ""
for line in lines:
fin_str += line
self.assertIn(fin_str, check_str)
self.assertEqual(len(lines), 25)
except ImportError:
pass
def test_property_cache(self):
test_counter = collections.Counter()
class MyObject(tf.__internal__.tracking.AutoTrackable):
def __init__(self):
super().__init__()
self._frozen = True
def __setattr__(self, key, value):
"""Enforce that cache does not set attribute on MyObject."""
if getattr(self, "_frozen", False):
raise ValueError("Cannot mutate when frozen.")
return super().__setattr__(key, value)
@property
@layer_utils.cached_per_instance
def test_property(self):
test_counter[id(self)] += 1
return id(self)
first_object = MyObject()
second_object = MyObject()
# Make sure the objects return the correct values
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
# Make sure the cache does not share across objects
self.assertNotEqual(
first_object.test_property, second_object.test_property
)
# Check again (Now the values should be cached.)
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
# Count the function calls to make sure the cache is actually being
# used.
self.assertAllEqual(tuple(test_counter.values()), (1, 1))
def test_property_cache_threaded(self):
call_count = collections.Counter()
class MyObject(tf.__internal__.tracking.AutoTrackable):
@property
@layer_utils.cached_per_instance
def test_property(self):
# Random sleeps to ensure that the execution thread changes
# mid-computation.
call_count["test_property"] += 1
time.sleep(np.random.random() + 1.0)
# Use a RandomState which is seeded off the instance's id (the
# mod is because numpy limits the range of seeds) to ensure that
# an instance returns the same value in different threads, but
# different instances return different values.
return int(
np.random.RandomState(id(self) % (2**31)).randint(2**16)
)
def get_test_property(self, _):
"""Function provided to .map for threading test."""
return self.test_property
# Test that multiple threads return the same value. This requires that
# the underlying function is repeatable, as cached_property makes no
# attempt to prioritize the first call.
test_obj = MyObject()
with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:
# Intentionally make a large pool (even when there are only a small
# number of cpus) to ensure that the runtime switches threads.
results = pool.map(test_obj.get_test_property, range(64))
self.assertEqual(len(set(results)), 1)
# Make sure we actually are testing threaded behavior.
self.assertGreater(call_count["test_property"], 1)
# Make sure new threads still cache hit.
with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:
start_time = (
timeit.default_timer()
) # Don't time pool instantiation.
results = pool.map(test_obj.get_test_property, range(4))
total_time = timeit.default_timer() - start_time
# Note(taylorrobie): The reason that it is safe to time a unit test is
# that a cache hit will be << 1 second, and a cache miss is guaranteed
# to be >= 1 second. Empirically confirmed by 100,000 runs with no
# flakes.
self.assertLess(total_time, 0.95)
def test_property_cache_serialization(self):
# Reset call count. .keys() must be wrapped in a list, because otherwise
# we would mutate the iterator while iterating.
for k in list(_PICKLEABLE_CALL_COUNT.keys()):
_PICKLEABLE_CALL_COUNT.pop(k)
first_instance = MyPickleableObject()
self.assertEqual(id(first_instance), first_instance.my_id)
# Test that we can pickle and un-pickle
second_instance = pickle.loads(pickle.dumps(first_instance))
self.assertEqual(id(second_instance), second_instance.my_id)
self.assertNotEqual(first_instance.my_id, second_instance.my_id)
# Make sure de-serialized object uses the cache.
self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)
# Make sure the decorator cache is not being serialized with the object.
expected_size = len(pickle.dumps(second_instance))
for _ in range(5):
# Add some more entries to the cache.
_ = MyPickleableObject().my_id
self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)
size_check_instance = MyPickleableObject()
_ = size_check_instance.my_id
self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))
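# --- Hedged illustration (not part of the original test file) ---------------
# A minimal sketch of a per-instance property cache with the behaviour the
# tests above verify: results are cached once per owning object, not shared
# across instances, and not serialized with them. This is an assumption about
# how `layer_utils.cached_per_instance` could be built, not its actual
# implementation; the name `cached_per_instance_sketch` is hypothetical.
import functools
import weakref


def cached_per_instance_sketch(f):
    """Cache the wrapped zero-argument method's result once per instance."""
    cache = weakref.WeakKeyDictionary()

    @functools.wraps(f)
    def wrapped(self):
        if self not in cache:
            cache[self] = f(self)
        return cache[self]

    # The cache lives on the wrapper, not the instance, so pickling an object
    # does not grow its serialized size (see test_property_cache_serialization).
    wrapped.cache = cache
    return wrapped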
if __name__ == "__main__":
tf.test.main()
| 42.367793
| 275
| 0.49932
|
c691947f2362cc1d99855bb223faba82c541d1da
| 1,989
|
py
|
Python
|
python/vsi/test/test_dir_util.py
|
NoahRJohnson/vsi_common
|
e19e14bc276b20b9dcfd62e3e38abfd6f3487502
|
[
"MIT"
] | null | null | null |
python/vsi/test/test_dir_util.py
|
NoahRJohnson/vsi_common
|
e19e14bc276b20b9dcfd62e3e38abfd6f3487502
|
[
"MIT"
] | null | null | null |
python/vsi/test/test_dir_util.py
|
NoahRJohnson/vsi_common
|
e19e14bc276b20b9dcfd62e3e38abfd6f3487502
|
[
"MIT"
] | null | null | null |
import os
import unittest
import unittest.mock
from vsi.tools.dir_util import (
find_file_in_path
)
from vsi.test.utils import TestCase
class DirTest(TestCase):
pass
class FindFileInPath(DirTest):
def test_path_argument(self):
# Empty lists
self.assertIsNone(find_file_in_path('foo.txt', ''))
self.assertIsNone(find_file_in_path('foo.txt', os.pathsep))
open(os.path.join(self.temp_dir.name, 'bar.txt',), 'wb').close()
# Just the dir
self.assertIsNone(find_file_in_path('foo.txt', self.temp_dir.name))
self.assertEqual(find_file_in_path('bar.txt', self.temp_dir.name),
os.path.join(self.temp_dir.name, 'bar.txt'))
# Multiple
self.assertEqual(find_file_in_path('bar.txt',
os.pathsep.join([
os.path.join(self.temp_dir.name, '1'),
os.path.join(self.temp_dir.name, '2'),
self.temp_dir.name,
os.path.join(self.temp_dir.name, '3')
])),
os.path.join(self.temp_dir.name, 'bar.txt'))
def test_env(self):
# Empty lists
with unittest.mock.patch.dict(os.environ, {'PATH': ""}):
self.assertIsNone(find_file_in_path('foo.txt'))
with unittest.mock.patch.dict(os.environ, {'PATH': os.pathsep}):
self.assertIsNone(find_file_in_path('foo.txt'))
open(os.path.join(self.temp_dir.name, 'bar.txt',), 'wb').close()
# Just the dir
with unittest.mock.patch.dict(os.environ, {'PATH': self.temp_dir.name}):
self.assertIsNone(find_file_in_path('foo.txt'))
self.assertEqual(find_file_in_path('bar.txt'),
os.path.join(self.temp_dir.name, 'bar.txt'))
# Multiple dirs
with unittest.mock.patch.dict(os.environ, {'PATH':
os.pathsep.join([
os.path.join(self.temp_dir.name, '1'),
os.path.join(self.temp_dir.name, '2'),
self.temp_dir.name,
os.path.join(self.temp_dir.name, '3')])}):
self.assertEqual(find_file_in_path('bar.txt'),
os.path.join(self.temp_dir.name, 'bar.txt'))
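# --- Hedged illustration (not part of the original test file) ---------------
# A minimal sketch of the behaviour these tests expect from find_file_in_path:
# walk an os.pathsep-separated path string (defaulting to the PATH environment
# variable) and return the first existing match, or None. This is inferred
# from the assertions above, not the actual vsi.tools.dir_util implementation;
# the _sketch suffix marks it as such.
def find_file_in_path_sketch(filename, path=None):
    if path is None:
        path = os.environ.get('PATH', '')
    for directory in path.split(os.pathsep):
        if not directory:
            continue
        candidate = os.path.join(directory, filename)
        if os.path.isfile(candidate):
            return candidate
    return None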
| 33.15
| 76
| 0.645048
|
5d5f5f4e6c3ad67e2d0bd90d5ed71f2a0c8b3700
| 868
|
py
|
Python
|
read_json_url_pyspark.py
|
arturosolutions/earthquakes
|
1afda35a373b3d31d6e912db4d17d3b66e70bc9c
|
[
"MIT"
] | null | null | null |
read_json_url_pyspark.py
|
arturosolutions/earthquakes
|
1afda35a373b3d31d6e912db4d17d3b66e70bc9c
|
[
"MIT"
] | null | null | null |
read_json_url_pyspark.py
|
arturosolutions/earthquakes
|
1afda35a373b3d31d6e912db4d17d3b66e70bc9c
|
[
"MIT"
] | null | null | null |
import json
from urllib.request import urlopen
from pyspark.shell import spark, sqlContext
url = "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.geojson"
def load_json_data(json_url):
"""
    :param json_url: URL of the USGS GeoJSON feed to download
    :return: the earthquakes feed parsed into Python objects
    """
    response = urlopen(json_url)
    source = response.read().decode("utf-8")
    # json.dumps() followed by json.loads() on a string is a no-op round trip;
    # parse the downloaded payload directly instead.
    return json.loads(source)
def main():
"""
:return: Place and magnitude, where magnitude is greater than 1.0.
"""
    # spark.read.json() expects a path or an RDD/list of JSON strings rather
    # than a parsed dict, so re-serialize the payload into a one-element RDD.
    df = sqlContext.read.json(
        spark.sparkContext.parallelize([json.dumps(load_json_data(url))]))
df.createOrReplaceTempView('earthquakes')
earthquakes_df = spark.sql("SELECT properties.mag, properties.place "
"FROM earthquakes "
"WHERE properties.mag > 1.0")
earthquakes_df.show()
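# --- Hedged illustration (not part of the original script) ------------------
# The same filter expressed with the DataFrame API instead of a temp view and
# Spark SQL, assuming the feed exposes a `properties` struct the way the SQL
# query in main() does. Optional sketch, not a replacement for main().
def main_dataframe_api():
    df = sqlContext.read.json(
        spark.sparkContext.parallelize([json.dumps(load_json_data(url))]))
    (df.where("properties.mag > 1.0")
       .select("properties.mag", "properties.place")
       .show())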
if __name__ == '__main__':
main()
| 24.8
| 83
| 0.642857
|
ced1a0f64237267c1d2a4f4c925c4f5d38bf8016
| 12,038
|
py
|
Python
|
angr/procedures/definitions/win32_mrmsupport.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_mrmsupport.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_mrmsupport.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("mrmsupport.dll")
prototypes = \
{
#
'CreateResourceIndexer': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["projectRoot", "extensionDllPath", "ppResourceIndexer"]),
#
'DestroyResourceIndexer': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["resourceIndexer"]),
#
'IndexFilePath': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="IndexedResourceQualifier", pack=False, align=None), offset=0), label="LPArray", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["resourceIndexer", "filePath", "ppResourceUri", "pQualifierCount", "ppQualifiers"]),
#
'DestroyIndexedResults': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="IndexedResourceQualifier", pack=False, align=None), label="LPArray", offset=0)], SimTypeBottom(label="Void"), arg_names=["resourceUri", "qualifierCount", "qualifiers"]),
#
'MrmCreateResourceIndexer': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["packageFamilyName", "projectRoot", "platformVersion", "defaultQualifiers", "indexer"]),
#
'MrmCreateResourceIndexerFromPreviousSchemaFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["projectRoot", "platformVersion", "defaultQualifiers", "schemaFile", "indexer"]),
#
'MrmCreateResourceIndexerFromPreviousPriFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["projectRoot", "platformVersion", "defaultQualifiers", "priFile", "indexer"]),
#
'MrmCreateResourceIndexerFromPreviousSchemaData': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["projectRoot", "platformVersion", "defaultQualifiers", "schemaXmlData", "schemaXmlSize", "indexer"]),
#
'MrmCreateResourceIndexerFromPreviousPriData': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["projectRoot", "platformVersion", "defaultQualifiers", "priData", "priSize", "indexer"]),
#
'MrmIndexString': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "resourceUri", "resourceString", "qualifiers"]),
#
'MrmIndexEmbeddedData': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "resourceUri", "embeddedData", "embeddedDataSize", "qualifiers"]),
#
'MrmIndexFile': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "resourceUri", "filePath", "qualifiers"]),
#
'MrmIndexFileAutoQualifiers': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "filePath"]),
#
'MrmIndexResourceContainerAutoQualifiers': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "containerPath"]),
#
'MrmCreateResourceFile': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypeInt(signed=False, label="MrmPackagingMode"), SimTypeInt(signed=False, label="MrmPackagingOptions"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "packagingMode", "packagingOptions", "outputDirectory"]),
#
'MrmCreateResourceFileInMemory': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypeInt(signed=False, label="MrmPackagingMode"), SimTypeInt(signed=False, label="MrmPackagingOptions"), SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer", "packagingMode", "packagingOptions", "outputPriData", "outputPriSize"]),
#
'MrmPeekResourceIndexerMessages': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None), SimTypePointer(SimTypePointer(SimStruct({"severity": SimTypeInt(signed=False, label="MrmResourceIndexerMessageSeverity"), "id": SimTypeInt(signed=False, label="UInt32"), "text": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="MrmResourceIndexerMessage", pack=False, align=None), offset=0), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["handle", "messages", "numMsgs"]),
#
'MrmDestroyIndexerAndMessages': SimTypeFunction([SimStruct({"handle": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="MrmResourceIndexerHandle", pack=False, align=None)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexer"]),
#
'MrmFreeMemory': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["data"]),
#
'MrmDumpPriFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmDumpType"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexFileName", "schemaPriFile", "dumpType", "outputXmlFile"]),
#
'MrmDumpPriFileInMemory': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="MrmDumpType"), SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["indexFileName", "schemaPriFile", "dumpType", "outputXmlData", "outputXmlSize"]),
#
'MrmDumpPriDataInMemory': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="MrmDumpType"), SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["inputPriData", "inputPriSize", "schemaPriData", "schemaPriSize", "dumpType", "outputXmlData", "outputXmlSize"]),
#
'MrmCreateConfig': SimTypeFunction([SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["platformVersion", "defaultQualifiers", "outputXmlFile"]),
#
'MrmCreateConfigInMemory': SimTypeFunction([SimTypeInt(signed=False, label="MrmPlatformVersion"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Byte"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["platformVersion", "defaultQualifiers", "outputXmlData", "outputXmlSize"]),
}
lib.set_prototypes(prototypes)
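# --- Hedged illustration (not part of the generated definitions file) -------
# A small sketch of how one of the prototypes declared above could be
# inspected after registration, assuming SimTypeFunction exposes `args`,
# `arg_names` and `returnty` as they were supplied when the prototypes were
# constructed. 'MrmFreeMemory' is simply the smallest entry in the table.
def _describe_mrm_free_memory_prototype():
    proto = prototypes['MrmFreeMemory']
    return {
        'args': [str(a) for a in proto.args],
        'arg_names': list(proto.arg_names),
        'returns': str(proto.returnty),
    }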
| 171.971429
| 695
| 0.745971
|
13c9c188645002aa53364964f14ae72fff1a72c2
| 13,655
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20201101/ddos_protection_plan.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20201101/ddos_protection_plan.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20201101/ddos_protection_plan.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DdosProtectionPlanArgs', 'DdosProtectionPlan']
@pulumi.input_type
class DdosProtectionPlanArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
ddos_protection_plan_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DdosProtectionPlan resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] ddos_protection_plan_name: The name of the DDoS protection plan.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if ddos_protection_plan_name is not None:
pulumi.set(__self__, "ddos_protection_plan_name", ddos_protection_plan_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="ddosProtectionPlanName")
def ddos_protection_plan_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the DDoS protection plan.
"""
return pulumi.get(self, "ddos_protection_plan_name")
@ddos_protection_plan_name.setter
def ddos_protection_plan_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ddos_protection_plan_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class DdosProtectionPlan(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ddos_protection_plan_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
A DDoS protection plan in a resource group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ddos_protection_plan_name: The name of the DDoS protection plan.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DdosProtectionPlanArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A DDoS protection plan in a resource group.
:param str resource_name: The name of the resource.
:param DdosProtectionPlanArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DdosProtectionPlanArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ddos_protection_plan_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DdosProtectionPlanArgs.__new__(DdosProtectionPlanArgs)
__props__.__dict__["ddos_protection_plan_name"] = ddos_protection_plan_name
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_networks"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20180201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180201:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20180401:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180401:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20180601:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180601:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20180701:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180701:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20180801:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180801:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20181001:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181001:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20181101:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181101:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20181201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181201:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190201:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190401:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190401:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190601:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190601:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190701:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190701:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190801:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190801:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20190901:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190901:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20191101:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20191101:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20191201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20191201:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200301:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200301:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200401:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200401:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200501:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200501:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200601:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200601:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200701:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200701:DdosProtectionPlan"), pulumi.Alias(type_="azure-native:network/v20200801:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200801:DdosProtectionPlan")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DdosProtectionPlan, __self__).__init__(
'azure-native:network/v20201101:DdosProtectionPlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DdosProtectionPlan':
"""
Get an existing DdosProtectionPlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DdosProtectionPlanArgs.__new__(DdosProtectionPlanArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_networks"] = None
return DdosProtectionPlan(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the DDoS protection plan resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
        The resource GUID property of the DDoS protection plan resource. It uniquely identifies the resource, even if the user changes its name or migrates the resource across subscriptions or resource groups.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworks")
def virtual_networks(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
"""
The list of virtual networks associated with the DDoS protection plan resource. This list is read-only.
"""
return pulumi.get(self, "virtual_networks")
| 53.54902
| 3,488
| 0.690809
|
d4b42f38f2dbf3b438ec751a66430a3f5ad9d24c
| 3,545
|
py
|
Python
|
TP_04/ejercicio_6/script_1/tokenizer.py
|
AgustinNormand/recuperacion-de-informacion
|
511ff6a83a929621792ee684aa5a55bcad512c9d
|
[
"MIT"
] | null | null | null |
TP_04/ejercicio_6/script_1/tokenizer.py
|
AgustinNormand/recuperacion-de-informacion
|
511ff6a83a929621792ee684aa5a55bcad512c9d
|
[
"MIT"
] | null | null | null |
TP_04/ejercicio_6/script_1/tokenizer.py
|
AgustinNormand/recuperacion-de-informacion
|
511ff6a83a929621792ee684aa5a55bcad512c9d
|
[
"MIT"
] | null | null | null |
from constants import *
from entity_extractor import Entity_Extractor
from normalizer import Normalizer
class Tokenizer:
def __init__(self):
self.vocabulary = {}
self.inverted_index = {}
self.index = {}
self.palabras_vacias = []
self.load_empty_words()
self.normalizer = Normalizer(STEMMING_LANGUAGE)
if EXTRACT_ENTITIES:
self.entities_extractor = Entity_Extractor(STEMMING_LANGUAGE)
def load_empty_words(self):
if EMPTY_WORDS_PATH:
with open(EMPTY_WORDS_PATH, "r") as f:
for line in f.readlines():
self.palabras_vacias.append(line.strip())
def valid_length(self, token):
return len(token) >= MIN_TERM_LENGTH and len(token) <= MAX_TERM_LENGTH
    def palabra_vacia(self, token):
        # Plain membership test. The original early exit on the first stop
        # word longer than the token is only valid for a list sorted by
        # length, which load_empty_words() does not guarantee.
        return token in self.palabras_vacias
def is_term(self, token):
if not self.valid_length(token):
return False
return True
def doc_id_present(self, term, doc_id):
for stored_doc_id, _, _ in self.inverted_index[term]:
if stored_doc_id == doc_id:
return True
return False
def increment_frequency(self, term, doc_id, word_position):
for value in self.inverted_index[term]:
if value[0] == doc_id:
value[1] += 1
value[2].append(word_position)
def increment_index_frequency(self, doc_id, term):
try:
self.index[doc_id][term] += 1
        except KeyError:
self.index[doc_id][term] = 1
def add_term(self, term, doc_id, word_position):
self.increment_index_frequency(doc_id, term)
if term not in self.inverted_index.keys():
self.inverted_index[term] = [[doc_id, 1, [word_position]]]
return
if self.doc_id_present(term, doc_id):
self.increment_frequency(term, doc_id, word_position)
else:
self.inverted_index[term].append([doc_id, 1, [word_position]])
def increment_vocabulary(self, file_terms):
for term in file_terms:
try:
self.vocabulary[term] += 1
except:
self.vocabulary[term] = 1
def tokenize_file(self, filename, doc_id):
self.index[doc_id] = {}
with open(filename, "r", encoding=CORPUS_FILES_ENCODING) as f:
word_position = 0
for line in f.readlines():
if EXTRACT_ENTITIES:
processed_line, entities = self.entities_extractor.extract_entities(
line
)
for entity in entities:
                        # add_term() requires a word position; record the
                        # entity at the current position in the document.
                        self.add_term(entity, doc_id, word_position)
else:
processed_line = line
for word in processed_line.split():
token = self.normalizer.normalize(word)
if not self.palabra_vacia(token):
                        # word_position += 1  # the counter used to be incremented here
                        if self.valid_length(token):
                            word_position += 1  # incrementing here, after the length check, looks correct
                            self.add_term(token, doc_id, word_position)
self.increment_vocabulary(list(self.index[doc_id].keys()))
def get_results(self):
return [self.vocabulary, self.inverted_index]
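# --- Hedged illustration (not part of the original module) ------------------
# A small sketch of how this Tokenizer is presumably driven: one
# tokenize_file() call per corpus document, after which get_results() returns
# the vocabulary (term -> document frequency) and the inverted index
# (term -> [[doc_id, term_frequency, [positions]], ...]). File paths and
# doc ids are placeholders.
def _index_corpus_sketch(filenames):
    tokenizer = Tokenizer()
    for doc_id, filename in enumerate(filenames):
        tokenizer.tokenize_file(filename, doc_id)
    vocabulary, inverted_index = tokenizer.get_results()
    return vocabulary, inverted_index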
| 34.754902
| 88
| 0.57433
|
33e56bcd81788bf507a779a7beabc0eea256f458
| 12,217
|
py
|
Python
|
px/px_ipc_map.py
|
walles/px
|
e513e51de56d581b8ea1483acebf24547caec86d
|
[
"MIT"
] | 149
|
2016-03-27T20:39:37.000Z
|
2022-03-01T07:53:42.000Z
|
px/px_ipc_map.py
|
walles/px
|
e513e51de56d581b8ea1483acebf24547caec86d
|
[
"MIT"
] | 85
|
2016-06-06T17:33:54.000Z
|
2022-02-14T06:06:58.000Z
|
px/px_ipc_map.py
|
walles/px
|
e513e51de56d581b8ea1483acebf24547caec86d
|
[
"MIT"
] | 9
|
2016-05-05T11:22:13.000Z
|
2021-03-04T12:03:59.000Z
|
import sys
if sys.version_info.major >= 3:
# For mypy PEP-484 static typing validation
from . import px_file # NOQA
from . import px_process # NOQA
from typing import Set # NOQA
from typing import List # NOQA
from typing import Dict # NOQA
from typing import Text # NOQA
from typing import AbstractSet # NOQA
from typing import MutableMapping # NOQA
from typing import Iterable # NOQA
from typing import TypeVar # NOQA
from typing import Optional # NOQA
T = TypeVar("T")
S = TypeVar("S")
FILE_TYPES = ["PIPE", "FIFO", "unix", "IPv4", "IPv6"]
class IpcMap(object):
"""
This is a map of process->[channels], where "process" is a process we have
IPC communication open with, and a channel is a socket or a pipe that we
have open to that process.
After creating an IpcMap, you can access:
* ipc_map.network_connections: This is a list of non-IPC network connections
* ipc_map.keys(): A set of other px_processes this process is connected to
* ipc_map[px_process]: A set of px_files through which we're connected to the
px_process
"""
def __init__(
self,
process, # type: px_process.PxProcess
files, # type: Iterable[px_file.PxFile]
processes, # type: Iterable[px_process.PxProcess]
is_root, # type: bool
):
# type: (...) -> None
# On Linux, lsof reports the same open file once per thread of a
# process. Putting the files in a set gives us each file only once.
files = set(files)
self._own_files = list(
filter(lambda f: f.pid == process.pid and f.fd is not None, files)
)
# Only deal with IPC related files
self.files = list(filter(lambda f: f.type in FILE_TYPES, files))
self.process = process
self.processes = processes
self.ipc_files_for_process = list(
filter(lambda f: f.pid == self.process.pid, self.files)
)
self._map = {} # type: MutableMapping[PeerProcess, Set[px_file.PxFile]]
self._create_mapping()
self.fds = self._create_fds(is_root)
def _create_fds(self, is_root):
# type: (bool) -> Dict[int, str]
"""
Describe standard FDs open by this process; the mapping is from FD number to
FD description.
The returned dict will always contain entries for 0, 1 and 2.
In theory this method could easily be modified to go through all fds, not
just the standard ones, but that can get us lots more DNS lookups, and
take a lot of time. If you do want to try it, just drop all the "if fd
not in [0, 1, 2]: continue"s and benchmark it on not-cached IP addresses.
"""
fds = dict()
if not self._own_files:
for fd in [0, 1, 2]:
fds[fd] = "<unavailable, running px as root might help>"
return fds
for fd in [0, 1, 2]:
fds[fd] = "<closed>"
for file in self._own_files:
if file.fd not in [0, 1, 2]:
continue
fds[file.fd] = str(file)
if file.type in FILE_TYPES:
excuse = "destination not found, try running px as root"
if is_root:
excuse = "not connected"
name = file.name # type: Optional[str]
if not name:
name = file.device
if name and name.startswith("->"):
name = name[2:]
fds[file.fd] = "[{}] <{}> ({})".format(
file.type,
excuse,
name,
)
# Traverse network connections and update FDs as required
for network_connection in self.network_connections:
if network_connection.fd is None:
continue
if network_connection.fd not in [0, 1, 2]:
continue
fds[network_connection.fd] = str(network_connection)
# Traverse our IPC structure and update FDs as required
for target in self.keys():
for link in self[target]:
if link.fd is None:
# No FD, never mind
continue
if link.fd not in [0, 1, 2]:
continue
# FIXME: If this is a PIPE/FIFO leading to ourselves we should say that
# FIXME: If this is an unconnected PIPE/FIFO, we should say that
name = link.name
if name and name.startswith("->"):
name = name[2:]
fds[link.fd] = "[{}] -> {} ({})".format(link.type, str(target), name)
return fds
def _create_mapping(self):
# type: () -> None
self._create_indices()
unknown = PeerProcess(
name="UNKNOWN destinations: Running with sudo might help find out where these go"
)
network_connections = set()
for file in self.ipc_files_for_process:
if file.type in ["FIFO", "PIPE"] and not file.fifo_id():
# Unidentifiable FIFO, just ignore this
continue
other_end_pids = self._get_other_end_pids(file)
if not other_end_pids:
if file.type in ["IPv4", "IPv6"]:
# This is a remote connection
network_connections.add(file)
continue
self.add_ipc_entry(unknown, file)
continue
for other_end_pid in other_end_pids:
if other_end_pid == self.process.pid:
# Talking to ourselves, never mind
continue
other_end_process = self._pid2process.get(other_end_pid)
if not other_end_process:
other_end_process = PeerProcess(pid=other_end_pid)
self._pid2process[other_end_pid] = other_end_process
self.add_ipc_entry(other_end_process, file)
self.network_connections = network_connections # type: Set[px_file.PxFile]
def _create_indices(self):
# type: () -> None
"""
Creates indices used by _get_other_end_pids()
"""
self._pid2process = create_pid2process(self.processes)
self._device_to_pids = {} # type: MutableMapping[str, List[int]]
self._name_to_pids = {} # type: MutableMapping[str, List[int]]
self._name_to_files = {} # type: MutableMapping[str, List[px_file.PxFile]]
self._device_number_to_files = (
{}
) # type: MutableMapping[int, List[px_file.PxFile]]
self._fifo_id_and_access_to_pids = {} # type: MutableMapping[str, List[int]]
self._local_endpoint_to_pid = {} # type: MutableMapping[str, int]
for file in self.files:
if file.device is not None:
add_arraymapping(self._device_to_pids, file.device, file.pid)
if file.name:
add_arraymapping(self._name_to_pids, file.name, file.pid)
add_arraymapping(self._name_to_files, file.name, file)
local_endpoint, remote = file.get_endpoints()
if local_endpoint:
self._local_endpoint_to_pid[local_endpoint] = file.pid
device_number = file.device_number()
if device_number is not None:
add_arraymapping(self._device_number_to_files, device_number, file)
if file.access is not None and file.type == "FIFO":
fifo_id = file.fifo_id()
if fifo_id:
add_arraymapping(
self._fifo_id_and_access_to_pids,
fifo_id + file.access,
file.pid,
)
def _get_other_end_pids(self, file):
# type: (px_file.PxFile) -> Iterable[int]
"""Locate the other end of a pipe / domain socket"""
if file.type in ["IPv4", "IPv6"]:
local, remote = file.get_endpoints()
if remote is None:
return []
pid = self._local_endpoint_to_pid.get(remote)
if pid:
return [pid]
else:
return []
name = file.name
if name and name.startswith("->"):
# With lsof 4.87 on OS X 10.11.3, pipe and socket names start with "->",
# but their endpoint names don't. Strip initial "->" from name before
# scanning for it.
name = name[2:]
file_device_with_arrow = None
if file.device is not None:
file_device_with_arrow = "->" + file.device
pids = set() # type: Set[int]
# The other end of the socket / pipe is encoded in the DEVICE field of
# lsof's output ("view source" in your browser to see the conversation):
# http://www.justskins.com/forums/lsof-find-both-endpoints-of-a-unix-socket-123037.html
if name:
matching_pids = self._device_to_pids.get(name)
if matching_pids:
pids.update(matching_pids)
if file_device_with_arrow:
matching_pids = self._name_to_pids.get(file_device_with_arrow)
if matching_pids:
pids.update(matching_pids)
device_number = file.device_number()
if device_number:
matching_files = self._device_number_to_files.get(device_number)
if not matching_files:
matching_files = []
for candidate in matching_files:
if candidate.name == file.name:
pids.add(candidate.pid)
fifo_id = file.fifo_id()
if fifo_id and file.access and file.type == "FIFO":
# On Linux, this is how we trace FIFOs
opposing_access = {"r": "w", "w": "r"}.get(file.access)
if opposing_access:
name_and_opposing_access = fifo_id + opposing_access
matching_pids = self._fifo_id_and_access_to_pids.get(
name_and_opposing_access
)
if matching_pids:
pids.update(matching_pids)
return pids
def add_ipc_entry(self, process, file):
# type: (PeerProcess, px_file.PxFile) -> None
"""
Note that we're connected to process via file
"""
if process not in self._map:
self._map[process] = set()
self._map[process].add(file)
def keys(self):
# type: () -> Iterable[PeerProcess]
"""
Returns a set of other px_processes this process is connected to
"""
return self._map.keys()
def __getitem__(self, process):
# type: (PeerProcess) -> Set[px_file.PxFile]
"""
Returns a set of px_files through which we're connected to the px_process
"""
return self._map.__getitem__(process)
class PeerProcess(object):
def __init__(self, name=None, pid=None):
# type: (Optional[Text], Optional[int]) -> None
if not name:
if pid is None:
raise ValueError("Either pid, name or both must be set")
name = "PID " + str(pid)
else:
if pid is not None:
name += "(" + str(pid) + ")"
self.name = name # type: Text
self.pid = pid # type: Optional[int]
def __repr__(self):
return self.name
def __hash__(self):
return self.name.__hash__()
def __str__(self):
return self.name
def create_pid2process(processes):
# type: (Iterable[px_process.PxProcess]) -> MutableMapping[int, PeerProcess]
pid2process = {} # type: MutableMapping[int, PeerProcess]
for process in processes:
# Guard against duplicate PIDs
assert process.pid not in pid2process
pid2process[process.pid] = PeerProcess(name=process.command, pid=process.pid)
return pid2process
def add_arraymapping(mapping, key, value):
# type: (MutableMapping[S, List[T]], S, T) -> None
array = mapping.setdefault(key, [])
array.append(value)
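# --- Hedged illustration (not part of the original module) ------------------
# A minimal sketch of how an IpcMap might be consumed, assuming `process`,
# `files` and `processes` were already collected via px_process / px_file
# (this module only builds the map; it does not gather that data itself).
def _describe_ipc_sketch(process, files, processes, is_root=False):
    ipc_map = IpcMap(process, files, processes, is_root)
    lines = []
    for peer in ipc_map.keys():
        lines.append("{}: {} channel(s)".format(peer, len(ipc_map[peer])))
    lines.append(
        "non-IPC network connections: {}".format(len(ipc_map.network_connections)))
    return lines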
| 35.514535
| 95
| 0.569207
|
e617946da1fc64ae6a87bba9a763fabec813dc1f
| 3,848
|
py
|
Python
|
tests/test_hdf5.py
|
Pooppap/io
|
4ad8e0a4cdabf17f43c81cae0907ac4a1af8c251
|
[
"Apache-2.0"
] | 10
|
2019-04-19T01:21:09.000Z
|
2021-11-17T11:25:52.000Z
|
tests/test_hdf5.py
|
Pooppap/io
|
4ad8e0a4cdabf17f43c81cae0907ac4a1af8c251
|
[
"Apache-2.0"
] | null | null | null |
tests/test_hdf5.py
|
Pooppap/io
|
4ad8e0a4cdabf17f43c81cae0907ac4a1af8c251
|
[
"Apache-2.0"
] | 2
|
2019-04-25T14:31:06.000Z
|
2019-06-25T03:47:34.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for HDF5Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow
tensorflow.compat.v1.disable_eager_execution()
from tensorflow import dtypes # pylint: disable=wrong-import-position
from tensorflow import errors # pylint: disable=wrong-import-position
from tensorflow import test # pylint: disable=wrong-import-position
from tensorflow.compat.v1 import data # pylint: disable=wrong-import-position
import tensorflow_io.hdf5 as hdf5_io # pylint: disable=wrong-import-position
class HDF5DatasetTest(test.TestCase):
"""HDF5DatasetTest"""
def test_hdf5_invalid_dataset(self):
"""test_hdf5_invalid_dataset"""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_hdf5", "tdset.h5")
filename = "file://" + filename
dataset = hdf5_io.HDF5Dataset(
[filename],
['/invalid', '/invalid2'],
[dtypes.int32, dtypes.int32],
[(1, 20), (1, 30)])
iterator = data.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaisesRegexp(
errors.InvalidArgumentError, "unable to open dataset /invalid"):
sess.run(get_next)
def test_hdf5_dataset_int32(self):
"""Test case for HDF5Dataset."""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_hdf5", "tdset.h5")
filename = "file://" + filename
columns = ['/dset1']
output_types = [dtypes.int32]
output_shapes = [(1, 20)]
dataset = hdf5_io.HDF5Dataset(
[filename], columns, output_types, output_shapes)
iterator = data.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
v0 = list([np.asarray([v for v in range(i, i + 20)])])
vv = sess.run(get_next)
self.assertAllEqual(v0, vv)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_hdf5_dataset(self):
"""Test case for HDF5Dataset."""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_hdf5", "tdset.h5")
filename = "file://" + filename
columns = ['/dset2']
output_types = [dtypes.float32]
output_shapes = [(1, 20)]
dataset = hdf5_io.HDF5Dataset(
[filename], columns, output_types, output_shapes, batch=1)
iterator = data.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(30):
v0 = list(
[np.asarray([[i + 1e-04 * v for v in range(20)]],
dtype=np.float32)])
vv = sess.run(get_next)
self.assertAllEqual(v0, vv)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
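# --- Hedged illustration (not part of the original test file) ---------------
# The HDF5Dataset construction pattern exercised above, outside the test
# harness. The file name is a placeholder; the column, dtype and shape mirror
# the '/dset1' case from test_hdf5_dataset_int32.
def _build_example_dataset(filename):
    return hdf5_io.HDF5Dataset(
        [filename], ['/dset1'], [dtypes.int32], [(1, 20)])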
if __name__ == "__main__":
test.main()
| 34.981818
| 80
| 0.66684
|
79a70782d1a975f42b977419b8cecda4def2d1d3
| 51,613
|
py
|
Python
|
src/pyrad_proc/pyrad/prod/process_product.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 9
|
2021-02-22T15:34:37.000Z
|
2022-03-29T13:16:25.000Z
|
src/pyrad_proc/pyrad/prod/process_product.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 15
|
2021-02-08T10:16:41.000Z
|
2022-03-31T09:26:26.000Z
|
src/pyrad_proc/pyrad/prod/process_product.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 2
|
2021-02-08T09:44:40.000Z
|
2021-03-24T14:56:31.000Z
|
"""
pyrad.prod.process_product
==========================
Functions for obtaining Pyrad products from the datasets
.. autosummary::
:toctree: generated/
generate_occurrence_products
generate_cosmo_coord_products
generate_cosmo_to_radar_products
generate_sun_hits_products
generate_qvp_products
generate_ml_products
generate_centroids_products
"""
from copy import deepcopy
from warnings import warn
import os
import numpy as np
import pyart
from .process_vol_products import generate_vol_products
from ..io.io_aux import get_fieldname_pyart
from ..io.io_aux import get_save_dir, make_filename
from ..io.read_data_sun import read_sun_retrieval
from ..io.read_data_other import read_ml_ts
from ..io.write_data import write_sun_hits, write_sun_retrieval
from ..io.write_data import write_excess_gates, write_ts_ml, write_histogram
from ..io.write_data import write_timeseries_point, write_centroids
from ..graph.plots import plot_sun_hits, plot_histogram2, plot_scatter
from ..graph.plots import plot_centroids
from ..graph.plots_timeseries import plot_sun_retrieval_ts, plot_ml_ts
from ..graph.plots_vol import plot_fixed_rng, plot_fixed_rng_sun
from ..util.radar_utils import create_sun_hits_field, compute_histogram
from ..util.radar_utils import create_sun_retrieval_field
import matplotlib as mpl
mpl.use('Agg')
# Increase a bit font size
mpl.rcParams.update({'font.size': 16})
mpl.rcParams.update({'font.family': "sans-serif"})
import matplotlib.pyplot as plt
def generate_occurrence_products(dataset, prdcfg):
"""
generates occurrence products. Accepted product types:
'WRITE_EXCESS_GATES': Write the data that identifies radar gates
with clutter that has a frequency of occurrence above a certain
threshold.
User defined parameters:
quant_min: float
Minimum frequency of occurrence in percentage to keep the
gate as valid. Default 95.
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object and metadata dictionary
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
instant = False
if 'instant' in prdcfg:
instant = prdcfg['instant']
if not instant and not dataset['occu_final']:
return None
if prdcfg['type'] == 'WRITE_EXCESS_GATES':
if not dataset['occu_final']:
return None
radar = dataset['radar_out']
if (('frequency_of_occurrence' not in radar.fields) or
('occurrence' not in radar.fields) or
('number_of_samples' not in radar.fields)):
warn('Unable to create quantile excess gates file. '
'Missing data')
return None
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
quant_min = 95.
if 'quant_min' in prdcfg:
quant_min = prdcfg['quant_min']
# get index of gates exceeding quantile
freq_occu = radar.fields['frequency_of_occurrence'][
'data']
ind_ray, ind_rng = np.where(freq_occu > quant_min)
if ind_ray.size == 0:
warn('No data exceeds the frequency of occurrence ' +
str(quant_min)+' %')
return None
excess_dict = {
'starttime': dataset['starttime'],
'endtime': dataset['endtime'],
'quant_min': quant_min,
'ray_ind': ind_ray,
'rng_ind': ind_rng,
'ele': radar.elevation['data'][ind_ray],
'azi': radar.azimuth['data'][ind_ray],
'rng': radar.range['data'][ind_rng],
'nsamples': (
radar.fields['number_of_samples']['data'][ind_ray, ind_rng]),
'occurrence': (
radar.fields['occurrence']['data'][ind_ray, ind_rng]),
'freq_occu': freq_occu[ind_ray, ind_rng]
}
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['endtime'])
fname = make_filename(
'excess_gates', prdcfg['dstype'], prdcfg['prdname'], ['csv'],
prdcfginfo='quant'+'{:.1f}'.format(quant_min),
timeinfo=dataset['endtime'])
fname = savedir+fname[0]
fname = write_excess_gates(excess_dict, fname)
if fname is not None:
print('saved excess gates file: {}'.format(fname))
return fname
field_name = get_fieldname_pyart(prdcfg['voltype'])
if ((field_name == 'frequency_of_occurrence') and
(not dataset['occu_final'])):
return None
if dataset['occu_final']:
prdcfg['timeinfo'] = dataset['endtime']
return generate_vol_products(dataset, prdcfg)
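# --- Hedged illustration (not part of the original module) ------------------
# A minimal sketch of the product configuration dictionary that
# generate_occurrence_products() reads for a 'WRITE_EXCESS_GATES' product.
# Only the keys used above are shown; the paths and names are placeholders,
# and a real Pyrad prdcfg carries additional entries filled in by the
# framework.
_EXAMPLE_EXCESS_GATES_PRDCFG = {
    'type': 'WRITE_EXCESS_GATES',
    'quant_min': 98.,
    'dsname': 'echo_occurrence',
    'dstype': 'OCCURRENCE',
    'prdname': 'excess_gates_quant98',
    'basepath': '/data/pyrad_products/',
    'procname': 'example_proc',
}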
def generate_cosmo_coord_products(dataset, prdcfg):
"""
generates COSMO coordinates products. Accepted product types:
'SAVEVOL': Save an object containing the index of the COSMO model grid
that corresponds to each radar gate in a C/F radial file.
User defined parameters:
file_type: str
The type of file used to save the data. Can be 'nc' or
'h5'. Default 'nc'
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
compression: str
For ODIM file formats, the type of compression. Can be any
of the allowed compression types for hdf5 files. Default
gzip
compression_opts: any
The compression options allowed by the hdf5. Depends on
the type of compression. Default 6 (The gzip compression
level).
Parameters
----------
dataset : tuple
radar object containing the COSMO coordinates
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
if prdcfg['type'] == 'SAVEVOL':
radar_obj = dataset['radar_out']
ind_rad = dataset['ind_rad']
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in radar_obj.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
compression = prdcfg.get('compression', 'gzip')
compression_opts = prdcfg.get('compression_opts', 6)
new_dataset = deepcopy(radar_obj)
new_dataset.fields = dict()
new_dataset.add_field(field_name, radar_obj.fields[field_name])
savedir = prdcfg['cosmopath'][ind_rad]+'rad2cosmo/'
fname = 'rad2cosmo_'+prdcfg['voltype']+'_'+prdcfg['procname']+'.nc'
if file_type == 'nc':
pyart.io.cfradial.write_cfradial(
savedir+fname, new_dataset, physical=physical)
elif file_type == 'h5':
pyart.aux_io.write_odim_h5(
savedir+fname, new_dataset, physical=physical,
compression=compression, compression_opts=compression_opts)
else:
warn('Data could not be saved. ' +
'Unknown saving file type '+file_type)
return None
print('saved file: {}'.format(savedir+fname))
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None
def generate_cosmo_to_radar_products(dataset, prdcfg):
"""
generates COSMO data in radar coordinates products. Accepted product
types:
'SAVEVOL': Save an object containing the COSMO data in radar
coordinatesin a C/F radial or ODIM file.
User defined parameters:
file_type: str
The type of file used to save the data. Can be 'nc' or
'h5'. Default 'nc'
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
compression: str
For ODIM file formats, the type of compression. Can be any
of the allowed compression types for hdf5 files. Default
gzip
compression_opts: any
The compression options allowed by the hdf5. Depends on
the type of compression. Default 6 (The gzip compression
level).
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object containing the COSMO coordinates
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
time_index = prdcfg.get('cosmo_time_index', 0)
if time_index > len(dataset)-1:
warn(
'COSMO time index larger than available. Skipping product ' +
prdcfg['type'])
return None
radar_dataset = dataset[time_index]
if prdcfg['type'] == 'SAVEVOL':
radar_obj = radar_dataset['radar_out']
ind_rad = radar_dataset['ind_rad']
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in radar_obj.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
compression = prdcfg.get('compression', 'gzip')
compression_opts = prdcfg.get('compression_opts', 6)
new_dataset = deepcopy(radar_obj)
new_dataset.fields = dict()
new_dataset.add_field(field_name, radar_obj.fields[field_name])
savedir = (
prdcfg['cosmopath'][ind_rad]+prdcfg['voltype']+'/radar/' +
prdcfg['timeinfo'].strftime('%Y-%m-%d')+'/'+prdcfg['procname']+'/')
fname = (
prdcfg['voltype']+'_RUN' +
prdcfg['timeinfo'].strftime('%Y%m%d%H%M%S')+'_' +
radar_dataset['dtcosmo'].strftime('%Y%m%d%H%M%S')+'.nc')
if not os.path.isdir(savedir):
os.makedirs(savedir)
if file_type == 'nc':
pyart.io.cfradial.write_cfradial(
savedir+fname, new_dataset, physical=physical)
elif file_type == 'h5':
pyart.aux_io.write_odim_h5(
savedir+fname, new_dataset, physical=physical,
compression=compression, compression_opts=compression_opts)
else:
warn('Data could not be saved. ' +
'Unknown saving file type '+file_type)
return None
print('saved file: {}'.format(savedir+fname))
return fname
return generate_vol_products(radar_dataset, prdcfg)
def generate_sun_hits_products(dataset, prdcfg):
"""
generates sun hits products. Accepted product types:
'PLOT_SUN_HITS': Plots in a sun-radar azimuth difference-sun-radar
elevation difference grid the values of all sun hits obtained
during the processing period
'PLOT_SUN_RETRIEVAL': Plots in a sun-radar azimuth difference-sun-
radar elevation difference grid the retrieved sun pattern
'PLOT_SUN_RETRIEVAL_TS': Plots time series of the retrieved sun
pattern parameters
User defined parameters:
dpi: int
The pixel density of the plot. Default 72
add_date_in_fname: Bool
If true the year is added in the plot file name
'PLOT_SUNSCAN': Plots a constant range radar azimuth-elevation of the
sunscan field data
'WRITE_SUN_HITS': Writes the information concerning possible sun hits
in a csv file
'WRITE_SUN_RETRIEVAL': Writes the retrieved sun pattern parameters in
a csv file.
User defined parameters:
add_date_in_fname: Bool
If true the year is added in the csv file name
'WRITE_SUNSCAN': Writes the sunscan parameters in a csv file
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object and sun hits dictionary
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
prdcfg['timeinfo'] = dataset['timeinfo']
if prdcfg['type'] == 'WRITE_SUN_HITS':
if 'sun_hits' not in dataset:
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname = make_filename(
'info', prdcfg['dstype'], 'detected', ['csv'],
timeinfo=dataset['timeinfo'], timeformat='%Y%m%d')[0]
fname = savedir+fname
write_sun_hits(dataset['sun_hits'], fname)
print('saved sun hits file: {}'.format(fname))
        return fname
if prdcfg['type'] == 'PLOT_SUN_HITS':
if 'sun_hits_final' not in dataset:
return None
field_name = get_fieldname_pyart(prdcfg['voltype'])
if prdcfg['voltype'] not in dataset['sun_hits_final']:
warn(
' Field type ' + prdcfg['voltype'] +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname_list = make_filename(
'detected', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
timeformat='%Y%m%d')
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
field = create_sun_hits_field(
dataset['sun_hits_final']['rad_el'],
dataset['sun_hits_final']['rad_az'],
dataset['sun_hits_final']['sun_el'],
dataset['sun_hits_final']['sun_az'],
dataset['sun_hits_final'][prdcfg['voltype']],
prdcfg['sunhitsImageConfig'])
if field is None:
warn(
'Unable to create field '+prdcfg['voltype'] +
' Skipping product ' + prdcfg['type'])
return None
plot_sun_hits(field, field_name, fname_list, prdcfg)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'WRITE_SUN_RETRIEVAL':
if 'sun_retrieval' not in dataset:
return None
timeinfo = None
timeformat = None
if prdcfg.get('add_date_in_fname', False):
timeinfo = dataset['timeinfo']
timeformat = '%Y'
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=None)
fname = make_filename(
'info', prdcfg['dstype'], 'retrieval', ['csv'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
write_sun_retrieval(dataset['sun_retrieval'], fname)
print('saved sun retrieval file: {}'.format(fname))
return fname
if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL':
if 'sun_retrieval' not in dataset:
return None
field_name = get_fieldname_pyart(prdcfg['voltype'])
par = None
if field_name == 'sun_est_power_h':
par = 'par_h'
elif field_name == 'sun_est_power_v':
par = 'par_v'
elif field_name == 'sun_est_differential_reflectivity':
par = 'par_zdr'
        if par not in dataset['sun_retrieval']:
            warn(
                ' Field type ' + prdcfg['voltype'] +
                ' not available in data set. Skipping product ' +
                prdcfg['type'])
            return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname_list = make_filename(
'retrieval', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
timeformat='%Y%m%d')
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['sun_retrieval'][par] is None:
warn(
' Invalid retrieval parameters. Skipping product ' +
prdcfg['type'])
return None
field = create_sun_retrieval_field(
dataset['sun_retrieval'][par], field_name,
prdcfg['sunhitsImageConfig'],
lant=dataset['sun_retrieval']['lant'])
if field is not None:
plot_sun_hits(field, field_name, fname_list, prdcfg)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL_TS':
if 'sun_retrieval' not in dataset:
return None
dpi = prdcfg.get('dpi', 72)
timeinfo = None
timeformat = None
if prdcfg.get('add_date_in_fname', False):
timeinfo = dataset['timeinfo']
timeformat = '%Y'
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdid'], timeinfo=None)
fname = make_filename(
'info', prdcfg['dstype'], 'retrieval', ['csv'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])
fname = savedir + fname[0]
sun_retrieval = read_sun_retrieval(fname)
if sun_retrieval[0] is None:
warn(
'Unable to read sun retrieval file '+fname)
return None
if len(sun_retrieval[0]) < 2:
warn(
'Unable to plot sun retrieval time series. ' +
'Not enough data points.')
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=None)
fname_list = make_filename(
'retrieval_ts', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
titl = (prdcfg['runinfo']+' Sun Retrieval ' +
sun_retrieval[1][0].strftime('%Y%m%d')+'-' +
sun_retrieval[1][-1].strftime('%Y%m%d'))
figfname = plot_sun_retrieval_ts(
sun_retrieval, prdcfg['voltype'], fname_list, titl=titl, dpi=dpi)
if figfname is None:
return None
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'WRITE_SUNSCAN':
if 'sun_retrieval' not in dataset:
return None
text = [
"SunScan info",
"sun_az: [deg] Azimuth sun position ",
"sun_el: [deg] Elevation sun position",
"noise_pwr: [dBm] Noise power",
"sun_maxpwr_noise: [dBm]"
" sun maximal power sample (including noise)",
"sun_maxpwr_nonoise: [dBm]"
" sun maximal power sample without noise",
"sun_maxpwr_fit: [dBm]"
" sun maximal fitted power (without noise)",
"sun_maxpwr_toa: [dBm]"
" sun maximal power at top of atmosphere",
"az_offset: [deg]"
" Azimuth shift of fitted maxima to sun azimuth",
"el_offset: [deg]"
" Elevation shift of fitted maxima to sun elevation",
"az_phi3db: [deg]"
" Half-power beam width in azimuth",
"el_phi3db: [deg]"
" Half-power beam width in elevation",
"fit_stddev: [dBm]"
" Standard deviation (fit to samples)",
"num_samples: [#]"
" Number of samples used for the sun power fitting"
]
sunRdata = dataset['sun_retrieval']
if dataset['field_name'] == 'noisedBm_hh':
data = {
'dstype': prdcfg['dstype'],
'unit': 'dBm',
'time': sunRdata['sunscan_time'],
'label': [
"sun_az", "sun_el", "noise_pwr", "sun_maxpwr_noise",
"sun_maxpwr_nonoise", "sun_maxpwr_fit", "sun_maxpwr_toa",
"az_offset", "el_offset", "az_phi3db", "el_phi3db",
"fit_stddev", "num_samples"],
'value': [
sunRdata['sunpos_az'], sunRdata['sunpos_el'],
sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
sunRdata['sun_maxpwr_nonoise'], sunRdata['dBm_sun_est'],
sunRdata['dBm_sun_est_toa'], sunRdata['az_bias_h'],
sunRdata['el_bias_h'], sunRdata['az_width_h'],
sunRdata['el_width_h'], sunRdata['std(dBm_sun_est)'],
sunRdata['nhits_h']]
}
elif dataset['field_name'] == 'noisedBm_vv':
data = {
'dstype': prdcfg['dstype'],
'unit': 'dBm',
'time': sunRdata['sunscan_time'],
'label': [
"sun_az", "sun_el", "noise_pwr", "sun_maxpwr_noise",
"sun_maxpwr_nonoise", "sun_maxpwr_fit", "sun_maxpwr_toa",
"az_offset", "el_offset", "az_phi3db", "el_phi3db",
"fit_stddev", "num_samples"],
'value': [
sunRdata['sunpos_az'], sunRdata['sunpos_el'],
sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
sunRdata['sun_maxpwr_nonoise'], sunRdata['dBmv_sun_est'],
sunRdata['dBmv_sun_est_toa'], sunRdata['az_bias_v'],
sunRdata['el_bias_v'], sunRdata['az_width_v'],
sunRdata['el_width_v'], sunRdata['std(dBmv_sun_est)'],
sunRdata['nhits_v']]
}
        else:
            warn('ERROR: No valid datatype for WRITE_SUNSCAN product.')
            return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], prdcfg['timeinfo'])
fname1 = make_filename(
'ts', prdcfg['dstype'], dataset['field_name'], ['csv'],
timeinfo=prdcfg['timeinfo'], timeformat='%Y%m%d',
runinfo=prdcfg['runinfo'])[0]
fname1 = savedir+fname1
write_timeseries_point(fname1, data, prdcfg['dstype'], text)
print('saved sunscan file: {}'.format(fname1))
return fname1
if prdcfg['type'] == 'PLOT_SUNSCAN':
radar = dataset['radar_out']
sun_hits = dataset['sun_hits']
field_name = dataset['field_name']
if field_name not in radar.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined parameters
azi_res = prdcfg.get('azi_res', None)
ele_res = prdcfg.get('ele_res', None)
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
angtol = prdcfg.get('ang_tol', 0.5)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], prdcfg['timeinfo'])
fname_list = make_filename(
'constr', prdcfg['dstype'], prdcfg['dsname'],
prdcfg['imgformat'],
prdcfginfo='rng'+'{:.1f}'.format(
dataset['radar_out'].range['data'][0]),
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
        plot_fixed_rng_sun(
            radar, field_name, sun_hits, prdcfg, fname_list, azi_res=azi_res,
            ele_res=ele_res, ang_tol=angtol, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if 'radar_out' in dataset:
return generate_vol_products(dataset, prdcfg)
return None
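# Illustrative sketch: a hypothetical configuration for the
# 'PLOT_SUN_RETRIEVAL_TS' branch above, showing the two user-defined
# parameters documented in the docstring. The variable name is an example;
# keys such as 'basepath', 'timeinfo' and 'runinfo' are assumed to be
# supplied by the pyrad framework.
_example_sun_retrieval_ts_prdcfg = {
    'type': 'PLOT_SUN_RETRIEVAL_TS',
    'voltype': 'dBm_sun_est',      # hypothetical retrieved sun-power variable
    'dpi': 150,                    # pixel density of the plot (default 72)
    'add_date_in_fname': True,     # add the year to the plot file name
}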
def generate_qvp_products(dataset, prdcfg):
"""
Generates quasi vertical profile-like products. Quasi vertical profiles
come from azimuthal averaging of polarimetric radar data. With the
variable 'qvp_type' the user decides if the product has to be generated
at the end of the processing period ('final') or instantaneously
('instant')
Accepted product types:
All the products of the 'VOL' dataset group
Parameters
----------
dataset : dict
dictionary containing the radar object and a keyword stating the
status of the processing
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
qvp_type = 'final'
if 'qvp_type' in prdcfg:
qvp_type = prdcfg['qvp_type']
if qvp_type == 'final' and dataset['radar_type'] != 'final':
return None
prdcfg['timeinfo'] = dataset['start_time']
return generate_vol_products(dataset, prdcfg)
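# Illustrative sketch: the only user choice in generate_qvp_products is
# 'qvp_type'. With 'final' (the default) the products are generated once at
# the end of the processing period; with 'instant' they are generated for
# every incoming volume.
_example_qvp_prdcfg_fragment = {
    'qvp_type': 'instant',    # or 'final' (default)
}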
def generate_ml_products(dataset, prdcfg):
"""
Generates melting layer products. Accepted product types:
'ML_TS': Plots and writes a time series of the melting layer, i.e.
the evolution of the average and standard deviation of the melting
        layer top and thickness and the number of rays used in the
retrieval.
User defined parameters:
dpi: int
The pixel density of the plot. Default 72
'SAVE_ML': Saves an object containing the melting layer retrieval
information in a C/F radial file
All the products of the 'VOL' dataset group
Parameters
----------
dataset : dict
dictionary containing the radar object and a keyword stating the
status of the processing
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'ML_TS':
dpi = prdcfg.get('dpi', 72)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
csvfname = make_filename(
'ts', prdcfg['dstype'], 'ml', ['csv'],
timeinfo=prdcfg['timeinfo'], timeformat='%Y%m%d')[0]
csvfname = savedir+csvfname
ml_bottom = dataset['ml_obj'].fields['melting_layer_height']['data'][
:, 0]
ml_top = dataset['ml_obj'].fields['melting_layer_height']['data'][:, 1]
ml_top_avg = np.ma.asarray(np.ma.mean(ml_top))
ml_top_std = np.ma.asarray(np.ma.std(ml_top))
thick = ml_top-ml_bottom
thick_avg = np.ma.asarray(np.ma.mean(thick))
thick_std = np.ma.asarray(np.ma.std(thick))
nrays_valid = thick.compressed().size
nrays_total = thick.size
write_ts_ml(
prdcfg['timeinfo'], ml_top_avg, ml_top_std, thick_avg, thick_std,
nrays_valid, nrays_total, csvfname)
print('saved CSV file: {}'.format(csvfname))
(dt_ml_arr, ml_top_avg_arr, ml_top_std_arr, thick_avg_arr,
thick_std_arr, nrays_valid_arr, nrays_total_arr) = (
read_ml_ts(csvfname))
if dt_ml_arr is None:
warn(
'Unable to plot time series. No valid data')
return None
figfname_list = make_filename(
'ts', prdcfg['dstype'], 'ml', prdcfg['imgformat'],
timeinfo=dt_ml_arr[0], timeformat='%Y%m%d')
for i, figfname in enumerate(figfname_list):
figfname_list[i] = savedir+figfname
titl = dt_ml_arr[0].strftime('%Y-%m-%d')+' melting layer time series'
plot_ml_ts(
dt_ml_arr, ml_top_avg_arr, ml_top_std_arr, thick_avg_arr,
thick_std_arr, nrays_valid_arr, nrays_total_arr, figfname_list,
labelx='Time UTC', titl=titl, dpi=dpi)
print('----- save to '+' '.join(figfname_list))
return figfname_list
if prdcfg['type'] == 'SAVE_ML':
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'saveml', prdcfg['dstype'], 'ml_h', ['nc'],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
pyart.io.cfradial.write_cfradial(fname, dataset['ml_obj'])
print('saved file: {}'.format(fname))
return fname
return generate_vol_products(dataset, prdcfg)
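# Illustrative sketch: a hypothetical configuration fragment for the 'ML_TS'
# branch above. 'dpi' is the only user-defined parameter; the remaining keys
# read by the code ('basepath', 'timeinfo', 'imgformat', ...) are assumed to
# come from the pyrad framework.
_example_ml_ts_prdcfg_fragment = {
    'type': 'ML_TS',
    'dpi': 72,    # pixel density of the time-series plot (default 72)
}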
def generate_centroids_products(dataset, prdcfg):
"""
Generates centroids products. Accepted product types:
'HISTOGRAM': Plots the histogram of one of the variables used for
centroids computation.
User defined parameters:
voltype : str
The name of the variable to plot. Can be dBZ, ZDR, KDP,
RhoHV, H_ISO0 and its standardized form (e.g. dBZ_std)
write_data : Bool
If true writes the histogram in a .csv file. Default True
step : float
bin size. Default 0.1
    'HISTOGRAM2D': Plots the 2D histogram of two of the variables used
for centroids computation.
User defined parameters:
voltype_x, voltype_y : str
The name of the variables to plot. Can be dBZ, ZDR, KDP,
RhoHV, H_ISO0 and its standardized form (e.g. dBZ_std)
step_x, step_y : float
bin size. Default 0.1
'HISTOGRAM_LABELED': Plots the histogram of one of the variables used
for centroids computation. Only plots labeled data.
User defined parameters:
voltype : str
The name of the variable to plot. Can be dBZ, ZDR, KDP,
RhoHV, H_ISO0 and its standardized form (e.g. dBZ_std)
write_data : Bool
If true writes the histogram in a .csv file. Default True
step : float
bin size. Default 0.1
'HISTOGRAM_CENTROIDS': Plots the histogram of one of the variables
used for centroids computation corresponding to a particular
hydrometeor type, the intermediate centroids and the final
centroid
User defined parameters:
voltype : str
The name of the variable to plot. Can be dBZ, ZDR, KDP,
RhoHV, H_ISO0 and its standardized form (e.g. dBZ_std)
hydro_type : str
The name of the hydrometeor type.
write_data : Bool
If true writes the histogram in a .csv file. Default True
step : float
bin size. Default 0.1
    'HISTOGRAM2D_CENTROIDS': Plots the 2D histogram of two of the
        variables used for centroids computation corresponding to a
particular hydrometeor type, the intermediate centroids and the
final centroid
User defined parameters:
voltype_x, voltype_y : str
The name of the variables to plot. Can be dBZ, ZDR, KDP,
RhoHV, H_ISO0 and its standardized form (e.g. dBZ_std)
hydro_type : str
The name of the hydrometeor type.
step_x, step_y : float
bin size. Default 0.1
'WRITE_CENTROIDS': Writes the final centroids in a .csv file.
'SAVE_DATA': Saves the data used to compute the centroids in an .npz
file
'SAVE_LABELED_DATA': Saves the labeled data, the intermediate
centroids and the final centroids in an .npz file
Parameters
----------
dataset : dict
dictionary containing the radar object and a keyword stating the
status of the processing
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'HISTOGRAM':
data_dict = dataset['data_dict']
# check if we have to plot the data
hist_type = prdcfg.get('hist_type', 'cumulative')
if hist_type == 'cumulative' and not data_dict['final']:
return None
if hist_type == 'instant' and data_dict['final']:
return None
voltype = prdcfg['voltype']
if voltype not in data_dict.keys():
warn(
' Field type ' + voltype +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
write_data = prdcfg.get('write_data', 1)
step = prdcfg.get('step', 0.1)
timeformat = '%Y%m%d'
timeinfo = data_dict['timeinfo'][0]
titl = timeinfo.strftime('%Y-%m-%d')+'\n'+voltype
data_vals = data_dict[voltype]
if hist_type == 'instant':
timeformat = '%Y%m%d%H%M%S'
timeinfo = data_dict['timeinfo'][-1]
titl = timeinfo.strftime('%Y-%m-%d %H:%M:%S')+'\n'+voltype
nvols = data_dict['npoints'].size
ind_start = np.sum(data_dict['npoints'][0:nvols-2])
data_vals = data_vals[ind_start:]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname_list = make_filename(
'histogram', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'],
timeinfo=timeinfo, timeformat=timeformat)
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if '_std' in voltype:
bin_edges = np.arange(-1.-step/2., 1.+step/2+step, step)
values = data_vals
else:
field_name = get_fieldname_pyart(voltype)
bin_edges, values = compute_histogram(
data_vals, field_name, step=step)
bin_centers = bin_edges[1:]-step/2
hist, bin_edges = np.histogram(values, bins=bin_edges)
plot_histogram2(
bin_centers, hist, fname_list, labelx=voltype,
labely='Number of Samples', titl=titl)
print('----- save to '+' '.join(fname_list))
if write_data:
fname = savedir+make_filename(
'histogram', prdcfg['dstype'], prdcfg['voltype'],
['csv'], timeinfo=timeinfo, timeformat=timeformat)[0]
write_histogram(bin_edges, hist, fname, step=step)
print('----- save to {}'.format(fname))
return fname
return fname_list
if prdcfg['type'] == 'HISTOGRAM2D':
data_dict = dataset['data_dict']
# check if we have to plot the data
hist_type = prdcfg.get('hist_type', 'cumulative')
if hist_type == 'cumulative' and not data_dict['final']:
return None
if hist_type == 'instant' and data_dict['final']:
return None
voltype_x = prdcfg['voltype_x']
voltype_y = prdcfg['voltype_y']
if voltype_x not in data_dict.keys():
warn(
' Field type ' + voltype_x +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
if voltype_y not in data_dict.keys():
warn(
' Field type ' + voltype_y +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
step_x = prdcfg.get('step_x', 0.1)
step_y = prdcfg.get('step_y', 0.1)
timeformat = '%Y%m%d'
timeinfo = data_dict['timeinfo'][0]
titl = (
timeinfo.strftime('%Y-%m-%d')+'\n'+voltype_x+'-'+voltype_y)
data_vals_x = data_dict[voltype_x]
data_vals_y = data_dict[voltype_y]
if hist_type == 'instant':
timeformat = '%Y%m%d%H%M%S'
timeinfo = data_dict['timeinfo'][-1]
titl = (
timeinfo.strftime('%Y-%m-%d %H:%M:%S')+'\n'+voltype_x+'-' +
voltype_y)
nvols = data_dict['npoints'].size
ind_start = np.sum(data_dict['npoints'][0:nvols-2])
data_vals_x = data_vals_x[ind_start:]
data_vals_y = data_vals_y[ind_start:]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname_list = make_filename(
'2Dhistogram', prdcfg['dstype'], voltype_x+'-'+voltype_y,
prdcfg['imgformat'],
timeinfo=timeinfo, timeformat=timeformat)
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if '_std' in voltype_x:
bin_edges_x = np.arange(-1.-step_x/2., 1.+step_x/2+step_x, step_x)
values_x = data_vals_x
else:
field_name_x = get_fieldname_pyart(voltype_x)
bin_edges_x, values_x = compute_histogram(
data_vals_x, field_name_x, step=step_x)
if '_std' in voltype_y:
bin_edges_y = np.arange(-1.-step_y/2., 1.+step_y/2+step_y, step_y)
values_y = data_vals_y
else:
field_name_y = get_fieldname_pyart(voltype_y)
bin_edges_y, values_y = compute_histogram(
data_vals_y, field_name_y, step=step_y)
hist_2d, bin_edges_x, bin_edges_y = np.histogram2d(
values_x, values_y, bins=[bin_edges_x, bin_edges_y])
plot_scatter(
bin_edges_x, bin_edges_y, np.ma.asarray(hist_2d), voltype_x,
voltype_y, fname_list, prdcfg, rad1_name='', rad2_name='',
titl=titl, cmap='viridis')
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'HISTOGRAM_LABELED':
if 'labeled_data_dict' not in dataset:
return None
data_dict = dataset['labeled_data_dict']
voltype = prdcfg['voltype']
if voltype not in data_dict.keys():
warn(
' Field type ' + voltype +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
write_data = prdcfg.get('write_data', 1)
step = prdcfg.get('step', 0.1)
timeformat = '%Y%m%d'
timeinfo = data_dict['timeinfo'][0]
titl = timeinfo.strftime('%Y-%m-%d')+'\n'+voltype
data_vals = data_dict[voltype]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname_list = make_filename(
'histogram', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'],
timeinfo=timeinfo, timeformat=timeformat)
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if '_std' in voltype:
bin_edges = np.arange(-1.-step/2., 1.+step/2+step, step)
values = data_vals
else:
field_name = get_fieldname_pyart(voltype)
bin_edges, values = compute_histogram(
data_vals, field_name, step=step)
bin_centers = bin_edges[1:]-step/2
hist, bin_edges = np.histogram(values, bins=bin_edges)
plot_histogram2(
bin_centers, hist, fname_list, labelx=voltype,
labely='Number of Samples', titl=titl)
print('----- save to '+' '.join(fname_list))
if write_data:
fname = savedir+make_filename(
'histogram', prdcfg['dstype'], prdcfg['voltype'],
['csv'], timeinfo=timeinfo, timeformat=timeformat)[0]
write_histogram(bin_edges, hist, fname, step=step)
print('----- save to {}'.format(fname))
return fname
return fname_list
if prdcfg['type'] == 'HISTOGRAM_CENTROIDS':
if 'labeled_data_dict' not in dataset:
return None
data_dict = dataset['labeled_data_dict']
voltype = prdcfg['voltype']
hydro_type = prdcfg['hydro_type']
if voltype not in data_dict.keys():
warn(
' Field type ' + voltype +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
if hydro_type not in data_dict['medoids_dict']:
            warn('No medoids were found for hydrometeor class '+hydro_type)
return None
ind_hydro = np.where(
np.array(data_dict['hydro_names']) == hydro_type)[0]
ind_medoid = np.where(
np.array(data_dict['var_names']) == voltype)[0]
write_data = prdcfg.get('write_data', 1)
step = prdcfg.get('step', 0.1)
dpi = 72
if 'dpi' in prdcfg['ppiImageConfig']:
dpi = prdcfg['ppiImageConfig']['dpi']
timeformat = '%Y%m%d'
timeinfo = data_dict['timeinfo'][0]
titl = timeinfo.strftime('%Y-%m-%d')+'\n'+voltype+' '+hydro_type
data_vals = data_dict[voltype]
data_vals = data_vals[data_dict['labels'] == ind_hydro]
medoids = np.array(data_dict['medoids_dict'][hydro_type])
medoids = medoids[:, ind_medoid]
if hydro_type not in data_dict['final_medoids_dict']:
warn('No medoid for hydrometeor class '+hydro_type)
fmedoid = None
else:
fmedoid = data_dict['final_medoids_dict'][hydro_type][ind_medoid]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname_list = make_filename(
            'histogram', prdcfg['dstype'], hydro_type+'_'+prdcfg['voltype'],
prdcfg['imgformat'],
timeinfo=timeinfo, timeformat=timeformat)
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if '_std' in voltype:
bin_edges = np.arange(-1.-step/2., 1.+step/2+step, step)
values = data_vals
else:
field_name = get_fieldname_pyart(voltype)
bin_edges, values = compute_histogram(
data_vals, field_name, step=step)
bin_centers = bin_edges[1:]-step/2
hist, bin_edges = np.histogram(values, bins=bin_edges)
pos_medoids = []
for medoid in medoids:
ind = np.where(bin_edges <= medoid)[0][-1]
pos_medoids.append(hist[ind])
pos_medoids = np.array(pos_medoids)
if fmedoid is not None:
ind = np.where(bin_edges <= fmedoid)[0][-1]
pos_fmedoid = hist[ind]
fig, ax, = plot_histogram2(
bin_centers, hist, fname_list, labelx=voltype,
labely='Number of Samples', titl=titl, save_fig=False)
ax.scatter(medoids, pos_medoids, c='g', marker='o')
if fmedoid is not None:
ax.plot(fmedoid, pos_fmedoid, c='r', marker='D')
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
print('----- save to '+' '.join(fname_list))
if write_data:
fname = savedir+make_filename(
'histogram', prdcfg['dstype'], prdcfg['voltype'],
['csv'], timeinfo=timeinfo, timeformat=timeformat)[0]
write_histogram(bin_edges, hist, fname, step=step)
print('----- save to {}'.format(fname))
return fname
return fname_list
if prdcfg['type'] == 'HISTOGRAM2D_CENTROIDS':
if 'labeled_data_dict' not in dataset:
return None
labeled_data_dict = dataset['labeled_data_dict']
voltype_x = prdcfg['voltype_x']
voltype_y = prdcfg['voltype_y']
hydro_type = prdcfg['hydro_type']
if voltype_x not in labeled_data_dict.keys():
warn(
' Field type ' + voltype_x +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
if voltype_y not in labeled_data_dict.keys():
warn(
' Field type ' + voltype_y +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
if hydro_type not in labeled_data_dict['medoids_dict']:
            warn('No medoids were found for hydrometeor class '+hydro_type)
return None
ind_hydro = np.where(
np.array(labeled_data_dict['hydro_names']) == hydro_type)[0]
ind_medoid_x = np.where(
np.array(labeled_data_dict['var_names']) == voltype_x)[0]
ind_medoid_y = np.where(
np.array(labeled_data_dict['var_names']) == voltype_y)[0]
step_x = prdcfg.get('step_x', 0.1)
step_y = prdcfg.get('step_y', 0.1)
timeformat = '%Y%m%d'
timeinfo = labeled_data_dict['timeinfo'][0]
titl = (
timeinfo.strftime('%Y-%m-%d')+'\n'+voltype_x+'-'+voltype_y +
' '+hydro_type)
data_vals_x = labeled_data_dict[voltype_x]
data_vals_y = labeled_data_dict[voltype_y]
data_vals_x = data_vals_x[labeled_data_dict['labels'] == ind_hydro]
data_vals_y = data_vals_y[labeled_data_dict['labels'] == ind_hydro]
medoids = np.array(labeled_data_dict['medoids_dict'][hydro_type])
medoids_x = medoids[:, ind_medoid_x]
medoids_y = medoids[:, ind_medoid_y]
if hydro_type not in labeled_data_dict['final_medoids_dict']:
warn('No medoid for hydrometeor class '+hydro_type)
fmedoid_x = None
fmedoid_y = None
else:
fmedoid_x = labeled_data_dict['final_medoids_dict'][hydro_type][
ind_medoid_x]
fmedoid_y = labeled_data_dict['final_medoids_dict'][hydro_type][
ind_medoid_y]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname_list = make_filename(
'2Dhistogram', prdcfg['dstype'],
hydro_type+'_'+voltype_x+'-'+voltype_y, prdcfg['imgformat'],
timeinfo=timeinfo, timeformat=timeformat)
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if '_std' in voltype_x:
bin_edges_x = np.arange(-1.-step_x/2., 1.+step_x/2+step_x, step_x)
values_x = data_vals_x
else:
field_name_x = get_fieldname_pyart(voltype_x)
bin_edges_x, values_x = compute_histogram(
data_vals_x, field_name_x, step=step_x)
if '_std' in voltype_y:
bin_edges_y = np.arange(-1.-step_y/2., 1.+step_y/2+step_y, step_y)
values_y = data_vals_y
else:
field_name_y = get_fieldname_pyart(voltype_y)
bin_edges_y, values_y = compute_histogram(
data_vals_y, field_name_y, step=step_y)
hist_2d, bin_edges_x, bin_edges_y = np.histogram2d(
values_x, values_y, bins=[bin_edges_x, bin_edges_y])
plot_centroids(
bin_edges_x, bin_edges_y, np.ma.asarray(hist_2d), voltype_x,
voltype_y, fname_list, prdcfg, titl=titl, medoids_x=medoids_x,
medoids_y=medoids_y, fmedoid_x=fmedoid_x, fmedoid_y=fmedoid_y,
cmap='viridis')
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'WRITE_CENTROIDS':
if 'labeled_data_dict' not in dataset:
return None
labeled_data_dict = dataset['labeled_data_dict']
timeformat = '%Y%m%d'
timeinfo = labeled_data_dict['timeinfo'][0]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname = make_filename(
'centroids', prdcfg['dstype'], 'centroids',
['csv'], timeinfo=timeinfo, timeformat=timeformat)[0]
fname = savedir+fname
write_centroids(
fname, labeled_data_dict['final_medoids_dict'],
labeled_data_dict['var_names'])
print('----- save to {}'.format(fname))
return fname
if prdcfg['type'] == 'SAVE_DATA':
data_dict = dataset['data_dict']
# check if we have to save the data
if not data_dict['final']:
return None
timeinfo = data_dict['timeinfo'][0]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname = make_filename(
'data', prdcfg['dstype'], 'centroids_data', ['npz'],
timeinfo=timeinfo)[0]
fname = savedir+fname
np.savez_compressed(fname, centroids_data=data_dict)
print('----- save to {}'.format(fname))
return fname
if prdcfg['type'] == 'SAVE_LABELED_DATA':
if 'labeled_data_dict' not in dataset:
return None
data_dict = dataset['labeled_data_dict']
timeinfo = data_dict['timeinfo'][0]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname = make_filename(
'labeled_data', prdcfg['dstype'], 'centroids_data', ['npz'],
timeinfo=timeinfo)[0]
fname = savedir+fname
np.savez_compressed(fname, centroids_data=data_dict)
print('----- save to {}'.format(fname))
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None
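# Illustrative sketch: a hypothetical configuration fragment for the
# 'HISTOGRAM2D_CENTROIDS' branch above. The variable names and the
# hydrometeor class are examples only; valid values depend on the variables
# actually used for the centroids computation.
_example_centroids_prdcfg_fragment = {
    'type': 'HISTOGRAM2D_CENTROIDS',
    'voltype_x': 'dBZ',     # variable on the x axis
    'voltype_y': 'ZDR',     # variable on the y axis
    'hydro_type': 'RN',     # hypothetical hydrometeor class label
    'step_x': 0.5,          # bin size on the x axis (default 0.1)
    'step_y': 0.1,          # bin size on the y axis (default 0.1)
}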
| 35.767845
| 79
| 0.577374
|
619a62179d49d58520cf48ffa95d5fff061653f7
| 8,109
|
py
|
Python
|
acapy_plugin_toolbox/schemas.py
|
lynnbendixsen/aries-acapy-plugin-toolbox
|
c19c6c26486e8acdc3814b07e620f7a291754089
|
[
"Apache-2.0"
] | null | null | null |
acapy_plugin_toolbox/schemas.py
|
lynnbendixsen/aries-acapy-plugin-toolbox
|
c19c6c26486e8acdc3814b07e620f7a291754089
|
[
"Apache-2.0"
] | null | null | null |
acapy_plugin_toolbox/schemas.py
|
lynnbendixsen/aries-acapy-plugin-toolbox
|
c19c6c26486e8acdc3814b07e620f7a291754089
|
[
"Apache-2.0"
] | null | null | null |
"""Define messages for schemas admin protocols."""
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
from asyncio import shield
from marshmallow import fields
from aries_cloudagent.messaging.base_handler import BaseHandler, BaseResponder, RequestContext
from aries_cloudagent.messaging.models.base_record import BaseRecord, BaseRecordSchema
from aries_cloudagent.ledger.base import BaseLedger
from aries_cloudagent.storage.error import StorageNotFoundError
from aries_cloudagent.config.injection_context import InjectionContext
from .util import generate_model_schema, admin_only
PROTOCOL = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/admin-schemas/0.1'
SEND_SCHEMA = '{}/send-schema'.format(PROTOCOL)
SCHEMA_ID = '{}/schema-id'.format(PROTOCOL)
SCHEMA_GET = '{}/schema-get'.format(PROTOCOL)
SCHEMA = '{}/schema'.format(PROTOCOL)
SCHEMA_GET_LIST = '{}/schema-get-list'.format(PROTOCOL)
SCHEMA_LIST = '{}/schema-list'.format(PROTOCOL)
MESSAGE_TYPES = {
SEND_SCHEMA:
'acapy_plugin_toolbox.schemas'
'.SendSchema',
SCHEMA_ID:
'acapy_plugin_toolbox.schemas'
'.SchemaID',
SCHEMA_GET:
'acapy_plugin_toolbox.schemas'
'.SchemaGet',
SCHEMA:
'acapy_plugin_toolbox.schemas'
'.Schema',
SCHEMA_GET_LIST:
'acapy_plugin_toolbox.schemas'
'.SchemaGetList',
SCHEMA_LIST:
'acapy_plugin_toolbox.schemas'
'.SchemaList',
}
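# Illustrative sketch: the body of a send-schema admin message as this
# protocol accepts it. The attribute names are examples only; '@type' is
# assumed to carry the message type string defined above.
_example_send_schema_body = {
    '@type': SEND_SCHEMA,
    'schema_name': 'example-schema',
    'schema_version': '1.0',
    'attributes': ['given_name', 'family_name', 'birthdate'],
}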
class SchemaRecord(BaseRecord):
"""Represents a Schema."""
RECORD_ID_NAME = "record_id"
RECORD_TYPE = "schema"
AUTHOR_SELF = "self"
AUTHOR_OTHER = "other"
STATE_UNWRITTEN = "unwritten"
STATE_WRITTEN = "written"
class Meta:
"""SchemaRecord metadata."""
schema_class = "SchemaRecordSchema"
def __init__(
self,
*,
record_id: str = None,
schema_id: str = None,
schema_name: str = None,
schema_version: str = None,
author: str = None,
attributes: [str] = None,
state: str = None,
**kwargs):
"""Initialize a new SchemaRecord."""
super().__init__(record_id, state or self.STATE_UNWRITTEN, **kwargs)
self.schema_id = schema_id
self.schema_name = schema_name
self.schema_version = schema_version
        self.author = author
self.attributes = attributes
@property
def record_id(self) -> str:
"""Get the id."""
return self._id
@property
def record_value(self) -> dict:
"""Get record value."""
return {'attributes': self.attributes}
@property
def record_tags(self) -> dict:
"""Get tags for record."""
return {
prop: getattr(self, prop)
for prop in (
'schema_id',
'schema_name',
'schema_version',
'state',
'author'
)
}
@classmethod
async def retrieve_by_schema_id(
cls,
context: InjectionContext,
schema_id: str) -> "SchemaRecord":
"""Retrieve a schema record by schema_id."""
return await cls.retrieve_by_tag_filter(
context,
{'schema_id': schema_id}
)
class SchemaRecordSchema(BaseRecordSchema):
"""Schema to allow serialization/deserialization of Schema records."""
class Meta:
"""PoolRecordSchema metadata."""
model_class = SchemaRecord
schema_id = fields.Str(required=False)
schema_name = fields.Str(required=False)
schema_version = fields.Str(required=False)
author = fields.Str(required=False)
attributes = fields.List(fields.Str(), required=False)
SendSchema, SendSchemaSchema = generate_model_schema(
name='SendSchema',
handler='acapy_plugin_toolbox.schemas.SendSchemaHandler',
msg_type=SEND_SCHEMA,
schema={
'schema_name': fields.Str(required=True),
'schema_version': fields.Str(required=True),
'attributes': fields.List(fields.Str(), required=True)
}
)
SchemaID, SchemaIDSchema = generate_model_schema(
name='SchemaID',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=SCHEMA_ID,
schema={
'schema_id': fields.Str()
}
)
class SendSchemaHandler(BaseHandler):
"""Handler for received send schema request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received send schema request."""
ledger: BaseLedger = await context.inject(BaseLedger)
async with ledger:
schema_id = await shield(
ledger.send_schema(
context.message.schema_name,
context.message.schema_version,
context.message.attributes
)
)
schema = SchemaRecord(
schema_id=schema_id,
schema_name=context.message.schema_name,
schema_version=context.message.schema_version,
attributes=context.message.attributes,
state=SchemaRecord.STATE_WRITTEN,
author=SchemaRecord.AUTHOR_SELF,
)
await schema.save(context, reason="Committed to ledger")
result = SchemaID(schema_id=schema_id)
result.assign_thread_from(context.message)
await responder.send_reply(result)
SchemaGet, SchemaGetSchema = generate_model_schema(
name='SchemaGet',
handler='acapy_plugin_toolbox.schemas.SchemaGetHandler',
msg_type=SCHEMA_GET,
schema={
'schema_id': fields.Str(required=True)
}
)
Schema, SchemaSchema = generate_model_schema(
name='Schema',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=SCHEMA,
schema=SchemaRecordSchema
)
class SchemaGetHandler(BaseHandler):
"""Handler for received schema get request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received schema get request."""
try:
schema_record = await SchemaRecord.retrieve_by_schema_id(
context,
context.message.schema_id
)
schema_msg = Schema(**schema_record.serialize())
schema_msg.assign_thread_from(context.message)
await responder.send_reply(schema_msg)
return
except StorageNotFoundError:
pass
ledger: BaseLedger = await context.inject(BaseLedger)
async with ledger:
schema = await ledger.get_schema(context.message.schema_id)
schema_record = SchemaRecord(
schema_id=schema['id'],
schema_name=schema['name'],
schema_version=schema['version'],
attributes=schema['attrNames'],
state=SchemaRecord.STATE_WRITTEN,
author=SchemaRecord.AUTHOR_OTHER
)
await schema_record.save(context, reason='Retrieved from ledger')
schema_msg = Schema(**schema_record.serialize())
schema_msg.assign_thread_from(context.message)
await responder.send_reply(schema_msg)
SchemaGetList, SchemaGetListSchema = generate_model_schema(
name='SchemaGetList',
handler='acapy_plugin_toolbox.schemas.SchemaGetListHandler',
msg_type=SCHEMA_GET_LIST,
schema={}
)
SchemaList, SchemaListSchema = generate_model_schema(
name='SchemaList',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=SCHEMA_LIST,
schema={
'results': fields.List(
fields.Nested(SchemaRecordSchema),
required=True
)
}
)
class SchemaGetListHandler(BaseHandler):
"""Handler for get schema list request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle get schema list request."""
records = await SchemaRecord.query(context, {})
schema_list = SchemaList(results=records)
schema_list.assign_thread_from(context.message)
await responder.send_reply(schema_list)
| 30.144981
| 94
| 0.647305
|
aa66eadcfb9e09b73c7dc70b20bc7312992b14f2
| 4,191
|
py
|
Python
|
unit3_lesson_01_understanding_modules.py
|
RaviTejaKomma/learn-python
|
c9788b958371c013e19f2af289ebb4c5e4dd04ba
|
[
"MIT"
] | null | null | null |
unit3_lesson_01_understanding_modules.py
|
RaviTejaKomma/learn-python
|
c9788b958371c013e19f2af289ebb4c5e4dd04ba
|
[
"MIT"
] | null | null | null |
unit3_lesson_01_understanding_modules.py
|
RaviTejaKomma/learn-python
|
c9788b958371c013e19f2af289ebb4c5e4dd04ba
|
[
"MIT"
] | null | null | null |
__author__ = 'Kalyan'
notes = '''
modules are an abstraction feature which greatly aids in building large applications.
modules are defined in .py files (socket.py, random.py, csv.py ...) and usually contain
a set of function, data and class definitions which provide a specific functionality. This
allows for easy reuse and discovery of functionality. e.g. you can be pretty sure that
the socket module exposes functionality related to communication using sockets.
'''
notes_1 = '''
All these tests use module1.py to module4.py. Take a look at them before starting the tests.
'''
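# For reference, a minimal sketch of what module1.py presumably looks like,
# inferred from the assertions below (module2.py would be analogous). This is
# an assumed reconstruction, not the actual file shipped with the lesson:
#
#     # module1.py
#     def greet(name):
#         return "module1 says hi to " + name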
# These are global imports; generally you use only these. Rarely will you use function-level
# imports, but we are doing that here for the sake of testing.
import sys
import placeholders
from placeholders import *
def test_module_without_import():
try:
module1.greet("jack")
    except NameError:
        print 'module1 is not defined, you should import it first'
        assert True
def test_module_usage_needs_import():
import module1
assert "module1 says hi to jack" == module1.greet("jack")
def test_module_usage_multiple():
import module1
import module2
assert "module1 says hi to jack" == module1.greet("jack")
assert "module2 says hi to jack" == module2.greet("jack")
def test_module_import_affects_current_namespace():
import module1
def inner_func():
import module2
assert True == ('module2' in locals())
return module2.greet("jack")
assert "module1 says hi to jack" == module1.greet("jack")
assert "module2 says hi to jack" == inner_func()
assert False == ('placeholders' in locals())
assert True == ('placeholders' in globals())
assert True == ('module1' in locals())
assert False == ('module1' in globals())
assert False == ('module2' in locals())
assert False == ('module2' in globals())
def test_module_type():
assert "module" == type(placeholders).__name__
def test_module_is_an_object():
assert 9 == len(dir(placeholders)) ###['__', '___', '__all__', '__author__', '__builtins__', '__doc__', '__file__', '__name__', '__package__']
assert "placeholders" == placeholders.__name__
assert None == placeholders.__doc__
def test_module_from_import():
from module1 import greet
assert False == ('module1' in locals())
assert True == ('greet' in locals())
try:
module1.greet()
except NameError as ne :
pass
assert "module1 says hi to jack" == greet("jack")
def test_module_why_from_import_is_a_bad_idea():
from module1 import greet
    from module2 import greet  ########### the latest import statement takes precedence
assert "module2 says hi to jack" == greet("jack")
def test_modules_are_cached():
import module1
import module1 as new_name
def inner():
import module1
return module1.some_attr
try:
inner() #'module1' object has no attribute 'some_attr'
except AttributeError as ae: # what exception do you get here?
pass
module1.some_attr = 10
assert 10 == inner()
def inner2():
import module1
return module1.some_attr
assert 10 == inner2()
assert "dict" == type(sys.modules).__name__
assert True == (module1 is sys.modules['module1'])
assert False == ('new_name' in sys.modules)
assert True == (new_name is module1)
assert True == (new_name is sys.modules['module1'])
s1 = set()
s2 = set()
s3 = set()
s1 = set(dir())
from module3 import *
s2 = set(dir())
from module4 import *
s3 = set(dir())
def test_module_star_import(): ###############DOUBT######################
# * imports are not allowed within functions, so we had to do it at global scope
assert set(["m3_func2","m3_func1"]) == (s2 - s1) # what did module3 import bring in.
assert set(["_m4_func3","m4_func1"]) == (s3 - s2) # what did module4 import bring in.
notes_2 = '''
http://effbot.org/zone/import-confusion.htm
'''
three_things_i_learnt = """
-
-
-
"""
| 30.151079
| 147
| 0.645669
|
c8025d8a01fec6071f49adb91e14661c071a49e9
| 1,191
|
py
|
Python
|
nm/code/tochenDeepSEA.py
|
wzthu/NeuronMotif
|
0f7f786e4b75916039388824d04d2041747fd299
|
[
"MIT"
] | null | null | null |
nm/code/tochenDeepSEA.py
|
wzthu/NeuronMotif
|
0f7f786e4b75916039388824d04d2041747fd299
|
[
"MIT"
] | null | null | null |
nm/code/tochenDeepSEA.py
|
wzthu/NeuronMotif
|
0f7f786e4b75916039388824d04d2041747fd299
|
[
"MIT"
] | 2
|
2021-09-13T06:59:36.000Z
|
2021-11-10T00:17:25.000Z
|
import h5py
import numpy as np
import sys
layer = str(sys.argv[1])
pfmsf = h5py.File('layer' + str(layer)+ '/allppm.h5','r')
pfms=pfmsf['allppm'][:]
act = pfmsf['act'][:]
conact = pfmsf['conact'][:]
spnumb = pfmsf['spnumb'][:]
pfmsf.close()
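# The loop below writes each PPM out in a simple text ("chen") motif format: a
# header line that appears to encode the PPM index, the sample count and two
# activation statistics, followed by one tab-separated line per position with
# the base frequencies scaled to counts out of 1000. The column permutation
# [0, 2, 1, 3] reorders the base columns before writing; the intended base
# order is not documented in this script.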
lines=[]
for i in range(int(pfms.shape[0])):
pfm = pfms[i,:,]
if (pfm != 0.25).sum()==0:
continue
lines.append('>%04d_%d_%.4f_%.4f\n' %(i,spnumb[i], act[i], conact[i]))
for j in range(pfm.shape[0]):
lines.append('\t'.join(list(np.array(np.array(pfm[j,[0,2,1,3]]*1000,dtype=int),dtype=str))) + '\n')
lines.append('\n')
with open('layer' + str(layer)+'/ppm.chen' , 'w') as f:
f.writelines(lines)
import os
os.mkdir('layer' + str(layer)+'/vis')
for i in range(int(pfms.shape[0])):
lines=[]
pfm = pfms[i,:,]
if (pfm != 0.25).sum()==0:
continue
lines.append('>%04d_%d_%.4f_%.4f\n' %(i,spnumb[i], act[i], conact[i]))
for j in range(pfm.shape[0]):
lines.append('\t'.join(list(np.array(np.array(pfm[j,[0,2,1,3]]*1000,dtype=int),dtype=str))) + '\n')
lines.append('\n')
with open('layer' + str(layer)+'/vis/'+str(i)+'.chen' , 'w') as f:
f.writelines(lines)
| 27.068182
| 107
| 0.565911
|
0f965a87ebe914eb6690cea2d57593f7c32a36c3
| 21,109
|
py
|
Python
|
tests/tracer/test_span.py
|
ganeshkumarsv/dd-trace-py
|
0665507ecfd95a4c247c1d789321f9ab5004977f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/tracer/test_span.py
|
ganeshkumarsv/dd-trace-py
|
0665507ecfd95a4c247c1d789321f9ab5004977f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 9
|
2021-07-26T01:22:38.000Z
|
2022-03-21T19:20:53.000Z
|
tests/tracer/test_span.py
|
ganeshkumarsv/dd-trace-py
|
0665507ecfd95a4c247c1d789321f9ab5004977f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-08-03T12:41:49.000Z
|
2021-08-03T12:41:49.000Z
|
# -*- coding: utf-8 -*-
import re
import sys
import time
from unittest.case import SkipTest
import mock
import pytest
import six
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import ENV_KEY
from ddtrace.constants import SERVICE_VERSION_KEY
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.ext import SpanTypes
from ddtrace.ext import errors
from ddtrace.span import Span
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from tests.utils import assert_is_not_measured
from tests.utils import override_global_config
class SpanTestCase(TracerTestCase):
def test_ids(self):
s = Span(tracer=None, name="span.test")
assert s.trace_id
assert s.span_id
assert not s.parent_id
s2 = Span(tracer=None, name="t", trace_id=1, span_id=2, parent_id=1)
assert s2.trace_id == 1
assert s2.span_id == 2
assert s2.parent_id == 1
def test_tags(self):
s = Span(tracer=None, name="test.span")
s.set_tag("a", "a")
s.set_tag("b", 1)
s.set_tag("c", "1")
d = s.to_dict()
assert d["meta"] == dict(a="a", c="1")
assert d["metrics"] == dict(b=1)
def test_numeric_tags(self):
s = Span(tracer=None, name="test.span")
s.set_tag("negative", -1)
s.set_tag("zero", 0)
s.set_tag("positive", 1)
s.set_tag("large_int", 2 ** 53)
s.set_tag("really_large_int", (2 ** 53) + 1)
s.set_tag("large_negative_int", -(2 ** 53))
s.set_tag("really_large_negative_int", -((2 ** 53) + 1))
s.set_tag("float", 12.3456789)
s.set_tag("negative_float", -12.3456789)
s.set_tag("large_float", 2.0 ** 53)
s.set_tag("really_large_float", (2.0 ** 53) + 1)
d = s.to_dict()
assert d["meta"] == dict(
really_large_int=str(((2 ** 53) + 1)),
really_large_negative_int=str(-((2 ** 53) + 1)),
)
assert d["metrics"] == {
"negative": -1,
"zero": 0,
"positive": 1,
"large_int": 2 ** 53,
"large_negative_int": -(2 ** 53),
"float": 12.3456789,
"negative_float": -12.3456789,
"large_float": 2.0 ** 53,
"really_large_float": (2.0 ** 53) + 1,
}
def test_set_tag_bool(self):
s = Span(tracer=None, name="test.span")
s.set_tag("true", True)
s.set_tag("false", False)
d = s.to_dict()
assert d["meta"] == dict(true="True", false="False")
assert "metrics" not in d
def test_set_tag_metric(self):
s = Span(tracer=None, name="test.span")
s.set_tag("test", "value")
assert s.meta == dict(test="value")
assert s.metrics == dict()
s.set_tag("test", 1)
assert s.meta == dict()
assert s.metrics == dict(test=1)
def test_set_valid_metrics(self):
s = Span(tracer=None, name="test.span")
s.set_metric("a", 0)
s.set_metric("b", -12)
s.set_metric("c", 12.134)
s.set_metric("d", 1231543543265475686787869123)
s.set_metric("e", "12.34")
d = s.to_dict()
expected = {
"a": 0,
"b": -12,
"c": 12.134,
"d": 1231543543265475686787869123,
"e": 12.34,
}
assert d["metrics"] == expected
def test_set_invalid_metric(self):
s = Span(tracer=None, name="test.span")
invalid_metrics = [None, {}, [], s, "quarante-douze", float("nan"), float("inf"), 1j]
for i, m in enumerate(invalid_metrics):
k = str(i)
s.set_metric(k, m)
assert s.get_metric(k) is None
def test_set_numpy_metric(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
s = Span(tracer=None, name="test.span")
s.set_metric("a", np.int64(1))
assert s.get_metric("a") == 1
assert type(s.get_metric("a")) == float
def test_tags_not_string(self):
# ensure we can cast as strings
class Foo(object):
def __repr__(self):
1 / 0
s = Span(tracer=None, name="test.span")
s.set_tag("a", Foo())
def test_finish(self):
# ensure span.finish() marks the end time of the span
s = Span(None, "test.span")
sleep = 0.05
time.sleep(sleep)
s.finish()
assert s.duration >= sleep, "%s < %s" % (s.duration, sleep)
def test_finish_no_tracer(self):
# ensure finish works with no tracer without raising exceptions
s = Span(tracer=None, name="test.span")
s.finish()
def test_finish_called_multiple_times(self):
# we should only record a span the first time finish is called on it
s = Span(self.tracer, "bar")
s.finish()
s.finish()
def test_finish_set_span_duration(self):
# If set the duration on a span, the span should be recorded with this
# duration
s = Span(tracer=None, name="test.span")
s.duration = 1337.0
s.finish()
assert s.duration == 1337.0
def test_setter_casts_duration_ns_as_int(self):
s = Span(tracer=None, name="test.span")
s.duration = 3.2
s.finish()
assert s.duration == 3.2
assert s.duration_ns == 3200000000
assert isinstance(s.duration_ns, int)
def test_get_span_returns_none_by_default(self):
s = Span(tracer=None, name="test.span")
assert s.duration is None
def test_traceback_with_error(self):
s = Span(None, "test.span")
try:
1 / 0
except ZeroDivisionError:
s.set_traceback()
else:
assert 0, "should have failed"
assert s.error
assert "by zero" in s.get_tag(errors.ERROR_MSG)
assert "ZeroDivisionError" in s.get_tag(errors.ERROR_TYPE)
def test_traceback_without_error(self):
s = Span(None, "test.span")
s.set_traceback()
assert not s.error
assert not s.get_tag(errors.ERROR_MSG)
assert not s.get_tag(errors.ERROR_TYPE)
assert "in test_traceback_without_error" in s.get_tag(errors.ERROR_STACK)
def test_ctx_mgr(self):
s = Span(self.tracer, "bar")
assert not s.duration
assert not s.error
e = Exception("boo")
try:
with s:
time.sleep(0.01)
raise e
except Exception as out:
assert out == e
assert s.duration > 0, s.duration
assert s.error
assert s.get_tag(errors.ERROR_MSG) == "boo"
assert "Exception" in s.get_tag(errors.ERROR_TYPE)
assert s.get_tag(errors.ERROR_STACK)
else:
assert 0, "should have failed"
def test_span_type(self):
s = Span(tracer=None, name="test.span", service="s", resource="r", span_type=SpanTypes.WEB)
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "web"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_to_dict(self):
s = Span(tracer=None, name="test.span", service="s", resource="r")
s.span_type = "foo"
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "foo"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_to_dict_sub(self):
parent = Span(tracer=None, name="test.span", service="s", resource="r")
s = Span(tracer=None, name="test.span", service="s", resource="r")
s._parent = parent
s.span_type = "foo"
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "foo"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_boolean_err(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r")
s.error = True
s.finish()
d = s.to_dict()
assert d
assert d["error"] == 1
assert type(d["error"]) == int
@mock.patch("ddtrace.span.log")
def test_numeric_tags_none(self, span_log):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, None)
d = s.to_dict()
assert d
assert "metrics" not in d
# Ensure we log a debug message
span_log.debug.assert_called_once_with(
"ignoring not number metric %s:%s",
ANALYTICS_SAMPLE_RATE_KEY,
None,
)
def test_numeric_tags_true(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, True)
d = s.to_dict()
assert d
expected = {ANALYTICS_SAMPLE_RATE_KEY: 1.0}
assert d["metrics"] == expected
def test_numeric_tags_value(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 0.5)
d = s.to_dict()
assert d
expected = {ANALYTICS_SAMPLE_RATE_KEY: 0.5}
assert d["metrics"] == expected
def test_numeric_tags_bad_value(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, "Hello")
d = s.to_dict()
assert d
assert "metrics" not in d
def test_set_tag_none(self):
s = Span(tracer=None, name="root.span", service="s", resource="r")
assert s.meta == dict()
s.set_tag("custom.key", "100")
assert s.meta == {"custom.key": "100"}
s.set_tag("custom.key", None)
assert s.meta == {"custom.key": "None"}
def test_duration_zero(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
s.finish(finish_time=123)
assert s.duration_ns == 0
assert s.duration == 0
def test_start_int(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
assert s.start == 123
assert s.start_ns == 123000000000
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.123)
assert s.start == 123.123
assert s.start_ns == 123123000000
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.123)
s.start = 234567890.0
assert s.start == 234567890
assert s.start_ns == 234567890000000000
def test_duration_int(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r")
s.finish()
assert isinstance(s.duration_ns, int)
assert isinstance(s.duration, float)
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
s.finish(finish_time=123.2)
assert s.duration_ns == 200000000
assert s.duration == 0.2
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.1)
s.finish(finish_time=123.2)
assert s.duration_ns == 100000000
assert s.duration == 0.1
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=122)
s.finish(finish_time=123)
assert s.duration_ns == 1000000000
assert s.duration == 1
def test_set_tag_version(self):
s = Span(tracer=None, name="test.span")
s.set_tag(VERSION_KEY, "1.2.3")
assert s.get_tag(VERSION_KEY) == "1.2.3"
assert s.get_tag(SERVICE_VERSION_KEY) is None
s.set_tag(SERVICE_VERSION_KEY, "service.version")
assert s.get_tag(VERSION_KEY) == "service.version"
assert s.get_tag(SERVICE_VERSION_KEY) == "service.version"
def test_set_tag_env(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ENV_KEY, "prod")
assert s.get_tag(ENV_KEY) == "prod"
@pytest.mark.parametrize(
"value,assertion",
[
(None, assert_is_measured),
(1, assert_is_measured),
(1.0, assert_is_measured),
(-1, assert_is_measured),
(True, assert_is_measured),
("true", assert_is_measured),
# DEV: Ends up being measured because we do `bool("false")` which is `True`
("false", assert_is_measured),
(0, assert_is_not_measured),
(0.0, assert_is_not_measured),
(False, assert_is_not_measured),
],
)
def test_set_tag_measured(value, assertion):
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY, value)
assertion(s)
def test_set_tag_measured_not_set():
# Span is not measured by default
s = Span(tracer=None, name="test.span")
assert_is_not_measured(s)
def test_set_tag_measured_no_value():
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY)
assert_is_measured(s)
def test_set_tag_measured_change_value():
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY, True)
assert_is_measured(s)
s.set_tag(SPAN_MEASURED_KEY, False)
assert_is_not_measured(s)
s.set_tag(SPAN_MEASURED_KEY)
assert_is_measured(s)
@mock.patch("ddtrace.span.log")
def test_span_key(span_log):
# Span tag keys must be strings
s = Span(tracer=None, name="test.span")
s.set_tag(123, True)
span_log.warning.assert_called_once_with("Ignoring tag pair %s:%s. Key must be a string.", 123, True)
assert s.get_tag(123) is None
assert s.get_tag("123") is None
span_log.reset_mock()
s.set_tag(None, "val")
span_log.warning.assert_called_once_with("Ignoring tag pair %s:%s. Key must be a string.", None, "val")
assert s.get_tag(123.32) is None
def test_span_finished():
span = Span(None, None)
assert span.finished is False
assert span.duration_ns is None
span.finished = True
assert span.finished is True
assert span.duration_ns is not None
duration = span.duration_ns
span.finished = True
assert span.finished is True
assert span.duration_ns == duration
span.finished = False
assert span.finished is False
span.finished = True
assert span.finished is True
assert span.duration_ns != duration
def test_span_unicode_set_tag():
span = Span(None, None)
span.set_tag("key", u"😌")
span.set_tag("😐", u"😌")
span._set_str_tag("key", u"😌")
span._set_str_tag(u"😐", u"😌")
@pytest.mark.skipif(sys.version_info.major != 2, reason="This test only applies Python 2")
@mock.patch("ddtrace.span.log")
def test_span_binary_unicode_set_tag(span_log):
span = Span(None, None)
span.set_tag("key", "🤔")
span._set_str_tag("key_str", "🤔")
# only span.set_tag() will fail
span_log.warning.assert_called_once_with("error setting tag %s, ignoring it", "key", exc_info=True)
assert "key" not in span.meta
assert span.meta["key_str"] == u"🤔"
@pytest.mark.skipif(sys.version_info.major == 2, reason="This test does not apply to Python 2")
@mock.patch("ddtrace.span.log")
def test_span_bytes_string_set_tag(span_log):
span = Span(None, None)
span.set_tag("key", b"\xf0\x9f\xa4\x94")
span._set_str_tag("key_str", b"\xf0\x9f\xa4\x94")
assert span.meta["key"] == "b'\\xf0\\x9f\\xa4\\x94'"
assert span.meta["key_str"] == "🤔"
span_log.warning.assert_not_called()
@mock.patch("ddtrace.span.log")
def test_span_encoding_set_str_tag(span_log):
span = Span(None, None)
span._set_str_tag("foo", u"/?foo=bar&baz=정상처리".encode("euc-kr"))
span_log.warning.assert_not_called()
assert span.meta["foo"] == u"/?foo=bar&baz=����ó��"
def test_span_nonstring_set_str_tag_exc():
span = Span(None, None)
with pytest.raises(TypeError):
span._set_str_tag("foo", dict(a=1))
assert "foo" not in span.meta
@mock.patch("ddtrace.span.log")
def test_span_nonstring_set_str_tag_warning(span_log):
with override_global_config(dict(_raise=False)):
span = Span(None, None)
span._set_str_tag("foo", dict(a=1))
span_log.warning.assert_called_once_with(
"Failed to set text tag '%s'",
"foo",
exc_info=True,
)
def test_span_ignored_exceptions():
s = Span(None, None)
s._ignore_exception(ValueError)
with pytest.raises(ValueError):
with s:
raise ValueError()
assert s.error == 0
assert s.get_tag(errors.ERROR_MSG) is None
assert s.get_tag(errors.ERROR_TYPE) is None
assert s.get_tag(errors.ERROR_STACK) is None
s = Span(None, None)
s._ignore_exception(ValueError)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 1
assert s.get_tag(errors.ERROR_MSG) is not None
assert "RuntimeError" in s.get_tag(errors.ERROR_TYPE)
assert s.get_tag(errors.ERROR_STACK) is not None
def test_span_ignored_exception_multi():
s = Span(None, None)
s._ignore_exception(ValueError)
s._ignore_exception(RuntimeError)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 0
assert s.get_tag(errors.ERROR_MSG) is None
assert s.get_tag(errors.ERROR_TYPE) is None
assert s.get_tag(errors.ERROR_STACK) is None
def test_span_ignored_exception_subclass():
s = Span(None, None)
s._ignore_exception(Exception)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 0
assert s.get_tag(errors.ERROR_MSG) is None
assert s.get_tag(errors.ERROR_TYPE) is None
assert s.get_tag(errors.ERROR_STACK) is None
def test_on_finish_single_callback():
m = mock.Mock()
s = Span(None, "test", on_finish=[m])
m.assert_not_called()
s.finish()
m.assert_called_once_with(s)
def test_on_finish_multi_callback():
m1 = mock.Mock()
m2 = mock.Mock()
s = Span(None, "test", on_finish=[m1, m2])
s.finish()
m1.assert_called_once_with(s)
m2.assert_called_once_with(s)
@pytest.mark.parametrize("arg", ["span_id", "trace_id", "parent_id"])
def test_span_preconditions(arg):
Span(None, "test", **{arg: None})
with pytest.raises(TypeError):
Span(None, "test", **{arg: "foo"})
def test_span_pprint():
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.set_tag("t", "v")
root.set_metric("m", 1.0)
root.finish()
actual = root.pprint()
assert "name='test.span'" in actual
assert "service='s'" in actual
assert "resource='r'" in actual
assert "type='web'" in actual
assert "error=0" in actual
assert ("tags={'t': 'v'}" if six.PY3 else "tags={'t': u'v'}") in actual
assert "metrics={'m': 1.0}" in actual
assert re.search("id=[0-9]+", actual) is not None
assert re.search("trace_id=[0-9]+", actual) is not None
assert "parent_id=None" in actual
assert re.search("duration=[0-9.]+", actual) is not None
assert re.search("start=[0-9.]+", actual) is not None
assert re.search("end=[0-9.]+", actual) is not None
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
actual = root.pprint()
assert "duration=None" in actual
assert "end=None" in actual
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.error = 1
actual = root.pprint()
assert "error=1" in actual
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.set_tag(u"😌", u"😌")
actual = root.pprint()
assert (u"tags={'😌': '😌'}" if six.PY3 else "tags={u'\\U0001f60c': u'\\U0001f60c'}") in actual
root = Span(None, "test.span", service=object())
actual = root.pprint()
assert "service=<object object at" in actual
def test_manual_context_usage():
span1 = Span(None, "span1")
span2 = Span(None, "span2", context=span1.context)
span2.context.sampling_priority = 2
assert span1.context.sampling_priority == 2
span1.context.sampling_priority = 1
assert span2.context.sampling_priority == 1
assert span1.context.sampling_priority == 1
| 31.180207
| 107
| 0.605761
|
babe227403b3e4c5e4c798bc3ff786dcdfee37eb
| 21,018
|
py
|
Python
|
src/twisted/scripts/trial.py
|
muelli/twisted
|
eacc5964187aebf5c34fa255c7e0a3700eaab15a
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/scripts/trial.py
|
muelli/twisted
|
eacc5964187aebf5c34fa255c7e0a3700eaab15a
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/scripts/trial.py
|
muelli/twisted
|
eacc5964187aebf5c34fa255c7e0a3700eaab15a
|
[
"MIT",
"Unlicense"
] | null | null | null |
# -*- test-case-name: twisted.trial.test.test_script -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import gc
import inspect
import os
import pdb
import random
import sys
import time
import warnings
from twisted.internet import defer
from twisted.application import app
from twisted.python import usage, reflect, failure
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedModule
from twisted import plugin
from twisted.trial import runner, itrial, reporter
# Yea, this is stupid. Leave it for command-line compatibility for a
# while, though.
TBFORMAT_MAP = {
'plain': 'default',
'default': 'default',
'emacs': 'brief',
'brief': 'brief',
'cgitb': 'verbose',
'verbose': 'verbose'
}
def _parseLocalVariables(line):
"""
Accepts a single line in Emacs local variable declaration format and
returns a dict of all the variables {name: value}.
Raises ValueError if 'line' is in the wrong format.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
paren = '-*-'
    start = line.find(paren)
    end = line.rfind(paren)
    if start == -1 or end == -1 or start == end:
        raise ValueError("%r not a valid local variable declaration" % (line,))
    start += len(paren)
items = line[start:end].split(';')
localVars = {}
for item in items:
if len(item.strip()) == 0:
continue
split = item.split(':')
if len(split) != 2:
raise ValueError("%r contains invalid declaration %r"
% (line, item))
localVars[split[0].strip()] = split[1].strip()
return localVars
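# Illustration only, not part of upstream Twisted: a doctest-style sketch of how
# the parser above handles a typical Emacs mode line (nothing here runs at import time).
# >>> _parseLocalVariables(
# ...     "# -*- test-case-name: twisted.trial.test.test_script -*-")
# {'test-case-name': 'twisted.trial.test.test_script'}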
def loadLocalVariables(filename):
"""
Accepts a filename and attempts to load the Emacs variable declarations
from that file, simulating what Emacs does.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
with open(filename, "r") as f:
lines = [f.readline(), f.readline()]
for line in lines:
try:
return _parseLocalVariables(line)
except ValueError:
pass
return {}
def getTestModules(filename):
testCaseVar = loadLocalVariables(filename).get('test-case-name', None)
if testCaseVar is None:
return []
return testCaseVar.split(',')
def isTestFile(filename):
"""
Returns true if 'filename' looks like a file containing unit tests.
False otherwise. Doesn't care whether filename exists.
"""
basename = os.path.basename(filename)
return (basename.startswith('test_')
and os.path.splitext(basename)[1] == ('.py'))
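# Illustration only, not part of upstream Twisted: the check above is purely
# name-based and never touches the filesystem.
# >>> isTestFile("/src/twisted/trial/test/test_script.py")
# True
# >>> isTestFile("/src/twisted/scripts/trial.py")
# False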
def _reporterAction():
return usage.CompleteList([p.longOpt for p in
plugin.getPlugins(itrial.IReporter)])
def _maybeFindSourceLine(testThing):
"""
Try to find the source line of the given test thing.
@param testThing: the test item to attempt to inspect
@type testThing: an L{TestCase}, test method, or module, though only the
former two have a chance to succeed
@rtype: int
@return: the starting source line, or -1 if one couldn't be found
"""
# an instance of L{TestCase} -- locate the test it will run
method = getattr(testThing, "_testMethodName", None)
if method is not None:
testThing = getattr(testThing, method)
# If it's a function, we can get the line number even if the source file no
# longer exists
code = getattr(testThing, "__code__", None)
if code is not None:
return code.co_firstlineno
try:
return inspect.getsourcelines(testThing)[1]
except (IOError, TypeError):
# either testThing is a module, which raised a TypeError, or the file
# couldn't be read
return -1
# orders which can be passed to trial --order
_runOrders = {
"alphabetical" : (
"alphabetical order for test methods, arbitrary order for test cases",
runner.name),
"toptobottom" : (
"attempt to run test cases and methods in the order they were defined",
_maybeFindSourceLine),
}
def _checkKnownRunOrder(order):
"""
Check that the given order is a known test running order.
Does nothing else, since looking up the appropriate callable to sort the
tests should be done when it actually will be used, as the default argument
will not be coerced by this function.
@param order: one of the known orders in C{_runOrders}
@return: the order unmodified
"""
if order not in _runOrders:
raise usage.UsageError(
"--order must be one of: %s. See --help-orders for details" %
(", ".join(repr(order) for order in _runOrders),))
return order
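# Illustration only, not part of upstream Twisted: _checkKnownRunOrder simply
# echoes back a key that exists in _runOrders and raises UsageError otherwise.
# >>> _checkKnownRunOrder("toptobottom")
# 'toptobottom'
# >>> _checkKnownRunOrder("bogus")   # raises usage.UsageError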
class _BasicOptions:
"""
Basic options shared between trial and its local workers.
"""
longdesc = ("trial loads and executes a suite of unit tests, obtained "
"from modules, packages and files listed on the command line.")
optFlags = [["help", "h"],
["no-recurse", "N", "Don't recurse into packages"],
['help-orders', None, "Help on available test running orders"],
['help-reporters', None,
"Help on available output plugins (reporters)"],
["rterrors", "e", "realtime errors, print out tracebacks as "
"soon as they occur"],
["unclean-warnings", None,
"Turn dirty reactor errors into warnings"],
["force-gc", None, "Have Trial run gc.collect() before and "
"after each test case."],
["exitfirst", "x",
"Exit after the first non-successful result (cannot be "
"specified along with --jobs)."],
]
optParameters = [
["order", "o", None,
"Specify what order to run test cases and methods. "
"See --help-orders for more info.", _checkKnownRunOrder],
["random", "z", None,
"Run tests in random order using the specified seed"],
['temp-directory', None, '_trial_temp',
'Path to use as working directory for tests.'],
['reporter', None, 'verbose',
'The reporter to use for this test run. See --help-reporters for '
'more info.']]
compData = usage.Completions(
optActions={"order": usage.CompleteList(_runOrders),
"reporter": _reporterAction,
"logfile": usage.CompleteFiles(descr="log file name"),
"random": usage.Completer(descr="random seed")},
extraActions=[usage.CompleteFiles(
"*.py", descr="file | module | package | TestCase | testMethod",
repeat=True)],
)
fallbackReporter = reporter.TreeReporter
tracer = None
def __init__(self):
self['tests'] = []
usage.Options.__init__(self)
def getSynopsis(self):
executableName = reflect.filenameToModuleName(sys.argv[0])
if executableName.endswith('.__main__'):
executableName = '{} -m {}'.format(os.path.basename(sys.executable),
executableName.replace('.__main__', ''))
return """%s [options] [[file|package|module|TestCase|testmethod]...]
""" % (executableName,)
def coverdir(self):
"""
Return a L{FilePath} representing the directory into which coverage
results should be written.
"""
coverdir = 'coverage'
result = FilePath(self['temp-directory']).child(coverdir)
print("Setting coverage directory to %s." % (result.path,))
return result
# TODO: Some of the opt_* methods on this class have docstrings and some do
# not. This is mostly because usage.Options's currently will replace
# any intended output in optFlags and optParameters with the
# docstring. See #6427. When that is fixed, all methods should be
# given docstrings (and it should be verified that those with
# docstrings already have content suitable for printing as usage
# information).
def opt_coverage(self):
"""
Generate coverage information in the coverage file in the
directory specified by the temp-directory option.
"""
import trace
self.tracer = trace.Trace(count=1, trace=0)
sys.settrace(self.tracer.globaltrace)
self['coverage'] = True
def opt_testmodule(self, filename):
"""
Filename to grep for test cases (-*- test-case-name).
"""
# If the filename passed to this parameter looks like a test module
# we just add that to the test suite.
#
# If not, we inspect it for an Emacs buffer local variable called
# 'test-case-name'. If that variable is declared, we try to add its
# value to the test suite as a module.
#
# This parameter allows automated processes (like Buildbot) to pass
# a list of files to Trial with the general expectation of "these files,
# whatever they are, will get tested"
if not os.path.isfile(filename):
sys.stderr.write("File %r doesn't exist\n" % (filename,))
return
filename = os.path.abspath(filename)
if isTestFile(filename):
self['tests'].append(filename)
else:
self['tests'].extend(getTestModules(filename))
def opt_spew(self):
"""
Print an insanely verbose log of everything that happens. Useful
when debugging freezes or locks in complex code.
"""
from twisted.python.util import spewer
sys.settrace(spewer)
def opt_help_orders(self):
synopsis = ("Trial can attempt to run test cases and their methods in "
"a few different orders. You can select any of the "
"following options using --order=<foo>.\n")
print(synopsis)
for name, (description, _) in sorted(_runOrders.items()):
print(' ', name, '\t', description)
sys.exit(0)
def opt_help_reporters(self):
synopsis = ("Trial's output can be customized using plugins called "
"Reporters. You can\nselect any of the following "
"reporters using --reporter=<foo>\n")
print(synopsis)
for p in plugin.getPlugins(itrial.IReporter):
print(' ', p.longOpt, '\t', p.description)
sys.exit(0)
def opt_disablegc(self):
"""
Disable the garbage collector
"""
self["disablegc"] = True
gc.disable()
def opt_tbformat(self, opt):
"""
Specify the format to display tracebacks with. Valid formats are
'plain', 'emacs', and 'cgitb' which uses the nicely verbose stdlib
cgitb.text function
"""
try:
self['tbformat'] = TBFORMAT_MAP[opt]
except KeyError:
raise usage.UsageError(
"tbformat must be 'plain', 'emacs', or 'cgitb'.")
def opt_recursionlimit(self, arg):
"""
see sys.setrecursionlimit()
"""
try:
sys.setrecursionlimit(int(arg))
except (TypeError, ValueError):
raise usage.UsageError(
"argument to recursionlimit must be an integer")
else:
self["recursionlimit"] = int(arg)
def opt_random(self, option):
try:
self['random'] = int(option)
except ValueError:
raise usage.UsageError(
"Argument to --random must be a positive integer")
else:
if self['random'] < 0:
raise usage.UsageError(
"Argument to --random must be a positive integer")
elif self['random'] == 0:
self['random'] = int(time.time() * 100)
def opt_without_module(self, option):
"""
Fake the lack of the specified modules, separated with commas.
"""
self["without-module"] = option
for module in option.split(","):
if module in sys.modules:
warnings.warn("Module '%s' already imported, "
"disabling anyway." % (module,),
category=RuntimeWarning)
sys.modules[module] = None
def parseArgs(self, *args):
self['tests'].extend(args)
def _loadReporterByName(self, name):
for p in plugin.getPlugins(itrial.IReporter):
qual = "%s.%s" % (p.module, p.klass)
if p.longOpt == name:
return reflect.namedAny(qual)
raise usage.UsageError("Only pass names of Reporter plugins to "
"--reporter. See --help-reporters for "
"more info.")
def postOptions(self):
# Only load reporters now, as opposed to any earlier, to avoid letting
# application-defined plugins muck up reactor selecting by importing
# t.i.reactor and causing the default to be installed.
self['reporter'] = self._loadReporterByName(self['reporter'])
if 'tbformat' not in self:
self['tbformat'] = 'default'
if self['order'] is not None and self['random'] is not None:
raise usage.UsageError(
"You can't specify --random when using --order")
class Options(_BasicOptions, usage.Options, app.ReactorSelectionMixin):
"""
Options to the trial command line tool.
@ivar _workerFlags: List of flags which are accepted by trial distributed
workers. This is used by C{_getWorkerArguments} to build the command
line arguments.
@type _workerFlags: C{list}
    @ivar _workerParameters: List of parameters which are accepted by trial
distributed workers. This is used by C{_getWorkerArguments} to build
the command line arguments.
@type _workerParameters: C{list}
"""
optFlags = [
["debug", "b", "Run tests in a debugger. If that debugger is "
"pdb, will load '.pdbrc' from current directory if it exists."
],
["debug-stacktraces", "B", "Report Deferred creation and "
"callback stack traces"],
["nopm", None, "don't automatically jump into debugger for "
"postmorteming of exceptions"],
["dry-run", 'n', "do everything but run the tests"],
["profile", None, "Run tests under the Python profiler"],
["until-failure", "u", "Repeat test until it fails"],
]
optParameters = [
["debugger", None, "pdb", "the fully qualified name of a debugger to "
"use if --debug is passed"],
["logfile", "l", "test.log", "log file name"],
["jobs", "j", None, "Number of local workers to run"]
]
compData = usage.Completions(
optActions = {
"tbformat": usage.CompleteList(["plain", "emacs", "cgitb"]),
"reporter": _reporterAction,
},
)
_workerFlags = ["disablegc", "force-gc", "coverage"]
_workerParameters = ["recursionlimit", "reactor", "without-module"]
fallbackReporter = reporter.TreeReporter
extra = None
tracer = None
def opt_jobs(self, number):
"""
Number of local workers to run, a strictly positive integer.
"""
try:
number = int(number)
except ValueError:
raise usage.UsageError(
"Expecting integer argument to jobs, got '%s'" % number)
if number <= 0:
raise usage.UsageError(
"Argument to jobs must be a strictly positive integer")
self["jobs"] = number
def _getWorkerArguments(self):
"""
Return a list of options to pass to distributed workers.
"""
args = []
for option in self._workerFlags:
if self.get(option) is not None:
if self[option]:
args.append("--%s" % (option,))
for option in self._workerParameters:
if self.get(option) is not None:
args.extend(["--%s" % (option,), str(self[option])])
return args
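    # Illustration only, not part of upstream Twisted: assuming a parse such as
    #   config = Options(); config.parseOptions(["--force-gc", "--recursionlimit", "2000"])
    # the method above would return ['--force-gc', '--recursionlimit', '2000'],
    # since only the flags and parameters listed in _workerFlags/_workerParameters
    # that were actually set are forwarded to the workers.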
def postOptions(self):
_BasicOptions.postOptions(self)
if self['jobs']:
conflicts = ['debug', 'profile', 'debug-stacktraces', 'exitfirst']
for option in conflicts:
if self[option]:
raise usage.UsageError(
"You can't specify --%s when using --jobs" % option)
if self['nopm']:
if not self['debug']:
raise usage.UsageError("You must specify --debug when using "
"--nopm ")
failure.DO_POST_MORTEM = False
def _initialDebugSetup(config):
# do this part of debug setup first for easy debugging of import failures
if config['debug']:
failure.startDebugMode()
if config['debug'] or config['debug-stacktraces']:
defer.setDebugging(True)
def _getSuite(config):
loader = _getLoader(config)
recurse = not config['no-recurse']
return loader.loadByNames(config['tests'], recurse=recurse)
def _getLoader(config):
loader = runner.TestLoader()
if config['random']:
randomer = random.Random()
randomer.seed(config['random'])
loader.sorter = lambda x : randomer.random()
print('Running tests shuffled with seed %d\n' % config['random'])
elif config['order']:
_, sorter = _runOrders[config['order']]
loader.sorter = sorter
if not config['until-failure']:
loader.suiteFactory = runner.DestructiveTestSuite
return loader
def _wrappedPdb():
"""
Wrap an instance of C{pdb.Pdb} with readline support and load any .rcs.
"""
dbg = pdb.Pdb()
try:
namedModule('readline')
except ImportError:
print("readline module not available")
for path in ('.pdbrc', 'pdbrc'):
if os.path.exists(path):
try:
rcFile = open(path, 'r')
except IOError:
pass
else:
with rcFile:
dbg.rcLines.extend(rcFile.readlines())
return dbg
class _DebuggerNotFound(Exception):
"""
A debugger import failed.
Used to allow translating these errors into usage error messages.
"""
def _makeRunner(config):
"""
Return a trial runner class set up with the parameters extracted from
C{config}.
@return: A trial runner instance.
@rtype: L{runner.TrialRunner} or C{DistTrialRunner} depending on the
configuration.
"""
cls = runner.TrialRunner
args = {'reporterFactory': config['reporter'],
'tracebackFormat': config['tbformat'],
'realTimeErrors': config['rterrors'],
'uncleanWarnings': config['unclean-warnings'],
'logfile': config['logfile'],
'workingDirectory': config['temp-directory']}
if config['dry-run']:
args['mode'] = runner.TrialRunner.DRY_RUN
elif config['jobs']:
from twisted.trial._dist.disttrial import DistTrialRunner
cls = DistTrialRunner
args['workerNumber'] = config['jobs']
args['workerArguments'] = config._getWorkerArguments()
else:
if config['debug']:
args['mode'] = runner.TrialRunner.DEBUG
debugger = config['debugger']
if debugger != 'pdb':
try:
args['debugger'] = reflect.namedAny(debugger)
except reflect.ModuleNotFound:
raise _DebuggerNotFound(
'%r debugger could not be found.' % (debugger,))
else:
args['debugger'] = _wrappedPdb()
args['exitFirst'] = config['exitfirst']
args['profile'] = config['profile']
args['forceGarbageCollection'] = config['force-gc']
return cls(**args)
def run():
if len(sys.argv) == 1:
sys.argv.append("--help")
config = Options()
try:
config.parseOptions()
except usage.error as ue:
raise SystemExit("%s: %s" % (sys.argv[0], ue))
_initialDebugSetup(config)
try:
trialRunner = _makeRunner(config)
except _DebuggerNotFound as e:
raise SystemExit('%s: %s' % (sys.argv[0], str(e)))
suite = _getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir().path)
sys.exit(not test_result.wasSuccessful())
| 33.57508
| 87
| 0.590161
|
990f6d9206d940139506ec36eee5338795b247e7
| 4,682
|
py
|
Python
|
backend/cvs.py
|
nhnb/postsai
|
6e906d9c332c7b2346abdb98f00264ced28c8e36
|
[
"MIT"
] | 5
|
2016-02-19T19:51:33.000Z
|
2020-03-23T00:41:40.000Z
|
backend/cvs.py
|
nhnb/postsai
|
6e906d9c332c7b2346abdb98f00264ced28c8e36
|
[
"MIT"
] | 37
|
2016-02-20T01:40:25.000Z
|
2018-07-03T22:22:09.000Z
|
backend/cvs.py
|
nhnb/postsai
|
6e906d9c332c7b2346abdb98f00264ced28c8e36
|
[
"MIT"
] | 5
|
2016-02-19T19:51:37.000Z
|
2016-03-22T12:25:45.000Z
|
# The MIT License (MIT)
# Copyright (c) 2016-2021 Postsai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import cgi
import json
import sys
import subprocess
from backend.db import PostsaiDB
def convert_to_builtin_type(obj):
"""return a string representation for JSON conversation"""
return str(obj)
class PostsaiCommitViewer:
"""Reads a commit from a repository"""
def __init__(self, config):
"""Creates a PostsaiCommitViewer instance"""
self.config = config
def read_commit(self, form):
"""reads a commmit from the database"""
db = PostsaiDB(self.config)
db.connect()
sql = """SELECT repositories.repository, checkins.ci_when, people.who,
trim(leading '/' from concat(concat(dirs.dir, '/'), files.file)),
revision, descs.description, commitids.hash, commitids.co_when, repository_url
FROM checkins
JOIN descs ON checkins.descid = descs.id
JOIN dirs ON checkins.dirid = dirs.id
JOIN files ON checkins.fileid = files.id
JOIN people ON checkins.whoid = people.id
JOIN repositories ON checkins.repositoryid = repositories.id
JOIN commitids ON checkins.commitid = commitids.id
WHERE repositories.repository = %s AND commitids.hash = %s """
data = [form.getfirst("repository", ""), form.getfirst("commit", "")]
result = db.query(sql, data)
db.disconnect()
return result
@staticmethod
def format_commit_header(commit):
"""Extracts the commit meta information"""
result = {
"repository": commit[0][0],
"published": commit[0][1],
"author": commit[0][2],
"description": commit[0][5],
"commit": commit[0][6],
"timestamp": commit[0][7]
}
return result
@staticmethod
def calculate_previous_cvs_revision(revision):
"""determine the CVS revision of the previous commit
which might have been on a parent branch"""
split = revision.split(".")
last = split[len(split) - 1]
if (last == "1" and len(split) > 2):
split.pop()
split.pop()
else:
split[len(split) - 1] = str(int(last) - 1)
return ".".join(split)
@staticmethod
def dump_commit_diff(commit):
"""dumps the diff generates by invoking CVS to the browser"""
for file in commit:
if file[4] == "" or "." not in file[4]:
sys.stdout.flush()
print(("Index: " + file[3] + " deleted\r"))
sys.stdout.flush()
else:
subprocess.call([
"cvs",
"-d",
file[8],
"rdiff",
"-u",
"-r",
PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),
"-r",
file[4],
file[3]])
def process(self):
"""Returns information about a commit"""
form = cgi.FieldStorage()
commit = self.read_commit(form)
print("Content-Type: text/plain; charset='utf-8'\r")
print("Cache-Control: max-age=60\r")
if form.getfirst("download", "false") == "true":
print("Content-Disposition: attachment; filename=\"patch.txt\"\r")
print("\r")
print(("#" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))
sys.stdout.flush()
PostsaiCommitViewer.dump_commit_diff(commit)
| 34.175182
| 116
| 0.607219
|
03d4789772bf6ace8c71d0931ed588307e190e5c
| 9,048
|
py
|
Python
|
tests/test_thread_twitter.py
|
berna1995/CovidDailyUpdate
|
b74c2b7340f1366cf510be352c05f7bca0078302
|
[
"MIT"
] | 1
|
2020-05-17T17:58:06.000Z
|
2020-05-17T17:58:06.000Z
|
tests/test_thread_twitter.py
|
berna1995/CovidDailyUpdateBot
|
b74c2b7340f1366cf510be352c05f7bca0078302
|
[
"MIT"
] | null | null | null |
tests/test_thread_twitter.py
|
berna1995/CovidDailyUpdateBot
|
b74c2b7340f1366cf510be352c05f7bca0078302
|
[
"MIT"
] | null | null | null |
import unittest
import random
import string
from unittest.mock import Mock
from unittest.mock import create_autospec
from bot.twitter import ThreadTwitter
from bot.twitter import MediaType
from twitter.api import CHARACTER_LIMIT
class MockStatus:
current_id = 0
def __init__(self, id):
self.id = id
@staticmethod
def new():
mocked_status = MockStatus(MockStatus.current_id)
MockStatus.current_id = MockStatus.current_id + 1
return mocked_status
def side_effect(*args, **kwargs):
return MockStatus.new()
def create_mock():
tt = ThreadTwitter(None, None, None, None)
mock = create_autospec(
tt._ThreadTwitter__api.PostUpdate, side_effect=side_effect)
tt._ThreadTwitter__api.PostUpdate = mock
return tt, mock
def random_string(string_length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(string_length))
class ThreadTwitterTest(unittest.TestCase):
def setUp(self):
MockStatus.current_id = 0
def test_set_header_too_long(self):
tt, mock = create_mock()
with self.assertRaises(ValueError):
tt.set_header(random_string(ThreadTwitter.HEADER_MAX_LENGTH + 1))
def test_set_footer_too_long(self):
tt, mock = create_mock()
with self.assertRaises(ValueError):
tt.set_footer(random_string(ThreadTwitter.FOOTER_MAX_LENGTH + 1))
def test_set_repeating_header(self):
tt, mock = create_mock()
hdr = "HEADER"
line_1 = "First line"
line_2 = "Second line"
tt.set_header(hdr, repeat=True)
tt.add_line(line_1)
tt.add_line(line_2, force_new_tweet=True)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(2, len(calls))
self.assertTrue(hdr in calls[0].args[0])
self.assertTrue(calls[0].kwargs["in_reply_to_status_id"] is None)
self.assertTrue(line_1 in calls[0].args[0])
self.assertFalse(line_2 in calls[0].args[0])
self.assertTrue(hdr in calls[1].args[0])
self.assertEqual(calls[1].kwargs["in_reply_to_status_id"], 0)
self.assertFalse(line_1 in calls[1].args[0])
self.assertTrue(line_2 in calls[1].args[0])
def test_non_repeating_header(self):
tt, mock = create_mock()
hdr = "HEADER"
line_1 = "First line"
line_2 = "Second line"
tt.set_header(hdr, repeat=False)
tt.add_line(line_1)
tt.add_line(line_2, force_new_tweet=True)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(2, len(calls))
self.assertTrue(hdr in calls[0].args[0])
self.assertTrue(calls[0].kwargs["in_reply_to_status_id"] is None)
self.assertTrue(line_1 in calls[0].args[0])
self.assertFalse(line_2 in calls[0].args[0])
self.assertFalse(hdr in calls[1].args[0])
self.assertEqual(calls[1].kwargs["in_reply_to_status_id"], 0)
self.assertFalse(line_1 in calls[1].args[0])
self.assertTrue(line_2 in calls[1].args[0])
def test_repeating_footer(self):
tt, mock = create_mock()
ftr = "FOOTER"
line_1 = "First line"
line_2 = "Second line"
tt.set_footer(ftr, repeat=True)
tt.add_line(line_1)
tt.add_line(line_2, force_new_tweet=True)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(2, len(calls))
self.assertTrue(ftr in calls[0].args[0])
self.assertTrue(calls[0].kwargs["in_reply_to_status_id"] is None)
self.assertTrue(line_1 in calls[0].args[0])
self.assertFalse(line_2 in calls[0].args[0])
self.assertTrue(ftr in calls[1].args[0])
self.assertEqual(calls[1].kwargs["in_reply_to_status_id"], 0)
self.assertFalse(line_1 in calls[1].args[0])
self.assertTrue(line_2 in calls[1].args[0])
def test_non_repeating_footer(self):
tt, mock = create_mock()
ftr = "FOOTER"
line_1 = "First line"
line_2 = "Second line"
line_3 = "Third line"
tt.set_footer(ftr, repeat=False)
tt.add_line(line_1)
tt.add_line(line_2, force_new_tweet=True)
tt.add_line(line_3, force_new_tweet=True)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(3, len(calls))
self.assertFalse(ftr in calls[0].args[0])
self.assertTrue(calls[0].kwargs["in_reply_to_status_id"] is None)
self.assertTrue(line_1 in calls[0].args[0])
self.assertFalse(ftr in calls[1].args[0])
self.assertEqual(calls[1].kwargs["in_reply_to_status_id"], 0)
self.assertTrue(line_2 in calls[1].args[0])
self.assertTrue(ftr in calls[2].args[0])
self.assertEqual(calls[2].kwargs["in_reply_to_status_id"], 1)
self.assertTrue(line_3 in calls[2].args[0])
def test_tweet_splitting_correctly(self):
tt, mock = create_mock()
hdr = "HEADER"
ftr = "FOOTER"
long_msg_1 = random_string(ThreadTwitter.LINE_MAX_LENGTH)
long_msg_2 = random_string(ThreadTwitter.LINE_MAX_LENGTH)
tt.set_header(hdr, repeat=False)
tt.set_footer(ftr, repeat=False)
tt.add_line(long_msg_1)
tt.add_line(long_msg_2)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(2, len(calls))
self.assertTrue(calls[0].kwargs["in_reply_to_status_id"] is None)
self.assertTrue(hdr in calls[0].args[0])
self.assertFalse(ftr in calls[0].args[0])
self.assertTrue(long_msg_1 in calls[0].args[0])
self.assertFalse(long_msg_2 in calls[0].args[0])
self.assertEqual(calls[1].kwargs["in_reply_to_status_id"], 0)
self.assertFalse(hdr in calls[1].args[0])
self.assertTrue(ftr in calls[1].args[0])
self.assertFalse(long_msg_1 in calls[1].args[0])
self.assertTrue(long_msg_2 in calls[1].args[0])
def test_ignoring_force_new_on_first_line(self):
tt, mock = create_mock()
tt.add_line("line")
tt.tweet()
calls = mock.call_args_list
self.assertEqual(1, len(calls))
def test_tweet_limits(self):
tt, mock = create_mock()
hdr = random_string(ThreadTwitter.HEADER_MAX_LENGTH)
ftr = random_string(ThreadTwitter.FOOTER_MAX_LENGTH)
msg = random_string(ThreadTwitter.LINE_MAX_LENGTH)
tt.set_header(hdr)
tt.set_footer(ftr)
tt.add_line(msg)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(1, len(calls))
self.assertLessEqual(len(calls[0].args[0]), CHARACTER_LIMIT)
def test_tweet_with_media_only(self):
tt, mock = create_mock()
media_file_ref = "file1.jpg"
tt.add_media(media_file_ref, MediaType.PHOTO)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(1, len(calls))
self.assertTrue("Service tweet" in calls[0].args[0])
self.assertTrue(media_file_ref in calls[0].kwargs["media"])
def test_tweet_with_more_media_than_text_tweets(self):
tt, mock = create_mock()
media1 = "file1.jpg"
media2 = "file.mov"
txt = "My Text Content"
tt.add_line(txt)
tt.add_media(media1, MediaType.PHOTO)
tt.add_media(media2, MediaType.VIDEO)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(2, len(calls))
self.assertTrue(txt in calls[0].args[0])
self.assertTrue("Service tweet" in calls[1].args[0])
self.assertEqual(1, len(calls[0].kwargs["media"]))
self.assertTrue(media1 in calls[0].kwargs["media"])
self.assertEqual(1, len(calls[1].kwargs["media"]))
self.assertTrue(media2 in calls[1].kwargs["media"])
def test_subsequent_photo_aggregation(self):
tt, mock = create_mock()
media1 = "file1.jpg"
media2 = "file2.jpg"
media3 = "file3.jpg"
media4 = "file.gif"
media5 = "file4.jpg"
tt.add_line("Line1")
tt.add_line("Line2", force_new_tweet=True)
tt.add_line("Line3", force_new_tweet=True)
tt.add_line("Line4", force_new_tweet=True)
tt.add_media(media1, MediaType.PHOTO)
tt.add_media(media2, MediaType.PHOTO)
tt.add_media(media3, MediaType.PHOTO)
tt.add_media(media4, MediaType.GIF)
tt.add_media(media5, MediaType.PHOTO)
tt.tweet()
calls = mock.call_args_list
self.assertEqual(4, len(calls))
self.assertEqual(3, len(calls[0].kwargs["media"]))
self.assertTrue(media1 in calls[0].kwargs["media"])
self.assertTrue(media2 in calls[0].kwargs["media"])
self.assertTrue(media3 in calls[0].kwargs["media"])
self.assertEqual(1, len(calls[1].kwargs["media"]))
self.assertTrue(media4 in calls[1].kwargs["media"])
self.assertEqual(1, len(calls[2].kwargs["media"]))
self.assertTrue(media5 in calls[2].kwargs["media"])
if __name__ == "__main__":
unittest.main()
| 34.934363
| 77
| 0.643015
|
83ee51286bb11b3af3da07664b68ba907d707420
| 6,004
|
py
|
Python
|
form_api/models/combine_pdfs_data.py
|
DocSpring/formapi-python
|
69fe8b14f78cae57e07a888f97c0903d5f0f545c
|
[
"MIT"
] | 2
|
2017-10-07T17:38:10.000Z
|
2017-10-10T06:14:13.000Z
|
form_api/models/combine_pdfs_data.py
|
FormAPI/formapi-python
|
69fe8b14f78cae57e07a888f97c0903d5f0f545c
|
[
"MIT"
] | null | null | null |
form_api/models/combine_pdfs_data.py
|
FormAPI/formapi-python
|
69fe8b14f78cae57e07a888f97c0903d5f0f545c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
API v1
FormAPI is a service that helps you fill out and sign PDF templates. # noqa: E501
OpenAPI spec version: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CombinePdfsData(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'test': 'bool',
'source_pdfs': 'list[object]',
'metadata': 'object',
'expires_in': 'int',
'delete_custom_files': 'bool'
}
attribute_map = {
'test': 'test',
'source_pdfs': 'source_pdfs',
'metadata': 'metadata',
'expires_in': 'expires_in',
'delete_custom_files': 'delete_custom_files'
}
def __init__(self, test=None, source_pdfs=None, metadata=None, expires_in=None, delete_custom_files=None): # noqa: E501
"""CombinePdfsData - a model defined in OpenAPI""" # noqa: E501
self._test = None
self._source_pdfs = None
self._metadata = None
self._expires_in = None
self._delete_custom_files = None
self.discriminator = None
if test is not None:
self.test = test
self.source_pdfs = source_pdfs
if metadata is not None:
self.metadata = metadata
if expires_in is not None:
self.expires_in = expires_in
if delete_custom_files is not None:
self.delete_custom_files = delete_custom_files
@property
def test(self):
"""Gets the test of this CombinePdfsData. # noqa: E501
:return: The test of this CombinePdfsData. # noqa: E501
:rtype: bool
"""
return self._test
@test.setter
def test(self, test):
"""Sets the test of this CombinePdfsData.
:param test: The test of this CombinePdfsData. # noqa: E501
:type: bool
"""
self._test = test
@property
def source_pdfs(self):
"""Gets the source_pdfs of this CombinePdfsData. # noqa: E501
:return: The source_pdfs of this CombinePdfsData. # noqa: E501
:rtype: list[object]
"""
return self._source_pdfs
@source_pdfs.setter
def source_pdfs(self, source_pdfs):
"""Sets the source_pdfs of this CombinePdfsData.
:param source_pdfs: The source_pdfs of this CombinePdfsData. # noqa: E501
:type: list[object]
"""
if source_pdfs is None:
raise ValueError("Invalid value for `source_pdfs`, must not be `None`") # noqa: E501
self._source_pdfs = source_pdfs
@property
def metadata(self):
"""Gets the metadata of this CombinePdfsData. # noqa: E501
:return: The metadata of this CombinePdfsData. # noqa: E501
:rtype: object
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this CombinePdfsData.
:param metadata: The metadata of this CombinePdfsData. # noqa: E501
:type: object
"""
self._metadata = metadata
@property
def expires_in(self):
"""Gets the expires_in of this CombinePdfsData. # noqa: E501
:return: The expires_in of this CombinePdfsData. # noqa: E501
:rtype: int
"""
return self._expires_in
@expires_in.setter
def expires_in(self, expires_in):
"""Sets the expires_in of this CombinePdfsData.
:param expires_in: The expires_in of this CombinePdfsData. # noqa: E501
:type: int
"""
self._expires_in = expires_in
@property
def delete_custom_files(self):
"""Gets the delete_custom_files of this CombinePdfsData. # noqa: E501
:return: The delete_custom_files of this CombinePdfsData. # noqa: E501
:rtype: bool
"""
return self._delete_custom_files
@delete_custom_files.setter
def delete_custom_files(self, delete_custom_files):
"""Sets the delete_custom_files of this CombinePdfsData.
:param delete_custom_files: The delete_custom_files of this CombinePdfsData. # noqa: E501
:type: bool
"""
self._delete_custom_files = delete_custom_files
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CombinePdfsData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
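# Illustration only, not part of the generated client: a minimal sketch of how this
# model serializes; the source_pdfs entry below is a made-up placeholder.
if __name__ == "__main__":
    data = CombinePdfsData(source_pdfs=[{"template_id": "tpl_x"}], test=True, expires_in=60)
    # to_dict() walks openapi_types and returns plain built-ins, so unset
    # optional fields come back as None.
    assert data.to_dict() == {
        "test": True,
        "source_pdfs": [{"template_id": "tpl_x"}],
        "metadata": None,
        "expires_in": 60,
        "delete_custom_files": None,
    }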
| 27.541284
| 124
| 0.589107
|
c47f0d9859b52311112ef7b2e4d136a421bd885c
| 569
|
py
|
Python
|
awx/main/tests/functional/test_execution_environments.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 2
|
2020-03-19T20:49:37.000Z
|
2020-05-04T14:36:11.000Z
|
awx/main/tests/functional/test_execution_environments.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 35
|
2021-03-01T06:34:26.000Z
|
2022-03-01T01:18:42.000Z
|
awx/main/tests/functional/test_execution_environments.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from awx.main.models import ExecutionEnvironment
@pytest.mark.django_db
def test_execution_environment_creation(execution_environment, organization):
execution_env = ExecutionEnvironment.objects.create(
name='Hello Environment', image='', organization=organization, managed_by_tower=False, credential=None, pull='missing'
)
assert type(execution_env) is type(execution_environment)
assert execution_env.organization == organization
assert execution_env.name == 'Hello Environment'
assert execution_env.pull == 'missing'
| 37.933333
| 126
| 0.787346
|
3c7baff3932da6a158d55e7d402991802c7022a1
| 2,595
|
py
|
Python
|
leet/prefix/numMatrix.py
|
peterlamar/python-cp-cheatsheet
|
f9f854064a3c657c04fab27d0a496401bfa97da1
|
[
"Apache-2.0"
] | 140
|
2020-10-21T13:23:52.000Z
|
2022-03-31T15:09:45.000Z
|
leet/prefix/numMatrix.py
|
stacykutyepov/python-cp-cheatsheet
|
a00a57e1b36433648d1cace331e15ff276cef189
|
[
"Apache-2.0"
] | 1
|
2021-07-22T14:01:25.000Z
|
2021-07-22T14:01:25.000Z
|
leet/prefix/numMatrix.py
|
stacykutyepov/python-cp-cheatsheet
|
a00a57e1b36433648d1cace331e15ff276cef189
|
[
"Apache-2.0"
] | 33
|
2020-10-21T14:17:02.000Z
|
2022-03-25T11:25:03.000Z
|
"""
+-----+-+-------+ +--------+-----+ +-----+---------+ +-----+--------+
| | | | | | | | | | | | |
| | | | | | | | | | | | |
+-----+-+ | +--------+ | | | | +-----+ |
| | | | = | | + | | | - | |
+-----+-+ | | | +-----+ | | |
| | | | | | | |
| | | | | | | |
+---------------+ +--------------+ +---------------+ +--------------+
sums[i][j] = sums[i-1][j] + sums[i][j-1] - sums[i-1][j-1] +
matrix[i-1][j-1]
+---------------+ +---------+----+ +---+-----------+ +---------+----+ +---+----------+
| | | | | | | | | | | | | |
| (r1,c1) | | | | | | | | | | | | |
| +------+ | | | | | | | +---------+ | +---+ |
| | | | = | | | - | | | - | (r1,c2) | + | (r1,c1) |
| | | | | | | | | | | | | |
| +------+ | +---------+ | +---+ | | | | |
| (r2,c2)| | (r2,c2)| | (r2,c1) | | | | |
+---------------+ +--------------+ +---------------+ +--------------+ +--------------+
Range Sum Query 2D - Immutable
Precompute - O(mn)
lookup O(1)
Space O(mn)
"""
from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
R = len(matrix)
C = len(matrix[0])
self.dp = [[0 for i in range(C+1)] for j in range(R+1)]
for r in range(1, R+1):
for c in range(1, C+1):
self.dp[r][c] = self.dp[r][c-1] + self.dp[r-1][c] - self.dp[r-1][c-1] + matrix[r-1][c-1]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
row1 += 1
col1 += 1
row2 += 1
col2 += 1
return self.dp[row2][col2] - self.dp[row1-1][col2] - self.dp[row2][col1-1] + self.dp[row1-1][col1-1]
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
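# Illustration only, not part of the original snippet: a tiny end-to-end check
# of the prefix-sum table described in the docstring above.
if __name__ == "__main__":
    grid = [[3, 0, 1],
            [5, 6, 3],
            [1, 2, 0]]
    nm = NumMatrix(grid)
    assert nm.sumRegion(1, 1, 2, 2) == 6 + 3 + 2 + 0   # 2x2 block, rows 1-2 / cols 1-2
    assert nm.sumRegion(0, 0, 2, 2) == 21              # whole matrix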
| 48.055556
| 108
| 0.203083
|
e8caaae4d0e63caf6ebcbde83ffbb7087ee66092
| 850
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-test-most/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/perl-test-most/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/perl-test-most/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTestMost(PerlPackage):
"""Most commonly needed test functions and features."""
homepage = "https://metacpan.org/pod/Test::Most"
url = "http://search.cpan.org/CPAN/authors/id/O/OV/OVID/Test-Most-0.35.tar.gz"
version('0.35', sha256='9897a6f4d751598d2ed1047e01c1554b01d0f8c96c45e7e845229782bf6f657f')
depends_on('perl-exception-class', type=('build', 'run'))
depends_on('perl-test-differences', type=('build', 'run'))
depends_on('perl-test-exception', type=('build', 'run'))
depends_on('perl-test-warn', type=('build', 'run'))
depends_on('perl-test-deep', type=('build', 'run'))
| 38.636364
| 94
| 0.703529
|
ecfcbd0bc8e1979f5a433c9fb9a9dde04000e5e5
| 60,167
|
py
|
Python
|
aidants_connect_web/tests/test_models.py
|
betagouv/Aidants_Connect
|
2329d41545912460c4a43b5b41c892189fc1df11
|
[
"MIT"
] | 16
|
2019-05-13T08:32:40.000Z
|
2022-03-22T13:40:57.000Z
|
aidants_connect_web/tests/test_models.py
|
betagouv/Aidants_Connect
|
2329d41545912460c4a43b5b41c892189fc1df11
|
[
"MIT"
] | 207
|
2019-05-15T16:30:52.000Z
|
2022-03-31T15:26:25.000Z
|
aidants_connect_web/tests/test_models.py
|
betagouv/Aidants_Connect
|
2329d41545912460c4a43b5b41c892189fc1df11
|
[
"MIT"
] | 12
|
2019-11-08T13:44:35.000Z
|
2022-02-14T15:38:37.000Z
|
from os.path import join as path_join
from datetime import date, datetime, timedelta
from unittest.mock import Mock
import mock
from django.db.utils import IntegrityError
from django.test import tag, TestCase
from django.utils import timezone
from django.conf import settings
from django_otp.plugins.otp_totp.models import TOTPDevice
from freezegun import freeze_time
from pytz import timezone as pytz_timezone
from aidants_connect_web.models import (
Aidant,
Autorisation,
Connection,
HabilitationRequest,
Journal,
Mandat,
Organisation,
OrganisationType,
Usager,
)
from aidants_connect_web.tests.factories import (
AidantFactory,
AttestationJournalFactory,
AutorisationFactory,
HabilitationRequestFactory,
MandatFactory,
OrganisationFactory,
OrganisationTypeFactory,
UsagerFactory,
)
from aidants_connect_web.utilities import (
generate_attestation_hash,
generate_file_sha256_hash,
validate_attestation_hash,
)
@tag("models")
class ConnectionModelTests(TestCase):
def test_saving_and_retrieving_connection(self):
first_connection = Connection()
first_connection.state = "aZeRtY"
first_connection.code = "ert"
first_connection.nonce = "varg"
first_connection.usager = UsagerFactory(given_name="Joséphine")
first_connection.save()
second_connection = Connection()
second_connection.state = "QsDfG"
second_connection.usager = UsagerFactory(given_name="Fabrice")
second_connection.save()
saved_items = Connection.objects.all()
self.assertEqual(saved_items.count(), 2)
first_saved_item = saved_items[0]
second_saved_item = saved_items[1]
self.assertEqual(first_saved_item.state, "aZeRtY")
self.assertEqual(first_saved_item.nonce, "varg")
self.assertEqual(first_saved_item.usager.given_name, "Joséphine")
self.assertEqual(second_saved_item.state, "QsDfG")
self.assertEqual(second_saved_item.usager.gender, Usager.GENDER_MALE)
@tag("models")
class UsagerModelTests(TestCase):
def test_usager_with_null_birthplace(self):
first_usager = Usager()
first_usager.given_name = "TEST NAME"
first_usager.family_name = "TEST Family Name éèà"
first_usager.preferred_username = "I prefer being called this"
first_usager.birthdate = date(1902, 6, 30)
first_usager.gender = Usager.GENDER_FEMALE
first_usager.birthplace = None
first_usager.birthcountry = Usager.BIRTHCOUNTRY_FRANCE
first_usager.email = "user@test.user"
first_usager.sub = "1233"
first_usager.save()
saved_items = Usager.objects.all()
self.assertEqual(saved_items.count(), 1)
def test_saving_and_retrieving_usager(self):
first_usager = Usager()
first_usager.given_name = "TEST NAME"
first_usager.family_name = "TEST Family Name éèà"
first_usager.preferred_username = "I prefer being called this"
first_usager.birthdate = date(1902, 6, 30)
first_usager.gender = Usager.GENDER_FEMALE
first_usager.birthplace = "27681"
first_usager.birthcountry = Usager.BIRTHCOUNTRY_FRANCE
first_usager.email = "user@test.user"
first_usager.sub = "1233"
first_usager.save()
second_usager = Usager()
second_usager.given_name = "TEST SECOND NAME"
second_usager.family_name = "TEST Family Name éèà"
second_usager.preferred_username = "I prefer being called this"
second_usager.birthdate = date(1945, 10, 20)
second_usager.gender = Usager.GENDER_MALE
second_usager.birthplace = "84016"
second_usager.birthcountry = Usager.BIRTHCOUNTRY_FRANCE
second_usager.email = "other_user@test.user"
second_usager.sub = "1234"
second_usager.save()
saved_items = Usager.objects.all()
self.assertEqual(saved_items.count(), 2)
first_saved_item = saved_items[0]
second_saved_item = saved_items[1]
self.assertEqual(first_saved_item.given_name, "TEST NAME")
self.assertEqual(str(first_saved_item.birthdate), "1902-06-30")
self.assertEqual(second_saved_item.family_name, "TEST Family Name éèà")
self.assertEqual(second_usager.sub, "1234")
def test_normalize_birthplace(self):
usager = UsagerFactory(birthplace="123")
usager.normalize_birthplace()
self.assertEqual(usager.birthplace, "00123")
usager = UsagerFactory(birthplace="1234")
usager.normalize_birthplace()
self.assertEqual(usager.birthplace, "01234")
usager = UsagerFactory(birthplace="12345")
usager.normalize_birthplace()
self.assertEqual(usager.birthplace, "12345")
def test_active_usager_excludes_usager_with_revoked_mandats(self):
usager = UsagerFactory()
mandat_1 = MandatFactory(usager=usager)
AutorisationFactory(
mandat=mandat_1,
demarche="justice",
revocation_date=timezone.now() - timedelta(minutes=1),
)
usagers = Usager.objects.all()
self.assertEqual(usagers.count(), 1)
self.assertEqual(usagers.active().count(), 0)
def test_active_usager_excludes_usager_with_expired_mandats(self):
usager = UsagerFactory()
mandat_1 = MandatFactory(
usager=usager, expiration_date=timezone.now() - timedelta(minutes=1)
)
AutorisationFactory(mandat=mandat_1, demarche="justice")
usagers = Usager.objects.all()
self.assertEqual(usagers.count(), 1)
self.assertEqual(usagers.active().count(), 0)
@tag("models")
class MandatModelTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.organisation_1 = OrganisationFactory()
cls.aidant_1 = AidantFactory()
cls.usager_1 = UsagerFactory()
cls.mandat_1 = Mandat.objects.create(
organisation=cls.organisation_1,
usager=cls.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=cls.mandat_1,
demarche="justice",
)
cls.usager_2 = UsagerFactory(sub="anothersub")
cls.mandat_2 = Mandat.objects.create(
organisation=cls.organisation_1,
usager=cls.usager_2,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=cls.mandat_2,
demarche="argent",
)
AutorisationFactory(
mandat=cls.mandat_2,
demarche="transport",
)
def test_saving_and_retrieving_mandats(self):
self.assertEqual(Mandat.objects.count(), 2)
def test_mandat_can_have_one_autorisation(self):
self.assertEqual(len(self.mandat_1.autorisations.all()), 1)
def test_mandat_can_have_two_autorisations(self):
self.assertEqual(len(self.mandat_2.autorisations.all()), 2)
def test_active_queryset_method_exclude_fully_revoked_mandats(self):
fully_revoked_mandat = Mandat.objects.create(
organisation=self.organisation_1,
usager=self.usager_2,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=fully_revoked_mandat,
demarche="papiers",
revocation_date=timezone.now() - timedelta(minutes=1),
)
AutorisationFactory(
mandat=fully_revoked_mandat,
demarche="loisirs",
revocation_date=timezone.now() - timedelta(minutes=1),
)
active_mandats = Mandat.objects.active().count()
inactive_mandats = Mandat.objects.inactive().count()
self.assertEqual(active_mandats, 2)
self.assertEqual(inactive_mandats, 1)
def test_active_queryset_method_include_partially_revoked_mandat(self):
partially_revoked_mandat = Mandat.objects.create(
organisation=self.organisation_1,
usager=self.usager_2,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=partially_revoked_mandat,
demarche="papiers",
revocation_date=timezone.now() - timedelta(minutes=1),
)
AutorisationFactory(
mandat=partially_revoked_mandat,
demarche="loisirs",
)
active_mandats = Mandat.objects.active().count()
inactive_mandats = Mandat.objects.inactive().count()
self.assertEqual(active_mandats, 3)
self.assertEqual(inactive_mandats, 0)
def test_active_queryset_method_excludes_expired_mandat(self):
expired_mandat = Mandat.objects.create(
organisation=self.organisation_1,
usager=self.usager_2,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() - timedelta(days=1),
)
AutorisationFactory(
mandat=expired_mandat,
demarche="papiers",
)
active_mandats = Mandat.objects.active().count()
inactive_mandats = Mandat.objects.inactive().count()
self.assertEqual(active_mandats, 2)
self.assertEqual(inactive_mandats, 1)
def test_revocation_date_valid_mandate_valid_auths(self):
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
for procedure in ["transports", "logement"]:
Autorisation.objects.create(mandat=mandate, demarche=procedure)
self.assertEqual(mandate.revocation_date, None)
def test_revocation_date_valid_mandate_one_revoked_auth(self):
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
Autorisation.objects.create(
mandat=mandate, demarche="transports", revocation_date=timezone.now()
)
Autorisation.objects.create(mandat=mandate, demarche="logement")
self.assertEqual(mandate.revocation_date, None)
def test_revocation_date_valid_mandate_all_revoked_auths(self):
revocation_date = timezone.now()
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
for procedure in ["transports", "logement"]:
Autorisation.objects.create(
mandat=mandate, demarche=procedure, revocation_date=revocation_date
)
self.assertEqual(mandate.revocation_date, revocation_date)
def was_explicitly_revoked_valid_mandate_valid_auths(self):
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
for procedure in ["transports", "logement"]:
Autorisation.objects.create(mandat=mandate, demarche=procedure)
self.assertEqual(mandate.was_explicitly_revoked, False)
def was_explicitly_revoked_valid_mandate_one_revoked_auth(self):
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
Autorisation.objects.create(
mandat=mandate, demarche="transports", revocation_date=timezone.now()
)
Autorisation.objects.create(mandat=mandate, demarche="logement")
self.assertEqual(mandate.was_explicitly_revoked, False)
def was_explicitly_revoked_valid_mandate_all_revoked_auths(self):
revocation_date = timezone.now()
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=timezone.now() + timedelta(days=6),
)
for procedure in ["transports", "logement"]:
Autorisation.objects.create(
mandat=mandate, demarche=procedure, revocation_date=revocation_date
)
self.assertEqual(mandate.was_explicitly_revoked, True)
def test__get_template_path_from_journal_hash_nominal(self):
tpl_name = "20200511_mandat.html"
procedures = ["transports", "logement"]
expiration_date = timezone.now() + timedelta(days=6)
attestation_hash = generate_attestation_hash(
self.aidant_1,
self.usager_1,
procedures,
expiration_date,
mandat_template_path=path_join(settings.MANDAT_TEMPLATE_DIR, tpl_name),
)
AttestationJournalFactory(
aidant=self.aidant_1,
organisation=self.aidant_1.organisation,
usager=self.usager_1,
demarche=",".join(procedures),
attestation_hash=attestation_hash,
)
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=expiration_date,
)
for procedure in procedures:
Autorisation.objects.create(mandat=mandate, demarche=procedure)
# Add another active mandate with auths so that we have a real life example
other_mandate = MandatFactory(
organisation=self.aidant_1.organisation, usager=self.usager_1
)
AutorisationFactory(mandat=other_mandate)
result = mandate._get_mandate_template_path_from_journal_hash()
self.assertEqual(result, f"aidants_connect_web/mandat_templates/{tpl_name}")
def test__get_template_path_from_journal_hash_with_old_mandate(self):
tpl_name = "20200511_mandat.html"
procedures = ["transports", "logement"]
expiration_date = timezone.now() - timedelta(days=6)
creation_date = timezone.now() - timedelta(days=12)
attestation_hash = generate_attestation_hash(
self.aidant_1,
self.usager_1,
procedures,
expiration_date,
creation_date=creation_date.date().isoformat(),
mandat_template_path=path_join(settings.MANDAT_TEMPLATE_DIR, tpl_name),
)
AttestationJournalFactory(
aidant=self.aidant_1,
organisation=self.aidant_1.organisation,
usager=self.usager_1,
demarche=",".join(procedures),
attestation_hash=attestation_hash,
creation_date=creation_date,
)
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=creation_date,
duree_keyword="SHORT",
expiration_date=expiration_date,
)
for procedure in procedures:
Autorisation.objects.create(
mandat=mandate,
demarche=procedure,
revocation_date=timezone.now() - timedelta(days=6),
)
# Add another active mandate with auths so that we have a real life example
other_mandate = MandatFactory(
organisation=self.aidant_1.organisation, usager=self.usager_1
)
AutorisationFactory(mandat=other_mandate)
result = mandate._get_mandate_template_path_from_journal_hash()
self.assertEqual(result, f"aidants_connect_web/mandat_templates/{tpl_name}")
def test__get_template_path_from_journal_hash_with_old_delegation(self):
tpl_name = "20200511_mandat.html"
procedures = ["transports", "logement"]
expiration_date = timezone.now() + timedelta(days=6)
attestation_hash = generate_attestation_hash(
self.aidant_1,
self.usager_1,
procedures,
expiration_date,
mandat_template_path=path_join(settings.MANDAT_TEMPLATE_DIR, tpl_name),
)
old_attestation_hash = generate_attestation_hash(
self.aidant_1,
self.usager_1,
procedures,
expiration_date,
mandat_template_path=path_join(
settings.MANDAT_TEMPLATE_DIR, "20200201_mandat.html"
),
)
AttestationJournalFactory(
aidant=self.aidant_1,
organisation=self.aidant_1.organisation,
usager=self.usager_1,
demarche=",".join(procedures),
attestation_hash=attestation_hash,
)
AttestationJournalFactory(
aidant=self.aidant_1,
organisation=self.aidant_1.organisation,
usager=self.usager_1,
demarche=",".join(procedures),
attestation_hash=old_attestation_hash,
creation_date=timezone.now() - timedelta(weeks=1),
)
mandate = Mandat.objects.create(
organisation=self.aidant_1.organisation,
usager=self.usager_1,
creation_date=timezone.now(),
duree_keyword="SHORT",
expiration_date=expiration_date,
)
for procedure in procedures:
Autorisation.objects.create(mandat=mandate, demarche=procedure)
# Add another active mandate with auths so that we have a real life example
other_mandate = MandatFactory(
organisation=self.aidant_1.organisation, usager=self.usager_1
)
AutorisationFactory(mandat=other_mandate)
result = mandate._get_mandate_template_path_from_journal_hash()
self.assertEqual(result, f"aidants_connect_web/mandat_templates/{tpl_name}")
@tag("models")
class AutorisationModelTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.aidant_marge = AidantFactory()
cls.aidant_patricia = AidantFactory()
cls.usager_homer = UsagerFactory()
cls.usager_ned = UsagerFactory(family_name="Flanders", sub="nedflanders")
cls.mandat_marge_homer_6 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_homer,
expiration_date=timezone.now() + timedelta(days=6),
)
cls.mandat_patricia_ned_6 = MandatFactory(
organisation=cls.aidant_patricia.organisation,
usager=cls.usager_ned,
expiration_date=timezone.now() + timedelta(days=6),
)
def test_saving_and_retrieving_autorisation(self):
first_autorisation = AutorisationFactory(
mandat=self.mandat_marge_homer_6,
demarche="Carte grise",
)
second_autorisation = AutorisationFactory(
mandat=self.mandat_patricia_ned_6,
demarche="Revenus",
)
self.assertEqual(Autorisation.objects.count(), 2)
self.assertEqual(
first_autorisation.mandat.organisation,
self.mandat_marge_homer_6.organisation,
)
self.assertEqual(first_autorisation.demarche, "Carte grise")
self.assertEqual(second_autorisation.mandat.usager.family_name, "Flanders")
fake_date = datetime(2019, 1, 14, tzinfo=pytz_timezone("Europe/Paris"))
@freeze_time(fake_date)
def test_autorisation_expiration_date_setting(self):
mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=self.usager_homer,
expiration_date=timezone.now() + timedelta(days=3),
)
autorisation = AutorisationFactory(
mandat=mandat,
demarche="Carte grise",
)
self.assertEqual(
autorisation.creation_date,
datetime(2019, 1, 14, tzinfo=pytz_timezone("Europe/Paris")),
)
self.assertEqual(
autorisation.mandat.expiration_date,
datetime(2019, 1, 17, tzinfo=pytz_timezone("Europe/Paris")),
)
def test_was_separately_revoked_auth_not_revoked(self):
mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=self.usager_homer,
expiration_date=timezone.now() + timedelta(days=3),
)
autorisation: Autorisation = AutorisationFactory(
mandat=mandat,
demarche="Carte grise",
)
self.assertFalse(autorisation.was_separately_revoked)
def test_was_separately_revoked_mandate_not_revoked(self):
mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=self.usager_homer,
expiration_date=timezone.now() + timedelta(days=3),
)
AutorisationFactory(
mandat=mandat,
demarche="logement",
)
autorisation: Autorisation = AutorisationFactory(
mandat=mandat, demarche="papiers", revocation_date=timezone.now()
)
self.assertTrue(autorisation.was_separately_revoked)
def test_was_separately_revoked_mandate_revoked_false(self):
mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=self.usager_homer,
expiration_date=timezone.now() + timedelta(days=3),
)
AutorisationFactory(
mandat=mandat, demarche="logement", revocation_date=timezone.now()
)
autorisation: Autorisation = AutorisationFactory(
mandat=mandat, demarche="papiers", revocation_date=timezone.now()
)
self.assertFalse(autorisation.was_separately_revoked)
def test_was_separately_revoked_mandate_revoked_true(self):
mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=self.usager_homer,
expiration_date=timezone.now() + timedelta(days=3),
)
AutorisationFactory(
mandat=mandat, demarche="logement", revocation_date=timezone.now()
)
autorisation: Autorisation = AutorisationFactory(
mandat=mandat,
demarche="papiers",
revocation_date=timezone.now() - timedelta(days=1),
)
self.assertTrue(autorisation.was_separately_revoked)
@tag("models")
class OrganisationModelTests(TestCase):
def test_create_and_retrieve_organisation(self):
self.assertEqual(OrganisationType.objects.count(), 12)
o_type = OrganisationTypeFactory(name="CCAS")
OrganisationFactory(
name="Girard S.A.R.L",
siret="123",
type=o_type,
address="3 rue du chat, 27120 Houlbec-Cocherel",
)
self.assertEqual(Organisation.objects.count(), 1)
self.assertEqual(OrganisationType.objects.count(), 13)
organisation = Organisation.objects.all()[0]
self.assertEqual(organisation.name, "Girard S.A.R.L")
self.assertEqual(organisation.type, o_type)
self.assertEqual(organisation.address, "3 rue du chat, 27120 Houlbec-Cocherel")
def test_display_address(self):
organisation_no_address = Organisation(name="L'Internationale")
organisation_address = Organisation(
name="COMMUNE D'HOULBEC COCHEREL",
siret=123,
address="45 avenue du Général de Gaulle, 27120 HOULBEC COCHEREL",
)
organisation_no_address.save()
organisation_address.save()
self.assertEqual(organisation_no_address.display_address, "")
self.assertNotEqual(
organisation_no_address.display_address,
Organisation._meta.get_field("address").default,
)
self.assertEqual(
organisation_address.display_address, organisation_address.address
)
def test_deactivate_organisation(self):
orga_one = OrganisationFactory(name="L'Internationale")
orga_two = OrganisationFactory()
aidant_marge = AidantFactory(organisation=orga_one)
aidant_lisa = AidantFactory(organisation=orga_one)
aidant_homer = AidantFactory(organisation=orga_two)
self.assertTrue(orga_one.is_active)
self.assertTrue(orga_two.is_active)
orga_one.deactivate_organisation()
orga_one.refresh_from_db()
aidant_marge.refresh_from_db()
aidant_lisa.refresh_from_db()
aidant_homer.refresh_from_db()
self.assertFalse(orga_one.is_active)
self.assertTrue(orga_two.is_active)
self.assertFalse(aidant_marge.is_active)
self.assertFalse(aidant_lisa.is_active)
self.assertTrue(aidant_homer.is_active)
def test_deactivate_organisation_with_multistruct_aidant(self):
orga_one = OrganisationFactory(name="Ouane")
orga_two = OrganisationFactory(name="Tou")
aidant_nour = AidantFactory(organisation=orga_one)
aidant_nour.organisations.add(orga_two)
self.assertTrue(orga_one.is_active)
self.assertTrue(orga_two.is_active)
self.assertTrue(aidant_nour.is_active)
self.assertEqual(aidant_nour.organisation, orga_one)
orga_one.deactivate_organisation()
orga_one.refresh_from_db()
aidant_nour.refresh_from_db()
self.assertFalse(orga_one.is_active)
self.assertTrue(orga_two.is_active)
self.assertTrue(aidant_nour.is_active)
self.assertEqual(aidant_nour.organisation, orga_two)
def test_activate_organisation(self):
orga = OrganisationFactory(name="L'Internationale", is_active=False)
orga.activate_organisation()
orga.refresh_from_db()
self.assertTrue(orga.is_active)
def test_set_empty_zipcode_from_address(self):
organisation_no_address = Organisation(name="L'Internationale")
organisation_no_address.save()
self.assertEqual("0", organisation_no_address.zipcode)
organisation_no_address.set_empty_zipcode_from_address()
organisation_no_address.refresh_from_db()
self.assertEqual("0", organisation_no_address.zipcode)
organisation_with_address = Organisation(
name="L'Internationale", address=" blaa 13013 Paris"
)
organisation_with_address.save()
self.assertEqual("0", organisation_with_address.zipcode)
organisation_with_address.set_empty_zipcode_from_address()
organisation_with_address.refresh_from_db()
self.assertEqual("13013", organisation_with_address.zipcode)
organisation_with_zipcode = Organisation(
name="L'Internationale", zipcode="75015", address=" blaa 13013 Paris"
)
organisation_with_zipcode.save()
self.assertEqual("75015", organisation_with_zipcode.zipcode)
organisation_with_zipcode.set_empty_zipcode_from_address()
organisation_with_zipcode.refresh_from_db()
self.assertEqual("75015", organisation_with_zipcode.zipcode)
def test_count_mandats(self):
def create_active_mandats(count, organisation):
for _ in range(count):
MandatFactory(
organisation=organisation,
expiration_date=timezone.now() + timedelta(days=6),
)
def create_expired_mandats(count, organisation):
for _ in range(count):
MandatFactory(
organisation=organisation,
expiration_date=timezone.now() - timedelta(days=6),
)
org_without_mandats = OrganisationFactory(name="Licornes")
self.assertEqual(0, org_without_mandats.num_mandats)
self.assertEqual(0, org_without_mandats.num_active_mandats)
org_with_active_mandats = OrganisationFactory(name="Dragons")
create_active_mandats(3, org_with_active_mandats)
self.assertEqual(3, org_with_active_mandats.num_mandats)
self.assertEqual(3, org_with_active_mandats.num_active_mandats)
org_with_active_and_inactive_mandats = OrganisationFactory(name="Libellules")
create_active_mandats(3, org_with_active_and_inactive_mandats)
create_expired_mandats(4, org_with_active_and_inactive_mandats)
self.assertEqual(7, org_with_active_and_inactive_mandats.num_mandats)
self.assertEqual(3, org_with_active_and_inactive_mandats.num_active_mandats)
def test_count_usagers(self):
def create_6_mandats_for_2_usagers(organisation):
thomas = UsagerFactory(given_name="Thomas")
for _ in range(5):
MandatFactory(organisation=organisation, usager=thomas)
MandatFactory(organisation=organisation)
organisation = OrganisationFactory()
create_6_mandats_for_2_usagers(organisation=organisation)
self.assertEqual(2, organisation.num_usagers)
def test_count_aidants(self):
orga_a = OrganisationFactory(name="A")
orga_b = OrganisationFactory(name="Baker Street")
for _ in range(2):
aidant_a = AidantFactory(organisation=orga_a)
aidant_a.organisations.set((orga_a, orga_b))
for _ in range(3):
aidant_b = AidantFactory(organisation=orga_b)
aidant_b.organisations.set((orga_a, orga_b))
for _ in range(4):
aidant_c = AidantFactory(organisation=orga_a, is_active=False)
aidant_c.organisations.set((orga_a, orga_b))
self.assertEqual(orga_a.num_active_aidants, 5)
@tag("models", "aidant")
class AidantModelTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser_org = OrganisationFactory()
def test_i_can_create_a_superuser(self):
self.assertEqual(Aidant.objects.filter(is_superuser=True).count(), 0)
Aidant.objects.create_superuser(
username="admin", organisation=self.superuser_org
)
self.assertEqual(Aidant.objects.filter(is_superuser=True).count(), 1)
def test_what_happens_to_password_when_not_set(self):
aidant = Aidant.objects.create(
username="Marge", organisation=self.superuser_org
)
self.assertEqual(aidant.password, "")
def test_what_happens_when_username_not_set(self):
aidant = Aidant.objects.create(organisation=self.superuser_org)
self.assertEqual(aidant.username, "")
def test_what_happens_when_an_aidant_tries_to_use_same_username(self):
Aidant.objects.create(username="Marge", organisation=self.superuser_org)
self.assertRaises(IntegrityError, Aidant.objects.create, username="Marge")
def test_get_aidant_organisation(self):
orga = OrganisationFactory(
name="COMMUNE DE HOULBEC COCHEREL",
siret=123,
address="45 avenue du Général de Gaulle, 90210 Beverly Hills",
)
aidant = AidantFactory(organisation=orga)
self.assertEqual(aidant.organisation.name, "COMMUNE DE HOULBEC COCHEREL")
def test_get_active_aidants(self):
AidantFactory()
AidantFactory(is_active=False)
self.assertEqual(Aidant.objects.active().count(), 1)
@tag("models", "aidant")
class AidantModelMethodsTests(TestCase):
@classmethod
def setUpTestData(cls):
# Aidants : Marge & Lisa belong to the same organisation, Patricia does not
cls.aidant_marge = AidantFactory(validated_cgu_version="0.1")
cls.aidant_lisa = AidantFactory(
organisation=cls.aidant_marge.organisation,
validated_cgu_version=settings.CGU_CURRENT_VERSION,
)
cls.aidant_patricia = AidantFactory()
# Juliette is responsible in the same structure as Marge & Lisa
cls.respo_juliette = AidantFactory(
organisation=cls.aidant_marge.organisation,
)
cls.respo_juliette.responsable_de.add(cls.aidant_marge.organisation)
# TOTP Device
device = TOTPDevice(user=cls.aidant_marge)
device.save()
for _ in range(2):
device = TOTPDevice(user=cls.aidant_patricia)
device.save()
# Active Usagers
cls.usager_homer = UsagerFactory(given_name="Homer")
cls.usager_ned = UsagerFactory(given_name="Ned")
# Usager with no mandat
cls.usager_bart = UsagerFactory(given_name="Bart")
# Inactive Usagers
cls.usager_sophie = UsagerFactory(given_name="Sophie")
cls.usager_lola = UsagerFactory(given_name="Lola")
# Mandats Marge
cls.mandat_marge_homer_1 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_homer,
expiration_date=timezone.now() - timedelta(days=6),
)
AutorisationFactory(
mandat=cls.mandat_marge_homer_1,
demarche="Carte grise",
)
cls.mandat_marge_homer_2 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_homer,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
mandat=cls.mandat_marge_homer_2,
demarche="Revenus",
)
cls.mandat_marge_homer_3 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_homer,
expiration_date=timezone.now() + timedelta(days=365),
)
AutorisationFactory(
mandat=cls.mandat_marge_homer_3,
demarche="social",
)
cls.mandat_marge_ned_1 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_ned,
expiration_date=timezone.now() - timedelta(days=6),
)
AutorisationFactory(
mandat=cls.mandat_marge_ned_1,
demarche="Logement",
)
# Partially revoked mandat
cls.mandat_marge_ned_2 = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_ned,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
mandat=cls.mandat_marge_ned_2,
demarche="transports",
)
AutorisationFactory(
mandat=cls.mandat_marge_ned_2,
demarche="famille",
)
AutorisationFactory(
mandat=cls.mandat_marge_ned_2,
demarche="social",
)
AutorisationFactory(
mandat=cls.mandat_marge_ned_2,
demarche="travail",
)
AutorisationFactory(
demarche="papiers",
mandat=cls.mandat_marge_ned_2,
revocation_date=timezone.now(),
)
# Expired mandat
cls.mandat_marge_sophie = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_sophie,
expiration_date=timezone.now() - timedelta(days=6),
)
AutorisationFactory(
mandat=cls.mandat_marge_sophie,
demarche="transports",
)
# Revoked mandat
cls.mandat_marge_lola = MandatFactory(
organisation=cls.aidant_marge.organisation,
usager=cls.usager_lola,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
demarche="papiers",
mandat=cls.mandat_marge_lola,
revocation_date=timezone.now(),
)
# Mandat Patricia
cls.mandat_marge_lola = MandatFactory(
organisation=cls.aidant_patricia.organisation,
usager=cls.usager_lola,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
demarche="papiers",
mandat=cls.mandat_marge_lola,
)
def test_get_usagers(self):
self.assertEqual(len(self.aidant_marge.get_usagers()), 4)
self.assertEqual(len(self.aidant_lisa.get_usagers()), 4)
self.assertEqual(len(self.aidant_patricia.get_usagers()), 1)
def test_get_usager(self):
usager_john = UsagerFactory()
self.assertIsNone(self.aidant_marge.get_usager(usager_john.id))
self.assertEqual(
self.aidant_marge.get_usager(self.usager_homer.id), self.usager_homer
)
def test_active_usagers(self):
usagers = Usager.objects.all()
self.assertEqual(len(usagers), 5)
active_usagers = usagers.active()
self.assertEqual(len(active_usagers), 3)
def test_get_usagers_with_active_autorisation(self):
self.assertEqual(
len(self.aidant_marge.get_usagers_with_active_autorisation()), 2
)
self.assertEqual(
len(self.aidant_lisa.get_usagers_with_active_autorisation()), 2
)
self.assertEqual(
len(self.aidant_patricia.get_usagers_with_active_autorisation()), 1
)
def test_get_active_autorisations_for_usager(self):
self.assertEqual(
len(
self.aidant_marge.get_active_autorisations_for_usager(self.usager_homer)
),
2,
)
self.assertEqual(
len(self.aidant_marge.get_active_autorisations_for_usager(self.usager_ned)),
4,
)
self.assertEqual(
len(
self.aidant_marge.get_active_autorisations_for_usager(self.usager_bart)
),
0,
)
self.assertEqual(
len(
self.aidant_lisa.get_active_autorisations_for_usager(self.usager_homer)
),
2,
)
self.assertEqual(
len(self.aidant_lisa.get_active_autorisations_for_usager(self.usager_ned)),
4,
)
self.assertEqual(
len(self.aidant_lisa.get_active_autorisations_for_usager(self.usager_bart)),
0,
)
self.assertEqual(
len(self.aidant_lisa.get_active_autorisations_for_usager(self.usager_lola)),
0,
)
self.assertEqual(
len(
self.aidant_patricia.get_active_autorisations_for_usager(
self.usager_lola
)
),
1,
)
def test_get_inactive_autorisations_for_usager(self):
self.assertEqual(
len(
self.aidant_marge.get_inactive_autorisations_for_usager(
self.usager_homer
)
),
1,
)
self.assertEqual(
len(
self.aidant_marge.get_inactive_autorisations_for_usager(self.usager_ned)
),
2,
)
self.assertEqual(
len(
self.aidant_marge.get_inactive_autorisations_for_usager(
self.usager_bart
)
),
0,
)
self.assertEqual(
len(
self.aidant_lisa.get_inactive_autorisations_for_usager(
self.usager_homer
)
),
1,
)
self.assertEqual(
len(
self.aidant_lisa.get_inactive_autorisations_for_usager(self.usager_ned)
),
2,
)
self.assertEqual(
len(
self.aidant_lisa.get_inactive_autorisations_for_usager(self.usager_bart)
),
0,
)
def test_get_active_demarches_for_usager(self):
self.assertCountEqual(
list(self.aidant_marge.get_active_demarches_for_usager(self.usager_homer)),
["Revenus", "social"],
)
self.assertCountEqual(
list(self.aidant_marge.get_active_demarches_for_usager(self.usager_ned)),
["famille", "social", "transports", "travail"],
)
self.assertCountEqual(
list(self.aidant_lisa.get_active_demarches_for_usager(self.usager_homer)),
["Revenus", "social"],
)
self.assertCountEqual(
list(self.aidant_lisa.get_active_demarches_for_usager(self.usager_ned)),
["famille", "social", "transports", "travail"],
)
def test_get_valid_autorisation_method(self):
# A valid mandat with one revoked autorisation
usager_charles = UsagerFactory(given_name="Charles", sub="Charles")
active_mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=usager_charles,
)
valid_autorisation = AutorisationFactory(
mandat=active_mandat,
demarche="papiers",
revocation_date=None,
)
AutorisationFactory(
mandat=active_mandat,
demarche="transport",
revocation_date=timezone.now() - timedelta(days=1),
)
self.assertEqual(
self.aidant_marge.get_valid_autorisation("papiers", usager_charles),
valid_autorisation,
)
self.assertEqual(
self.aidant_marge.get_valid_autorisation("transport", usager_charles), None
)
# An expired Mandat
expired_mandat = MandatFactory(
organisation=self.aidant_marge.organisation,
usager=usager_charles,
expiration_date=timezone.now() - timedelta(days=1),
)
AutorisationFactory(
mandat=expired_mandat,
demarche="social",
revocation_date=None,
)
self.assertEqual(
self.aidant_marge.get_valid_autorisation("social", usager_charles), None
)
def test_is_in_organisation(self):
self.assertTrue(
self.aidant_marge.is_in_organisation(self.aidant_lisa.organisation),
"Aidant.is_in_organisation devrait indiquer que la personne fait partie de "
"sa propre organisation.",
)
self.assertFalse(
self.aidant_marge.is_in_organisation(OrganisationFactory()),
"Aidant.is_in_organisation devrait indiquer que la personne ne fait pas "
"partie d'une organisation étrangère.",
)
def test_is_responsable_structure(self):
# an aidant without further modification is not responsable structure
self.assertFalse(self.aidant_lisa.is_responsable_structure())
# however Juliette is responsable structure
self.assertTrue(self.respo_juliette.is_responsable_structure())
def test_can_see_aidant(self):
self.assertTrue(self.respo_juliette.can_see_aidant(self.aidant_marge))
self.assertFalse(self.respo_juliette.can_see_aidant(self.aidant_patricia))
def test_must_validate_cgu(self):
# an aidant without further modification must validate user conditions
self.assertTrue(self.aidant_patricia.must_validate_cgu())
# an aidant who has validated current version of user conditions
# does not need to revalidate them
self.assertFalse(self.aidant_lisa.must_validate_cgu())
        # an aidant who has validated an outdated version of the CGU
# must validate them again
self.assertTrue(self.aidant_marge.must_validate_cgu())
def test_has_a_totp_device(self):
self.assertFalse(self.aidant_lisa.has_a_totp_device)
self.assertTrue(self.aidant_marge.has_a_totp_device)
self.assertTrue(self.aidant_patricia.has_a_totp_device)
def test_remove_user_from_organisation_deactivate_user(self):
aidant: Aidant = AidantFactory()
organisation: Organisation = aidant.organisation
self.assertTrue(aidant.is_active, "L'aidant n'est pas actif")
aidant.remove_from_organisation(aidant.organisation)
self.assertFalse(
aidant.is_active,
"L'aidant est toujours actif après la tentative de suppression de son "
"organisation.",
)
self.assertSequenceEqual([organisation], list(aidant.organisations.all()))
def test_remove_user_from_organisation(self):
aidant: Aidant = AidantFactory()
organisation: Organisation = aidant.organisation
supplementary_organisation_1 = OrganisationFactory()
supplementary_organisation_2 = OrganisationFactory()
aidant.organisations.add(
supplementary_organisation_1, supplementary_organisation_2
)
self.assertTrue(aidant.is_active, "L'aidant n'est pas actif")
aidant.remove_from_organisation(supplementary_organisation_1)
self.assertTrue(
aidant.is_active,
"L'aidant n'est plus actif après la tentative de suppression d'une "
"organisation surnuméraire",
)
self.assertSequenceEqual(
[organisation, supplementary_organisation_2],
list(aidant.organisations.order_by("id").all()),
)
def test_remove_user_from_organisation_set_main_org(self):
aidant: Aidant = AidantFactory()
organisation: Organisation = aidant.organisation
supplementary_organisation_1 = OrganisationFactory()
supplementary_organisation_2 = OrganisationFactory()
aidant.organisations.add(
supplementary_organisation_1, supplementary_organisation_2
)
self.assertTrue(aidant.is_active, "L'aidant n'est pas actif")
aidant.remove_from_organisation(organisation)
self.assertTrue(
aidant.is_active,
"L'aidant n'est plus actif après la tentative de suppression d'une "
"organisation surnuméraire",
)
self.assertSequenceEqual(
[supplementary_organisation_1, supplementary_organisation_2],
list(aidant.organisations.order_by("id").all()),
)
self.assertEqual(
supplementary_organisation_1,
aidant.organisation,
"L'organisation principale de l'aidant n'a pas été remplacée par une "
"organisation valide après que l'aidant en a été retiré",
)
def test_remove_user_from_organisation_does_not_change_main_org(self):
aidant: Aidant = AidantFactory()
supplementary_organisation_1 = OrganisationFactory()
supplementary_organisation_2 = OrganisationFactory()
supplementary_organisation_to_remove = OrganisationFactory()
aidant.organisations.add(
supplementary_organisation_1,
supplementary_organisation_2,
supplementary_organisation_to_remove,
OrganisationFactory(),
OrganisationFactory(),
OrganisationFactory(),
)
aidant.organisation = supplementary_organisation_1
self.assertEqual(aidant.organisation, supplementary_organisation_1)
aidant.remove_from_organisation(supplementary_organisation_to_remove)
self.assertEqual(aidant.organisation, supplementary_organisation_1)
@mock.patch("aidants_connect_web.models.aidants__organisations_changed.send")
def test_remove_user_from_organisation_sends_signal(self, send: Mock):
aidant: Aidant = AidantFactory()
supplementary_organisation_1 = OrganisationFactory()
aidant.organisations.add(supplementary_organisation_1)
aidant.remove_from_organisation(supplementary_organisation_1)
send.assert_called_once_with(
sender=aidant.__class__,
instance=aidant,
diff={"removed": [supplementary_organisation_1], "added": []},
)
def test_set_organisations_raises_error_when_removing_everything(self):
aidant: Aidant = AidantFactory()
with self.assertRaises(ValueError) as err:
aidant.set_organisations([])
self.assertEqual(
"Can't remove all the organisations from aidant", f"{err.exception}"
)
def test_set_organisations_correctly_sets_organisations(self):
aidant: Aidant = AidantFactory()
organisation_to_remove = OrganisationFactory()
aidant.organisations.add(organisation_to_remove)
organisation_to_set_1 = OrganisationFactory()
organisation_to_set_2 = OrganisationFactory()
self.assertSequenceEqual(
[aidant.organisation, organisation_to_remove],
list(aidant.organisations.order_by("id")),
)
aidant.set_organisations(
[aidant.organisation, organisation_to_set_1, organisation_to_set_2]
)
self.assertSequenceEqual(
[aidant.organisation, organisation_to_set_1, organisation_to_set_2],
list(aidant.organisations.order_by("id")),
)
def test_set_organisations_set_current_active_organisation_when_removed(self):
aidant: Aidant = AidantFactory()
organisation_to_set_1 = OrganisationFactory()
organisation_to_set_2 = OrganisationFactory()
aidant.set_organisations([organisation_to_set_1, organisation_to_set_2])
self.assertSequenceEqual(
[organisation_to_set_1, organisation_to_set_2],
list(aidant.organisations.order_by("id")),
)
self.assertEqual(organisation_to_set_1, aidant.organisation)
def test_set_organisations_does_not_change_main_org(self):
aidant: Aidant = AidantFactory()
supplementary_organisation_1 = OrganisationFactory()
supplementary_organisation_2 = OrganisationFactory()
supplementary_organisation_to_remove = OrganisationFactory()
aidant.organisations.add(
supplementary_organisation_1,
supplementary_organisation_2,
supplementary_organisation_to_remove,
OrganisationFactory(),
OrganisationFactory(),
OrganisationFactory(),
)
aidant.organisation = supplementary_organisation_1
self.assertEqual(aidant.organisation, supplementary_organisation_1)
aidant.set_organisations(
set(aidant.organisations.all()) - {supplementary_organisation_to_remove}
)
self.assertEqual(aidant.organisation, supplementary_organisation_1)
@mock.patch("aidants_connect_web.models.aidants__organisations_changed.send")
def test_set_organisations_sends_signal(self, send: Mock):
aidant: Aidant = AidantFactory()
previous_organisation = aidant.organisation
organisation_to_remove = OrganisationFactory()
aidant.organisations.add(organisation_to_remove)
organisation_to_set_1 = OrganisationFactory()
organisation_to_set_2 = OrganisationFactory()
aidant.set_organisations([organisation_to_set_1, organisation_to_set_2])
send.assert_called_once_with(
sender=aidant.__class__,
instance=aidant,
diff={
"removed": [previous_organisation, organisation_to_remove],
"added": [organisation_to_set_1, organisation_to_set_2],
},
)
@tag("models", "journal")
class JournalModelTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.aidant_thierry = AidantFactory(
email="thierry@thierry.com",
first_name="Thierry",
last_name="Martin",
organisation=OrganisationFactory(name="Commune de Vernon"),
)
cls.journal_entry = Journal.objects.create(
action="connect_aidant",
aidant=cls.aidant_thierry,
organisation=cls.aidant_thierry.organisation,
)
cls.usager_ned = UsagerFactory(given_name="Ned", family_name="Flanders")
cls.first_mandat = MandatFactory(
organisation=cls.aidant_thierry.organisation,
usager=cls.usager_ned,
expiration_date=timezone.now() + timedelta(days=6),
)
cls.first_autorisation = AutorisationFactory(
mandat=cls.first_mandat,
demarche="Revenus",
)
Journal.log_autorisation_creation(
cls.first_autorisation, aidant=cls.aidant_thierry
)
cls.mandat_thierry_ned_365 = MandatFactory(
organisation=cls.aidant_thierry.organisation,
usager=cls.usager_ned,
expiration_date=timezone.now() + timedelta(days=365),
)
def test_a_journal_entry_can_be_created(self):
# Aidant connects and first autorisation is created
self.assertEqual(len(Journal.objects.all()), 2)
    def test_logging_of_aidant_connection(self):
entry = Journal.log_connection(aidant=self.aidant_thierry)
self.assertEqual(len(Journal.objects.all()), 3)
self.assertEqual(entry.action, "connect_aidant")
self.assertEqual(entry.aidant.id, self.aidant_thierry.id)
def test_a_franceconnect_usager_journal_entry_can_be_created(self):
entry = Journal.log_franceconnection_usager(
aidant=self.aidant_thierry,
usager=self.usager_ned,
)
self.assertEqual(len(Journal.objects.all()), 3)
self.assertEqual(entry.action, "franceconnect_usager")
def test_log_autorisation_creation_complete(self):
autorisation = AutorisationFactory(
mandat=self.mandat_thierry_ned_365,
demarche="logement",
)
Journal.log_autorisation_creation(autorisation, self.aidant_thierry)
journal_entries = Journal.objects.all()
self.assertEqual(len(journal_entries), 3)
last_entry = journal_entries.last()
self.assertEqual(last_entry.action, "create_autorisation")
self.assertEqual(last_entry.usager.id, self.usager_ned.id)
self.assertEqual(last_entry.autorisation, autorisation.id)
def test_log_autorisation_use_complete(self):
entry = Journal.log_autorisation_use(
aidant=self.aidant_thierry,
usager=self.usager_ned,
demarche="transports",
access_token="fjfgjfdkldlzlsmqqxxcn",
autorisation=self.first_autorisation,
)
self.assertEqual(len(Journal.objects.all()), 3)
self.assertEqual(entry.action, "use_autorisation")
self.assertEqual(entry.demarche, "transports")
def test_log_autorisation_cancel_complete(self):
entry = Journal.log_autorisation_cancel(
autorisation=self.first_autorisation, aidant=self.aidant_thierry
)
self.assertEqual(len(Journal.objects.all()), 3)
self.assertEqual(entry.action, "cancel_autorisation")
def test_it_is_impossible_to_change_an_existing_entry(self):
entry = Journal.log_autorisation_use(
aidant=self.aidant_thierry,
usager=self.usager_ned,
demarche="transports",
access_token="fjfgjfdkldlzlsmqqxxcn",
autorisation=self.first_autorisation,
)
entry.demarches = ["logement"]
self.assertRaises(NotImplementedError, entry.save)
self.assertEqual(Journal.objects.get(id=entry.id).demarche, "transports")
def test_it_is_impossible_to_delete_an_existing_entry(self):
entry = Journal.log_autorisation_use(
aidant=self.aidant_thierry,
usager=self.usager_ned,
demarche="transports",
access_token="fjfgjfdkldlzlsmqqxxcn",
autorisation=self.first_autorisation,
)
self.assertRaises(NotImplementedError, entry.delete)
self.assertEqual(Journal.objects.get(id=entry.id).demarche, "transports")
def test_a_create_attestation_journal_entry_can_be_created(self):
demarches = ["transports", "logement"]
expiration_date = timezone.now() + timedelta(days=6)
mandat = MandatFactory()
entry = Journal.log_attestation_creation(
aidant=self.aidant_thierry,
usager=self.usager_ned,
demarches=demarches,
duree=6,
is_remote_mandat=False,
access_token="fjfgjfdkldlzlsmqqxxcn",
attestation_hash=generate_attestation_hash(
self.aidant_thierry, self.usager_ned, demarches, expiration_date
),
mandat=mandat,
)
self.assertEqual(len(Journal.objects.all()), 3)
self.assertEqual(entry.action, "create_attestation")
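        # Rebuild the string the attestation hash is expected to cover, as
        # exercised here: aidant id, creation date (today), the demarches
        # sorted and comma-joined, expiration date, organisation id, the
        # SHA-256 of the mandate template file, and the usager's sub.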
attestation_string = ";".join(
[
str(self.aidant_thierry.id),
date.today().isoformat(),
"logement,transports",
expiration_date.date().isoformat(),
str(self.aidant_thierry.organisation.id),
generate_file_sha256_hash(f"templates/{settings.MANDAT_TEMPLATE_PATH}"),
self.usager_ned.sub,
]
)
self.assertTrue(
validate_attestation_hash(attestation_string, entry.attestation_hash)
)
@tag("models", "habilitation_request")
class HabilitationRequestMethodTests(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_validate_when_all_is_fine(self):
for habilitation_request in (
HabilitationRequestFactory(status=HabilitationRequest.STATUS_PROCESSING),
HabilitationRequestFactory(status=HabilitationRequest.STATUS_NEW),
):
self.assertEqual(
0, Aidant.objects.filter(email=habilitation_request.email).count()
)
self.assertTrue(habilitation_request.validate_and_create_aidant())
self.assertEqual(
1, Aidant.objects.filter(email=habilitation_request.email).count()
)
db_hab_request = HabilitationRequest.objects.get(id=habilitation_request.id)
self.assertEqual(
db_hab_request.status, HabilitationRequest.STATUS_VALIDATED
)
def test_validate_if_aidant_already_exists(self):
aidant = AidantFactory()
habilitation_request = HabilitationRequestFactory(
status=HabilitationRequest.STATUS_PROCESSING, email=aidant.email
)
self.assertTrue(habilitation_request.validate_and_create_aidant())
self.assertEqual(
1, Aidant.objects.filter(email=habilitation_request.email).count()
)
habilitation_request.refresh_from_db()
self.assertEqual(
habilitation_request.status, HabilitationRequest.STATUS_VALIDATED
)
aidant.refresh_from_db()
self.assertIn(habilitation_request.organisation, aidant.organisations.all())
def test_do_not_validate_if_invalid_status(self):
for status in (
HabilitationRequest.STATUS_VALIDATED,
HabilitationRequest.STATUS_REFUSED,
HabilitationRequest.STATUS_CANCELLED,
):
habilitation_request = HabilitationRequestFactory(status=status)
self.assertEqual(
0, Aidant.objects.filter(email=habilitation_request.email).count()
)
self.assertFalse(habilitation_request.validate_and_create_aidant())
self.assertEqual(
0, Aidant.objects.filter(email=habilitation_request.email).count()
)
db_hab_request = HabilitationRequest.objects.get(id=habilitation_request.id)
self.assertEqual(db_hab_request.status, status)
| 37.651439
| 88
| 0.654977
|
1e797652f0ff70a1f3c8e9b7e867c040a8a29da7
| 3,338
|
py
|
Python
|
transfers_skinkie-v1.py
|
bliksemlabs/rrrr
|
81b4afa1a5e1184a5ef31db4d6fd12f6ebf2ddb3
|
[
"BSD-2-Clause"
] | 95
|
2015-01-03T18:16:19.000Z
|
2022-03-08T11:23:49.000Z
|
transfers_skinkie-v1.py
|
bliksemlabs/rrrr
|
81b4afa1a5e1184a5ef31db4d6fd12f6ebf2ddb3
|
[
"BSD-2-Clause"
] | 55
|
2015-01-08T00:30:37.000Z
|
2019-04-03T19:13:57.000Z
|
transfers_skinkie-v1.py
|
bliksemlabs/rrrr
|
81b4afa1a5e1184a5ef31db4d6fd12f6ebf2ddb3
|
[
"BSD-2-Clause"
] | 25
|
2015-01-03T10:13:39.000Z
|
2020-09-11T11:10:10.000Z
|
#!/usr/bin/python
import math, sys
from copy import copy
# requires graphserver to be installed
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
verbose = False
RADIUS = 2000 # meters
OBSTRUCTION = 1.3 #factor to expand straight-line distance
range_lat = RADIUS / 111111.111
if len(sys.argv) < 2 :
print 'usage: transfers.py infile.gtfsdb [verbose]'
exit(1)
gtfsdb_file = sys.argv[1]
try :
with open(gtfsdb_file) as f :
db = GTFSDatabase(gtfsdb_file)
except IOError as e :
print 'gtfsdb file "%s" cannot be opened' % gtfsdb_file
exit(1)
if len(sys.argv) > 2 and sys.argv[2] == "verbose" :
verbose = True
# we are interested in all routes available at each stop
all_query = """select stops.stop_id, stops.stop_name, stops.stop_lat, stops.stop_lon, routes from
(select stop_id, group_concat(route_id, ',') as routes
from (select distinct route_id, stop_id from trips, stop_times
where trips.trip_id = stop_times.trip_id) as x
group by stop_id) as y, stops where y.stop_id = stops.stop_id;"""
near_query = """
select stop_id, stop_name, stop_lat, stop_lon from stops where
stop_lat > (:lat - :range_lat) and stop_lat < (:lat + :range_lat) and
stop_lon > (:lon - :range_lon) and stop_lon < (:lon + :range_lon) ORDER BY (((stop_lat - :lat) * (stop_lat - :lat)) + ((stop_lon - :lon) * (stop_lon - :lon))) ASC;
"""
# equirectangular / sinusoidal projection
def distance (lat1, lon1, lat2, lon2, xscale=None) :
    # Python does not overload functions, so the original pair of same-named
    # definitions shadowed each other; a single definition with an optional
    # xscale keeps both call patterns working. When xscale is not supplied it
    # is derived from the average latitude of the two points.
    if xscale is None :
        avg_lat = (lat1 + lat2) / 2
        xscale = math.cos(math.radians(avg_lat))
    dlon = (lon2 - lon1) * xscale
    dlat = lat2 - lat1
    d2 = dlon * dlon + dlat * dlat
    return math.sqrt(d2) * 111111.111
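# quick sanity check with made-up coordinates: two stops one degree of
# latitude apart on the same meridian should come out at roughly 111 km,
# e.g. distance(0.0, 4.9, 1.0, 4.9) ~= 111111 m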
stops = {}
for sid, sname, lat, lon, routes in db.execute(all_query) :
stops[sid] = {'name': sname, 'lat': lat, 'lon': lon, 'routes': set(routes.split(','))}
# can also compare squared distances in scaled meters
transfers = []
n_processed = 0
for sid, param in stops.items():
if verbose :
        print sid, param['name']
xscale = math.cos(math.radians(param['lat']))
    # a degree of longitude covers cos(latitude) times fewer meters than a
    # degree of latitude, so the bounding box must be wider in longitude
    range_lon = range_lat / xscale
routes = copy(param['routes'])
# print xscale, range_lat, range_lon
for sid2, sname2, lat2, lon2 in db.execute(near_query, {'range_lat': range_lat, 'range_lon': range_lon, 'lat': param['lat'], 'lon': param['lon']}):
if sid == sid2:
continue
d = distance (param['lat'], param['lon'], lat2, lon2, xscale)
if d > RADIUS :
continue
if verbose :
print " ", sid2, sname2, '%0.1f m' % d
if sid2 in stops and len(stops[sid2]['routes'] - routes) > 0:
routes = routes.union(stops[sid2]['routes'])
transfers.append ( (sid, sid2, d * OBSTRUCTION) )
    n_processed += 1
if n_processed % 1000 == 0 :
print 'processed %d stops' % n_processed
cur = db.get_cursor()
cur.execute('delete from transfers;') # where transfer_type = 9;')
cur.executemany('insert into transfers values (?,?,9,?);', transfers)
cur.execute('create index if not exists transfers_from_stop_id ON transfers (from_stop_id)')
print 'committing...'
db.conn.commit()
# print 'vacuuming...'
# cur.execute('vacuum;')
# db.conn.commit()
| 34.412371
| 163
| 0.644997
|
b63a9d22815b567142e121ddb848452852fd6213
| 19,802
|
py
|
Python
|
cinder/opts.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 1
|
2019-02-17T17:49:41.000Z
|
2019-02-17T17:49:41.000Z
|
cinder/opts.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | null | null | null |
cinder/opts.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 3
|
2020-06-16T07:29:48.000Z
|
2020-06-21T10:22:57.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################
# WARNING!
#
# Do not edit this file directly. This file should be generated by
# running the command "tox -e genopts" any time a config option
# has been added, changed, or removed.
###################################################################
import itertools
from keystoneauth1 import loading
from cinder import objects # noqa
objects.register_all()
from cinder.api import common as cinder_api_common
from cinder.api.middleware import auth as cinder_api_middleware_auth
from cinder.api.views import versions as cinder_api_views_versions
from cinder.backup import api as cinder_backup_api
from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver
from cinder.backup import driver as cinder_backup_driver
from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph
from cinder.backup.drivers import gcs as cinder_backup_drivers_gcs
from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs
from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs
from cinder.backup.drivers import posix as cinder_backup_drivers_posix
from cinder.backup.drivers import swift as cinder_backup_drivers_swift
from cinder.backup.drivers import tsm as cinder_backup_drivers_tsm
from cinder.backup import manager as cinder_backup_manager
from cinder.cmd import backup as cinder_cmd_backup
from cinder.cmd import volume as cinder_cmd_volume
from cinder.common import config as cinder_common_config
import cinder.compute
from cinder.compute import nova as cinder_compute_nova
from cinder import context as cinder_context
from cinder import coordination as cinder_coordination
from cinder.db import api as cinder_db_api
from cinder.db import base as cinder_db_base
from cinder.image import glance as cinder_image_glance
from cinder.image import image_utils as cinder_image_imageutils
from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr
from cinder.message import api as cinder_message_api
from cinder import quota as cinder_quota
from cinder.scheduler import driver as cinder_scheduler_driver
from cinder.scheduler import host_manager as cinder_scheduler_hostmanager
from cinder.scheduler import manager as cinder_scheduler_manager
from cinder.scheduler import scheduler_options as \
cinder_scheduler_scheduleroptions
from cinder.scheduler.weights import capacity as \
cinder_scheduler_weights_capacity
from cinder.scheduler.weights import volume_number as \
cinder_scheduler_weights_volumenumber
from cinder import service as cinder_service
from cinder import service_auth as cinder_serviceauth
from cinder import ssh_utils as cinder_sshutils
from cinder.transfer import api as cinder_transfer_api
from cinder.volume import api as cinder_volume_api
from cinder.volume import driver as cinder_volume_driver
from cinder.volume.drivers.datera import datera_iscsi as \
cinder_volume_drivers_datera_dateraiscsi
from cinder.volume.drivers.dell_emc.powermax import common as \
cinder_volume_drivers_dell_emc_powermax_common
from cinder.volume.drivers.dell_emc.sc import storagecenter_common as \
cinder_volume_drivers_dell_emc_sc_storagecentercommon
from cinder.volume.drivers.dell_emc.unity import driver as \
cinder_volume_drivers_dell_emc_unity_driver
from cinder.volume.drivers.dell_emc.vnx import common as \
cinder_volume_drivers_dell_emc_vnx_common
from cinder.volume.drivers.dell_emc.vxflexos import driver as \
cinder_volume_drivers_dell_emc_vxflexos_driver
from cinder.volume.drivers.dell_emc import xtremio as \
cinder_volume_drivers_dell_emc_xtremio
from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common as \
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon
from cinder.volume.drivers.fusionstorage import dsware as \
cinder_volume_drivers_fusionstorage_dsware
from cinder.volume.drivers.hpe import hpe_3par_common as \
cinder_volume_drivers_hpe_hpe3parcommon
from cinder.volume.drivers.huawei import common as \
cinder_volume_drivers_huawei_common
from cinder.volume.drivers.ibm import flashsystem_common as \
cinder_volume_drivers_ibm_flashsystemcommon
from cinder.volume.drivers.ibm import flashsystem_iscsi as \
cinder_volume_drivers_ibm_flashsystemiscsi
from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs
from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as \
cinder_volume_drivers_ibm_ibm_storage_ds8kproxy
from cinder.volume.drivers.ibm.ibm_storage import ibm_storage as \
cinder_volume_drivers_ibm_ibm_storage_ibmstorage
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi
from cinder.volume.drivers import infinidat as cinder_volume_drivers_infinidat
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli
from cinder.volume.drivers.inspur.as13000 import as13000_driver as \
cinder_volume_drivers_inspur_as13000_as13000driver
from cinder.volume.drivers.inspur.instorage import instorage_common as \
cinder_volume_drivers_inspur_instorage_instoragecommon
from cinder.volume.drivers.inspur.instorage import instorage_iscsi as \
cinder_volume_drivers_inspur_instorage_instorageiscsi
from cinder.volume.drivers.kaminario import kaminario_common as \
cinder_volume_drivers_kaminario_kaminariocommon
from cinder.volume.drivers.lenovo import lenovo_common as \
cinder_volume_drivers_lenovo_lenovocommon
from cinder.volume.drivers import linstordrv as \
cinder_volume_drivers_linstordrv
from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm
from cinder.volume.drivers.macrosan import driver as \
cinder_volume_drivers_macrosan_driver
from cinder.volume.drivers.netapp import options as \
cinder_volume_drivers_netapp_options
from cinder.volume.drivers.nexenta import options as \
cinder_volume_drivers_nexenta_options
from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs
from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble
from cinder.volume.drivers.prophetstor import options as \
cinder_volume_drivers_prophetstor_options
from cinder.volume.drivers import pure as cinder_volume_drivers_pure
from cinder.volume.drivers import qnap as cinder_volume_drivers_qnap
from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte
from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd
from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs
from cinder.volume.drivers.san.hp import hpmsa_common as \
cinder_volume_drivers_san_hp_hpmsacommon
from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san
from cinder.volume.drivers.sandstone import sds_driver as \
cinder_volume_drivers_sandstone_sdsdriver
from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire
from cinder.volume.drivers import storpool as cinder_volume_drivers_storpool
from cinder.volume.drivers.stx import common as \
cinder_volume_drivers_stx_common
from cinder.volume.drivers.synology import synology_common as \
cinder_volume_drivers_synology_synologycommon
from cinder.volume.drivers.veritas_access import veritas_iscsi as \
cinder_volume_drivers_veritas_access_veritasiscsi
from cinder.volume.drivers.vmware import vmdk as \
cinder_volume_drivers_vmware_vmdk
from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage
from cinder.volume.drivers.windows import iscsi as \
cinder_volume_drivers_windows_iscsi
from cinder.volume.drivers.windows import smbfs as \
cinder_volume_drivers_windows_smbfs
from cinder.volume.drivers import zadara as cinder_volume_drivers_zadara
from cinder.volume import manager as cinder_volume_manager
from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf
from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \
cinder_zonemanager_drivers_brocade_brcdfabricopts
from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \
cinder_zonemanager_drivers_brocade_brcdfczonedriver
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \
cinder_zonemanager_drivers_cisco_ciscofabricopts
from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \
cinder_zonemanager_drivers_cisco_ciscofczonedriver
from cinder.zonemanager import fc_zone_manager as \
cinder_zonemanager_fczonemanager
def list_opts():
return [
('backend',
itertools.chain(
[cinder_cmd_volume.host_opt],
)),
('brcd_fabric_example',
itertools.chain(
cinder_zonemanager_drivers_brocade_brcdfabricopts.
brcd_zone_opts,
)),
('cisco_fabric_example',
itertools.chain(
cinder_zonemanager_drivers_cisco_ciscofabricopts.
cisco_zone_opts,
)),
('coordination',
itertools.chain(
cinder_coordination.coordination_opts,
)),
('DEFAULT',
itertools.chain(
cinder_api_common.api_common_opts,
[cinder_api_middleware_auth.use_forwarded_for_opt],
cinder_api_views_versions.versions_opts,
cinder_backup_api.backup_opts,
cinder_backup_chunkeddriver.backup_opts,
cinder_backup_driver.backup_opts,
cinder_backup_drivers_ceph.service_opts,
cinder_backup_drivers_gcs.gcsbackup_service_opts,
cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts,
cinder_backup_drivers_nfs.nfsbackup_service_opts,
cinder_backup_drivers_posix.posixbackup_service_opts,
cinder_backup_drivers_swift.swiftbackup_service_opts,
cinder_backup_drivers_tsm.tsm_opts,
cinder_backup_manager.backup_manager_opts,
[cinder_cmd_backup.backup_workers_opt],
[cinder_cmd_volume.cluster_opt],
cinder_common_config.api_opts,
cinder_common_config.core_opts,
cinder_common_config.auth_opts,
cinder_common_config.backup_opts,
cinder_common_config.image_opts,
cinder_common_config.global_opts,
cinder_common_config.compression_opts,
cinder.compute.compute_opts,
cinder_context.context_opts,
cinder_db_api.db_opts,
cinder_db_api.backup_opts,
[cinder_db_base.db_driver_opt],
cinder_image_glance.image_opts,
cinder_image_glance.glance_core_properties_opts,
cinder_image_imageutils.image_opts,
cinder_message_api.messages_opts,
cinder_quota.quota_opts,
cinder_scheduler_driver.scheduler_driver_opts,
cinder_scheduler_hostmanager.host_manager_opts,
cinder_scheduler_manager.scheduler_manager_opts,
[cinder_scheduler_scheduleroptions.
scheduler_json_config_location_opt],
cinder_scheduler_weights_capacity.capacity_weight_opts,
cinder_scheduler_weights_volumenumber.
volume_number_weight_opts,
cinder_service.service_opts,
cinder_sshutils.ssh_opts,
cinder_transfer_api.volume_transfer_opts,
[cinder_volume_api.allow_force_upload_opt],
[cinder_volume_api.volume_host_opt],
[cinder_volume_api.volume_same_az_opt],
[cinder_volume_api.az_cache_time_opt],
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
cinder_volume_driver.nvmet_opts,
cinder_volume_driver.scst_opts,
cinder_volume_driver.backup_opts,
cinder_volume_driver.image_opts,
cinder_volume_drivers_datera_dateraiscsi.d_opts,
cinder_volume_drivers_fusionstorage_dsware.volume_opts,
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli.
infortrend_opts,
cinder_volume_drivers_inspur_as13000_as13000driver.
inspur_as13000_opts,
cinder_volume_drivers_inspur_instorage_instoragecommon.
instorage_mcs_opts,
cinder_volume_drivers_inspur_instorage_instorageiscsi.
instorage_mcs_iscsi_opts,
cinder_volume_drivers_sandstone_sdsdriver.sds_opts,
cinder_volume_drivers_veritas_access_veritasiscsi.VA_VOL_OPTS,
cinder_volume_manager.volume_manager_opts,
cinder_wsgi_eventletserver.socket_opts,
)),
('fc-zone-manager',
itertools.chain(
cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts,
cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts,
cinder_zonemanager_fczonemanager.zone_manager_opts,
)),
('key_manager',
itertools.chain(
cinder_keymgr_confkeymgr.key_mgr_opts,
)),
('service_user',
itertools.chain(
cinder_serviceauth.service_user_opts,
loading.get_auth_plugin_conf_options('v3password'),
loading.get_session_conf_options(),
)),
('backend_defaults',
itertools.chain(
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
cinder_volume_driver.nvmet_opts,
cinder_volume_driver.scst_opts,
cinder_volume_driver.image_opts,
cinder_volume_driver.fqdn_opts,
cinder_volume_drivers_dell_emc_powermax_common.powermax_opts,
cinder_volume_drivers_dell_emc_sc_storagecentercommon.
common_opts,
cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS,
cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS,
cinder_volume_drivers_dell_emc_vxflexos_driver.vxflexos_opts,
cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS,
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon.
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
cinder_volume_drivers_huawei_common.huawei_opts,
cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts,
cinder_volume_drivers_ibm_flashsystemiscsi.
flashsystem_iscsi_opts,
cinder_volume_drivers_ibm_gpfs.gpfs_opts,
cinder_volume_drivers_ibm_gpfs.gpfs_remote_ssh_opts,
cinder_volume_drivers_ibm_ibm_storage_ds8kproxy.ds8k_opts,
cinder_volume_drivers_ibm_ibm_storage_ibmstorage.driver_opts,
cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon.
storwize_svc_opts,
cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc.
storwize_svc_fc_opts,
cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi.
storwize_svc_iscsi_opts,
cinder_volume_drivers_infinidat.infinidat_opts,
cinder_volume_drivers_kaminario_kaminariocommon.
kaminario_opts,
cinder_volume_drivers_lenovo_lenovocommon.common_opts,
cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts,
cinder_volume_drivers_linstordrv.linstor_opts,
cinder_volume_drivers_lvm.volume_opts,
cinder_volume_drivers_macrosan_driver.config.macrosan_opts,
cinder_volume_drivers_netapp_options.netapp_proxy_opts,
cinder_volume_drivers_netapp_options.netapp_connection_opts,
cinder_volume_drivers_netapp_options.netapp_transport_opts,
cinder_volume_drivers_netapp_options.netapp_basicauth_opts,
cinder_volume_drivers_netapp_options.netapp_cluster_opts,
cinder_volume_drivers_netapp_options.netapp_provisioning_opts,
cinder_volume_drivers_netapp_options.netapp_img_cache_opts,
cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts,
cinder_volume_drivers_netapp_options.netapp_san_opts,
cinder_volume_drivers_netapp_options.netapp_replication_opts,
cinder_volume_drivers_netapp_options.netapp_support_opts,
cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS,
cinder_volume_drivers_nfs.nfs_opts,
cinder_volume_drivers_nimble.nimble_opts,
cinder_volume_drivers_prophetstor_options.DPL_OPTS,
cinder_volume_drivers_pure.PURE_OPTS,
cinder_volume_drivers_qnap.qnap_opts,
cinder_volume_drivers_quobyte.volume_opts,
cinder_volume_drivers_rbd.RBD_OPTS,
cinder_volume_drivers_remotefs.nas_opts,
cinder_volume_drivers_remotefs.volume_opts,
cinder_volume_drivers_san_hp_hpmsacommon.common_opts,
cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts,
cinder_volume_drivers_san_san.san_opts,
cinder_volume_drivers_solidfire.sf_opts,
cinder_volume_drivers_storpool.storpool_opts,
cinder_volume_drivers_stx_common.common_opts,
cinder_volume_drivers_stx_common.iscsi_opts,
cinder_volume_drivers_synology_synologycommon.cinder_opts,
cinder_volume_drivers_vmware_vmdk.vmdk_opts,
cinder_volume_drivers_vzstorage.vzstorage_opts,
cinder_volume_drivers_windows_iscsi.windows_opts,
cinder_volume_drivers_windows_smbfs.volume_opts,
cinder_volume_drivers_zadara.zadara_opts,
cinder_volume_manager.volume_backend_opts,
cinder_volume_targets_spdknvmf.spdk_opts,
)),
('nova',
itertools.chain(
cinder_compute_nova.nova_opts,
cinder_compute_nova.nova_session_opts,
cinder_compute_nova.nova_auth_opts,
)),
]
| 53.088472
| 78
| 0.748308
|
682c944735a64e69ca54c3b93d66fcc8014fbacc
| 2,332
|
py
|
Python
|
tests/unit/types/request/test_request.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/types/request/test_request.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/types/request/test_request.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from google.protobuf.json_format import MessageToDict, MessageToJson
from jina.excepts import BadRequestType
from jina.helper import get_random_identity
from jina.proto import jina_pb2
from jina.types.request import Request
from jina.types.sets.document import DocumentSet
from jina.types.sets.querylang import QueryLangSet
@pytest.fixture(scope='function')
def req():
r = jina_pb2.RequestProto()
r.request_id = get_random_identity()
r.index.docs.add()
return r
def test_init(req):
assert Request(request=None)
assert Request(request=req, copy=True)
assert Request(request=req, copy=False)
assert Request(request=MessageToDict(req))
assert Request(request=MessageToJson(req))
def test_init_fail():
with pytest.raises(BadRequestType):
Request(request=5)
def test_docs(req):
request = Request(request=req, copy=False)
docs = request.docs
assert request.is_used
assert isinstance(docs, DocumentSet)
assert len(docs) == 1
def test_groundtruth(req):
request = Request(request=req, copy=False)
groundtruths = request.groundtruths
assert request.is_used
assert isinstance(groundtruths, DocumentSet)
assert len(groundtruths) == 0
def test_request_type_set_get(req):
request = Request(request=req, copy=False)
request.request_type = 'search'
assert request.request_type == 'SearchRequestProto'
def test_request_type_set_get_fail(req):
request = Request(request=req, copy=False)
with pytest.raises(ValueError):
request.request_type = 'random'
def test_queryset(req):
request = Request(request=req, copy=False)
queryset = request.queryset
assert request.is_used
assert isinstance(queryset, QueryLangSet)
def test_command(req):
request = Request(request=req, copy=False)
cmd = request.command
assert request.is_used
assert cmd
assert isinstance(cmd, str)
def test_as_pb_object(req):
request = Request(request=req)
request.as_pb_object
assert request.is_used
request = Request(request=None)
assert request.as_pb_object
assert request.is_used
def test_as_json_str(req):
request = Request(request=req)
assert isinstance(request.to_json(), str)
request = Request(request=None)
assert isinstance(request.to_json(), str)
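The tests above exercise the Request accessors one at a time. A minimal end-to-end sketch using only names already imported in this file (the proto fields mirror the `req` fixture; nothing here goes beyond what the tests assert):
def example_request_roundtrip():
    # Same shape as the `req` fixture: a proto with one indexed document.
    proto = jina_pb2.RequestProto()
    proto.request_id = get_random_identity()
    proto.index.docs.add()
    # Wrap it and read the documents back through the high-level accessor.
    request = Request(request=proto, copy=False)
    assert isinstance(request.docs, DocumentSet)
    assert len(request.docs) == 1
    # The constructor also accepts a JSON serialization of the same proto.
    rebuilt = Request(request=MessageToJson(proto))
    assert len(rebuilt.docs) == 1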
| 25.911111
| 68
| 0.738422
|
d12f85facd1a89f48b19e0f3d3448a479b4b7833
| 23,818
|
py
|
Python
|
calm/dsl/builtins/models/substrate.py
|
darshanpyadav93/calm-dsl
|
0c12474a26ea5aa9c800dea7f45b51da10d85041
|
[
"Apache-2.0"
] | null | null | null |
calm/dsl/builtins/models/substrate.py
|
darshanpyadav93/calm-dsl
|
0c12474a26ea5aa9c800dea7f45b51da10d85041
|
[
"Apache-2.0"
] | 3
|
2022-03-04T11:21:29.000Z
|
2022-03-04T12:14:33.000Z
|
calm/dsl/builtins/models/substrate.py
|
abhijeetkaurav1st/calm-dsl
|
6487a896967b3fd667b9320e2ad3a397c9960497
|
[
"Apache-2.0"
] | null | null | null |
import sys
from distutils.version import LooseVersion as LV
from .entity import EntityType, Entity, EntityTypeBase, EntityDict
from .validator import PropertyValidator
from .readiness_probe import readiness_probe
from .provider_spec import provider_spec
from .ahv_vm import AhvVmType, ahv_vm
from .client_attrs import update_dsl_metadata_map, get_dsl_metadata_map
from .metadata_payload import get_metadata_obj
from .helper import common as common_helper
from calm.dsl.config import get_context
from calm.dsl.constants import CACHE, PROVIDER_ACCOUNT_TYPE_MAP
from calm.dsl.store import Cache
from calm.dsl.store import Version
from calm.dsl.log import get_logging_handle
LOG = get_logging_handle(__name__)
# Substrate
class SubstrateDict(EntityDict):
@staticmethod
def pre_validate(vdict, name, value):
if name == "readiness_probe":
if isinstance(value, dict):
rp_validator, is_array = vdict[name]
rp_cls_type = rp_validator.get_kind()
return rp_cls_type(None, (Entity,), value)
return value
class SubstrateType(EntityType):
__schema_name__ = "Substrate"
__openapi_type__ = "app_substrate"
__prepare_dict__ = SubstrateDict
ALLOWED_FRAGMENT_ACTIONS = {
"__pre_create__": "pre_action_create",
"__post_delete__": "post_action_delete",
}
def get_profile_environment(cls):
"""returns the profile environment, if substrate has been defined in blueprint file"""
cls_bp = common_helper._walk_to_parent_with_given_type(cls, "BlueprintType")
environment = {}
if cls_bp:
for cls_profile in cls_bp.profiles:
for cls_deployment in cls_profile.deployments:
if cls_deployment.substrate.name != str(cls):
continue
environment = getattr(cls_profile, "environment", {})
if environment:
LOG.debug(
"Found environment {} associated to app-profile {}".format(
environment.get("name"), cls_profile
)
)
break
return environment
def get_referenced_account_uuid(cls):
"""
SUBSTRATE GIVEN UNDER BLUEPRINT
If calm-version < v3.2.0:
1. account_reference is not available at substrate-level, So need to read from project only
If calm-version >= 3.2.0:
1. account_reference is available at substrate-level
1.a: If env is given at profile-level, then account must be whitelisted in environment
1.b: If env is not given at profile-level, then account must be whitelisted in project
2. If account_reference is not available at substrate-level
2.a: If env is given at profile-level, return provider account in env
2.b: If env is not given at profile-level, return provider account in project
SUBSTRATE GIVEN UNDER ENVIRONMENT
If calm-version < v3.2.0:
1. account_reference is not available at substrate-level, So need to read from project only
If calm-version >= 3.2.0:
1. account_reference is available at substrate-level
1. account must be filtered at environment
2. If account_reference is not available at substrate-level
2.a: return provider account whitelisted in environment
"""
provider_account = getattr(cls, "account", {})
calm_version = Version.get_version("Calm")
provider_type = getattr(cls, "provider_type")
provider_account_type = PROVIDER_ACCOUNT_TYPE_MAP.get(provider_type, "")
if not provider_account_type:
return ""
# Fetching project data
project_cache_data = common_helper.get_cur_context_project()
project_name = project_cache_data.get("name")
project_accounts = project_cache_data.get("accounts_data", {}).get(
provider_account_type, []
)
if not project_accounts:
LOG.error(
"No '{}' account registered to project '{}'".format(
provider_account_type, project_name
)
)
sys.exit(-1)
# If substrate is defined in blueprint file
cls_bp = common_helper._walk_to_parent_with_given_type(cls, "BlueprintType")
if cls_bp:
environment = {}
for cls_profile in cls_bp.profiles:
for cls_deployment in cls_profile.deployments:
if cls_deployment.substrate.name != str(cls):
continue
environment = getattr(cls_profile, "environment", {})
if environment:
LOG.debug(
"Found environment {} associated to app-profile {}".format(
environment.get("name"), cls_profile
)
)
break
# If environment is given at profile level
if environment:
environment_cache_data = Cache.get_entity_data_using_uuid(
entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment["uuid"]
)
if not environment_cache_data:
LOG.error(
"Environment {} not found. Please run: calm update cache".format(
environment["name"]
)
)
sys.exit(-1)
accounts = environment_cache_data.get("accounts_data", {}).get(
provider_account_type, []
)
if not accounts:
LOG.error(
"Environment '{}' has no '{}' account.".format(
environment_cache_data.get("name", ""),
provider_account_type,
)
)
sys.exit(-1)
# If account given at substrate, it should be whitelisted in environment
if provider_account and provider_account["uuid"] != accounts[0]["uuid"]:
LOG.error(
"Account '{}' not filtered in environment '{}'".format(
provider_account["name"],
environment_cache_data.get("name", ""),
)
)
sys.exit(-1)
# If provider_account is not given, then fetch from env
elif not provider_account:
provider_account = {
"name": accounts[0]["name"],
"uuid": accounts[0]["uuid"],
}
# If environment is not given at profile level
else:
# if provider_account is given, it should be part of project
if not project_accounts:
LOG.error(
"No '{}' account registered to project '{}'".format(
provider_account_type, project_name
)
)
sys.exit(-1)
if (
provider_account
and provider_account["uuid"] not in project_accounts
):
LOG.error(
"Account '{}' not filtered in project '{}'".format(
provider_account["name"], project_name
)
)
sys.exit(-1)
# Else take first account in project
elif not provider_account:
provider_account = {"uuid": project_accounts[0], "kind": "account"}
# If substrate defined inside environment
cls_env = common_helper._walk_to_parent_with_given_type(cls, "EnvironmentType")
if cls_env:
infra = getattr(cls_env, "providers", [])
whitelisted_account = {}
for _pdr in infra:
if _pdr.type == PROVIDER_ACCOUNT_TYPE_MAP[provider_type]:
whitelisted_account = _pdr.account_reference.get_dict()
break
if LV(calm_version) >= LV("3.2.0"):
if provider_account and provider_account[
"uuid"
] != whitelisted_account.get("uuid", ""):
LOG.error(
"Account '{}' not filtered in environment '{}'".format(
provider_account["name"], str(cls_env)
)
)
sys.exit(-1)
elif not whitelisted_account:
LOG.error(
"No account is filtered in environment '{}'".format(
str(cls_env)
)
)
sys.exit(-1)
elif not provider_account:
provider_account = whitelisted_account
# If version is less than 3.2.0, then it should use the account from the project only, OR
# If no account is supplied, will take 0th account in project (in both case of blueprint/environment)
if not provider_account:
provider_account = {"uuid": project_accounts[0], "kind": "account"}
return provider_account["uuid"]
def compile(cls):
cdict = super().compile()
readiness_probe_dict = {}
if "readiness_probe" in cdict and cdict["readiness_probe"]:
readiness_probe_dict = cdict["readiness_probe"]
if hasattr(readiness_probe_dict, "compile"):
readiness_probe_dict = readiness_probe_dict.compile()
else:
readiness_probe_dict = readiness_probe().compile()
# Fill out os specific details if not found
if cdict["os_type"] == "Linux":
if not readiness_probe_dict.get("connection_type", ""):
readiness_probe_dict["connection_type"] = "SSH"
if not readiness_probe_dict.get("connection_port", ""):
readiness_probe_dict["connection_port"] = 22
if not readiness_probe_dict.get("connection_protocol", ""):
readiness_probe_dict["connection_protocol"] = ""
else:
if not readiness_probe_dict.get("connection_type", ""):
readiness_probe_dict["connection_type"] = "POWERSHELL"
if not readiness_probe_dict.get("connection_port", ""):
readiness_probe_dict["connection_port"] = 5985
if not readiness_probe_dict.get("connection_protocol", ""):
readiness_probe_dict["connection_protocol"] = "http"
if cdict.get("vm_recovery_spec", {}) and cdict["type"] != "AHV_VM":
LOG.error(
"Recovery spec is supported only for AHV_VM substrate (given {})".format(
cdict["type"]
)
)
sys.exit("Unknown attribute vm_recovery_spec given")
# Handle cases for empty readiness_probe and vm_recovery_spec
if cdict["type"] == "AHV_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict[
"address"
] = "@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@"
if cdict.get("vm_recovery_spec", {}):
_vrs = cdict.pop("vm_recovery_spec", None)
if _vrs:
cdict["create_spec"] = ahv_vm(
name=_vrs.vm_name, resources=_vrs.vm_override_resources
)
cdict["recovery_point_reference"] = _vrs.recovery_point
elif cdict["type"] == "EXISTING_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict["address"] = "@@{ip_address}@@"
elif cdict["type"] == "AWS_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict["address"] = "@@{public_ip_address}@@"
elif cdict["type"] == "K8S_POD": # Never used (Omit after discussion)
readiness_probe_dict["address"] = ""
cdict.pop("editables", None)
elif cdict["type"] == "AZURE_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict[
"address"
] = "@@{platform.publicIPAddressList[0]}@@"
elif cdict["type"] == "VMWARE_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict["address"] = "@@{platform.ipAddressList[0]}@@"
elif cdict["type"] == "GCP_VM":
if not readiness_probe_dict.get("address", ""):
readiness_probe_dict[
"address"
] = "@@{platform.networkInterfaces[0].accessConfigs[0].natIP}@@"
else:
raise Exception("Un-supported vm type :{}".format(cdict["type"]))
if not cdict.get("vm_recovery_spec", {}):
cdict.pop("vm_recovery_spec", None)
# Adding min defaults in vm spec required by each provider
if not cdict.get("create_spec"):
# TODO shift them to constants file
provider_type_map = {
"AWS_VM": "aws",
"VMWARE_VM": "vmware",
"AHV_VM": "nutanix_pc", # Accounts of type nutanix are not used after 2.9
"AZURE_VM": "azure",
"GCP_VM": "gcp",
}
if cdict["type"] in provider_type_map:
if cdict["type"] == "AHV_VM":
# UI expects defaults. Jira: https://jira.nutanix.com/browse/CALM-20134
if not cdict.get("create_spec"):
cdict["create_spec"] = {"resources": {"nic_list": []}}
else:
# Getting the account_uuid for each provider
# Getting the metadata obj
metadata_obj = get_metadata_obj()
project_ref = metadata_obj.get("project_reference") or dict()
# If project not found in metadata, it will take project from config
ContextObj = get_context()
project_config = ContextObj.get_project_config()
project_name = project_ref.get("name", project_config["name"])
project_cache_data = Cache.get_entity_data(
entity_type=CACHE.ENTITY.PROJECT, name=project_name
)
if not project_cache_data:
LOG.error(
"Project {} not found. Please run: calm update cache".format(
project_name
)
)
sys.exit(-1)
# Registered accounts
project_accounts = project_cache_data["accounts_data"]
provider_type = provider_type_map[cdict["type"]]
account_uuids = project_accounts.get(provider_type, [])
if not account_uuids:
LOG.error(
"No {} account registered in project '{}'".format(
provider_type, project_name
)
)
sys.exit(-1)
# Adding default spec
cdict["create_spec"] = {
"resources": {"account_uuid": account_uuids[0]}
}
# Template attribute should be present for vmware spec
if cdict["type"] == "VMWARE_VM":
cdict["create_spec"]["template"] = ""
# Modifying the editable object
provider_spec_editables = cdict.pop("editables", {})
cdict["editables"] = {}
if provider_spec_editables:
cdict["editables"]["create_spec"] = provider_spec_editables
# Popping out the editables from readiness_probe
readiness_probe_editables = readiness_probe_dict.pop("editables_list", [])
if readiness_probe_editables:
cdict["editables"]["readiness_probe"] = {
k: True for k in readiness_probe_editables
}
# In case we have read provider_spec from a yaml file, validate that we have consistent values for
# Substrate.account (if present) and account_uuid in provider_spec (if present).
# The account_uuid mentioned in provider_spec yaml should be a registered PE under the Substrate.account PC
substrate_account_uuid = cls.get_referenced_account_uuid()
spec_account_uuid = ""
try:
spec_account_uuid = cdict["create_spec"]["resources"]["account_uuid"]
except (AttributeError, TypeError, KeyError):
pass
if substrate_account_uuid:
account_cache_data = Cache.get_entity_data_using_uuid(
entity_type="account", uuid=substrate_account_uuid
)
if not account_cache_data:
LOG.error(
"Account (uuid={}) not found. Please update cache".format(
substrate_account_uuid
)
)
sys.exit(-1)
account_name = account_cache_data["name"]
if spec_account_uuid:
if cdict["type"] == "AHV_VM":
if (
not account_cache_data.get("data", {})
.get("clusters", {})
.get(spec_account_uuid)
):
LOG.error(
"cluster account_uuid (uuid={}) used in the provider spec is not found to be registered"
" under the Nutanix PC account {}. Please update cache".format(
spec_account_uuid, account_name
)
)
sys.exit(-1)
elif cdict["type"] != "EXISTING_VM":
if spec_account_uuid != substrate_account_uuid:
LOG.error(
"Account '{}'(uuid='{}') not matched with account_uuid used in provider-spec (uuid={})".format(
account_name, substrate_account_uuid, spec_account_uuid
)
)
sys.exit(-1)
else:
# if account_uuid is not available add it
if cdict["type"] == "AHV_VM":
# default is first cluster account
account_uuid = list(account_cache_data["data"]["clusters"].keys())[
0
]
_cs = cdict["create_spec"]
if isinstance(_cs, AhvVmType):
# NOTE: We can't get subnet_uuid here, as it involves a parent reference
subnet_name = ""
cluster_name = ""
_nics = _cs.resources.nics
for _nic in _nics:
_nic_dict = _nic.subnet_reference.get_dict()
if not common_helper.is_macro(_nic_dict["name"]):
subnet_name = _nic_dict["name"]
cluster_name = _nic_dict["cluster"]
break
if subnet_name:
account_uuid = common_helper.get_pe_account_uuid_using_pc_account_uuid_and_nic_data(
pc_account_uuid=substrate_account_uuid,
subnet_name=subnet_name,
cluster_name=cluster_name,
)
# Assigning the pe account uuid to ahv vm resources
_cs.resources.account_uuid = account_uuid
else:
subnet_uuid = ""
_nics = _cs.get("resources", {}).get("nic_list", [])
for _nic in _nics:
_nu = _nic["subnet_reference"].get("uuid", "")
if _nu and not common_helper.is_macro(_nu):
subnet_uuid = _nu
break
if subnet_uuid:
account_uuid = common_helper.get_pe_account_uuid_using_pc_account_uuid_and_subnet_uuid(
pc_account_uuid=substrate_account_uuid,
subnet_uuid=subnet_uuid,
)
cdict["create_spec"]["resources"]["account_uuid"] = account_uuid
# Add account uuid for non-ahv providers
if cdict["type"] not in ["EXISTING_VM", "AHV_VM", "K8S_POD"]:
cdict["create_spec"]["resources"]["account_uuid"] = substrate_account_uuid
cdict.pop("account_reference", None)
cdict["readiness_probe"] = readiness_probe_dict
return cdict
def pre_compile(cls):
"""Adds Ahvvm data to substrate metadata"""
super().pre_compile()
# Adding mapping for substrate class in case of AHV provider
types = EntityTypeBase.get_entity_types()
AhvVmType = types.get("AhvVm", None)
provider_spec = cls.provider_spec
if isinstance(provider_spec, AhvVmType):
ui_name = getattr(cls, "name", "") or cls.__name__
sub_metadata = get_dsl_metadata_map([cls.__schema_name__, ui_name])
vm_dsl_name = provider_spec.__name__
vm_display_name = getattr(provider_spec, "name", "") or vm_dsl_name
sub_metadata[AhvVmType.__schema_name__] = {
vm_display_name: {"dsl_name": vm_dsl_name}
}
update_dsl_metadata_map(
cls.__schema_name__, entity_name=ui_name, entity_obj=sub_metadata
)
@classmethod
def pre_decompile(mcls, cdict, context=[], prefix=""):
# Handle provider_spec
cdict = super().pre_decompile(cdict, context, prefix=prefix)
cdict["create_spec"] = provider_spec(cdict["create_spec"])
if "__name__" in cdict:
cdict["__name__"] = "{}{}".format(prefix, cdict["__name__"])
return cdict
@classmethod
def decompile(mcls, cdict, context=[], prefix=""):
if cdict["type"] == "K8S_POD":
LOG.error("Decompilation support for pod deployments is not available.")
sys.exit(-1)
cls = super().decompile(cdict, context=context, prefix=prefix)
provider_spec = cls.provider_spec
if cls.provider_type == "AHV_VM":
context = [cls.__schema_name__, getattr(cls, "name", "") or cls.__name__]
vm_cls = AhvVmType.decompile(provider_spec, context=context, prefix=prefix)
cls.provider_spec = vm_cls
return cls
def get_task_target(cls):
return cls.get_ref()
class SubstrateValidator(PropertyValidator, openapi_type="app_substrate"):
__default__ = None
__kind__ = SubstrateType
def substrate(**kwargs):
name = kwargs.get("name", None)
bases = (Entity,)
return SubstrateType(name, bases, kwargs)
Substrate = substrate()
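The long docstring in get_referenced_account_uuid describes a precedence rule: an explicitly set substrate-level account must be whitelisted (in the profile environment if one exists, otherwise in the project), and when no account is set the environment's provider account, or failing that the first project account, is used. A minimal standalone sketch of that precedence with cache lookups and error reporting stripped out; the function name and inputs below are illustrative, not part of the DSL:
def resolve_account_uuid(substrate_account, env_accounts, project_account_uuids):
    """Illustration of the precedence only: explicit account -> environment -> project."""
    if substrate_account:
        allowed = {a["uuid"] for a in env_accounts} or set(project_account_uuids)
        if substrate_account["uuid"] not in allowed:
            raise ValueError("account is not whitelisted for this substrate")
        return substrate_account["uuid"]
    if env_accounts:
        return env_accounts[0]["uuid"]
    if project_account_uuids:
        return project_account_uuids[0]
    raise ValueError("no provider account available")
# e.g. resolve_account_uuid(None, [], ["uuid-a", "uuid-b"]) -> "uuid-a"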
| 41.136442
| 123
| 0.531279
|
3992d814b825eb30b83133e3e3f52fa9311b2adf
| 2,643
|
py
|
Python
|
doc/sphinxext/github_link.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 50,961
|
2015-01-01T06:06:31.000Z
|
2022-03-31T23:40:12.000Z
|
doc/sphinxext/github_link.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 17,065
|
2015-01-01T02:01:58.000Z
|
2022-03-31T23:48:34.000Z
|
doc/sphinxext/github_link.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 26,886
|
2015-01-01T00:59:27.000Z
|
2022-03-31T18:03:23.000Z
|
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print("Failed to execute git to get revision")
return None
return revision.decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
# Unwrap the object to get the correct source
# file in case that is wrapped by a decorator
obj = inspect.unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(
_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt
)
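make_linkcode_resolve is meant to be wired into Sphinx's sphinx.ext.linkcode extension from conf.py, which looks for a module-level linkcode_resolve callable. A plausible conf.py fragment; the package name and the USER/PROJECT parts of the URL are placeholders, and the import assumes the sphinxext directory is on sys.path:
# conf.py (sketch)
from github_link import make_linkcode_resolve  # assumes doc/sphinxext is on sys.path
extensions = ["sphinx.ext.linkcode"]
linkcode_resolve = make_linkcode_resolve(
    "mypackage",
    "https://github.com/USER/PROJECT/blob/{revision}/{package}/{path}#L{lineno}",
)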
| 31.094118
| 85
| 0.614075
|
631c3195d17570d4a6766ca0b9fdd42be90b55d1
| 6,033
|
py
|
Python
|
cryptotools/ECDSA/secp256k1.py
|
ArtoLabs/cryptotools
|
cdb03d4900513d057391562af9d1255f81b79a30
|
[
"MIT"
] | 155
|
2018-02-16T17:12:04.000Z
|
2022-03-17T08:27:12.000Z
|
cryptotools/ECDSA/secp256k1.py
|
djmuratb/cryptotools
|
51c641c8cd6b564d7c39cb3eaaee05d62e024147
|
[
"MIT"
] | 22
|
2018-07-03T18:56:22.000Z
|
2022-03-23T12:05:39.000Z
|
cryptotools/ECDSA/secp256k1.py
|
djmuratb/cryptotools
|
51c641c8cd6b564d7c39cb3eaaee05d62e024147
|
[
"MIT"
] | 61
|
2018-06-01T20:28:43.000Z
|
2022-03-25T04:11:51.000Z
|
import secrets
from cryptotools import message
from cryptotools import ECDSA
from cryptotools.number_theory_stuff import mulinv, modsqrt
from cryptotools.transformations import int_to_bytes, bytes_to_int, hex_to_int, bytes_to_hex, hex_to_bytes
P = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
# Generator
G = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
# Order
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
CURVE = ECDSA.Curve(P, 0, 7, G, N, name='secp256k1')
class Point(ECDSA.Point):
def __init__(self, x, y):
super().__init__(x, y, CURVE)
class PrivateKey(message.Message):
def __init__(self, bts):
assert bytes_to_int(bts) < N, 'Key larger than Curve Order'
super().__init__(bts)
@classmethod
def random(cls):
key = secrets.randbelow(N)
return cls.from_int(key)
@classmethod
def from_wif(cls, wif: str) -> 'PrivateKey':
from cryptotools.BTC import base58, sha256
from cryptotools.BTC.network import network
bts = base58.decode(wif)
network_byte, key, checksum = bts[0:1], bts[1:-4], bts[-4:]
assert sha256(sha256(network_byte + key))[:4] == checksum, 'Invalid Checksum'
assert network_byte == network('wif'), 'Invalid Network byte'
if key.endswith(b'\x01'):
key = key[:-1]
compressed = True # TODO
else:
compressed = False # TODO
return cls(key)
def wif(self, compressed=False) -> str:
from cryptotools.BTC import base58, sha256
from cryptotools.BTC.network import network
extended = network('wif') + self.bytes() + (b'\x01' if compressed else b'')
hashed = sha256(sha256(extended))
checksum = hashed[:4]
return base58.encode(extended + checksum)
def to_public(self) -> 'PublicKey':
point = CURVE.G * self.int()
return PublicKey(point)
def __repr__(self):
return f"PrivateKey({self.msg})"
def sign_hash(self, hash):
e = hex_to_int(hash) if isinstance(hash, str) else bytes_to_int(hash)
r, s = 0, 0
while r == 0 or s == 0:
k = secrets.randbelow(N)
point = CURVE.G * k
r = point.x % N
inv_k = mulinv(k, N)
s = (inv_k * (e + r * self.int())) % N
return message.Signature(r=r, s=s)
class PublicKey:
def __init__(self, point: Point):
self.point = point
def __eq__(self, other: 'PublicKey') -> bool:
return self.point == other.point
def __repr__(self) -> str:
return f"PublicKey({self.x}, {self.y})"
@classmethod
def decode(cls, key: bytes) -> 'PublicKey':
if key.startswith(b'\x04'): # uncompressed key
assert len(key) == 65, 'An uncompressed public key must be 65 bytes long'
x, y = bytes_to_int(key[1:33]), bytes_to_int(key[33:])
else: # compressed key
assert len(key) == 33, 'A compressed public key must be 33 bytes long'
x = bytes_to_int(key[1:])
root = modsqrt(CURVE.f(x), P)
if key.startswith(b'\x03'): # odd root
y = root if root % 2 == 1 else -root % P
elif key.startswith(b'\x02'): # even root
y = root if root % 2 == 0 else -root % P
else:
assert False, 'Wrong key format'
return cls(Point(x, y))
@classmethod
def from_private(cls, prv):
key = PrivateKey.from_int(prv) if isinstance(prv, int) else prv
return key.to_public()
@classmethod
def from_hex(cls, hexstring: str) -> 'PublicKey':
return cls.decode(hex_to_bytes(hexstring))
@property
def x(self) -> int:
"""X coordinate of the (X, Y) point"""
return self.point.x
@property
def y(self) -> int:
"""Y coordinate of the (X, Y) point"""
return self.point.y
def encode(self, compressed=False) -> bytes:
if compressed:
if self.y & 1: # odd root
return b'\x03' + int_to_bytes(self.x).rjust(32, b'\x00')
else: # even root
return b'\x02' + int_to_bytes(self.x).rjust(32, b'\x00')
return b'\x04' + int_to_bytes(self.x).rjust(32, b'\x00') + int_to_bytes(self.y).rjust(32, b'\x00')
def hex(self, compressed=False) -> str:
return bytes_to_hex(self.encode(compressed=compressed))
def to_address(self, addrtype: str, compressed=False) -> str:
from cryptotools.BTC.address import pubkey_to_address
if compressed is True and addrtype == 'P2PKH':
return pubkey_to_address(self.encode(compressed=True), addrtype)
return pubkey_to_address(self, addrtype)
def is_pubkey(hexstr):
try:
if isinstance(hexstr, bytes):
PublicKey.decode(hexstr)
else:
PublicKey.from_hex(hexstr)
except AssertionError:
return False
return True
def generate_keypair():
private = PrivateKey.random()
public = private.to_public()
return private, public
class Message(message.Message):
def sign(self, private: PrivateKey):
e = hex_to_int(self.hash())
r, s = 0, 0
while r == 0 or s == 0:
k = secrets.randbelow(N)
point = CURVE.G * k
r = point.x % N
inv_k = mulinv(k, N)
s = (inv_k * (e + r * private.int())) % N
return message.Signature(r=r, s=s)
def verify(self, signature: message.Signature, public: PublicKey) -> bool:
r, s = signature.r, signature.s
if not (1 <= r < N and 1 <= s < N):
return False
e = hex_to_int(self.hash())
w = mulinv(s, N)
u1 = (e * w) % N
u2 = (r * w) % N
point = CURVE.G * u1 + public.point * u2
return r % N == point.x % N
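A short usage sketch of the pieces above: random key pair, ECDSA sign/verify over a message, and the compressed public-key encoding. It assumes Message, like the other message.Message subclasses in this module, can be constructed directly from bytes:
def example_sign_and_verify():
    private, public = generate_keypair()
    msg = Message(b"hello world")  # bytes constructor inherited from message.Message
    signature = msg.sign(private)
    assert msg.verify(signature, public)
    # Compressed SEC encoding: 0x02/0x03 prefix + 32-byte x coordinate = 33 bytes.
    assert len(public.encode(compressed=True)) == 33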
| 31.421875
| 138
| 0.59158
|
44f738869fbccdb1d607dd8b8353db8f579ab35a
| 18,429
|
py
|
Python
|
ikbtfunctions/ik_robots.py
|
mdecourse/IKBT
|
ed62fd027ca34a59067cf30a6c3ab58c356c14fb
|
[
"BSD-3-Clause"
] | null | null | null |
ikbtfunctions/ik_robots.py
|
mdecourse/IKBT
|
ed62fd027ca34a59067cf30a6c3ab58c356c14fb
|
[
"BSD-3-Clause"
] | null | null | null |
ikbtfunctions/ik_robots.py
|
mdecourse/IKBT
|
ed62fd027ca34a59067cf30a6c3ab58c356c14fb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
#
# Inverse Kinematics Classes
#
# Copyright 2017 University of Washington
# Developed by Dianmu Zhang and Blake Hannaford
# BioRobotics Lab, University of Washington
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sympy as sp
#import numpy as np
from ikbtbasics.kin_cl import *
from ikbtfunctions.helperfunctions import *
from ikbtbasics.ik_classes import * # special classes for Inverse kinematics in sympy
#
####
#
# NOTE: due to an obscure sympy bug, you cannot use numerical values in any DH position. Use a symbolic constant
# instead (like a_3 below), declare it in params, and give it your value in pvals
#
#####
def robot_params(name):
pvals = {} # null for most robots
List = ['UR5', 'Puma', 'Chair_Helper', 'Brad', 'ArmRobo', 'Wrist', 'Arm_3', 'MiniDD', 'Olson13','Stanford', 'Chair6DOF','Khat6DOF','Craig417']
if not (name in List):
print('robot_params(): Unknown robot, ' + name )
print('Here are the defined robots: ')
for n in List:
print(' ', n)
quit()
if(name == 'ArmRobo'): # submitted by issue #15
# standardize on the order "alpha N-1, a N-1, d N, theta N" for the DH table columns.
# Olson 2013
# DOF: 6
# methods to test: m5, m3,
# Yb = d_1, Xb = d_2, L1 = l3, L2 = l4, L3 = l5
dh = sp.Matrix([
[sp.pi/2, 0., l_2, th_1],
[0, l_1, 0., th_2],
[sp.pi/2, 0., 0., th_3],
[-sp.pi/2, 0., l_3, th_4],
[sp.pi/2, 0., 0., th_5],
[0, 0., d_6, 0.]
])
vv = [1, 1, 1, 1, 1, 0]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5), unknown(d_6)]
params = [l_1, l_2, l_3]
pvals = {l_1: 0.19681, l_2: 0.251, l_3:0.145423}
#Big thanks for your help, really appreciate it.
if(name == 'Arm_3'): # two link planar test (Issue #12)
#
dh = sp.Matrix([
[ 0 , a_1 , 0 , th_1 ],
[ 0 , a_2 , 0 , th_2 ],
[ 0 , a_3 , 0 , th_3 ],
[ 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 ]
])
vv = [1,1,1,0,0,0]
variables = [unknown(th_1), unknown(th_2), unknown(th_3)]
params = [a_1, a_2, a_3]
pvals = {a_1:1, a_2:1, a_3:1}
if(name == 'Brad'): # 3-RRR articulated manipulator
# Brad 2019
# DOF: 3
dh = sp.Matrix([
[0, 0., d_1, th_1],
[sp.pi/2., l_2, 0., th_2],
[0., l_3, 0., th_3],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]
])
vv = [1, 1, 1, 0, 0, 0]
variables = [unknown(th_1), unknown(th_2), unknown(th_3)]
params = [d_1, l_2, l_3]
pvals = {d_1: 0.06, l_2: 0.15, l_3: 0.15}
if(name == 'UR5'): #Universal Robots - test case for 3 parallel axes
dh = sp.Matrix([
[ 0 , 0 , d_1 , th_1 ],
[ sp.pi/2 , 0 , 0 , th_2 ],
[ 0 , a_2, 0 , th_3 ],
[ 0 , a_3, d_4 , th_4 ],
[ sp.pi/2 , 0 , d_5 , th_5 ],
[ -sp.pi/2 , 0 , d_6 , th_6 ]
])
vv = [1,1,1,1,1,1]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5), unknown(th_6)]
params = [a_2,a_3,d_1,d_4,d_5,d_6]
pvals = {a_2: .425, a_3: .392, d_1:.089, d_4:.109, d_5:.095, d_6:.082} # meters
if(name == 'Craig417'):
dh = sp.Matrix([
[ 0 , 0 , 0 , th_1 ],
[-sp.pi/2, 0 , 0 , th_2 ],
[ sp.pi/4 , 0, d_2 , th_3 ],
[ 0 , a_3, d_3 , th_4 ],
[ 0 , 0 , 0, 0 ],
[ 0 , 0 , 0, 0 ]
])
vv = [1,1,1,1,1,1]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4)]
params = [d_2, d_3, a_3]
pvals = {d_2:1, d_3:1, a_3:1} # meters
# The famous Puma 560 (solved in Craig)
#
if(name == 'Puma'):
dh = sp.Matrix([
[ 0 , 0 , d_1 , th_1 ], # Note: Puma is used for tests so mods to this table
[-sp.pi/2 , 0 , 0 , th_2 ], # may break ikbtleaves.updateL.TestSolver007
[ 0 , a_2, d_3 , th_3 ],
[-sp.pi/2 , a_3, d_4, th_4 ],
[-sp.pi/2 , 0, 0 , th_5 ],
[ sp.pi/2 , 0, 0 , th_6 ]
])
vv = [1,1,1,1,1,1]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5), unknown(th_6)]
params = [d_1, a_2, a_3, d_3, d_4]
pvals = {d_1:0.6,a_2:0.432, a_3:0.0203, d_3:0.1245, d_4:0.432} # meters
if(name == 'Chair_Helper'):
vv = [0,1,1,1,1,1] # must be length 5 since 5dof and 5 unks
dh = sp.Matrix([
[ 0, 0, d_1 , 0 ],
[ 0 , l_1, 0 , th_2 ],
[ sp.pi/2, 0, l_2 , th_3 ],
[ sp.pi/2 , 0, 0, th_4 ], # must fill remaining rows with zeros
[-sp.pi/2 , 0, l_4, th_5 ],
[ 0 , 0, 0, 0 ]
])
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [l_1, l_2, l_4]
pvals = {l_1: 2, l_2: 1, l_4: 3} # can change to values
if(name == 'Wrist'):
sp.var('A B C')
### These somewhat weird DH params give you the ZYX Euler Angles
# matrix of example 4.7 (don't ask how I got this!)
dh = sp.Matrix([
[ 0, 0, 0, A ],
[ -sp.pi/2, 0, 0, (sp.pi/2 + B) ],
[ sp.pi/2 , 0, 0, (sp.pi/2 + C) ], # must fill remaining rows with zeros
[ -sp.pi/2, 0, 0, -sp.pi/2 ],
[ 0 , 0, 0, 0 ],
[ 0 , 0, 0, 0 ]
])
vv = [1,1,1,1,1,1]
variables = [unknown(A), unknown(B), unknown(C)]
params = []
pvals = {}
if(name == 'MiniDD'):
#
# UW BRL Mini Direct Drive Robot, 5-DOF
#
dh = sp.Matrix([
[ 0 , 0 , d_1 , 0 ],
[ -sp.pi/2 , 0 , 0 , th_2 ],
[ -sp.pi/2 , l_3 , 0 , th_3 ],
[ -sp.pi/2 , 0 , l_4 , th_4 ],
[ -sp.pi/2 , 0 , 0 , th_5 ],
[ 0 , 0 , 0 , 0 ]
])
vv = [0,1,1,1,1]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5) ]
params = [l_3, l_4]
pvals = {l_3: 5, l_4:2}
if(name == 'Olson13'):
# standardize on the order "alpha N-1, a N-1, d N, theta N" for the DH table columns.
# Olson 2013
# DOF: 6
# methods to test: m5, m3,
# Yb = d_1, Xb = d_2, L1 = l3, L2 = l4, L3 = l5
dh = sp.Matrix([
[-sp.pi/2, 0., d_1, sp.pi/2],
[sp.pi/2, 0., d_2, -sp.pi/2],
[sp.pi/2, 0., l_3, th_3],
[sp.pi/2, 0., 0., th_4],
[0., l_4, 0., th_5],
[sp.pi/2, 0., l_5, th_6]
])
vv = [0, 0, 1, 1, 1, 1]
variables = [unknown(d_1), unknown(d_2), unknown(th_3), unknown(th_4), unknown(th_5), unknown(th_6)]
params = [l_3, l_4, l_5]
pvals = {l_3: 1, l_4: 4, l_5:2}
if(name == 'Stanford'):
sp.var('l_4 l_6')
dh = sp.Matrix([
[-sp.pi/2, 0., l_1, th_1],
[sp.pi/2, 0., l_2, th_2],
[0, 0., d_3, -sp.pi/2],
[-sp.pi/2, 0., l_4, th_4],
[sp.pi/2, 0., 0., th_5],
[0., 0., l_6, th_6]
])
vv = [1, 1, 0, 1, 1, 1]
variables = [unknown(th_1), unknown(th_2), unknown(d_3), unknown(th_4), unknown(th_5), unknown(th_6)]
params = [l_1, l_2, l_4, l_6]
pvals = {l_1:1, l_2: 2, l_4:4, l_6:3}
if(name=='Sims11'):
# Sims 2011,
# DOF: 5
# methods to test: m5, m3, m4, m6
print("looking at Sims11")
sp.var('l_1 l_2 l_3')
dh = sp.Matrix([
[0., 0., d_1, 0.],
[sp.pi/2, 0., d_2, 0.],
[sp.pi/2, l_1, 0., th_3],
[sp.pi/2, 0., l_2, th_4],
[sp.pi/2, l_3, 0., th_5],
[0., 0., 0., 0.]
])
vv = [0, 0, 1, 1, 1, 1, 1]
variables = [unknown(d_1), unknown(d_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [l_1, l_2, l_3]
pvals = {l_1: 5, l_2:2, l_3:4}
if(name == 'Srisuan11'):
# Srisuan 2011,
# DOF: 6
dh = sp.Matrix([
[0., 0., d_1, 0.],
[0., 0., 0., th_2],
[sp.pi/2, 0., l_1, th_3],
[sp.pi/2, 0., d_4, sp.pi/2],
[0., 0., 0., th_5],
[sp.pi/2, 0., 0., th_6]
])
vv = [0, 1, 1, 0, 1, 1]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(d_4), unknown(th_5), unknown(th_6)]
params = [l_1]
pvals = {l_1:2}
if(name == 'Axtman13'):
# Axtman 2013sp,
# DOF: 4
dh = sp.Matrix([
[0., 0., d_1, 0.],
[sp.pi/2, 0., l_2, th_2],
[sp.pi/2, 0., 0., th_3],
[0., l_3, 0., th_4],
[0., l_4, 0., 0.],
[0., 0., 0., 0.],
])
sp.var('l_3 l_4')
vv = [0, 1, 1, 1, 0, 0]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4)]
params = [l_2, l_3, l_4]
pvals = {l_2: 1, l_3:2, l_4:3}
if(name == 'Mackler13'):
# Mackler 2013sp
# DOF: 5
dh = sp.Matrix([
[-sp.pi/2, h, d_1, 0.],
[sp.pi/2, 0., l_1, th_2],
[sp.pi/2, l_2, 0., th_3],
[sp.pi/2, 0., l_3, th_4],
[-sp.pi/2, 0., 0., th_5],
[0, 0, 0, 0]
])
sp.var('l_3')
vv = [0, 1, 1, 1, 1, 0]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [l_1, l_2, l_3]
pvals = {l_1:2, l_2:2, l_3: 4}
if(name == 'Minder13'):
# Minder 2013sp
# DOF: 4
dh = sp.Matrix([
[0., 0., d_1, 0.],
[sp.pi/2, 0., l_2, th_2],
[sp.pi/2, 0., 0., th_3],
[sp.pi/2, 0., -l_3, th_4],
[0., 0., 0., 0.],
[0., 0., 0., 0.]
])
sp.var('l_3')
vv = [0, 1, 1, 1, 0, 0]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4)]
params = [l_2,l_3]
pvals = {l_2: 1, l_3:2}
if(name == 'Palm13'):
# Palm 2013sp
# DOF: 4
dh = sp.Matrix([
[sp.pi/2, 0., l_1, th_1],
[sp.pi/2, 0., d_2, -sp.pi/2],
[0., 0., 0., th_3],
[-sp.pi/2, l_3, h, th_4],
[0., 0, 0, 0.],
[0., 0., 0., 0.]
])
sp.var('l_1 l_3 h')
vv = [1, 0, 1, 1, 0, 0]
variables = [unknown(th_1), unknown(d_2), unknown(th_3), unknown(th_4)]
params = [l_1, l_3, h]
pvals = {l_1:3, l_3: 1, h:2}
if(name == 'Parkman13'):
# Parkman 2013sp
# DOF: 5
dh = sp.Matrix([
[0., 0., h, th_1],
[sp.pi/2, 0., 0., th_2 ],
[0., l_2, 0., th_3],
[sp.pi, l_3, d_4, 0.],
[sp.pi/2, 0., l_5, th_5],
[0., 0., 0., 0.]
])
sp.var('h l_2 l_3 l_5')
vv = [1, 1, 1, 0, 1, 0]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(d_4), unknown(th_5)]
params = [h, l_2, l_3, l_5]
pvals = {h: 1, l_2: 2, l_3:3, l_5:5}
if(name == 'Frei13'):
# Frei 13sp
# DOF 5
dh = sp.Matrix([
[0., 0., d_1, 0.],
[sp.pi/2, 0., 0., th_2],
[sp.pi/2, 0., l_3, th_3],
[sp.pi/2, 0., 0., th_4],
[sp.pi/2, 0., l_5, th_5],
[0., 0., 0., 0.]
])
sp.var('l_3 l_5')
vv = [0, 1, 1, 1, 1, 0]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [l_3, l_5]
pvals = {l_3: 6, l_5:3}
if(name == 'Wachtveitl'):
# Wachtveitl 2013sp
# DOF: 5
dh = sp.Matrix([
[-sp.pi/2, 0., d_1, 0.],
[sp.pi/2, 0., h, th_2],
[sp.pi/2, 0., 0., th_3],
[0., l_2, l_3, th_4],
[sp.pi/2, 0., l_4, th_5],
[0., 0., 0, 0.]
])
sp.var('h l_3 l_4')
vv = [0, 1, 1, 1, 1, 0]
variables = [unknown(d_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [h, l_2, l_3, l_4]
pvals = {h:1, l_2:2, l_3:3, l_4:2}
if(name == 'Bartell'):
# Bartell 2013
# DOF: 5
dh = sp.Matrix([
[0., 0., l_1, th_1],
[sp.pi/2, 0., d_2, 0.],
[0., 0., 0., th_3],
[sp.pi/2, 0., d_4, 0.],
[0., 0., 0., th_5],
[sp.pi/2, 0., 0., 0.]
])
vv = [1, 0, 1, 0, 1, 1]
variables = [unknown(th_1), unknown(d_2), unknown(th_3), unknown(d_4), unknown(th_5)]
params = [l_1]
pvals = {l_1:2}
if(name == 'DZhang'):
# Dianmu Zhang
# DOF 5
dh = sp.Matrix([
[0., 0., h, th_1],
[sp.pi/2, l_1, 0, th_2],
[0, l_3, 0, th_3],
[sp.pi/2, 0., l_4, th_4],
[sp.pi/2, 0., 0, th_5],
[0, 0., 0., 0]
])
sp.var('h l_3 l_4')
vv = [1, 1, 1, 1, 1, 1]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5)]
params = [h, l_1, l_3, l_4]
pvals = {h:1, l_1:1, l_3:3, l_4:4}
if(name == 'Khat6DOF'):
#
# This is Kuka Model KR60
# ( as analyzed in Khatamian6DOF_IK2015.pdf)
# unsolved, 6DOF
dh = sp.Matrix([ ## This one requires sum-of-angles.
[ 0, a_1 , l_1 , th_1 ],
[ sp.pi/2, 0, 0 , th_2 ],
[ 0 , a_2, 0 , th_3 ],
[ sp.pi/2 , a_3, l_4, th_4 ],
[-sp.pi/2 , 0, 0 , th_5 ],
[ sp.pi/2 , 0, 0 , th_6 ]
])
vv = [1,1,1,1,1,1]
variables = [unknown(th_1), unknown(th_2), unknown(th_3), unknown(th_4), unknown(th_5), unknown(th_6)]
params = [a_1, l_1, a_2, a_3, l_4]
pvals = {a_1: 1, l_1:2, a_2:4, a_3:2, l_4:5}
################## (all robots) ######################
## make sure each unknown knows its position (index)
i = 1
for v in variables:
v.n = i
i+=1
return [dh, vv, params, pvals, variables]
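robot_params returns [dh, vv, params, pvals, variables]; a hedged sketch of how a caller might turn one of the predefined tables into a numeric DH matrix, using only names defined in this module plus sympy's Matrix.subs:
def example_numeric_dh(name="Puma"):
    # robot_params() exits on unknown names, so stick to entries in its List.
    dh, vv, params, pvals, variables = robot_params(name)
    # Substitute the example link lengths; the joint unknowns stay symbolic.
    dh_numeric = dh.subs(pvals)
    return dh_numeric, vv, variables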
| 38.797895
| 757
| 0.397471
|
b221c5cc1a7d8dc6c02ef68bfa344545eb96b283
| 6,634
|
py
|
Python
|
tests/core/test_relayer.py
|
Jacobs4/pyatv
|
52956adf3b79198be52cc03649f3ddeee19f9e6c
|
[
"MIT"
] | null | null | null |
tests/core/test_relayer.py
|
Jacobs4/pyatv
|
52956adf3b79198be52cc03649f3ddeee19f9e6c
|
[
"MIT"
] | null | null | null |
tests/core/test_relayer.py
|
Jacobs4/pyatv
|
52956adf3b79198be52cc03649f3ddeee19f9e6c
|
[
"MIT"
] | null | null | null |
"""Unit tests for pyatv.core.relayer."""
import pytest
from pyatv import exceptions
from pyatv.const import Protocol
from pyatv.core.relayer import Relayer
@pytest.fixture
def relay_base_only():
relayer = Relayer(BaseClass, [Protocol.MRP])
relayer.register(SubClass1(), Protocol.MRP)
yield relayer
class BaseClass:
def no_args(self):
pass
def with_args(self, arg):
pass
def with_kwargs(self, **kwargs):
pass
@property
def prop(self):
pass
async def async_no_args(self):
pass
async def async_with_args(self, arg):
pass
async def async_with_kwargs(self, **kwargs):
pass
class SubClass1(BaseClass):
def no_args(self):
return "subclass1"
def with_args(self, arg):
return arg * 2
def with_kwargs(self, **kwargs):
return kwargs["a"] * kwargs["b"]
@property
def prop(self):
return 123
async def async_no_args(self):
return "subclass1"
async def async_with_args(self, arg):
return arg * 2
async def async_with_kwargs(self, **kwargs):
return kwargs["a"] * kwargs["b"]
class SubClass2(BaseClass):
def with_args(self, arg):
return arg
class SubClass3(BaseClass):
def with_kwargs(self, **kwargs):
return kwargs["a"] - kwargs["b"]
class SubClass4(BaseClass):
def __init__(self, ret_string):
self.ret_string = ret_string
def no_args(self):
return self.ret_string
@pytest.mark.asyncio
async def test_base_cases(relay_base_only):
assert relay_base_only.relay("no_args")() == "subclass1"
assert relay_base_only.relay("with_args")(3) == 6
assert relay_base_only.relay("with_kwargs")(a=2, b=3) == 6
assert relay_base_only.relay("prop") == 123
assert await relay_base_only.relay("async_no_args")() == "subclass1"
assert await relay_base_only.relay("async_with_args")(3) == 6
assert await relay_base_only.relay("async_with_kwargs")(a=2, b=3) == 6
def test_class_priority():
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(SubClass1(), Protocol.AirPlay)
relayer.register(SubClass3(), Protocol.MRP)
relayer.register(SubClass2(), Protocol.DMAP)
assert relayer.relay("no_args")() == "subclass1"
assert relayer.relay("with_args")(3) == 3
assert relayer.relay("with_kwargs")(a=4, b=1) == 3
def test_relay_missing_instance_ignored_and_raises_not_found():
relayer = Relayer(BaseClass, [Protocol.MRP])
with pytest.raises(exceptions.NotSupportedError):
relayer.relay("no_args")
def test_relay_missing_target_raises():
relayer = Relayer(BaseClass, [Protocol.MRP])
relayer.register(SubClass2(), Protocol.MRP)
with pytest.raises(exceptions.NotSupportedError):
relayer.relay("no_args")
def test_relay_method_not_in_interface_raises():
relayer = Relayer(BaseClass, [Protocol.MRP])
relayer.register(SubClass2(), Protocol.MRP)
with pytest.raises(RuntimeError):
relayer.relay("missing_method")
def test_add_instance_not_in_priority_list_raises():
relayer = Relayer(BaseClass, [Protocol.MRP])
with pytest.raises(RuntimeError):
relayer.register(SubClass1(), Protocol.DMAP)
def test_relay_override_priority():
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP])
relayer.register(SubClass1(), Protocol.DMAP)
relayer.register(SubClass2(), Protocol.MRP)
assert relayer.relay("with_args", [Protocol.MRP, Protocol.DMAP])(3) == 3
assert relayer.relay("with_args", [Protocol.DMAP, Protocol.MRP])(3) == 6
def test_main_instance():
instance2 = SubClass2()
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(SubClass1(), Protocol.DMAP)
relayer.register(SubClass3(), Protocol.AirPlay)
relayer.register(instance2, Protocol.MRP)
assert relayer.main_instance == instance2
def test_main_instance_missing_instance_for_priority():
relayer = Relayer(BaseClass, [Protocol.MRP])
with pytest.raises(exceptions.NotSupportedError):
relayer.main_instance
def test_get_instance_of_type():
instance1 = SubClass1()
instance2 = SubClass2()
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(instance1, Protocol.MRP)
relayer.register(instance2, Protocol.DMAP)
assert relayer.get(Protocol.MRP) == instance1
assert relayer.get(Protocol.DMAP) == instance2
assert relayer.get(Protocol.AirPlay) is None
def test_takeover_and_release():
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(SubClass4("airplay"), Protocol.AirPlay)
relayer.register(SubClass4("mrp"), Protocol.MRP)
relayer.register(SubClass4("dmap"), Protocol.DMAP)
assert relayer.relay("no_args")() == "mrp"
relayer.takeover(Protocol.AirPlay)
assert relayer.relay("no_args")() == "airplay"
relayer.release()
assert relayer.relay("no_args")() == "mrp"
def test_takeover_while_takeover_raises():
relayer = Relayer(BaseClass, [Protocol.AirPlay])
relayer.register(SubClass4("airplay"), Protocol.AirPlay)
relayer.takeover(Protocol.DMAP)
with pytest.raises(exceptions.InvalidStateError):
relayer.takeover(Protocol.DMAP)
def test_takeover_overrides_manual_priority():
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(SubClass4("airplay"), Protocol.AirPlay)
relayer.register(SubClass4("mrp"), Protocol.MRP)
relayer.register(SubClass4("dmap"), Protocol.DMAP)
relayer.takeover(Protocol.AirPlay)
assert (
relayer.relay("no_args", [Protocol.DMAP, Protocol.MRP, Protocol.AirPlay])()
== "airplay"
)
def test_takeover_overrides_main_instance():
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP])
relayer.register(SubClass4("mrp"), Protocol.MRP)
relayer.register(SubClass4("dmap"), Protocol.DMAP)
relayer.takeover(Protocol.DMAP)
assert relayer.main_instance.no_args() == "dmap"
def test_get_all_instances():
mrp = SubClass4("mrp")
dmap = SubClass4("dmap")
airplay = SubClass4("airplay")
relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay])
relayer.register(mrp, Protocol.MRP)
relayer.register(dmap, Protocol.DMAP)
relayer.register(airplay, Protocol.AirPlay)
instances = relayer.instances
assert len(instances) == 3
assert mrp in instances
assert dmap in instances
assert airplay in instances
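For reference, the pattern these tests keep repeating — priority list, one registered instance per protocol, relay by method name, optional takeover — condensed into one sketch using only the helper classes defined above:
def example_relayer_dispatch():
    relayer = Relayer(BaseClass, [Protocol.MRP, Protocol.DMAP])
    relayer.register(SubClass4("mrp"), Protocol.MRP)
    relayer.register(SubClass4("dmap"), Protocol.DMAP)
    # The highest-priority protocol answers unless a takeover is active.
    assert relayer.relay("no_args")() == "mrp"
    relayer.takeover(Protocol.DMAP)
    assert relayer.relay("no_args")() == "dmap"
    relayer.release()
    assert relayer.relay("no_args")() == "mrp"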
| 27.991561
| 83
| 0.702743
|
381f6b38713cc91451a23a7f06571962dce0bdb8
| 915
|
py
|
Python
|
tests/unit/test_simple.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | 3
|
2021-12-11T03:52:57.000Z
|
2022-03-22T20:42:56.000Z
|
tests/unit/test_simple.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | 12
|
2021-12-31T19:22:09.000Z
|
2022-03-21T03:49:13.000Z
|
tests/unit/test_simple.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Jacob Baumbach
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from adorn.orchestrator.orchestrator import Orchestrator
from adorn.unit.simple import Simple
def test__contains():
assert not Simple._contains(int, Orchestrator)
def test__type_check():
assert Simple._type_check(int, Orchestrator, 1) is None
def test__from_obj():
assert Simple._from_obj(int, Orchestrator, 1) is None
| 32.678571
| 74
| 0.769399
|
2e8a8659cfa29b1bb15672d44ddcb9d9044b4036
| 12,413
|
py
|
Python
|
cinderclient/tests/v1/test_auth.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/tests/v1/test_auth.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/tests/v1/test_auth.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import requests
from cinderclient.v1 import client
from cinderclient import exceptions
from cinderclient.tests import utils
class AuthenticateAgainstKeystoneTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v1", service_type='volume')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "volume",
"endpoints": [
{
"region": "RegionOne",
"adminURL": "http://localhost:8776/v1",
"internalURL": "http://localhost:8776/v1",
"publicURL": "http://localhost:8776/v1",
},
],
},
],
},
}
auth_response = utils.TestResponse({
"status_code": 200,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
def test_authenticate_tenant_id(self):
cs = client.Client("username", "password",
auth_url="http://localhost:8776/v1",
tenant_id='tenant_id', service_type='volume')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
"tenant": {
"description": None,
"enabled": True,
"id": "tenant_id",
"name": "demo"
} # tenant associated with token
},
"serviceCatalog": [
{
"type": "volume",
"endpoints": [
{
"region": "RegionOne",
"adminURL": "http://localhost:8776/v1",
"internalURL": "http://localhost:8776/v1",
"publicURL": "http://localhost:8776/v1",
},
],
},
],
},
}
auth_response = utils.TestResponse({
"status_code": 200,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantId': cs.client.tenant_id,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
tenant_id = resp["access"]["token"]["tenant"]["id"]
self.assertEqual(cs.client.tenant_id, tenant_id)
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v1")
resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
auth_response = utils.TestResponse({
"status_code": 401,
"text": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_redirect(self):
cs = client.Client("username", "password", "project_id",
"http://localhost:8776/v1", service_type='volume')
dict_correct_response = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "volume",
"endpoints": [
{
"adminURL": "http://localhost:8776/v1",
"region": "RegionOne",
"internalURL": "http://localhost:8776/v1",
"publicURL": "http://localhost:8776/v1/",
},
],
},
],
},
}
correct_response = json.dumps(dict_correct_response)
dict_responses = [
{"headers": {'location': 'http://127.0.0.1:5001'},
"status_code": 305,
"text": "Use proxy"},
# Configured on the admin port, cinder redirects to the v2.0 port.
# When trying to connect to it, keystone auth succeeds via the v1.0
# protocol (through headers), but tokens are returned in the
# body (looks like a keystone bug). Left for compatibility.
{"headers": {},
"status_code": 200,
"text": correct_response},
{"headers": {},
"status_code": 200,
"text": correct_response}
]
responses = [(utils.TestResponse(resp)) for resp in dict_responses]
def side_effect(*args, **kwargs):
return responses.pop(0)
mock_request = mock.Mock(side_effect=side_effect)
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(
"POST",
token_url,
headers=headers,
data=json.dumps(body),
allow_redirects=True,
**self.TEST_REQUEST_BASE)
resp = dict_correct_response
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
class AuthenticationTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id", "auth_url")
management_url = 'https://localhost/v1.1/443470'
auth_response = utils.TestResponse({
'status_code': 204,
'headers': {
'x-server-management-url': management_url,
'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
},
})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'Accept': 'application/json',
'X-Auth-User': 'username',
'X-Auth-Key': 'password',
'X-Auth-Project-Id': 'project_id',
'User-Agent': cs.client.USER_AGENT
}
mock_request.assert_called_with(
"GET",
cs.client.auth_url,
headers=headers,
**self.TEST_REQUEST_BASE)
self.assertEqual(cs.client.management_url,
auth_response.headers['x-server-management-url'])
self.assertEqual(cs.client.auth_token,
auth_response.headers['x-auth-token'])
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id", "auth_url")
auth_response = utils.TestResponse({"status_code": 401})
mock_request = mock.Mock(return_value=(auth_response))
@mock.patch.object(requests, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_automatic(self):
cs = client.Client("username", "password", "project_id", "auth_url")
http_client = cs.client
http_client.management_url = ''
mock_request = mock.Mock(return_value=(None, None))
@mock.patch.object(http_client, 'request', mock_request)
@mock.patch.object(http_client, 'authenticate')
def test_auth_call(m):
http_client.get('/')
m.assert_called()
mock_request.assert_called()
test_auth_call()
def test_auth_manual(self):
cs = client.Client("username", "password", "project_id", "auth_url")
@mock.patch.object(cs.client, 'authenticate')
def test_auth_call(m):
cs.authenticate()
m.assert_called()
test_auth_call()
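Every test in this module relies on the same mechanism: requests.request is swapped for a mock.Mock that returns a canned utils.TestResponse, so authenticate() never performs real HTTP. A stripped-down sketch of that mechanism; the URL and body below are placeholders:
def example_mocked_request():
    canned = utils.TestResponse({
        "status_code": 200,
        "text": json.dumps({"ok": True}),
    })
    mock_request = mock.Mock(return_value=canned)
    with mock.patch.object(requests, "request", mock_request):
        resp = requests.request("GET", "http://localhost/fake")
    assert resp is canned
    mock_request.assert_called_with("GET", "http://localhost/fake")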
| 36.616519
| 78
| 0.491662
|
90c0c95bdf40453feb4a39fc7b046c0c7f25357a
| 5,582
|
py
|
Python
|
src/admin_extra_buttons/handlers.py
|
saxix/django-admin-extra-buttons
|
7c72d0887ba71b4f7ae8bd2a1c0f8b78bf0fe6ca
|
[
"BSD-1-Clause"
] | null | null | null |
src/admin_extra_buttons/handlers.py
|
saxix/django-admin-extra-buttons
|
7c72d0887ba71b4f7ae8bd2a1c0f8b78bf0fe6ca
|
[
"BSD-1-Clause"
] | null | null | null |
src/admin_extra_buttons/handlers.py
|
saxix/django-admin-extra-buttons
|
7c72d0887ba71b4f7ae8bd2a1c0f8b78bf0fe6ca
|
[
"BSD-1-Clause"
] | null | null | null |
import inspect
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils.functional import cached_property
from .buttons import Button, ChoiceButton, LinkButton
from .utils import HttpResponseRedirectToReferrer, check_permission, handle_basic_auth, labelize
class BaseExtraHandler:
def __init__(self, func, **kwargs):
self.func = func
self.func._handler = self
self.config = kwargs
self.model_admin = kwargs.get('model_admin', None)
self.decorators = kwargs.get('decorators', [])
self.login_required = kwargs.get('login_required', True)
self._pattern = kwargs.get('pattern', None)
self.permission = kwargs.get('permission')
self.sig: inspect.Signature = inspect.signature(self.func)
@cached_property
def func_args(self):
return list(self.sig.parameters)
def __repr__(self):
return f"<{self.__class__.__name__} {self.name}>"
def get_instance(self, model_admin):
""" return a 'clone' of current Handler"""
return self.__class__(self.func, model_admin=model_admin, **self.config)
@cached_property
def name(self):
return self.func.__name__
def __call__(self, model_admin, request, *args, **kwargs):
obj = None
if len(self.sig.parameters) > 2:
pk = kwargs.get(list(self.sig.parameters)[2])
obj = model_admin.get_object(request, pk)
if self.permission:
check_permission(self.permission, request, obj)
elif self.login_required and not request.user.is_authenticated:
raise PermissionDenied
ret = self.func(model_admin, request, *args, **kwargs)
if not isinstance(ret, HttpResponse):
return HttpResponseRedirectToReferrer(request)
return ret


class ViewHandler(BaseExtraHandler):
def __init__(self, func, login_required=True, http_basic_auth=False, **kwargs):
self.login_required = login_required
self.http_basic_auth = http_basic_auth
super().__init__(func,
http_basic_auth=http_basic_auth,
login_required=login_required,
**kwargs)
def __call__(self, model_admin, request, *args, **kwargs):
if self.login_required and self.http_basic_auth and not request.user.is_authenticated:
handle_basic_auth(request)
return super().__call__(model_admin, request, *args, **kwargs)
@cached_property
def url_pattern(self):
if self._pattern:
return self._pattern
else:
pattern = ''
for arg in list(self.sig.parameters)[2:]:
pattern += f'<path:{arg}>/'
pattern += f'{self.name}/'
return pattern


class ButtonMixin:
def __init__(self, func, html_attrs=None,
change_list=None, change_form=None, visible=True, enabled=True, **kwargs):
self.change_form = change_form
self.change_list = change_list
self.visible = visible
self.enabled = enabled
self.html_attrs = html_attrs or {}
super().__init__(func, change_form=change_form,
change_list=change_list,
html_attrs=html_attrs,
enabled=enabled,
visible=visible,
**kwargs)
def get_button_params(self, context, **extra):
return {'label': self.config.get('label', labelize(self.name)),
'handler': self,
'html_attrs': self.html_attrs,
'change_list': self.change_list,
'change_form': self.change_form,
'visible': self.visible,
'enabled': self.enabled,
'context': context,
'login_required': self.login_required,
'permission': self.permission,
**extra
}
def get_button(self, context):
return self.button_class(**self.get_button_params(context))


class ButtonHandler(ButtonMixin, ViewHandler):
"""View handler for `@button` decorated views"""
button_class = Button


class LinkHandler(ButtonMixin, BaseExtraHandler):
button_class = LinkButton
url_pattern = None
def __init__(self, func, **kwargs):
self.href = kwargs.pop('href', None)
super().__init__(func, href=self.href, **kwargs)
def get_button_params(self, context, **extra):
return super().get_button_params(context,
href=self.href,
url_pattern=self.url_pattern,
**extra,
)
def get_button(self, context):
params = self.get_button_params(context)
button = self.button_class(**params)
self.func(self.model_admin, button)
return button


class ChoiceHandler(LinkHandler):
button_class = ChoiceButton
def __init__(self, func, **kwargs):
self.href = kwargs.pop('href', None)
self.choices = kwargs.pop('choices', None)
self.selected_choice = None
super().__init__(func, href=self.href, choices=self.choices, **kwargs)
def get_button_params(self, context, **extra):
return super().get_button_params(context,
choices=self.choices,
**extra,
)
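For context, these handler classes back the `@button`/`@link`/`@choice` decorators that django-admin-extra-buttons attaches to a `ModelAdmin`. A hedged usage sketch follows; the import path, the `Invoice` model and its `recalculate_totals()` method are assumptions for illustration only.

from django.contrib import admin

from admin_extra_buttons.api import ExtraButtonsMixin, button  # import path assumed

from .models import Invoice  # hypothetical model


@admin.register(Invoice)
class InvoiceAdmin(ExtraButtonsMixin, admin.ModelAdmin):

    # 'label' and 'permission' end up in BaseExtraHandler.config / .permission above.
    @button(label="Recalculate", permission="invoices.change_invoice")
    def recalculate(self, request, pk):
        # A third parameter makes BaseExtraHandler.__call__ resolve the object
        # from the URL before dispatching (see the pk handling above).
        obj = self.get_object(request, pk)
        obj.recalculate_totals()  # hypothetical business method
        self.message_user(request, "Totals recalculated")
        # Returning nothing redirects back to the referrer (see __call__ above).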
| avg_line_length: 35.55414 | max_line_length: 96 | alphanum_fraction: 0.596381 |

| hexsha: 2706819b1900d5480ea950f6363b998984534291 | size: 197 | ext: py | lang: Python |
| max_stars_repo_path: hmm/tests/utils.py | max_stars_repo_name: ondrejba/hmm | max_stars_repo_head_hexsha: 1e9fe47a6057d93e7c77614016a89d5d46959e97 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: hmm/tests/utils.py | max_issues_repo_name: ondrejba/hmm | max_issues_repo_head_hexsha: 1e9fe47a6057d93e7c77614016a89d5d46959e97 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: hmm/tests/utils.py | max_forks_repo_name: ondrejba/hmm | max_forks_repo_head_hexsha: 1e9fe47a6057d93e7c77614016a89d5d46959e97 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
def get_tensor_shape(tensor):
    """Return the static shape of ``tensor`` as a tuple of plain Python values.

    Unknown dimensions are kept as ``None``; TF1-style ``Dimension`` entries
    are unwrapped through their ``.value`` attribute.
    """
    shape = []
    for s in tensor.shape:
        if s is None:
            shape.append(s)
        else:
            shape.append(s.value)
    return tuple(shape)
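A quick illustration of what the helper returns, assuming TensorFlow 1.x graph mode where each entry of `tensor.shape` is a `Dimension` whose `.value` may be `None`:

import tensorflow as tf  # assumed to be TensorFlow 1.x

x = tf.placeholder(tf.float32, shape=[None, 32])
print(get_tensor_shape(x))  # -> (None, 32): the unknown batch size stays None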
| avg_line_length: 16.416667 | max_line_length: 33 | alphanum_fraction: 0.532995 |

| hexsha: 59b9b3bf50605e1e1ca2712745ccb1285a037fd9 | size: 678 | ext: py | lang: Python |
| max_stars_repo_path: accounts/sitemaps.py | max_stars_repo_name: SarangWadode/medstore | max_stars_repo_head_hexsha: 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-03-24T13:36:39.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-10T13:51:59.000Z |
| max_issues_repo_path: accounts/sitemaps.py | max_issues_repo_name: SarangWadode/medstore | max_issues_repo_head_hexsha: 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | max_issues_repo_licenses: ["MIT"] | max_issues_count: 44 | max_issues_repo_issues_event_min_datetime: 2021-01-05T01:51:38.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-10T13:44:26.000Z |
| max_forks_repo_path: accounts/sitemaps.py | max_forks_repo_name: mukeshgurpude/medstore | max_forks_repo_head_hexsha: 498b76acbeb9727e7a61560e4016b3577c2706d2 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-10-28T09:26:01.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-28T09:26:01.000Z |
"""
Sitemap classes related to the account related views
"""
from django.contrib import sitemaps
from django.urls import reverse
class AccountSiteMap(sitemaps.Sitemap):
"""
Urls related to custom user model
"""
priority = .5
changeFreq = 'daily'
def items(self):
return ['profile', 'apply']
def location(self, item):
return reverse(f'Account:{item}')
class AllAuthSitemap(sitemaps.Sitemap):
"""
Sitemap urls for allauth account urls
"""
priority = .5
changeFreq = 'monthly'
def items(self):
return ['login', 'logout']
def location(self, item):
return reverse(f"account_{item}")
| avg_line_length: 19.941176 | max_line_length: 52 | alphanum_fraction: 0.632743 |

| hexsha: ea9c958abe73ce21beb124d7c402204387de7e98 | size: 193 | ext: py | lang: Python |
| max_stars_repo_path: code/comp_racine.py | max_stars_repo_name: christophesaintjean/IntroProgS1_2020 | max_stars_repo_head_hexsha: 99555d1e3681d88ee023592a16caecdec6f7c0b4 | max_stars_repo_licenses: ["CC0-1.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: code/comp_racine.py | max_issues_repo_name: christophesaintjean/IntroProgS1_2020 | max_issues_repo_head_hexsha: 99555d1e3681d88ee023592a16caecdec6f7c0b4 | max_issues_repo_licenses: ["CC0-1.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: code/comp_racine.py | max_forks_repo_name: christophesaintjean/IntroProgS1_2020 | max_forks_repo_head_hexsha: 99555d1e3681d88ee023592a16caecdec6f7c0b4 | max_forks_repo_licenses: ["CC0-1.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import math
import racine

# Compare the library square root with the bisection-based implementation
# from the local ``racine`` module.
x = float(input('x ? '))
if abs(math.sqrt(x) - racine.racine_dicho(x)) < 1e-6:
    print("The values are the same")
else:
    print("Roger, we have a problem!!!")
| avg_line_length: 21.444444 | max_line_length: 53 | alphanum_fraction: 0.642487 |