hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eefa2d530adebabe10a337d7cdc3e28bc2a8ca7c | 12,839 | py | Python | EMD_data_training_server.py | NahianHasan/Cardiovascular_Disease_Classification_Employing_EMD | 57bf6425808bcff4f1c54e6be2e1df9c14b61313 | [
"MIT"
] | 6 | 2019-10-10T18:53:13.000Z | 2020-08-13T08:39:43.000Z | EMD_data_training_server.py | NahianHasan/Cardiovascular_Disease_Classification_Employing_EMD | 57bf6425808bcff4f1c54e6be2e1df9c14b61313 | [
"MIT"
] | null | null | null | EMD_data_training_server.py | NahianHasan/Cardiovascular_Disease_Classification_Employing_EMD | 57bf6425808bcff4f1c54e6be2e1df9c14b61313 | [
"MIT"
] | 1 | 2020-04-22T07:50:49.000Z | 2020-04-22T07:50:49.000Z | #import other files
import EMD_data_prepare as E
import EMD_Models
import config
import Folder_creation as FC
import Training_Analysis as TRA
import Confusion_Matrix as CM
#import other libraries
import wfdb
import os
import sys
import threading
import time
import glob
import argparse
import numpy as np
import GPUtil
import pandas
from time import time
import pickle
import math
import random
from collections import Counter
import matplotlib.pyplot as plt
#import keras libraries
import keras.layers.core as K
from keras.utils import np_utils
from keras.callbacks import CSVLogger
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.constraints import maxnorm
from keras.models import model_from_json
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler, EarlyStopping
from keras.utils import plot_model
from Keras_FB import main as fb
from keras.models import load_model
#import from scikit learn libraries
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder,LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.utils import class_weight
import tensorflow as tf
#####################################################################################################################
global C,Y_val
global M
C = config.Config()
M = EMD_Models.MODELS()
def separate_threads(folder,IMF_number,filepath,patient_data,problem_data,csv_folder,samplenumber,resume,initial_epoch):
print ('IMF {} is training'.format(IMF_number))
samplenumber=samplenumber
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
    #iterate through the csv files
#for emd based training
csv_path = {}
for i in IMF_number:
csv_path[str(i)] = csv_folder+'IMF'+str(i)+'_train'+'.csv'
#for original data training
#csv_path = csv_folder+'Original_train.csv'
classes = C.disease_names
X_prim = {}
for i in IMF_number:
X_prim[str(i)] = []
for i in IMF_number:
dataframe = pandas.read_csv(csv_path[str(i)], header=None)
dataset = dataframe.values
X_prim[str(i)] = dataset[:,0:samplenumber].astype(float)
Y_prim = dataset[:,samplenumber]
print 'IMF ',i,' is loaded'
print len(X_prim[str(i)])
X_modified = []
Y_modified = []
for i in range(0,C.Total_Train_Data):
sum = np.zeros(samplenumber)
for j in IMF_number:
            sum = [a + b for a, b in zip(sum, X_prim[str(j)][i])]  # add the i-th sample of IMF j element-wise
X_modified.append(sum)
print i
Y_modified = Y_prim
X = []
Y = []
    ##Remove the Hypertrophy, Miscellaneous and n/a classes
indices = [s for s, x in enumerate(Y_prim) if x not in ['Hypertrophy','Miscellaneous','n/a']]
for f in indices:
X.append(X_modified[f])
Y.append(Y_modified[f])
'''
# for training Original data
indices = [s for s, x in enumerate(Y_prim) if x not in [ 'Miscellaneous', 'Hypertrophy']]
for f in indices:
X.append(X_prim[f])
Y.append(Y_prim[f])
'''
print Counter(Y)
#encode class_values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoder_Y = encoder.transform(Y)
#convert integers to dummy variables(i.e: one hot encoding)
dummy_Y = np_utils.to_categorical(encoder_Y)
#Split the dataset to train and test data
X_train,X_test,Y_train,Y_test = train_test_split(X,dummy_Y,test_size = C.valuation_split, random_state = seed)
print '\n\nData Loaded\n\n'
if C.CNN_model_use:
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
if resume=='False':
#get the model and print the summary
model = M.IMF_models[str(IMF_number)]()
if C.optimizer=='sgd':
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
#Compile Model
model.compile(loss = 'categorical_crossentropy', optimizer = sgd, metrics = ['accuracy'])
else:
model.compile(loss = 'categorical_crossentropy', optimizer = C.optimizer, metrics = ['accuracy'])
plot_model(model, to_file=folder+'/Model_Figures/EMD_model.png')
elif resume=='True':
#load_architecture
json_file = open(folder+'/Final_Weights/model_IMF_'+str(IMF_number)+'.json','r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
#Load weights
weight_folder = folder+'/Training_Records/IMF_'+str(IMF_number)+'/weights_best_of_'+'*'
filenames = glob.glob(weight_folder)
filenames.sort(reverse = True)
model_weight_file = filenames[0]
print '\n\n\n', model_weight_file, '\n\n\n'
model.load_weights(model_weight_file)
if C.optimizer=='sgd':
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
#Compile Model
model.compile(loss = 'categorical_crossentropy', optimizer = sgd, metrics = ['accuracy'])
else:
model.compile(loss = 'categorical_crossentropy', optimizer = C.optimizer, metrics = ['accuracy'])
print model.summary()
if resume=='False':
#SAVE THE MODEL Architecture
model_json = model.to_json()
mdl_save_path = folder+'/Final_Weights/model_IMF_'+str(IMF_number)+'.json'
with open(mdl_save_path, "w") as json_file:
json_file.write(model_json)
#################### callback list #######################
def step_decay(epoch):
#Drop based Learning rate
initial_lrate = C.initial_lrate
drop = C.lrate_drop
epochs_drop = C.lrate_epochs_drop
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
'''
#Cyclical learning rate(triangular)
"""Given the inputs, calculates the lr that should be applicable for this iteration"""
base_lr = 0.0001
max_lr = 0.001
cycle = np.floor(1 + epoch/(2 * C.lrate_epochs_drop))
x = np.abs(epoch/C.lrate_epochs_drop - 2 * cycle + 1)
lrate = base_lr + (max_lr - base_lr) * np.maximum(0, (1-x))
return lrate
'''
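    # Worked example of the drop-based schedule above (numbers are illustrative,
    # not taken from config.py): with initial_lrate=0.01, drop=0.5 and
    # epochs_drop=10, step_decay() returns 0.01 for epochs 0-8, 0.005 for
    # epochs 9-18, 0.0025 for epochs 19-28, and so on -- the rate halves once
    # every epochs_drop epochs.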
#checkpoint path
chk_path = folder+'/Training_Records/'+'IMF_'+str(IMF_number)+'/weights_best_of_'+'IMF_'+str(IMF_number)+'.hdf5'
checkpoint_best = ModelCheckpoint(chk_path,monitor='val_acc',verbose=1,save_best_only=True,mode=C.chkpointpath_saving_mode)
#Save Every epoch
chk_path = folder+'/Saved_All_Weights/'+'IMF_'+str(IMF_number)+'/IMF_'+str(IMF_number)+"_Each_Epoch.hdf5"
each_epoch = ModelCheckpoint(chk_path,monitor='val_acc',verbose=1,save_best_only=False,mode='auto', period=1)
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
#Early Stopping
Early_stop = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=20, verbose=0, mode='max')
#Callback that streams epoch results to a csv file.
csv_logger = CSVLogger(folder+'/Training_CSV_log/training_IMF_'+str(IMF_number)+'.log')
#keras FB ip
#FB = fb.sendmessage(savelog=True,fexten='TEST',username='',password='')
#Tensorboard visualization
TENS_FILE = folder+'/Tensorboard_Visualization/IMF_'+str(IMF_number)+'/{}'
tensor_board = TensorBoard(log_dir = TENS_FILE.format(time()),histogram_freq=0,write_graph=True,write_images=False)
#open a terminal and write 'tensorboard --logdir=logdir/' and go to the browser
#################################################################
callback_list=[checkpoint_best,each_epoch,lrate,csv_logger,tensor_board,Early_stop]
'''
#dataset Standardization
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_valuation = scaler.transform(X_valuation)
'''
#Fit the model
if C.grid_search:
# grid search epochs, batch size and optimizer
optimizers = ['rmsprop' , 'adam']
init = ['glorot_uniform' , 'normal' , 'uniform']
epochs = np.array([50, 100, 150])
batches = np.array([5, 10, 20])
param_grid = dict(optimizer=optimizers, nb_epoch=epochs, batch_size=batches, init=init)
grid = GridSearchCV(estimator=model, param_grid=param_grid)
history = grid.fit(X_train, Y_train, validation_data=(X_test,Y_test), nb_epoch=C.nb_epoch,
batch_size=C.batch_size, verbose=1,callbacks=callback_list,
shuffle=C.shuffle,initial_epoch=initial_epoch)
elif not C.grid_search:
#Fit the model
history = model.fit(X_train, Y_train, validation_data=(X_test,Y_test), nb_epoch=C.nb_epoch,
batch_size=C.batch_size, verbose=1,callbacks=callback_list,
shuffle=C.shuffle,initial_epoch=initial_epoch)
#save the history of whole training
filehandler = open(folder+"/Training_History/IMF_"+str(IMF_number)+".obj","wb")
pickle.dump(history.history,filehandler)
filehandler.close()
'''
#evaluate the model on whole training dataset
scores = model.evaluate(X,dummy_Y, verbose=0)
print("IMF_%s---%s: %.2f%%" % (IMF_number,model.metrics_names[1], scores[1]*100))
#Save the final scores to text file
with open(folder+"/Training_Results/IMF_Training_Result.txt", "a") as myfile:
string = 'IMF_'+str(IMF_number)+'----'+model.metrics_names[1]+' = '+str(scores[1]*100)+'-------'+'\n'
myfile.write(string)
'''
#########################################################################################################################
def Main():
deviceIDs=[]
while not deviceIDs:
deviceIDs = GPUtil.getAvailable(order='first',limit=1,maxMemory=0.80,maxLoad=0.99)
print 'searching for GPU to be available. Please wait.....'
print 'GPU Found...Starting Training\n'
    # Let TensorFlow allocate up to 95% of the available GPU memory:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
parser = argparse.ArgumentParser(description='ECG data training using EMD Data with separate threading',
usage='Classifying EMD Data',
epilog='Give proper arguments')
    parser.add_argument('-p',"--data_path",metavar='', help="Path to the main database",default=C.data_path)
parser.add_argument('-c',"--csv_path",metavar='',help="Path to the CSV Folder of EMD Data",default=C.IMF_csv_path)
parser.add_argument('-res',"--resume_train",metavar='',help="Resume Training",default='False')
parser.add_argument('-inep',"--ini_epoch",metavar='',help="Initial Epoch after Resuming Training",default=C.initial_epoch)
parser.add_argument('-reim',"--res_imf",metavar='',help="Resumed IMF number after resuming",default=1)
parser.add_argument('-rc',"--patient_data_path",metavar='',help="Path to the Patient file RECORD.txt",default=C.patient_data_path)
parser.add_argument('-pd',"--problem_data_path",metavar='',help="Path to the text file where problematic data to be stored",default=C.preoblem_data_path)
parser.add_argument('-s',"--sample_number",metavar='',help="Number of samples to be taken by each record",type=int,default=C.samplenumber)
parser.add_argument('-imf',"--number_of_IMFs",metavar='',help="Number of IMFs to be extracted",default=C.number_of_IMFs,type=int,choices=[2,3,4,5,6])
parser.add_argument('-spl',"--split_perc",metavar='',help="Splitting percentage of train and test(upper limit)",type=float,default=C.split_perc)
parser.add_argument('-fold',"--res_fold",metavar='',help="Save training and testing results in folder")
args = parser.parse_args()
file_path=args.data_path
csv_folder=args.csv_path
patient_data=args.patient_data_path
problem_data=args.problem_data_path
samplenumber=int(args.sample_number)
number_of_IMFs=int(args.number_of_IMFs)
spl_perc = float(args.split_perc)
resume = args.resume_train
resumed_IMF_number = int(args.res_imf)
initial_epoch = int(args.ini_epoch)
folder = args.res_fold
#Check whether specific folders are present or not....if not create them
FC.Folder_creation(number_of_IMFs,folder)
#Generate EMD separate IMF csv files in the csv path
if C.EMD_data_prepare is True:
response = raw_input("Are you sure that you want to prepare the EMD Data Files again(Y/N): ")
if response == 'Y':
print('EMD data preparing\n')
E.EMD_data_preparation(file_path,patient_data,csv_folder,problem_data,samplenumber,number_of_IMFs,spl_perc)
print('EMD data preparation finished\n')
elif response == 'N':
            print('Skipping EMD Data Preparation Step')
elif C.EMD_data_prepare is False:
response = raw_input("Are you sure that you do not want to prepare the EMD Data Files(Y/N): ")
if response == 'N':
print('EMD data preparing\n')
E.EMD_data_preparation(file_path,patient_data,csv_folder,problem_data,samplenumber,number_of_IMFs,spl_perc)
print('EMD data preparation finished\n')
elif response == 'Y':
            print('EMD Data already prepared. So going to the training phase of each IMF')
    print '\n\nEMD data training started\n\n'
separate_threads(folder,C.IMF_array,file_path,patient_data,problem_data,csv_folder,samplenumber,resume,initial_epoch)
#Plotting the history of training
print 'Finished Training all the IMF segments'
print "Let's see how the training was"
if __name__ == '__main__':
    Main()
#Delete all .pyc files
direc = os.getcwd()
test=os.listdir(direc)
for item in test:
if item.endswith(".pyc"):
os.remove(item)
| 39.749226 | 154 | 0.727315 |
86c4312123d39252c56cb7c7e6b50403a0fb1319 | 3,972 | py | Python | lib/py/src/server/TProcessPoolServer.py | yelirekim/thrift | 2fd8a15fc4e458aee13dd3be7fcba96bb5019c38 | [
"Apache-2.0"
] | 11 | 2016-09-28T09:13:21.000Z | 2021-08-23T07:28:41.000Z | lib/py/src/server/TProcessPoolServer.py | yelirekim/thrift | 2fd8a15fc4e458aee13dd3be7fcba96bb5019c38 | [
"Apache-2.0"
] | 1 | 2018-07-09T01:38:43.000Z | 2018-07-11T20:15:45.000Z | lib/py/src/server/TProcessPoolServer.py | yelirekim/thrift | 2fd8a15fc4e458aee13dd3be7fcba96bb5019c38 | [
"Apache-2.0"
] | 15 | 2015-01-09T04:56:04.000Z | 2021-04-13T12:33:05.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition, reduction
from TServer import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
"""Server with a fixed size pool of worker subprocesses to service requests
Note that if you need shared state between the handlers - it's up to you!
Written by Dvir Volk, doat.com
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop getting clients from the shared queue and process them"""
if self.postForkCallback:
self.postForkCallback()
while self.isRunning.value:
try:
client = self.serverTransport.accept()
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
except Exception as x:
logging.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
        except TTransportException as tx:
pass
except Exception as x:
logging.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start workers and put into queue"""
# this is a shared state that can tell the workers to exit when False
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
            except Exception as x:
logging.exception(x)
# wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
except Exception as x:
logging.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
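# --- Usage sketch (added; illustrative only, not part of the original file) ---
# Assuming a Thrift-generated `Calculator` service and the standard factories:
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#   processor = Calculator.Processor(CalculatorHandler())
#   transport = TSocket.TServerSocket(port=9090)
#   tfactory = TTransport.TBufferedTransportFactory()
#   pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#   server = TProcessPoolServer(processor, transport, tfactory, pfactory)
#   server.setNumWorkers(4)
#   server.serve()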
| 33.378151 | 79 | 0.63721 |
2472f20546d5ae69481bea036f52c9d87428afa4 | 146 | py | Python | starter-stack/player1.py | InsperDynamics/Soccer-Simulation-2D | a548d576ca4ab2a8f797810f5e23875c45cef73f | [
"Apache-2.0"
] | null | null | null | starter-stack/player1.py | InsperDynamics/Soccer-Simulation-2D | a548d576ca4ab2a8f797810f5e23875c45cef73f | [
"Apache-2.0"
] | null | null | null | starter-stack/player1.py | InsperDynamics/Soccer-Simulation-2D | a548d576ca4ab2a8f797810f5e23875c45cef73f | [
"Apache-2.0"
] | null | null | null | import os
def player1():
os.chdir('Agent1/src')
    os.system('./start.sh -t teamnacme')  # You can change the team name here
player1()
| 14.6 | 78 | 0.650685 |
2d09b2e7c8596327b2f83c4cd4c4a0d7a71ecc25 | 746 | py | Python | setup.py | a12k/credstash | 83b30071398bf3c768096fc8d4384934c33e955a | [
"Apache-2.0"
] | null | null | null | setup.py | a12k/credstash | 83b30071398bf3c768096fc8d4384934c33e955a | [
"Apache-2.0"
] | null | null | null | setup.py | a12k/credstash | 83b30071398bf3c768096fc8d4384934c33e955a | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(
name='credstash',
version='1.14.0',
description='A utility for managing secrets in the cloud using AWS KMS and DynamoDB',
license='Apache2',
url='https://github.com/LuminalOSS/credstash',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
],
scripts=['credstash.py'],
py_modules=['credstash'],
install_requires=[
'cryptography>=1.5, <2.1',
'boto3>=1.1.1',
],
extras_require={
'YAML': ['PyYAML>=3.10']
},
entry_points={
'console_scripts': [
'credstash = credstash:main'
]
}
)
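# Note (added): the `extras_require` section above means optional YAML support
# is installed with `pip install credstash[YAML]`; the base install only pulls
# in cryptography and boto3.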
| 25.724138 | 89 | 0.587131 |
34ec955e7e297540287fd0fc0d5d560841075bb7 | 1,708 | py | Python | app/core/migrations/0001_initial.py | kaminski-pawel/dj-recipe-api | 5cb180e6ac8e4189d2c9f6e53661e9d468e59ee2 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | kaminski-pawel/dj-recipe-api | 5cb180e6ac8e4189d2c9f6e53661e9d468e59ee2 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | kaminski-pawel/dj-recipe-api | 5cb180e6ac8e4189d2c9f6e53661e9d468e59ee2 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.8 on 2019-04-02 23:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.235294 | 266 | 0.638759 |
53f7a1a84481c155005699d3fe95284d3a4fdb55 | 3,423 | py | Python | src/m6_loops_within_loops_printing.py | robertcarl/20-Exam3Practice | e63a9e9bbae81f599d3beea20ae9002a203d7cef | [
"MIT"
] | null | null | null | src/m6_loops_within_loops_printing.py | robertcarl/20-Exam3Practice | e63a9e9bbae81f599d3beea20ae9002a203d7cef | [
"MIT"
] | null | null | null | src/m6_loops_within_loops_printing.py | robertcarl/20-Exam3Practice | e63a9e9bbae81f599d3beea20ae9002a203d7cef | [
"MIT"
] | null | null | null | """
PRACTICE Exam 3.
This problem provides practice at:
*** LOOPS WITHIN LOOPS in PRINTING-TO-CONSOLE problems. ***
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Drew Roberts.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
###############################################################################
def main():
""" Calls the TEST functions in this module. """
run_test_shape()
def run_test_shape():
""" Tests the shape function. """
print()
print('--------------------------------------------------')
print('Testing the SHAPE function:')
print('--------------------------------------------------')
print()
print('Test 1 of shape: r=7')
shape(7)
print()
print('Test 2 of shape: r=4')
shape(4)
print()
print('Test 3 of shape: r=2')
shape(2)
def shape(r):
"""
Prints a shape with r rows that looks like this example where r=7:
+++++++!7654321
++++++!654321-
+++++!54321--
++++!4321---
+++!321----
++!21-----
+!1------
Another example, where r=4:
++++!4321
+++!321-
++!21--
+!1---
Preconditions: r is a positive number.
For purposes of "lining up", assume r is a single digit.
"""
# -------------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Some tests are already written for you (above).
#
###########################################################################
# IMPLEMENTATION RESTRICTION:
# You may NOT use string multiplication in this problem.
###########################################################################
# -------------------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
# -------------------------------------------------------------------------
for k in range(1, r + 1):
for j in range(k):
print(' ', end='')
for l in range(r - k + 1):
print('+', end='')
print('!', end='')
for i in range(r - k, -1, -1):
print(i + 1, end='')
for m in range(k - 1):
print('-', end='')
print()
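    # Trace of one call (added for clarity): with r=4 and k=1 the loops above
    # print 1 leading space, r-k+1 = 4 plus signs, '!', the digits 4 3 2 1,
    # and k-1 = 0 dashes before the trailing newline.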
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 31.694444 | 79 | 0.434122 |
0114cc564b7d8ff0c99aa1f9c244d023b4e99874 | 1,649 | py | Python | data/interpretability/info_error_warning/classes/data_loader.py | QuLog1/QuLog | 121f3a8c6f5ee60cde771c36b9eef823a1b2597a | [
"Apache-2.0"
] | null | null | null | data/interpretability/info_error_warning/classes/data_loader.py | QuLog1/QuLog | 121f3a8c6f5ee60cde771c36b9eef823a1b2597a | [
"Apache-2.0"
] | null | null | null | data/interpretability/info_error_warning/classes/data_loader.py | QuLog1/QuLog | 121f3a8c6f5ee60cde771c36b9eef823a1b2597a | [
"Apache-2.0"
] | null | null | null | from keras.preprocessing.sequence import pad_sequences
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import torch
import numpy as np
TORCH_INT_TYPE = torch.int16
NP_INT_TYPE = np.int16
def create_data_loaders(load_train, labels_train, load_test, labels_test, pad_len, batch_size):
train_data = TensorDataset(
torch.tensor(get_padded_data(load_train, pad_len=pad_len), dtype=torch.int32),
torch.tensor(labels_train.astype(np.int32), dtype=torch.int32))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
test_data = TensorDataset(
torch.tensor(get_padded_data(load_test, pad_len=pad_len), dtype=torch.int32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return train_dataloader, test_dataloader
def create_test_data_loaders(load_test, labels_test, pad_len, batch_size):
test_data = TensorDataset(
torch.tensor(get_padded_data(load_test, pad_len=pad_len), dtype=torch.int32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return test_dataloader
def get_padded_data(data, pad_len):
pd = pad_sequences(data, maxlen=pad_len, dtype="long",
truncating="post", padding="post")
return pd
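# --- Usage sketch (added; names and shapes are assumptions, not from the
# original project) ---
#   train_seqs/test_seqs: lists of integer token-id sequences,
#   y_train/y_test: numpy arrays of integer class labels.
#
#   train_dl, test_dl = create_data_loaders(train_seqs, y_train,
#                                           test_seqs, y_test,
#                                           pad_len=50, batch_size=64)
#   for token_ids, labels in train_dl:
#       ...  # token_ids: (batch, pad_len) int32, labels: (batch,) int32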
| 39.261905 | 95 | 0.763493 |
07defab8c663a19881eee9ce12b4cfbc1b04e281 | 2,606 | py | Python | pagina_01/views.py | JeanContreras12/ColungaRepo | af59e07f31b3d56ebdf02431a2967134985c1624 | [
"MIT"
] | 1 | 2021-06-02T02:04:14.000Z | 2021-06-02T02:04:14.000Z | pagina_01/views.py | JeanContreras12/ColungaRepo | af59e07f31b3d56ebdf02431a2967134985c1624 | [
"MIT"
] | 1 | 2021-06-03T03:03:59.000Z | 2021-06-03T03:03:59.000Z | pagina_01/views.py | JeanContreras12/ColungaRepo | af59e07f31b3d56ebdf02431a2967134985c1624 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
from .forms import CustomUserForm
from django.contrib.auth import login as auth_login, authenticate
from django.contrib.auth.models import Group
from django.contrib import messages
from .decorators import solo_admin
from django.views import generic
from django.urls import reverse_lazy
from pagina_01.forms import EditProfileForm
# Create your views here.
@solo_admin
def loginADMIN(request):
return render(request, 'pagina_01/logeadoADMIN.html')
@solo_admin
def planificadorAdmin(request):
return render(request,'pagina_01/planificadorADMIN.html')
def login(request):
return render(request, 'pagina_01/logeado.html')
def planificador(request):
return render(request,'pagina_01/planificador.html')
def saladechat(request):
return render(request,'pagina_01/saladechat.html')
def videoconferencia(request):
return render(request,'pagina_01/videoconferencias.html')
def comunicadosINDEX(request):
return render(request, 'pagina_01/comunicadosINDEX.html')
def comunicados(request):
return render(request, 'pagina_01/comunicados.html')
@solo_admin
def organizacionesADMIN(request):
return render(request,'pagina_01/organizacionesADMIN.html')
def organizaciones(request):
return render(request,'pagina_01/organizaciones.html')
@solo_admin
def perfilADMIN(request):
return render(request, 'pagina_01/perfilADMIN.html')
def perfil(request):
return render(request, 'pagina_01/perfil.html')
def contacto(request):
return render(request, 'pagina_01/contacto.html')
@solo_admin
def registro(request):
data={
'form':CustomUserForm()
}
if request.method == 'POST':
formulario = CustomUserForm(request.POST)
if formulario.is_valid():
user=formulario.save()
            # authenticate the user and redirect to the home page
username=formulario.cleaned_data['username']
password=formulario.cleaned_data['password1']
user=authenticate(username=username,password=password)
group=Group.objects.get(name='customer')
user.groups.add(group)
            auth_login(request, user)  # the local login() view shadows django.contrib.auth.login, so use the alias
            messages.success(request, 'Account created successfully')
return render(request, 'pagina_01/registro.html',data)
class UserEditView(generic.UpdateView):
form_class = EditProfileForm
template_name = 'pagina_01/edit_profile.html'
success_url = reverse_lazy('edit_profile')
def get_object(self):
return self.request.user
| 35.216216 | 70 | 0.748657 |
b7d407504dc059490d6ebadacc0cdbb06c36d5d6 | 3,358 | py | Python | opensanctions/crawlers/ua_sfms_blacklist.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
] | null | null | null | opensanctions/crawlers/ua_sfms_blacklist.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
] | null | null | null | opensanctions/crawlers/ua_sfms_blacklist.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
] | null | null | null | from datetime import datetime
from prefixdate import parse_formats
from opensanctions.helpers import make_sanction
from opensanctions.util import jointext, remove_bracketed, multi_split
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
def parse_date(date):
if date is None:
return
date = date.replace(".", "").strip()
if ";" in date:
date, _ = date.split(";", 1)
return parse_formats(date, FORMATS)
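# Example (added): parse_date("25 Feb. 1961; also reported 1962") strips the
# dot, keeps only the text before the first ";", and then tries the FORMATS
# above, matching "%d %b %Y" for "25 Feb 1961".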
def parse_entry(context, entry):
entity = context.make("LegalEntity")
if entry.findtext("./type-entry") == "2":
entity = context.make("Person")
entry_id = entry.findtext("number-entry")
entity.make_slug(entry_id)
sanction = make_sanction(entity)
sanction.add("program", entry.findtext("./program-entry"))
date_entry = entry.findtext("./date-entry")
if date_entry:
date = datetime.strptime(date_entry, "%Y%m%d")
entity.context["created_at"] = date.isoformat()
sanction.add("startDate", date.date())
for aka in entry.findall("./aka-list"):
first_name = aka.findtext("./aka-name1")
entity.add("firstName", first_name, quiet=True)
second_name = aka.findtext("./aka-name2")
entity.add("secondName", second_name, quiet=True)
third_name = aka.findtext("./aka-name3")
entity.add("middleName", third_name, quiet=True)
last_name = aka.findtext("./aka-name4")
entity.add("lastName", last_name, quiet=True)
name = jointext(first_name, second_name, third_name, last_name)
if aka.findtext("type-aka") == "N":
entity.add("name", name)
else:
if aka.findtext("./quality-aka") == "2":
entity.add("weakAlias", name)
else:
entity.add("alias", name)
for node in entry.findall("./title-list"):
entity.add("title", node.text, quiet=True)
for doc in entry.findall("./document-list"):
reg = doc.findtext("./document-reg")
number = doc.findtext("./document-id")
country = doc.findtext("./document-country")
passport = context.make("Passport")
passport.make_id("Passport", entity.id, reg, number, country)
passport.add("holder", entity)
passport.add("passportNumber", number)
passport.add("summary", reg)
passport.add("country", country)
context.emit(passport)
for doc in entry.findall("./id-number-list"):
entity.add("idNumber", doc.text)
for node in entry.findall("./address-list"):
entity.add("address", node.findtext("./address"))
for pob in entry.findall("./place-of-birth-list"):
entity.add("birthPlace", pob.text, quiet=True)
for dob in entry.findall("./date-of-birth-list"):
entity.add("birthDate", parse_date(dob.text), quiet=True)
for nat in entry.findall("./nationality-list"):
for country in multi_split(nat.text, [";", ","]):
country = remove_bracketed(country)
entity.add("nationality", country, quiet=True)
context.emit(entity, target=True, unique=True)
context.emit(sanction)
def crawl(context):
context.fetch_resource("source.xml", context.dataset.data.url)
doc = context.parse_resource_xml("source.xml")
for entry in doc.findall(".//acount-list"):
parse_entry(context, entry)
| 36.107527 | 71 | 0.625074 |
735491b1f2d86b699cb7ebabca000acf732b7597 | 7,585 | py | Python | clmr/models/preliminary_models/NoPadConnectNetNoAVGAlter.py | Marcel-Velez/CLMR | 730bd9078756650a53b4c6438b29e5aeb2c15134 | [
"Apache-2.0"
] | null | null | null | clmr/models/preliminary_models/NoPadConnectNetNoAVGAlter.py | Marcel-Velez/CLMR | 730bd9078756650a53b4c6438b29e5aeb2c15134 | [
"Apache-2.0"
] | null | null | null | clmr/models/preliminary_models/NoPadConnectNetNoAVGAlter.py | Marcel-Velez/CLMR | 730bd9078756650a53b4c6438b29e5aeb2c15134 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
from torch.autograd import Variable
import math
import torch.nn.functional as F
from torchsummary import summary
POOLSIZE = 2
DROPOUT_RATE = .25
def init_weights(m):
if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight)  # in-place initializer (xavier_uniform is a deprecated alias)
m.bias.data.fill_(0.01)
class Deconv(nn.Module):
def __init__(self, in_chan, out_chan, kernel, stride, padding):
super(Deconv, self).__init__()
self.layers = []
self.layers.append(nn.Conv1d(in_chan, out_chan, kernel_size=kernel, stride=stride, padding=padding))
self.layers.append(nn.BatchNorm1d(out_chan))
self.layers.append(nn.ReLU())
self.layers.append(nn.Conv1d(out_chan, out_chan, kernel_size=kernel, stride=stride, padding=padding))
self.layers.append(nn.BatchNorm1d(out_chan))
self.layers.append(nn.ReLU())
self.layers = nn.Sequential(*self.layers)
self.layers.apply(init_weights)
def forward(self, x):
out = self.layers(x)
return out
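# Note (added): with padding=0 and stride=1, each Conv1d above shortens the
# sequence by (kernel - 1) samples, so one Deconv block with kernel=6 maps an
# input of length L to length L - 10. That shrinkage is why forward() in the
# network below crops the skip connections (e.g. c4[:, :, 32:-31]) before
# concatenating them.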
class NoPadConnectNetNoAVGAlter(nn.Module):
def __init__(self, in_channels=16, n_classes=1):
super(NoPadConnectNetNoAVGAlter, self).__init__()
padding = 0
self.pool3 = nn.MaxPool1d(3, stride=3)
self.pool4 = nn.MaxPool1d(4, stride=4)
self.pool5 = nn.MaxPool1d(5, stride=5)
self.pool = nn.MaxPool1d(6, stride=6)
transposedStride = 6
kernel_size = 6
dropout = nn.Dropout(DROPOUT_RATE)
self.conv1 = Deconv(1 , int(in_channels), kernel=kernel_size, stride=1, padding=padding)
self.conv2 = Deconv(int(in_channels), in_channels*2, kernel=kernel_size, stride=1, padding=padding)
self.conv3 = Deconv(in_channels*2, in_channels*4, kernel=kernel_size, stride=1, padding=padding)
self.conv4 = Deconv(in_channels*4, in_channels*8, kernel=kernel_size, stride=1, padding=padding)
self.conv5 = Deconv(in_channels*8, in_channels*16, kernel=kernel_size, stride=1, padding=padding)
self.transposedConv6 = nn.ConvTranspose1d(in_channels*16, in_channels*8, kernel_size=transposedStride, stride=transposedStride, padding=padding)
self.transposedConv7 = nn.ConvTranspose1d(in_channels*8, in_channels*4, kernel_size=transposedStride, stride=transposedStride, padding=padding)#, padding='same')
self.transposedConv8 = nn.ConvTranspose1d(in_channels*4, in_channels*2, kernel_size=transposedStride, stride=transposedStride, padding=padding)#, padding='same')
self.transposedConv9 = nn.ConvTranspose1d(in_channels*2, in_channels*1, kernel_size=transposedStride, stride=transposedStride, padding=padding)#, padding='same')
self.conv6 = Deconv(in_channels*16, in_channels*8, kernel=kernel_size, stride=1, padding=padding)
self.conv7 = Deconv(in_channels*8, in_channels*4, kernel=kernel_size, stride=1, padding=padding) # 8 from trans conv and 4 from same res
self.conv8 = Deconv(in_channels*4, in_channels*2, kernel=kernel_size, stride=1, padding=padding) # x from trans conv and 2 from same res
self.conv9 = Deconv(int(in_channels*2), in_channels*1, kernel=kernel_size, stride=1, padding=padding) # x from trans conv and 1 from same res
# self.conv_to_n_classes = nn.Conv1d(in_channels=in_channels, out_channels=512, kernel_size=1, stride=1, padding=0)
# go down again
self.convDown1 = Deconv(int(in_channels*3), in_channels*2, kernel=kernel_size, stride=1, padding=padding)
self.convDown2 = Deconv(in_channels*6, in_channels*4, kernel=kernel_size, stride=1, padding=padding)
self.convDown3 = Deconv(in_channels*12, in_channels*8, kernel=kernel_size, stride=1, padding=padding)
self.convDown4 = Deconv(in_channels*24, in_channels*16, kernel=4, stride=1, padding=padding)
self.convDown5 = Deconv(in_channels*16, in_channels*32, kernel=4, stride=1, padding=padding)
# self.convDown6 = Deconv(in_channels*32, in_channels*32, kernel=kernel_size, stride=1, padding=padding)
# self.convDown7 = Deconv(in_channels*32, in_channels*32, kernel=kernel_size, stride=1, padding=padding)
# self.loseConv1 = nn.Conv1d(in_channels*32, in_channels*32, kernel_size=3, stride=1, padding=0)
# self.loseConv2 = nn.Conv1d(in_channels*32, in_channels*32, kernel_size=3, stride=1, padding=0)
# self.loseConv3 = nn.Conv1d(in_channels*32, in_channels*32, kernel_size=3, stride=1, padding=0)
# self.loseConv4 = nn.Conv1d(in_channels*32, in_channels*32, kernel_size=5, stride=1, padding=0)
# self.lastConv = nn.Conv1d(in_channels*32, in_channels*32, kernel_size=3, stride =1 , padding=0)
# self.output_avg = nn.AvgPool1d(234)
self.fc = nn.Linear(512, n_classes)
        torch.nn.init.xavier_uniform_(self.fc.weight)
def forward(self, x):
# print(x.shape)
# exit()
c1 = self.conv1(x)
p1 = self.pool(c1)
c2 = self.conv2(p1)
p2 = self.pool(c2)
c3 = self.conv3(p2)
p3 = self.pool(c3)
c4 = self.conv4(p3)
p4 = self.pool(c4)
c5 = self.conv5(p4)
# expansive
u6 = self.transposedConv6(c5)
u6 = torch.cat((u6, c4[:,:,32:-31]), axis=1) # sum to 10
c6 = self.conv6(u6)
u7 = self.transposedConv7(c6)
# u7 = F.pad(u7, (0,1))
u7 = torch.cat((u7, c3[:,:,250:-250]), axis=1) # sum to 10
c7 = self.conv7(u7)
u8 = self.transposedConv8(c7)
u8 = torch.cat((u8, c2[:,:,1561:-1560]), axis=1) # sum to 10
c8 = self.conv8(u8)
u9 = self.transposedConv9(c8)
# u9 = F.pad(u9, (0,1))
u9 = torch.cat((u9, c1[:,:,9426:-9425]), axis=1) # sum to 10
c9 = self.conv9(u9)
p9 = self.pool(c9)
# and way down we go
# p9 = F.pad(p9, (3,3))
newthrough1 = torch.cat((p9, c8[:,:,1:-1]), axis=1) # sum to 10
c10 = self.convDown1(newthrough1)
p10 = self.pool(c10)
# p10 = F.pad(p10, (1,1))
newthrough2 = torch.cat((p10, c7[:,:,2:-2]), axis=1) # sum to 10
c11 = self.convDown2(newthrough2)
p11 = self.pool(c11)
# p11 = F.pad(p11, (2,2))
newthrough3 = torch.cat((p11, c6[:,:,2:-2]), axis=1) # sum to 10
c12 = self.convDown3(newthrough3)
p12 = self.pool(c12)
newthrough4 = torch.cat((p12, c5[:,:,2:-2]), axis=1) # sum to 10
c13 = self.convDown4(newthrough4)
# print("\n\nc13 size ", c13.shape)
p13 = self.pool3(c13)
# print("\n\n p13 size", p13.shape)
# standalone one layer deeper than rest of network
c14 = self.convDown5(p13)
# p14 = self.pool(c14)
# # standalone one layer deeper than rest of network
# c15 = self.loseConv1(p14)
# p15 = self.pool(c15)
# # standalone one layer deeper than rest of network
# c16 = self.loseConv2(p15)
# p16 = self.pool(c16)
# c17 = self.loseConv3(p16)
# # print("c17: ", c17.shape)
# # p17 = self.pool(c17)
# # print(p17.shape)
# output = self.loseConv4(c17)
        output = c14  # self.output_avg(c14) is disabled in this no-average-pooling variant
# print(output)
# output = self.fc2(output)
output = self.fc(output.permute(0,2,1))
# print('out', output)
# print(self.weight)
# exit()
return output.view(output.shape[0],-1)
| 38.502538 | 169 | 0.633092 |
8c2d1d028cc121951fa31ef2b4b91c6a5febefeb | 546 | py | Python | nemoobot/bot/tests/test_utils.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | [
"MIT"
] | 1 | 2021-01-30T09:19:37.000Z | 2021-01-30T09:19:37.000Z | nemoobot/bot/tests/test_utils.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | [
"MIT"
] | 2 | 2020-12-21T20:57:19.000Z | 2021-01-26T08:08:09.000Z | nemoobot/bot/tests/test_utils.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | [
"MIT"
] | 1 | 2020-12-22T07:42:42.000Z | 2020-12-22T07:42:42.000Z | import pytest
import requests
from unittest.mock import MagicMock
from mock import patch
from pytest_mock import mocker
from bot.utils import load_user_settings
@pytest.fixture
def setup_mock_get_method(mocker):
mocker.patch('requests.get')
yield requests.get
def test_load_user_settings_returns_list_if_any_error(setup_mock_get_method):
mock_requests = setup_mock_get_method.return_value
mock_requests.json.return_value.raise_exception.side_effect = KeyError()
result = load_user_settings()
assert list() == result
| 24.818182 | 77 | 0.809524 |
35dc2fff7614e8a79d3182a7758453b15222baff | 4,218 | py | Python | faq/views.py | howiworkdaily/django-faq | fc680d6be1deaa035e4bb2e752bb57db3eb0e096 | [
"BSD-3-Clause"
] | 46 | 2015-02-01T22:33:00.000Z | 2022-02-27T05:25:11.000Z | faq/views.py | jhensley/django-faq | fc680d6be1deaa035e4bb2e752bb57db3eb0e096 | [
"BSD-3-Clause"
] | 2 | 2015-02-28T11:28:33.000Z | 2015-03-15T21:03:37.000Z | faq/views.py | jhensley/django-faq | fc680d6be1deaa035e4bb2e752bb57db3eb0e096 | [
"BSD-3-Clause"
] | 23 | 2015-03-12T15:06:27.000Z | 2021-09-30T03:19:15.000Z | from __future__ import absolute_import
from django.db.models import Max
from django.core.urlresolvers import reverse, NoReverseMatch
from django.contrib import messages
from django.http import Http404
from django.shortcuts import redirect, render, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import ListView, DetailView, TemplateView, CreateView
from .models import Question, Topic
from .forms import SubmitFAQForm
class TopicList(ListView):
model = Topic
template = "faq/topic_list.html"
allow_empty = True
context_object_name = "topics"
def get_context_data(self, **kwargs):
data = super(TopicList, self).get_context_data(**kwargs)
# This slightly magical queryset grabs the latest update date for
# topic's questions, then the latest date for that whole group.
# In other words, it's::
#
# max(max(q.updated_on for q in topic.questions) for topic in topics)
#
# Except performed in the DB, so quite a bit more efficiant.
#
# We can't just do Question.objects.all().aggregate(max('updated_on'))
# because that'd prevent a subclass from changing the view's queryset
# (or even model -- this view'll even work with a different model
# as long as that model has a many-to-one to something called "questions"
# with an "updated_on" field). So this magic is the price we pay for
# being generic.
last_updated = (data['object_list']
.annotate(updated=Max('questions__updated_on'))
.aggregate(Max('updated')))
data.update({'last_updated': last_updated['updated__max']})
return data
class TopicDetail(DetailView):
model = Topic
template = "faq/topic_detail.html"
context_object_name = "topic"
def get_context_data(self, **kwargs):
# Include a list of questions this user has access to. If the user is
# logged in, this includes protected questions. Otherwise, not.
qs = self.object.questions.active()
if self.request.user.is_anonymous():
qs = qs.exclude(protected=True)
data = super(TopicDetail, self).get_context_data(**kwargs)
data.update({
'questions': qs,
'last_updated': qs.aggregate(updated=Max('updated_on'))['updated'],
})
return data
class QuestionDetail(DetailView):
queryset = Question.objects.active()
template = "faq/question_detail.html"
def get_queryset(self):
topic = get_object_or_404(Topic, slug=self.kwargs['topic_slug'])
# Careful here not to hardcode a base queryset. This lets
# subclassing users re-use this view on a subset of questions, or
# even on a new model.
# FIXME: similar logic as above. This should push down into managers.
qs = super(QuestionDetail, self).get_queryset().filter(topic=topic)
if self.request.user.is_anonymous():
qs = qs.exclude(protected=True)
return qs
class SubmitFAQ(CreateView):
model = Question
form_class = SubmitFAQForm
template_name = "faq/submit_question.html"
success_view_name = "faq_submit_thanks"
def get_form_kwargs(self):
kwargs = super(SubmitFAQ, self).get_form_kwargs()
kwargs['instance'] = Question()
if self.request.user.is_authenticated():
kwargs['instance'].created_by = self.request.user
return kwargs
def form_valid(self, form):
response = super(SubmitFAQ, self).form_valid(form)
messages.success(self.request,
_("Your question was submitted and will be reviewed by for inclusion in the FAQ."),
fail_silently=True,
)
return response
def get_success_url(self):
        # The superclass version raises ImproperlyConfigured if self.success_url
# isn't set. Instead of that, we'll try to redirect to a named view.
if self.success_url:
return self.success_url
else:
return reverse(self.success_view_name)
class SubmitFAQThanks(TemplateView):
template_name = "faq/submit_thanks.html" | 39.055556 | 95 | 0.667378 |
5eba0a9d05553d93abb656d3a94ea1895fee25fd | 1,216 | py | Python | recipe/hunspell_test.py | regro-cf-autotick-bot/hunspell-en-feedstock | d218096f540dd1e3edb6f2abf0d4f12e8e351b0b | [
"BSD-3-Clause"
] | null | null | null | recipe/hunspell_test.py | regro-cf-autotick-bot/hunspell-en-feedstock | d218096f540dd1e3edb6f2abf0d4f12e8e351b0b | [
"BSD-3-Clause"
] | 6 | 2020-06-16T02:34:33.000Z | 2021-11-08T03:59:07.000Z | recipe/hunspell_test.py | regro-cf-autotick-bot/hunspell-en-feedstock | d218096f540dd1e3edb6f2abf0d4f12e8e351b0b | [
"BSD-3-Clause"
] | 2 | 2020-06-16T00:50:15.000Z | 2020-10-08T15:04:33.000Z | import os, sys, shutil, subprocess
from pathlib import Path
OUT = Path(os.environ["PREFIX"]) / "share" / "hunspell_dictionaries"
PKG = os.environ["PKG_NAME"]
L10N = PKG.split("-")[-1].upper()
HUNSPELL_ARGS = ["hunspell", "-G"]
L10N_SEPARATOR = {
"AU": "_",
"CA": "_",
"GB": "-",
"US": "_",
"ZA": "_",
}
LOCALES = [f"en{sep}{l10n}" for l10n, sep in L10N_SEPARATOR.items()]
if L10N != "EN":
LOCALES = [f"en{L10N_SEPARATOR[L10N]}{L10N}"]
for locale in LOCALES:
print(f"Checking if the {locale} dictionary is detected...")
p = subprocess.Popen(["hunspell", "-D"], stderr=subprocess.PIPE)
out, err = p.communicate()
dicts = err.decode("utf-8")
l10n_dict = OUT / locale
assert str(l10n_dict) in dicts, [l10n_dict, dicts]
def hunspell(word, expected, locale):
args = HUNSPELL_ARGS + ["-d", locale]
print(f"Checking if the output of `{args}` for `{word}` is `{expected}`...")
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate(word.encode("utf-8"))
assert out.decode("utf-8").strip() == expected, out
for locale in LOCALES:
hunspell("test", "test", locale)
hunspell("mispellled", "", locale)
| 29.658537 | 80 | 0.628289 |
032899978098fbac5c807398811b4d2a03d71362 | 2,031 | py | Python | object_detection/core/box_coder_test.py | gourav108/coreml | 6bc2d494dff23cff923368e735992a4f4a47483c | [
"MIT"
] | 14 | 2018-06-26T09:40:19.000Z | 2022-01-24T00:12:07.000Z | object_detection/core/box_coder_test.py | gourav108/coreml | 6bc2d494dff23cff923368e735992a4f4a47483c | [
"MIT"
] | 2 | 2018-05-30T16:56:49.000Z | 2018-07-23T22:55:43.000Z | object_detection/core/box_coder_test.py | gourav108/coreml | 6bc2d494dff23cff923368e735992a4f4a47483c | [
"MIT"
] | 7 | 2018-06-08T05:53:01.000Z | 2020-06-09T12:23:44.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core.box_coder."""
import tensorflow as tf
from core import box_coder
from core import box_list
class MockBoxCoder(box_coder.BoxCoder):
"""Test BoxCoder that encodes/decodes using the multiply-by-two function."""
def code_size(self):
return 4
def _encode(self, boxes, anchors):
return 2.0 * boxes.get()
def _decode(self, rel_codes, anchors):
return box_list.BoxList(rel_codes / 2.0)
class BoxCoderTest(tf.test.TestCase):
def test_batch_decode(self):
mock_anchor_corners = tf.constant(
[[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
mock_anchors = box_list.BoxList(mock_anchor_corners)
mock_box_coder = MockBoxCoder()
expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
[[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
encoded_boxes_list = [mock_box_coder.encode(
box_list.BoxList(tf.constant(boxes)), mock_anchors)
for boxes in expected_boxes]
encoded_boxes = tf.stack(encoded_boxes_list)
decoded_boxes = box_coder.batch_decode(
encoded_boxes, mock_box_coder, mock_anchors)
with self.test_session() as sess:
decoded_boxes_result = sess.run(decoded_boxes)
self.assertAllClose(expected_boxes, decoded_boxes_result)
if __name__ == '__main__':
tf.test.main()
| 32.758065 | 80 | 0.669129 |
efa7879c766943710f9e2c5e3c7e6cbb4cdf566c | 15,613 | py | Python | metadrive/manager/agent_manager.py | decisionforce/metadrive | c18e29f5169868dabe74c327ab092daeca5bf98c | [
"Apache-2.0"
] | 125 | 2021-08-30T06:33:57.000Z | 2022-03-31T09:02:44.000Z | metadrive/manager/agent_manager.py | Morefrees/metadrive | c18e29f5169868dabe74c327ab092daeca5bf98c | [
"Apache-2.0"
] | 72 | 2021-08-30T16:23:41.000Z | 2022-03-31T19:17:16.000Z | metadrive/manager/agent_manager.py | Morefrees/metadrive | c18e29f5169868dabe74c327ab092daeca5bf98c | [
"Apache-2.0"
] | 20 | 2021-09-09T08:20:25.000Z | 2022-03-24T13:24:07.000Z | import copy
from typing import Dict
from gym.spaces import Box, Dict, MultiDiscrete
from metadrive.constants import DEFAULT_AGENT
from metadrive.manager.base_manager import BaseManager
from metadrive.policy.AI_protect_policy import AIProtectPolicy
from metadrive.policy.env_input_policy import EnvInputPolicy
from metadrive.policy.manual_control_policy import ManualControlPolicy
class AgentManager(BaseManager):
"""
This class maintain the relationship between active agents in the environment with the underlying instance
of objects.
Note:
agent name: Agent name that exists in the environment, like agent0, agent1, ....
object name: The unique name for each object, typically be random string.
"""
INITIALIZED = False # when vehicles instances are created, it will be set to True
def __init__(self, init_observations, init_action_space):
"""
The real init is happened in self.init(), in which super().__init__() will be called
"""
# BaseVehicles which can be controlled by policies when env.step() called
self._active_objects = {}
# BaseVehicles which will be recycled after the delay_done time
self._dying_objects = {}
self._agents_finished_this_frame = dict() # for observation space
self.next_agent_count = 0
        # fake init: done before the engine and vehicles are created; needed when all vehicles are re-created at runtime
self.observations = copy.copy(init_observations) # its value is map<agent_id, obs> before init() is called
self._init_observations = init_observations # map <agent_id, observation>
# init spaces before initializing env.engine
observation_space = {
agent_id: single_obs.observation_space
for agent_id, single_obs in init_observations.items()
}
assert isinstance(init_action_space, dict)
assert isinstance(observation_space, dict)
self._init_observation_spaces = observation_space
self._init_action_spaces = init_action_space
self.observation_spaces = copy.copy(observation_space)
self.action_spaces = copy.copy(init_action_space)
# this map will be override when the env.init() is first called and vehicles are made
self._agent_to_object = {k: k for k in self.observations.keys()} # no target vehicles created, fake init
self._object_to_agent = {k: k for k in self.observations.keys()} # no target vehicles created, fake init
# get the value in init()
self._allow_respawn = None
self._debug = None
self._delay_done = None
self._infinite_agents = None
def _get_vehicles(self, config_dict: dict):
from metadrive.component.vehicle.vehicle_type import random_vehicle_type, vehicle_type
ret = {}
for agent_id, v_config in config_dict.items():
v_type = random_vehicle_type(self.np_random) if self.engine.global_config["random_agent_model"] else \
vehicle_type[v_config["vehicle_model"] if v_config.get("vehicle_model", False) else "default"]
obj = self.spawn_object(v_type, vehicle_config=v_config)
ret[agent_id] = obj
policy = self._get_policy(obj)
self.engine.add_policy(obj.id, policy)
return ret
def _get_policy(self, obj):
# note: agent.id = object id
if self.engine.global_config["agent_policy"] is not None:
return self.engine.global_config["agent_policy"](obj, self.generate_seed())
if self.engine.global_config["manual_control"]:
if self.engine.global_config.get("use_AI_protector", False):
policy = AIProtectPolicy(obj, self.generate_seed())
else:
policy = ManualControlPolicy(obj, self.generate_seed())
else:
policy = EnvInputPolicy(obj, self.generate_seed())
return policy
def before_reset(self):
if not self.INITIALIZED:
super(AgentManager, self).__init__()
self.INITIALIZED = True
super(AgentManager, self).before_reset()
def reset(self):
"""
        Agent manager is really initialized after the BaseVehicle instances are created
"""
self.random_spawn_lane_in_single_agent()
config = self.engine.global_config
self._debug = config["debug"]
self._delay_done = config["delay_done"]
self._infinite_agents = config["num_agents"] == -1
self._allow_respawn = config["allow_respawn"]
init_vehicles = self._get_vehicles(config_dict=self.engine.global_config["target_vehicle_configs"])
vehicles_created = set(init_vehicles.keys())
vehicles_in_config = set(self._init_observations.keys())
assert vehicles_created == vehicles_in_config, "{} not defined in target vehicles config".format(
vehicles_created.difference(vehicles_in_config)
)
# it is used when reset() is called to reset its original agent_id
self._agent_to_object = {agent_id: vehicle.name for agent_id, vehicle in init_vehicles.items()}
self._object_to_agent = {vehicle.name: agent_id for agent_id, vehicle in init_vehicles.items()}
self._active_objects = {v.name: v for v in init_vehicles.values()}
self._dying_objects = {}
self._agents_finished_this_frame = dict()
# real init {obj_name: space} map
self.observations = dict()
self.observation_spaces = dict()
self.action_spaces = dict()
for agent_id, vehicle in init_vehicles.items():
self.observations[vehicle.name] = self._init_observations[agent_id]
obs_space = self._init_observation_spaces[agent_id]
self.observation_spaces[vehicle.name] = obs_space
if not self.engine.global_config["offscreen_render"]:
assert isinstance(obs_space, Box)
else:
assert isinstance(obs_space, Dict), "Multi-agent observation should be gym.Dict"
action_space = self._init_action_spaces[agent_id]
self.action_spaces[vehicle.name] = action_space
assert isinstance(action_space, Box) or isinstance(action_space, MultiDiscrete)
self.next_agent_count = len(init_vehicles)
def random_spawn_lane_in_single_agent(self):
if not self.engine.global_config["is_multi_agent"] and \
self.engine.global_config.get("random_spawn_lane_index", False) and self.engine.current_map is not None:
spawn_road_start = self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"][0]
spawn_road_end = self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"][1]
index = self.np_random.randint(self.engine.current_map.config["lane_num"])
self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"] = (
spawn_road_start, spawn_road_end, index
)
def finish(self, agent_name, ignore_delay_done=False):
"""
        ignore_delay_done: Whether to ignore the delay done. This is not required when the agent successfully finishes the episode!
"""
if not self.engine.replay_episode:
vehicle_name = self._agent_to_object[agent_name]
v = self._active_objects.pop(vehicle_name)
if (not ignore_delay_done) and (self._delay_done > 0):
self._put_to_dying_queue(v)
else:
# move to invisible place
self._remove_vehicle(v)
self._agents_finished_this_frame[agent_name] = v.name
self._check()
def _check(self):
if self._debug:
current_keys = sorted(list(self._active_objects.keys()) + list(self._dying_objects.keys()))
exist_keys = sorted(list(self._object_to_agent.keys()))
assert current_keys == exist_keys, "You should confirm_respawn() after request for propose_new_vehicle()!"
def propose_new_vehicle(self):
# Create a new vehicle.
agent_name = self.next_agent_id()
next_config = self.engine.global_config["target_vehicle_configs"]["agent0"]
vehicle = self._get_vehicles({agent_name: next_config})[agent_name]
new_v_name = vehicle.name
self._agent_to_object[agent_name] = new_v_name
self._object_to_agent[new_v_name] = agent_name
self.observations[new_v_name] = self._init_observations["agent0"]
self.observations[new_v_name].reset(vehicle)
self.observation_spaces[new_v_name] = self._init_observation_spaces["agent0"]
self.action_spaces[new_v_name] = self._init_action_spaces["agent0"]
self._active_objects[vehicle.name] = vehicle
self._check()
vehicle.before_step([0, 0])
vehicle.set_static(False)
return agent_name, vehicle
def next_agent_id(self):
ret = "agent{}".format(self.next_agent_count)
self.next_agent_count += 1
return ret
def set_allow_respawn(self, flag: bool):
self._allow_respawn = flag
def before_step(self):
# not in replay mode
self._agents_finished_this_frame = dict()
step_infos = {}
for agent_id in self.active_agents.keys():
policy = self.engine.get_policy(self._agent_to_object[agent_id])
action = policy.act(agent_id)
step_infos[agent_id] = policy.get_action_info()
step_infos[agent_id].update(self.get_agent(agent_id).before_step(action))
finished = set()
for v_name in self._dying_objects.keys():
self._dying_objects[v_name][1] -= 1
if self._dying_objects[v_name][1] == 0: # Countdown goes to 0, it's time to remove the vehicles!
v = self._dying_objects[v_name][0]
self._remove_vehicle(v)
finished.add(v_name)
for v_name in finished:
self._dying_objects.pop(v_name)
return step_infos
def after_step(self, *args, **kwargs):
step_infos = self.for_each_active_agents(lambda v: v.after_step())
return step_infos
def _translate(self, d):
return {self._object_to_agent[k]: v for k, v in d.items()}
def get_vehicle_list(self):
return list(self._active_objects.values()) + [v for (v, _) in self._dying_objects.values()]
def get_observations(self):
if hasattr(self, "engine") and self.engine.replay_episode:
return self.engine.replay_manager.get_replay_agent_observations()
else:
ret = {
old_agent_id: self.observations[v_name]
for old_agent_id, v_name in self._agents_finished_this_frame.items()
}
for obj_id, observation in self.observations.items():
if self.is_active_object(obj_id):
ret[self.object_to_agent(obj_id)] = observation
return ret
def get_observation_spaces(self):
ret = {
old_agent_id: self.observation_spaces[v_name]
for old_agent_id, v_name in self._agents_finished_this_frame.items()
}
for obj_id, space in self.observation_spaces.items():
if self.is_active_object(obj_id):
ret[self.object_to_agent(obj_id)] = space
return ret
def get_action_spaces(self):
ret = dict()
for obj_id, space in self.action_spaces.items():
if self.is_active_object(obj_id):
ret[self.object_to_agent(obj_id)] = space
return ret
def is_active_object(self, object_name):
if not self.INITIALIZED:
return True
return True if object_name in self._active_objects.keys() else False
@property
def active_agents(self):
"""
Return Map<agent_id, BaseVehicle>
"""
return self.engine.replay_manager.replay_agents if hasattr(self, "engine") and self.engine.replay_episode else {
self._object_to_agent[k]: v
for k, v in self._active_objects.items()
}
@property
def active_objects(self):
"""
        Return the internal object map directly (a live pointer). Use with caution!
        :return: Map<obj_name, obj>
        """
        raise DeprecationWarning("Prohibited! Use active_agents instead")
return self._active_objects
def get_agent(self, agent_name):
object_name = self.agent_to_object(agent_name)
return self.get_object(object_name)
def get_object(self, object_name):
if object_name in self._active_objects:
return self._active_objects[object_name]
elif object_name in self._dying_objects:
return self._dying_objects[object_name]
else:
raise ValueError("Object {} not found!".format(object_name))
def object_to_agent(self, obj_name):
"""
        We recommend using engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
        since these two functions DO NOT work when replaying an episode.
:param obj_name: BaseVehicle name
:return: agent id
"""
# if obj_name not in self._active_objects.keys() and self.INITIALIZED:
# raise ValueError("You can not access a pending Object(BaseVehicle) outside the agent_manager!")
return self._object_to_agent[obj_name]
def agent_to_object(self, agent_id):
"""
        We recommend using engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
        since these two functions DO NOT work when replaying an episode.
"""
return self._agent_to_object[agent_id]
def destroy(self):
# when new agent joins in the game, we only change this two maps.
if self.INITIALIZED:
super(AgentManager, self).destroy()
self._agent_to_object = {}
self._object_to_agent = {}
# BaseVehicles which can be controlled by policies when env.step() called
self._active_objects = {}
# BaseVehicles which can be respawned
self._dying_objects = {}
# Dict[object_id: value], init for **only** once after spawning vehicle
self.observations = {}
self.observation_spaces = {}
self.action_spaces = {}
self.next_agent_count = 0
self.INITIALIZED = False
def _put_to_dying_queue(self, v):
vehicle_name = v.name
v.set_static(True)
self._dying_objects[vehicle_name] = [v, self._delay_done]
def _remove_vehicle(self, vehicle):
vehicle_name = vehicle.name
assert vehicle_name not in self._active_objects
self.clear_objects([vehicle_name])
self._agent_to_object.pop(self._object_to_agent[vehicle_name])
self._object_to_agent.pop(vehicle_name)
@property
def allow_respawn(self):
if not self._allow_respawn:
return False
if len(self._active_objects) + len(self._dying_objects) < self.engine.global_config["num_agents"] \
or self._infinite_agents:
return True
else:
return False
def for_each_active_agents(self, func, *args, **kwargs):
"""
        Apply func to each active agent; func takes the vehicle as its first argument, with *args and **kwargs passed through.
"""
assert len(self.active_agents) > 0, "Not enough vehicles exist!"
ret = dict()
for k, v in self.active_agents.items():
ret[k] = func(v, *args, **kwargs)
return ret
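if __name__ == "__main__":
    # Illustrative sketch (not part of the manager above): agent ids and object
    # names are kept in two mirrored dicts so lookups translate both ways.
    agent_to_object = {"agent0": "vehicle_abc"}
    object_to_agent = {name: aid for aid, name in agent_to_object.items()}
    assert object_to_agent[agent_to_object["agent0"]] == "agent0"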
| 43.856742 | 120 | 0.660283 |
2f150beb1e765fb78298290368cce5d76113eb6f | 8,633 | py | Python | tests/program_analysis/test_py_ast_to_cast.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | null | null | null | tests/program_analysis/test_py_ast_to_cast.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | null | null | null | tests/program_analysis/test_py_ast_to_cast.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | null | null | null |
import pytest
import ast
import json
from automates.program_analysis.PyAST2CAST import py_ast_to_cast
from automates.program_analysis.CAST2GrFN.model.cast import (
AstNode,
Assignment,
Attribute,
BinaryOp,
BinaryOperator,
Call,
ClassDef,
Dict,
Expr,
FunctionDef,
List,
Loop,
ModelBreak,
ModelContinue,
ModelIf,
ModelReturn,
Module,
Name,
Number,
Set,
String,
SourceRef,
Subscript,
Tuple,
UnaryOp,
UnaryOperator,
VarType,
Var,
)
from automates.program_analysis.CAST2GrFN import cast
DATA_DIR = "tests/data/program_analysis/PyAST2CAST"
def dump_cast(C):
print(C.to_json_str())
def run_test_case(filepath, prog_name):
    # read the source once; the line count feeds the SourceRef span below
    with open(filepath) as file_handle:
        file_contents = file_handle.read()
    line_count = len(file_contents.splitlines())
convert = py_ast_to_cast.PyASTToCAST(prog_name)
test_C = convert.visit(ast.parse(file_contents))
test_C.source_refs = [SourceRef(prog_name, None, None, 1, line_count)]
out_cast = cast.CAST([test_C], cast_source_language="python")
to_compare = out_cast.to_json_object()
raw_json = json.load(
open(f"{DATA_DIR}/expected_output/{prog_name.split('.')[0]}--CAST.json", "r")
)
assert raw_json == to_compare
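# Example wiring (matches the test cases below): an input program under
# f"{DATA_DIR}/<folder>/" is compared against its pre-generated CAST JSON, e.g.
#     run_test_case(f"{DATA_DIR}/class/test_class_1.py", "test_class_1.py")
# reads f"{DATA_DIR}/expected_output/test_class_1--CAST.json" for the comparison.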
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_class_1():
prog_name = "test_class_1.py"
folder = "class"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_class_2():
prog_name = "test_class_2.py"
folder = "class"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_bool_1():
prog_name = "test_bool_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_call_1():
prog_name = "test_call_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_ext_slice_1():
prog_name = "test_ext_slice_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_increment_1():
prog_name = "test_increment_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_list_1():
prog_name = "test_list_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_list_2():
prog_name = "test_list_2.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_name_1():
prog_name = "test_name_1.py"
folder = "expression"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_add_1():
prog_name = "test_add_1.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_add_2():
prog_name = "test_add_2.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_1():
prog_name = "test_assign_1.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_2():
prog_name = "test_assign_2.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_3():
prog_name = "test_assign_3.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_4():
prog_name = "test_assign_4.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_5():
prog_name = "test_assign_5.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_assign_6():
prog_name = "test_assign_6.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_function_1():
prog_name = "test_function_1.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_lambda_1():
prog_name = "test_lambda_1.py"
folder = "function_def"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_if_1():
prog_name = "test_if_1.py"
folder = "if"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_if_2():
prog_name = "test_if_2.py"
folder = "if"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_if_3():
prog_name = "test_if_3.py"
folder = "if"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_if_4():
prog_name = "test_if_4.py"
folder = "if"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_if_5():
prog_name = "test_if_5.py"
folder = "if"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_import_1():
prog_name = "test_import_1.py"
folder = "import"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_import_2():
prog_name = "test_import_2.py"
folder = "import"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_import_3():
prog_name = "test_import_3.py"
folder = "import"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_for_1():
prog_name = "test_for_1.py"
folder = "loop"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_for_2():
prog_name = "test_for_2.py"
folder = "loop"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
@pytest.mark.skip(reason="cast updates require changes to test cases")
def test_while_1():
prog_name = "test_while_1.py"
folder = "loop"
filepath = f"{DATA_DIR}/{folder}/{prog_name}"
run_test_case(filepath, prog_name)
| 23.652055 | 85 | 0.698367 |
bcd6812d4569e6159320d978f84f45133d089dab | 1,503 | py | Python | lib/python3.4/site-packages/pip/_vendor/progress/counter.py | LChristakis/chalice-hunter | 6bffea4620e23ce9ff12ac30526ebafcb9c10058 | ["MIT"] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | lib/python3.4/site-packages/pip/_vendor/progress/counter.py | LChristakis/chalice-hunter | 6bffea4620e23ce9ff12ac30526ebafcb9c10058 | ["MIT"] | 309 | 2016-10-27T23:47:06.000Z | 2017-04-02T04:40:21.000Z | lib/python3.4/site-packages/pip/_vendor/progress/counter.py | LChristakis/chalice-hunter | 6bffea4620e23ce9ff12ac30526ebafcb9c10058 | ["MIT"] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
class Pie(Stack):
phases = ('○', '◔', '◑', '◕', '●')
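# Usage sketch (commented out; assumes the surrounding `progress` package API,
# where Infinite/Progress supply index, progress and next(), as these classes rely on):
#
#     from progress.counter import Counter
#     counter = Counter('Processed ')
#     for _ in range(100):
#         counter.next()   # redraws the running count in place
#     counter.finish()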
| 30.06 | 74 | 0.689288 |
9b26282ed78ef26e3eba56a0d9d8373f02bb6798 | 28,123 | py | Python | scrapy/tests/test_http_request.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | ["BSD-3-Clause"] | 1 | 2022-03-04T06:18:22.000Z | 2022-03-04T06:18:22.000Z | scrapy/tests/test_http_request.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | ["BSD-3-Clause"] | null | null | null | scrapy/tests/test_http_request.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | ["BSD-3-Clause"] | null | null | null |
import cgi
import unittest
import xmlrpclib
from cStringIO import StringIO
from urlparse import urlparse
from scrapy.http import Request, FormRequest, XmlRpcRequest, Headers, HtmlResponse
class RequestTest(unittest.TestCase):
request_class = Request
default_method = 'GET'
default_headers = {}
default_meta = {}
def test_init(self):
# Request requires url in the constructor
self.assertRaises(Exception, self.request_class)
# url argument must be basestring
self.assertRaises(TypeError, self.request_class, 123)
r = self.request_class('http://www.example.com')
r = self.request_class("http://www.example.com")
assert isinstance(r.url, str)
self.assertEqual(r.url, "http://www.example.com")
self.assertEqual(r.method, self.default_method)
assert isinstance(r.headers, Headers)
self.assertEqual(r.headers, self.default_headers)
self.assertEqual(r.meta, self.default_meta)
meta = {"lala": "lolo"}
headers = {"caca": "coco"}
r = self.request_class("http://www.example.com", meta=meta, headers=headers, body="a body")
assert r.meta is not meta
self.assertEqual(r.meta, meta)
assert r.headers is not headers
self.assertEqual(r.headers["caca"], "coco")
def test_url_no_scheme(self):
self.assertRaises(ValueError, self.request_class, 'foo')
def test_headers(self):
# Different ways of setting headers attribute
url = 'http://www.scrapy.org'
headers = {'Accept':'gzip', 'Custom-Header':'nothing to tell you'}
r = self.request_class(url=url, headers=headers)
p = self.request_class(url=url, headers=r.headers)
self.assertEqual(r.headers, p.headers)
self.assertFalse(r.headers is headers)
self.assertFalse(p.headers is r.headers)
# headers must not be unicode
h = Headers({'key1': u'val1', u'key2': 'val2'})
h[u'newkey'] = u'newval'
for k, v in h.iteritems():
self.assert_(isinstance(k, str))
for s in v:
self.assert_(isinstance(s, str))
def test_eq(self):
url = 'http://www.scrapy.org'
r1 = self.request_class(url=url)
r2 = self.request_class(url=url)
self.assertNotEqual(r1, r2)
set_ = set()
set_.add(r1)
set_.add(r2)
self.assertEqual(len(set_), 2)
def test_url(self):
"""Request url tests"""
r = self.request_class(url="http://www.scrapy.org/path")
self.assertEqual(r.url, "http://www.scrapy.org/path")
# url quoting on creation
r = self.request_class(url="http://www.scrapy.org/blank%20space")
self.assertEqual(r.url, "http://www.scrapy.org/blank%20space")
r = self.request_class(url="http://www.scrapy.org/blank space")
self.assertEqual(r.url, "http://www.scrapy.org/blank%20space")
# url encoding
r1 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="utf-8")
r2 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="latin1")
self.assertEqual(r1.url, "http://www.scrapy.org/price/%C2%A3")
self.assertEqual(r2.url, "http://www.scrapy.org/price/%A3")
def test_body(self):
r1 = self.request_class(url="http://www.example.com/")
assert r1.body == ''
r2 = self.request_class(url="http://www.example.com/", body="")
assert isinstance(r2.body, str)
self.assertEqual(r2.encoding, 'utf-8') # default encoding
r3 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='utf-8')
assert isinstance(r3.body, str)
self.assertEqual(r3.body, "Price: \xc2\xa3100")
r4 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='latin1')
assert isinstance(r4.body, str)
self.assertEqual(r4.body, "Price: \xa3100")
def test_ajax_url(self):
# ascii url
r = self.request_class(url="http://www.example.com/ajax.html#!key=value")
self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key=value")
# unicode url
r = self.request_class(url=u"http://www.example.com/ajax.html#!key=value")
self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key=value")
def test_copy(self):
"""Test Request copy"""
def somecallback():
pass
r1 = self.request_class("http://www.example.com", callback=somecallback, errback=somecallback)
r1.meta['foo'] = 'bar'
r2 = r1.copy()
# make sure copy does not propagate callbacks
assert r1.callback is somecallback
assert r1.errback is somecallback
assert r2.callback is r1.callback
assert r2.errback is r2.errback
# make sure meta dict is shallow copied
assert r1.meta is not r2.meta, "meta must be a shallow copy, not identical"
self.assertEqual(r1.meta, r2.meta)
# make sure headers attribute is shallow copied
assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical"
self.assertEqual(r1.headers, r2.headers)
self.assertEqual(r1.encoding, r2.encoding)
self.assertEqual(r1.dont_filter, r2.dont_filter)
# Request.body can be identical since it's an immutable object (str)
def test_copy_inherited_classes(self):
"""Test Request children copies preserve their class"""
class CustomRequest(self.request_class):
pass
r1 = CustomRequest('http://www.example.com')
r2 = r1.copy()
assert type(r2) is CustomRequest
def test_replace(self):
"""Test Request.replace() method"""
r1 = self.request_class("http://www.example.com", method='GET')
hdrs = Headers(dict(r1.headers, key='value'))
r2 = r1.replace(method="POST", body="New body", headers=hdrs)
self.assertEqual(r1.url, r2.url)
self.assertEqual((r1.method, r2.method), ("GET", "POST"))
self.assertEqual((r1.body, r2.body), ('', "New body"))
self.assertEqual((r1.headers, r2.headers), (self.default_headers, hdrs))
# Empty attributes (which may fail if not compared properly)
r3 = self.request_class("http://www.example.com", meta={'a': 1}, dont_filter=True)
r4 = r3.replace(url="http://www.example.com/2", body='', meta={}, dont_filter=False)
self.assertEqual(r4.url, "http://www.example.com/2")
self.assertEqual(r4.body, '')
self.assertEqual(r4.meta, {})
assert r4.dont_filter is False
def test_method_always_str(self):
r = self.request_class("http://www.example.com", method=u"POST")
assert isinstance(r.method, str)
class FormRequestTest(RequestTest):
request_class = FormRequest
def test_empty_formdata(self):
r1 = self.request_class("http://www.example.com", formdata={})
self.assertEqual(r1.body, '')
def test_default_encoding(self):
# using default encoding (utf-8)
data = {'one': 'two', 'price': '\xc2\xa3 100'}
r2 = self.request_class("http://www.example.com", formdata=data)
self.assertEqual(r2.method, 'POST')
self.assertEqual(r2.encoding, 'utf-8')
self.assertEqual(r2.body, 'price=%C2%A3+100&one=two')
self.assertEqual(r2.headers['Content-Type'], 'application/x-www-form-urlencoded')
def test_custom_encoding(self):
data = {'price': u'\xa3 100'}
r3 = self.request_class("http://www.example.com", formdata=data, encoding='latin1')
self.assertEqual(r3.encoding, 'latin1')
self.assertEqual(r3.body, 'price=%A3+100')
def test_multi_key_values(self):
# using multiples values for a single key
data = {'price': u'\xa3 100', 'colours': ['red', 'blue', 'green']}
r3 = self.request_class("http://www.example.com", formdata=data)
self.assertEqual(r3.body, 'colours=red&colours=blue&colours=green&price=%C2%A3+100')
def test_from_response_post(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html")
req = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'})
self.assertEqual(req.method, 'POST')
self.assertEqual(req.headers['Content-type'], 'application/x-www-form-urlencoded')
self.assertEqual(req.url, "http://www.example.com/this/post.php")
fs = _qs(req)
self.assertEqual(set(fs["test"]), set(["val1", "val2"]))
self.assertEqual(set(fs["one"]), set(["two", "three"]))
self.assertEqual(fs['test2'], ['xxx'])
self.assertEqual(fs['six'], ['seven'])
def test_from_response_extra_headers(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""")
req = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'},
headers={"Accept-Encoding": "gzip,deflate"})
self.assertEqual(req.method, 'POST')
self.assertEqual(req.headers['Content-type'], 'application/x-www-form-urlencoded')
self.assertEqual(req.headers['Accept-Encoding'], 'gzip,deflate')
def test_from_response_get(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html")
r1 = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'})
self.assertEqual(r1.method, 'GET')
self.assertEqual(urlparse(r1.url).hostname, "www.example.com")
self.assertEqual(urlparse(r1.url).path, "/this/get.php")
fs = _qs(r1)
self.assertEqual(set(fs['test']), set(['val1', 'val2']))
self.assertEqual(set(fs['one']), set(['two', 'three']))
self.assertEqual(fs['test2'], ['xxx'])
self.assertEqual(fs['six'], ['seven'])
def test_from_response_override_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'})
fs = _qs(req)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_submit_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'})
fs = _qs(req)
self.assertEqual(fs['clickable1'], ['clicked1'])
self.assertFalse('clickable2' in fs, fs)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_submit_not_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'}, \
clickdata={'name': 'clickable2'})
fs = _qs(req)
self.assertEqual(fs['clickable2'], ['clicked2'])
self.assertFalse('clickable1' in fs, fs)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_dont_submit_image_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="image" name="i2" src="http://my.image.org/1.jpg">
<input type="submit" name="i3" value="i3v">
</form>""")
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v']})
def test_from_response_multiple_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable" value="clicked1">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="one" value="clicked1">
<input type="hidden" name="two" value="clicked2">
</form>""")
req = self.request_class.from_response(response, \
clickdata={'name': 'clickable', 'value': 'clicked2'})
fs = _qs(req)
self.assertEqual(fs['clickable'], ['clicked2'])
self.assertEqual(fs['one'], ['clicked1'])
self.assertEqual(fs['two'], ['clicked2'])
def test_from_response_unicode_clickdata(self):
response = _buildresponse(
u"""<form action="get.php" method="GET">
<input type="submit" name="price in \u00a3" value="\u00a3 1000">
<input type="submit" name="price in \u20ac" value="\u20ac 2000">
<input type="hidden" name="poundsign" value="\u00a3">
<input type="hidden" name="eurosign" value="\u20ac">
</form>""")
req = self.request_class.from_response(response, \
clickdata={'name': u'price in \u00a3'})
fs = _qs(req)
self.assertTrue(fs[u'price in \u00a3'.encode('utf-8')])
def test_from_response_multiple_forms_clickdata(self):
response = _buildresponse(
"""<form name="form1">
<input type="submit" name="clickable" value="clicked1">
<input type="hidden" name="field1" value="value1">
</form>
<form name="form2">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="field2" value="value2">
</form>
""")
req = self.request_class.from_response(response, formname='form2', \
clickdata={'name': 'clickable'})
fs = _qs(req)
self.assertEqual(fs['clickable'], ['clicked2'])
self.assertEqual(fs['field2'], ['value2'])
self.assertFalse('field1' in fs, fs)
def test_from_response_override_clickable(self):
response = _buildresponse('''<form><input type="submit" name="clickme" value="one"> </form>''')
req = self.request_class.from_response(response, \
formdata={'clickme': 'two'}, clickdata={'name': 'clickme'})
fs = _qs(req)
self.assertEqual(fs['clickme'], ['two'])
def test_from_response_dont_click(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
r1 = self.request_class.from_response(response, dont_click=True)
fs = _qs(r1)
self.assertFalse('clickable1' in fs, fs)
self.assertFalse('clickable2' in fs, fs)
def test_from_response_ambiguous_clickdata(self):
response = _buildresponse(
"""
<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
self.assertRaises(ValueError, self.request_class.from_response,
response, clickdata={'type': 'submit'})
def test_from_response_non_matching_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable" value="clicked">
</form>""")
self.assertRaises(ValueError, self.request_class.from_response,
response, clickdata={'nonexistent': 'notme'})
def test_from_response_errors_noform(self):
response = _buildresponse("""<html></html>""")
self.assertRaises(ValueError, self.request_class.from_response, response)
def test_from_response_invalid_html5(self):
response = _buildresponse("""<!DOCTYPE html><body></html><form>"""
"""<input type="text" name="foo" value="xxx">"""
"""</form></body></html>""")
req = self.request_class.from_response(response, formdata={'bar': 'buz'})
fs = _qs(req)
self.assertEqual(fs, {'foo': ['xxx'], 'bar': ['buz']})
def test_from_response_errors_formnumber(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""")
self.assertRaises(IndexError, self.request_class.from_response, response, formnumber=1)
def test_from_response_noformname(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>""")
r1 = self.request_class.from_response(response, formdata={'two':'3'})
self.assertEqual(r1.method, 'POST')
self.assertEqual(r1.headers['Content-type'], 'application/x-www-form-urlencoded')
fs = _qs(r1)
self.assertEqual(fs, {'one': ['1'], 'two': ['3']})
def test_from_response_formname_exists(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>""")
r1 = self.request_class.from_response(response, formname="form2")
self.assertEqual(r1.method, 'POST')
fs = _qs(r1)
self.assertEqual(fs, {'four': ['4'], 'three': ['3']})
def test_from_response_formname_notexist(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>""")
r1 = self.request_class.from_response(response, formname="form3")
self.assertEqual(r1.method, 'POST')
fs = _qs(r1)
self.assertEqual(fs, {'one': ['1']})
def test_from_response_formname_errors_formnumber(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>""")
self.assertRaises(IndexError, self.request_class.from_response, \
response, formname="form3", formnumber=2)
def test_from_response_select(self):
res = _buildresponse(
'''<form>
<select name="i1">
<option value="i1v1">option 1</option>
<option value="i1v2" selected>option 2</option>
</select>
<select name="i2">
<option value="i2v1">option 1</option>
<option value="i2v2">option 2</option>
</select>
<select>
<option value="i3v1">option 1</option>
<option value="i3v2">option 2</option>
</select>
<select name="i4" multiple>
<option value="i4v1">option 1</option>
<option value="i4v2" selected>option 2</option>
<option value="i4v3" selected>option 3</option>
</select>
<select name="i5" multiple>
<option value="i5v1">option 1</option>
<option value="i5v2">option 2</option>
</select>
<select name="i6"></select>
<select name="i7"/>
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v2'], 'i2': ['i2v1'], 'i4': ['i4v2', 'i4v3']})
def test_from_response_radio(self):
res = _buildresponse(
'''<form>
<input type="radio" name="i1" value="i1v1">
<input type="radio" name="i1" value="iv2" checked>
<input type="radio" name="i2" checked>
<input type="radio" name="i2">
<input type="radio" name="i3" value="i3v1">
<input type="radio" name="i3">
<input type="radio" value="i4v1">
<input type="radio">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['iv2'], 'i2': ['on']})
def test_from_response_checkbox(self):
res = _buildresponse(
'''<form>
<input type="checkbox" name="i1" value="i1v1">
<input type="checkbox" name="i1" value="iv2" checked>
<input type="checkbox" name="i2" checked>
<input type="checkbox" name="i2">
<input type="checkbox" name="i3" value="i3v1">
<input type="checkbox" name="i3">
<input type="checkbox" value="i4v1">
<input type="checkbox">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['iv2'], 'i2': ['on']})
def test_from_response_input_text(self):
res = _buildresponse(
'''<form>
<input type="text" name="i1" value="i1v1">
<input type="text" name="i2">
<input type="text" value="i3v1">
<input type="text">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v1'], 'i2': ['']})
def test_from_response_input_hidden(self):
res = _buildresponse(
'''<form>
<input type="hidden" name="i1" value="i1v1">
<input type="hidden" name="i2">
<input type="hidden" value="i3v1">
<input type="hidden">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v1'], 'i2': ['']})
    # renamed from test_from_response_input_hidden: a duplicate name would shadow the test above
    def test_from_response_input_hidden_2(self):
res = _buildresponse(
'''<form>
<input type="hidden" name="i1" value="i1v1">
<input type="hidden" name="i2">
<input type="hidden">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v1'], 'i2': ['']})
def test_from_response_input_textarea(self):
res = _buildresponse(
'''<form>
<textarea name="i1">i1v</textarea>
<textarea name="i2"></textarea>
<textarea name="i3"/>
<textarea>i4v</textarea>
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v'], 'i2': [''], 'i3': ['']})
def test_from_response_descendants(self):
res = _buildresponse(
'''<form>
<div>
<fieldset>
<input type="text" name="i1">
<select name="i2">
<option value="v1" selected>
</select>
</fieldset>
<input type="radio" name="i3" value="i3v2" checked>
<input type="checkbox" name="i4" value="i4v2" checked>
<textarea name="i5"></textarea>
<input type="hidden" name="h1" value="h1v">
</div>
<input type="hidden" name="h2" value="h2v">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(set(fs), set(['h2', 'i2', 'i1', 'i3', 'h1', 'i5', 'i4']))
def test_from_response_xpath(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form action="post2.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>""")
r1 = self.request_class.from_response(response, formxpath="//form[@action='post.php']")
fs = _qs(r1)
self.assertEqual(fs['one'], ['1'])
r1 = self.request_class.from_response(response, formxpath="//form/input[@name='four']")
fs = _qs(r1)
self.assertEqual(fs['three'], ['3'])
self.assertRaises(ValueError, self.request_class.from_response,
response, formxpath="//form/input[@name='abc']")
def _buildresponse(body, **kwargs):
kwargs.setdefault('body', body)
kwargs.setdefault('url', 'http://example.com')
kwargs.setdefault('encoding', 'utf-8')
return HtmlResponse(**kwargs)
def _qs(req):
if req.method == 'POST':
qs = req.body
else:
qs = req.url.partition('?')[2]
return cgi.parse_qs(qs, True)
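# Example: for a GET form request, _qs() parses the query string back into a
# dict of value lists, e.g.
#     _qs(FormRequest("http://example.com/?a=1&a=2")) == {'a': ['1', '2']}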
class XmlRpcRequestTest(RequestTest):
request_class = XmlRpcRequest
default_method = 'POST'
default_headers = {'Content-Type': ['text/xml']}
def _test_request(self, **kwargs):
r = self.request_class('http://scrapytest.org/rpc2', **kwargs)
self.assertEqual(r.headers['Content-Type'], 'text/xml')
self.assertEqual(r.body, xmlrpclib.dumps(**kwargs))
self.assertEqual(r.method, 'POST')
self.assertEqual(r.encoding, kwargs.get('encoding', 'utf-8'))
self.assertTrue(r.dont_filter, True)
def test_xmlrpc_dumps(self):
self._test_request(params=('value',))
self._test_request(params=('username', 'password'), methodname='login')
self._test_request(params=('response', ), methodresponse='login')
self._test_request(params=(u'pas\xa3',), encoding='utf-8')
self._test_request(params=(u'pas\xa3',), encoding='latin')
self._test_request(params=(None,), allow_none=1)
self.assertRaises(TypeError, self._test_request)
self.assertRaises(TypeError, self._test_request, params=(None,))
if __name__ == "__main__":
unittest.main()
| 42.353916 | 105 | 0.577357 |
75d4f74399552c4753d961b874d2ede86c38b922 | 986 | py | Python | neutron/db/migration/alembic_migrations/versions/yoga/expand/cd9ef14ccf87_add_index_to_agents_host.py | dangervon/neutron | 06ce0c2c94d2256a8f6804a1eacb0733747dcf46 | ["Apache-2.0"] | null | null | null | neutron/db/migration/alembic_migrations/versions/yoga/expand/cd9ef14ccf87_add_index_to_agents_host.py | dangervon/neutron | 06ce0c2c94d2256a8f6804a1eacb0733747dcf46 | ["Apache-2.0"] | null | null | null | neutron/db/migration/alembic_migrations/versions/yoga/expand/cd9ef14ccf87_add_index_to_agents_host.py | dangervon/neutron | 06ce0c2c94d2256a8f6804a1eacb0733747dcf46 | ["Apache-2.0"] | null | null | null |
# Copyright 2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add index to agents host
Revision ID: cd9ef14ccf87
Revises: 8160f7a9cebb
Create Date: 2022-01-07 15:45:33.319170
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'cd9ef14ccf87'
down_revision = '8160f7a9cebb'
TABLE = 'agents'
COLUMN = 'host'
def upgrade():
op.create_index(op.f('ix_' + TABLE + '_' + COLUMN), TABLE, [COLUMN])
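# For reference, the emitted DDL is roughly (exact form depends on the backend):
#     CREATE INDEX ix_agents_host ON agents (host);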
| 25.947368 | 78 | 0.719067 |
833618195a62da0c306eabed37e03931c973918c | 2,403 | py | Python | model/SSIM.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | ["MIT"] | null | null | null | model/SSIM.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | ["MIT"] | null | null | null | model/SSIM.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | ["MIT"] | null | null | null |
from __future__ import absolute_import
import keras_contrib.backend as KC
from keras import backend as K
class DSSIMObjective:
def __init__(self, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
self.__name__ = 'DSSIMObjective'
self.kernel_size = kernel_size
self.k1 = k1
self.k2 = k2
self.max_value = max_value
self.c1 = (self.k1 * self.max_value) ** 2
self.c2 = (self.k2 * self.max_value) ** 2
self.dim_ordering = K.image_data_format()
self.backend = K.backend()
    def _int_shape(self, x):
        # helper renamed from __int__, which is reserved for the int() protocol
        return K.int_shape(x) if self.backend == 'tensorflow' else K.shape(x)
def __call__(self, y_true, y_pred):
# There are additional parameters for this function
# Note: some of the 'modes' for edge behavior do not yet have a
# gradient definition in the Theano tree
# and cannot be used for learning
kernel = [self.kernel_size, self.kernel_size]
        y_true = K.reshape(y_true, [-1] + list(self._int_shape(y_pred)[1:]))
        y_pred = K.reshape(y_pred, [-1] + list(self._int_shape(y_pred)[1:]))
patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid',
self.dim_ordering)
patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid',
self.dim_ordering)
# Reshape to get the var in the cells
        bs, w, h, c1, c2, c3 = self._int_shape(patches_pred)
patches_pred = K.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
patches_true = K.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
# Get mean
u_true = K.mean(patches_true, axis=-1)
u_pred = K.mean(patches_pred, axis=-1)
# Get variance
var_true = K.var(patches_true, axis=-1)
var_pred = K.var(patches_pred, axis=-1)
        # Get covariance
covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred + self.c2)
denom = ((K.square(u_true)
+ K.square(u_pred)
+ self.c1) * (var_pred + var_true + self.c2))
ssim /= denom # no need for clipping, c1 and c2 make the denom non-zero
        return K.mean((1.0 - ssim) / 2.0)
| 43.690909 | 89 | 0.575531 |
40888293dc7a862822c8c8d61077b7c856bf94d2 | 1,169 | py | Python | src/models/Seq2seq.py | mhannani/ZinVert | d54e1ab1980ed70945c34d2ceb294d559126f623 | ["Apache-2.0"] | null | null | null | src/models/Seq2seq.py | mhannani/ZinVert | d54e1ab1980ed70945c34d2ceb294d559126f623 | ["Apache-2.0"] | null | null | null | src/models/Seq2seq.py | mhannani/ZinVert | d54e1ab1980ed70945c34d2ceb294d559126f623 | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
class Seq2Seq(nn.Module):
"""
Seq2seq model, combining encoder and decoder models.
"""
def __init__(self, encoder, decoder):
"""
The class constructor.
:param encoder: The Encoder model.
        :param decoder: The Decoder model.
"""
super().__init__()
self.encoder = encoder
self.decoder = decoder
    def forward(self, src, tgt, teacher_forcing_ratio=0.5):
"""
The forward pass
:param src: torch.Tensor(BATCH_SIZE, 37 (LENGTH_OF_LONGEST_SENTENCE_IN_CORPUS))
Source sentences [English sentences as batch]
:param tgt: torch.Tensor(BATCH_SIZE, 46 (LENGTH_OF_LONGEST_SENTENCE_IN_CORPUS))
Target sentences [Dutch sentences as batch]
:param teacher_forcing_ratio:
Teacher forcing ratio for applying the technique.
:return: Torch.Tensor()
Decoder output, Torch.Size([45 * 512, 19215])
"""
# encode the source sentence
hidden, cell = self.encoder(src)
outputs = self.decoder(tgt, hidden, cell, teacher_forcing_ratio)
return outputs
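# Usage sketch (hypothetical Encoder/Decoder classes matching the interfaces
# assumed above: encoder(src) -> (hidden, cell); decoder(tgt, hidden, cell, ratio)):
#
#     encoder = Encoder(SRC_VOCAB_SIZE, EMB_DIM, HID_DIM)
#     decoder = Decoder(TGT_VOCAB_SIZE, EMB_DIM, HID_DIM)
#     model = Seq2Seq(encoder, decoder)
#     outputs = model(src_batch, tgt_batch, teacher_forcing_ratio=0.5)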
| 29.225 | 87 | 0.621044 |
eba39459a560a1856acc4ffa362a663a1751a7f1 | 2,618 | py | Python | dbbackup/settings.py | sroussy/django-dbbackup | db1df2b06a5484e2a232a5fff3cf90d14a7caf67 | ["BSD-3-Clause"] | null | null | null | dbbackup/settings.py | sroussy/django-dbbackup | db1df2b06a5484e2a232a5fff3cf90d14a7caf67 | ["BSD-3-Clause"] | null | null | null | dbbackup/settings.py | sroussy/django-dbbackup | db1df2b06a5484e2a232a5fff3cf90d14a7caf67 | ["BSD-3-Clause"] | null | null | null |
# DO NOT IMPORT THIS BEFORE django.configure() has been run!
import os
from django.conf import settings
DATABASES = getattr(settings, 'DBBACKUP_DATABASES', list(settings.DATABASES.keys()))
BACKUP_DIRECTORY = getattr(settings, 'DBBACKUP_BACKUP_DIRECTORY', os.getcwd())
# Fake host
DBBACKUP_FAKE_HOST = getattr(settings, 'DBBACKUP_FAKE_HOST', 'django-dbbackup')
# Directory to use for temporary files
TMP_DIR = getattr(settings, 'DBBACKUP_TMP_DIR', '/tmp')
# Days to keep backups
CLEANUP_KEEP = getattr(settings, 'DBBACKUP_CLEANUP_KEEP', 10)
# Days to keep backed up media (default: same as CLEANUP_KEEP)
CLEANUP_KEEP_MEDIA = getattr(settings, 'DBBACKUP_CLEANUP_KEEP_MEDIA', CLEANUP_KEEP)
MEDIA_PATH = getattr(settings, 'DBBACKUP_MEDIA_PATH', settings.MEDIA_ROOT)
DATE_FORMAT = getattr(settings, 'DBBACKUP_DATE_FORMAT', '%Y-%m-%d-%H%M%S')
SERVER_NAME = getattr(settings, 'DBBACKUP_SERVER_NAME', '')
FORCE_ENGINE = getattr(settings, 'DBBACKUP_FORCE_ENGINE', '')
FILENAME_TEMPLATE = getattr(settings, 'DBBACKUP_FILENAME_TEMPLATE', '{databasename}-{servername}-{datetime}.{extension}')
READ_FILE = '<READ_FILE>'
WRITE_FILE = '<WRITE_FILE>'
# Environment dictionary
BACKUP_ENVIRONMENT = {}
RESTORE_ENVIRONMENT = {}
# TODO: Unify backup and restore commands to support adding extra flags instead
# of just having full statements.
SQLITE_BACKUP_COMMANDS = getattr(settings, 'DBBACKUP_SQLITE_BACKUP_COMMANDS', [
[READ_FILE, '{databasename}'],
])
SQLITE_RESTORE_COMMANDS = getattr(settings, 'DBBACKUP_SQLITE_RESTORE_COMMANDS', [
[WRITE_FILE, '{databasename}'],
])
# TODO: Why are these even here? The MySQL commands are built in a dynamic
# fashion through MySQLSettings
MYSQL_BACKUP_COMMANDS = getattr(settings, 'DBBACKUP_MYSQL_BACKUP_COMMANDS', None)
MYSQL_RESTORE_COMMANDS = getattr(settings, 'DBBACKUP_MYSQL_RESTORE_COMMANDS', None)
POSTGRESQL_BACKUP_COMMANDS = getattr(settings, 'DBBACKUP_POSTGRESQL_BACKUP_COMMANDS', None)
POSTGRESQL_RESTORE_COMMANDS = getattr(settings, 'DBBACKUP_POSTGRESQL_RESTORE_COMMANDS', None)
POSTGRESQL_RESTORE_SINGLE_TRANSACTION = getattr(settings, 'DBBACKUP_POSTGRESQL_RESTORE_SINGLE_TRANSACTION', True)
POSTGIS_SPATIAL_REF = getattr(settings, 'DBBACKUP_POSTGIS_SPACIAL_REF', False)
FAILURE_RECIPIENTS = getattr(settings, 'DBBACKUP_FAILURE_RECIPIENTS', settings.ADMINS)
SEND_EMAIL = getattr(settings, 'DBBACKUP_SEND_EMAIL', True)
SERVER_EMAIL = getattr(settings, 'DBBACKUP_SERVER_EMAIL', settings.SERVER_EMAIL)
GPG_ALWAYS_TRUST = getattr(settings, 'DBBACKUP_GPG_ALWAYS_TRUST', False)
GPG_RECIPIENT = getattr(settings, 'DBBACKUP_GPG_RECIPIENT', None)  # chained assignment here used to clobber GPG_ALWAYS_TRUST above
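# Example override from a project's Django settings module (these are the
# 'DBBACKUP_'-prefixed names read above):
#     DBBACKUP_CLEANUP_KEEP = 30
#     DBBACKUP_GPG_RECIPIENT = 'backups@example.com'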
| 41.555556 | 121 | 0.800611 |
e3b542f188d3a18abd69225d2567fab5713cf533 | 946 | py | Python | cyclofit/rides/forms.py | piyushmohan01/CycloFit-SEPM | f97a7032e22e29daf48f0796462a22e58b20709c | ["MIT"] | 4 | 2021-09-10T00:30:15.000Z | 2022-03-03T09:05:03.000Z | cyclofit/rides/forms.py | piyushmohan01/CycloFit-SEPM | f97a7032e22e29daf48f0796462a22e58b20709c | ["MIT"] | 1 | 2022-03-03T05:41:13.000Z | 2022-03-03T05:44:41.000Z | cyclofit/rides/forms.py | piyushmohan01/CycloFit-SEPM | f97a7032e22e29daf48f0796462a22e58b20709c | ["MIT"] | 3 | 2021-05-18T18:19:55.000Z | 2021-10-13T11:29:56.000Z |
from flask_wtf import FlaskForm
from wtforms import SelectField, IntegerField, SubmitField, RadioField
from wtforms.validators import DataRequired
class NewRideForm(FlaskForm):
    # fields used to estimate ride distance and calorie count
    duration = SelectField('Ride Duration', choices=[(15, '15 Min'), (30, '30 Min'),
                                                     (45, '45 Min'), (60, '1 Hour'), (90, '1.5 Hour')])
    avg_speed = SelectField('Average Speed', choices=[(15, '15 KM/H'), (20, '20 KM/H'),
                                                      (25, '25 KM/H'), (30, '30 KM/H'), (35, '35 KM/H')])
    rider_weight = IntegerField('Rider Weight',
                                validators=[DataRequired('Please enter your weight!')])
    cycle_type = SelectField('Cyclo-Type', choices=[('Premium', 'Cyclo-Premium'), ('Health', 'Cyclo-Health'),
                                                    ('Student', 'Cyclo-Student'), ('Afford', 'Cyclo-Afford')])
    ride_rating = RadioField('Ride Rating', choices=[('1', '1'), ('2', '2'),
                                                     ('3', '3'), ('4', '4'), ('5', '5')])
submit = SubmitField('Submit')
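# Usage sketch (hypothetical Flask view; assumes the usual Flask-WTF flow):
#
#     @app.route('/rides/new', methods=['GET', 'POST'])
#     def new_ride():
#         form = NewRideForm()
#         if form.validate_on_submit():
#             ...  # read form.duration.data, form.avg_speed.data, etc.
#         return render_template('new_ride.html', form=form)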
| 52.555556 | 94 | 0.615222 |
0878e6268fab762b577b608c79988c3f34d42ff5 | 22,285 | py | Python | test/functional/test_framework/util.py | Everyone-Coin/scholarshipcoin | 5ce0333efff7e20387c467ae8e9dc2a11184c5fe | ["MIT"] | 4 | 2021-01-26T09:19:26.000Z | 2021-08-15T12:42:10.000Z | test/functional/test_framework/util.py | Everyone-Coin/scholarshipcoin | 5ce0333efff7e20387c467ae8e9dc2a11184c5fe | ["MIT"] | 5 | 2021-01-26T18:18:48.000Z | 2021-03-24T20:45:27.000Z | test/functional/test_framework/util.py | Everyone-Coin/scholarshipcoin | 5ce0333efff7e20387c467ae8e9dc2a11184c5fe | ["MIT"] | 5 | 2021-02-01T19:15:22.000Z | 2022-02-07T02:52:38.000Z |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s SCHO too low! (Should be %s SCHO)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s SCHO too high! (Should be %s SCHO)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
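# Example (hypothetical node handle): expect RPC error code -8 with a matching
# message substring:
#     assert_raises_rpc_error(-8, "Block height out of range",
#                             node.getblockhash, 100000000)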
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
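# Worked example: quantizing to the 8-decimal satoshi grid always rounds
# toward zero, so 0.123456789 becomes 0.12345678.
def _example_satoshi_round():
    assert satoshi_round("0.123456789") == Decimal("0.12345678")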
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
    predicate_source = "'''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
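# A minimal usage sketch (`node` is an assumed RPC proxy): poll every 50ms
# until the mempool contains txid, failing the test after 10 seconds.
def _example_wait_until(node, txid):
    wait_until(lambda: txid in node.getrawmempool(), timeout=10)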
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
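# Worked example of the port scheme (mutates the process-wide PortSeed, so
# for illustration only): with PortSeed.n == 1, node 0 maps to
# 11000 + 0 + (8 * 1) % 4991 = 11008 for p2p and 16008 for rpc, which keeps
# concurrent test processes (seeded differently) from colliding.
def _example_ports():
    PortSeed.n = 1
    assert p2p_port(0) == 11008
    assert rpc_port(0) == 16008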
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "scholarship.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "scholarship.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "scholarship.conf")):
with open(os.path.join(datadir, "scholarship.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
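# A minimal topology sketch (`nodes` is an assumed list of RPC proxies):
# chain the nodes in a line, then join the two ends bidirectionally.
def _example_ring_topology(nodes):
    for i in range(len(nodes) - 1):
        connect_nodes(nodes[i], i + 1)
    connect_nodes_bi(nodes, 0, len(nodes) - 1)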
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
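# Worked example of the splitting rule above (`node` is an assumed RPC proxy
# whose getnewaddress returns distinct addresses): amount_in=10,
# amount_out=2, fee=0.01 leaves change of 7.99, which exceeds 2 * 2.01, so
# two outputs of 3.995 each are created instead of one.
def _example_make_change(node):
    outputs = make_change(node, Decimal("10"), Decimal("2"), Decimal("0.01"))
    assert sum(outputs.values()) == Decimal("7.99")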
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| 38.891798 | 140 | 0.652277 |
e5b5ec6a7e66cca97e462465b1990a63f99cf115 | 4,783 | py | Python | lib/acconeer_utils/clients/json/client.py | GoldenRed/acconeer-python-exploration | 5e60cbc105e532c4a6d5562ba29e195854e3d7c5 | ["BSD-3-Clause-Clear"] | 1 | 2019-10-15T15:57:50.000Z | 2019-10-15T15:57:50.000Z | lib/acconeer_utils/clients/json/client.py | GoldenRed/acconeer-python-exploration | 5e60cbc105e532c4a6d5562ba29e195854e3d7c5 | ["BSD-3-Clause-Clear"] | 2 | 2019-10-15T12:59:40.000Z | 2019-10-17T11:25:00.000Z | lib/acconeer_utils/clients/json/client.py | GoldenRed/acconeer-python-exploration | 5e60cbc105e532c4a6d5562ba29e195854e3d7c5 | ["BSD-3-Clause-Clear"] | null | null | null | from time import time
from copy import deepcopy
import logging
from distutils.version import StrictVersion
from acconeer_utils.clients.base import BaseClient, ClientError
from acconeer_utils.clients import links
from acconeer_utils.clients.json import protocol
from acconeer_utils.clients.base import decode_version_str
log = logging.getLogger(__name__)
class JSONClient(BaseClient):
def __init__(self, host, **kwargs):
super().__init__(**kwargs)
self._link = links.SocketLink(host)
self._session_cmd = None
self._session_ready = False
self._num_subsweeps = None
def _connect(self):
info = {}
self._link.connect()
cmd = {"cmd": "get_version"}
self._send_cmd(cmd)
try:
header, _ = self._recv_frame()
except links.LinkError as e:
raise ClientError("no response from server") from e
log.debug("connected and got a response")
if header["status"] != "ok":
raise ClientError("server error while connecting")
msg = header["message"].lower()
log.info("version msg: {}".format(msg))
startstr = "server version v"
if not msg.startswith(startstr):
log.warning("server version unknown")
return info
server_version_str = msg[len(startstr):].strip()
info.update(decode_version_str(server_version_str))
if info["strict_version"] >= StrictVersion("1.10"):
cmd = {"cmd": "get_board_sensor_count"}
self._send_cmd(cmd)
header, _ = self._recv_frame()
msg = header["message"]
board_sensor_count = int(msg)
info["board_sensor_count"] = board_sensor_count
return info
def _setup_session(self, config):
if isinstance(config, dict):
cmd = deepcopy(config)
log.warning("setup with raw dict config - you're on your own")
else:
cmd = protocol.get_dict_for_config(config)
cmd["output_format"] = "json+binary"
self._session_cmd = cmd
info = self._init_session()
log.debug("setup session")
return info
def _start_streaming(self):
if not self._session_ready:
self._init_session()
cmd = {"cmd": "start_streaming"}
self._send_cmd(cmd)
header, _ = self._recv_frame()
if header["status"] != "start":
raise ClientError
log.debug("started streaming")
def _get_next(self):
header, payload = self._recv_frame()
status = header["status"]
if status == "end":
raise ClientError("session ended")
elif status != "ok":
raise ClientError("server error")
return protocol.decode_stream_frame(header, payload, self.squeeze, self._num_subsweeps)
def _stop_streaming(self):
cmd = {"cmd": "stop_streaming"}
self._send_cmd(cmd)
t0 = time()
while time() - t0 < self._link._timeout:
header, _ = self._recv_frame()
status = header["status"]
if status == "end":
break
elif status == "ok": # got streaming data
continue
else:
raise ClientError
else:
raise ClientError
self._session_ready = False
log.debug("stopped streaming")
def _disconnect(self):
self._link.disconnect()
self._session_cmd = None
self._session_ready = False
log.debug("disconnected")
def _init_session(self, retry=True):
if self._session_cmd is None:
raise ClientError
self._send_cmd(self._session_cmd)
header, _ = self._recv_frame()
if header["status"] == "error":
if retry:
return self._init_session(retry=False)
else:
raise ClientError("server error while initializing session")
elif header["status"] != "ok":
raise ClientError("got unexpected header")
log.debug("session initialized")
self._session_ready = True
info = protocol.get_session_info_for_header(header)
self._num_subsweeps = info.get("number_of_subsweeps")
return info
def _send_cmd(self, cmd_dict):
cmd_dict["api_version"] = 2
packed = protocol.pack(cmd_dict)
self._link.send(packed)
def _recv_frame(self):
packed_header = self._link.recv_until(b'\n')
header = protocol.unpack(packed_header)
payload_len = header["payload_size"]
if payload_len > 0:
payload = self._link.recv(payload_len)
else:
payload = None
return header, payload
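# A minimal usage sketch (host and config values are assumptions; connect,
# setup_session, start_streaming, get_next, stop_streaming and disconnect
# are the public wrappers inherited from BaseClient):
def _example_stream(host, config, n_sweeps=10):
    client = JSONClient(host)
    client.connect()
    client.setup_session(config)
    client.start_streaming()
    for _ in range(n_sweeps):
        _ = client.get_next()  # one decoded frame per call
    client.stop_streaming()
    client.disconnect()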
| 28.640719 | 95 | 0.597324 |
7d4744d2d1a40bd217e0b6099d4a6406e19cdfc0 | 34,110 | py | Python | downstream_tasks/run_classifier.py | meganbarnes/clinicalBERT | b1ec6de9094df525011159acd8c8718c7949e376 | ["MIT"] | null | null | null | downstream_tasks/run_classifier.py | meganbarnes/clinicalBERT | b1ec6de9094df525011159acd8c8718c7949e376 | ["MIT"] | null | null | null | downstream_tasks/run_classifier.py | meganbarnes/clinicalBERT | b1ec6de9094df525011159acd8c8718c7949e376 | ["MIT"] | null | null | null |
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
#added
import json
from random import shuffle
import math
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
# NEW
class MedNLIProcessor(DataProcessor):
def _chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
file_path = os.path.join(data_dir, "mli_train_v1.jsonl")
return self._create_examples(file_path)
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
file_path = os.path.join(data_dir, "mli_dev_v1.jsonl")
return self._create_examples(file_path)
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
file_path = os.path.join(data_dir, "mli_test_v1.jsonl")
return self._create_examples(file_path)
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, file_path):
examples = []
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
example = json.loads(line)
examples.append(
InputExample(guid=example['pairID'], text_a=example['sentence1'],
text_b=example['sentence2'], label=example['gold_label']))
return examples
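# Each line of the mli_*.jsonl files is one JSON object; the fields consumed
# above look like this (values are illustrative):
# {"pairID": "ex-1", "sentence1": "premise text",
#  "sentence2": "hypothesis text", "gold_label": "entailment"}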
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
max_len = 0
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
seq_len = len(tokens_a) + len(tokens_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
seq_len = len(tokens_a)
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
if seq_len > max_len:
max_len = seq_len
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
print('Max Sequence Length: %d' %max_len)
return features
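# Worked sketch of the padding above: with max_seq_length=8 and the tokens
# [CLS] hi [SEP], input_ids get five trailing zeros and input_mask becomes
# [1, 1, 1, 0, 0, 0, 0, 0], so attention ignores the padded positions.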
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
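# Worked example of the heuristic above: the longer sequence is trimmed one
# token at a time, so lengths (5, 3) truncated to max_length=6 end at (3, 3).
def _example_truncate_seq_pair():
    a, b = list("abcde"), list("xyz")
    _truncate_seq_pair(a, b, 6)
    assert (len(a), len(b)) == (3, 3)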
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
# From hugging face distiller.py
def save_checkpoint(model, dump_path, checkpoint_name: str = "checkpoint.pth"):
"""
Save the current state. Only by the master process.
"""
mdl_to_save = model.module if hasattr(model, "module") else model
    mdl_to_save.config.save_pretrained(dump_path)
    state_dict = mdl_to_save.state_dict()
    torch.save(state_dict, os.path.join(dump_path, checkpoint_name))
def setup_parser():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese, biobert.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test",
action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--model_loc', type=str, default='', help="Specify the location of the bio or clinical bert model")
return parser
def main():
parser = setup_parser()
args = parser.parse_args()
# specifies the path where the biobert or clinical bert model is saved
if args.bert_model == 'biobert' or args.bert_model == 'clinical_bert':
args.bert_model = args.model_loc
print(args.bert_model)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"mednli": MedNLIProcessor
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"mednli": 3
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    print('TRAIN')
    train = processor.get_train_examples(args.data_dir)
    print([(train[i].text_a, train[i].text_b, train[i].label) for i in range(3)])
    print('DEV')
    dev = processor.get_dev_examples(args.data_dir)
    print([(dev[i].text_a, dev[i].text_b, dev[i].label) for i in range(3)])
    print('TEST')
    test = processor.get_test_examples(args.data_dir)
    print([(test[i].text_a, test[i].text_b, test[i].label) for i in range(3)])
train_examples = None
num_train_optimization_steps = None
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank))
model = BertForSequenceClassification.from_pretrained(args.bert_model,
cache_dir=cache_dir,
num_labels = num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for epoch_num in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Saving checkpoint
save_checkpoint(model, args.output_dir, "epoch_%d_checkpoint.pth" % epoch_num)
if args.do_train:
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
# Load a trained model and config that you have fine-tuned
config = BertConfig(output_config_file)
model = BertForSequenceClassification(config, num_labels=num_labels)
model.load_state_dict(torch.load(output_model_file))
else:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
logits = model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
loss = tr_loss/nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': loss}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if args.do_test and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
test_examples = processor.get_test_examples(args.data_dir)
test_features = convert_examples_to_features(
test_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running testing *****")
logger.info(" Num examples = %d", len(test_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
model.eval()
test_loss, test_accuracy = 0, 0
nb_test_steps, nb_test_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc="Testing"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_test_loss = model(input_ids, segment_ids, input_mask, label_ids)
logits = model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_test_accuracy = accuracy(logits, label_ids)
test_loss += tmp_test_loss.mean().item()
test_accuracy += tmp_test_accuracy
nb_test_examples += input_ids.size(0)
nb_test_steps += 1
test_loss = test_loss / nb_test_steps
test_accuracy = test_accuracy / nb_test_examples
loss = tr_loss/nb_tr_steps if args.do_train else None
result = {'test_loss': test_loss,
'test_accuracy': test_accuracy,
'global_step': global_step,
'loss': loss}
output_test_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_file, "w") as writer:
logger.info("***** Test results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 42.58427 | 139 | 0.60815 |
20f537268340330bc1bb1ae8fcf9d69d467c54f2 | 6,768 | py | Python | std/huggingface/mlm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null | std/huggingface/mlm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null | std/huggingface/mlm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"] | null | null | null | # Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune for masked language modeling (BERT, ALBERT, RoBERTa...)
import logging
import math
import random
import torch
from datasets import load_dataset
from functools import partial
from torch.utils.data import DataLoader
from transformers import AutoModelForMaskedLM, DataCollatorForLanguageModeling
from .params import TRAIN, EVAL, ALL, EACH
from .runner import Runner as Base
from .utils import group_texts
log = logging.getLogger(__name__)
class Runner(Base):
AutoModel = AutoModelForMaskedLM
@property
def dataset(self):
if self._dataset is None:
ps = self.params
if ps.dataset_name is not None:
y = load_dataset(ps.dataset_name, ps.dataset_config)
if EVAL not in y.keys():
y[EVAL] = load_dataset(ps.dataset_name, ps.dataset_config, split=f"train[:{ps.split_percent}%]")
y[TRAIN] = load_dataset(ps.dataset_name, ps.dataset_config, split=f"train[{ps.split_percent}%:]")
else:
x, xs = None, {}
if ps.eval_file is not None:
xs[EVAL] = x = ps.eval_file
if ps.train_file is not None:
xs[TRAIN] = x = ps.train_file
x = x.split(".")[-1]
if x == "txt":
x = "text"
y = load_dataset(x, data_files=xs)
if EVAL not in y.keys():
y[EVAL] = load_dataset(x, data_files=xs, split=f"train[:{ps.split_percent}%]")
y[TRAIN] = load_dataset(x, data_files=xs, split=f"train[{ps.split_percent}%:]")
self._dataset = y
return self._dataset
@property
def cols(self):
if self._cols is None:
cs = self.dataset[TRAIN].column_names
t = "text" if "text" in cs else cs[0]
self._cols = {ALL: cs, EACH: [t]}
return self._cols
@property
def tokenizer(self):
if self._tokenizer is None:
ps, t = self.params, super().tokenizer
if ps.max_seq_length is None:
b = t.model_max_length
if b > 1024:
log.warning(f"Using max_seq_length=1024")
b = 1024
else:
if ps.max_seq_length > t.model_max_length:
log.warning(f"Using max_seq_length={t.model_max_length}")
b = min(ps.max_seq_length, t.model_max_length)
self.max_seq_length = b
return self._tokenizer
@property
def train_ds(self):
if self._train_ds is None:
ps, mgr, ds = self.params, self.mgr, self.dataset
if ps.line_by_line:
with mgr.main_process_first():
self._dataset = y = ds.map(
self.prep_for_train,
batched=True,
num_proc=ps.preproc_num_workers,
remove_columns=[self.cols[EACH][0]],
load_from_cache_file=not ps.overwrite_cache,
desc="Running tokenizer line_by_line",
)
else:
with mgr.main_process_first():
y = ds.map(
self.prep_for_train,
batched=True,
num_proc=ps.preproc_num_workers,
remove_columns=self.cols[ALL],
load_from_cache_file=not ps.overwrite_cache,
desc="Running tokenizer on every text",
)
with mgr.main_process_first():
self._dataset = y = y.map(
partial(group_texts, self.max_seq_length),
batched=True,
num_proc=ps.preproc_num_workers,
load_from_cache_file=not ps.overwrite_cache,
desc=f"Grouping texts in blocks of {self.max_seq_length}",
)
y = y[TRAIN]
if ps.max_train_samples is not None:
y = y.select(range(ps.max_train_samples))
for i in random.sample(range(len(y)), 3):
log.info(f"Sample {i} of the training set: {y[i]}")
self._train_ds = y
return self._train_ds
def prep_for_train(self, xs):
ps, c = self.params, self.cols[EACH][0]
if ps.line_by_line:
xs[c] = [x for x in xs[c] if len(x) > 0 and not x.isspace()]
return self.tokenizer(
xs[c],
padding=self.padding,
truncation=True,
max_length=self.max_seq_length,
return_special_tokens_mask=True,
)
else:
return self.tokenizer(xs[c], return_special_tokens_mask=True)
@property
def loaders(self):
if self._loaders is None:
ps = self.params
c = DataCollatorForLanguageModeling(self.tokenizer, mlm_probability=ps.mlm_probability)
t = DataLoader(self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.per_device_train_batch_size)
e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.per_device_eval_batch_size)
self._loaders = {TRAIN: t, EVAL: e}
return self._loaders
def eval_epoch(self, e):
m, mgr = self.model, self.mgr
m.eval()
y = []
for xs in self.loaders[EVAL]:
with torch.no_grad():
ys = m(**xs)
y.append(mgr.gather(ys.loss.repeat(self.params.per_device_eval_batch_size)))
y = torch.cat(y)[: len(self.eval_ds)]
try:
y = math.exp(torch.mean(y))
except OverflowError:
y = float("inf")
mgr.print(f"epoch {e}: perplexity: {y}")
def main():
x = Runner()
x.dataset
x.config
x.tokenizer
x.model
x.model.resize_token_embeddings(len(x.tokenizer))
x.loaders
x.prepare()
x.train()
x.save()
if __name__ == "__main__":
main()
| 37.392265 | 117 | 0.552305 |
260351fab4774241d51f99be02133bf480309d6f | 3,334 | py | Python | original_script.py | codepost-io/heatmap-viewer | a8edc17ac0a01b7aca22cb9e9ec897387272a5ff | ["MIT"] | 1 | 2019-08-22T22:19:39.000Z | 2019-08-22T22:19:39.000Z | original_script.py | codepost-io/heatmap-viewer | a8edc17ac0a01b7aca22cb9e9ec897387272a5ff | ["MIT"] | null | null | null | original_script.py | codepost-io/heatmap-viewer | a8edc17ac0a01b7aca22cb9e9ec897387272a5ff | ["MIT"] | null | null | null | import requests
import functools
import pandas as pd # Package to manipulate tables of data
import seaborn as sns # Package to create visual heatmap
import matplotlib.pyplot as plt # Package to plot heatmap
api_key = "<API-KEY>" # Temporary API token provided to a CourseAdmin user
headers = {"Authorization": api_key} # Set authorization token in header that will be passed with each api request
url = 'http://api.codepost.io/' # Base URL for all API requests
s = requests.Session()
####### Calculate the average grade of an assignment
def get_submissions(assignmentID):
r = requests.get(url + 'assignments/%s/submissions/' % str(assignmentID), headers=headers)
return r.json()
def avg_grade(assignmentID):
    submissions = get_submissions(assignmentID=assignmentID) # Get all submissions for an assignment
    graded_submissions = [sub for sub in submissions if sub['grade']] # Filter out ungraded submissions (grade == null)
    if not graded_submissions: # Guard against division by zero when nothing is graded yet
        print("No graded submissions for this assignment yet")
        return
    avg_grade = functools.reduce(lambda x,y: x + y['grade'], graded_submissions, 0) / len(graded_submissions)
    print("Average grade on this assignment is %s" % avg_grade)
avg_grade(2) # Get the average grade for assignment with id 2
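# Worked example of the reduce above (grades are illustrative): [90, 80, 100]
# accumulates to 270, divided by 3 graded submissions gives 90.0.
def _example_avg():
    graded = [{'grade': g} for g in (90, 80, 100)]
    assert functools.reduce(lambda x, y: x + y['grade'], graded, 0) / len(graded) == 90.0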
####### Example 2: For a given assignment, create and plot a heatmap of rubricComment usage by Grader
def getCommentAuthor(commentID):
r = requests.get(url + 'comments/%s/' % str(commentID), headers=headers)
return r.json()['author']
def heatmap(assignmentID):
# hmap is a dictionary mapping {'graderEmail':{ ('rubricCommentName', 'rubricCommentID') : numTimesUsed }}
hmap = {}
# Get array of rubricComments for an assignment, each of which has fields 'id', 'text', and 'comments' (array of comment ids)
r = requests.get(url + 'assignments/%s/rubric/' % str(assignmentID), headers=headers)
for rubricComment in r.json()['rubricComments']:
rubricCommentIdentifier = (rubricComment['text'], rubricComment['id']) # Create a unique identifier of (text, id)
linkedCommentIDs = rubricComment['comments'] # Get all the submission comments that are linked to the rubricComment
for commentID in linkedCommentIDs:
# For each submission comment linked to the rubric comment, get the comment's author
            grader = getCommentAuthor(commentID)
# Update the mapping
if grader in hmap and rubricCommentIdentifier in hmap[grader]:
hmap[grader][rubricCommentIdentifier] += 1
elif grader in hmap:
hmap[grader][rubricCommentIdentifier] = 1
else:
hmap[grader] = {rubricCommentIdentifier: 1}
# Heatmap styling and plotting - once the data is pulled, package choice and styling up to you :)
dataframe = pd.DataFrame(hmap)
dataframe.fillna(0, inplace=True) # Fill in zeroes for empty grader, rubricComment pairs
dataframe.rename(columns=lambda x: x.split("@")[0],inplace=True) # strip out netID for plot simplicity
dataframe = dataframe.reindex(sorted(dataframe.columns), axis=1) # sort columns
sns.heatmap(dataframe, cmap=sns.light_palette("green"), cbar_kws={'label': '# Comments'})
plt.xlabel('Grader emails')
plt.ylabel('Rubric Comment text - id')
plt.tight_layout()
plt.show()
heatmap(2) # Plot a heatmap for assignment with id 2 | 53.774194 | 129 | 0.705159 |
ab35f89af753121beda31c85107913756ecccf46 | 53,512 | py | Python | src/twisted/conch/test/test_keys.py | mmilata/twisted | a24e9345c609f824a813febfc6af7f350812dbc0 | ["Unlicense", "MIT"] | 4 | 2020-07-05T21:15:38.000Z | 2021-04-09T05:42:19.000Z | src/twisted/conch/test/test_keys.py | mmilata/twisted | a24e9345c609f824a813febfc6af7f350812dbc0 | ["Unlicense", "MIT"] | 10 | 2020-06-05T23:30:34.000Z | 2021-09-22T18:56:54.000Z | src/twisted/conch/test/test_keys.py | mmilata/twisted | a24e9345c609f824a813febfc6af7f350812dbc0 | ["Unlicense", "MIT"] | 5 | 2020-05-12T11:04:44.000Z | 2020-05-31T14:08:00.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
from __future__ import absolute_import, division
from twisted.python.reflect import requireModule
cryptography = requireModule("cryptography")
if cryptography is None:
skipCryptography = 'Cannot run without cryptography.'
pyasn1 = requireModule("pyasn1")
if cryptography and pyasn1:
from twisted.conch.ssh import keys, common, sexpy
import base64
import os
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.trial import unittest
from twisted.python.compat import long
from twisted.python.filepath import FilePath
class KeyTests(unittest.TestCase):
if cryptography is None:
skip = skipCryptography
if pyasn1 is None:
skip = "Cannot run without PyASN1"
def setUp(self):
self.rsaObj = keys.Key._fromRSAComponents(
n=keydata.RSAData['n'],
e=keydata.RSAData['e'],
d=keydata.RSAData['d'],
p=keydata.RSAData['p'],
q=keydata.RSAData['q'],
u=keydata.RSAData['u'],
)._keyObject
self.dsaObj = keys.Key._fromDSAComponents(
y=keydata.DSAData['y'],
p=keydata.DSAData['p'],
q=keydata.DSAData['q'],
g=keydata.DSAData['g'],
x=keydata.DSAData['x'],
)._keyObject
self.ecObj = keys.Key._fromECComponents(
x=keydata.ECDatanistp256['x'],
y=keydata.ECDatanistp256['y'],
privateValue=keydata.ECDatanistp256['privateValue'],
curve=keydata.ECDatanistp256['curve']
)._keyObject
self.ecObj384 = keys.Key._fromECComponents(
x=keydata.ECDatanistp384['x'],
y=keydata.ECDatanistp384['y'],
privateValue=keydata.ECDatanistp384['privateValue'],
curve=keydata.ECDatanistp384['curve']
)._keyObject
self.ecObj521 = keys.Key._fromECComponents(
x=keydata.ECDatanistp521['x'],
y=keydata.ECDatanistp521['y'],
privateValue=keydata.ECDatanistp521['privateValue'],
curve=keydata.ECDatanistp521['curve']
)._keyObject
self.rsaSignature = (
b"\x00\x00\x00\x07ssh-rsa\x00\x00\x01\x00~Y\xa3\xd7\xfdW\xc6pu@"
b"\xd81\xa1S\xf3O\xdaE\xf4/\x1ex\x1d\xf1\x9a\xe1G3\xd9\xd6U\x1f"
b"\x8c\xd9\x1b\x8b\x90\x0e\x8a\xc1\x91\xd8\x0cd\xc9\x0c\xe7\xb2"
b"\xc9,'=\x15\x1cQg\xe7x\xb5j\xdbI\xc0\xde\xafb\xd7@\xcar\x0b"
b"\xce\xa3zM\x151q5\xde\xfa\x0c{wjKN\x88\xcbC\xe5\x89\xc3\xf9i"
b"\x96\x91\xdb\xca}\xdbR\x1a\x13T\xf9\x0cDJH\x0b\x06\xcfl\xf3"
b"\x13[\x82\xa2\x9d\x93\xfd\x8e\xce|\xfb^n\xd4\xed\xe2\xd1\x8a"
b"\xb7aY\x9bB\x8f\xa4\xc7\xbe7\xb5\x0b9j\xa4.\x87\x13\xf7\xf0"
b"\xda\xd7\xd2\xf9\x1f9p\xfd?\x18\x0f\xf2N\x9b\xcf/\x1e)\n>A\x19"
b"\xc2\xb5j\xf9UW\xd4\xae\x87B\xe6\x99t\xa2y\x90\x98\xa2\xaaf\xcb"
b"\x86\xe5k\xe3\xce\xe0u\x1c\xeb\x93\x1aN\x88\xc9\x93Y\xc3.V\xb1L"
b"44`C\xc7\xa66\xaf\xfa\x7f\x04Y\x92\xfa\xa4\x1a\x18%\x19\xd5 4^"
b"\xb9rY\xba \x01\xf9.\x89%H\xbe\x1c\x83A\x96"
)
self.dsaSignature = (
b'\x00\x00\x00\x07ssh-dss\x00\x00\x00(?\xc7\xeb\x86;\xd5TFA\xb4'
b'\xdf\x0c\xc4E@4,d\xbc\t\xd9\xae\xdd[\xed-\x82nQ\x8cf\x9b\xe8\xe1'
b'jrg\x84p<'
)
self.patch(randbytes, 'secureRandom', lambda x: b'\xff' * x)
self.keyFile = self.mktemp()
with open(self.keyFile, 'wb') as f:
f.write(keydata.privateRSA_lsh)
def tearDown(self):
os.unlink(self.keyFile)
def test_size(self):
"""
        The L{keys.Key.size} method returns the size of the key object in bits.
"""
self.assertEqual(keys.Key(self.rsaObj).size(), 2048)
self.assertEqual(keys.Key(self.dsaObj).size(), 1024)
self.assertEqual(keys.Key(self.ecObj).size(), 256)
self.assertEqual(keys.Key(self.ecObj384).size(), 384)
self.assertEqual(keys.Key(self.ecObj521).size(), 521)
def test__guessStringType(self):
"""
Test that the _guessStringType method guesses string types
correctly.
"""
self.assertEqual(
keys.Key._guessStringType(keydata.publicRSA_openssh),
'public_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.publicDSA_openssh),
'public_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.publicECDSA_openssh),
'public_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_openssh),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_openssh_new),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_openssh),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_openssh_new),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateECDSA_openssh),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateECDSA_openssh_new),
'private_openssh')
self.assertEqual(
keys.Key._guessStringType(keydata.publicRSA_lsh), 'public_lsh')
self.assertEqual(
keys.Key._guessStringType(keydata.publicDSA_lsh), 'public_lsh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_lsh), 'private_lsh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_lsh), 'private_lsh')
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_agentv3), 'agentv3')
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_agentv3), 'agentv3')
self.assertEqual(
keys.Key._guessStringType(
b'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(
keys.Key._guessStringType(
b'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType(b'not a key'), None)
def test_public(self):
"""
The L{keys.Key.public} method returns a public key for both
public and private keys.
"""
# NB: This assumes that the private and public keys correspond
# to each other.
privateRSAKey = keys.Key.fromString(keydata.privateRSA_openssh)
publicRSAKey = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertEqual(privateRSAKey.public(), publicRSAKey.public())
privateDSAKey = keys.Key.fromString(keydata.privateDSA_openssh)
publicDSAKey = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertEqual(privateDSAKey.public(), publicDSAKey.public())
privateECDSAKey = keys.Key.fromString(keydata.privateECDSA_openssh)
publicECDSAKey = keys.Key.fromString(keydata.publicECDSA_openssh)
self.assertEqual(privateECDSAKey.public(), publicECDSAKey.public())
def test_isPublic(self):
"""
        The L{keys.Key.isPublic} method returns True for public keys and
        False otherwise.
"""
rsaKey = keys.Key.fromString(keydata.privateRSA_openssh)
dsaKey = keys.Key.fromString(keydata.privateDSA_openssh)
ecdsaKey = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertTrue(rsaKey.public().isPublic())
self.assertFalse(rsaKey.isPublic())
self.assertTrue(dsaKey.public().isPublic())
self.assertFalse(dsaKey.isPublic())
self.assertTrue(ecdsaKey.public().isPublic())
self.assertFalse(ecdsaKey.isPublic())
def _testPublicPrivateFromString(self, public, private, type, data):
self._testPublicFromString(public, type, data)
self._testPrivateFromString(private, type, data)
def _testPublicFromString(self, public, type, data):
publicKey = keys.Key.fromString(public)
self.assertTrue(publicKey.isPublic())
self.assertEqual(publicKey.type(), type)
for k, v in publicKey.data().items():
self.assertEqual(data[k], v)
def _testPrivateFromString(self, private, type, data):
privateKey = keys.Key.fromString(private)
self.assertFalse(privateKey.isPublic())
self.assertEqual(privateKey.type(), type)
for k, v in data.items():
self.assertEqual(privateKey.data()[k], v)
def test_fromOpenSSH(self):
"""
Test that keys are correctly generated from OpenSSH strings.
"""
self._testPublicPrivateFromString(keydata.publicECDSA_openssh,
keydata.privateECDSA_openssh, 'EC', keydata.ECDatanistp256)
self._testPublicPrivateFromString(keydata.publicRSA_openssh,
keydata.privateRSA_openssh, 'RSA', keydata.RSAData)
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_encrypted,
passphrase=b'encrypted'),
keys.Key.fromString(keydata.privateRSA_openssh))
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_alternate),
keys.Key.fromString(keydata.privateRSA_openssh))
self._testPublicPrivateFromString(keydata.publicDSA_openssh,
keydata.privateDSA_openssh, 'DSA', keydata.DSAData)
def test_fromOpenSSHErrors(self):
"""
Tests for invalid key types.
"""
badKey = b"""-----BEGIN FOO PRIVATE KEY-----
MIGkAgEBBDAtAi7I8j73WCX20qUM5hhHwHuFzYWYYILs2Sh8UZ+awNkARZ/Fu2LU
LLl5RtOQpbWgBwYFK4EEACKhZANiAATU17sA9P5FRwSknKcFsjjsk0+E3CeXPYX0
Tk/M0HK3PpWQWgrO8JdRHP9eFE9O/23P8BumwFt7F/AvPlCzVd35VfraFT0o4cCW
G0RqpQ+np31aKmeJshkcYALEchnU+tQ=
-----END EC PRIVATE KEY-----"""
self.assertRaises(keys.BadKeyError,
keys.Key._fromString_PRIVATE_OPENSSH, badKey, None)
def test_fromOpenSSH_with_whitespace(self):
"""
If key strings have trailing whitespace, it should be ignored.
"""
# from bug #3391, since our test key data doesn't have
# an issue with appended newlines
privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
self.assertEqual(keys.Key.fromString(privateDSAData),
keys.Key.fromString(privateDSAData + b'\n'))
def test_fromNewerOpenSSH(self):
"""
Newer versions of OpenSSH generate encrypted keys which have a longer
IV than the older versions. These newer keys are also loaded.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh_encrypted_aes,
passphrase=b'testxp')
self.assertEqual(key.type(), 'RSA')
key2 = keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_aes + b'\n',
passphrase=b'testxp')
self.assertEqual(key, key2)
def test_fromOpenSSH_v1_format(self):
"""
OpenSSH 6.5 introduced a newer "openssh-key-v1" private key format
(made the default in OpenSSH 7.8). Loading keys in this format
produces identical results to loading the same keys in the old
PEM-based format.
"""
for old, new in (
(keydata.privateRSA_openssh, keydata.privateRSA_openssh_new),
(keydata.privateDSA_openssh, keydata.privateDSA_openssh_new),
(keydata.privateECDSA_openssh,
keydata.privateECDSA_openssh_new),
(keydata.privateECDSA_openssh384,
keydata.privateECDSA_openssh384_new),
(keydata.privateECDSA_openssh521,
keydata.privateECDSA_openssh521_new)):
self.assertEqual(
keys.Key.fromString(new), keys.Key.fromString(old))
self.assertEqual(
keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_new,
passphrase=b'encrypted'),
keys.Key.fromString(
keydata.privateRSA_openssh_encrypted,
passphrase=b'encrypted'))
def test_fromOpenSSH_windows_line_endings(self):
"""
Test that keys are correctly generated from OpenSSH strings with
Windows line endings.
"""
privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
self.assertEqual(
keys.Key.fromString(privateDSAData),
keys.Key.fromString(privateDSAData.replace(b'\n', b'\r\n')))
def test_fromLSHPublicUnsupportedType(self):
"""
        C{BadKeyError} exception is raised when a public key has an unknown
type.
"""
sexp = sexpy.pack([[b'public-key', [b'bad-key', [b'p', b'2']]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString, data=b'{' + base64.encodestring(sexp) + b'}',
)
def test_fromLSHPrivateUnsupportedType(self):
"""
        C{BadKeyError} exception is raised when a private key has an unknown
type.
"""
sexp = sexpy.pack([[b'private-key', [b'bad-key', [b'p', b'2']]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString, sexp,
)
def test_fromLSHRSA(self):
"""
        RSA public and private keys can be generated from LSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicRSA_lsh,
keydata.privateRSA_lsh,
'RSA',
keydata.RSAData,
)
def test_fromLSHDSA(self):
"""
        DSA public and private keys can be generated from LSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicDSA_lsh,
keydata.privateDSA_lsh,
'DSA',
keydata.DSAData,
)
def test_fromAgentv3(self):
"""
Test that keys are correctly generated from Agent v3 strings.
"""
self._testPrivateFromString(keydata.privateRSA_agentv3, 'RSA',
keydata.RSAData)
self._testPrivateFromString(keydata.privateDSA_agentv3, 'DSA',
keydata.DSAData)
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
b'\x00\x00\x00\x07ssh-foo'+ b'\x00\x00\x00\x01\x01'*5)
def test_fromStringErrors(self):
"""
keys.Key.fromString should raise BadKeyError when the key is invalid.
"""
self.assertRaises(keys.BadKeyError, keys.Key.fromString, b'')
# no key data with a bad key type
self.assertRaises(
keys.BadKeyError, keys.Key.fromString, b'', 'bad_type')
# trying to decrypt a key which doesn't support encryption
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
keydata.publicRSA_lsh, passphrase=b'unencrypted')
# trying to decrypt a key with the wrong passphrase
self.assertRaises(
keys.EncryptedKeyError, keys.Key.fromString,
keys.Key(self.rsaObj).toString(
'openssh', passphrase=b'encrypted'))
# key with no key data
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
b'-----BEGIN RSA KEY-----\nwA==\n')
# key with invalid DEK Info
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: weird type
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with invalid encryption type
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FOO-123-BAR,01234567
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with bad IV (AES)
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with bad IV (DES3)
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
def test_fromFile(self):
"""
Test that fromFile works correctly.
"""
self.assertEqual(keys.Key.fromFile(self.keyFile),
keys.Key.fromString(keydata.privateRSA_lsh))
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, 'bad_type')
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, passphrase='unencrypted')
def test_init(self):
"""
Test that the PublicKey object is initialized correctly.
"""
obj = keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject
key = keys.Key(obj)
self.assertEqual(key._keyObject, obj)
def test_equal(self):
"""
Test that Key objects are compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(
keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertTrue(rsa1 == rsa2)
self.assertFalse(rsa1 == rsa3)
self.assertFalse(rsa1 == dsa)
self.assertFalse(rsa1 == object)
self.assertFalse(rsa1 == None)
def test_notEqual(self):
"""
Test that Key objects are not-compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(
keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertFalse(rsa1 != rsa2)
self.assertTrue(rsa1 != rsa3)
self.assertTrue(rsa1 != dsa)
self.assertTrue(rsa1 != object)
self.assertTrue(rsa1 != None)
def test_dataError(self):
"""
The L{keys.Key.data} method raises RuntimeError for bad keys.
"""
badKey = keys.Key(b'')
self.assertRaises(RuntimeError, badKey.data)
def test_fingerprintdefault(self):
"""
Test that the fingerprint method returns fingerprint in
L{FingerprintFormats.MD5-HEX} format by default.
"""
self.assertEqual(keys.Key(self.rsaObj).fingerprint(),
'85:25:04:32:58:55:96:9f:57:ee:fb:a8:1a:ea:69:da')
self.assertEqual(keys.Key(self.dsaObj).fingerprint(),
'63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1')
def test_fingerprint_md5_hex(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.MD5-HEX} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(
keys.FingerprintFormats.MD5_HEX),
'85:25:04:32:58:55:96:9f:57:ee:fb:a8:1a:ea:69:da')
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(
keys.FingerprintFormats.MD5_HEX),
'63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1')
def test_fingerprintsha256(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.SHA256-BASE64} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(
keys.FingerprintFormats.SHA256_BASE64),
'FBTCOoknq0mHy+kpfnY9tDdcAJuWtCpuQMaV3EsvbUI=')
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(
keys.FingerprintFormats.SHA256_BASE64),
'Wz5o2YbKyxOEcJn1au/UaALSVruUzfz0vaLI1xiIGyY=')
def test_fingerprintBadFormat(self):
"""
A C{BadFingerPrintFormat} error is raised when unsupported
formats are requested.
"""
with self.assertRaises(keys.BadFingerPrintFormat) as em:
keys.Key(self.rsaObj).fingerprint('sha256-base')
self.assertEqual('Unsupported fingerprint format: sha256-base',
em.exception.args[0])
def test_type(self):
"""
Test that the type method returns the correct type for an object.
"""
self.assertEqual(keys.Key(self.rsaObj).type(), 'RSA')
self.assertEqual(keys.Key(self.rsaObj).sshType(), b'ssh-rsa')
self.assertEqual(keys.Key(self.dsaObj).type(), 'DSA')
self.assertEqual(keys.Key(self.dsaObj).sshType(), b'ssh-dss')
self.assertEqual(keys.Key(self.ecObj).type(), 'EC')
self.assertEqual(keys.Key(self.ecObj).sshType(),
keydata.ECDatanistp256['curve'])
self.assertRaises(RuntimeError, keys.Key(None).type)
self.assertRaises(RuntimeError, keys.Key(None).sshType)
self.assertRaises(RuntimeError, keys.Key(self).type)
self.assertRaises(RuntimeError, keys.Key(self).sshType)
def test_fromBlobUnsupportedType(self):
"""
        A C{BadKeyError} error is raised when the blob has an unsupported
key type.
"""
badBlob = common.NS(b'ssh-bad')
self.assertRaises(keys.BadKeyError,
keys.Key.fromString, badBlob)
def test_fromBlobRSA(self):
"""
A public RSA key is correctly generated from a public key blob.
"""
rsaPublicData = {
'n': keydata.RSAData['n'],
'e': keydata.RSAData['e'],
}
rsaBlob = (
common.NS(b'ssh-rsa') +
common.MP(rsaPublicData['e']) +
common.MP(rsaPublicData['n'])
)
rsaKey = keys.Key.fromString(rsaBlob)
self.assertTrue(rsaKey.isPublic())
self.assertEqual(rsaPublicData, rsaKey.data())
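    # common.NS and common.MP implement the SSH wire encodings used in these
    # blobs: NS(b'ssh-rsa') yields b'\x00\x00\x00\x07ssh-rsa' (a
    # length-prefixed string) and MP(0x10001) yields
    # b'\x00\x00\x00\x03\x01\x00\x01' (a length-prefixed
    # multiple-precision integer).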
def test_fromBlobDSA(self):
"""
A public DSA key is correctly generated from a public key blob.
"""
dsaPublicData = {
'p': keydata.DSAData['p'],
'q': keydata.DSAData['q'],
'g': keydata.DSAData['g'],
'y': keydata.DSAData['y'],
}
dsaBlob = (
common.NS(b'ssh-dss') +
common.MP(dsaPublicData['p']) +
common.MP(dsaPublicData['q']) +
common.MP(dsaPublicData['g']) +
common.MP(dsaPublicData['y'])
)
dsaKey = keys.Key.fromString(dsaBlob)
self.assertTrue(dsaKey.isPublic())
self.assertEqual(dsaPublicData, dsaKey.data())
def test_fromBlobECDSA(self):
"""
Key.fromString generates ECDSA keys from blobs.
"""
from cryptography import utils
ecPublicData = {
'x': keydata.ECDatanistp256['x'],
'y': keydata.ECDatanistp256['y'],
'curve': keydata.ECDatanistp256['curve']
}
ecblob = (common.NS(ecPublicData['curve']) +
common.NS(ecPublicData['curve'][-8:]) +
common.NS(b'\x04' +
utils.int_to_bytes(ecPublicData['x'], 32) +
utils.int_to_bytes(ecPublicData['y'], 32))
)
eckey = keys.Key.fromString(ecblob)
self.assertTrue(eckey.isPublic())
self.assertEqual(ecPublicData, eckey.data())
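    # The b'\x04' prefix above marks the uncompressed SEC1 point encoding
    # 0x04 || X || Y, with each coordinate padded to the curve's byte
    # length (32 bytes for nistp256).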
def test_fromPrivateBlobUnsupportedType(self):
"""
C{BadKeyError} is raised when loading a private blob with an
unsupported type.
"""
badBlob = common.NS(b'ssh-bad')
self.assertRaises(
keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_fromPrivateBlobRSA(self):
"""
A private RSA key is correctly generated from a private key blob.
"""
rsaBlob = (
common.NS(b'ssh-rsa') +
common.MP(keydata.RSAData['n']) +
common.MP(keydata.RSAData['e']) +
common.MP(keydata.RSAData['d']) +
common.MP(keydata.RSAData['u']) +
common.MP(keydata.RSAData['p']) +
common.MP(keydata.RSAData['q'])
)
rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
self.assertFalse(rsaKey.isPublic())
self.assertEqual(keydata.RSAData, rsaKey.data())
self.assertEqual(
rsaKey, keys.Key._fromString_PRIVATE_BLOB(rsaKey.privateBlob()))
def test_fromPrivateBlobDSA(self):
"""
A private DSA key is correctly generated from a private key blob.
"""
dsaBlob = (
common.NS(b'ssh-dss') +
common.MP(keydata.DSAData['p']) +
common.MP(keydata.DSAData['q']) +
common.MP(keydata.DSAData['g']) +
common.MP(keydata.DSAData['y']) +
common.MP(keydata.DSAData['x'])
)
dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
self.assertFalse(dsaKey.isPublic())
self.assertEqual(keydata.DSAData, dsaKey.data())
self.assertEqual(
dsaKey, keys.Key._fromString_PRIVATE_BLOB(dsaKey.privateBlob()))
def test_fromPrivateBlobECDSA(self):
"""
A private EC key is correctly generated from a private key blob.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
publicNumbers = ec.EllipticCurvePublicNumbers(
x=keydata.ECDatanistp256['x'], y=keydata.ECDatanistp256['y'],
curve=ec.SECP256R1())
ecblob = (
common.NS(keydata.ECDatanistp256['curve']) +
common.NS(keydata.ECDatanistp256['curve'][-8:]) +
common.NS(publicNumbers.public_key(default_backend()).public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint
)) +
common.MP(keydata.ECDatanistp256['privateValue'])
)
eckey = keys.Key._fromString_PRIVATE_BLOB(ecblob)
self.assertFalse(eckey.isPublic())
self.assertEqual(keydata.ECDatanistp256, eckey.data())
self.assertEqual(
eckey, keys.Key._fromString_PRIVATE_BLOB(eckey.privateBlob()))
def test_blobRSA(self):
"""
Return the over-the-wire SSH format of the RSA public key.
"""
self.assertEqual(
keys.Key(self.rsaObj).blob(),
common.NS(b'ssh-rsa') +
common.MP(self.rsaObj.private_numbers().public_numbers.e) +
common.MP(self.rsaObj.private_numbers().public_numbers.n)
)
def test_blobDSA(self):
"""
Return the over-the-wire SSH format of the DSA public key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).blob(),
common.NS(b'ssh-dss') +
common.MP(publicNumbers.parameter_numbers.p) +
common.MP(publicNumbers.parameter_numbers.q) +
common.MP(publicNumbers.parameter_numbers.g) +
common.MP(publicNumbers.y)
)
def test_blobEC(self):
"""
Return the over-the-wire SSH format of the EC public key.
"""
from cryptography import utils
byteLength = (self.ecObj.curve.key_size + 7) // 8
self.assertEqual(
keys.Key(self.ecObj).blob(),
common.NS(keydata.ECDatanistp256['curve']) +
common.NS(keydata.ECDatanistp256['curve'][-8:]) +
common.NS(b'\x04' +
utils.int_to_bytes(
self.ecObj.private_numbers().public_numbers.x, byteLength) +
utils.int_to_bytes(
self.ecObj.private_numbers().public_numbers.y, byteLength))
)
def test_blobNoKey(self):
"""
C{RuntimeError} is raised when the blob is requested for a Key
which is not wrapping anything.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.blob)
def test_privateBlobRSA(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of an
RSA private key.
"""
numbers = self.rsaObj.private_numbers()
self.assertEqual(
keys.Key(self.rsaObj).privateBlob(),
common.NS(b'ssh-rsa') +
common.MP(numbers.public_numbers.n) +
common.MP(numbers.public_numbers.e) +
common.MP(numbers.d) +
common.MP(numbers.iqmp) +
common.MP(numbers.p) +
common.MP(numbers.q)
)
def test_privateBlobDSA(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of a DSA
private key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).privateBlob(),
common.NS(b'ssh-dss') +
common.MP(publicNumbers.parameter_numbers.p) +
common.MP(publicNumbers.parameter_numbers.q) +
common.MP(publicNumbers.parameter_numbers.g) +
common.MP(publicNumbers.y) +
common.MP(self.dsaObj.private_numbers().x)
)
def test_privateBlobEC(self):
"""
        L{keys.Key.privateBlob} returns the SSH protocol-level format of an
        EC private key.
"""
from cryptography.hazmat.primitives import serialization
self.assertEqual(
keys.Key(self.ecObj).privateBlob(),
common.NS(keydata.ECDatanistp256['curve']) +
common.NS(keydata.ECDatanistp256['curve'][-8:]) +
common.NS(
self.ecObj.public_key().public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint)) +
common.MP(self.ecObj.private_numbers().private_value)
)
def test_privateBlobNoKeyObject(self):
"""
        Raises L{RuntimeError} if the underlying key object does not exist.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_agentv3)
self.assertEqual(key.toString('openssh'), keydata.privateRSA_openssh)
self.assertEqual(
key.toString('openssh', passphrase=b'encrypted'),
keydata.privateRSA_openssh_encrypted)
self.assertEqual(
key.public().toString('openssh'),
keydata.publicRSA_openssh[:-8]) # no comment
self.assertEqual(
key.public().toString('openssh', comment=b'comment'),
keydata.publicRSA_openssh)
def test_toOpenSSHRSA_v1_format(self):
"""
L{keys.Key.toString} serializes an RSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
new_key_data = key.toString('openssh', subtype='v1')
new_enc_key_data = key.toString(
'openssh', subtype='v1', passphrase='encrypted')
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_key_data.splitlines()[0])
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_enc_key_data.splitlines()[0])
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase='encrypted'))
def test_toOpenSSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_lsh)
self.assertEqual(key.toString('openssh'), keydata.privateDSA_openssh)
self.assertEqual(
key.public().toString('openssh', comment=b'comment'),
keydata.publicDSA_openssh)
self.assertEqual(
key.public().toString('openssh'),
keydata.publicDSA_openssh[:-8]) # no comment
def test_toOpenSSHDSA_v1_format(self):
"""
L{keys.Key.toString} serializes a DSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
new_key_data = key.toString('openssh', subtype='v1')
new_enc_key_data = key.toString(
'openssh', subtype='v1', passphrase='encrypted')
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_key_data.splitlines()[0])
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_enc_key_data.splitlines()[0])
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase='encrypted'))
def test_toOpenSSHECDSA(self):
"""
L{keys.Key.toString} serializes an ECDSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertEqual(
key.public().toString('openssh', comment=b'comment'),
keydata.publicECDSA_openssh)
self.assertEqual(
key.public().toString('openssh'),
keydata.publicECDSA_openssh[:-8]) # no comment
def test_toOpenSSHECDSA_v1_format(self):
"""
L{keys.Key.toString} serializes an ECDSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateECDSA_openssh)
new_key_data = key.toString('openssh', subtype='v1')
new_enc_key_data = key.toString(
'openssh', subtype='v1', passphrase='encrypted')
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_key_data.splitlines()[0])
self.assertEqual(
b'-----BEGIN OPENSSH PRIVATE KEY-----',
new_enc_key_data.splitlines()[0])
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase='encrypted'))
def test_toLSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateRSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicRSA_lsh)
def test_toLSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateDSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicDSA_lsh)
def test_toAgentv3RSA(self):
"""
L{keys.Key.toString} serializes an RSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateRSA_agentv3)
def test_toAgentv3DSA(self):
"""
L{keys.Key.toString} serializes a DSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateDSA_agentv3)
def test_toStringErrors(self):
"""
L{keys.Key.toString} raises L{keys.BadKeyError} when passed an invalid
format type.
"""
self.assertRaises(keys.BadKeyError, keys.Key(self.rsaObj).toString,
'bad_type')
def test_signAndVerifyRSA(self):
"""
Signed data can be verified using RSA.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateRSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyDSA(self):
"""
Signed data can be verified using DSA.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateDSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyEC(self):
"""
Signed data can be verified using EC.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateECDSA_openssh)
signature = key.sign(data)
key384 = keys.Key.fromString(keydata.privateECDSA_openssh384)
signature384 = key384.sign(data)
key521 = keys.Key.fromString(keydata.privateECDSA_openssh521)
signature521 = key521.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
self.assertTrue(key384.public().verify(signature384, data))
self.assertTrue(key384.verify(signature384, data))
self.assertTrue(key521.public().verify(signature521, data))
self.assertTrue(key521.verify(signature521, data))
def test_verifyRSA(self):
"""
A known-good RSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertTrue(key.verify(self.rsaSignature, b''))
self.assertFalse(key.verify(self.rsaSignature, b'a'))
self.assertFalse(key.verify(self.dsaSignature, b''))
def test_verifyDSA(self):
"""
A known-good DSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature, b''))
self.assertFalse(key.verify(self.dsaSignature, b'a'))
self.assertFalse(key.verify(self.rsaSignature, b''))
def test_verifyDSANoPrefix(self):
"""
        Some commercial SSH servers send DSA signatures as two bare 20-byte
        numbers, without the usual type prefix; these are still verified
        successfully.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature[-40:], b''))
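        # The last 40 bytes of the full 'ssh-dss' signature are the bare
        # r||s values (two 20-byte integers), so the slice above drops only
        # the algorithm-name prefix.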
def test_reprPrivateRSA(self):
"""
The repr of a L{keys.Key} contains all of the RSA components for an RSA
private key.
"""
self.assertEqual(repr(keys.Key(self.rsaObj)),
"""<RSA Private Key (2048 bits)
attr d:
\t21:4c:08:66:a2:28:d5:b4:fb:8e:0f:72:1b:85:09:
\t00:b9:f2:4e:37:f0:1c:57:4b:e3:51:7f:9e:23:a7:
\te4:3a:98:55:1b:ea:8b:7a:98:1e:bc:d8:ba:b1:f9:
\t89:12:18:60:ac:e8:cc:0b:4e:09:5a:40:6a:ba:2f:
\t99:f8:b3:24:60:84:b9:ce:69:95:9a:f9:e2:fc:1f:
\t51:4d:27:15:db:2b:27:ad:ef:b4:69:ac:be:7d:10:
\teb:86:47:70:73:b4:00:87:95:15:3b:37:f9:e7:14:
\te7:80:bb:68:1e:1b:e6:dd:bb:73:63:b9:67:e6:b2:
\t27:7f:cf:cf:30:9b:c2:98:fd:d9:18:36:2f:36:2e:
\tf1:3d:81:7a:9f:e1:03:2d:47:db:34:51:62:39:dd:
\t4f:e9:ac:a8:8b:d9:d6:f3:84:c4:17:b9:71:9d:06:
\t08:42:78:4d:bb:c5:2a:f4:c3:58:cd:55:2b:ed:be:
\t33:5f:04:ea:7b:e6:04:24:63:f2:2d:d7:3d:1b:6c:
\td5:9c:63:43:2f:92:88:8d:3e:6e:da:18:37:d8:0f:
\t25:67:89:1d:b9:46:34:5e:c9:ce:c4:8b:ed:92:5a:
\t33:07:0f:df:86:08:f9:92:e9:db:eb:38:08:36:c9:
\tcd:cd:0a:01:48:5b:39:3e:7a:ca:c6:80:a9:dc:d4:
\t39
attr e:
\t01:00:01
attr n:
\t00:d5:6a:ac:78:23:d6:d6:1b:ec:25:a1:50:c4:77:
\t63:50:84:45:01:55:42:14:2a:2a:e0:d0:60:ee:d4:
\te9:a3:ad:4a:fa:39:06:5e:84:55:75:5f:00:36:bf:
\t6f:aa:2a:3f:83:26:37:c1:69:2e:5b:fd:f0:f3:d2:
\t7d:d6:98:cd:3a:40:78:d5:ca:a8:18:c0:11:93:24:
\t09:0c:81:4c:8f:f7:9c:ed:13:16:6a:a4:04:e9:49:
\t77:c3:e4:55:64:b3:79:68:9e:2c:08:eb:ac:e8:04:
\t2d:21:77:05:a7:8e:ef:53:30:0d:a5:e5:bb:3d:6a:
\te2:09:36:6f:fd:34:d3:7d:6f:46:ff:87:da:a9:29:
\t27:aa:ff:ad:f5:85:e6:3e:1a:b8:7a:1d:4a:b1:ea:
\tc0:5a:f7:30:df:1f:c2:a4:e4:ef:3f:91:49:96:40:
\td5:19:77:2d:37:c3:5e:ec:9d:a6:3a:44:a5:c2:a4:
\t29:dd:d5:ba:9c:3d:45:b3:c6:2c:18:64:d5:ba:3d:
\tdf:ab:7f:cd:42:ac:a7:f1:18:0b:a0:58:15:62:0b:
\ta4:2a:6e:43:c3:e4:04:9f:35:a3:47:8e:46:ed:33:
\ta5:65:bd:bc:3b:29:6e:02:0b:57:df:74:e8:13:b4:
\t37:35:7e:83:5f:20:26:60:a6:dc:ad:8b:c6:6c:79:
\t98:f7
attr p:
\t00:d9:70:06:d8:e2:bc:d4:78:91:50:94:d4:c1:1b:
\t89:38:6c:46:64:5a:51:a0:9a:07:3d:48:8f:03:51:
\tcc:6b:12:8e:7d:1a:b1:65:e7:71:75:39:e0:32:05:
\t75:8d:18:4c:af:93:b1:49:b1:66:5f:78:62:7a:d1:
\t0c:ca:e6:4d:43:b3:9c:f4:6b:7d:e6:0c:98:dc:cf:
\t21:62:8e:d5:2e:12:de:04:ae:d7:24:6e:83:31:a2:
\t15:a2:44:3d:22:a9:62:26:22:b9:b2:ed:54:0a:9d:
\t08:83:a7:07:0d:ff:19:18:8e:d8:ab:1d:da:48:9c:
\t31:68:11:a1:66:6d:e3:d8:1d
attr q:
\t00:fb:44:17:8b:a4:36:be:1e:37:1d:a7:f6:61:6c:
\t04:c4:aa:dd:78:3e:07:8c:1e:33:02:ae:03:14:87:
\t83:7a:e5:9e:7d:08:67:a8:f2:aa:bf:12:70:cf:72:
\ta9:a7:c7:0b:1d:88:d5:20:fd:9c:63:ca:47:30:55:
\t4e:8b:c4:cf:f4:7f:16:a4:92:12:74:a1:09:c2:c4:
\t6e:9c:8c:33:ef:a5:e5:f7:e0:2b:ad:4f:5c:11:aa:
\t1a:84:37:5b:fd:7a:ea:c3:cd:7c:b0:c8:e4:1f:54:
\t63:b5:c7:af:df:f4:09:a7:fc:c7:25:fc:5c:e9:91:
\td7:92:c5:98:1e:56:d3:b1:23
attr u:
\t00:85:4b:1b:7a:9b:12:10:37:9e:1f:ad:5e:da:fe:
\tc6:96:fe:df:35:6b:b9:34:e2:16:97:92:26:09:bd:
\tbd:70:20:03:a7:35:bd:2d:1b:a0:d2:07:47:2b:d4:
\tde:a8:a8:07:07:1b:b8:04:20:a7:27:41:3c:6c:39:
\t39:e9:41:ce:e7:17:1d:d1:4c:5c:bc:3d:d2:26:26:
\tfe:6a:d6:fd:48:72:ae:46:fa:7b:c3:d3:19:60:44:
\t1d:a5:13:a7:80:f5:63:29:d4:7a:5d:06:07:16:5d:
\tf6:8b:3d:cb:64:3a:e2:84:5a:4d:8c:06:2d:2d:9d:
\t1c:eb:83:4c:78:3d:79:54:ce>""")
def test_reprPublicRSA(self):
"""
The repr of a L{keys.Key} contains all of the RSA components for an RSA
public key.
"""
self.assertEqual(repr(keys.Key(self.rsaObj).public()),
"""<RSA Public Key (2048 bits)
attr e:
\t01:00:01
attr n:
\t00:d5:6a:ac:78:23:d6:d6:1b:ec:25:a1:50:c4:77:
\t63:50:84:45:01:55:42:14:2a:2a:e0:d0:60:ee:d4:
\te9:a3:ad:4a:fa:39:06:5e:84:55:75:5f:00:36:bf:
\t6f:aa:2a:3f:83:26:37:c1:69:2e:5b:fd:f0:f3:d2:
\t7d:d6:98:cd:3a:40:78:d5:ca:a8:18:c0:11:93:24:
\t09:0c:81:4c:8f:f7:9c:ed:13:16:6a:a4:04:e9:49:
\t77:c3:e4:55:64:b3:79:68:9e:2c:08:eb:ac:e8:04:
\t2d:21:77:05:a7:8e:ef:53:30:0d:a5:e5:bb:3d:6a:
\te2:09:36:6f:fd:34:d3:7d:6f:46:ff:87:da:a9:29:
\t27:aa:ff:ad:f5:85:e6:3e:1a:b8:7a:1d:4a:b1:ea:
\tc0:5a:f7:30:df:1f:c2:a4:e4:ef:3f:91:49:96:40:
\td5:19:77:2d:37:c3:5e:ec:9d:a6:3a:44:a5:c2:a4:
\t29:dd:d5:ba:9c:3d:45:b3:c6:2c:18:64:d5:ba:3d:
\tdf:ab:7f:cd:42:ac:a7:f1:18:0b:a0:58:15:62:0b:
\ta4:2a:6e:43:c3:e4:04:9f:35:a3:47:8e:46:ed:33:
\ta5:65:bd:bc:3b:29:6e:02:0b:57:df:74:e8:13:b4:
\t37:35:7e:83:5f:20:26:60:a6:dc:ad:8b:c6:6c:79:
\t98:f7>""")
def test_reprPublicECDSA(self):
"""
        The repr of a L{keys.Key} contains all of the EC components for an
        ECDSA public key.
"""
self.assertEqual(repr(keys.Key(self.ecObj).public()),
"""<Elliptic Curve Public Key (256 bits)
curve:
\tecdsa-sha2-nistp256
x:
\t76282513020392096317118503144964731774299773481750550543382904345687059013883
y:""" +
"\n\t8154319786460285263226566476944164753434437589431431968106113715931064" +
"6683104>\n")
def test_reprPrivateECDSA(self):
"""
        The repr of a L{keys.Key} contains all of the EC components for an
        ECDSA private key.
"""
self.assertEqual(repr(keys.Key(self.ecObj)),
"""<Elliptic Curve Private Key (256 bits)
curve:
\tecdsa-sha2-nistp256
privateValue:
\t34638743477210341700964008455655698253555655678826059678074967909361042656500
x:
\t76282513020392096317118503144964731774299773481750550543382904345687059013883
y:""" +
"\n\t8154319786460285263226566476944164753434437589431431968106113715931064" +
"6683104>\n")
class PersistentRSAKeyTests(unittest.TestCase):
"""
Tests for L{keys._getPersistentRSAKey}.
"""
if cryptography is None:
skip = skipCryptography
def test_providedArguments(self):
"""
L{keys._getPersistentRSAKey} will put the key in
C{directory}/C{filename}, with the key length of C{keySize}.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
self.assertEqual(key.size(), 512)
self.assertTrue(keyFile.exists())
def test_noRegeneration(self):
"""
L{keys._getPersistentRSAKey} will not regenerate the key if the key
already exists.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
self.assertEqual(key.size(), 512)
self.assertTrue(keyFile.exists())
keyContent = keyFile.getContent()
# Set the key size to 1024 bits. Since it exists already, it will find
# the 512 bit key, and not generate a 1024 bit key.
key = keys._getPersistentRSAKey(keyFile, keySize=1024)
self.assertEqual(key.size(), 512)
self.assertEqual(keyFile.getContent(), keyContent)
def test_keySizeZero(self):
"""
        If the underlying key object of a key returned by
        L{keys._getPersistentRSAKey} is set to C{None}, its reported size
        becomes 0.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
key._keyObject = None
        self.assertEqual(key.size(), 0)
| 38.608947 | 79 | 0.663907 |
19a18a247d4381814a222e45e444d6f21729a5d9 | 4,791 | py | Python | pypureclient/flasharray/FA_2_7/models/hardware_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | ["BSD-2-Clause"] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_7/models/hardware_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | ["BSD-2-Clause"] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_7/models/hardware_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | ["BSD-2-Clause"] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class HardwareGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[Hardware]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.Hardware]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[Hardware])
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HardwareGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HardwareGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HardwareGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
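# Illustrative pagination sketch (an assumption for clarity; `client` and
# `get_hardware` are hypothetical names, not part of this generated module):
#
#     resp = client.get_hardware()
#     while True:
#         for item in resp.to_dict().get('items', []):
#             process(item)
#         token = getattr(resp, 'continuation_token', None)
#         if token is None:
#             break
#         resp = client.get_hardware(continuation_token=token)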
| 36.853846 | 524 | 0.606137 |
cac8d5fee2d0336e015c6c2326f024f40ad5c2a2 | 2,878 | py | Python | examples/GLSurfacePlot.py | abbasegbeyemi/pyqtgraph | 6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed | ["MIT"] | null | null | null | examples/GLSurfacePlot.py | abbasegbeyemi/pyqtgraph | 6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed | ["MIT"] | 1 | 2021-04-04T15:05:47.000Z | 2021-05-15T23:56:42.000Z | examples/GLSurfacePlot.py | abbasegbeyemi/pyqtgraph | 6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed | ["MIT"] | 1 | 2021-05-19T10:11:17.000Z | 2021-05-19T10:11:17.000Z |
# -*- coding: utf-8 -*-
"""
This example demonstrates the use of GLSurfacePlotItem.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
## Create a GL View widget to display data
app = pg.mkQApp("GLSurfacePlot Example")
w = gl.GLViewWidget()
w.show()
w.setWindowTitle('pyqtgraph example: GLSurfacePlot')
w.setCameraPosition(distance=50)
## Add a grid to the view
g = gl.GLGridItem()
g.scale(2,2,1)
g.setDepthValue(10) # draw grid after surfaces since they may be translucent
w.addItem(g)
## Simple surface plot example
## x, y values are not specified, so assumed to be 0:50
z = pg.gaussianFilter(np.random.normal(size=(50,50)), (1,1))
p1 = gl.GLSurfacePlotItem(z=z, shader='shaded', color=(0.5, 0.5, 1, 1))
p1.scale(16./49., 16./49., 1.0)
p1.translate(-18, 2, 0)
w.addItem(p1)
## Saddle example with x and y specified
x = np.linspace(-8, 8, 50)
y = np.linspace(-8, 8, 50)
z = 0.1 * ((x.reshape(50,1) ** 2) - (y.reshape(1,50) ** 2))
p2 = gl.GLSurfacePlotItem(x=x, y=y, z=z, shader='normalColor')
p2.translate(-10,-10,0)
w.addItem(p2)
## Manually specified colors
z = pg.gaussianFilter(np.random.normal(size=(50,50)), (1,1))
x = np.linspace(-12, 12, 50)
y = np.linspace(-12, 12, 50)
colors = np.ones((50,50,4), dtype=float)
colors[...,0] = np.clip(np.cos(((x.reshape(50,1) ** 2) + (y.reshape(1,50) ** 2)) ** 0.5), 0, 1)
colors[...,1] = colors[...,0]
p3 = gl.GLSurfacePlotItem(z=z, colors=colors.reshape(50*50,4), shader='shaded', smooth=False)
p3.scale(16./49., 16./49., 1.0)
p3.translate(2, -18, 0)
w.addItem(p3)
## Animated example
## compute surface vertex data
cols = 90
rows = 100
x = np.linspace(-8, 8, cols+1).reshape(cols+1,1)
y = np.linspace(-8, 8, rows+1).reshape(1,rows+1)
d = (x**2 + y**2) * 0.1
d2 = d ** 0.5 + 0.1
## precompute height values for all frames
phi = np.arange(0, np.pi*2, np.pi/20.)
z = np.sin(d[np.newaxis,...] + phi.reshape(phi.shape[0], 1, 1)) / d2[np.newaxis,...]
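## z now holds one precomputed height field per animation frame:
## shape == (len(phi), cols+1, rows+1) == (40, 91, 101)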
## create a surface plot, tell it to use the 'heightColor' shader
## since this does not require normal vectors to render (thus we
## can set computeNormals=False to save time when the mesh updates)
p4 = gl.GLSurfacePlotItem(x=x[:,0], y = y[0,:], shader='heightColor', computeNormals=False, smooth=False)
p4.shader()['colorMap'] = np.array([0.2, 2, 0.5, 0.2, 1, 1, 0.2, 0, 2])
p4.translate(10, 10, 0)
w.addItem(p4)
index = 0
def update():
global p4, z, index
index -= 1
p4.setData(z=z[index%z.shape[0]])
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(30)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 28.78 | 105 | 0.664698 |
0dbe845818982fcc2d8f06eadecf04fa6816df55 | 4,983 | py | Python | rbac/common/user/create_user.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | ["Apache-2.0"] | 75 | 2018-04-06T09:13:34.000Z | 2020-05-18T18:59:47.000Z | rbac/common/user/create_user.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | ["Apache-2.0"] | 989 | 2018-04-18T21:01:56.000Z | 2019-10-23T15:37:09.000Z | rbac/common/user/create_user.py | fthornton67/sawtooth-next-directory | 79479afb8d234911c56379bb1d8abf11f28ef86d | ["Apache-2.0"] | 72 | 2018-04-13T18:29:12.000Z | 2020-05-29T06:00:33.000Z |
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" Implements the CREATE_USER message
usage: rbac.user.new()
"""
from rbac.common import addresser
from rbac.common.base.base_message import BaseMessage
from rbac.common.logs import get_default_logger
LOGGER = get_default_logger(__name__)
class CreateUser(BaseMessage):
""" Implements the CREATE_USER message
usage: rbac.user.new()
"""
def __init__(self):
super().__init__()
self._register()
@property
def message_action_type(self):
"""The action type from AddressSpace performed by this message"""
return addresser.MessageActionType.CREATE
@property
def address_type(self):
"""The address type from AddressSpace implemented by this class"""
return addresser.AddressSpace.USER
@property
def object_type(self):
"""The object type from AddressSpace implemented by this class"""
return addresser.ObjectType.USER
@property
def related_type(self):
"""The related type from AddressSpace implemented by this class"""
return addresser.ObjectType.NONE
@property
def relationship_type(self):
"""The related type from AddressSpace implemented by this class"""
return addresser.RelationshipType.ATTRIBUTES
def make_addresses(self, message, signer_user_id):
"""Makes the appropriate inputs & output addresses for the message type"""
inputs, _ = super().make_addresses(message, signer_user_id)
user_address = self.address(object_id=message.next_id)
inputs.add(user_address)
if message.manager_id:
manager_address = self.address(object_id=message.manager_id)
inputs.add(manager_address)
if message.key:
key_address = addresser.key.address(object_id=message.key)
user_key_address = addresser.user.key.address(
object_id=message.next_id, related_id=message.key
)
inputs.add(key_address)
inputs.add(user_key_address)
outputs = inputs
return inputs, outputs
@property
def allow_signer_not_in_state(self):
"""Whether the signer of the message is allowed to not be
in state. Used only for when the transaction also creates the
signer of the message (e.g. CREATE_USER)"""
return True
def validate(self, message, signer=None):
"""Validates the message values"""
super().validate(message=message, signer=signer)
if len(message.name) < 5:
raise ValueError("Users must have names longer than 4 characters")
if message.manager_id is not None:
if message.next_id == message.manager_id:
raise ValueError("User cannot be their own manager")
def validate_state(self, context, message, payload, input_state, store):
"""Validates the message against state"""
super().validate_state(
context=context,
message=message,
payload=payload,
input_state=input_state,
store=store,
)
if addresser.user.exists_in_state_inputs(
inputs=payload.inputs, input_state=input_state, object_id=message.next_id
):
raise ValueError(
"User with id {} already exists in state".format(message.next_id)
)
if message.manager_id and not addresser.user.exists_in_state_inputs(
inputs=payload.inputs, input_state=input_state, object_id=message.manager_id
):
raise ValueError(
"Manager with id {} does not exist in state".format(message.manager_id)
)
def apply_update(self, message, payload, object_id, related_id, output_state):
"""Stores data beyond the user record"""
if message.key:
addresser.key.store(
object_id=message.key,
message=message,
outputs=payload.outputs,
output_state=output_state,
)
addresser.user.key.create_relationship(
object_id=object_id,
related_id=message.key,
outputs=payload.outputs,
output_state=output_state,
created_date=payload.now,
)
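# Illustrative usage sketch (an assumption: argument names are inferred from
# the message fields validated above, and the exact signature of
# rbac.user.new() is defined elsewhere in this package):
#
#     rbac.user.new(
#         signer_keypair=keypair,   # hypothetical signer keypair
#         next_id=user_id,
#         name="Jane Example",      # must be longer than 4 characters
#         manager_id=manager_id,    # optional; must differ from next_id
#     )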
| 36.639706 | 88 | 0.642785 |
9f89c04375f08b57ae375a35ed381efa1c03f191 | 3,646 | py | Python | backend/Tests/test_categorize.py | Cameramorphic/classify-images | bf1e6c39145fe6562a485d5a593fd63f32511db1 | [
"MIT"
] | 2 | 2021-02-11T17:04:48.000Z | 2021-02-11T17:04:51.000Z | backend/Tests/test_categorize.py | Cameramorphic/classify-images | bf1e6c39145fe6562a485d5a593fd63f32511db1 | [
"MIT"
] | 5 | 2021-02-11T17:06:25.000Z | 2021-02-11T17:06:51.000Z | backend/Tests/test_categorize.py | Cameramorphic/classify-images | bf1e6c39145fe6562a485d5a593fd63f32511db1 | [
"MIT"
] | null | null | null |
import requests
import os
import abstract_test
files = os.listdir("Pictures")
categorize_endpoint = abstract_test.ADDRESS + abstract_test.categorize
def test_get_categorize(module_scoped_container_getter):
abstract_test.wait_for_server(abstract_test.categorize)
response = requests.get(categorize_endpoint)
assert response.status_code == 200
assert response.text == abstract_test.SELECT_FILES_HTML
def test_post_categorize_csv(module_scoped_container_getter):
multipart_form_data = build_categorize_multipart("Pictures/", abstract_test.example_csv)
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 201)
for f in files:
if f != "sources.txt":
assert ((json_response[f] == "an apple") or (json_response[f] == "a cat") or json_response[f] == "a dog")
assert len(json_response) == len(files) - 1
def test_post_categorize_csv_utf16(module_scoped_container_getter):
multipart_form_data = build_categorize_multipart("Pictures/", abstract_test.utf16_csv)
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 400)
assert json_response["error"] == "Invalid encoding in file " + abstract_test.utf16_csv + ", valid encodings are UTF-8 and US-ASCII"
def test_post_categorize_json_utf16(module_scoped_container_getter):
multipart_form_data = build_categorize_multipart("Pictures/", abstract_test.utf16_json)
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 400)
assert json_response["error"] == "Invalid encoding in file " + abstract_test.utf16_json + ", valid encodings are UTF-8 and US-ASCII"
def test_post_categorize_json(module_scoped_container_getter):
multipart_form_data = build_categorize_multipart("Pictures/", abstract_test.example_json)
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 201)
for f in files:
if f != "sources.txt":
assert ((json_response[f] == "an apple") or (json_response[f] == "a dog"))
assert len(json_response) == len(files) - 1
def test_post_categorize_invalid_image_files(module_scoped_container_getter):
multipart_form_data = abstract_test.build_base_multipart_images(abstract_test.categorize, os.listdir('InvalidFiles'), "InvalidFiles/")
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 400)
assert json_response["error"] == "Invalid extension, allowed extensions are: ['png', 'jpg', 'jpeg']"
def test_post_categorize_invalid_categories_file(module_scoped_container_getter):
multipart_form_data = abstract_test.build_base_multipart_images(abstract_test.categorize, files, "Pictures/")
multipart_form_data.append(('categories', (str("invalid2.pdf")
, open('InvalidFiles/' + "Invalid2.pdf", 'rb')
, 'text/plain')))
json_response = abstract_test.post_multipart(abstract_test.categorize, multipart_form_data, 400)
assert json_response["error"] == "Invalid extension, allowed extensions are: ['csv', 'json']"
def build_categorize_multipart(images_path, categories_file_name):
multipart_form_data = abstract_test.build_base_multipart_images(abstract_test.categorize, files, images_path)
multipart_form_data.append(('categories', (str(categories_file_name)
, open('CategoryFiles/' + categories_file_name, 'rb')
, 'text/plain')))
return multipart_form_data
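# For reference (illustration only): each entry appended above follows the
# (field_name, (filename, file_object, content_type)) triple layout that the
# `requests` library accepts for multipart uploads, e.g.
# ('categories', ('example.csv', <open file handle>, 'text/plain')).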
| 48.613333 | 138 | 0.738892 |
fe15f8f191f5227e19860dcaaf0a5f2deac304a3 | 223 | py | Python | trader/__init__.py | geisten/bot | 76d4aef279cd168f6cbf7994055c1d289329e49c | [
"MIT"
] | null | null | null | trader/__init__.py | geisten/bot | 76d4aef279cd168f6cbf7994055c1d289329e49c | [
"MIT"
] | null | null | null | trader/__init__.py | geisten/bot | 76d4aef279cd168f6cbf7994055c1d289329e49c | [
"MIT"
] | null | null | null | """Package definition"""
from .trader import TradingBook, Order, HookValue
from .binance import authenticate_to_broker, test_runner
__all__ = ('authenticate_to_broker', 'test_runner', 'TradingBook', 'Order', 'HookValue')
| 37.166667 | 89 | 0.775785 |
cb31a61e1e5a62ad786a4f8c95663b08e721fe4f | 6,149 | py | Python | cohesity_management_sdk/models/protection_environment_enum.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/protection_environment_enum.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/protection_environment_enum.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z |
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ProtectionEnvironmentEnum(object):
"""Implementation of the 'ProtectionSourceEnvironment' enum.
Specifies the source environment of the protection job.
Supported environment types such as 'kView', 'kSQL', 'kVMware',
etc.
NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter.
'kVMware' indicates the VMware Protection Source environment.
'kHyperV' indicates the HyperV Protection Source environment.
'kSQL' indicates the SQL Protection Source environment.
'kView' indicates the View Protection Source environment.
'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
    'kNimble' indicates the Nimble Storage Protection Source environment.
'kAzure' indicates the Microsoft's Azure Protection Source environment.
'kNetapp' indicates the Netapp Protection Source environment.
'kAgent' indicates the Agent Protection Source environment.
'kGenericNas' indicates the Generic Network Attached Storage Protection
Source environment.
'kAcropolis' indicates the Acropolis Protection Source environment.
    'kPhysicalFiles' indicates the Physical Files Protection Source
environment.
'kIsilon' indicates the Dell EMC's Isilon Protection Source environment.
'kGPFS' indicates IBM's GPFS Protection Source environment.
'kKVM' indicates the KVM Protection Source environment.
'kAWS' indicates the AWS Protection Source environment.
'kExchange' indicates the Exchange Protection Source environment.
'kHyperVVSS' indicates the HyperV VSS Protection Source
environment.
'kOracle' indicates the Oracle Protection Source environment.
'kGCP' indicates the Google Cloud Platform Protection Source environment.
'kFlashBlade' indicates the Flash Blade Protection Source environment.
'kAWSNative' indicates the AWS Native Protection Source environment.
'kO365' indicates the Office 365 Protection Source environment.
'kO365Outlook' indicates Office 365 outlook Protection Source environment.
'kHyperFlex' indicates the Hyper Flex Protection Source environment.
'kGCPNative' indicates the GCP Native Protection Source environment.
'kAzureNative' indicates the Azure Native Protection Source environment.
'kKubernetes' indicates a Kubernetes Protection Source environment.
'kElastifile' indicates Elastifile Protection Source environment.
'kAD' indicates Active Directory Protection Source environment.
'kRDSSnapshotManager' indicates AWS RDS Protection Source environment.
'kCassandra' indicates Cassandra Protection Source environment.
'kMongoDB' indicates MongoDB Protection Source environment.
'kCouchbase' indicates Couchbase Protection Source environment.
'kHdfs' indicates Hdfs Protection Source environment.
'kHive' indicates Hive Protection Source environment.
'kHBase' indicates HBase Protection Source environment.
'kUDA' indicates Universal Data Adapter Protection Source environment.
Attributes:
KVMWARE: TODO: type description here.
KHYPERV: TODO: type description here.
KSQL: TODO: type description here.
KVIEW: TODO: type description here.
KPUPPETEER: TODO: type description here.
KPHYSICAL: TODO: type description here.
KPURE: TODO: type description here.
KNIMBLE: TODO: type description here.
KAZURE: TODO: type description here.
KNETAPP: TODO: type description here.
KAGENT: TODO: type description here.
KGENERICNAS: TODO: type description here.
KACROPOLIS: TODO: type description here.
KPHYSICALFILES: TODO: type description here.
KISILON: TODO: type description here.
KGPFS: TODO: type description here.
KKVM: TODO: type description here.
KAWS: TODO: type description here.
KEXCHANGE: TODO: type description here.
KHYPERVVSS: TODO: type description here.
KORACLE: TODO: type description here.
KGCP: TODO: type description here.
KFLASHBLADE: TODO: type description here.
KAWSNATIVE: TODO: type description here.
KO365: TODO: type description here.
KO365OUTLOOK: TODO: type description here.
KHYPERFLEX: TODO: type description here.
KGCPNATIVE: TODO: type description here.
KAZURENATIVE: TODO: type description here.
KKUBERNETES: TODO: type description here.
KELASTIFILE: TODO: type description here.
KAD: TODO: type description here.
KRDSSNAPSHOTMANAGER: TODO: type description here.
KCASSANDRA: TODO: type description here.
KMONGODB: TODO: type description here.
KCOUCHBASE: TODO: type description here.
KHDFS: TODO: type description here.
KHIVE: TODO: type description here.
KHBASE: TODO: type description here.
KUDA: TODO: type description here.
"""
K_VMWARE = 'kVMware'
K_HYPERV = 'kHyperV'
KSQL = 'kSQL'
KVIEW = 'kView'
KPUPPETEER = 'kPuppeteer'
KPHYSICAL = 'kPhysical'
KPURE = 'kPure'
KNIMBLE = 'kNimble'
KAZURE = 'kAzure'
KNETAPP = 'kNetapp'
KAGENT = 'kAgent'
KGENERICNAS = 'kGenericNas'
KACROPOLIS = 'kAcropolis'
KPHYSICALFILES = 'kPhysicalFiles'
KISILON = 'kIsilon'
KGPFS = 'kGPFS'
KKVM = 'kKVM'
KAWS = 'kAWS'
KEXCHANGE = 'kExchange'
K_HYPERV_VSS = 'kHyperVVSS'
KORACLE = 'kOracle'
KGCP = 'kGCP'
KFLASHBLADE = 'kFlashBlade'
KAWSNATIVE = 'kAWSNative'
KO365 = 'kO365'
KO365OUTLOOK = 'kO365Outlook'
KHYPERFLEX = 'kHyperFlex'
KGCPNATIVE = 'kGCPNative'
KAZURENATIVE = 'kAzureNative'
KKUBERNETES = 'kKubernetes'
KELASTIFILE = 'kElastifile'
KAD = 'kAD'
KRDSSNAPSHOTMANAGER = 'kRDSSnapshotManager'
KCASSANDRA = 'kCassandra'
KMONGODB = 'kMongoDB'
KCOUCHBASE = 'kCouchbase'
KHDFS = 'kHdfs'
KHIVE = 'kHive'
KHBASE = 'kHBase'
KUDA = 'kUDA'
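# A minimal usage sketch (illustration only, not part of the SDK): the members are
# plain string constants, so they compare directly against the environment field of a
# protection-job record; `job` here stands in for any dict-like API response.
#
# def is_vmware_job(job):
#     return job.get('environment') == ProtectionEnvironmentEnum.K_VMWARE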
| 34.161111 | 78 | 0.709058 |
487fc0e001273d66a1720fef440e1e060253dfb9 | 1,112 | py | Python | application/ov1.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | 4 | 2021-01-04T05:46:43.000Z | 2022-01-06T16:33:40.000Z | application/ov1.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | null | null | null | application/ov1.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | null | null | null |
import streamlit as st
from analyze_data.metrics import Metrics
from do_data.getter import Reader
class OV_1():
def __init__(self):
pass
def narrative(self):
st.write(
'Cook county is the largest county in the United States by population and has millions of court records available for analysis.',
'In addition to size, Cook County is also the home county for Chicago and surrounding areas.',
'The availability of this data, at scale, provides interesting analytical opportunities to support public awareness of the court system.',
'Although the source data is publicly available, the raw data is split into different sections and is difficult to interpret without significant engineering.',
'This dashboard represents the results of a processed and ready-to-analyze dataset about the courts.'
)
return
def court_counts(self, year=2020):
return Metrics().ov1(year)
# self.st.plotly_chart(Metrics().ov1_initiation())
def timeseries(self):
        return Metrics().ov1_regression()
| 44.48 | 171 | 0.70054 |
caeb0b7319e4d3f714118a3dec653097700b07e8 | 403 | py | Python | juliany_pizza/wsgi.py | kzborisov/Juliany-Pizza | 4ebc0b21e314b244048df79e4858f30447b43f8b | [
"MIT"
] | null | null | null | juliany_pizza/wsgi.py | kzborisov/Juliany-Pizza | 4ebc0b21e314b244048df79e4858f30447b43f8b | [
"MIT"
] | 9 | 2022-03-23T13:13:23.000Z | 2022-03-28T13:40:20.000Z | juliany_pizza/wsgi.py | kzborisov/Juliany-Pizza | 4ebc0b21e314b244048df79e4858f30447b43f8b | [
"MIT"
] | null | null | null | """
WSGI config for juliany_pizza project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'juliany_pizza.settings')
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
f2484ee2c5e53a1d8d41be21c4ba30199d4c05ac | 12,419 | py | Python | dataset/transform.py | lilinxi/210414_CfgYoloV3 | e6bbb64efa22e7d4c1f583f033370be4b16e548b | [
"MIT"
] | null | null | null | dataset/transform.py | lilinxi/210414_CfgYoloV3 | e6bbb64efa22e7d4c1f583f033370be4b16e548b | [
"MIT"
] | null | null | null | dataset/transform.py | lilinxi/210414_CfgYoloV3 | e6bbb64efa22e7d4c1f583f033370be4b16e548b | [
"MIT"
] | null | null | null |
from typing import List
import numpy
import PIL.Image
import torch
import torchvision
def rand(min: float, max: float) -> float:
return numpy.random.rand() * (max - min) + min
class Compose(object):
"""
    Compose multiple transform operations into a single callable.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes):
for transform in self.transforms:
image, boxes = transform(image, boxes)
return image, boxes
def get_transforms(config: dict, train: bool) -> Compose:
"""
    :param train: whether this is the training set; the training pipeline adds extra data-augmentation transforms and returns only the annotation boxes, while the validation pipeline returns the full annotation dict
:return:
"""
transforms = []
if train:
transforms.append(ReformAndExtractBoxes())
# transforms.append(ScaleImageAndBoxes(config=config))
transforms.append(RandomScaleImageAndBoxes(config=config))
transforms.append(RandomTransformImage())
transforms.append(RandomFlipImageAndBoxes(config=config))
transforms.append(NormImageAndBoxes(config=config))
else:
transforms.append(ScaleImage(config=config))
transforms.append(NormImage(config=config))
return Compose(transforms)
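# A minimal usage sketch (illustration only, not part of the original pipeline). The
# config keys and the annotation layout below are the ones consumed by the transforms
# in this module; the 416x416 size is an assumed example value.
def _example_get_transforms():
    config = {"image_width": 416, "image_height": 416}
    train_transform = get_transforms(config, train=True)
    image = PIL.Image.new("RGB", (640, 480), (255, 255, 255))
    annotation = {"boxes": [[48, 64, 112, 128, 0]]}  # (xmin, ymin, xmax, ymax, label)
    norm_image, norm_boxes = train_transform(image, annotation)
    # norm_image: channels(RGB) * height * width array with values in [0, 1]
    # norm_boxes: box_num * (norm_x, norm_y, norm_w, norm_h, label)
    return norm_image, norm_boxes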
class ReformAndExtractBoxes(object):
"""
    Extracts the bounding boxes from the annotation data and converts the box format:
boxes (xmin, ymin, xmax, ymax, label) -> (x, y, w, h, label)
"""
def __call__(self, raw_image: PIL.Image.Image, truth_annotation: dict) -> (PIL.Image.Image, numpy.ndarray):
raw_boxes = []
for box in truth_annotation["boxes"]:
xmin, ymin, xmax, ymax, label = box
raw_x = (xmax + xmin) / 2
raw_y = (ymax + ymin) / 2
raw_w = xmax - xmin
raw_h = ymax - ymin
raw_boxes.append([raw_x, raw_y, raw_w, raw_h, label])
return raw_image, numpy.asarray(raw_boxes).astype(numpy.float32)
class ScaleImageAndBoxes(object):
"""
    Aspect-preserving (letterbox) scaling of the image and boxes.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, raw_image: PIL.Image.Image, raw_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
        # 1. Original image size and target (scaled) size
raw_width, raw_height = raw_image.size
scaled_width = self.config["image_width"]
scaled_height = self.config["image_height"]
        # 2. Compute the scale factor; take the smaller of the two ratios
scale = min(scaled_width / raw_width, scaled_height / raw_height)
        # 3. Image size after aspect-preserving scaling
nw = int(raw_width * scale)
nh = int(raw_height * scale)
        # 4. Resize the image, preserving the aspect ratio
scaled_image = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # 5. Pad the image edges
        new_image = PIL.Image.new("RGB", (scaled_width, scaled_height), (128, 128, 128))  # gray canvas used as the returned image
        new_image.paste(scaled_image, ((scaled_width - nw) // 2, (scaled_height - nh) // 2))  # paste the scaled image at the canvas center
        # 6. Transform the boxes to the padded frame
scaled_boxes = raw_boxes.copy()
scaled_boxes[:, 0:4] = raw_boxes[:, 0:4] * scale
scaled_boxes[:, 0] += (scaled_width - nw) // 2
scaled_boxes[:, 1] += (scaled_height - nh) // 2
return new_image, scaled_boxes
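# Worked example (illustration only): letterboxing a 640x480 input to 416x416 gives
# scale = min(416/640, 416/480) = 0.65, nw = 416, nh = 312, so 52 gray rows are added
# above and below, every box is scaled by 0.65, and box centers shift down by 52 pixels.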
class RandomScaleImageAndBoxes(object):
"""
    Random aspect-preserving scaling of the image and boxes (with random translation).
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, raw_image: PIL.Image.Image, raw_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
        # 1. Original image size and target (scaled) size
raw_width, raw_height = raw_image.size
scaled_width = self.config["image_width"]
scaled_height = self.config["image_height"]
        # 2. Compute the scale factor; take the smaller of the two ratios
scale = min(scaled_width / raw_width, scaled_height / raw_height)
scale = rand(0.1, 1.0) * scale # 0.1 ~ 1.0 scale
        # 3. Image size after aspect-preserving scaling
nw = int(raw_width * scale)
nh = int(raw_height * scale)
        # 4. Resize the image, preserving the aspect ratio
scaled_image = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # 4.5 Random translation offset
dx = int(rand(0.0, scaled_width - nw))
dy = int(rand(0.0, scaled_height - nh))
        # 5. Pad the image edges
        new_image = PIL.Image.new("RGB", (scaled_width, scaled_height), (128, 128, 128))  # gray canvas used as the returned image
        new_image.paste(scaled_image, (dx, dy))  # paste the scaled image at the random offset
        # 6. Transform the boxes to the padded frame
scaled_boxes = raw_boxes.copy()
scaled_boxes[:, 0:4] = raw_boxes[:, 0:4] * scale
scaled_boxes[:, 0] += dx
scaled_boxes[:, 1] += dy
return new_image, scaled_boxes
class RandomTransformImage(object):
"""
    Randomly jitter the image color (brightness / contrast / saturation) and occasionally convert it to grayscale.
"""
def __call__(self, scaled_image: PIL.Image.Image, scaled_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
new_image = scaled_image
if rand(0.0, 1.0) < 0.5:
new_image = torchvision.transforms.ColorJitter(
                brightness=(1.0, 10.0),  # brightness jitter range
                # contrast=(1.0, 10.0),  # contrast jitter range
                # saturation=(1.0, 10.0),  # saturation jitter range
                # hue=(0.2, 0.4),  # hue jitter range
)(scaled_image)
if rand(0.0, 1.0) < 0.5:
new_image = torchvision.transforms.ColorJitter(
                # brightness=(1.0, 10.0),  # brightness jitter range
                contrast=(1.0, 10.0),  # contrast jitter range
                # saturation=(1.0, 10.0),  # saturation jitter range
                # hue=(0.2, 0.4),  # hue jitter range
            )(new_image)  # apply to the accumulated image so successive jitters compose
if rand(0.0, 1.0) < 0.5:
new_image = torchvision.transforms.ColorJitter(
                # brightness=(1.0, 10.0),  # brightness jitter range
                # contrast=(1.0, 10.0),  # contrast jitter range
                saturation=(1.0, 10.0),  # saturation jitter range
                # hue=(0.2, 0.4),  # hue jitter range
            )(new_image)  # apply to the accumulated image so successive jitters compose
if rand(0.0, 1.0) < 0.01:
new_image = torchvision.transforms.Grayscale(num_output_channels=3)(new_image)
return new_image, scaled_boxes
class RandomFlipImageAndBoxes(object):
"""
    Randomly flip the image horizontally and/or vertically, mirroring the box centers accordingly.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, scaled_image: PIL.Image.Image, scaled_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
new_image = scaled_image
if rand(0.0, 1.0) < 0.5:
new_image = torchvision.transforms.RandomHorizontalFlip(p=2)(new_image)
scaled_boxes[:, 0] = self.config["image_width"] - scaled_boxes[:, 0]
if rand(0.0, 1.0) < 0.5:
new_image = torchvision.transforms.RandomVerticalFlip(p=2)(new_image)
scaled_boxes[:, 1] = self.config["image_height"] - scaled_boxes[:, 1]
return new_image, scaled_boxes
class ScaleImage(object):
"""
    Aspect-preserving (letterbox) scaling of the image only; the annotation dict is passed through unchanged.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, raw_image: PIL.Image.Image, truth_annotation: dict) -> (PIL.Image.Image, dict):
        # 1. Original image size and target (scaled) size
raw_width, raw_height = raw_image.size
scaled_width = self.config["image_width"]
scaled_height = self.config["image_height"]
        # 2. Compute the scale factor; take the smaller of the two ratios
scale = min(scaled_width / raw_width, scaled_height / raw_height)
        # 3. Image size after aspect-preserving scaling
nw = int(raw_width * scale)
nh = int(raw_height * scale)
        # 4. Resize the image, preserving the aspect ratio
scaled_image = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # 5. Pad the image edges
        new_image = PIL.Image.new("RGB", (scaled_width, scaled_height), (128, 128, 128))  # gray canvas used as the returned image
        new_image.paste(scaled_image, ((scaled_width - nw) // 2, (scaled_height - nh) // 2))  # paste the scaled image at the canvas center
return new_image, truth_annotation
class RescaleBoxes(object):
"""
    Inverse aspect-preserving scaling of boxes: maps detections on the padded, scaled canvas back to the original image frame.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, raw_image: PIL.Image.Image, scaled_boxes: numpy.ndarray) -> numpy.ndarray:
        # 1. Original image size and target (scaled) size
raw_width, raw_height = raw_image.size
scaled_width = self.config["image_width"]
scaled_height = self.config["image_height"]
        # 2. Compute the scale factor; take the smaller of the two ratios
scale = min(scaled_width / raw_width, scaled_height / raw_height)
        # 3. Image size after aspect-preserving scaling
nw = int(raw_width * scale)
nh = int(raw_height * scale)
        # 4. Transform the boxes back to the original frame
rescaled_boxes = scaled_boxes.copy()
rescaled_boxes[:, 0] -= (scaled_width - nw) // 2
rescaled_boxes[:, 1] -= (scaled_height - nh) // 2
rescaled_boxes[:, 2] -= (scaled_width - nw) // 2
rescaled_boxes[:, 3] -= (scaled_height - nh) // 2
rescaled_boxes[:, 0:4] = rescaled_boxes[:, 0:4] / scale
        rescaled_boxes = numpy.around(rescaled_boxes).astype(int)  # numpy.int was a deprecated alias for the builtin int
return rescaled_boxes
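# Worked inverse example (illustration only), continuing the 640x480 -> 416x416 case
# above: a detection at (100, 100, 200, 200) on the padded canvas maps back to
# ((100 - 0) / 0.65, (100 - 52) / 0.65, (200 - 0) / 0.65, (200 - 52) / 0.65)
# ≈ (154, 74, 308, 228) in the original 640x480 frame.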
class NormImageAndBoxes(object):
"""
    Normalization of the image and boxes to the [0, 1] range.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, scaled_image: PIL.Image.Image, scaled_boxes: numpy.ndarray) -> (
numpy.ndarray, numpy.ndarray):
        # 1. Normalize the PIL.Image.Image: width * height * RGB -> channels(RGB) * height * width
norm_image = numpy.asarray(torchvision.transforms.ToTensor()(scaled_image))
        # 2. Normalize the boxes by the configured image size
norm_boxes = scaled_boxes.copy()
norm_boxes[:, 0] /= self.config["image_width"]
norm_boxes[:, 1] /= self.config["image_height"]
norm_boxes[:, 2] /= self.config["image_width"]
norm_boxes[:, 3] /= self.config["image_height"]
return norm_image, norm_boxes
class NormImage(object):
"""
    Normalization of the image to the [0, 1] range.
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, scaled_image: PIL.Image.Image, truth_annotation: dict) -> (numpy.ndarray, dict):
        # 1. Normalize the PIL.Image.Image: width * height * RGB -> channels(RGB) * height * width
norm_image = numpy.asarray(torchvision.transforms.ToTensor()(scaled_image))
return norm_image, truth_annotation
class RenormAndReformBoxes(object):
"""
    Recovers the bounding boxes from the normalized training tensors and converts the box format:
box_num * (norm_x, norm_y, norm_w, norm_h, label) -> box_num * (xmin, ymin, xmax, ymax, label)
"""
def __init__(self, config: dict) -> None:
super().__init__()
self.config = config
def __call__(self, tensord_boxes: torch.Tensor) -> numpy.ndarray:
numpy_boxes = tensord_boxes.numpy().copy()
numpy_boxes[:, 0] *= self.config["image_width"]
numpy_boxes[:, 1] *= self.config["image_height"]
numpy_boxes[:, 2] *= self.config["image_width"]
numpy_boxes[:, 3] *= self.config["image_height"]
scaled_boxes = numpy_boxes.copy()
scaled_boxes[:, 0] = numpy_boxes[:, 0] - numpy_boxes[:, 2] / 2
scaled_boxes[:, 1] = numpy_boxes[:, 1] - numpy_boxes[:, 3] / 2
scaled_boxes[:, 2] = numpy_boxes[:, 0] + numpy_boxes[:, 2] / 2
scaled_boxes[:, 3] = numpy_boxes[:, 1] + numpy_boxes[:, 3] / 2
        return numpy.around(scaled_boxes).astype(int)  # numpy.int was a deprecated alias for the builtin int
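# Worked round trip (illustration only) with image_width = image_height = 416:
# a normalized box (0.5, 0.5, 0.25, 0.25) becomes (208, 208, 104, 104) in pixels,
# i.e. corners (xmin, ymin, xmax, ymax) = (156, 156, 260, 260).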
def train_collate_fn(batch: List[tuple]) -> (torch.Tensor, torch.Tensor):
"""
    Dataset utility: unpacks a batch of samples and repacks it into batched tensors.
:param batch:
:return:
"""
# print("1:", type(batch), batch) # batch 是一个返回值的数组:[(image, boxes), ……]
# print("2:", *batch) # *batch 将数组解包为:(image, boxes), ……
# print("3:", type(zip(*batch)), list(zip(*batch))) # zip 再次打包为:(image, ……) and (boxes, ……)
norm_images, norm_boxess = zip(*batch)
tensord_images = torch.as_tensor(norm_images)
tensord_boxes_list = [torch.as_tensor(norm_boxes) for norm_boxes in norm_boxess]
return tensord_images, tensord_boxes_list
def eval_collate_fn(batch: List[tuple]) -> (torch.Tensor, List[dict]):
"""
    Dataset utility: unpacks a batch of samples and repacks it into a batched image tensor and a list of annotation dicts.
:param batch:
:return:
"""
# print("1:", type(batch), batch) # batch 是一个返回值的数组:[(image, boxes), ……]
# print("2:", *batch) # *batch 将数组解包为:(image, boxes), ……
# print("3:", type(zip(*batch)), list(zip(*batch))) # zip 再次打包为:(image, ……) and (boxes, ……)
norm_images, truth_annotations = zip(*batch)
tensord_images = torch.as_tensor(norm_images)
truth_annotation_list = list(truth_annotations)
return tensord_images, truth_annotation_list
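# A minimal wiring sketch (illustration only, not part of the original code): repacks a
# hand-built batch with train_collate_fn, exactly as a
# torch.utils.data.DataLoader(..., collate_fn=train_collate_fn) would do per batch.
def _example_collate_wiring():
    norm_image = numpy.zeros((3, 416, 416), dtype=numpy.float32)
    norm_boxes = numpy.asarray([[0.5, 0.5, 0.25, 0.25, 0]], dtype=numpy.float32)
    batch = [(norm_image, norm_boxes), (norm_image, norm_boxes)]
    tensord_images, tensord_boxes_list = train_collate_fn(batch)
    # tensord_images: batch * channels * height * width tensor
    # tensord_boxes_list: one variable-length box tensor per image
    return tensord_images, tensord_boxes_list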
| 33.117333 | 119 | 0.596666 |
13b28aaa2595419ec3268980a2a2381d3ef18c95 | 20 | py | Python | example_project/some_modules/third_modules/a16.py | Yuriy-Leonov/cython_imports_limit_issue | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | [
"MIT"
] | null | null | null | example_project/some_modules/third_modules/a16.py | Yuriy-Leonov/cython_imports_limit_issue | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | [
"MIT"
] | null | null | null | example_project/some_modules/third_modules/a16.py | Yuriy-Leonov/cython_imports_limit_issue | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | [
"MIT"
] | null | null | null |
class A16:
pass
| 6.666667 | 10 | 0.6 |
68729f3e1a90d94e87f6b70900b8bae66fa73ed4 | 400 | py | Python | ratingapp/migrations/0006_auto_20200607_1753.py | Edwin-Karanu-Muiruri/Rating-app | 35b13cec7bffed1f26391cb6e9991cee85e4ee4c | [
"MIT"
] | null | null | null | ratingapp/migrations/0006_auto_20200607_1753.py | Edwin-Karanu-Muiruri/Rating-app | 35b13cec7bffed1f26391cb6e9991cee85e4ee4c | [
"MIT"
] | 6 | 2021-03-30T13:33:32.000Z | 2022-01-13T02:50:26.000Z | ratingapp/migrations/0006_auto_20200607_1753.py | Edwin-Karanu-Muiruri/Rating-app | 35b13cec7bffed1f26391cb6e9991cee85e4ee4c | [
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-06-07 14:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratingapp', '0005_auto_20200607_1732'),
]
operations = [
migrations.RemoveField(
model_name='rating',
name='project',
),
migrations.DeleteModel(
name='Project',
),
]
| 19.047619 | 49 | 0.57 |
0b63c116cc9ff3f972de01448bfb53b9ff94df7e | 1,894 | py | Python | aalh_iit_peopleportraits_010/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_peopleportraits_010/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_peopleportraits_010/merge-description-columns.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_peopleportraits_010.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 499
iterationrow = 7
targetcol = 46
linkstring = 'Terms associated with the photograph are: '
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
descriptiontest = ws.cell(row=iterationrow, column=minimumcol).value
        if descriptiontest is None:
print('No description')
elif descriptiontest.endswith(','):
print(descriptiontest)
description1 = descriptiontest
description2 = description1[:-1]
description3 = description2 + '.'
ws.cell(row=iterationrow, column=minimumcol).value = description3
print(ws.cell(row=iterationrow, column=minimumcol).value)
print('Fixed comma')
for cell in row:
iitdescription = ws.cell(row=iterationrow, column=minimumcol).value
#print(iitdescription)
keywords = ws.cell(row=iterationrow, column=targetcol).value
print(keywords)
        if iitdescription is None:
descriptionmerged = linkstring + keywords
            descriptionfinal = descriptionmerged.replace("’", "'")  # normalize curly apostrophes (reconstructed; the source character was garbled to a plain quote)
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
else:
descriptionmerged = iitdescription + ' ' + linkstring + keywords
            descriptionfinal = descriptionmerged.replace("’", "'")  # normalize curly apostrophes (reconstructed; the source character was garbled to a plain quote)
ws.cell(row=iterationrow, column=minimumcol).value = descriptionfinal
print(ws.cell(row=iterationrow, column=minimumcol).value)
iterationrow = iterationrow + 1
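# Worked example of the merge above (illustration only):
#   existing description: "Portrait of a student,"  -> trailing comma fixed to "."
#   keywords column:      "students; portraits"
#   merged result:        "Portrait of a student. Terms associated with the photograph are: students; portraits"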
wb.save('aalh_iit_peopleportraits_010.xlsx')
| 42.088889 | 105 | 0.658395 |
b7ef95db75aeb08308872adfe96385fd821ec2dd | 15,344 | py | Python | wiphy/code/duc.py | ishikawalab/wiphy | 00131ee4ad2560d9b39f27fd3c2508d810e84802 | [
"MIT"
] | 4 | 2021-03-13T15:17:26.000Z | 2022-03-13T07:51:11.000Z | wiphy/code/duc.py | ishikawalab/wiphy | 00131ee4ad2560d9b39f27fd3c2508d810e84802 | [
"MIT"
] | null | null | null | wiphy/code/duc.py | ishikawalab/wiphy | 00131ee4ad2560d9b39f27fd3c2508d810e84802 | [
"MIT"
] | null | null | null |
# Copyright (c) WiPhy Development Team
# This library is released under the MIT License, see LICENSE.txt
__all__ = ['generateDUCCodes']
import numpy as np
def generateDUCCodes(M, L, initu=None):
"""
    Generates a codebook of the diagonal unitary code (DUC). A seminal study can be found in [1].
- [1] B. M. Hochwald and W. Sweldens, ``Differential unitary space-time modulation,'' IEEE Trans. Commun., vol. 48, no. 12, pp. 2041--2052, 2000.
Args:
M (int): the number of transmit antennas.
        L (int): the constellation size.
        initu: optional diversity factors; None selects the tabulated diversity-maximizing factors, the string "random" draws random factors, and any other array-like value is used as-is.
"""
    codes = np.zeros((L, M, M), dtype=complex)  # np.complex was a deprecated alias for the builtin complex
    if initu is None:
        u = _getDiversityMaximizingFactors(M, L)
    elif isinstance(initu, str) and initu == "random":
u = _getRandomFactors(M, L)
else:
u = initu
for l in range(L):
codes[l] = np.diag(np.exp(1.0j * 2.0 * np.pi * u * l / L))
return codes
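# A minimal usage sketch (illustration only, not part of the original API): builds a
# small codebook and checks the property differential encoding in [1] relies on,
# namely that every codeword X is unitary (X X^H = I).
def _example_codebook_check(M=2, L=16):
    codes = generateDUCCodes(M, L)
    for X in codes:
        assert np.allclose(X @ np.conj(X.T), np.eye(M))
    return codes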
def _getRandomFactors(M, L):
ret = np.ones(M)
ret[1:] = np.sort(np.random.randint(L / 2, size=M - 1) + 1)
return ret
def _getDiversityMaximizingFactors(M, L):
if M == 1 and L == 1:
u = [1]
elif M == 1 and L == 2:
u = [1]
elif M == 1 and L == 4:
u = [1]
elif M == 2 and L == 2:
u = [1, 1]
elif M == 2 and L == 4:
u = [1, 1]
elif M == 2 and L == 16:
u = [1, 7]
elif M == 2 and L == 256:
# maxp = 0.0988238
u = [1, 75]
elif M == 2 and L == 1024:
# maxp = 0.00296903
u = [1, 429]
elif M == 3 and L == 8:
u = [1, 1, 3]
elif M == 3 and L == 64:
u = [1, 11, 27]
elif M == 4 and L == 4:
u = [1, 1, 1, 1]
elif M == 4 and L == 16:
#u = [1, 5, 5, 7] # maxp = 0.125
u = [1, 3, 5, 7] # maxp = ?
elif M == 4 and L == 64:
# maxp = 0.0328117
#u = [1, 18, 23, 26]
# maxp = 0.0354893
#u = [1, 20, 24, 25]
# maxp = 0.035822
#u = [1, 21, 23, 25]
# maxp = 0.0366101
u = [1, 21, 24, 25]
elif M == 4 and L == 256:
u = [1, 25, 97, 107]
# maxp = 0.00947052
u = [1, 93, 94, 97]
# maxp = 0.220834
u = [1, 35, 41, 119]
elif M == 4 and L == 1024:
# maxp = 0.00236106
u = [1, 369, 378, 387]
# maxp = 0.00236452
u = [1, 369, 381, 385]
elif M == 4 and L == 4096:
# maxp = 0.10357
u = [1, 575, 1059, 1921]
elif M == 4 and L == 65536:
# maxp = 0.0459484
u = [1, 12301, 15259, 29983]
elif M == 5 and L == 32:
u = [1, 5, 7, 9, 11]
elif M == 5 and L == 1024:
u = [1, 157, 283, 415, 487]
elif M == 8 and L == 16:
# maxp = 0.0697013
u = [1, 5, 5, 5, 5, 5, 6, 7]
elif M == 8 and L == 256:
# maxp = 0.00532698
u = [1, 84, 87, 88, 89, 91, 91, 97]
elif M == 8 and L == 65536:
# maxp = 6.43277e-06
u = [1, 16722, 17014, 20852, 22321, 23781, 24192, 29994]
elif M == 16 and L == 2:
# maxp = 1
u = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
elif M == 16 and L == 4:
# maxp = 0.707107
u = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3]
elif M == 16 and L == 16:
# maxp = 0.545254
u = [1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7]
elif M == 16 and L == 64:
## maxp = 0.441684
# u = [1, 3, 3, 5, 7, 9, 11, 11, 15, 17, 19, 21, 23, 25, 27, 29]
# maxp = 0.510949, wrong metric?
u = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
elif M == 16 and L == 256:
# maxp = 0.387993
# u = [3, 13, 19, 31, 33, 53, 61, 67, 69, 77, 87, 91, 111, 119, 123, 127]
# E1 joint optimization MED = 0.0007319532597573265
# u = [1, 1, 2, 37, 40, 41, 44, 50, 53, 58, 59, 60, 104, 105, 106, 123]
# maxp = 0.3593
u = [1, 27, 35, 41, 43, 55, 63, 75, 77, 87, 89, 93, 101, 107, 117, 125]
elif M == 16 and L == 1024:
# maxp = 0.000236564
u = [1, 223, 258, 278, 305, 320, 322, 347, 356, 359, 362, 362, 362, 386, 394, 403]
elif M == 16 and L == 2048:
# maxp = 0.000117768
u = [1, 446, 516, 555, 609, 640, 644, 694, 712, 718, 723, 724, 724, 772, 787, 805]
elif M == 16 and L == 4096:
# maxp = 5.86961e-05
u = [1, 892, 1031, 1110, 1217, 1280, 1287, 1388, 1424, 1436, 1445, 1447, 1448, 1543, 1573, 1610]
elif M == 16 and L == 65536:
# maxp = 0.166058
u = [3, 1469, 3125, 7251, 8857, 10843, 11229, 13703, 14535, 17301, 17379, 19229, 23447, 24741, 30717, 32767]
elif M == 32 and L == 256:
# maxp = 1.35527e-05
u = [1, 43, 48, 48, 53, 53, 55, 63, 70, 75, 75, 75, 78, 78, 78, 80, 81, 83, 86, 89, 89, 90, 90, 94, 99, 102,
102, 105, 105, 107, 110, 110]
elif M == 32 and L == 1024:
# maxp = 1.08268e-06
u = [1, 138, 167, 182, 193, 238, 243, 250, 254, 264, 264, 273, 274, 275, 278, 291, 303, 308, 315, 350, 351, 358,
366, 373, 374, 377, 402, 411, 425, 439, 469, 494]
elif M == 64 and L == 64:
# maxp = 2.13874e-11
u = [1, 7, 8, 8, 10, 10, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15,
16, 16, 16, 16, 16, 17, 17, 18, 18, 19, 20, 21, 21, 21, 22, 22, 24, 24, 25, 26, 26, 26, 27, 27, 27, 27, 27,
28, 28, 28, 28, 29, 29, 29, 30, 30]
elif M == 64 and L == 128:
# maxp = 0.454461
u = [2, 3, 3, 3, 3, 3, 3, 5, 5, 7, 9, 13, 13, 13, 13, 13, 15, 17, 17, 19, 19, 21, 23, 23, 25, 25, 27, 27,
27, 27, 29, 29, 29, 31, 31, 31, 35, 35, 35, 37, 37, 41, 41, 43, 43, 43, 45, 45, 45, 47, 49, 49, 49, 51,
53, 55, 55, 57, 59, 61, 63, 63, 63, 63]
elif M == 64 and L == 256:
# maxp = 2.87193e-12
# u = [1, 12, 26, 26, 27, 29, 30, 31, 32, 42, 47, 50, 50, 53, 54, 54, 56, 57, 57, 58, 61, 62, 64, 66, 68, 68,
# 68, 69, 69, 71, 76, 77, 78, 79, 79, 80, 80, 81, 82, 84, 86, 88, 89, 90, 90, 90, 90, 93, 95, 96, 97, 98,
# 102, 104, 105, 105, 106, 110, 111, 116, 120, 122, 122, 125]
# maxp = 0.00186234
# u = [1, 1, 3, 3, 9, 16, 17, 19, 23, 23, 23, 24, 25, 25, 25, 27, 27, 27, 31, 36, 36, 41, 42, 43, 43, 45, 48,
# 51, 53, 54, 55, 57, 63, 63, 63, 67, 69, 70, 71, 75, 77, 81, 83, 83, 83, 84, 85, 87, 87, 87, 89, 91, 97,
# 105, 105, 105, 106, 110, 111, 113, 115, 117, 125, 128]
# maxp = 0.00903006
u = [1, 4, 5, 11, 13, 13, 21, 23, 26, 26, 33, 33, 33, 41, 41, 45, 45, 46, 49, 49, 53, 55, 55, 57, 59, 61,
63, 63, 66, 67, 67, 69, 71, 71, 75, 75, 76, 77, 79, 79, 79, 81, 81, 83, 87, 87, 91, 93, 93, 94, 95, 96,
97, 99, 99, 101, 111, 111, 111, 113, 113, 117, 117, 124]
elif M == 64 and L == 1024:
# maxp = 6.08339e-12
u = [1, 59, 110, 118, 125, 138, 151, 178, 191, 191, 196, 197, 204, 213, 231, 239, 247, 247, 249, 253, 255,
263, 263, 265, 265, 267, 271, 275, 277, 284, 288, 298, 306, 308, 309, 311, 317, 325, 328, 330, 331,
332, 334, 335, 341, 342, 351, 359, 367, 371, 373, 378, 381, 384, 386, 401, 402, 406, 408, 442, 444,
482, 484, 487]
elif M == 256 and L == 1024:
# maxp = 8.25184e-57
u = [1, 14, 17, 18, 31, 31, 41, 43, 44, 46, 48, 51, 54, 55, 57, 60, 62, 64, 64, 74, 76, 86, 93, 93, 96, 98,
98, 101, 104, 105, 107, 107, 109, 110, 111, 113, 113, 116, 117, 117, 120, 122, 123, 126, 127, 128, 130,
133, 135, 138, 138, 139, 145, 149, 150, 154, 159, 159, 159, 160, 161, 163, 164, 166, 169, 173, 174,
176, 177, 180, 181, 184, 189, 190, 191, 193, 194, 195, 195, 198, 201, 201, 202, 203, 207, 207, 209,
213, 220, 222, 222, 225, 225, 230, 230, 231, 237, 238, 239, 240, 241, 243, 245, 250, 252, 252, 252,
255, 256, 259, 261, 264, 265, 266, 268, 269, 269, 270, 271, 273, 275, 275, 279, 280, 280, 281, 281,
285, 285, 286, 288, 290, 291, 293, 295, 296, 301, 301, 302, 303, 306, 306, 308, 308, 310, 310, 314,
314, 315, 315, 315, 325, 330, 330, 331, 333, 335, 335, 336, 337, 340, 340, 341, 343, 343, 343, 346,
346, 349, 349, 350, 352, 352, 356, 358, 358, 359, 364, 365, 368, 369, 369, 372, 373, 374, 376, 378,
378, 379, 384, 385, 386, 387, 389, 390, 393, 395, 397, 401, 401, 402, 404, 406, 406, 407, 411, 416,
418, 420, 420, 422, 424, 425, 426, 427, 431, 432, 434, 436, 439, 440, 441, 442, 442, 448, 449, 450,
451, 452, 453, 453, 455, 456, 461, 463, 465, 465, 466, 467, 468, 468, 470, 474, 476, 477, 479, 479,
482, 482, 484, 485, 485, 486, 487, 493, 495]
elif M == 1024 and L == 4096:
# maxp = 3.30684e-271
u = [1, 18, 22, 22, 25, 29, 33, 35, 40, 40, 42, 50, 51, 52, 57, 57, 67, 67, 67, 75, 76, 78, 78, 80, 85, 87,
89, 94, 94, 95, 99, 100, 101, 101, 105, 115, 116, 118, 118, 119, 119, 121, 137, 139, 141, 141, 141,
143, 155, 158, 160, 162, 163, 166, 168, 174, 176, 178, 181, 181, 188, 189, 189, 190, 190, 194, 196,
196, 197, 199, 200, 204, 210, 214, 215, 216, 219, 222, 225, 226, 226, 232, 233, 235, 237, 238, 238,
239, 240, 241, 244, 245, 247, 251, 252, 252, 255, 260, 261, 263, 263, 266, 270, 270, 272, 273, 273,
276, 276, 278, 286, 288, 293, 303, 305, 310, 313, 313, 315, 315, 315, 316, 318, 318, 318, 319, 319,
322, 326, 327, 329, 329, 330, 330, 331, 332, 334, 337, 337, 338, 338, 339, 343, 346, 346, 347, 354,
355, 355, 357, 357, 359, 363, 363, 363, 366, 369, 379, 382, 391, 397, 400, 401, 404, 405, 406, 407,
408, 408, 409, 410, 411, 416, 416, 419, 419, 421, 421, 423, 430, 430, 431, 431, 432, 432, 432, 435,
435, 439, 440, 441, 443, 445, 445, 445, 447, 448, 451, 451, 452, 453, 456, 460, 465, 465, 466, 466,
467, 469, 474, 476, 477, 478, 479, 487, 488, 489, 491, 493, 496, 497, 499, 503, 504, 505, 508, 509,
510, 514, 516, 518, 518, 518, 519, 525, 526, 527, 529, 530, 534, 541, 541, 542, 543, 543, 544, 547,
548, 550, 550, 554, 555, 561, 564, 569, 572, 572, 574, 576, 578, 578, 579, 583, 583, 587, 592, 594,
596, 609, 616, 622, 623, 624, 630, 633, 634, 635, 637, 642, 646, 649, 651, 651, 653, 662, 663, 666,
667, 668, 671, 672, 674, 674, 676, 676, 682, 684, 685, 685, 687, 687, 688, 698, 699, 704, 708, 709,
710, 711, 715, 716, 716, 717, 718, 718, 723, 724, 724, 725, 726, 727, 729, 730, 736, 739, 743, 744,
744, 745, 748, 749, 751, 755, 761, 761, 762, 762, 766, 769, 771, 776, 778, 778, 779, 780, 780, 783,
787, 788, 789, 790, 792, 792, 793, 793, 794, 795, 795, 795, 796, 796, 799, 801, 803, 803, 804, 807,
807, 814, 815, 822, 822, 824, 824, 825, 835, 836, 837, 837, 838, 844, 844, 846, 847, 847, 849, 850,
850, 854, 855, 857, 857, 862, 863, 867, 869, 869, 870, 875, 876, 876, 882, 883, 884, 885, 886, 888,
890, 892, 893, 894, 895, 897, 901, 905, 908, 909, 909, 911, 911, 916, 918, 918, 921, 922, 923, 927,
928, 932, 938, 940, 940, 945, 946, 946, 948, 949, 949, 951, 957, 958, 959, 962, 963, 965, 965, 969,
971, 979, 981, 983, 984, 986, 990, 992, 995, 997, 997, 998, 1001, 1002, 1003, 1006, 1016, 1021, 1023,
1025, 1026, 1028, 1029, 1031, 1033, 1033, 1036, 1037, 1038, 1039, 1039, 1041, 1041, 1041, 1042, 1042,
1043, 1043, 1044, 1044, 1044, 1051, 1054, 1056, 1060, 1060, 1065, 1065, 1069, 1070, 1073, 1074, 1075,
1078, 1078, 1080, 1081, 1082, 1083, 1084, 1086, 1089, 1089, 1090, 1092, 1097, 1099, 1101, 1101, 1101,
1102, 1104, 1106, 1107, 1114, 1117, 1118, 1118, 1121, 1123, 1124, 1124, 1125, 1126, 1128, 1131, 1133,
1134, 1135, 1135, 1135, 1138, 1145, 1146, 1149, 1150, 1156, 1158, 1158, 1159, 1160, 1161, 1162, 1163,
1168, 1169, 1169, 1171, 1174, 1178, 1179, 1182, 1182, 1183, 1183, 1184, 1188, 1188, 1192, 1194, 1194,
1194, 1197, 1198, 1199, 1201, 1205, 1207, 1208, 1210, 1211, 1212, 1215, 1216, 1217, 1217, 1219, 1219,
1224, 1224, 1224, 1228, 1229, 1233, 1234, 1237, 1237, 1237, 1238, 1242, 1242, 1242, 1245, 1246, 1250,
1251, 1253, 1253, 1253, 1254, 1255, 1255, 1256, 1256, 1257, 1259, 1261, 1262, 1266, 1267, 1268, 1269,
1270, 1272, 1277, 1282, 1283, 1288, 1289, 1291, 1296, 1296, 1296, 1297, 1297, 1298, 1299, 1299, 1302,
1303, 1304, 1305, 1307, 1309, 1312, 1314, 1315, 1316, 1321, 1322, 1326, 1329, 1331, 1331, 1335, 1335,
1336, 1340, 1342, 1342, 1342, 1344, 1344, 1347, 1347, 1354, 1356, 1359, 1362, 1362, 1362, 1363, 1364,
1366, 1367, 1372, 1374, 1376, 1377, 1378, 1380, 1380, 1380, 1384, 1385, 1385, 1386, 1386, 1389, 1390,
1391, 1393, 1393, 1394, 1396, 1397, 1401, 1401, 1406, 1409, 1412, 1415, 1415, 1417, 1421, 1421, 1422,
1423, 1425, 1427, 1428, 1429, 1429, 1432, 1436, 1436, 1437, 1442, 1443, 1445, 1446, 1447, 1448, 1448,
1453, 1455, 1459, 1460, 1460, 1461, 1461, 1463, 1463, 1463, 1464, 1466, 1469, 1474, 1475, 1476, 1476,
1477, 1486, 1486, 1487, 1488, 1488, 1491, 1502, 1504, 1504, 1504, 1505, 1508, 1510, 1512, 1513, 1513,
1516, 1524, 1525, 1528, 1528, 1534, 1536, 1536, 1540, 1542, 1543, 1545, 1545, 1547, 1547, 1551, 1552,
1554, 1554, 1555, 1556, 1557, 1557, 1557, 1559, 1560, 1562, 1562, 1564, 1565, 1565, 1567, 1570, 1572,
1575, 1576, 1581, 1585, 1587, 1588, 1589, 1589, 1591, 1592, 1592, 1594, 1596, 1598, 1601, 1608, 1610,
1612, 1612, 1616, 1618, 1620, 1621, 1624, 1629, 1630, 1631, 1632, 1634, 1635, 1638, 1640, 1640, 1641,
1642, 1643, 1648, 1650, 1654, 1655, 1656, 1664, 1666, 1670, 1674, 1674, 1677, 1678, 1678, 1682, 1683,
1687, 1687, 1688, 1688, 1690, 1691, 1694, 1695, 1699, 1699, 1700, 1702, 1703, 1706, 1707, 1709, 1711,
1712, 1718, 1720, 1722, 1722, 1726, 1727, 1729, 1729, 1730, 1732, 1732, 1734, 1735, 1739, 1740, 1742,
1742, 1743, 1746, 1746, 1747, 1748, 1749, 1750, 1753, 1754, 1762, 1764, 1766, 1767, 1767, 1769, 1770,
1770, 1774, 1776, 1782, 1783, 1785, 1785, 1787, 1792, 1794, 1796, 1806, 1808, 1812, 1813, 1818, 1819,
1821, 1826, 1826, 1833, 1834, 1834, 1836, 1841, 1841, 1841, 1842, 1843, 1845, 1846, 1847, 1850, 1850,
1855, 1859, 1862, 1862, 1863, 1864, 1864, 1864, 1867, 1868, 1872, 1872, 1873, 1875, 1875, 1881, 1887,
1888, 1888, 1890, 1892, 1897, 1898, 1902, 1903, 1904, 1905, 1907, 1910, 1911, 1913, 1913, 1914, 1914,
1915, 1920, 1920, 1923, 1923, 1926, 1927, 1927, 1928, 1930, 1933, 1936, 1940, 1944, 1945, 1946, 1946,
1947, 1948, 1948, 1949, 1951, 1952, 1954, 1957, 1958, 1964, 1965, 1967, 1970, 1972, 1973, 1973, 1982,
1987, 1987, 1987, 1988, 1989, 1993, 1993, 1994, 1996, 1999, 2000, 2001, 2003, 2005, 2006, 2008, 2008,
2011, 2013, 2020, 2024, 2025, 2027, 2030, 2033, 2035, 2038, 2041, 2045, 2046, 2046]
else:
print("duc.py does not support the given parameters M = %d and L = %d" % (M, L))
u = np.sort(np.random.randint(L / 2, size=M) + 1)
return np.array(u)
| 60.172549 | 149 | 0.510949 |
092416c0f0ddea338b98d750aa6a26012ff97df7 | 28,463 | py | Python | software/v2.3c/test/run_test.py | HelloWorksGroup/nanoDAP | 3d7b680c015099a3850ab270535001ef0cdd5c96 | [
"Apache-2.0"
] | 708 | 2018-10-07T05:51:03.000Z | 2022-03-31T08:24:21.000Z | software/v2.3c/test/run_test.py | fsyinghua/nanoDAP | abb2576144f368c224b653192094c20ed8aa74d7 | [
"Apache-2.0"
] | 18 | 2019-01-20T16:07:00.000Z | 2022-03-20T04:27:34.000Z | software/v2.3c/test/run_test.py | fsyinghua/nanoDAP | abb2576144f368c224b653192094c20ed8aa74d7 | [
"Apache-2.0"
] | 209 | 2018-12-27T12:49:11.000Z | 2022-03-29T13:16:35.000Z |
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
DAPLink validation and testing tool
optional arguments:
-h, --help show this help message and exit
--targetdir TARGETDIR
Directory with pre-built target test images.
--user USER MBED username (required for compile-api)
--password PASSWORD MBED password (required for compile-api)
--firmwaredir FIRMWAREDIR
Directory with firmware images to test
--firmware {k20dx_k64f_if,lpc11u35_sscity_if,...} (run script with --help to see full list)
Firmware to test
  --project-tool {uvision,mbedcli}
                        Tool used to compile the project (default: uvision)
--logdir LOGDIR Directory to log test results to
--noloadif Skip load step for interface.
  --notestendpt         Don't test the interface USB endpoints.
--loadbl Load bootloader before test.
--testdl Run DAPLink specific tests. The DAPLink test tests
bootloader updates so use with caution
--testfirst If multiple boards of the same type are found only
test the first one.
--verbose {Minimal,Normal,Verbose,All}
Verbose output
  --dryrun              Print info on configurations but don't actually run
tests.
--force Try to run tests even if there are problems. Delete logs from previous run.
Example usages
------------------------
Test all built projects in the repository:
test_all.py --user <username> --password <password>
Test everything on a single project in the repository:
test_all.py --project <project> --testfirst --user <username>
--password <password>
Verify that the USB endpoints are working correctly on
an existing board with firmware already loaded:
test_all.py --noloadif --user <username> --password <password>
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import shutil
import argparse
import subprocess
from enum import Enum
from hid_test import test_hid
from serial_test import test_serial
from msd_test import test_mass_storage
from usb_test import test_usb
from daplink_board import get_all_attached_daplink_boards
from project_generator.generate import Generator
from test_info import TestInfo
from daplink_firmware import load_bundle_from_project, load_bundle_from_release
from firmware import Firmware
from target import load_target_bundle, build_target_bundle
from test_daplink import daplink_test
import info
DEFAULT_TEST_DIR = './test_results'
VERB_MINIMAL = 'Minimal' # Just top level errors
VERB_NORMAL = 'Normal' # Top level errors and warnings
VERB_VERBOSE = 'Verbose' # All errors and warnings
VERB_ALL = 'All' # All errors
VERB_LEVELS = [VERB_MINIMAL, VERB_NORMAL, VERB_VERBOSE, VERB_ALL]
def test_endpoints(workspace, parent_test):
"""Run tests to validate DAPLINK fimrware"""
test_info = parent_test.create_subtest('test_endpoints')
test_hid(workspace, test_info)
test_serial(workspace, test_info)
test_mass_storage(workspace, test_info)
test_usb(workspace, test_info)
class TestConfiguration(object):
"""Wrap all the resources needed to run a test"""
def __init__(self, name):
self.name = name
self.board = None
self.target = None
self.if_firmware = None
self.bl_firmware = None
def __str__(self):
name_board = '<None>'
name_target = '<None>'
name_if_firmware = '<None>'
name_bl_firmware = '<None>'
if self.board is not None:
name_board = self.board.name
if self.target is not None:
name_target = self.target.name
if self.if_firmware is not None:
name_if_firmware = self.if_firmware.name
if self.bl_firmware is not None:
name_bl_firmware = self.bl_firmware.name
return "APP=%s BL=%s Board=%s Target=%s" % (name_if_firmware,
name_bl_firmware,
name_board, name_target)
class TestManager(object):
"""Handle tests configuration running and results"""
class _STATE(Enum):
INIT = 0
CONFIGURED = 1
COMPLETE = 2
def __init__(self):
# By default test all configurations and boards
self._target_list = []
self._board_list = []
self._firmware_list = []
self._only_test_first = False
self._load_if = True
self._load_bl = True
self._test_daplink = True
self._test_ep = True
# Internal state
self._state = self._STATE.INIT
self._test_configuration_list = None
self._all_tests_pass = None
self._firmware_filter = None
self._untested_firmware = None
@property
def all_tests_pass(self):
assert self._all_tests_pass is not None, 'Must call run_tests first'
return self._all_tests_pass
def set_test_first_board_only(self, first):
"""Only test one board of each type"""
assert isinstance(first, bool)
assert self._state is self._STATE.INIT
self._only_test_first = first
def set_load_if(self, load):
"""Load new interface firmware before testing"""
assert isinstance(load, bool)
assert self._state is self._STATE.INIT
self._load_if = load
def set_load_bl(self, load):
"""Load new bootloader firmware before testing"""
assert isinstance(load, bool)
assert self._state is self._STATE.INIT
self._load_bl = load
def set_test_daplink(self, run_test):
"""Run DAPLink specific tests"""
assert isinstance(run_test, bool)
assert self._state is self._STATE.INIT
self._test_daplink = run_test
def set_test_ep(self, run_test):
"""Test each endpoint - MSD, CDC, HID"""
assert isinstance(run_test, bool)
assert self._state is self._STATE.INIT
self._test_ep = run_test
def add_firmware(self, firmware_list):
"""Add firmware to be tested"""
assert self._state is self._STATE.INIT
self._firmware_list.extend(firmware_list)
def add_boards(self, board_list):
"""Add boards to be used for testing"""
assert self._state is self._STATE.INIT
self._board_list.extend(board_list)
def add_targets(self, target_list):
"""Add targets to be used for testing"""
assert self._state is self._STATE.INIT
self._target_list.extend(target_list)
def set_firmware_filter(self, name_list):
"""Test only the project names passed given"""
assert self._state is self._STATE.INIT
assert self._firmware_filter is None
self._firmware_filter = set(name_list)
def run_tests(self):
"""Run all configurations"""
# Tests can only be run once per TestManager instance
assert self._state is self._STATE.CONFIGURED
self._state = self._STATE.COMPLETE
all_tests_pass = True
for test_configuration in self._test_configuration_list:
board = test_configuration.board
test_info = TestInfo(test_configuration.name)
test_configuration.test_info = test_info
test_info.info("Board: %s" % test_configuration.board)
test_info.info("Application: %s" %
test_configuration.if_firmware)
test_info.info("Bootloader: %s" %
test_configuration.bl_firmware)
test_info.info("Target: %s" % test_configuration.target)
valid_bl = test_configuration.bl_firmware is not None
if self._load_bl and valid_bl:
bl_path = test_configuration.bl_firmware.hex_path
board.load_bootloader(bl_path, test_info)
if self._load_if:
if_path = test_configuration.if_firmware.hex_path
board.load_interface(if_path, test_info)
board.set_check_fs_on_remount(True)
if self._test_daplink:
daplink_test(test_configuration, test_info)
if self._test_ep:
test_endpoints(test_configuration, test_info)
if test_info.get_failed():
all_tests_pass = False
self._all_tests_pass = all_tests_pass
def print_results(self, info_level):
assert self._state is self._STATE.COMPLETE
# Print info for boards tested
for test_configuration in self._test_configuration_list:
print('')
test_info = test_configuration.test_info
if info_level == VERB_MINIMAL:
test_info.print_msg(TestInfo.FAILURE, 0)
elif info_level == VERB_NORMAL:
test_info.print_msg(TestInfo.WARNING, None)
elif info_level == VERB_VERBOSE:
test_info.print_msg(TestInfo.WARNING, None)
elif info_level == VERB_ALL:
test_info.print_msg(TestInfo.INFO, None)
else:
# This should never happen
assert False
def write_test_results(self, directory, git_sha=None, local_changes=None,
info_level=TestInfo.INFO):
assert self._state is self._STATE.COMPLETE
assert not os.path.exists(directory)
os.mkdir(directory)
# Write out version of tools used for test
tools_file = directory + os.sep + 'requirements.txt'
with open(tools_file, "w") as file_handle:
command = ['pip', 'freeze']
subprocess.check_call(command, stdin=subprocess.PIPE,
stdout=file_handle,
stderr=subprocess.STDOUT)
# Write out each test result
for test_configuration in self._test_configuration_list:
test_info = test_configuration.test_info
file_path = directory + os.sep + test_info.name + '.txt'
with open(file_path, 'w') as file_handle:
file_handle.write("Test configuration: %s\n" %
test_configuration)
file_handle.write("Board: %s\n" % test_configuration.board)
file_handle.write("Application: %s\n" %
test_configuration.if_firmware)
file_handle.write("Bootloader: %s\n" %
test_configuration.bl_firmware)
file_handle.write("Target: %s\n" % test_configuration.target)
file_handle.write("\n")
test_info.print_msg(info_level, None, log_file=file_handle)
# Write out summary
summary_file = directory + os.sep + 'summary.txt'
with open(summary_file, "w") as file_handle:
# Overall result
if self.all_tests_pass:
file_handle.write("All tests pass\n\n")
else:
file_handle.write("One or more tests have failed\n\n")
if git_sha is not None and local_changes is not None:
file_handle.write("Git info for test:\n")
file_handle.write(" Git SHA: %s\n" % git_sha)
file_handle.write(" Local changes: %s\n" % local_changes)
file_handle.write("\n")
# Results for each test
file_handle.write("Test settings:\n")
file_handle.write(" Load application before test: %s\n" %
self._load_if)
file_handle.write(" Load bootloader before test: %s\n" %
self._load_bl)
file_handle.write(" Run DAPLink specific tests: %s\n" %
self._test_daplink)
file_handle.write(" Run endpoint tests: %s\n" %
self._test_ep)
file_handle.write("\n")
# Results for each test
file_handle.write("Tested configurations:\n")
for test_configuration in self._test_configuration_list:
test_info = test_configuration.test_info
test_passed = test_info.get_failed() == 0
result_str = 'Pass' if test_passed else 'Fail'
file_handle.write(" %s: %s\n" %
(test_configuration, result_str))
file_handle.write("\n")
# Untested firmware
untested_list = self.get_untested_firmware()
if len(untested_list) == 0:
file_handle.write("All firmware in package tested\n")
else:
file_handle.write("Untested firmware:\n")
for untested_fw in self.get_untested_firmware():
file_handle.write(" %s\n" % untested_fw.name)
file_handle.write("\n")
# Target test images
target_dir = directory + os.sep + 'target'
os.mkdir(target_dir)
for target in self._target_list:
new_hex = target_dir + os.sep + os.path.basename(target.hex_path)
shutil.copy(target.hex_path, new_hex)
new_bin = target_dir + os.sep + os.path.basename(target.bin_path)
shutil.copy(target.bin_path, new_bin)
def get_test_configurations(self):
assert self._state in (self._STATE.CONFIGURED,
self._STATE.COMPLETE)
return self._test_configuration_list
def get_untested_firmware(self):
assert self._state in (self._STATE.CONFIGURED,
self._STATE.COMPLETE)
return self._untested_firmware
def build_test_configurations(self, parent_test):
assert self._state is self._STATE.INIT
self._state = self._STATE.CONFIGURED
test_info = parent_test.create_subtest('Build test configuration')
# Create table mapping each board id to a list of boards with that ID
board_id_to_board_list = {}
for board in self._board_list:
board_id = board.get_board_id()
if board_id not in board_id_to_board_list:
board_id_to_board_list[board_id] = []
board_list = board_id_to_board_list[board_id]
if self._only_test_first and len(board_list) > 1:
# Ignore this board since we already have one
test_info.info('Ignoring extra boards of type 0x%x' %
board_id)
continue
board_list.append(board)
# Create a list for bootloader firmware and interface firmware
bootloader_firmware_list = []
filtered_interface_firmware_list = []
for firmware in self._firmware_list:
if firmware.type == Firmware.TYPE.BOOTLOADER:
bootloader_firmware_list.append(firmware)
elif firmware.type == Firmware.TYPE.INTERFACE:
name = firmware.name
if ((self._firmware_filter is None) or
(name in self._firmware_filter)):
filtered_interface_firmware_list.append(firmware)
else:
assert False, 'Unsupported firmware type "%s"' % firmware.type
# Create a table mapping name to object with that name
TARGET_NAME_TO_TARGET = {target.name: target for target in
self._target_list}
FIRMWARE_NAME_TO_FIRMWARE = {firmware.name: firmware for firmware in
filtered_interface_firmware_list}
BL_NAME_TO_BL = {firmware.name: firmware for firmware in
bootloader_firmware_list}
# Explicitly specified boards must be present
fw_name_set = set(fw.name for fw in filtered_interface_firmware_list)
if self._firmware_filter is not None:
assert self._firmware_filter == fw_name_set
# Create test configurations for each supported configuration
test_conf_list = []
untested_firmware = set(filtered_interface_firmware_list)
for board_id, fw_name, bl_fw_name, target_name in info.SUPPORTED_CONFIGURATIONS:
target = None
if_firmware = None
bl_firmware = None
if target_name in TARGET_NAME_TO_TARGET:
target = TARGET_NAME_TO_TARGET[target_name]
if fw_name in FIRMWARE_NAME_TO_FIRMWARE:
if_firmware = FIRMWARE_NAME_TO_FIRMWARE[fw_name]
if bl_fw_name in BL_NAME_TO_BL:
bl_firmware = BL_NAME_TO_BL[bl_fw_name]
target_required = self._test_ep
bl_required = self._load_bl or self._test_daplink
if if_firmware is None:
# Skip configuration
continue
if target_required and target is None:
# Skip configuration
test_info.info('No target to test firmware %s' % fw_name)
continue
if bl_required and bl_firmware is None:
# Skip configuration
test_info.info('No bootloader to test firmware %s' % fw_name)
continue
# Check if there is a board to test this firmware
# and if not skip it
if board_id not in board_id_to_board_list:
test_info.info('No board to test firmware %s' % fw_name)
continue
# Create a test configuration for each board
board_list = board_id_to_board_list[board_id]
for board in board_list:
test_conf = TestConfiguration(if_firmware.name + ' ' +
board.name)
test_conf.if_firmware = if_firmware
test_conf.bl_firmware = bl_firmware
test_conf.board = board
test_conf.target = target
test_conf_list.append(test_conf)
# remove this from the untested list
if if_firmware in untested_firmware:
untested_firmware.remove(if_firmware)
assert bl_firmware not in untested_firmware
self._untested_firmware = list(untested_firmware)
self._test_configuration_list = test_conf_list
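# A minimal driver sketch (illustration only; main() below wires the full flow,
# including firmware/target bundle loading and result logging):
#   tester = TestManager()
#   tester.add_boards(get_all_attached_daplink_boards())
#   tester.add_firmware(firmware_list)   # Firmware objects from a loaded bundle
#   tester.add_targets(target_list)      # target test images for endpoint tests
#   tester.build_test_configurations(TestInfo('example'))
#   tester.run_tests()
#   tester.print_results(VERB_NORMAL)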
def get_firmware_names(project_dir):
# Save current directory
cur_dir = os.getcwd()
os.chdir(project_dir)
try:
all_names = set()
projects = list(Generator('projects.yaml').generate())
for project in projects:
assert project.name not in all_names
all_names.add(project.name)
finally:
# Restore the current directory
os.chdir(cur_dir)
return list(all_names)
def get_git_info(project_dir):
cur_dir = os.getcwd()
os.chdir(project_dir)
# Get the git SHA.
try:
git_sha = subprocess.check_output(["git", "rev-parse",
"--verify", "HEAD"])
git_sha = git_sha.strip()
    except (subprocess.CalledProcessError, OSError):  # OSError also covers a missing git executable; WindowsError is its Windows-only alias
print("#> ERROR: Failed to get git SHA, do you "
"have git in your PATH environment variable?")
exit(-1)
# Check are there any local, uncommitted modifications.
try:
subprocess.check_output(["git", "diff", "--no-ext-diff",
"--quiet", "--exit-code"])
except subprocess.CalledProcessError:
git_has_changes = True
else:
git_has_changes = False
os.chdir(cur_dir)
return git_sha, git_has_changes
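# Illustrative use of the helper above (the path is hypothetical):
#   sha, dirty = get_git_info('/path/to/daplink')
#   print(sha, '(local changes)' if dirty else '(clean)')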
def main():
self_path = os.path.abspath(__file__)
test_dir = os.path.dirname(self_path)
daplink_dir = os.path.dirname(test_dir)
    # We make assumptions that break if the user copies this script outside the test dir
if os.path.basename(test_dir) != "test":
print("Error - this script must reside in the test directory")
exit(-1)
git_sha, local_changes = get_git_info(daplink_dir)
firmware_list = get_firmware_names(daplink_dir)
firmware_choices = [firmware for firmware in firmware_list if
firmware.endswith('_if')]
description = 'DAPLink validation and testing tool'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--targetdir',
help='Directory with pre-built target test images.',
default=None)
parser.add_argument('--user', type=str, default=None,
help='MBED username (required for compile-api)')
parser.add_argument('--password', type=str, default=None,
help='MBED password (required for compile-api)')
parser.add_argument('--firmwaredir',
help='Directory with firmware images to test',
default=None)
parser.add_argument('--project-tool', choices=['uvision', 'mbedcli'],
help='Tool used to compile the project',
default='uvision')
parser.add_argument('--firmware', help='Firmware to test', action='append',
choices=firmware_choices, default=[], required=False)
parser.add_argument('--logdir', help='Directory to log test results to',
default=DEFAULT_TEST_DIR)
parser.add_argument('--noloadif', help='Skip load step for interface.',
default=False, action='store_true')
    parser.add_argument('--notestendpt', help="Don't test the interface "
'USB endpoints.', default=False, action='store_true')
parser.add_argument('--loadbl', help='Load bootloader before test.',
default=False, action='store_true')
parser.add_argument('--testdl', help='Run DAPLink specific tests. '
                        'The DAPLink test tests bootloader updates so use '
                        'with caution.',
default=False, action='store_true')
parser.add_argument('--testfirst', help='If multiple boards of the same '
'type are found only test the first one.',
default=False, action='store_true')
parser.add_argument('--verbose', help='Verbose output',
choices=VERB_LEVELS, default=VERB_NORMAL)
parser.add_argument('--dryrun', default=False, action='store_true',
                        help="Print info on configurations but don't "
'actually run tests.')
parser.add_argument('--force', action='store_true', default=False,
help='Try to run tests even if there are problems. Delete logs from previous run.')
args = parser.parse_args()
use_prebuilt = args.targetdir is not None
use_compile_api = args.user is not None and args.password is not None
test_info = TestInfo('DAPLink')
# Validate args
    # See if user wants to test endpoints. If yes and they didn't provide
# target test binaries, use the Compile API to build them
all_targets = None
if not args.notestendpt:
if not use_prebuilt and not use_compile_api:
print("Endpoint test requires target test images.")
print(" Directory with pre-built target test images")
print(" must be specified with '--targetdir'")
print("OR")
print(" developer.mbed.org login credentials must be ")
print(" specified with '--user' and '--password' so test ")
print(" images can be built with the RESTful Compile API.")
print("NOTE: you can skip the endpoint tests altogether ")
print("with --notestendpt")
exit(-1)
if args.targetdir is not None:
target_dir = args.targetdir
else:
target_dir = daplink_dir + os.sep + 'tmp'
build_target_bundle(target_dir, args.user, args.password, test_info)
target_bundle = load_target_bundle(target_dir)
all_targets = target_bundle.get_target_list()
if os.path.exists(args.logdir):
if args.force:
shutil.rmtree(args.logdir)
else:
print('Error - test results directory "%s" already exists' %
args.logdir)
exit(-1)
# Get all relevant info
if args.firmwaredir is None:
firmware_bundle = load_bundle_from_project(args.project_tool)
else:
firmware_bundle = load_bundle_from_release(args.firmwaredir)
all_firmware = firmware_bundle.get_firmware_list()
all_boards = get_all_attached_daplink_boards()
for board in all_boards:
if board.get_mode() == board.MODE_BL:
print('Switching to APP mode on board: %s' % board.unique_id)
try:
board.set_mode(board.MODE_IF)
except Exception:
print('Unable to switch mode on board: %s' % board.unique_id)
# Make sure firmware is present
firmware_explicitly_specified = len(args.firmware) != 0
if firmware_explicitly_specified:
all_firmware_names = set(fw.name for fw in all_firmware)
firmware_missing = False
for firmware_name in args.firmware:
if firmware_name not in all_firmware_names:
firmware_missing = True
test_info.failure('Cannot find firmware %s' % firmware_name)
if firmware_missing:
test_info.failure('Firmware missing - aborting test')
exit(-1)
# Create manager and add resources
tester = TestManager()
tester.add_firmware(all_firmware)
tester.add_boards(all_boards)
if all_targets is not None:
tester.add_targets(all_targets)
if firmware_explicitly_specified:
tester.set_firmware_filter(args.firmware)
# Configure test manager
tester.set_test_first_board_only(args.testfirst)
tester.set_load_if(not args.noloadif)
tester.set_test_ep(not args.notestendpt)
tester.set_load_bl(args.loadbl)
tester.set_test_daplink(args.testdl)
# Build test configurations
tester.build_test_configurations(test_info)
test_config_list = tester.get_test_configurations()
if len(test_config_list) == 0:
test_info.failure("Nothing that can be tested")
exit(-1)
else:
test_info.info('Test configurations to be run:')
index = 0
for test_config in test_config_list:
test_info.info(' %i: %s' % (index, test_config))
index += 1
test_info.info('')
untested_list = tester.get_untested_firmware()
if len(untested_list) == 0:
test_info.info("All firmware can be tested")
else:
        test_info.info('Firmware that will not be tested:')
for untested_firmware in untested_list:
test_info.info(' %s' % untested_firmware.name)
test_info.info('')
if firmware_explicitly_specified and len(untested_list) != 0:
test_info.failure("Exiting because not all firmware could be tested")
exit(-1)
# If this is a dryrun don't run tests, just print info
if args.dryrun:
exit(0)
# Run tests
tester.run_tests()
# Print test results
tester.print_results(args.verbose)
tester.write_test_results(args.logdir,
git_sha=git_sha,
local_changes=local_changes)
# Warn about untested boards
print('')
for firmware in tester.get_untested_firmware():
print('Warning - configuration %s is untested' % firmware.name)
if tester.all_tests_pass:
print("All boards passed")
exit(0)
else:
print("Test Failed")
exit(-1)
if __name__ == "__main__":
main()
| 40.25884 | 107 | 0.619822 |
9754d2ce57802b6a16cfcf111dd7d0c3f0123d50 | 3,076 | py | Python | utilities.py | RyanLinXiang/flower-classifier | b9bf566dcc99864666bbf73f68cdf056b9b1c4b5 | [
"MIT"
] | null | null | null | utilities.py | RyanLinXiang/flower-classifier | b9bf566dcc99864666bbf73f68cdf056b9b1c4b5 | [
"MIT"
] | null | null | null | utilities.py | RyanLinXiang/flower-classifier | b9bf566dcc99864666bbf73f68cdf056b9b1c4b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PROGRAMMER: Ryan Lin Xiang
# DATE CREATED: 4th Feb 2020
# REVISED DATE: 05th Feb 2020
# PURPOSE: This file includes all the helper functions necessary to save and load the model, as well as process the images
from PIL import Image
import numpy as np
from torch import save, load, from_numpy
import torchvision.models as models
def save_model(model, save_dir, class_to_idx, arch, structure):
"""
Saves the trained and validated model
Parameters:
model : the trained and validated model
save_dir : the path where the classifier model file should be saved
        class_to_idx : the path to the file where category indices are saved to trace back the indices predicted by the model
arch : the architecture of the pre-trained model chosen
structure : the structure of the classifier used to initiate the model
Returns:
None
"""
classifier = {'arch': arch,
'class_to_idx': class_to_idx,
'state_dict': model.classifier.state_dict(),
'structure': structure}
save(classifier, save_dir+"/"+"classifier.pth")
def load_model(classifier_path):
"""
Load the pre-trained model from the specified file with the updated classifier (features are frozen)
Parameters:
classifier_path : the path to the saved classifier model
Returns:
classifier model
"""
classifier = load(classifier_path)
model = getattr(models, classifier['arch'])
model = model(pretrained=True)
model.classifier = classifier['structure']
model.class_to_idx = classifier['class_to_idx']
model.classifier.load_state_dict(classifier['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
Parameters:
image : the path to the image file
Returns:
numpy array with the image file processed and ready as input for the model
'''
img = Image.open(image)
    # the shorter dimension of the image is resized to 256 pixels while the other dimension is scaled to preserve the aspect ratio
if img.size[0] < img.size[1]:
ratio = 256/img.size[0]
img = img.resize((256,int(img.size[1]*ratio)))
else:
ratio = 256/img.size[1]
img = img.resize((int(img.size[0]*ratio),256))
# crop a square of 224px from the center of the image in order to get the image ready for the model
top = (img.size[1] - 224)/2
bottom = (img.size[1] + 224)/2
left = (img.size[0] - 224)/2
right = (img.size[0] + 224)/2
img = img.crop((left, top, right, bottom))
img = np.array(img)/255
# normalization of the image in order to get the image ready for the model
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = np.transpose((img - mean) / std)
return from_numpy(img)
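# Minimal usage sketch of process_image (the image path is hypothetical):
#   tensor = process_image('flowers/example.jpg')
#   # yields a channel-first tensor built from the normalized 224x224 crop above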
| 31.71134 | 122 | 0.640767 |
45c73c8dc41c4b6b73948fd08f197402b854a718 | 16,001 | py | Python | HmiLogViewer.py | LoicGRENON/HmiLogViewer | 6e2406a2e1f3d3dc3e864a39dfbe984b4fdcaaf1 | [
"MIT"
] | null | null | null | HmiLogViewer.py | LoicGRENON/HmiLogViewer | 6e2406a2e1f3d3dc3e864a39dfbe984b4fdcaaf1 | [
"MIT"
] | null | null | null | HmiLogViewer.py | LoicGRENON/HmiLogViewer | 6e2406a2e1f3d3dc3e864a39dfbe984b4fdcaaf1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 9 juil. 2015
@author: GRENON Loïc
"""
from PyQt4 import QtCore, QtGui
from ui.main import Ui_MainWindow
from ItemModels import COLOR_ROLE, LogReaderTableModel
from MyAboutDialog import MyAboutDialog
from MyExceptions import *
import codecs
import csv
import sys
import os
import yaml
import ast
import shutil
class HmiLogViewer(QtGui.QMainWindow):
def __init__(self):
super(HmiLogViewer, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.aboutDialog = MyAboutDialog(self)
self.model = None
self.parserConfig = {'headers': None,
'cols': None}
self.projectId = ""
# Menu entries actions
QtCore.QObject.connect(self.ui.actionOpen,
QtCore.SIGNAL("triggered()"),
self.openFile)
QtCore.QObject.connect(self.ui.actionAddFile,
QtCore.SIGNAL("triggered()"),
lambda: self.openFile(True))
QtCore.QObject.connect(self.ui.actionClose,
QtCore.SIGNAL("triggered()"),
self.closeFile)
QtCore.QObject.connect(self.ui.actionSaveAs,
QtCore.SIGNAL("triggered()"),
self.saveFile)
QtCore.QObject.connect(self.ui.actionImportConfigFile,
QtCore.SIGNAL("triggered()"),
self.importConfigFile)
QtCore.QObject.connect(self.ui.actionAbout,
QtCore.SIGNAL("triggered()"),
self.aboutDialog.open)
# Tool buttons actions
QtCore.QObject.connect(self.ui.toolBtnOpen,
QtCore.SIGNAL("clicked()"),
self.openFile)
QtCore.QObject.connect(self.ui.toolBtnAppend,
QtCore.SIGNAL("clicked()"),
lambda: self.openFile(True))
QtCore.QObject.connect(self.ui.toolBtnSave,
QtCore.SIGNAL("clicked()"),
self.saveFile)
def getItemParserConfig(self, projectId=""):
"""
:param projectId: str
:return: dict
"""
projectId = projectId.strip()
try:
# First open the config file which is in the same directory than the executable
with open("HmiLogViewer.yaml", "r") as f:
return self.parseConfig(f, projectId)
except (IOError, OSError):
try:
with open(os.path.join(self.getConfigPath(), "HmiLogViewer.yaml"), "r") as f:
return self.parseConfig(f, projectId)
except (IOError, OSError) as e:
QtGui.QMessageBox.critical(self.ui.centralwidget,
u"Config loading failed",
u"Unable to read config file.\nReturned error is :\n%s" % e,
QtGui.QMessageBox.Ok)
return {'headers': {},
'cols': {'decimals': 0,
'values': {},
'color': {},
'visible': True}}
def parseConfig(self, fp, projectId):
"""
Parse config file for the projectId
:param fp: file object
:param projectId: str
:return: dict
"""
data = yaml.safe_load(fp)
for d in data['LogData']:
if projectId == d['projectId']:
headers = ['Date']
cols = []
for item in d['items']:
headers.append(u"{}\n({})".format(item['name'], item['unit'])
if item['unit'] else
u"{}".format(item['name']))
try:
decimals = item['decimals']
except KeyError:
decimals = 0
try:
values = ast.literal_eval(item['values'])
except (KeyError, ValueError, SyntaxError):
values = {}
try:
color = ast.literal_eval(item['color'])
except (KeyError, ValueError, SyntaxError):
color = {}
try:
visible = item['visible']
except KeyError:
visible = True
cols.append({'decimals': decimals,
'values': values,
'color': color,
'visible': visible})
return {'headers': headers,
'cols': cols}
# Raise exception if project is not found
raise ProjectIdError()
def getConfigPath(self):
dirname = os.path.join("EriVallon", "HmiLogViewer")
if 'ALLUSERSPROFILE' in os.environ:
try:
from win32com.shell import shellcon, shell
appdata_path = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, 0, 0)
except ImportError:
appdata_path = os.environ['ALLUSERSPROFILE']
return os.path.join(appdata_path, dirname)
elif 'XDG_CONFIG_HOME' in os.environ:
return os.path.join(os.environ['XDG_CONFIG_HOME'], dirname)
else:
return os.path.join(os.environ['HOME'], '.config', dirname)
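    # Typical results of the method above (illustrative):
    #   C:\ProgramData\EriVallon\HmiLogViewer on Windows,
    #   ~/.config/EriVallon/HmiLogViewer on Linux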
def importConfigFile(self):
sFilePath = QtGui.QFileDialog.getOpenFileName(self.ui.centralwidget,
u"Choose a config file to import",
QtCore.QString(),
"YAML config file (*.yaml)")
if os.path.isfile(sFilePath):
r = QtGui.QMessageBox.warning(self.ui.centralwidget,
u"Load config file",
u"You are about to load a new config file located at : %s\n\n"
u"Make sure you have selected a correct config file, "
u"incorrect file may result in a non-functional application." % sFilePath,
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
QtGui.QMessageBox.Cancel)
if r == QtGui.QMessageBox.Ok:
configPath = self.getConfigPath()
if not os.path.exists(configPath):
os.makedirs(configPath)
shutil.copy(sFilePath, os.path.join(configPath, "HmiLogViewer.yaml"))
def openFile(self, append=False):
"""
Open a csv file
:param append: Do not close previous file if True
"""
sFilePath = QtGui.QFileDialog.getOpenFileName(self.ui.centralwidget,
u"Choose a log file",
QtCore.QString(),
"CSV (*.csv)")
if os.path.isfile(sFilePath):
if not append:
self.closeFile()
self.parseLogFile(str(sFilePath))
self.ui.actionClose.setEnabled(True)
self.ui.actionAddFile.setEnabled(True)
self.ui.toolBtnAppend.setEnabled(True)
self.ui.actionSaveAs.setEnabled(True)
self.ui.toolBtnSave.setEnabled(True)
def closeFile(self):
"""
        Clear the data from the opened files
"""
self.model = None
self.projectId = ""
self.setModel()
self.ui.actionSaveAs.setEnabled(False)
self.ui.toolBtnSave.setEnabled(False)
self.ui.actionAddFile.setEnabled(False)
self.ui.toolBtnAppend.setEnabled(False)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
for url in event.mimeData().urls():
path = url.toLocalFile().toLocal8Bit().data()
if os.path.isfile(path):
self.parseLogFile(path)
self.ui.actionClose.setEnabled(True)
self.ui.actionAddFile.setEnabled(True)
self.ui.toolBtnAppend.setEnabled(True)
self.ui.actionSaveAs.setEnabled(True)
self.ui.toolBtnSave.setEnabled(True)
def setModel(self):
self.model = LogReaderTableModel(self.parserConfig['headers'], self.ui.centralwidget)
self.ui.tableView.setModel(self.model)
def parseLogFile(self, filename):
"""
        The log file consists of 3 fields separated by tabs:
        1: Date in dd/mm/yyyy format
        2: Time in HH:MM:SS format
        3: The message, made up of N fields separated by semicolons
:param filename: file to be parsed
"""
if not self.ui.tableView.model():
self.setModel()
try:
with codecs.open(filename, "r", "utf-16") as f:
for lineIdx, line in enumerate(f):
if line.endswith("\r\n"):
line = line[:-2]
if line.endswith("\n"):
line = line[:-1]
# Check for projectId value
if lineIdx == 0:
if self.projectId == "":
self.projectId = line
try:
self.parserConfig = self.getItemParserConfig(self.projectId)
except ProjectIdError:
QtGui.QMessageBox.warning(self.ui.centralwidget,
u"Config cannot be found",
u"A proper config cannot be found for this file",
QtGui.QMessageBox.Ok)
break
self.setModel()
elif line != self.projectId:
QtGui.QMessageBox.warning(self.ui.centralwidget,
u"Wrong log file",
u"The log file you are trying to open seems to be from "
u"a different project than the last opened file.\n"
u"Please close it before opening another.",
QtGui.QMessageBox.Ok)
break
lineFields = line.split()
# Check if lineField has correct field number : 1: Date / 2: Time / 3: Message
if len(lineFields) == 3:
                        # The message consists of N fields separated by semicolons
                        # -> extract them and check that the field count matches the header
msgFields = lineFields[2].strip(";").split(";")
if len(msgFields) == len(self.parserConfig['headers']) - 1:
items = []
for field, fieldConfig in zip(msgFields, self.parserConfig['cols']):
decimals = fieldConfig['decimals']
values = fieldConfig['values']
color = fieldConfig['color']
visible = fieldConfig['visible']
try:
evalField = ast.literal_eval(field)
except ValueError:
evalField = field
try:
if decimals != 0:
item = QtGui.QStandardItem((unicode(float(field)/10.0**decimals)))
else:
item = QtGui.QStandardItem(unicode(values[evalField]))
except (ValueError, KeyError):
item = QtGui.QStandardItem(unicode(field))
try:
item.setData(QtGui.QColor(color[evalField]), COLOR_ROLE)
except (ValueError, KeyError):
pass
items.append(item)
# Add date and time values on the top of the list
items.insert(0, QtGui.QStandardItem(" ".join(lineFields[:-1])))
# Add extra row for aesthetic reasons
items.append(QtGui.QStandardItem())
self.model.appendRow(items)
except (IOError, OSError, UnicodeError) as e:
QtGui.QMessageBox.critical(self.ui.centralwidget,
u"Opening failed",
u"Failed to open file.\nReturned error is :\n%s" % e,
QtGui.QMessageBox.Ok)
self.updateModel()
def updateModel(self):
tv = self.ui.tableView
tv.setModel(self.model)
# Hide the last column to not resize it
tv.setColumnHidden(self.model.columnCount(), True)
# Resize cells to contents
tv.resizeColumnsToContents()
tv.resizeRowsToContents()
# Unhide the last column previously hidden
tv.setColumnHidden(self.model.columnCount(), False)
def saveFile(self):
sFilePath = QtGui.QFileDialog.getSaveFileName(self.ui.centralwidget,
u"Choose a log file",
QtCore.QString(),
"CSV (*.csv)")
if sFilePath:
try:
with open(str(sFilePath), "wb") as f:
writer = csv.writer(f, delimiter=";")
# Write headers
writer.writerow([s.encode("cp1255") for s in self.parserConfig['headers']])
for row in xrange(self.model.rowCount()):
itemRow = ["%s" % self.model.item(row, col).data(QtCore.Qt.DisplayRole).toString()
if col == 0 else
self.model.item(row, col).data(QtCore.Qt.DisplayRole).toInt()[0]
if col < 4 else
"%s" % self.model.item(row, col).data(QtCore.Qt.DisplayRole).toReal()[0]
for col in xrange(self.model.columnCount())]
writer.writerow(itemRow)
except (IOError, OSError, UnicodeError) as e:
QtGui.QMessageBox.critical(self.ui.centralwidget,
u"Saving failed",
u"Failed to save file.\nReturned error is :\n%s" % e,
QtGui.QMessageBox.Ok)
def main():
app = QtGui.QApplication(sys.argv)
w = HmiLogViewer()
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 44.820728 | 116 | 0.460596 |
ceb4b6cfb6cb752aac1b0cb218b02c83671241e2 | 3,129 | py | Python | serve/predict.py | saidulislam/sentiment-analysis-sagemaker | 3f72639c04efe4795ec0b31704c1cca0abc30e98 | [
"Apache-2.0"
] | null | null | null | serve/predict.py | saidulislam/sentiment-analysis-sagemaker | 3f72639c04efe4795ec0b31704c1cca0abc30e98 | [
"Apache-2.0"
] | null | null | null | serve/predict.py | saidulislam/sentiment-analysis-sagemaker | 3f72639c04efe4795ec0b31704c1cca0abc30e98 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the store model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
def predict_fn(input_data, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
# Process input_data so that it is ready to be sent to our model.
# You should produce two variables:
# data_X - A sequence of length 500 which represents the converted review
# data_len - The length of the review
data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))
# Using data_X and data_len we construct an appropriate input tensor. Remember
# that our model expects input data of the form 'len, review[500]'.
data_pack = np.hstack((data_len, data_X))
data_pack = data_pack.reshape(1, -1)
data = torch.from_numpy(data_pack)
data = data.to(device)
# Make sure to put the model into evaluation mode
model.eval()
# Compute the result of applying the model to the input data. The variable `result` should
# be a numpy array which contains a single integer which is either 1 or 0
with torch.no_grad():
output = model.forward(data)
output = output.to('cpu')
result = np.round(output.numpy())
result = int(result)
return result
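# Illustrative handler flow (the review text is hypothetical), assuming the
# SageMaker PyTorch serving container chains input_fn -> predict_fn -> output_fn:
#   model = model_fn('/opt/ml/model')
#   review = input_fn(b'This movie was great!', 'text/plain')
#   print(output_fn(predict_fn(review, model), 'text/plain'))  # '1' or '0'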
| 32.59375 | 107 | 0.702141 |
ba38c31aacfed98a869e90c009cce5ff38a0ebab | 8,438 | py | Python | iter8_analytics/api/v2/examples/examples_metrics.py | sushmarchandran/iter8-analytics | 8264a8064cad930bf94670b10c061ee411cb948d | [
"Apache-2.0"
] | null | null | null | iter8_analytics/api/v2/examples/examples_metrics.py | sushmarchandran/iter8-analytics | 8264a8064cad930bf94670b10c061ee411cb948d | [
"Apache-2.0"
] | null | null | null | iter8_analytics/api/v2/examples/examples_metrics.py | sushmarchandran/iter8-analytics | 8264a8064cad930bf94670b10c061ee411cb948d | [
"Apache-2.0"
] | null | null | null | """
Metric examples used in other examples.
"""
request_count = {
"name": "request-count",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "request-count"
},
"spec": {
"params": [{
"name": "query",
"value": "sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0)"
}],
"description": "Number of requests",
"type": "counter",
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://metrics-mock:8080/promcounter"
}
}
}
mean_latency = {
"name": "mean-latency",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "mean-latency"
},
"spec": {
"description": "Mean latency",
"units": "milliseconds",
"params": [{
"name": "query",
"value": "(sum(increase(revision_app_request_latencies_sum{service_name=~'.*$name'}[${elapsedTime}s]))or on() vector(0)) / (sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0))"
}],
"type": "gauge",
"sampleSize": {
"name": "request-count"
},
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://metrics-mock:8080/promcounter"
}
}
}
# This yaml body is marshalled into the corresponding JSON body.
# body: |
# {
# "last": $elapsedTime,
# "sampling": 600,
# "filter": "kubernetes.node.name = 'n1' and service = '$name'",
# "metrics": [
# {
# "id": "cpu.cores.used",
# "aggregations": { "time": "avg", "group": "sum" }
# }
# ],
# "dataSourceType": "container",
# "paging": {
# "from": 0,
# "to": 99
#   }
# }
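# Sketch of how the $name / ${elapsedTime} placeholders could be resolved
# downstream (string.Template semantics assumed; the values are hypothetical):
#   from string import Template
#   body = Template(cpu_utilization['metricObj']['spec']['body'])
#   body.safe_substitute(elapsedTime=600, name='my-service')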
cpu_utilization = {
"name": "cpu-utilization",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "cpu-utilization"
},
"spec": {
"description": "CPU utilization",
"body": "{\n \"last\": $elapsedTime,\n \"sampling\": 600,\n \"filter\": \"kubernetes.node.name = 'n1' and service = '$name'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}\n",
"method": "POST",
"type": "gauge",
"provider": "Sysdig",
"jqExpression": ".data[0].d[0] | tonumber",
"urlTemplate": "http://metrics-mock:8080/sysdig"
}
}
}
business_revenue = {
"name": "business-revenue",
"metricObj": {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "business-revenue"
},
"spec": {
"description": "Business Revenue Metric",
"units": "dollars",
"params": [{
"name": "query",
"value": "(sum(increase(business_revenue{service_name=~'.*$name'}[${elapsedTime}s]))or on() vector(0)) / (sum(increase(revision_app_request_latencies_count{service_name=~'.*$name'}[${elapsedTime}s])) or on() vector(0))"
}],
"type": "gauge",
"sampleSize": {
"name": "request-count"
},
"provider": "prometheus",
"jqExpression": ".data.result[0].value[1] | tonumber",
"urlTemplate": "http://prometheus-operated.iter8-monitoring:9090/api/v1/query"
}
}
}
new_relic_embedded = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "name-count"
},
"spec": {
"params": [
{
"name": "nrql",
"value": "SELECT count(appName) FROM PageView WHERE revisionName='${revision}' SINCE ${elapsedTime} seconds ago"
}
],
"description": "A New Relic example",
"type": "Counter",
"headerTemplates": [
{
"name": "X-Query-Key",
"value": "t0p-secret-api-key"
}
],
"provider": "newrelic",
"jqExpression": ".results[0].count | tonumber",
"urlTemplate": "https://insights-api.newrelic.com/v1/accounts/my_account_id"
}
}
new_relic_secret = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "name-count"
},
"spec": {
"params": [
{
"name": "nrql",
"value": "SELECT count(appName) FROM PageView WHERE revisionName='${revision}' SINCE ${elapsedTime} seconds ago"
}
],
"description": "A New Relic example",
"type": "Counter",
"authType": "APIKey",
"secret": "myns/nrcredentials",
"headerTemplates": [
{
"name": "X-Query-Key",
"value": "${mykey}"
}
],
"provider": "newrelic",
"jqExpression": ".results[0].count | tonumber",
"urlTemplate": "https://insights-api.newrelic.com/v1/accounts/my_account_id"
}
}
sysdig_embedded = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "cpu-utilization"
},
"spec": {
"description": "A Sysdig example",
"provider": "sysdig",
"body": "{\n \"last\": ${elapsedTime},\n \"sampling\": 600,\n \"filter\": \"kubernetes.app.revision.name = '${revision}'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}",
"method": "POST",
"type": "Gauge",
"headerTemplates": [
{
"name": "Accept",
"value": "application/json"
},
{
"name": "Authorization",
"value": "Bearer 87654321-1234-1234-1234-123456789012"
}
],
"jqExpression": ".data[0].d[0] | tonumber",
"urlTemplate": "https://secure.sysdig.com/api/data"
}
}
sysdig_secret = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "cpu-utilization"
},
"spec": {
"description": "A Sysdig example",
"provider": "sysdig",
"body": "{\n \"last\": ${elapsedTime},\n \"sampling\": 600,\n \"filter\": \"kubernetes.app.revision.name = '${revision}'\",\n \"metrics\": [\n {\n \"id\": \"cpu.cores.used\",\n \"aggregations\": { \"time\": \"avg\", \"group\": \"sum\" }\n }\n ],\n \"dataSourceType\": \"container\",\n \"paging\": {\n \"from\": 0,\n \"to\": 99\n }\n}",
"method": "POST",
"authType": "Bearer",
"secret": "myns/sdcredentials",
"type": "Gauge",
"headerTemplates": [
{
"name": "Accept",
"value": "application/json"
},
{
"name": "Authorization",
"value": "Bearer ${token}"
}
],
"jqExpression": ".data[0].d[0] | tonumber",
"urlTemplate": "https://secure.sysdig.com/api/data"
}
}
elastic_secret = {
"apiVersion": "iter8.tools/v2alpha2",
"kind": "Metric",
"metadata": {
"name": "average-sales"
},
"spec": {
"description": "An elastic example",
"provider": "elastic",
"body": "{\n \"aggs\": {\n \"range\": {\n \"date_range\": {\n \"field\": \"date\",\n \"ranges\": [\n { \"from\": \"now-${elapsedTime}s/s\" } \n ]\n }\n },\n \"items_to_sell\": {\n \"filter\": { \"term\": { \"version\": \"${revision}\" } },\n \"aggs\": {\n \"avg_sales\": { \"avg\": { \"field\": \"sale_price\" } }\n }\n }\n }\n}",
"method": "POST",
"authType": "Basic",
"secret": "myns/elasticcredentials",
"type": "Gauge",
"headerTemplates": [
{
"name": "Content-Type",
"value": "application/json"
}
],
"jqExpression": ".aggregations.items_to_sell.avg_sales.value | tonumber",
"urlTemplate": "https://secure.elastic.com/my/sales"
}
}
| 33.484127 | 410 | 0.492178 |
65d2aa00d414bb51ec6e07527fa5c84de69c4723 | 8,686 | py | Python | sources/modis/arcpy_modis_721_etl_main.py | SERVIR/ReferenceNode_ETL | bda84fba651077dce74dd4d4f178de6acfc9a54c | [
"Apache-2.0"
] | null | null | null | sources/modis/arcpy_modis_721_etl_main.py | SERVIR/ReferenceNode_ETL | bda84fba651077dce74dd4d4f178de6acfc9a54c | [
"Apache-2.0"
] | null | null | null | sources/modis/arcpy_modis_721_etl_main.py | SERVIR/ReferenceNode_ETL | bda84fba651077dce74dd4d4f178de6acfc9a54c | [
"Apache-2.0"
] | 6 | 2016-12-17T22:39:17.000Z | 2019-07-08T08:55:31.000Z | # Developer: SpatialDev
# Company: Spatial Development International
# --------------- Imports -------------------------------------
# standard library
from datetime import datetime, timedelta
import sys
import os
# third-party
import arcpy
# Add the ETLBaseModule directory location to the Python system path in order to import the shared ETL framework modules
sys.path.append("PATH TO ETL MODULES \\ETL\\ETLScripts\\ETLBaseModules\\")
# ETL framework
from etl_controller import ETLController
from modis_etl_delegate import MODISETLDelegate
from arcpy_modis_etl_core import MODISLoader, MODISExtractor, MODISMetaDataTransformer, MODISExtractValidator
# ETL utils
from etl_utils import ETLDebugLogger, ETLExceptionManager
from arcpy_utils import RasterCatalog, FileGeoDatabase, AGServiceManager
# --------------- ETL ---------------------------------------------------------------------------------------------------
def createRasterCatalog(output_basepath, raster_catalog_name):
# configure raster catalog object -------------------------------------
raster_catalog = RasterCatalog(output_basepath, raster_catalog_name, {
'datetime_field':'datetime',
'datetime_field_format':'%m-%d-%Y %I:%M:%S %p',
'datetime_sql_cast':"date",
"archive_days": 90
})
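    # With the format above, a datetime value renders like '07-09-2015 03:45:00 PM'.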
# un-comment AddField_management statements when running the script for a new feature class. Re-comment after creation to speed up the initialization process.
# # custom fields -------------------------------------
# arcpy.AddField_management(raster_catalog.fullpath, raster_catalog.options['datetime_field'], 'DATE', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'datetime_string', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'resolution', 'TEXT', '', '', 25)
#
# # MODIS image specific meta-data fields -------------------------------------
# arcpy.AddField_management(raster_catalog.fullpath, 'subset', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'date', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'satellite', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'projection', 'TEXT', '', '', 50)
# arcpy.AddField_management(raster_catalog.fullpath, 'projection_center_lon', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'projection_center_lat', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'UL_lon', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'UL_lat', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'UR_lon', "TEXT", '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'UR_lat', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'LR_lon', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'LR_lat', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'LL_lon', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'LL_lat', 'TEXT', '', '', 25)
# arcpy.AddField_management(raster_catalog.fullpath, 'x_scale_factor', 'TEXT', '', '', 50)
# arcpy.AddField_management(raster_catalog.fullpath, 'ellipsoid', 'TEXT', '', '', 10)
# arcpy.AddField_management(raster_catalog.fullpath, 'L2_granules', 'TEXT', '', '', 500)
return raster_catalog
def executeETL(raster_catalog):
# initialize utility objects -------------------------------------
debug_log_output_directory = os.path.join(sys.path[0], "MODIS_721_logs")
etl_debug_logger = ETLDebugLogger(debug_log_output_directory, "MODIS_721", {
"debug_log_archive_days":7
})
update_debug_log = etl_debug_logger.updateDebugLog # retrieve a reference to the debug logger function
etl_exception_manager = ETLExceptionManager(sys.path[0], "MODIS_721_exception_reports", {
"create_immediate_exception_reports":True
})
# initialize core ETL objects -------------------------------------
start_datetime = datetime.utcnow()
end_datetime = start_datetime - timedelta(days=raster_catalog.options['archive_days'])
    image_extn = "tif" # target extension for MODIS images to download
modis_extract_validator = MODISExtractValidator({
"raster_catalog":raster_catalog,
"raster_name_field":"Name",
"start_datetime":start_datetime,
"end_datetime":end_datetime,
'debug_logger':update_debug_log
})
modis_extractor = MODISExtractor({
"image_content_types":['image/tiff'], # checks the header content type for this value before downloading the images
"text_content_types":['text/html', 'text/plain'], # checks the header content type for any of these values before downloading the meta-data
"subset":['Bhutan', 'Nepal'],
"satellite":['terra','aqua'],
"size":['2km','1km','500m','250m'],
"extn":image_extn,
"subtype":['721'], # list only has one item since it is the category of the raster catalog the ETL is updating
"start_datetime":start_datetime,
"end_datetime":end_datetime,
'debug_logger':update_debug_log
})
modis_meta_data_transformer = MODISMetaDataTransformer({
'debug_logger':update_debug_log
})
modis_loader = MODISLoader({
"raster_catalog":raster_catalog,
"CopyRaster_management_config":{
'config_keyword':'#',
'background_value':'#',
'nodata_value':'#',
'onebit_to_eightbit':'NONE',
'colormap_to_RGB':'NONE',
'pixel_type':'8_BIT_UNSIGNED'
},
'debug_logger':update_debug_log
})
etl_controller = ETLController(sys.path[0], "MODIS_721", {
"remove_etl_workspace_on_finish":True
})
modis_etl_delegate = MODISETLDelegate({
#URL MAY OR MAY NOT NEED TO BE UPDATED
"url":'http://rapidfire.sci.gsfc.nasa.gov/subsets/?subset=', # base URL for all images
"extn":image_extn,
"meta_extn":"txt",
"all_or_none_for_success":False,
'debug_logger':update_debug_log,
'exception_handler':etl_exception_manager.handleException
})
# set ETLDelegate object properties-------------------------------------
modis_etl_delegate.setExtractValidator(modis_extract_validator)
modis_etl_delegate.setExtractor(modis_extractor)
modis_etl_delegate.setTransformer(modis_meta_data_transformer)
modis_etl_delegate.setLoader(modis_loader)
modis_etl_delegate.setETLController(etl_controller)
# execute the ETL operation -------------------------------------
successful_new_run = modis_etl_delegate.startETLProcess()
# perform post-ETL operations -------------------------------------
raster_catalog.deleteOutdatedRows()
etl_debug_logger.deleteOutdatedDebugLogs()
etl_exception_manager.finalizeExceptionXMLLog()
return successful_new_run
# --------------- ETL MAIN ---------------------------------------------------------------------------------------------------
def main(*args, **kwargs):
# create the FileGeoDatabase if it does not already exist
modis_gdb = FileGeoDatabase("PATH ON DISK TO FGDB \\Himalaya\\FileGeodatabases\\", "MODIS.gdb")
# retrieve a reference to the raster catalog, create the raster catalog if it does not already exist
raster_catalog = createRasterCatalog(modis_gdb.fullpath, "MODIS_721")
# execute the main ETL operation
successful_new_run = executeETL(raster_catalog)
if successful_new_run:
# refresh all services to update the data
modis_services = ("Himalaya/BHUTAN_721_AQUA", "Himalaya/BHUTAN_721_TERRA", "Himalaya/NEPAL_721_AQUA", "Himalaya/NEPAL_721_TERRA")
        modis_service = AGServiceManager(modis_services, "PATH ON DISK TO AGSSOM TOOL \\ETL\\ETLTools\\AGSSOM.exe", "localhost")
modis_service.refreshService()
# method called upon module execution to start the ETL process
main()
| 47.98895 | 166 | 0.616509 |
b8f62440b668cb2116379e968585c324aa74acc2 | 11,200 | py | Python | modules/torrentsearch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 422 | 2015-01-08T14:08:08.000Z | 2022-02-07T11:47:37.000Z | modules/torrentsearch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 581 | 2015-01-01T08:07:16.000Z | 2022-02-23T11:44:37.000Z | modules/torrentsearch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 115 | 2015-01-08T14:41:00.000Z | 2022-02-13T12:31:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import cherrypy
import jsonrpclib
import htpc
from ts import norbits
from ts import ka
from ts import ptp
from ts import rarbg
from ts import torrentproject
from ts import jackett2
from cherrypy.lib.auth2 import require
regex_codec = re.compile(r'(x264|x\.264|h264|h\.264|xvid|x265|x\.265|h265|h\.265|mpeg2|divx)', re.I)
regex_source = re.compile(r'(HDTV|HD-TV|HD\.TV|WEB-DL|WEB_DL|WEB\.DL|WEB_RIP|WEB-RIP|WEBRip|WEB\.RIP|BRRIP|BDRIP|BluRay(.*)REMUX)|(?i)BluRay(.*)\.(AVC|VC-1)\.|BluRay', re.I)
regex_resolution = re.compile(r'(sd|480p|480i|720p|720i|1080p|1080i|2160p)', re.I)
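# Illustrative matches (the release name below is hypothetical):
#   name = 'Some.Show.S01E01.720p.HDTV.x264-GRP'
#   regex_codec.search(name).group()       -> 'x264'
#   regex_source.search(name).group()      -> 'HDTV'
#   regex_resolution.search(name).group()  -> '720p'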
class Torrentsearch(object):
def __init__(self):
self.logger = logging.getLogger('modules.torrentsearch')
self.rb = rarbg.Rarbg()
htpc.MODULES.append({
'name': 'Torrents',
'id': 'torrentsearch',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'torrentsearch_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'torrentsearch_name'},
{'type': 'bool', 'label': 'Enable BTN', 'name': 'torrents_btn_enabled'},
{'type': 'password', 'label': 'BTN apikey', 'name': 'torrents_btn_apikey'},
{'type': 'bool', 'label': 'Norbits', 'name': 'torrents_norbits_enabled'},
{'type': 'text', 'label': 'Norbits username', 'name': 'torrents_norbits_username'},
{'type': 'password', 'label': 'Norbits passkey', 'name': 'torrents_norbits_passkey'},
{'type': 'bool', 'label': 'PTP', 'name': 'torrents_ptp_enabled'},
{'type': 'text', 'label': 'PTP username', 'name': 'torrents_ptp_username'},
{'type': 'password', 'label': 'PTP password', 'name': 'torrents_ptp_password'},
{'type': 'password', 'label': 'PTP passkey', 'name': 'torrents_ptp_passkey'},
{'type': 'bool', 'label': 'Rarbg', 'name': 'torrents_rarbg_enabled'},
{'type': 'bool', 'label': 'KAT', 'name': 'torrents_ka_enabled'},
                {'type': 'bool', 'label': 'Torrent project', 'name': 'torrents_torrentproject_enabled', 'desc': 'DHT tracker'},
{'type': 'bool', 'label': 'Jackett', 'name': 'torrents_jackett_enabled'},
{'type': 'text', 'label': 'Jackett host', 'name': 'torrents_jackett_host'},
{'type': 'text', 'label': 'Jackett port', 'name': 'torrents_jackett_port'},
{'type': 'bool', 'label': 'Jackett ssl', 'name': 'torrents_jackett_ssl'},
{'type': 'password', 'label': 'Jackett apikey', 'name': 'torrents_jackett_apikey'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '/jackett', 'desc': 'Page title link. E.g /jackett or https://rarbg.to/', 'name': 'torrents_reverse_proxy_link'}
]
})
@cherrypy.expose()
@require()
def index(self, query='', **kwargs):
return htpc.LOOKUP.get_template('torrentsearch.html').render(query=query, scriptname='torrentsearch', torrentproviders=self.torrentproviders(), webinterface=self.webinterface())
def webinterface(self):
# Return the reverse proxy url if specified
return htpc.settings.get('torrents_reverse_proxy_link')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def search(self, query=None, provider='all'):
self.logger.debug(query)
self.logger.debug(provider)
r = []
if provider == 'all':
if htpc.settings.get('torrents_btn_enabled'):
r += self.btn(query)
if htpc.settings.get('torrents_norbits_enabled'):
r += self.search_norbits(query, 'all')
if htpc.settings.get('torrents_ka_enabled'):
r += self.search_ka(query)
if htpc.settings.get('torrents_ptp_enabled'):
r += self.search_ptp(query, 'movie')
if htpc.settings.get('torrents_rarbg_enabled'):
r += self.search_rarbg(query, None)
if htpc.settings.get('torrents_torrentproject_enabled'):
r += self.search_torrentproject(query, None)
if htpc.settings.get('torrents_jackett_enabled'):
r += self.search_jackett(query, None)
elif provider == 'btn':
if htpc.settings.get('torrents_btn_enabled'):
r += self.btn(query)
elif provider == 'rarbg':
if htpc.settings.get('torrents_rarbg_enabled'):
r += self.search_rarbg(query, None)
elif provider == 'torrentproject':
if htpc.settings.get('torrents_torrentproject_enabled'):
r += self.search_torrentproject(query, 'all')
elif provider == 'kat':
if htpc.settings.get('torrents_ka_enabled'):
r += self.search_ka(query)
elif provider == 'norbits':
if htpc.settings.get('torrents_norbits_enabled'):
r += self.search_norbits(query, 'all')
elif provider == 'jackett':
if htpc.settings.get('torrents_jackett_enabled'):
r += self.search_jackett(query, '')
for res in r:
if not res.get('Source') or res.get('Source') == 'N/A':
source = re.search(regex_source, res['ReleaseName'])
if source:
source = source.group()
else:
source = 'N/A'
res['Source'] = source
if not res.get('Codec') or res.get('Codec') == 'N/A':
codec = re.search(regex_codec, res['ReleaseName'])
if codec:
codec = codec.group()
else:
codec = 'N/A'
res['Codec'] = codec
if not res.get('Resolution') or res.get('Resolution') == 'N/A':
resolution = re.search(regex_resolution, res['ReleaseName'])
if resolution:
resolution = resolution.group()
else:
resolution = 'N/A'
res['Resolution'] = resolution
self.logger.debug('Found %s torrents in total' % len(r))
return r
def btn(self, query=None):
result = None
try:
btn = jsonrpclib.Server('https://api.broadcasthe.net')
result = btn.getTorrents(htpc.settings.get('torrents_btn_apikey', ''), query, 999)
except Exception as e:
self.logger.error("Failed to fetch search results from BTN %s" % e)
return []
search_results = []
try:
if result:
if 'torrents' in result:
for k, v in result['torrents'].iteritems():
v["BrowseURL"] = 'https://broadcasthe.net/torrents.php?id=%s&torrentid=%s' % (v['GroupID'], v['TorrentID'])
v["Provider"] = "btn"
search_results.append(v)
return search_results
else:
return search_results
else:
return search_results
except Exception as e:
self.logger.error("Failed to fetch search results from BTN %s" % e)
return []
def torrentproviders(self):
torrentproviders = []
if htpc.settings.get('torrents_btn_apikey') and htpc.settings.get('torrents_btn_enabled') == 1:
torrentproviders.append('BTN')
if (htpc.settings.get('torrents_norbits_enabled') == 1 and
htpc.settings.get('torrents_norbits_passkey') and htpc.settings.get('torrents_norbits_username')):
torrentproviders.append('norbits')
if htpc.settings.get('torrents_ka_enabled') == 1:
torrentproviders.append('KAT')
if (htpc.settings.get('torrents_ptp_enabled') == 1 and htpc.settings.get('torrents_ptp_passkey')
and htpc.settings.get('torrents_ptp_username') and htpc.settings.get('torrents_ptp_password')):
torrentproviders.append('PTP')
if htpc.settings.get('torrents_rarbg_enabled') == 1:
torrentproviders.append('rarbg')
if htpc.settings.get('torrents_torrentproject_enabled') == 1:
torrentproviders.append('torrentproject')
if (htpc.settings.get('torrents_jackett_enabled') == 1 and htpc.settings.get('torrents_jackett_host') and
htpc.settings.get('torrents_jackett_port') and htpc.settings.get('torrents_jackett_apikey')):
torrentproviders.append('jackett')
return torrentproviders
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def getclients(self):
l = []
qbt = {}
trans = {}
utor = {}
delu = {}
rtor = {}
if htpc.settings.get('qbittorrent_enable', ''):
qbt['title'] = 'qBittorrent'
qbt['active'] = 1
qbt['path'] = 'qbittorrent/to_client/'
l.append(qbt)
else:
qbt['title'] = 'qBittorrent'
qbt['active'] = 0
qbt['path'] = 'qbittorrent/command/'
l.append(qbt)
if htpc.settings.get('transmission_enable', ''):
trans['title'] = 'transmission'
trans['active'] = 1
trans['path'] = 'transmission/to_client/'
l.append(trans)
else:
trans['title'] = 'transmission'
trans['active'] = 0
trans['path'] = 'transmission/to_client/'
l.append(trans)
if htpc.settings.get('deluge_enable', ''):
delu['title'] = 'Deluge'
delu['active'] = 1
delu['path'] = 'deluge/to_client'
l.append(delu)
else:
delu['title'] = 'Deluge'
delu['active'] = 0
delu['path'] = 'deluge/to_client'
l.append(delu)
if htpc.settings.get('utorrent_enable', ''):
utor['title'] = 'uTorrent'
utor['active'] = 1
utor['path'] = 'utorrent/to_client/'
l.append(utor)
else:
utor['title'] = 'uTorrent'
utor['active'] = 0
utor['path'] = 'utorrent/to_client/'
l.append(utor)
if htpc.settings.get('rtorrent_enable', ''):
rtor['title'] = 'rTorrent'
rtor['active'] = 1
rtor['path'] = 'rtorrent/to_client'
l.append(rtor)
else:
rtor['title'] = 'rTorrent'
rtor['active'] = 0
rtor['path'] = 'rtorrent/to_client'
l.append(rtor)
return l
def search_norbits(self, q, cat):
results = norbits.search(q, cat)
return results
def search_ka(self, q, cat="all"):
return ka.search(q, cat)
def search_ptp(self, q, cat):
return ptp.search(q, cat)
def search_rarbg(self, q, cat):
return self.rb.search(q, cat)
def search_torrentproject(self, q, cat):
return torrentproject.Torrentproject().search(q, cat)
def search_jackett(self, q, cat='all'):
return jackett2.jackett(q, cat)
| 40.57971 | 191 | 0.554018 |
0c154a33aa5304085497fac8a3cc0ac839b7bfe0 | 719 | py | Python | level21-zip_bz2_reverse.py | feliposz/python-challenge-solutions | 2d0d8fb6f29e69ce9e42539b88eb1fb37985419c | [
"MIT"
] | null | null | null | level21-zip_bz2_reverse.py | feliposz/python-challenge-solutions | 2d0d8fb6f29e69ce9e42539b88eb1fb37985419c | [
"MIT"
] | null | null | null | level21-zip_bz2_reverse.py | feliposz/python-challenge-solutions | 2d0d8fb6f29e69ce9e42539b88eb1fb37985419c | [
"MIT"
] | null | null | null | import cys_magic
import zlib, bz2
packFileName = "unreal\\package.pack"
print(cys_magic.file(packFileName))
pack = open(packFileName, "rb")
contents = pack.read()
zflag = False
bz2flag = False
reverse = False
log = ''
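# Loop sketch: alternately try zlib ('.') and bz2 ('@') decompression; when
# neither applies, reverse the byte stream once and retry, and stop when a
# reversed pass fails as well.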
while True:
try:
contents = zlib.decompress(contents)
zflag = True
reverse = False
print(".", end="")
except:
zflag = False
try:
contents = bz2.decompress(contents)
bz2flag = True
reverse = False
print("@", end="")
except:
bz2flag = False
    if not zflag and not bz2flag:
if (reverse):
break
contents = contents[::-1]
reverse = True
print(contents)
| 18.435897 | 45 | 0.573018 |
92de7812eac73a49a75f42b730f581ee7deed78d | 20,218 | py | Python | sdk/python/pulumi_azure_native/network/v20171001/route_table.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20171001/route_table.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20171001/route_table.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteTableInitArgs', 'RouteTable']
@pulumi.input_type
class RouteTableInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RouteTable resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[bool] disable_bgp_route_propagation: Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] etag: Gets a unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input['RouteArgs']]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if disable_bgp_route_propagation is not None:
pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if route_table_name is not None:
pulumi.set(__self__, "route_table_name", route_table_name)
if routes is not None:
pulumi.set(__self__, "routes", routes)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:
"""
Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@disable_bgp_route_propagation.setter
def disable_bgp_route_propagation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_bgp_route_propagation", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="routeTableName")
def route_table_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route table.
"""
return pulumi.get(self, "route_table_name")
@route_table_name.setter
def route_table_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_table_name", value)
@property
@pulumi.getter
def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@routes.setter
def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]):
pulumi.set(self, "routes", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class RouteTable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] disable_bgp_route_propagation: Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] etag: Gets a unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteTableInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param RouteTableInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteTableInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = disable_bgp_route_propagation
__props__.__dict__["etag"] = etag
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_table_name"] = route_table_name
__props__.__dict__["routes"] = routes
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteTable"), pulumi.Alias(type_="azure-native:network:RouteTable"), pulumi.Alias(type_="azure-nextgen:network:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteTable"), 
pulumi.Alias(type_="azure-native:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20201101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20201101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20210201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20210301:RouteTable")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteTable, __self__).__init__(
'azure-native:network/v20171001:RouteTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteTable':
"""
Get an existing RouteTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["routes"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:
"""
Gets or sets whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> pulumi.Output[Optional[Sequence['outputs.RouteResponse']]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 55.69697 | 5,091 | 0.683302 |
b0131f1cd6429d7284573e88dcde24d8ce328555 | 5,075 | py | Python | reviewboard/scmtools/managers.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 2 | 2020-06-19T14:57:49.000Z | 2020-06-19T15:17:40.000Z | reviewboard/scmtools/managers.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 1 | 2019-08-03T01:48:33.000Z | 2019-08-03T01:48:33.000Z | reviewboard/scmtools/managers.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db.models import Manager, Q
from django.db.models.query import QuerySet
_TOOL_CACHE = {}
class ToolQuerySet(QuerySet):
def get(self, *args, **kwargs):
pk = kwargs.get('id__exact', None)
if pk is None:
return super(ToolQuerySet, self).get(*args, **kwargs)
if not _TOOL_CACHE:
# Precompute the cache to reduce lookups.
for tool in self.model.objects.all():
_TOOL_CACHE[tool.pk] = tool
if pk not in _TOOL_CACHE:
# We'll try to look up the Tool anyway, since it may have been
# added since. This will also ensure the proper exception is
# raised if not found.
_TOOL_CACHE[pk] = super(ToolQuerySet, self).get(*args, **kwargs)
return _TOOL_CACHE[pk]
class ToolManager(Manager):
"""Manages Tool models.
Any get() operations performed (directly or indirectly through a
ForeignKey) will go through a cache to attempt to minimize Tool
lookups.
The Tool cache is never cleared, but as Tool objects should never
be modified by hand (they're registered when doing an rb-site upgrade,
and then the server process must be reloaded), this shouldn't be a
problem.
"""
use_for_related_fields = True
def get_queryset(self):
"""Return a QuerySet for Tool models.
Returns:
ToolQuerySet:
The new QuerySet instance.
"""
return ToolQuerySet(self.model, using=self.db)
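# Illustrative sketch (the pk value is hypothetical): with this manager
# installed, repeated pk lookups - including ForeignKey dereferences - are
# answered from the module-level cache instead of hitting the database:
#
#   tool = Tool.objects.get(id__exact=1)  # first call precomputes _TOOL_CACHE
#   tool = Tool.objects.get(id__exact=1)  # now served from _TOOL_CACHE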
class RepositoryManager(Manager):
"""A manager for Repository models."""
def accessible(self, user, visible_only=True, local_site=None,
show_all_local_sites=False):
"""Return a queryset for repositories accessible by the given user.
For superusers, all public and private repositories will be returned.
For regular users, only repositories that are public or that the user
is on the access lists for (directly or through a review group) will
be returned.
For anonymous users, only public repositories will be returned.
The returned list is further filtered down based on the
``visible_only``, ``local_site``, and ``show_all_local_sites``
parameters.
Args:
user (django.contrib.auth.models.User):
The user that must have access to any returned repositories.
visible_only (bool, optional):
Whether only visible repositories should be returned.
local_site (reviewboard.site.models.LocalSite, optional):
A specific :term:`Local Site` that the repositories must be
associated with. By default, this will only return
repositories not part of a site.
show_all_local_sites (bool, optional):
Whether repositories from all :term:`Local Sites` should be
returned. This cannot be ``True`` if a ``local_site`` argument
was provided.
Returns:
django.db.models.query.QuerySet:
The resulting queryset.
"""
if user.is_superuser:
qs = self.all()
if visible_only:
qs = qs.filter(visible=True)
else:
q = Q(public=True)
if visible_only:
# We allow accessible() to return hidden repositories if the
# user is a member, so we must perform this check here.
q &= Q(visible=True)
if user.is_authenticated():
q |= (Q(users__pk=user.pk) |
Q(review_groups__users=user.pk))
qs = self.filter(q)
if show_all_local_sites:
assert local_site is None
else:
qs = qs.filter(local_site=local_site)
return qs.distinct()
def accessible_ids(self, *args, **kwargs):
"""Return IDs of repositories that are accessible by the given user.
This wraps :py:meth:`accessible` and takes the same arguments.
Args:
*args (tuple):
Positional arguments to pass to :py:meth:`accessible`.
**kwargs (dict):
Keyword arguments to pass to :py:meth:`accessible`.
Returns:
list of int:
The list of IDs.
"""
return self.accessible(*args, **kwargs).values_list('pk', flat=True)
def can_create(self, user, local_site=None):
return user.has_perm('scmtools.add_repository', local_site)
def encrypt_plain_text_passwords(self):
"""Encrypts any stored plain-text passwords."""
qs = self.exclude(
Q(encrypted_password=None) |
Q(encrypted_password='') |
Q(encrypted_password__startswith=
self.model.ENCRYPTED_PASSWORD_PREFIX))
qs = qs.only('encrypted_password')
for repository in qs:
# This will trigger a migration of the password.
repository.password
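# Usage sketch (hypothetical request handler, added for illustration): fetch
# the repositories a given user may access, limited to visible repositories
# on the current Local Site:
#
#   repos = Repository.objects.accessible(request.user,
#                                         visible_only=True,
#                                         local_site=local_site)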
| 32.954545 | 78 | 0.608276 |
8de238d3687b719802cabb53d8b4cc06b444554d | 16,500 | py | Python | localstack/services/apigateway/apigateway_listener.py | SVemulapalli/localstack | c5079d7c2053efd10ea2e2dfde782f643173576b | [
"Apache-2.0"
] | null | null | null | localstack/services/apigateway/apigateway_listener.py | SVemulapalli/localstack | c5079d7c2053efd10ea2e2dfde782f643173576b | [
"Apache-2.0"
] | null | null | null | localstack/services/apigateway/apigateway_listener.py | SVemulapalli/localstack | c5079d7c2053efd10ea2e2dfde782f643173576b | [
"Apache-2.0"
] | null | null | null | import re
import json
import time
import logging
import requests
import datetime
from flask import Response as FlaskResponse
from six.moves.urllib_parse import urljoin
from requests.models import Response
from localstack.utils import common
from localstack.config import TEST_KINESIS_URL, TEST_SQS_URL
from localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST, TEST_AWS_ACCOUNT_ID
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, to_bytes
from localstack.utils.analytics import event_publisher
from localstack.services.kinesis import kinesis_listener
from localstack.services.awslambda import lambda_api
from localstack.services.apigateway import helpers
from localstack.services.generic_proxy import ProxyListener
from localstack.utils.aws.aws_responses import flask_to_requests_response, requests_response, LambdaResponse
from localstack.services.apigateway.helpers import (get_resource_for_path,
handle_authorizers, extract_query_string_params,
extract_path_params, make_error_response, get_cors_response)
# set up logger
LOGGER = logging.getLogger(__name__)
# regex path patterns
PATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\-]+)/authorizers(\?.*)?'
PATH_REGEX_RESPONSES = r'^/restapis/([A-Za-z0-9_\-]+)/gatewayresponses(/[A-Za-z0-9_\-]+)?(\?.*)?'
PATH_REGEX_USER_REQUEST = r'^/restapis/([A-Za-z0-9_\-]+)/([A-Za-z0-9_\-]+)/%s/(.*)$' % PATH_USER_REQUEST
# Maps API IDs to list of gateway responses
GATEWAY_RESPONSES = {}
class AuthorizationError(Exception):
pass
class ProxyListenerApiGateway(ProxyListener):
def forward_request(self, method, path, data, headers):
if re.match(PATH_REGEX_USER_REQUEST, path):
search_match = re.search(PATH_REGEX_USER_REQUEST, path)
api_id = search_match.group(1)
stage = search_match.group(2)
relative_path_w_query_params = '/%s' % search_match.group(3)
try:
return invoke_rest_api(api_id, stage, method, relative_path_w_query_params, data, headers, path=path)
except AuthorizationError as e:
return make_error_response('Not authorized to invoke REST API %s: %s' % (api_id, e), 403)
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
if re.match(PATH_REGEX_RESPONSES, path):
search_match = re.search(PATH_REGEX_RESPONSES, path)
api_id = search_match.group(1)
if method == 'GET':
return get_gateway_responses(api_id)
if method == 'PUT':
response_type = search_match.group(2).lstrip('/')
return put_gateway_response(api_id, response_type, data)
return True
def return_response(self, method, path, data, headers, response):
# fix backend issue (missing support for API documentation)
if re.match(r'/restapis/[^/]+/documentation/versions', path):
if response.status_code == 404:
return requests_response({'position': '1', 'items': []})
# publish event
if method == 'POST' and path == '/restapis':
content = json.loads(to_str(response.content))
event_publisher.fire_event(event_publisher.EVENT_APIGW_CREATE_API,
payload={'a': event_publisher.get_hash(content['id'])})
api_regex = r'^/restapis/([a-zA-Z0-9\-]+)$'
if method == 'DELETE' and re.match(api_regex, path):
api_id = re.sub(api_regex, r'\1', path)
event_publisher.fire_event(event_publisher.EVENT_APIGW_DELETE_API,
payload={'a': event_publisher.get_hash(api_id)})
# ------------
# API METHODS
# ------------
def get_gateway_responses(api_id):
result = GATEWAY_RESPONSES.get(api_id, [])
base_path = '/restapis/%s/gatewayresponses' % api_id
href = 'http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html'
def item(i):
i['_links'] = {
'self': {
'href': '%s/%s' % (base_path, i['responseType'])
},
'gatewayresponse:put': {
'href': '%s/{response_type}' % base_path,
'templated': True
},
'gatewayresponse:update': {
'href': '%s/%s' % (base_path, i['responseType'])
}
}
i['responseParameters'] = i.get('responseParameters', {})
i['responseTemplates'] = i.get('responseTemplates', {})
return i
result = {
'_links': {
'curies': {
'href': href,
'name': 'gatewayresponse',
'templated': True
},
'self': {'href': base_path},
'first': {'href': base_path},
'gatewayresponse:by-type': {
'href': '%s/{response_type}' % base_path,
'templated': True
},
'item': [{'href': '%s/%s' % (base_path, r['responseType'])} for r in result]
},
'_embedded': {
'item': [item(i) for i in result]
},
# Note: Looks like the format required by aws CLI ("item" at top level) differs from the docs:
# https://docs.aws.amazon.com/apigateway/api-reference/resource/gateway-responses/
'item': [item(i) for i in result]
}
return result
def put_gateway_response(api_id, response_type, data):
GATEWAY_RESPONSES[api_id] = GATEWAY_RESPONSES.get(api_id, [])
data['responseType'] = response_type
GATEWAY_RESPONSES[api_id].append(data)
return data
def run_authorizer(api_id, headers, authorizer):
# TODO implement authorizers
pass
def authorize_invocation(api_id, headers):
client = aws_stack.connect_to_service('apigateway')
authorizers = client.get_authorizers(restApiId=api_id, limit=100).get('items', [])
for authorizer in authorizers:
run_authorizer(api_id, headers, authorizer)
def validate_api_key(api_key, stage):
    usage_plan_id = None
client = aws_stack.connect_to_service('apigateway')
usage_plans = client.get_usage_plans()
for item in usage_plans.get('items', []):
api_stages = item.get('apiStages', [])
for api_stage in api_stages:
if api_stage.get('stage') == stage:
usage_plan_id = item.get('id')
if not usage_plan_id:
return False
usage_plan_keys = client.get_usage_plan_keys(usagePlanId=usage_plan_id)
    for item in usage_plan_keys.get('items', []):
        # accept the key if it matches any key attached to the usage plan
        if item.get('value') == api_key:
            return True
    return False
def is_api_key_valid(is_api_key_required, headers, stage):
if not is_api_key_required:
return True
api_key = headers.get('X-API-Key')
if not api_key:
return False
return validate_api_key(api_key, stage)
def update_content_length(response):
if response and response.content:
response.headers['Content-Length'] = str(len(response.content))
def invoke_rest_api(api_id, stage, method, invocation_path, data, headers, path=None):
path = path or invocation_path
relative_path, query_string_params = extract_query_string_params(path=invocation_path)
# run gateway authorizers for this request
authorize_invocation(api_id, headers)
path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
try:
extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
except Exception:
return make_error_response('Unable to find path %s' % path, 404)
api_key_required = resource.get('resourceMethods', {}).get(method, {}).get('apiKeyRequired')
if not is_api_key_valid(api_key_required, headers, stage):
return make_error_response('Access denied - invalid API key', 403)
integrations = resource.get('resourceMethods', {})
integration = integrations.get(method, {})
if not integration:
integration = integrations.get('ANY', {})
integration = integration.get('methodIntegration')
if not integration:
if method == 'OPTIONS' and 'Origin' in headers:
# default to returning CORS headers if this is an OPTIONS request
return get_cors_response(headers)
return make_error_response('Unable to find integration for path %s' % path, 404)
uri = integration.get('uri')
if integration['type'] == 'AWS':
if 'kinesis:action/' in uri:
if uri.endswith('kinesis:action/PutRecords'):
target = kinesis_listener.ACTION_PUT_RECORDS
if uri.endswith('kinesis:action/ListStreams'):
target = kinesis_listener.ACTION_LIST_STREAMS
template = integration['requestTemplates'][APPLICATION_JSON]
new_request = aws_stack.render_velocity_template(template, data)
# forward records to target kinesis stream
headers = aws_stack.mock_aws_request_headers(service='kinesis')
headers['X-Amz-Target'] = target
result = common.make_http_request(url=TEST_KINESIS_URL,
method='POST', data=new_request, headers=headers)
return result
if method == 'POST':
if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
template = integration['requestTemplates'][APPLICATION_JSON]
account_id, queue = uri.split('/')[-2:]
region_name = uri.split(':')[3]
new_request = '%s&QueueName=%s' % (aws_stack.render_velocity_template(template, data), queue)
headers = aws_stack.mock_aws_request_headers(service='sqs', region_name=region_name)
url = urljoin(TEST_SQS_URL, '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
result = common.make_http_request(url, method='POST', headers=headers, data=new_request)
return result
msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (uri, method)
LOGGER.warning(msg)
return make_error_response(msg, 404)
elif integration['type'] == 'AWS_PROXY':
if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]
data_str = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
source_ip = headers['X-Forwarded-For'].split(',')[-2]
# Sample request context:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
request_context = {
# adding stage to the request context path.
# https://github.com/localstack/localstack/issues/2210
'path': '/' + stage + relative_path,
'accountId': account_id,
'resourceId': resource.get('id'),
'stage': stage,
'identity': {
'accountId': account_id,
'sourceIp': source_ip,
'userAgent': headers['User-Agent'],
},
'httpMethod': method,
'protocol': 'HTTP/1.1',
'requestTime': datetime.datetime.utcnow(),
'requestTimeEpoch': int(time.time() * 1000),
}
try:
path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)
except Exception:
path_params = {}
result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str, stage, api_id,
headers, path_params=path_params,
query_string_params=query_string_params,
method=method, resource_path=path,
request_context=request_context)
if isinstance(result, FlaskResponse):
return flask_to_requests_response(result)
if isinstance(result, Response):
return result
response = LambdaResponse()
parsed_result = result if isinstance(result, dict) else json.loads(str(result))
parsed_result = common.json_safe(parsed_result)
parsed_result = {} if parsed_result is None else parsed_result
response.status_code = int(parsed_result.get('statusCode', 200))
parsed_headers = parsed_result.get('headers', {})
if parsed_headers is not None:
response.headers.update(parsed_headers)
try:
if isinstance(parsed_result['body'], dict):
                    response._content = json.dumps(parsed_result['body'])
                else:
                    response._content = to_bytes(parsed_result['body'])
except Exception:
response._content = '{}'
update_content_length(response)
response.multi_value_headers = parsed_result.get('multiValueHeaders') or {}
return response
elif uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
# arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
action = uri.split(':dynamodb:action')[1].split('&Table=')[0]
if 'PutItem' in action and method == 'PUT':
response_template = path_map.get(relative_path, {}).get('resourceMethods', {})\
.get(method, {}).get('methodIntegration', {}).\
get('integrationResponses', {}).get('200', {}).get('responseTemplates', {})\
.get('application/json', None)
if response_template is None:
msg = 'Invalid response template defined in integration response.'
return make_error_response(msg, 404)
response_template = json.loads(response_template)
if response_template['TableName'] != table_name:
msg = 'Invalid table name specified in integration response template.'
return make_error_response(msg, 404)
dynamo_client = aws_stack.connect_to_resource('dynamodb')
table = dynamo_client.Table(table_name)
event_data = {}
data_dict = json.loads(data)
for key, _ in response_template['Item'].items():
event_data[key] = data_dict[key]
table.put_item(Item=event_data)
response = requests_response(event_data, headers=aws_stack.mock_aws_request_headers())
return response
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
return make_error_response(msg, 404)
elif integration['type'] in ['HTTP_PROXY', 'HTTP']:
function = getattr(requests, method.lower())
if integration['type'] == 'HTTP':
# apply custom request template
template = integration.get('requestTemplates', {}).get(APPLICATION_JSON)
if template:
data = aws_stack.render_velocity_template(template, data)
if isinstance(data, dict):
data = json.dumps(data)
result = function(integration['uri'], data=data, headers=headers)
if integration['type'] == 'HTTP':
# apply custom response template
template = integration.get('responseTemplates', {}).get(APPLICATION_JSON)
if template and result.content:
result._content = aws_stack.render_velocity_template(template, result.content)
update_content_length(result)
return result
else:
msg = ('API Gateway integration type "%s" for method "%s" not yet implemented' %
(integration['type'], method))
LOGGER.warning(msg)
return make_error_response(msg, 404)
# instantiate listener
UPDATE_APIGATEWAY = ProxyListenerApiGateway()
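# Illustrative sketch (the API id and stage are hypothetical): how
# PATH_REGEX_USER_REQUEST decomposes an invocation path in forward_request:
#
#   m = re.search(PATH_REGEX_USER_REQUEST,
#                 '/restapis/a1b2c3/test/%s/pets/1?page=2' % PATH_USER_REQUEST)
#   m.group(1)  # 'a1b2c3'         (API id)
#   m.group(2)  # 'test'           (stage)
#   m.group(3)  # 'pets/1?page=2'  (relative path plus query string)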
| 42.416452 | 184 | 0.618242 |
52c652d0feeee6df4aac3ed08fa7d17ca5dfbb8f | 3,454 | py | Python | my_classes/Tuples/.history/name_tuples_20210722114320.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/Tuples/.history/name_tuples_20210722114320.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/Tuples/.history/name_tuples_20210722114320.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Tuple as Data Structure
We have see how we interpreted tuples as data structures
The position of the object contained in the tuple gives it meaning
For example, we can represent a 2D coordinate as: (10, 20)
x y
If pt is a position tuple, we can retrieve the x and x, y = pt or x = pt[0]
y coordinates using: y = py[1]
For example, to calculate the distance of pt from the origin we could write:
dist = math.sgrt(pt[0] ** 2 + pt[1] ** 2)
Now this is not very readable, and if someone sees this code they will have ti know thatpt[0] mans the x-coordinate and pt[1] means the y-coordinate.
This is not very transparent.
# Using a class instead.
At this point, in order to make things clearer for the reader (not the complier, the reader), we might want to approach this using a class method instead.
"""
class Point2D:
def __init__(self, x, y): # pt = Point2D(10, 20)
self.x = x
self.y = y
class Stock:
    def __init__(self, symbol, year, month, day, open, high, low, close):
        self.symbol = symbol
        self.year = year
        self.month = month
        self.day = day
        self.open = open
        self.high = high
        self.low = low
        self.close = close
# Class approach            # Tuple approach
# djia.symbol               # djia[0]
# djia.open                 # djia[4]
# djia.close                # djia[7]
# djia.high - djia.low      # djia[5] - djia[6]
""" Extra stuff
At the very least we should implement the __eq__ method too
    Point2D(10, 20) == Point2D(10, 20)  -> True
"""
class Point2D:
def __init__(self, x, y): # pt = Point2D(10, 20)
self.x = x
self.y = y
def __repr__(self):
        return f'Point2D(x={self.x}, y={self.y})'
    def __eq__(self, other):
if isinstance(other, Point2D):
return self.x == other.x and self.y == other.y
else:
return False
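# A quick usage check (a sketch added for illustration):
p1 = Point2D(10, 20)
p2 = Point2D(10, 20)
print(p1)        # Point2D(x=10, y=20)
print(p1 == p2)  # True, via __eq__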
""" Named Tuples to the rescue
There are other reasons to seek another approach; we cover those in the coding video.
Amongst other things, Point2D objects are mutable - something we may not want!
There's a lot to like about using tuples to represent simple data structures.
The real drawback is that we have to know what the positions mean, and remember this in our code.
If we ever need to change the structure of our tuple in our code (like inserting a value that we forgot), most likely our code will break!
Named tuples give meaningful names to positions:
    they subclass tuple, and add a layer that assigns property names to the positional elements
    located in the collections standard library module
        from collections import namedtuple
    namedtuple is a function (not a type) which generates a new class -> class factory
        that new class inherits from tuple
        but also provides named properties to access elements of the tuple
    yet an instance of that class is still a tuple
Generating Named Tuple Classes
We have to understand that namedtuple is a class factory.
namedtuple needs a few things to generate this class:
    the class name we want to use
    the field names to assign, in the order of the elements in the tuple
"""
| 32.584906 | 154 | 0.615518 |
2b8c696f61def54035ba6d0878995139210695fc | 25,003 | py | Python | rest-api-server/main.py | OddballSports-tv/tidbyt-scoreboard | 35dc66826966eb1a93c1fd1bbfe57367b2f29741 | [
"Apache-2.0"
] | null | null | null | rest-api-server/main.py | OddballSports-tv/tidbyt-scoreboard | 35dc66826966eb1a93c1fd1bbfe57367b2f29741 | [
"Apache-2.0"
] | null | null | null | rest-api-server/main.py | OddballSports-tv/tidbyt-scoreboard | 35dc66826966eb1a93c1fd1bbfe57367b2f29741 | [
"Apache-2.0"
] | null | null | null | # imports
from flask import Flask
from flask import request
from flask import abort
from functools import wraps
from flask import json
from flask import jsonify
from flask_cors import CORS
from markupsafe import escape
from PIL import Image, ImageDraw
import os
import base64
from io import BytesIO
from google.cloud import datastore
from google.oauth2 import service_account
from google.cloud import language
from datetime import datetime
import isodate
import uuid
# Google Cloud Credentials
# NOTE: enable this environment variable for local testing and disable it before deployment
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/drhoffma/oddballsports_git/tidbyt-scoreboard/oddballsportstvdev-e010e1ec7ca7.json"
client = datastore.Client(
project="oddballsportstvdev"
)
# Flask App
app = Flask(__name__)
CORS(app)
# decorator which ensures a valid API key is passed in the headers
def require_appkey(view_function):
@wraps(view_function)
# the new, post-decoration function. Note *args and **kwargs here.
def decorated_function(*args, **kwargs):
# grab the authorization header
headers = request.headers
auth = headers.get("X-Api-Key")
# query for api keys
apikey_query = client.query(kind="api_key")
apikeys = apikey_query.fetch()
apikeys = {r.key.id_or_name: r for r in apikeys}
if auth not in apikeys:
abort(401)
else:
return view_function(*args, **kwargs)
return decorated_function
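# Client-side sketch (the key and URL are hypothetical, added for
# illustration): every endpoint below expects the key in the X-Api-Key header:
#
#   import requests
#   resp = requests.get('http://localhost:8080/venue/list',
#                       headers={'X-Api-Key': 'YOUR-API-KEY'})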
@app.route("/venue/list", methods=["GET"])
@require_appkey
def venue_list():
try:
venue_query = client.query(kind="venue")
venue_results = venue_query.fetch()
venues = {r.key.id_or_name: r for r in venue_results}
results = {
"status": "success",
"venues": venues
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/venue/add", methods=["POST"])
@require_appkey
def venue_add():
"""
JSON Data expected:
{
"venue": {
"name": "Tuman's Tap Room",
"city": "Chicago"
}
}
"""
try:
data = request.get_json()
venue_key = client.key("venue", data["venue"]["name"])
court_key = client.key("court", parent=venue_key)
entity = datastore.Entity(venue_key)
entity.update({
"name": data["venue"]["name"],
"city": data["venue"]["city"]
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/court/list", methods=["GET"])
@require_appkey
def court_list():
try:
# query for venues
venue_query = client.query(kind="venue")
venue_results = venue_query.fetch()
venue_ids = [r.key.id_or_name for r in venue_results]
        # query for courts associated with venues and build dictionary
courts = {}
for venue_id in venue_ids:
query = client.query(kind="court", ancestor=client.key("venue", venue_id))
court_results = query.fetch()
courts[venue_id] = []
for court in court_results:
courts[venue_id].append(court)
results = {
"status": "success",
"courts": courts
}
except Exception as e:
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/court/list/<venue>", methods=["GET"])
@require_appkey
def court_list_per_venue(venue):
try:
query = client.query(kind="court", ancestor=client.key("venue", venue))
courts = query.fetch()
results = {
"status": "success",
"courts": {
venue: list(courts)
}
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/court/add", methods=["POST"])
@require_appkey
def court_add():
"""
JSON Data expected:
{
"venue": {
"name": "Cleos",
"court": {
"name": "Patio",
"dimensions": "30x8",
"ends": ["Alley", "Tables"]
}
}
}
"""
try:
data = request.get_json()
parent_key = client.key("venue", data["venue"]["name"])
key = client.key("court", data["venue"]["court"]["name"], parent=parent_key)
entity = datastore.Entity(key)
entity.update({
"name": data["venue"]["court"]["name"],
"dimensions": data["venue"]["court"]["dimensions"],
"ends": data["venue"]["court"]["ends"]
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/tidbyt/list", methods=["GET"])
@require_appkey
def tidbyt_list():
try:
query = client.query(kind="tidbyt")
tidbyts = query.fetch()
tidbyts = {r.key.id_or_name: r for r in tidbyts}
results = {
"status": "success",
"tidbyts": tidbyts
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/tidbyt/add", methods=["POST"])
@require_appkey
def tidbyt_add():
"""
JSON Data expected:
{
"tidbyt": {
"name": "abc-0000",
"device_id": "insert-device-id",
"api_key": "insert-api-key"
}
}
"""
try:
data = request.get_json()
key = client.key("tidbyt", data["tidbyt"]["name"])
entity = datastore.Entity(key)
entity.update({
"device_id": data["tidbyt"]["device_id"],
"api_key": data["tidbyt"]["api_key"]
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/game/add", methods=["POST"])
@require_appkey
def game_add():
"""
JSON Data expected:
{
"game": {
"team_a": "Daddy's",
"team_b": "Rats",
"venue": "Cleo's", | optional
"court": "Fence", | optional
"time_scheduled": isodate.isodatetime.datetime_isoformat(datetime.now()),
"team_a_ball_color_pattern": "yellow", | optional
"team_b_ball_color_pattern": "pink" | optional
"throwing_pairs": {
"team_a": {
"Alley": [
"David Hoffman",
"Jamie Lescher"
],
"Tables": [
"Elizabeth Hoffman",
"Chris Todaro"
]
},
"team_b": {
"Alley": [
"Alex Gara",
"Nick"
],
"Tables": [
"Scott Sansbury",
"Lydia Gara"
]
}
},
}
}
"""
try:
data = request.get_json()
if "venue" not in data["game"]:
data["game"]["venue"] = "unassigned"
if "court" not in data["game"]:
data["game"]["court"] = "unassigned"
if "team_a_ball_color_pattern" not in data["game"]:
data["game"]["team_a_ball_color_pattern"] = "red"
if "team_b_ball_color_pattern" not in data["game"]:
data["game"]["team_b_ball_color_pattern"] = "blue"
if "timer_duration" not in data["game"]:
data["game"]["timer_duration"] = str(isodate.isoduration.duration_isoformat(isodate.duration.Duration(minutes=20)))
if "time_scheduled" not in data["game"]:
data["game"]["time_scheduled"] = isodate.isodatetime.datetime_isoformat(
datetime.now())
game_id = str(uuid.uuid4())
key = client.key("game", game_id)
entity = datastore.Entity(key)
entity.update({
"game_id": game_id,
"team_a": data["game"]["team_a"],
"team_b": data["game"]["team_b"],
"venue": data["game"]["venue"],
"court": data["game"]["court"],
"team_a_ball_color_pattern": data["game"]["team_a_ball_color_pattern"],
"team_b_ball_color_pattern": data["game"]["team_b_ball_color_pattern"],
"team_a_score": 0,
"team_b_score": 0,
"timer_duration": data["game"]["timer_duration"],
"time_scheduled": data["game"]["time_scheduled"],
"paused": False,
"in_progress": False,
"throwing_pairs": data["game"]["throwing_pairs"],
"frames": []
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return "success {}".format(game_id)
@app.route("/game/list", methods=["GET"])
@require_appkey
def game_list():
try:
query = client.query(kind="game")
games = query.fetch()
games = {r.key.id_or_name: r for r in games}
results = {
"status": "success",
"games": games
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/game/list/<game_id>", methods=["GET"])
@require_appkey
def game_list_by_id(game_id):
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
results = {
"status": "success",
"games": {
game_id: entity
}
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
@app.route("/game/run/start/<game_id>")
@require_appkey
def game_run_start(game_id):
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
if not entity["in_progress"]:
# calculate the end game time
starts_at = datetime.now()
duration = isodate.isoduration.parse_duration(entity["timer_duration"])
ends_at = starts_at + duration
# update the game
entity.update({
"time_started_at": str(isodate.isodatetime.datetime_isoformat(starts_at)),
"timer_ends_at": str(isodate.isodatetime.datetime_isoformat(ends_at)),
"in_progress": True
})
client.put(entity)
else:
raise ValueError("Game is already started")
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/game/run/end/<game_id>")
@require_appkey
def game_run_end(game_id):
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
if entity["in_progress"]:
# end game time is now
ended_at = datetime.now()
# update the game
entity.update({
"time_ended_at": str(isodate.isodatetime.datetime_isoformat(ended_at)),
"in_progress": False
})
client.put(entity)
else:
raise ValueError("Game is already ended")
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/game/run/pause/<game_id>")
@require_appkey
def game_run_pause(game_id):
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
try:
paused = entity["paused"]
except KeyError:
paused = False
if entity["in_progress"] and not paused:
# paused game time is now
paused = datetime.now()
# update the game
entity.update({
"time_paused": str(isodate.isodatetime.datetime_isoformat(paused)),
"paused": True
})
client.put(entity)
else:
raise ValueError("Game is not in progress; can't be paused")
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/game/run/resume/<game_id>")
@require_appkey
def game_run_resume(game_id):
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
if entity["in_progress"] and entity["paused"]:
old_ends_at = isodate.isodatetime.parse_datetime(entity["timer_ends_at"])
time_paused = isodate.isodatetime.parse_datetime(entity["time_paused"])
time_resumed = datetime.now()
try:
cumulative_time_paused_duration = isodate.isoduration.parse_duration(entity["time_cumulative_time_paused_duration"])
            except Exception:
cumulative_time_paused_duration = isodate.isoduration.parse_duration(isodate.isoduration.duration_isoformat(isodate.duration.Duration(seconds=0)))
cumulative_time_paused_duration = cumulative_time_paused_duration + (time_resumed - time_paused)
new_ends_at = old_ends_at + (time_resumed - time_paused)
# update the game
entity.update({
"time_resumed": str(isodate.isodatetime.datetime_isoformat(time_resumed)),
"timer_ends_at": str(isodate.isodatetime.datetime_isoformat(new_ends_at)),
"time_cumulative_time_paused_duration": str(isodate.isoduration.duration_isoformat(cumulative_time_paused_duration)),
"paused": False
})
client.put(entity)
else:
raise ValueError("Game is not in progress; can't be paused")
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
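# Worked example (added for illustration): if a game due to end at 20:00 is
# paused at 19:50 and resumed at 19:53, time_resumed - time_paused is three
# minutes, so timer_ends_at slides from 20:00 to 20:03 - the deadline moves
# forward by exactly the paused interval.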
@app.route("/game/set_value/<game_id>", methods=["POST"])
@require_appkey
def game_set_value(game_id):
"""
JSON Data expected:
{
"team_a_ball_color_pattern": "green",
}
"""
try:
# grab the json data
data = request.get_json()
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
# update the data
entity.update(data)
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/game/run/set_score/<game_id>", methods=["POST"])
@require_appkey
def game_run_set_score(game_id):
"""
JSON Data expected:
{
"team_a_score": 4,
"team_b_score": 3,
"append_frame": {
"side": "Alley",
"pallino_control": "team_b",
"team_a_points": 0,
"team_b_points": 2
}
}
"""
try:
# grab the json data
data = request.get_json()
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
# grab and append the frames
frames = entity["frames"]
frames.append(data["append_frame"])
        # ensure the game is in progress and not paused
if entity["in_progress"] and not entity["paused"]:
# update the game
entity.update({
"team_a_score": data["team_a_score"],
"team_b_score": data["team_b_score"],
"frames": frames
})
client.put(entity)
else:
raise ValueError("Game is already ended")
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/tidbyt/<tidbyt_id>/set_game/<game_id>")
@require_appkey
def game_run_set_scoreboard_display(tidbyt_id, game_id):
try:
# grab the tidbyt
key = client.key("tidbyt", tidbyt_id)
entity = client.get(key)
# update the game
entity.update({
"game_id": game_id
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/lucky_score/<game_id>")
@require_appkey
def lucky_score(game_id):
mode = 'RGBA'
size = (64, 22)
color = (00, 00, 00)
image = Image.new(mode, size, color)
try:
# grab the game
key = client.key("game", game_id)
entity = client.get(key)
team_a_score = str(entity["team_a_score"]).zfill(2)
team_b_score = str(entity["team_b_score"]).zfill(2)
team_a_ball_color_pattern = entity["team_a_ball_color_pattern"]
team_b_ball_color_pattern = entity["team_b_ball_color_pattern"]
colors = {
"red": (255, 0, 0),
"blue": (0, 0, 255),
"green": (0, 135, 62),
"pink": (255,192,203),
"yellow": (255, 255, 0),
"orange": (255, 128, 0),
"black": (0, 0, 0)
}
# team colors
team_a_color = colors[team_a_ball_color_pattern]
team_b_color = colors[team_b_ball_color_pattern]
# team box backgrounds
size = (32, 22)
team_a_background = Image.new(mode, size, team_a_color)
team_b_background = Image.new(mode, size, team_b_color)
# place the rectangles on the background
image.paste(team_a_background, (0, 0))
image.paste(team_b_background, (32, 0))
# concatenate the scores
scores_str = team_a_score + team_b_score
for i in range(4):
foreground = Image.open(os.path.join("luckiest_digits", scores_str[i] + ".png"))
image.paste(foreground, (i * 16, 0), foreground)
buffered = BytesIO()
image.save(buffered, format="PNG")
image_str = base64.b64encode(buffered.getvalue())
# calculate the time remaining
if not entity["paused"] and entity["in_progress"]:
ends_at = isodate.isodatetime.parse_datetime(entity["timer_ends_at"])
now = datetime.now()
duration_remaining = ends_at - now
if str(duration_remaining)[0] == "-":
duration_remaining = "0:00:00"
else:
duration_remaining = str(duration_remaining)[:7]
elif not entity["paused"] and not entity["in_progress"]:
duration_remaining = "NOT STARTED"
else:
duration_remaining = "PAUSED"
    except Exception:
image = Image.open(os.path.join("oddball_graphics", "obie_red.png"))
buffered = BytesIO()
image.save(buffered, format="PNG")
image_str = base64.b64encode(buffered.getvalue())
return json.dumps({
"0": image_str.decode("utf-8"),
"time_str": "0:00:00",
"team_a": "",
"team_b": ""
})
return json.dumps({
"0": image_str.decode("utf-8"),
"time_str": duration_remaining,
"team_a": entity["team_a"],
"team_b": entity["team_b"]
})
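# Worked example (added for illustration) of the digit layout above: scores
# 4 and 12 zero-fill to '04' and '12', so scores_str == '0412' and the four
# 16px-wide digit tiles are pasted at x = 0, 16, 32 and 48 on the 64x22 canvas.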
@app.route("/user/add/<google_id>", methods=["POST"])
@require_appkey
def user_add(google_id):
"""
JSON Data expected:
{
"user": {
# required
"firstname": "Jane",
"lastname": "Doe",
"email": "jane.doe@yahoo.com",
"roles": ["referee", "player"],
"active_subscriber": false,
# optional
"nickname": "",
"phone": "555-555-5555",
"gender": "non-binary",
"league": ["abc_chicago"],
"instagram": "",
"twitter": "",
"badges": []
}
}
"""
try:
data = request.get_json()
user_key = client.key("user", google_id)
entity = datastore.Entity(user_key)
# defaults
if "nickname" not in data["user"]:
data["user"]["nickname"] = ""
if "avatar_base64" not in data["user"]:
data["user"]["avatar_base64"] = ""
if "phone" not in data["user"]:
data["user"]["phone"] = ""
if "gender" not in data["user"]:
data["user"]["gender"] = "other"
if "leagues" not in data["user"]:
data["user"]["leagues"] = []
if "instagram" not in data["user"]:
data["user"]["instagram"] = ""
if "twitter" not in data["user"]:
data["user"]["twitter"] = ""
if "badges" not in data["user"]:
data["user"]["badges"] = []
# ensure leagues and badges are lists
if not isinstance(data["user"]["roles"], list):
raise ValueError("Roles must be a list")
if not isinstance(data["user"]["leagues"], list):
raise ValueError("Leagues must be a list")
if not isinstance(data["user"]["badges"], list):
raise ValueError("Badges must be a list")
entity.update({
# required
"firstname": data["user"]["firstname"],
"lastname": data["user"]["lastname"],
"email": data["user"]["email"],
"active_subscriber": data["user"]["active_subscriber"],
"roles": data["user"]["roles"],
# optional
"nickname": data["user"]["nickname"],
"avatar_base64": data["user"]["avatar_base64"],
"phone": data["user"]["phone"],
"gender": data["user"]["gender"],
"leagues": data["user"]["leagues"],
"instagram": data["user"]["instagram"],
"twitter": data["user"]["twitter"],
"badges": data["user"]["badges"],
})
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/user/update/<google_id>", methods=["POST"])
@require_appkey
def user_update(google_id):
"""
JSON Data expected:
{
"user": {
"key": "value",
"append_league": "league",
"append_badge": "badge"
}
}
"""
try:
data = request.get_json()
user_key = client.key("user", google_id)
entity = client.get(user_key)
if "badges" in data["user"]:
raise ValueError("only use 'append_badges' key since badges can only be added")
# ensure leagues, roles, and badges are lists
if "roles" in data["user"]:
if not isinstance(data["user"]["roles"], list):
raise ValueError("Roles must be a list")
if "leagues" in data["user"]:
if not isinstance(data["user"]["leagues"], list):
raise ValueError("Leagues must be a list")
if "append_badges" in data["user"]:
if not isinstance(data["user"]["append_badges"], list):
raise ValueError("'append_badges' must be a list")
else:
data["user"]["badges"] = entity["badges"] + data["user"]["append_badges"]
del data["user"]["append_badges"]
entity.update(data["user"])
client.put(entity)
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps({
"status": "success"
})
@app.route("/user/list", methods=["GET"])
@require_appkey
def user_list():
try:
query = client.query(kind="user")
users = query.fetch()
users = {r.key.id_or_name: r for r in users}
results = {
"status": "success",
"users": users
}
except Exception as e:
print(str(e))
return json.dumps({
"status": "exception: {}".format(repr(e))
})
return json.dumps(results)
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
| 30.015606 | 162 | 0.544615 |
784c65288205c2c2abd2bb6f065f0d721ff8d0e6 | 70 | py | Python | tests/filesystem/__init__.py | mehrdad-shokri/retdec-regression-tests-framework | 9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3 | [
"MIT"
] | 21 | 2017-12-12T20:38:43.000Z | 2019-04-14T12:46:10.000Z | tests/filesystem/__init__.py | mehrdad-shokri/retdec-regression-tests-framework | 9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3 | [
"MIT"
] | 6 | 2018-01-06T13:32:23.000Z | 2018-09-14T15:09:11.000Z | tests/filesystem/__init__.py | mehrdad-shokri/retdec-regression-tests-framework | 9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3 | [
"MIT"
] | 11 | 2017-12-12T20:38:46.000Z | 2018-07-19T03:12:03.000Z | """
Tests for the :mod:`regression_tests.filesystem` package.
"""
| 17.5 | 61 | 0.671429 |
f2a56270f15b5a1ab0a32cb3abea2b8058c2f1af | 25,959 | py | Python | amaml_reach_train.py | ZiyeHu/mil | a240aac7734ecc129788c944e3623aff1aa1b6f0 | [
"MIT"
] | null | null | null | amaml_reach_train.py | ZiyeHu/mil | a240aac7734ecc129788c944e3623aff1aa1b6f0 | [
"MIT"
] | null | null | null | amaml_reach_train.py | ZiyeHu/mil | a240aac7734ecc129788c944e3623aff1aa1b6f0 | [
"MIT"
] | null | null | null | import numpy as np
import random
import tensorflow as tf
import logging
import imageio
import os
import gym
from functools import reduce
from operator import mul
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data_generator import DataGenerator
# from amaml import MIL
from amaml import MIL
from evaluation.eval_reach import evaluate_vision_reach
from evaluation.eval_push import evaluate_push
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
LOGGER = logging.getLogger(__name__)
flags.DEFINE_string('experiment', 'sim_vision_reach', 'sim_vision_reach or sim_push')
flags.DEFINE_string('demo_file', 'mil_data/data/sim_vision_reach/',
'path to the directory where demo files that containing robot states and actions are stored')
flags.DEFINE_string('demo_gif_dir', 'mil_data/data/sim_vision_reach/', 'path to the videos of demonstrations')
flags.DEFINE_string('gif_prefix', 'color', 'prefix of the video directory for each task, e.g. object_0 for task 0')
flags.DEFINE_integer('im_width', 80,
'width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach')
flags.DEFINE_integer('im_height', 64,
'height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach')
flags.DEFINE_integer('num_channels', 3, 'number of channels of the images in the demo videos')
flags.DEFINE_integer('T', 50, 'time horizon of the demo videos, 50 for reach, 100 for push')
flags.DEFINE_bool('hsv', False, 'convert the image to HSV format')
flags.DEFINE_bool('use_noisy_demos', False, 'use noisy demonstrations or not (for domain shift)')
flags.DEFINE_string('noisy_demo_gif_dir', None, 'path to the videos of noisy demonstrations')
flags.DEFINE_string('noisy_demo_file', None,
'path to the directory where noisy demo files that containing robot states and actions are stored')
flags.DEFINE_bool('no_action', True, 'do not include actions in the demonstrations for inner update')
flags.DEFINE_bool('no_state', False, 'do not include states in the demonstrations during training')
flags.DEFINE_bool('no_final_eept', False, 'do not include final ee pos in the demonstrations for inner update')
flags.DEFINE_bool('zero_state', True,
'zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)')
flags.DEFINE_bool('two_arms', False, 'use two-arm structure when state is zeroed-out')
flags.DEFINE_integer('training_set_size', 750, 'size of the training set, 1500 for sim_reach, 693 for sim push, and \
-1 for all data except those in validation set')
flags.DEFINE_integer('val_set_size', 150, 'size of the training set, 150 for sim_reach and 76 for sim push')
## Training options
flags.DEFINE_integer('metatrain_iterations', 30000,'number of metatraining iterations.') # 30k for pushing, 50k for reaching and placing
flags.DEFINE_integer('meta_batch_size', 25,'number of tasks sampled per meta-update') # 25 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_float('meta_lr', 1e-3, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 1,
'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('train_update_lr', 1e-3,
'step size alpha for inner gradient update.') # 0.001 for reaching, 0.01 for pushing and placing
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.') # 5 for placing
flags.DEFINE_bool('clip', True, 'use gradient clipping for fast gradient')
flags.DEFINE_float('clip_max', 20.0, 'maximum clipping value for fast gradient')
flags.DEFINE_float('clip_min', -20.0, 'minimum clipping value for fast gradient')
flags.DEFINE_bool('fc_bt', True, 'use bias transformation for the first fc layer')
flags.DEFINE_bool('all_fc_bt', False, 'use bias transformation for all fc layers')
flags.DEFINE_bool('conv_bt', False, 'use bias transformation for the first conv layer, N/A for using pretraining')
flags.DEFINE_integer('bt_dim', 10, 'the dimension of bias transformation for FC layers')
flags.DEFINE_string('pretrain_weight_path', 'N/A', 'path to pretrained weights')
flags.DEFINE_bool('train_pretrain_conv1', False, 'whether to finetune the pretrained weights')
flags.DEFINE_bool('two_head', True, 'use two-head architecture')
flags.DEFINE_bool('learn_final_eept', False, 'learn an auxiliary loss for predicting final end-effector pose')
flags.DEFINE_bool('learn_final_eept_whole_traj', False, 'learn an auxiliary loss for predicting final end-effector pose \
by passing the whole trajectory of eepts (used for video-only models)')
flags.DEFINE_bool('stopgrad_final_eept', True,
'stop the gradient when concatenate the predicted final eept with the feature points')
flags.DEFINE_integer('final_eept_min', 6, 'first index of the final eept in the action array')
flags.DEFINE_integer('final_eept_max', 8, 'last index of the final eept in the action array')
flags.DEFINE_float('final_eept_loss_eps', 0.1, 'the coefficient of the auxiliary loss')
flags.DEFINE_float('act_loss_eps', 1.0, 'the coefficient of the action loss')
flags.DEFINE_float('loss_multiplier', 100.0,
'the constant multiplied with the loss value, 100 for reach and 50 for push')
flags.DEFINE_bool('use_l1_l2_loss', False, 'use a loss with combination of l1 and l2')
flags.DEFINE_float('l2_eps', 0.01, 'coeffcient of l2 loss')
flags.DEFINE_bool('shuffle_val', False, 'whether to choose the validation set via shuffling or not')
## Model options
flags.DEFINE_integer('random_seed', 0, 'random seed for training')
flags.DEFINE_bool('fp', True, 'use spatial soft-argmax or not')
flags.DEFINE_string('norm', 'layer_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_bool('dropout', False, 'use dropout for fc layers or not')
flags.DEFINE_float('keep_prob', 0.5, 'keep probability for dropout')
flags.DEFINE_integer('num_filters', 5, 'number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching.')
flags.DEFINE_integer('filter_size', 3, 'filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_conv_layers', 5, 'number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_strides', 3, 'number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_integer('num_fc_layers', 3, 'number of fully-connected layers')
flags.DEFINE_integer('layer_size', 200, 'hidden dimension of fully-connected layers')
flags.DEFINE_bool('temporal_conv_2_head', True,
'whether or not to use temporal convolutions for the two-head architecture in video-only setting.')
flags.DEFINE_bool('temporal_conv_2_head_ee', False, 'whether or not to use temporal convolutions for the two-head architecture in video-only setting \
for predicting the ee pose.')
flags.DEFINE_integer('temporal_filter_size', 10, 'filter size for temporal convolution')
flags.DEFINE_integer('temporal_num_filters', 32, 'number of filters for temporal convolution')
flags.DEFINE_integer('temporal_num_filters_ee', 64, 'number of filters for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers_ee', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_string('init', 'xavier', 'initializer for conv weights. Choose among random, xavier, and he')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
# flags.DEFINE_string('log_dirs', 'logs/sim_reach_temporal_conv_with_bicycle', 'directory for summaries and checkpoints.')
flags.DEFINE_string('log_dirs', 'logs/sim_reach_temporal_conv', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('restore_iter', 0, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training \
(use if you want to test with a different number).')
flags.DEFINE_integer('test_update_batch_size', 1, 'number of demos used during test time')
flags.DEFINE_float('gpu_memory_fraction', 0.9, 'fraction of memory used in gpu')
flags.DEFINE_bool('record_gifs', True, 'record gifs during evaluation')
flags.DEFINE_integer('rl_update_batch_size', 1, 'number of demos used during rl time')
flags.DEFINE_bool('learn_bicycle', False, 'learning strategy')
flags.DEFINE_bool('compare_learn', True, 'learning strategy')
flags.DEFINE_integer('begin_restore_iter', 29100, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('end_restore_iter', 29999, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('embed_size', 5, 'size of embedding')
flags.DEFINE_integer('action_size', 2, 'size of embedding')
flags.DEFINE_float('margin', 1.0, 'margin of loss')
flags.DEFINE_float('margin_coefficient', 1, 'margin of loss')
def get_num_params():
    """Count every scalar across the graph's trainable variables."""
    from functools import reduce  # reduce is not a builtin on Python 3
    from operator import mul
    nums = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        nums += reduce(mul, [dim.value for dim in shape], 1)
    return nums
def train(graph, model, saver, sess, data_generator, log_dir, restore_itr=0):
"""
Train the model.
"""
PRINT_INTERVAL = 1
TEST_PRINT_INTERVAL = PRINT_INTERVAL*5
SUMMARY_INTERVAL = 100
SAVE_INTERVAL = 100
TOTAL_ITERS = FLAGS.metatrain_iterations
prelosses, postlosses = [], []
save_dir = log_dir + '/model'
train_writer = tf.summary.FileWriter(log_dir, graph)
print('calling train***************************')
# actual training.
if restore_itr == 0:
training_range = range(TOTAL_ITERS)
else:
training_range = range(restore_itr+1, TOTAL_ITERS)
for itr in training_range:
state, tgt_mu = data_generator.generate_data_batch(itr)
statea = state[:, :FLAGS.update_batch_size*FLAGS.T, :]
stateb = state[:, FLAGS.update_batch_size*FLAGS.T:, :]
actiona = tgt_mu[:, :FLAGS.update_batch_size*FLAGS.T, :]
actionb = tgt_mu[:, FLAGS.update_batch_size*FLAGS.T:, :]
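        # Shape note (added for clarity, not in the original source):
        # generate_data_batch() is assumed to return arrays of shape
        # (meta_batch_size, n*T, dim) with the K = FLAGS.update_batch_size
        # demos first. The first K*T timesteps ("a") feed the inner gradient
        # step and the remainder ("b") feeds the meta-objective, e.g. with the
        # defaults K=1, T=50: statea = state[:, :50, :], stateb = state[:, 50:, :].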
# print("data_generator.all_mix_training_filenames",len(data_generator.all_mix_training_filenames))
# print(itr, 'data_generator.all_mix_training_filenames', data_generator.all_mix_training_filenames[itr*FLAGS.meta_batch_size*3],
# data_generator.all_mix_training_filenames[itr*FLAGS.meta_batch_size*3+1],
# data_generator.all_mix_training_filenames[itr*FLAGS.meta_batch_size*3+2])
# split_test=tf.split(state, 2)
# print('state',state.shape, 'split_test',split_test)
feed_dict = {model.statea: statea,
model.stateb: stateb,
model.actiona: actiona,
model.actionb: actionb}
# input_tensors = [model.train_op, model.total_loss1, model.total_losses2[model.num_updates-1], model.total_semantic_loss]
# input_tensors = [model.train_op, model.total_loss1, model.semantic_outputb, model.compare_semantic_outputb, model.different_semantic_outputb, model.total_losses2[model.num_updates-1], model.total_semantic_loss]
# if itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0:
# input_tensors.extend([model.train_summ_op, model.total_loss1, model.total_losses2[model.num_updates-1]])
input_tensors = [model.train_op, model.total_loss1, model.total_mix_loss,
model.total_semantic_loss, model.total_losses2[model.num_updates - 1]]
with graph.as_default():
parameters = get_num_params()
print('total parameters', parameters)
results = sess.run(input_tensors, feed_dict=feed_dict)
with open('logs/sim_reach_temporal_conv/reach_traing_loss.txt', 'a') as f:
# f.write("%d %f %f\n" % (itr, np.mean(results[-2]), np.mean(results[-1])))
# f.write("%d %f\n" % (itr, np.mean(results[-1])))
f.write("%d %f %f\n" % (itr, np.mean(results[-2]), np.mean(results[-1])))
        print('Iteration %d: pre_loss is %.2f, mix_loss is %.2f, semantic_loss is %.2f, post_loss is %.2f' % (itr, results[-4], results[-3], results[-2], results[-1]))
# print(results[-5].shape, results[-4].shape, results[-3].shape)
# print(results[-5][-1][-1].shape, results[-4][-1][-1].shape, results[-3][-1][-1].shape)
# print(results[-5][-1][-1], results[-4][-1][-1], results[-3][-1][-1])
# f.write("%d %s %s %s\n" % (itr, str(results[-5][0][-1]), str(results[-4][0][-1]), str(results[-3][0][-1])))
# f.write( "%d %s %s %s\n" % (itr, data_generator.all_mix_training_filenames[itr * FLAGS.meta_batch_size * 3],
# data_generator.all_mix_training_filenames[itr * FLAGS.meta_batch_size * 3 + 1],
# data_generator.all_mix_training_filenames[itr * FLAGS.meta_batch_size * 3 + 2]))
# if itr != 0 and itr % SUMMARY_INTERVAL == 0:
# prelosses.append(results[-2])
# # train_writer.add_summary(results[-3], itr)
# postlosses.append(results[-1])
#
# if itr != 0 and itr % PRINT_INTERVAL == 0:
# print 'Iteration %d: average preloss is %.2f, average postloss is %.2f' % (itr, np.mean(prelosses), np.mean(postlosses))
# prelosses, postlosses = [], []
# if itr != 0 and itr % TEST_PRINT_INTERVAL == 0:
# if FLAGS.val_set_size > 0:
# input_tensors = [model.val_summ_op, model.val_total_loss1, model.val_total_losses2[model.num_updates-1]]
# val_state, val_act = data_generator.generate_data_batch(itr, train=False)
#             statea = val_state[:, :FLAGS.update_batch_size*FLAGS.T, :]
# stateb = val_state[:, FLAGS.update_batch_size*FLAGS.T:, :]
# actiona = val_act[:, :FLAGS.update_batch_size*FLAGS.T, :]
# actionb = val_act[:, FLAGS.update_batch_size*FLAGS.T:, :]
# feed_dict = {model.statea: statea,
# model.stateb: stateb,
# model.actiona: actiona,
# model.actionb: actionb}
# with graph.as_default():
# results = sess.run(input_tensors, feed_dict=feed_dict)
# train_writer.add_summary(results[0], itr)
# print 'Test results: average preloss is %.2f, average postloss is %.2f' % (np.mean(results[1]), np.mean(results[2]))
if itr != 0 and (itr % SAVE_INTERVAL == 0 or itr == training_range[-1]):
        print('Saving model to: %s' % (save_dir + '_%d' % itr))
with graph.as_default():
saver.save(sess, save_dir + '_%d' % itr)
def generate_test_demos(data_generator):
if not FLAGS.use_noisy_demos:
n_folders = len(data_generator.demos.keys())
demos = data_generator.demos
else:
n_folders = len(data_generator.noisy_demos.keys())
demos = data_generator.noisy_demos
    policy_demo_idx = [np.random.choice(n_demo, replace=False, size=FLAGS.test_update_batch_size) \
                    for n_demo in [demos[i]['demoX'].shape[0] for i in range(n_folders)]]
selected_demoO, selected_demoX, selected_demoU = [], [], []
    for i in range(n_folders):
selected_cond = np.array(demos[i]['demoConditions'])[np.arange(len(demos[i]['demoConditions'])) == policy_demo_idx[i]]
Xs, Us, Os = [], [], []
for idx in selected_cond:
if FLAGS.use_noisy_demos:
demo_gif_dir = data_generator.noisy_demo_gif_dir
else:
demo_gif_dir = data_generator.demo_gif_dir
O = np.array(imageio.mimread(demo_gif_dir + data_generator.gif_prefix + '_%d/cond%d.samp0.gif' % (i, idx)))[:, :, :, :3]
O = np.transpose(O, [0, 3, 2, 1]) # transpose to mujoco setting for images
O = O.reshape(FLAGS.T, -1) / 255.0 # normalize
Os.append(O)
Xs.append(demos[i]['demoX'][np.arange(demos[i]['demoX'].shape[0]) == policy_demo_idx[i]].squeeze())
Us.append(demos[i]['demoU'][np.arange(demos[i]['demoU'].shape[0]) == policy_demo_idx[i]].squeeze())
selected_demoO.append(np.array(Os))
selected_demoX.append(np.array(Xs))
selected_demoU.append(np.array(Us))
    print("Finished collecting demos for testing")
selected_demo = dict(selected_demoX=selected_demoX, selected_demoU=selected_demoU, selected_demoO=selected_demoO)
data_generator.selected_demo = selected_demo
def main():
tf.set_random_seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
# Build up environment to prevent segfault
if not FLAGS.train:
if 'reach' in FLAGS.experiment:
env = gym.make('ReacherMILTest-v1')
ob = env.reset()
# import pdb; pdb.set_trace()
graph = tf.Graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
sess = tf.Session(graph=graph, config=tf_config)
network_config = {
'num_filters': [FLAGS.num_filters]*FLAGS.num_conv_layers,
'strides': [[1, 2, 2, 1]]*FLAGS.num_strides + [[1, 1, 1, 1]]*(FLAGS.num_conv_layers-FLAGS.num_strides),
'filter_size': FLAGS.filter_size,
'image_width': FLAGS.im_width,
'image_height': FLAGS.im_height,
'image_channels': FLAGS.num_channels,
'n_layers': FLAGS.num_fc_layers,
'layer_size': FLAGS.layer_size,
'initialization': FLAGS.init,
}
data_generator = DataGenerator()
state_idx = data_generator.state_idx
img_idx = range(len(state_idx), len(state_idx)+FLAGS.im_height*FLAGS.im_width*FLAGS.num_channels)
# need to compute x_idx and img_idx from data_generator
model = MIL(data_generator._dU, state_idx=state_idx, img_idx=img_idx, network_config=network_config)
# TODO: figure out how to save summaries and checkpoints
exp_string = FLAGS.experiment+ '.' + FLAGS.init + '_init.' + str(FLAGS.num_conv_layers) + '_conv' + '.' + str(FLAGS.num_strides) + '_strides' + '.' + str(FLAGS.num_filters) + '_filters' + \
'.' + str(FLAGS.num_fc_layers) + '_fc' + '.' + str(FLAGS.layer_size) + '_dim' + '.bt_dim_' + str(FLAGS.bt_dim) + '.mbs_'+str(FLAGS.meta_batch_size) + \
'.ubs_' + str(FLAGS.update_batch_size) + '.numstep_' + str(FLAGS.num_updates) + '.updatelr_' + str(FLAGS.train_update_lr)
if FLAGS.clip:
exp_string += '.clip_' + str(int(FLAGS.clip_max))
if FLAGS.conv_bt:
exp_string += '.conv_bt'
if FLAGS.all_fc_bt:
exp_string += '.all_fc_bt'
if FLAGS.fp:
exp_string += '.fp'
if FLAGS.learn_final_eept:
exp_string += '.learn_ee_pos'
if FLAGS.no_action:
exp_string += '.no_action'
if FLAGS.zero_state:
exp_string += '.zero_state'
if FLAGS.two_head:
exp_string += '.two_heads'
if FLAGS.two_arms:
exp_string += '.two_arms'
if FLAGS.temporal_conv_2_head:
exp_string += '.1d_conv_act_' + str(FLAGS.temporal_num_layers) + '_' + str(FLAGS.temporal_num_filters)
if FLAGS.temporal_conv_2_head_ee:
exp_string += '_ee_' + str(FLAGS.temporal_num_layers_ee) + '_' + str(FLAGS.temporal_num_filters_ee)
exp_string += '_' + str(FLAGS.temporal_filter_size) + 'x1_filters'
if FLAGS.training_set_size != -1:
exp_string += '.' + str(FLAGS.training_set_size) + '_trials'
log_dir = FLAGS.log_dirs + '/' + exp_string
# put here for now
if FLAGS.train:
data_generator.generate_batches(noisy=FLAGS.use_noisy_demos)
with graph.as_default():
# train_image_tensors = data_generator.make_batch_tensor(network_config, restore_iter=FLAGS.restore_iter)
train_image_tensors = data_generator.make_compare_batch_tensor(network_config, restore_iter=FLAGS.restore_iter)
inputa = train_image_tensors[:, :FLAGS.update_batch_size*FLAGS.T, :]
inputb = train_image_tensors[:, FLAGS.update_batch_size * FLAGS.T:(FLAGS.update_batch_size +1) * FLAGS.T, :]
inputc = train_image_tensors[:, (FLAGS.update_batch_size + 1) * FLAGS.T:, :]
# train_input_tensors = {'inputa': inputa, 'inputb': inputb}
train_input_tensors = {'inputa': inputa, 'inputb': inputb, 'inputc': inputc}
# val_image_tensors = data_generator.make_batch_tensor(network_config, restore_iter=FLAGS.restore_iter, train=False)
# inputa = val_image_tensors[:, :FLAGS.update_batch_size*FLAGS.T, :]
# inputb = val_image_tensors[:, FLAGS.update_batch_size*FLAGS.T:, :]
# val_input_tensors = {'inputa': inputa, 'inputb': inputb}
model.init_network(graph, input_tensors=train_input_tensors, restore_iter=FLAGS.restore_iter)
# model.init_network(graph, input_tensors=val_input_tensors, restore_iter=FLAGS.restore_iter, prefix='Validation_')
else:
model.init_network(graph, prefix='Testing')
with graph.as_default():
# Set up saver.
saver = tf.train.Saver(max_to_keep=10)
# Initialize variables.
init_op = tf.global_variables_initializer()
sess.run(init_op, feed_dict=None)
# Start queue runners (used for loading videos on the fly)
tf.train.start_queue_runners(sess=sess)
if FLAGS.resume:
model_file = tf.train.latest_checkpoint(log_dir)
if FLAGS.restore_iter > 0:
model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
if model_file:
ind1 = model_file.index('model')
resume_itr = int(model_file[ind1+6:])
print("Restoring model weights from " + model_file)
with graph.as_default():
saver.restore(sess, model_file)
if FLAGS.train:
train(graph, model, saver, sess, data_generator, log_dir, restore_itr=FLAGS.restore_iter)
else:
model_file = tf.train.latest_checkpoint(log_dir)
if (FLAGS.begin_restore_iter != FLAGS.end_restore_iter):
iter_index = FLAGS.begin_restore_iter
while iter_index <= FLAGS.end_restore_iter:
print('iter_index', iter_index)
if FLAGS.restore_iter >= 0:
model_file = model_file[:model_file.index('model')] + 'model_' + str(iter_index)
if model_file:
ind1 = model_file.index('model')
resume_itr = int(model_file[ind1 + 6:])
print("Restoring model weights from " + model_file)
# saver = tf.train.Saver()
saver.restore(sess, model_file)
if 'reach' in FLAGS.experiment:
env = gym.make('ReacherMILTest-v1')
env.reset()
generate_test_demos(data_generator)
evaluate_vision_reach(env, graph, model, data_generator, sess, exp_string, FLAGS.record_gifs, log_dir)
# evaluate_rl_vision_reach(graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dirs)
elif 'push' in FLAGS.experiment:
evaluate_push(sess, graph, model, data_generator, exp_string, log_dir, FLAGS.demo_file + '/', save_video=FLAGS.record_gifs)
iter_index += 100
else:
if FLAGS.restore_iter > 0:
model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
if model_file:
ind1 = model_file.index('model')
resume_itr = int(model_file[ind1 + 6:])
print("Restoring model weights from " + model_file)
# saver = tf.train.Saver()
saver.restore(sess, model_file)
if 'reach' in FLAGS.experiment:
env = gym.make('ReacherMILTest-v1')
env.reset()
generate_test_demos(data_generator)
evaluate_vision_reach(env, graph, model, data_generator, sess, exp_string, FLAGS.record_gifs, log_dir)
# evaluate_vision_reach(env, graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dir)
# evaluate_rl_vision_reach(graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dirs)
elif 'push' in FLAGS.experiment:
evaluate_push(sess, graph, model, data_generator, exp_string, log_dir, FLAGS.demo_file + '/', save_video=FLAGS.record_gifs)
# else:
# if 'reach' in FLAGS.experiment:
# generate_test_demos(data_generator)
# evaluate_vision_reach(env, graph, model, data_generator, sess, exp_string, FLAGS.record_gifs, log_dir)
# elif 'push' in FLAGS.experiment:
# evaluate_push(sess, graph, model, data_generator, exp_string, log_dir, FLAGS.demo_file + '/', save_video=FLAGS.record_gifs)
# else:
# raise NotImplementedError
if __name__ == "__main__":
main() | 59.813364 | 220 | 0.677607 |
3e79ab4525d6f1914056bf8ccce7c0151a9ecb58 | 408 | py | Python | um7/setup.py | philsuth/um7 | 2b76775011c9b1faf60272a8d91722ca5c473167 | [
"MIT"
] | null | null | null | um7/setup.py | philsuth/um7 | 2b76775011c9b1faf60272a8d91722ca5c473167 | [
"MIT"
] | null | null | null | um7/setup.py | philsuth/um7 | 2b76775011c9b1faf60272a8d91722ca5c473167 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='um7',
version='0.16+rct0',
description='Classes to interface with CH Robotics / Redshift Labs UM7 IMU',
url='https://github.com/philsuth/um7',
author='Till Busch, Daniel Kurek, Phil Sutherland',
author_email='phils@rct-global.com',
license='MIT',
packages=['um7'],
install_requires=['pyserial'],
zip_safe=False)
| 31.384615 | 82 | 0.654412 |
371dcab456c83b309642376df08d1c4f09dfffe1 | 1,402 | py | Python | utils/gyb_syntax_support/CommonNodes.py | elizachen/swift | be1a2c334c5bc02051779d0151b8b95805f4e911 | [
"Apache-2.0"
] | 1 | 2018-02-24T06:55:39.000Z | 2018-02-24T06:55:39.000Z | utils/gyb_syntax_support/CommonNodes.py | elizachen/swift | be1a2c334c5bc02051779d0151b8b95805f4e911 | [
"Apache-2.0"
] | null | null | null | utils/gyb_syntax_support/CommonNodes.py | elizachen/swift | be1a2c334c5bc02051779d0151b8b95805f4e911 | [
"Apache-2.0"
] | null | null | null | from Child import Child
from Node import Node # noqa: I201
COMMON_NODES = [
Node('Decl', kind='Syntax'),
Node('Expr', kind='Syntax'),
Node('Stmt', kind='Syntax'),
Node('Type', kind='Syntax'),
Node('Pattern', kind='Syntax'),
Node('UnknownDecl', kind='Decl'),
Node('UnknownExpr', kind='Expr'),
Node('UnknownStmt', kind='Stmt'),
Node('UnknownType', kind='Type'),
Node('UnknownPattern', kind='Pattern'),
# code-block-item = (decl | stmt | expr) ';'?
Node('CodeBlockItem', kind='Syntax',
children=[
Child('Item', kind='Syntax',
node_choices=[
Child('Decl', kind='Decl'),
Child('Stmt', kind='Stmt'),
Child('Expr', kind='Expr'),
]),
Child('Semicolon', kind='SemicolonToken',
is_optional=True),
]),
# code-block-item-list -> code-block-item code-block-item-list?
Node('CodeBlockItemList', kind='SyntaxCollection',
element='CodeBlockItem'),
# code-block -> '{' stmt-list '}'
Node('CodeBlock', kind='Syntax',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Statements', kind='CodeBlockItemList'),
Child('RightBrace', kind='RightBraceToken'),
]),
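    # For example, the Swift snippet `{ let x = 1; return x }` corresponds to a
    # CodeBlock whose Statements collection holds two CodeBlockItems (a Decl and
    # a Stmt), the first carrying its optional trailing Semicolon token.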
]
| 33.380952 | 67 | 0.531384 |
f4ec9eacb8f5af393fb0a78c159f591e1c40400e | 1,087 | py | Python | src/streamlink/plugins/latina.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | 4 | 2020-10-17T06:35:39.000Z | 2021-05-14T20:00:01.000Z | src/streamlink/plugins/latina.py | TheDrHax/streamlink | 4dfd0d516fd8484438389518985e3b5131b7a253 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/latina.py | TheDrHax/streamlink | 4dfd0d516fd8484438389518985e3b5131b7a253 | [
"BSD-2-Clause"
] | null | null | null | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents
from streamlink.plugin.api.utils import itertags
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?latina\.pe/tvenvivo"
))
class Latina(Plugin):
title = "Latina"
def _get_streams(self):
self.session.http.headers.update({
"User-Agent": useragents.CHROME,
"Referer": self.url})
self.session.http.get(self.url)
stream_url = None
for div in itertags(self.session.http.get(self.url).text, "div"):
if div.attributes.get("id") == "player":
stream_url = div.attributes.get("data-stream")
if stream_url:
log.debug("URL={0}".format(stream_url))
return HLSStream.parse_variant_playlist(self.session,
stream_url,
name_fmt="{pixels}_{bitrate}")
__plugin__ = Latina
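# Usage sketch (assumed Streamlink invocation, not part of the plugin itself):
# Streamlink matches the URL pattern declared above and calls _get_streams(),
# which scrapes the HLS playlist URL from the player <div>'s data-stream
# attribute, e.g.:
#   streamlink https://www.latina.pe/tvenvivo best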
| 29.378378 | 82 | 0.602576 |
68449348227933efc770bedbbdf707394da32c1e | 2,311 | py | Python | data/test/test_encryption.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | data/test/test_encryption.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | data/test/test_encryption.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from data.encryption import FieldEncrypter, _VERSIONS, DecryptionFailureException
@pytest.mark.parametrize(
"test_data",
[
"",
"hello world",
"wassup?!",
"IGZ2Y8KUN3EFWAZZXR3D7U4V5NXDVYZI5VGU6STPB6KM83PAB8WRGM32RD9FW0C0",
"JLRFBYS1EHKUE73S99HWOQWNPGLUZTBRF5HQEFUJS5BK3XVB54RNXYV4AUMJXCMC",
"a" * 3,
"a" * 4,
"a" * 5,
"a" * 31,
"a" * 32,
"a" * 33,
"a" * 150,
"😇",
],
)
@pytest.mark.parametrize("version", list(_VERSIONS.keys()))
@pytest.mark.parametrize(
"secret_key",
[
"test1234",
"thisisanothercoolsecretkeyhere",
"107383705745765174750346070528443780244192102846031525796571939503548634055845",
],
)
@pytest.mark.parametrize("use_valid_key", [True, False])
def test_encryption(test_data, version, secret_key, use_valid_key):
encrypter = FieldEncrypter(secret_key, version)
encrypted = encrypter.encrypt_value(test_data, field_max_length=255)
assert encrypted != test_data
if use_valid_key:
decrypted = encrypter.decrypt_value(encrypted)
assert decrypted == test_data
with pytest.raises(DecryptionFailureException):
encrypter.decrypt_value("somerandomvalue")
else:
decrypter = FieldEncrypter("some other key", version)
with pytest.raises(DecryptionFailureException):
decrypter.decrypt_value(encrypted)
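# Minimal round-trip sketch of the API exercised above (key and plaintext are
# illustrative):
#
#   encrypter = FieldEncrypter("test1234")
#   token = encrypter.encrypt_value("hello world", field_max_length=255)
#   assert encrypter.decrypt_value(token) == "hello world"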
@pytest.mark.parametrize(
"secret_key, encrypted_value, expected_decrypted_value",
[
("test1234", "v0$$iE+87Qefu/2i+5zC87nlUtOskypk8MUUDS/QZPs=", ""),
("test1234", "v0$$XTxqlz/Kw8s9WKw+GaSvXFEKgpO/a2cGNhvnozzkaUh4C+FgHqZqnA==", "hello world"),
(
"test1234",
"v0$$9LadVsSvfAr9r1OvghSYcJqrJpv46t+U6NgLKrcFY6y2bQsASIN36g==",
"hello world",
),
(
"\1\2\3\4\5\6",
"v0$$2wwWX8IhUYzuh4cyMgSXF3MEVDlEhrf0CNimTghlHgCuK6E4+bLJb1xJOKxsXMs=",
"hello world, again",
),
],
)
def test_encryption_value(secret_key, encrypted_value, expected_decrypted_value):
encrypter = FieldEncrypter(secret_key)
decrypted = encrypter.decrypt_value(encrypted_value)
assert decrypted == expected_decrypted_value
| 30.813333 | 100 | 0.655128 |
989d878a499c40ebea3c3e64e960bbc3f0e579ce | 4,574 | py | Python | test/Fortran/F03.py | clemens-tolboom/scons | cd722bdc5f6b1163d56246ee0afc63c28ecc138e | [
"MIT"
] | null | null | null | test/Fortran/F03.py | clemens-tolboom/scons | cd722bdc5f6b1163d56246ee0afc63c28ecc138e | [
"MIT"
] | null | null | null | test/Fortran/F03.py | clemens-tolboom/scons | cd722bdc5f6b1163d56246ee0afc63c28ecc138e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.file_fixture('mylink.py')
test.file_fixture(['fixture', 'myfortran.py'])
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
F03 = r'%(_python_)s myfortran.py f03',
FORTRAN = r'%(_python_)s myfortran.py fortran')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
env.Program(target = 'test13', source = 'test13.f03')
env.Program(target = 'test14', source = 'test14.F03')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortran\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortran\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortran\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortran\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortran\n")
test.write('test13.f03', "This is a .f03 file.\n#link\n#f03\n")
test.write('test14.F03', "This is a .F03 file.\n#link\n#f03\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .for file.\n")
test.must_match('test04' + _exe, "This is a .FOR file.\n")
test.must_match('test05' + _exe, "This is a .ftn file.\n")
test.must_match('test06' + _exe, "This is a .FTN file.\n")
test.must_match('test07' + _exe, "This is a .fpp file.\n")
test.must_match('test08' + _exe, "This is a .FPP file.\n")
test.must_match('test13' + _exe, "This is a .f03 file.\n")
test.must_match('test14' + _exe, "This is a .F03 file.\n")
fc = 'f03'
g03 = test.detect_tool(fc)
if g03:
test.file_fixture('wrapper.py')
test.write('SConstruct', """
foo = Environment(F03 = '%(fc)s')
f03 = foo.Dictionary('F03')
bar = foo.Clone(F03 = r'%(_python_)s wrapper.py ' + f03)
foo.Program(target = 'foo', source = 'foo.f03')
bar.Program(target = 'bar', source = 'bar.f03')
""" % locals())
test.write('foo.f03', r"""
PROGRAM FOO
PRINT *,'foo.f03'
STOP
END
""")
test.write('bar.f03', r"""
PROGRAM BAR
PRINT *,'bar.f03'
STOP
END
""")
test.run(arguments = 'foo' + _exe, stderr = None)
test.run(program = test.workpath('foo'), stdout = " foo.f03\n")
test.must_not_exist('wrapper.out')
import sys
if sys.platform[:5] == 'sunos':
test.run(arguments = 'bar' + _exe, stderr = None)
else:
test.run(arguments = 'bar' + _exe)
test.run(program = test.workpath('bar'), stdout = " bar.f03\n")
test.must_match('wrapper.out', "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.651515 | 73 | 0.670748 |
30a230036f9b79d5a880fce4f01c833f88b77345 | 968 | py | Python | test/automation/test/test_all.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 3 | 2022-01-12T06:51:51.000Z | 2022-02-23T18:54:33.000Z | test/automation/test/test_all.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 6 | 2021-08-31T19:21:26.000Z | 2022-01-03T05:53:42.000Z | test/automation/test/test_all.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 8 | 2021-08-12T08:07:49.000Z | 2022-01-25T04:40:51.000Z | from .test_public_website import test_public_website
from .test_asr import test_asr_record
from .test_leaderboard import test_leaderboard
from .test_cards import test_cards
from .test_elements import test_elements_with_browser
def perform_testing_all(login,driver):
#public-pages-testing
driver=test_asr_record(driver)
driver=test_public_website(driver)
driver=test_leaderboard(driver)
driver=test_cards(driver,show_str=True)
status,driver=login(driver)
if status:
#after-login-functions
driver=test_elements_with_browser(driver)
return driver
def perform_testing_partly(login,driver):
#public-pages-testing
#driver=test_asr_record(driver)
driver=test_public_website(driver)
driver=test_leaderboard(driver)
driver=test_cards(driver,show_str=False)
status,driver=login(driver)
if status:
#after-login-functions
driver=test_elements_with_browser(driver)
return driver
| 32.266667 | 53 | 0.772727 |
9f96f901e31777ccf6cd485241625e2fea2d3674 | 12,915 | py | Python | poly_plotter.py | yhsmiley/lsc-cnn | fd4a99b2822a2f8b4beace4a3b8b1fe2f1792dbd | [
"MIT"
] | 2 | 2020-08-21T04:37:24.000Z | 2021-03-21T22:32:37.000Z | poly_plotter.py | levan92/lsc-cnn | 27ad9465798e65c7c99e22a448ccaac9880e35d8 | [
"MIT"
] | null | null | null | poly_plotter.py | levan92/lsc-cnn | 27ad9465798e65c7c99e22a448ccaac9880e35d8 | [
"MIT"
] | 3 | 2019-09-19T09:29:38.000Z | 2020-02-24T11:27:04.000Z | import cv2
import copy
import numpy as np
# if __name__ == '__main__':
# from misc import init_imshow, show
# else:
# from .misc import init_imshow, show
mouse_pt = None
click_pt = None
start_click_pt = None
stored_rect_pts = None
pre_adjust_mode = []
adjust_mode = []
move_start_pt = None
anchor_rect_pts = None
edge_buffer = 6
edge_x = -1
edge_y = -1
buff = 10 #line buffer
def mouse_events_handler(event, x, y, flags, param):
global mouse_pt, click_pt, edge_x, edge_y
if (0 - edge_buffer) <= x <= (0 + edge_buffer):
x = -1
edge_x = 0
elif (frame_size[1]-1 - edge_buffer) <= x <= (frame_size[1]-1 + edge_buffer):
x = frame_size[1] - 2
edge_x = frame_size[1] - 1
else:
edge_x = -1
if (0 - edge_buffer) <= y <= (0 + edge_buffer):
y = -1
edge_y = 0
elif (frame_size[0]-1 - edge_buffer) <= y <= (frame_size[0]-1 + edge_buffer):
y = frame_size[0]-2
edge_y = frame_size[0]-1
else:
edge_y = -1
if event == cv2.EVENT_MOUSEMOVE:
mouse_pt = (x,y)
if event == cv2.EVENT_LBUTTONDOWN:
click_pt = (x,y)
# print(click_pt)
# print('frame size:', frame_size)
# print('Click! {}'.format(click_pt))
def edge_drawing(frame):
colour = (50,50,255)
THICC = edge_buffer + 1
if edge_x >= 0:
cv2.line(frame, (edge_x,0), (edge_x, frame_size[0]-1), colour, THICC)
if edge_y >= 0:
cv2.line(frame, (0, edge_y), (frame_size[1]-1, edge_y), colour, THICC)
return frame
def draw_crosshair(frame):
global mouse_pt
if mouse_pt:
colour = (0,255,0)
THICC = 2
frameDC = copy.deepcopy(frame)
h, w = frameDC.shape[:2]
vertical_start = (mouse_pt[0], 0)
vertical_end = (mouse_pt[0], h-1)
horizontal_start = (0, mouse_pt[1])
horizontal_end = (w-1, mouse_pt[1])
# print('vertical:',vertical_start, vertical_end)
# print('horizontal:', horizontal_start, horizontal_end)
cv2.line(frameDC, vertical_start, vertical_end, colour, THICC)
cv2.line(frameDC, horizontal_start, horizontal_end, colour, THICC)
return frameDC
else:
return frame
def check_adjust_multi(frame):
global pre_adjust_mode, mouse_pt, click_pt, multi_stored_rect_pts
if mouse_pt and multi_stored_rect_pts and not click_pt and not start_click_pt:
for i, bb in enumerate(multi_stored_rect_pts):
if bb is None:
continue
rect_xmin = bb[0][0]
rect_ymin = bb[0][1]
rect_xmax = bb[1][0]
rect_ymax = bb[1][1]
colour = (0,0,255)
THICCC = 6
buff = 6
pre_adjust_mode = []
if (rect_xmin + buff < mouse_pt[0] < rect_xmax - buff ) and (rect_ymin + buff < mouse_pt[1] < rect_ymax - buff ):
cv2.line(frame, (rect_xmin, rect_ymin), (rect_xmax, rect_ymin), colour, THICCC)
cv2.line(frame, (rect_xmin, rect_ymax), (rect_xmax, rect_ymax), colour, THICCC)
cv2.line(frame, (rect_xmin, rect_ymin), (rect_xmin, rect_ymax), colour, THICCC)
cv2.line(frame, (rect_xmax, rect_ymin), (rect_xmax, rect_ymax), colour, THICCC)
if not pre_adjust_mode:
pre_adjust_mode.append(i)
pre_adjust_mode.append('move')
else:
if (rect_xmin <= mouse_pt[0] <= rect_xmax) and (rect_ymin - buff) <= mouse_pt[1] <= (rect_ymin + buff):
cv2.line(frame, (rect_xmin, rect_ymin), (rect_xmax, rect_ymin), colour, THICCC)
if not pre_adjust_mode:
pre_adjust_mode.append(i)
pre_adjust_mode.append('top')
if (rect_xmin <= mouse_pt[0] <= rect_xmax) and (rect_ymax - buff) <= mouse_pt[1] <= (rect_ymax + buff):
cv2.line(frame, (rect_xmin, rect_ymax), (rect_xmax, rect_ymax), colour, THICCC)
if not pre_adjust_mode:
pre_adjust_mode.append(i)
pre_adjust_mode.append('bot')
if (rect_ymin <= mouse_pt[1] <= rect_ymax) and (rect_xmin - buff) <= mouse_pt[0] <= (rect_xmin + buff):
cv2.line(frame, (rect_xmin, rect_ymin), (rect_xmin, rect_ymax), colour, THICCC)
if not pre_adjust_mode:
pre_adjust_mode.append(i)
pre_adjust_mode.append('left')
if (rect_ymin <= mouse_pt[1] <= rect_ymax) and (rect_xmax - buff) <= mouse_pt[0] <= (rect_xmax + buff):
cv2.line(frame, (rect_xmax, rect_ymin), (rect_xmax, rect_ymax), colour, THICCC)
if not pre_adjust_mode:
pre_adjust_mode.append(i)
pre_adjust_mode.append('right')
if pre_adjust_mode:
break
def process_rect(pt1, pt2):
xmin = min(pt1[0], pt2[0])
xmax = max(pt1[0], pt2[0])
ymin = min(pt1[1], pt2[1])
ymax = max(pt1[1], pt2[1])
return [[xmin, ymin], [xmax, ymax]]
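# e.g. process_rect((120, 45), (20, 90)) -> [[20, 45], [120, 90]]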
def adjust_rect():
global mouse_pt, adjust_mode
temp_stored_rect_pts = [[None, None], [None, None]]
try:
if mouse_pt is not None:
if 'left' in adjust_mode:
temp_stored_rect_pts[0][0] = mouse_pt[0]
if 'right' in adjust_mode:
temp_stored_rect_pts[1][0] = mouse_pt[0]
if 'top' in adjust_mode:
temp_stored_rect_pts[0][1] = mouse_pt[1]
if 'bot' in adjust_mode:
temp_stored_rect_pts[1][1] = mouse_pt[1]
except Exception as e:
print('Exception occured: {}'.format(e))
return False, None
return True, temp_stored_rect_pts
def move_rect(anchor_rect_pts):
global mouse_pt
temp_stored_rect_pts = [[None, None], [None, None]]
if mouse_pt is not None:
diff_x = mouse_pt[0] - move_start_pt[0]
diff_y = mouse_pt[1] - move_start_pt[1]
temp_stored_rect_pts[0][0] = anchor_rect_pts[0][0] + diff_x
temp_stored_rect_pts[0][1] = anchor_rect_pts[0][1] + diff_y
temp_stored_rect_pts[1][0] = anchor_rect_pts[1][0] + diff_x
temp_stored_rect_pts[1][1] = anchor_rect_pts[1][1] + diff_y
return True, temp_stored_rect_pts
def update(old, new):
for i, corner in enumerate(new):
for j, value in enumerate(corner):
			if value is not None:  # None means "leave this coordinate unchanged"; 0 is a valid coordinate
old[i][j] = value
# print(i,j,value)
return old
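# e.g. update([[1, 2], [3, 4]], [[None, 9], [None, None]]) -> [[1, 9], [3, 4]]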
def process_multi(frame, single_mode):
global start_click_pt, click_pt, multi_stored_rect_pts, pre_adjust_mode, adjust_mode, confs, move_start_pt, anchor_rect_pts
if pre_adjust_mode and not adjust_mode and not start_click_pt and click_pt:
# print('[Drawer] Adjust mode on')
if 'move' in pre_adjust_mode:
move_start_pt = click_pt
anchor_rect_pts = copy.deepcopy(multi_stored_rect_pts[pre_adjust_mode[0]])
adjust_mode = pre_adjust_mode
pre_adjust_mode = []
click_pt = None
elif adjust_mode and move_start_pt is None and mouse_pt is not None and not click_pt:
# print('[Drawer] Adjusting')
ret, temp_stored_rect_pts = adjust_rect()
if ret:
multi_stored_rect_pts[adjust_mode[0]] = update(multi_stored_rect_pts[adjust_mode[0]], temp_stored_rect_pts)
elif adjust_mode and move_start_pt is not None and mouse_pt is not None and not click_pt:
# print('[Drawer] Moving')
ret, temp_stored_rect_pts = move_rect(anchor_rect_pts)
if ret:
multi_stored_rect_pts[adjust_mode[0]] = update(multi_stored_rect_pts[adjust_mode[0]], temp_stored_rect_pts)
elif adjust_mode or (single_mode and start_click_pt and click_pt):
# stored_rect_pts = process_rect(*stored_rect_pts)
if adjust_mode:
multi_stored_rect_pts[adjust_mode[0]] = process_rect(*multi_stored_rect_pts[adjust_mode[0]])
confs[adjust_mode[0]] = 'User-drawn'
else:
multi_stored_rect_pts[0] = process_rect(start_click_pt, click_pt)
confs[0] = 'User-drawn'
adjust_mode = []
move_start_pt = None
click_pt = None
start_click_pt = None
# print('[Drawer] Adjust mode ended. New rect: {}'.format(stored_rect_pts))
elif single_mode and click_pt and not start_click_pt:
start_click_pt = click_pt
click_pt = None
elif single_mode and start_click_pt and mouse_pt and not click_pt:
frameDC = copy.deepcopy(frame)
colour = (0,0,255)
THICC = 2
cv2.rectangle(frameDC, start_click_pt, mouse_pt, colour, THICC)
return frameDC
else:
click_pt = None
return frame
# def draw_stored_rect(frame):
# if stored_rect_pts and not start_click_pt:
# frameDC = copy.deepcopy(frame)
# colour = (200,200,0)
# THICC = 2
# cv2.rectangle(frameDC, tuple(stored_rect_pts[0]), tuple(stored_rect_pts[1]), colour, THICC)
# return frameDC
# return frame
def draw_stored_rect_multi(frame):
frameDC = copy.deepcopy(frame)
if multi_stored_rect_pts and not start_click_pt:
colour = (200,200,0)
THICC = 2
for bb in multi_stored_rect_pts:
if bb:
cv2.rectangle(frameDC, tuple(bb[0]), tuple(bb[1]), colour, THICC)
return frameDC
def post_process(rect, confidence='Unknown'):
'''
rect: list of 2 tuples (x, y)
'''
# l = min(rect[0][0], rect[1][0])
# t = min(rect[0][1], rect[1][1])
# r = max(rect[0][0], rect[1][0])
# b = max(rect[0][1], rect[1][1])
l = rect[0][0]
t = rect[0][1]
r = rect[1][0]
b = rect[1][1]
w = r - l + 1
h = b - t + 1
bb_dict = {'rect':{ 'l':l, 't':t, 'b':b, 'r':r, 'w':w, 'h':h },
'confidence': confidence}
return bb_dict
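# e.g. post_process([(10, 20), (30, 50)]) ->
#   {'rect': {'l': 10, 't': 20, 'b': 50, 'r': 30, 'w': 21, 'h': 31},
#    'confidence': 'Unknown'}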
def post_process_multi(bbs, confs):
proc_bbs = []
for i, bb in enumerate(bbs):
proc_bbs.append(post_process(bb, confs[i]))
return proc_bbs
def viz_poly(frame, polygon):
	# NOTE: the original body was an incomplete stub (an unqualified `deepcopy`
	# and a bare `cv2.circle(frameDC, )` call). This fills it in, under the
	# assumption that it was meant to mirror the vertex drawing in poly_draw().
	frameDC = copy.deepcopy(frame)
	for pt in polygon:
		cv2.circle(frameDC, tuple(pt), 9, (0, 0, 255), -1)
	return frameDC
def poly_draw(frame):
global mouse_pt, click_pt, start_click_pt, multi_stored_rect_pts, pre_adjust_mode, adjust_mode, confs, frame_size
mouse_pt = None
click_pt = None
start_click_pt = None
stored_rect_pts = None
pre_adjust_mode = []
adjust_mode = []
window_name = 'Draw BB'
conf = None
frame_size = frame.shape[:2]
multi_stored_rect_pts = [None]
confs = [None]
res = None
# if init_bb:
# stored_rect_pts, conf = pre_process(init_bb)
# multi_stored_rect_pts[0] = stored_rect_pts
# confs[0] = conf
# shower.start(window_name)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
# cv2.resizeWindow(window_name, 1920, 1080)
# cv2.moveWindow(window_name, *screen_loc)
# cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, mouse_events_handler)
polygon = []
frame_h, frame_w = frame.shape[:2]
while True:
# click_pt = None
# cv2.moveWindow(window_name, *screen_loc)
frameSHOW = draw_crosshair(frame)
frameSHOW = edge_drawing(frameSHOW)
if mouse_pt:
cv2.putText(frameSHOW, '(x){}, (y){}'.format(mouse_pt[0], mouse_pt[1]), (0, frame_h - 10), cv2.FONT_HERSHEY_DUPLEX, 2, (255,255,255), 3 )
# cv2.putText(frameSHOW, '{},{}'.format(mouse_pt[0], mouse_pt[1]), (10, frame_h-10), cv2.FONT_HERSHEY_SIMPLEX, 10, (255,255,255), 2 )
if click_pt:
polygon.append(click_pt)
click_pt = None
if len(polygon) > 0:
for poly in polygon:
cv2.circle(frameSHOW, poly, 9, (0,0,255), -1)
cv2.imshow(window_name, frameSHOW)
key = cv2.waitKey(1) & 0xFF
		if key == 13 or key == 32: # Enter or Spacebar
# if not pre_adjust_mode and not adjust_mode and not start_click_pt and multi_stored_rect_pts[0] is not None:
# res = post_process_multi(multi_stored_rect_pts, confs)[0]
res = polygon
break
elif key == ord('c'):
res = None
break
# mouse_pt = None
# click_pt = None
# start_click_pt = None
# stored_rect_pts = None
# cv2.destroyAllWindows()
return res
if __name__ == '__main__':
import os
import sys
import time
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('img')
args = argparser.parse_args()
assert os.path.exists(args.img),'Img path given does not exist!'
# img_path = '/home/dh/Workspace/tracknotation/cache/vid_cache/mexico_airport_drone_short_frames/3.jpg'
frame = cv2.imread(args.img)
res = poly_draw(frame)
	if res:
		res = [str(x) for x in np.array(res).flatten()]
		print(",".join(res))
	else:
		# poly_draw() returns None when the user cancels with 'c'
		print("No polygon drawn.")
| 37.112069 | 149 | 0.603794 |
b86e5f28e3154cd6c8cc12a760941d90cde7f764 | 259 | py | Python | tex/make_message.py | salimfadhley/hoax1 | c48c1721f2882a4e8e242067314f793a28f4bfc0 | [
"MIT"
] | null | null | null | tex/make_message.py | salimfadhley/hoax1 | c48c1721f2882a4e8e242067314f793a28f4bfc0 | [
"MIT"
] | null | null | null | tex/make_message.py | salimfadhley/hoax1 | c48c1721f2882a4e8e242067314f793a28f4bfc0 | [
"MIT"
] | null | null | null | import base64
import codecs
rot13 = codecs.getencoder("rot-13")
CLEARTEXT_MESSAGE:bytes = rot13("Mark Steele is a gullible fuckwit!")[0].encode("utf-8")
with open("message.txt", "w") as f:
f.write(base64.b64encode(CLEARTEXT_MESSAGE).decode("utf-8"))
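# To recover the message (assumed round trip, inverting the steps above:
# base64-decode the file, then undo the rot-13):
#   raw = base64.b64decode(open("message.txt").read()).decode("utf-8")
#   plain = codecs.decode(raw, "rot-13")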
| 25.9 | 88 | 0.718147 |
47d302fff49f69f3247200731c62e824c3532945 | 2,036 | py | Python | selfdrive/test/helpers.py | alvaro-blz/openpilot | 09ad35beebef1c904d8751e52ae60c4762ea5b95 | [
"MIT"
] | 3 | 2020-10-04T03:55:59.000Z | 2021-05-13T06:34:02.000Z | selfdrive/test/helpers.py | alvaro-blz/openpilot | 09ad35beebef1c904d8751e52ae60c4762ea5b95 | [
"MIT"
] | null | null | null | selfdrive/test/helpers.py | alvaro-blz/openpilot | 09ad35beebef1c904d8751e52ae60c4762ea5b95 | [
"MIT"
] | 4 | 2020-09-16T00:02:07.000Z | 2020-11-24T06:02:08.000Z | import time
import subprocess
from functools import wraps
from nose.tools import nottest
from common.hardware import PC
from common.apk import update_apks, start_offroad, pm_apply_packages, android_packages
from common.params import Params
from selfdrive.version import training_version, terms_version
from selfdrive.manager import start_managed_process, kill_managed_process, get_running
def set_params_enabled():
params = Params()
params.put("HasAcceptedTerms", terms_version)
params.put("HasCompletedSetup", "1")
params.put("OpenpilotEnabledToggle", "1")
params.put("CommunityFeaturesToggle", "1")
params.put("Passive", "0")
params.put("CompletedTrainingVersion", training_version)
def phone_only(x):
if PC:
return nottest(x)
else:
return x
def with_processes(processes, init_time=0):
def wrapper(func):
@wraps(func)
def wrap(*args, **kwargs):
# start and assert started
for p in processes:
start_managed_process(p)
time.sleep(init_time)
assert all(get_running()[name].exitcode is None for name in processes)
# call the function
try:
func(*args, **kwargs)
# assert processes are still started
assert all(get_running()[name].exitcode is None for name in processes)
finally:
# kill and assert all stopped
for p in processes:
kill_managed_process(p)
assert len(get_running()) == 0
return wrap
return wrapper
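# Example usage of the decorator above (process names are hypothetical):
#
#   @with_processes(['camerad', 'ui'], init_time=5)
#   def test_something():
#       ...  # every listed managed process is asserted alive around this body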
def with_apks():
def wrapper(func):
@wraps(func)
def wrap():
update_apks()
pm_apply_packages('enable')
start_offroad()
func()
try:
for package in android_packages:
apk_is_running = (subprocess.call(["pidof", package]) == 0)
assert apk_is_running, package
finally:
pm_apply_packages('disable')
for package in android_packages:
apk_is_not_running = (subprocess.call(["pidof", package]) == 1)
assert apk_is_not_running, package
return wrap
return wrapper
| 28.676056 | 86 | 0.686149 |
e43df2f1314d850d8d72a7ef0f30305f7f0db843 | 6,946 | py | Python | nni/experiment/launcher.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | 1 | 2021-08-22T12:04:23.000Z | 2021-08-22T12:04:23.000Z | nni/experiment/launcher.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | null | null | null | nni/experiment/launcher.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import contextlib
import logging
from pathlib import Path
import socket
from subprocess import Popen
import sys
import time
from typing import Optional, Tuple
import colorama
import nni_node # pylint: disable=import-error
import nni.runtime.protocol
from .config import ExperimentConfig
from .pipe import Pipe
from . import rest
from ..tools.nnictl.config_utils import Experiments, Config
from ..tools.nnictl.nnictl_utils import update_experiment
_logger = logging.getLogger('nni.experiment')
def start_experiment(exp_id: str, config: ExperimentConfig, port: int, debug: bool, mode: str = 'new') -> Popen:
proc = None
config.validate(initialized_tuner=False)
_ensure_port_idle(port)
if mode != 'view':
if isinstance(config.training_service, list): # hybrid training service
_ensure_port_idle(port + 1, 'Hybrid training service requires an additional port')
elif config.training_service.platform in ['remote', 'openpai', 'kubeflow', 'frameworkcontroller', 'adl']:
_ensure_port_idle(port + 1, f'{config.training_service.platform} requires an additional port')
try:
_logger.info('Creating experiment, Experiment ID: %s', colorama.Fore.CYAN + exp_id + colorama.Style.RESET_ALL)
start_time, proc = _start_rest_server(config, port, debug, exp_id, mode=mode)
        _logger.info('Starting web server...')
_check_rest_server(port)
platform = 'hybrid' if isinstance(config.training_service, list) else config.training_service.platform
_save_experiment_information(exp_id, port, start_time, platform,
config.experiment_name, proc.pid, str(config.experiment_working_directory))
_logger.info('Setting up...')
rest.post(port, '/experiment', config.json())
return proc
except Exception as e:
_logger.error('Create experiment failed')
if proc is not None:
with contextlib.suppress(Exception):
proc.kill()
raise e
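# Usage sketch (caller-side, illustrative): the caller owns the returned
# REST-server process and is responsible for terminating it.
#
#   config = ExperimentConfig(...)  # assembled and populated elsewhere
#   proc = start_experiment('AbCdEfGh', config, port=8080, debug=False)
#   ...
#   proc.kill()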
def start_experiment_retiarii(exp_id: str, config: ExperimentConfig, port: int, debug: bool) -> Tuple[Popen, Pipe]:
pipe = None
proc = None
config.validate(initialized_tuner=True)
_ensure_port_idle(port)
if isinstance(config.training_service, list): # hybrid training service
_ensure_port_idle(port + 1, 'Hybrid training service requires an additional port')
elif config.training_service.platform in ['remote', 'openpai', 'kubeflow', 'frameworkcontroller', 'adl']:
_ensure_port_idle(port + 1, f'{config.training_service.platform} requires an additional port')
try:
_logger.info('Creating experiment, Experiment ID: %s', colorama.Fore.CYAN + exp_id + colorama.Style.RESET_ALL)
pipe = Pipe(exp_id)
start_time, proc = _start_rest_server(config, port, debug, exp_id, pipe.path)
_logger.info('Connecting IPC pipe...')
pipe_file = pipe.connect()
nni.runtime.protocol._in_file = pipe_file
nni.runtime.protocol._out_file = pipe_file
        _logger.info('Starting web server...')
_check_rest_server(port)
platform = 'hybrid' if isinstance(config.training_service, list) else config.training_service.platform
_save_experiment_information(exp_id, port, start_time, platform,
config.experiment_name, proc.pid, config.experiment_working_directory)
_logger.info('Setting up...')
rest.post(port, '/experiment', config.json())
return proc, pipe
except Exception as e:
_logger.error('Create experiment failed')
if proc is not None:
with contextlib.suppress(Exception):
proc.kill()
if pipe is not None:
with contextlib.suppress(Exception):
pipe.close()
raise e
def _ensure_port_idle(port: int, message: Optional[str] = None) -> None:
sock = socket.socket()
if sock.connect_ex(('localhost', port)) == 0:
sock.close()
        message = f'({message})' if message else ''
raise RuntimeError(f'Port {port} is not idle {message}')
def _start_rest_server(config: ExperimentConfig, port: int, debug: bool, experiment_id: str, pipe_path: str = None,
mode: str = 'new') -> Tuple[int, Popen]:
if isinstance(config.training_service, list):
ts = 'hybrid'
else:
ts = config.training_service.platform
if ts == 'openpai':
ts = 'pai'
args = {
'port': port,
'mode': ts,
'experiment_id': experiment_id,
'start_mode': mode,
'log_dir': config.experiment_working_directory,
'log_level': 'debug' if debug else 'info'
}
if pipe_path is not None:
args['dispatcher_pipe'] = pipe_path
if mode == 'view':
args['start_mode'] = 'resume'
args['readonly'] = 'true'
node_dir = Path(nni_node.__path__[0])
node = str(node_dir / ('node.exe' if sys.platform == 'win32' else 'node'))
main_js = str(node_dir / 'main.js')
cmd = [node, '--max-old-space-size=4096', main_js]
for arg_key, arg_value in args.items():
cmd.append('--' + arg_key)
cmd.append(str(arg_value))
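    # The assembled command resembles the following (values illustrative;
    # 'local' stands in for whichever training-service platform is configured):
    #   node --max-old-space-size=4096 main.js --port 8080 --mode local \
    #        --experiment_id XYZ --start_mode new --log_dir <dir> --log_level info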
if sys.platform == 'win32':
from subprocess import CREATE_NEW_PROCESS_GROUP
proc = Popen(cmd, cwd=node_dir, creationflags=CREATE_NEW_PROCESS_GROUP)
else:
if pipe_path is None:
import os
proc = Popen(cmd, cwd=node_dir, preexec_fn=os.setpgrp)
else:
proc = Popen(cmd, cwd=node_dir)
return int(time.time() * 1000), proc
def _check_rest_server(port: int, retry: int = 3) -> None:
for i in range(retry):
with contextlib.suppress(Exception):
rest.get(port, '/check-status')
return
if i > 0:
_logger.warning('Timeout, retry...')
time.sleep(1)
rest.get(port, '/check-status')
def _save_experiment_information(experiment_id: str, port: int, start_time: int, platform: str, name: str, pid: int, logDir: str) -> None:
experiments_config = Experiments()
experiments_config.add_experiment(experiment_id, port, start_time, platform, name, pid=pid, logDir=logDir)
def get_stopped_experiment_config(exp_id: str, mode: str) -> Optional[ExperimentConfig]:
update_experiment()
experiments_config = Experiments()
experiments_dict = experiments_config.get_all_experiments()
experiment_metadata = experiments_dict.get(exp_id)
if experiment_metadata is None:
        _logger.error('Id %s does not exist!', exp_id)
return
if experiment_metadata['status'] != 'STOPPED':
_logger.error('Only stopped experiments can be %sed!', mode)
return
experiment_config = Config(exp_id, experiment_metadata['logDir']).get_config()
config = ExperimentConfig(**experiment_config)
return config
| 39.022472 | 138 | 0.663115 |
3dbb66679763d00fb93a405be5321050adaf85be | 65,803 | py | Python | nxt_editor/main_window.py | Mikfr83/nxt_editor | ea419c2a53817cd2b1a9fbedfd328193cde4a17f | [
"MIT"
] | null | null | null | nxt_editor/main_window.py | Mikfr83/nxt_editor | ea419c2a53817cd2b1a9fbedfd328193cde4a17f | [
"MIT"
] | null | null | null | nxt_editor/main_window.py | Mikfr83/nxt_editor | ea419c2a53817cd2b1a9fbedfd328193cde4a17f | [
"MIT"
] | null | null | null | # Built-in
import os
import sys
import logging
import subprocess
import traceback
from collections import OrderedDict
import webbrowser
from functools import partial
import time
# External
from Qt import QtWidgets
from Qt import QtGui
from Qt import QtCore
# Internal
import nxt_editor
from nxt_editor import user_dir
from nxt.session import Session
from nxt_editor.constants import EDITOR_VERSION
from nxt_editor.stage_view import StageView
from nxt_editor.stage_model import StageModel
from nxt_editor.dockwidgets import (DockWidgetBase, CodeEditor, PropertyEditor,
HotkeyEditor, LayerManager, OutputLog,
HistoryView, WidgetBuilder, BuildView,
FindRepDockWidget)
from nxt_editor.dockwidgets.output_log import (FileTailingThread,
QtLogStreamHandler)
from nxt_editor.dockwidgets.code_editor import NxtCodeEditor
from nxt import nxt_log, nxt_io, nxt_layer
from nxt_editor.dialogs import (NxtFileDialog, NxtWarningDialog,
UnsavedLayersDialogue, UnsavedChangesMessage)
from nxt_editor import actions, LoggingSignaler
from nxt.constants import (API_VERSION, GRAPH_VERSION, USER_PLUGIN_DIR,
NXT_DCC_ENV_VAR, is_standalone)
from nxt.remote.client import NxtClient
import nxt.remote.contexts
from nxt_editor import qresources
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
class MainWindow(QtWidgets.QMainWindow):
"""The main window of the nxt UI. Includes the menu bar, tool bar, and dock widgets."""
tab_changed = QtCore.Signal()
close_signal = QtCore.Signal()
new_log_signal = QtCore.Signal(logging.LogRecord)
def __init__(self, filepath=None, parent=None, start_rpc=True):
"""Create NXT window.
:param parent: parent to attach this UI to.
        :type parent: QtWidgets.QWidget
"""
self.in_startup = True
pixmap = QtGui.QPixmap(':icons/icons/nxt.svg')
self.splash_screen = QtWidgets.QSplashScreen(pixmap)
self.splash_screen.show()
self.splash_screen.showMessage('Starting nxt...',
QtCore.Qt.AlignCenter, QtCore.Qt.white)
QtWidgets.QApplication.processEvents()
super(MainWindow, self).__init__(parent=parent)
self.new_log_signal.connect(self.handle_remote_log)
old_cwd = os.getcwd()
ui_dir = os.path.dirname(__file__)
os.chdir(ui_dir)
# Test to see if we're launching from a git branch, if so the title
# bar will be updated for easy reference.
# Used to hide the stderr from the user as it doesn't matter
        f = open(nxt_io.generate_temp_file('NxtGitErr'), 'w')  # writable so the child process can dump stderr into it
try:
git_out = subprocess.check_output(["git", "branch"],
stderr=f).decode("utf8")
cur = next(line for line in git_out.split("\n")
if line.startswith("*"))
current_branch = cur.strip("*").strip()
except: # Broad because Maya
# Failed to run git branch, attempting fallback method
try:
with open('../../.git/HEAD') as f:
head = f.read()
_, __, current_branch = head.rpartition('/')
except:
# Could not determine git branch, must be pip package.
current_branch = ''
finally:
f.close()
os.chdir(old_cwd)
if is_standalone():
context = 'standalone'
else:
context = os.environ.get(NXT_DCC_ENV_VAR) or ''
self.host_app = context
self.setWindowTitle("nxt {} - Editor v{} | Graph v{} | API v{} "
"(Python {}) {}".format(self.host_app,
EDITOR_VERSION.VERSION_STR,
GRAPH_VERSION.VERSION_STR,
API_VERSION.VERSION_STR,
'.'.join([str(n) for n in sys.version_info[:3]]),
current_branch))
self.setObjectName('Main Window')
self.zoom_keys = QtGui.QKeySequence(QtCore.Qt.Key_Alt)
self.zoom_keys_down = False
self._held_keys = []
self._closing = False
self.last_focused_start = 0 # Start point focus tracker
# FIXME: Fix with MV signal
self.last_focused_tab = -1 # Tab tracker for upating the comp layer
# set app icon
self.app_icon = QtGui.QIcon(pixmap)
self.setWindowIcon(self.app_icon)
# set style sheet
style_file = QtCore.QFile(':styles/styles/dark/dark.qss')
style_file.open(QtCore.QFile.ReadOnly)
self.stylesheet = str(style_file.readAll())
self.setStyleSheet(self.stylesheet)
# fonts
font_db = QtGui.QFontDatabase()
font_db.addApplicationFont(":fonts/fonts/RobotoMono/RobotoMono-Regular.ttf")
font_db.addApplicationFont(":fonts/fonts/Roboto/Roboto-Regular.ttf")
# nxt object in charge of loaded graphs
self.nxt = Session()
# APPLICATION WIDE ACTIONS
# TODO: All the actions should be connected to functions in nxt not
# view
self.splash_screen.showMessage('Setting up hotkeys...',
QtCore.Qt.AlignCenter, QtCore.Qt.white)
self.app_actions = actions.AppActions(self)
self.addActions(self.app_actions.actions())
# NODE ACTIONS
self.node_actions = actions.NodeActions(self)
# PROPERTY ACTIONS
self.property_manager_actions = actions.PropertyEditorActions(self)
# NODE COMMENT ACTIONS
self.node_comment_actions = actions.NodeCommentActions(self)
# LAYER ACTIONS
self.layer_actions = actions.LayerActions(self)
# ALIGNMENT ACTIONS
self.alignment_actions = actions.AlignmentActions(self)
# DISPLAY ACTIONS
self.display_actions = actions.DisplayActions(self)
# VIEW ACTIONS
self.view_actions = actions.StageViewActions(self)
# EXEC ACTIONS
self.execute_actions = actions.ExecuteActions(self)
self.addAction(self.execute_actions.stop_exec_action)
# CODE EDITOR ACTIONS
self.code_editor_actions = actions.CodeEditorActions(self)
# TOOL BARS
self.authoring_toolbar = NodeAuthoringToolBar(self)
self.addToolBar(self.authoring_toolbar)
self.execute_toolbar = ExecuteToolBar(self)
self.addToolBar(self.execute_toolbar)
self.display_toolbar = DisplayToolBar(self)
self.addToolBar(self.display_toolbar)
self.align_distribute_toolbar = AlignDistributeToolBar(self)
self.addToolBar(self.align_distribute_toolbar)
# TABS WIDGET
self.open_files_tab_widget = OpenFilesTabWidget(parent=self)
self.open_files = {} # TODO: Doesn't this duplicate what Nxt does?
self.previous_view = None
# graph tabs
self.open_files_tab_widget.currentChanged.connect(self.on_tab_change)
self.setCentralWidget(self.open_files_tab_widget)
self.splash_screen.showMessage('Setting up dockwidgets...',
QtCore.Qt.AlignCenter, QtCore.Qt.white)
# Dock Widgets
# hotkey editor
self.hotkey_editor = HotkeyEditor(parent=self)
self.hotkey_editor.hide()
# property editor
self.property_editor = PropertyEditor(parent=self)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.property_editor)
# code editor
self.code_editor = CodeEditor(parent=self)
self.code_editor.editor.viewport().installEventFilter(self)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.code_editor)
# Find and Replace
self.find_rep = FindRepDockWidget(parent=self)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.find_rep)
self.find_rep.hide()
# layer manager
self.layer_manager = LayerManager(parent=self)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.layer_manager)
# history view
self.history_view = HistoryView(parent=self)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.history_view)
# build View
self.build_view = BuildView(parent=self)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.build_view)
# output log
self.output_log = OutputLog(parent=self)
self.output_log.hide()
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.output_log)
# workflow tools
self.workflow_tools = WidgetBuilder(parent=self)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.workflow_tools)
self.setCorner(QtCore.Qt.BottomRightCorner,
QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner,
QtCore.Qt.LeftDockWidgetArea)
self.setTabPosition(QtCore.Qt.AllDockWidgetAreas,
QtWidgets.QTabWidget.North)
# status bar
self.status_bar = QtWidgets.QStatusBar()
self.status_bar.setSizeGripEnabled(False)
self.status_bar.setContentsMargins(4, 4, 4, 4)
self.status_bar.setStyleSheet('color: lightGrey; background-color: #232323; border: 4px solid #3E3E3E')
self.setStatusBar(self.status_bar)
self.log_button = QtWidgets.QPushButton("Show Log")
self.log_button.setMinimumWidth(75)
self.log_button.setStyleSheet(self.stylesheet)
self.output_log.visibilityChanged.connect(self.refresh_log_button)
self.log_button.clicked.connect(self.log_button_clicked)
self.status_bar.addPermanentWidget(self.log_button)
self.refresh_log_button()
self.logger = logging.getLogger('nxt')
self.logger.addHandler(StatusBarHandler(self.status_bar))
self.state_last_hidden = None
# TODO set and load default geometry
# TODO determine and create sensible default position and size for the window, perhaps 80% of available screen?
# print QDesktopWidget.availableGeometry(self)
self.resize(1600, 800)
self.resizeDocks([self.property_editor, self.code_editor], [400, 300], QtCore.Qt.Vertical)
if filepath:
self.load_file(filepath=filepath)
else:
self.new_tab()
# menu bar
# TODO: Depends on dock widgets this should change
self.menu_bar = MenuBar(self)
self.setMenuBar(self.menu_bar)
self.menuBar().setNativeMenuBar(False)
self.display_actions.resolve_action.setChecked(True)
# Rpc startup
self.rpc_log_tail = None
if start_rpc:
self.startup_rpc_server(join=False)
# Should this be a signal? Like Startup done, now you can refresh?
self.splash_screen.finish(self)
self.in_startup = False
        # Parented to self so the timer is not garbage collected before it
        # fires (a common Qt pitfall with local QTimer instances).
        t = QtCore.QTimer(self)
        t.setInterval(256)
def failure_check():
if self.view:
self.view.failure_check()
t.stop()
t.timeout.connect(failure_check)
t.start()
app = QtWidgets.QApplication.instance()
app.aboutToQuit.connect(self.shutdown_rpc_server)
# RPC
def startup_rpc_server(self, join=True):
t = StartRPCThread(self)
t.start()
if join:
t.wait()
else:
txt = 'Waiting on rpc server...'
txt_len = len(txt)
self.count = 0
            def tick():
                # The negative modulo reveals one more character of txt on
                # each tick, animating the splash message.
                self.splash_screen.showMessage(txt[:self.count % -txt_len],
                                               QtCore.Qt.AlignCenter,
                                               QtCore.Qt.white)
                self.count += 1
timer = QtCore.QTimer()
timer.setInterval(100)
timer.timeout.connect(tick)
t.finished.connect(timer.stop)
timer.start()
while not t.isFinished():
QtWidgets.QApplication.processEvents()
@staticmethod
def handle_remote_log(record):
logger.handle(record)
def shutdown_rpc_server(self):
if self.model:
self.model.processing.emit(True)
self.safe_stop_rpc_tailing()
self.nxt.shutdown_rpc_server()
if self.model:
self.model.processing.emit(False)
if not self.rpc_log_tail:
return
wait_started = time.time()
while not self.rpc_log_tail.isFinished():
QtWidgets.QApplication.processEvents()
if time.time() - wait_started > 5:
logger.error('Failed to stop rpc log tail!')
return
self.rpc_log_tail = None
def safe_stop_rpc_tailing(self):
if not self.rpc_log_tail:
return
self.handle_rpc_tailing_signals(False)
self.rpc_log_tail.requestInterruption()
def handle_rpc_tailing_signals(self, state):
if not self.rpc_log_tail:
return
raw_write_func = self.output_log._write_raw_output
rich_write_func = self.output_log.write_rich_output
if state:
self.rpc_log_tail.new_text.connect(raw_write_func)
self.rpc_log_tail.new_text.connect(rich_write_func)
else:
self.rpc_log_tail.new_text.disconnect(raw_write_func)
self.rpc_log_tail.new_text.disconnect(rich_write_func)
def event(self, event):
if event.type() == QtCore.QEvent.WindowDeactivate:
self._held_keys = []
self.zoom_keys_down = False
return super(MainWindow, self).event(event)
@staticmethod
def set_waiting_cursor(state=True):
if state:
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
else:
QtWidgets.QApplication.restoreOverrideCursor()
@staticmethod
def create_remote_context(place_holder_text='',
interpreter_exe=sys.executable,
context_graph=None, exe_script_args=()):
cur_context = nxt.remote.contexts.get_current_context_exe_name()
pop_up = QtWidgets.QDialog()
pop_up.setWindowTitle('Create context for "{}"'.format(cur_context))
v_layout = QtWidgets.QVBoxLayout()
pop_up.setLayout(v_layout)
label = QtWidgets.QPlainTextEdit()
        info = ('Create remote context for your host '
                'Python interpreter/DCC\n'
                'Type your desired name in the box below '
                'and click create.')
label.setPlainText(info)
label.setReadOnly(True)
font_metric = QtGui.QFontMetrics(label.document().defaultFont())
text_size = font_metric.size(QtCore.Qt.TextExpandTabs, info)
label.setFixedSize(text_size.width() + 50, text_size.height() + 30)
v_layout.addWidget(label)
h_layout = QtWidgets.QHBoxLayout()
v_layout.addLayout(h_layout)
name = QtWidgets.QLineEdit()
name.setPlaceholderText(str(place_holder_text))
name.setText(str(place_holder_text))
create_button = QtWidgets.QPushButton('Create!')
h_layout.addWidget(name)
h_layout.addWidget(create_button)
def do_create():
try:
nxt.create_context(name.text(),
interpreter_exe=interpreter_exe,
context_graph=context_graph,
exe_script_args=exe_script_args)
pop_up.close()
except (IOError, NameError) as e:
info = str(e)
msg = 'Failed to create context!'
logger.error(info)
nxt_editor.dialogs.NxtWarningDialog.show_message(msg,
info=info)
create_button.pressed.connect(do_create)
pop_up.exec_()
def get_global_actions(self):
"""Get a list of NxtActions with the WindowShortcut context
:return: List of NxtActions
"""
global_actions = []
for action in self.get_all_nxt_actions():
if action.shortcutContext() == QtCore.Qt.WindowShortcut:
global_actions += [action]
return global_actions
def get_all_nxt_actions(self):
"""Get a list of all NxtActions via the NxtActionContainer objects
:return: List of NxtActions
"""
all_actions = []
all_containers = self.findChildren(actions.NxtActionContainer)
for container in all_containers:
all_actions += container.actions()
return all_actions
def get_hotkey_map(self):
"""Get a map of NxtAction containers and their actions in an
        ordered dict where each key is a row for a QAbstractTableModel.
:return: OrderedDict
"""
hotkeys = OrderedDict()
# Action container objects in the order we wish to display them
action_containers = [self.app_actions, self.alignment_actions,
self.display_actions, self.view_actions,
self.layer_actions, self.node_actions,
self.property_manager_actions,
self.node_comment_actions,
self.execute_actions, self.code_editor_actions]
for container in action_containers:
hotkeys[container.objectName()] = container.get_action_data()
return hotkeys
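    # A minimal sketch of consuming the map above, e.g. from a table
    # model (the variable names here are hypothetical):
    #
    #   for container_name, rows in main_window.get_hotkey_map().items():
    #       for row in rows:
    #           print(container_name, row)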
@property
def view(self):
return self.get_current_view()
@property
def model(self):
if self.view:
return self.view.model
def new_tab(self, initial_stage=None, update=True):
"""Open a new graph view, optionally on a specific initial graph.
Create necessary model, pass to nxt to connect graph to model,
create tab for new file.
:param initial_stage: Graph object to make new view of.
:type initial_stage: nxt.core.Graph.Graph
:param update: If true the different views will update
:type update: bool
"""
self.set_waiting_cursor(True)
# get new graph
if initial_stage:
stage = initial_stage
else:
stage = self.nxt.new_file()
# create model
model = StageModel(stage=stage)
model.processing.connect(self.set_waiting_cursor)
model.request_ding.connect(self.ding)
model.layer_alias_changed.connect(partial(self.update_tab_title, model))
# create view
view = StageView(model=model, parent=self)
# setup tab
tab_index = self.open_files_tab_widget.count()
self.open_files[model.uid] = {'stage': stage, 'model': model,
'view': view}
self.open_files_tab_widget.addTab(view, stage._name)
if update:
self.open_files_tab_widget.setCurrentIndex(tab_index)
self.layer_manager.set_stage_model(model)
model.layer_color_changed.connect(self.update_target_color)
model.target_layer_changed.connect(self.update_target_color)
model.comp_layer_changed.connect(self.update_target_color)
self.update_target_color()
self.update_grid_action()
self.update() # TODO: Make this better
self.set_waiting_cursor(False)
@staticmethod
def ding():
if user_dir.user_prefs.get(user_dir.USER_PREF.DING, True):
QtWidgets.QApplication.instance().beep()
def center_view(self):
target_graph_view = self.get_current_view()
if target_graph_view:
target_graph_view.centerOn(0, 0)
def load_file(self, filepath=None):
"""Open an NxtFileDialog to allow user to select .nxt file to open.
Attempt to open resulting choice. If an attempt is made to open a
file that is already open, we will just focus that tab.
:param filepath: path to file on disk
:type filepath: str
:return: bool -- whether or not the file was successfully loaded.
:rtype: bool
"""
if not filepath:
# TODO: The dialog should register the last opened folder into the
# user_dir and use that as the starting dir for file dialogs
            # that aren't intrinsically tied to a layer's real_path
real_path = None
try:
real_path = self.model.stage.top_layer.real_path
except AttributeError:
pass
_dir = os.path.dirname(real_path or os.getcwd())
potential_path = NxtFileDialog.system_file_dialog(base_dir=_dir)
if not potential_path:
logger.debug("No file selected to load.")
return
else:
potential_path = filepath
# Try to load the file path via nxt
self.set_waiting_cursor(True)
new_stage = None
try:
new_stage = self.nxt.load_file(potential_path)
except IOError as e:
NxtWarningDialog.show_message("Failed to Open", str(e))
self.set_waiting_cursor(False)
if new_stage:
self.new_tab(initial_stage=new_stage, update=not self.in_startup)
user_dir.editor_cache[user_dir.USER_PREF.LAST_OPEN] = potential_path
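    # Example usage (a sketch; the path is hypothetical): passing a
    # filepath skips the dialog, while calling with no argument prompts
    # the user with an NxtFileDialog.
    #
    #   main_window.load_file('/projects/rig_build.nxt')
    #   main_window.load_file()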
def save_open_tab(self):
"""Save the file that corresponds to the currently selected tab."""
self.nxt.save_file(self.get_current_tab_file_path())
    def save_open_tab_as(self):
        """Open a QtWidgets.QFileDialog to allow user to select where to
        save the open tab. Then save."""
        raise NotImplementedError
        # NOTE: everything below the raise is unreachable; kept as a
        # starting point for a future implementation. (self.open_files is
        # keyed by model uid, not tab index, so the lookup below would
        # also need fixing.)
        # Todo: start us in last directory - not C:
        save_path = QtWidgets.QFileDialog.getSaveFileName(filter="nxt files (*.json *.nxt)", dir="C:")[0]
        current_tab_path = self.get_current_tab_file_path()
        self.nxt.save_file(current_tab_path, save_path)
        new_name = self.open_files[self.open_files_tab_widget.currentIndex()]['stage']._name
        self.open_files_tab_widget.setTabText(self.open_files_tab_widget.currentIndex(), new_name)
def save_all_layers(self):
if not self.model:
return
for layer in self.model.stage._sub_layers:
self.save_layer(layer)
def save_layer(self, layer=None):
if not layer:
layer = self.model.target_layer
if not layer:
return
if not layer.real_path:
self.save_layer_as(layer, open_in_new_tab=False)
else:
self.set_waiting_cursor(True)
self.nxt.save_layer(layer)
user_dir.editor_cache[user_dir.USER_PREF.LAST_OPEN] = layer.real_path
self.view.update_filepath()
try:
self.model.effected_layers.remove(layer.real_path)
except KeyError: # Layer may not have been changed
pass
self.model.layer_saved.emit(layer.real_path)
self.set_waiting_cursor(False)
def save_layer_as(self, layer=None, open_in_new_tab=True):
if not layer:
layer = self.model.display_layer
old_real_path = layer.real_path
old_path = layer.filepath
if not old_real_path:
open_in_new_tab = False
base_dir = os.path.join(user_dir.USER_DIR, layer.get_alias())
else:
base_dir = layer.real_path
caption = 'Save "{}"'.format(layer.get_alias())
save_path = NxtFileDialog.system_file_dialog(base_dir, 'save',
caption=caption)
if not save_path:
return
self.set_waiting_cursor(True)
self.nxt.save_layer(layer, filepath=save_path)
user_dir.editor_cache[user_dir.USER_PREF.LAST_OPEN] = layer.real_path
layer.filepath = old_path
if open_in_new_tab:
self.load_file(save_path)
layer.real_path = old_real_path
elif layer is self.model.top_layer:
tab_idx = self.open_files_tab_widget.currentIndex()
self.open_files_tab_widget.setTabText(tab_idx, layer.alias)
tab_idx = self.open_files_tab_widget.currentIndex()
self.on_tab_change(tab_idx)
self.set_waiting_cursor(False)
def open_source(self, layer):
if not layer:
layer = self.model.display_layer
self.load_file(layer.real_path)
def find_startpoint(self):
"""Cycles through start points"""
if not self.model:
return
start_nodes = self.model.get_start_nodes()
start_node_len = len(start_nodes)
if not start_node_len:
logger.warning("No start nodes found.")
return
in_range = self.last_focused_start in range(start_node_len)
if in_range:
idx = self.last_focused_start
else:
idx = 0
self.last_focused_start = idx
self.last_focused_start += 1
self.model.select_and_frame(start_nodes[idx])
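    # Worked example of the cycling above: with three start nodes,
    # successive calls frame start_nodes[0], [1], [2] and then wrap back
    # to [0], since last_focused_start resets once it falls out of range.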
def align_left(self):
logger.info('align left')
def align_hcenter(self):
logger.info('align hcenter')
def align_right(self):
logger.info('align right')
def align_top(self):
logger.info('align top')
def align_vcenter(self):
logger.info('align vcenter')
def align_bottom(self):
logger.info('align bottom')
def distribute_horizontal(self):
logger.info('distribute horizontal')
def distribute_vertical(self):
logger.info('distribute vertical')
def undo(self):
current_view = self.get_current_view()
if current_view:
model = current_view.model
model.undo()
def redo(self):
current_view = self.get_current_view()
if current_view:
model = current_view.model
model.redo()
def refresh_log_button(self):
if self.output_log.isVisible():
self.log_button.setText("Hide Log")
else:
self.log_button.setText("Show Log")
def log_button_clicked(self):
if self.output_log.isVisible():
self.output_log.hide()
return
self.output_log.show()
self.output_log.raise_()
def update_tab_title(self, model, layer_changed):
tab_idx = self.open_files_tab_widget.currentIndex()
view = self.open_files_tab_widget.widget(tab_idx)
cur_model = view.model
if model is not cur_model:
return
if layer_changed != model.top_layer.real_path:
return
new_title = model.get_layer_alias(layer_changed)
self.open_files_tab_widget.setTabText(tab_idx, new_title)
def on_tab_change(self, tab_index):
"""Happens every tab change. Used to keep the dock widgets aware of
the current graph model.
"""
view = self.open_files_tab_widget.widget(tab_index)
if not view:
return
if view == self.previous_view:
return
self.previous_view = view
uid = view.model.uid
self.last_focused_start = 0
if uid in self.open_files.keys():
model = self.open_files[uid]['model']
layer_path = model.get_layer_path(model.top_layer)
title = model.get_layer_alias(layer_path)
self.open_files_tab_widget.setTabText(tab_index, title)
self.property_editor.set_stage_model(model)
self.code_editor.set_stage_model(model)
self.layer_manager.set_stage_model(model)
self.history_view.set_stage_model(model)
self.workflow_tools.set_stage_model(model)
self.build_view.set_stage_model(model)
self.find_rep.set_stage_model(model)
self.output_log.set_stage_model(model)
self.update_target_color()
logger.debug("Successfully set up new tab.")
self.last_focused_tab = tab_index
self.update_implicit_action()
self.update_grid_action()
model.destroy_cmd_port.connect(self.update_cmd_port_action)
else:
logger.critical("Failed to set up new tab.")
view.setFocus()
self.tab_changed.emit()
def get_current_tab_file_path(self):
"""Get the file path of the currently open tab.
:return: File path of the currently open tab.
:rtype:str
"""
if not self.model:
return
return self.model.get_layer_path(self.model.stage.top_layer)
    def get_current_tab_model(self):
        """Get the stage model of the currently open tab.
        :return: StageModel of the currently open tab.
        """
idx = self.open_files_tab_widget.currentIndex()
widget = self.open_files_tab_widget.widget(idx)
if widget:
uid = widget.model.uid
return self.open_files[uid]['model']
def get_current_view(self):
return self.open_files_tab_widget.currentWidget()
def update_cmd_port_action(self):
self.execute_actions.enable_cmd_port_action.blockSignals(True)
if self.model:
state = self.model.use_cmd_port
else:
state = False
self.execute_actions.enable_cmd_port_action.setChecked(state)
self.execute_actions.enable_cmd_port_action.blockSignals(False)
def update_grid_action(self):
self.view_actions.grid_action.blockSignals(True)
self.view_actions.grid_action.setChecked(self.model.show_grid)
self.view_actions.grid_action.blockSignals(False)
def update_implicit_action(self):
self.view_actions.implicit_action.blockSignals(True)
state = self.model.implicit_connections
self.view_actions.implicit_action.setChecked(state)
self.view_actions.implicit_action.blockSignals(False)
def update_target_color(self):
disp_layer = self.model.display_layer
color = self.model.get_layer_color(disp_layer)
# update widgets
self.open_files_tab_widget.setStyleSheet('padding: 1; border: 1px solid %s' % color)
self.open_files_tab_widget.update()
self.code_editor.update_border_color()
self.property_editor.update_styles()
def keyPressEvent(self, event):
key = event.key()
if key not in self._held_keys:
self._held_keys.append(key)
self.zoom_keys_down = False
match = QtGui.QKeySequence(*self._held_keys).matches(self.zoom_keys)
if match == QtGui.QKeySequence.SequenceMatch.ExactMatch:
self.zoom_keys_down = True
event.accept()
def keyReleaseEvent(self, event):
key = event.key()
if key in self._held_keys:
self._held_keys.remove(key)
self.zoom_keys_down = False
match = QtGui.QKeySequence(*self._held_keys).matches(self.zoom_keys)
if match == QtGui.QKeySequence.SequenceMatch.ExactMatch:
self.zoom_keys_down = True
def eventFilter(self, widget, event):
# enter editing after update_code_is_local
if event.type() == QtCore.QEvent.MouseButtonDblClick:
if isinstance(widget.parent(), NxtCodeEditor):
self.code_editor.update_code_is_local()
self.code_editor.enter_editing()
return False
def show(self):
"""Centering after the window is shown because the center is based on the window's size."""
# Todo: add previous rect to the bookmarks data for the layer - use this instead of center if it exists
super(MainWindow, self).show()
self.center_view()
def showEvent(self, event):
if self.state_last_hidden:
self.restoreState(self.state_last_hidden)
super(MainWindow, self).showEvent(event)
return
state_key = user_dir.EDITOR_CACHE.WINODW_STATE
geo_key = user_dir.EDITOR_CACHE.MAIN_WIN_GEO
saved_state = user_dir.editor_cache.get(state_key)
if saved_state:
self.restoreState(QtCore.QByteArray(saved_state))
saved_geo = user_dir.editor_cache.get(geo_key)
if saved_geo:
self.restoreGeometry(QtCore.QByteArray(saved_geo))
state_key = user_dir.EDITOR_CACHE.NODE_PROPERTY_STATE
property_state = user_dir.editor_cache.get(state_key)
if property_state:
self.property_editor.model.state = property_state
if self.view:
self.view.setFocus()
super(MainWindow, self).showEvent(event)
def hideEvent(self, event):
self.state_last_hidden = self.saveState()
super(MainWindow, self).hideEvent(event)
def closeEvent(self, event):
"""Check for unsaved work before accepting the event. If the event
is accepted we also save the state of the UI before closing."""
if self._closing:
self._closing = False
event.ignore()
return
dirty_models = []
for open_file_dict in self.open_files.values():
unsaved = open_file_dict['model'].get_unsaved_changes()
if unsaved:
dirty_models += [open_file_dict['model']]
if dirty_models:
resp = UnsavedLayersDialogue.save_before_exit(dirty_models, self)
if resp == QtWidgets.QDialog.Rejected:
event.ignore()
return
event.accept()
self.shutdown_rpc_server()
# Window state
state_key = user_dir.EDITOR_CACHE.WINODW_STATE
geo_key = user_dir.EDITOR_CACHE.MAIN_WIN_GEO
user_dir.editor_cache[state_key] = self.saveState()
user_dir.editor_cache[geo_key] = self.saveGeometry()
state_key = user_dir.EDITOR_CACHE.NODE_PROPERTY_STATE
property_state = self.property_editor.model.state
if property_state:
user_dir.editor_cache[state_key] = str(property_state)
nxt_log.stop_session_log(self.nxt.log_file)
# Close our dock widgets.
for child in self.children():
if isinstance(child, DockWidgetBase):
child.close()
# Save closing session
closing_session = []
for file_dict in self.open_files.values():
model = file_dict['model']
real_path = model.top_layer.real_path
if not real_path:
continue
closing_session += [str(real_path)]
if closing_session:
pref_key = user_dir.EDITOR_CACHE.LAST_CLOSED
last_sessions = user_dir.editor_cache.get(pref_key, [])
last_sessions += [closing_session]
user_dir.editor_cache[pref_key] = last_sessions
self._closing = True
self.close_signal.emit()
super(MainWindow, self).closeEvent(event)
def validate_layers_saved(self, model=None, single_layer=None):
model = model or self.model
if single_layer:
layers = [single_layer]
else:
layers = model.stage._sub_layers
unsaved = model.get_unsaved_changes(layers=layers)
if unsaved and not single_layer:
resp = UnsavedLayersDialogue.save_before_exit([model], self)
if resp == QtWidgets.QDialog.Rejected:
return False
elif unsaved and single_layer:
info = 'Layer "{}" has unsaved changes!'.format(single_layer.alias)
resp = UnsavedChangesMessage.save_before_close(info=info)
save = UnsavedChangesMessage.Save
cancel = UnsavedChangesMessage.Cancel
if resp == cancel:
return False
if resp == save:
self.save_layer(single_layer)
return True
return True
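# A minimal sketch of launching the editor window standalone (the
# QApplication wiring here is an assumption for illustration; nxt_editor
# normally ships its own launch entry point):
#
#   app = QtWidgets.QApplication(sys.argv)
#   win = MainWindow(filepath=None, start_rpc=False)
#   win.show()
#   sys.exit(app.exec_())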
class ToolBar(QtWidgets.QToolBar):
def __init__(self, parent=None):
super(ToolBar, self).__init__(parent=parent)
self.setFixedHeight(32)
self.setIconSize(QtCore.QSize(19, 19))
class NodeAuthoringToolBar(ToolBar):
def __init__(self, parent=None):
super(NodeAuthoringToolBar, self).__init__(parent=parent)
self.setObjectName('Node Authoring')
self.main_window = parent
self.node_actions = self.main_window.node_actions
self.main = QtWidgets.QWidget()
self.addWidget(self.main)
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.main.setLayout(self.layout)
# add node
self.addAction(self.node_actions.add_node_action)
# delete node
self.addAction(self.node_actions.delete_node_action)
self.addSeparator()
# duplicate node
self.addAction(self.node_actions.duplicate_node_action)
# instance node
self.addAction(self.node_actions.instance_node_action)
# remove instance node
self.addAction(self.node_actions.remove_instance_action)
self.addSeparator()
# cut node
self.addAction(self.node_actions.cut_node_action)
# copy node
self.addAction(self.node_actions.copy_node_action)
# paste node
self.addAction(self.node_actions.paste_node_action)
self.addSeparator()
# localize node
self.addAction(self.node_actions.localize_node_action)
# revert node
self.addAction(self.node_actions.revert_node_action)
self.addSeparator()
# select all
self.addAction(self.node_actions.select_all_action)
class AlignDistributeToolBar(ToolBar):
def __init__(self, parent=None):
super(AlignDistributeToolBar, self).__init__(parent=parent)
self.setObjectName('Alignment Tools')
self.main_window = parent
# ACTIONS
self.addActions(self.main_window.alignment_actions.actions())
class ExecuteToolBar(ToolBar):
def __init__(self, parent=None):
super(ExecuteToolBar, self).__init__(parent=parent)
self.setObjectName('Execute Tools')
self.main_window = parent
self.exec_actions = self.main_window.execute_actions
self.addActions([self.exec_actions.execute_graph_action,
self.exec_actions.stop_exec_action,
self.exec_actions.execute_selected_action,
self.exec_actions.execute_from_action,
self.exec_actions.execute_hierarchy_action])
self.addSeparator()
self.addActions([self.exec_actions.add_start_action,
self.exec_actions.remove_start_action,
self.exec_actions.find_start_action])
self.addSeparator()
self.addActions([self.exec_actions.add_break_action,
self.exec_actions.remove_break_action,
self.exec_actions.clear_breaks_action])
class DisplayToolBar(ToolBar):
def __init__(self, parent=None):
super(DisplayToolBar, self).__init__(parent=parent)
self.setObjectName('Display Tools')
self.main_window = parent
self.view_actions = self.main_window.view_actions
self.display_actions = self.main_window.display_actions
self.addAction(self.display_actions.raw_action)
self.addAction(self.display_actions.resolve_action)
self.addAction(self.display_actions.cached_action)
self.addSeparator()
# Connection view
self.addAction(self.view_actions.grid_action)
self.addAction(self.view_actions.implicit_action)
self.addSeparator()
self.addAction(self.view_actions.frame_all_action)
self.addAction(self.view_actions.frame_selection_action)
self.addSeparator()
self.addAction(self.view_actions.hide_attrs_action)
self.addAction(self.view_actions.disp_local_attrs_action)
self.addAction(self.view_actions.disp_inst_attrs_action)
self.addAction(self.view_actions.disp_all_attrs_action)
class MenuBar(QtWidgets.QMenuBar):
"""Menu bar for nxt main window"""
def __init__(self, parent=None):
super(MenuBar, self).__init__(parent=parent)
self.main_window = parent
self.app_actions = parent.app_actions # type: actions.AppActions
self.exec_actions = parent.execute_actions # type: actions.ExecuteActions
self.node_actions = parent.node_actions # type: actions.NodeActions
self.ce_actions = parent.code_editor_actions # type: actions.CodeEditorActions
self.display_actions = parent.display_actions # type: actions.DisplayActions
self.view_actions = parent.view_actions # type: actions.StageViewActions
self.layer_actions = parent.layer_actions # type: actions.LayerActions
# File Menu
self.file_menu = self.addMenu('File')
self.file_menu.setTearOffEnabled(True)
# ACTIONS
# Something of note:
        # Menu actions with multi key shortcuts (in general) act like
# application level shortcuts on OSX. Single key shortcuts however
# do not work in the same way. There is a workaround for this if Qt
# never fixes how menus are made and we get complaints from osx users.
# https://thebreakfastpost.com/2014/06/03/single-key-menu-shortcuts-with-qt5-on-osx/
# New tab
self.file_menu.addAction(self.main_window.app_actions.new_graph_action)
# Open file
self.file_menu.addAction(self.main_window.app_actions.open_file_action)
# Recent files
self.load_recent_menu = RecentFilesMenu(action_target=self.main_window.load_file)
self.file_menu.addMenu(self.load_recent_menu)
self.file_menu.addAction(self.layer_actions.save_layer_action)
self.file_menu.addAction(self.layer_actions.save_layer_as_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.layer_actions.save_all_layers_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.layer_actions.new_layer_above_action)
self.file_menu.addAction(self.layer_actions.new_layer_below_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.layer_actions.ref_layer_above_action)
self.file_menu.addAction(self.layer_actions.ref_layer_below_action)
self.file_menu.addSeparator()
self.builtins_menu = QtWidgets.QMenu('Reference Builtin Graph')
self.builtins_menu.aboutToShow.connect(partial(populate_builtins_menu,
qmenu=self.builtins_menu,
main_window=self.main_window))
self.file_menu.addMenu(self.builtins_menu)
# Close app
self.file_menu.addSeparator()
self.file_menu.addAction(self.main_window.app_actions.close_tab_action)
self.file_menu.addAction(self.main_window.app_actions.close_action)
# Edit Menu
self.edit_menu = self.addMenu('Edit')
self.edit_menu.setTearOffEnabled(True)
self.edit_menu.addAction(self.main_window.app_actions.undo_action)
self.edit_menu.addAction(self.main_window.app_actions.redo_action)
self.edit_menu.addSeparator()
self.edit_menu.addAction(self.node_actions.copy_node_action)
self.edit_menu.addAction(self.node_actions.cut_node_action)
self.edit_menu.addAction(self.node_actions.paste_node_action)
self.edit_menu.addAction(self.node_actions.delete_node_action)
self.edit_menu.addSeparator()
self.edit_menu.addAction(self.node_actions.select_all_action)
# view menu
self.view_menu = self.addMenu('View')
self.view_menu.setTearOffEnabled(True)
self.view_menu.addAction(self.view_actions.frame_selection_action)
self.view_menu.addAction(self.view_actions.frame_all_action)
self.view_menu.addSeparator()
self.view_menu.addAction(self.display_actions.raw_action)
self.view_menu.addAction(self.display_actions.resolve_action)
self.view_menu.addAction(self.display_actions.cached_action)
self.view_menu.addSeparator()
self.view_menu.addAction(self.view_actions.implicit_action)
self.view_menu.addAction(self.view_actions.grid_action)
self.view_opt_menu = self.view_menu.addMenu('Options')
self.view_opt_menu.setTearOffEnabled(True)
self.view_opt_menu.addAction(self.view_actions.tooltip_action)
self.view_opt_menu.addAction(self.layer_actions.lay_manger_table_action)
self.view_opt_menu.addAction(self.ce_actions.overlay_message_action)
# graph menu
self.graph_menu = self.addMenu('Graph')
self.graph_menu.setTearOffEnabled(True)
self.graph_menu.addAction(self.node_actions.add_node_action)
# execute menu
self.execute_menu = self.addMenu('Execute')
self.execute_menu.setTearOffEnabled(True)
self.execute_menu.addAction(self.exec_actions.execute_from_action)
self.execute_menu.addAction(self.exec_actions.execute_selected_action)
self.execute_menu.addAction(self.exec_actions.execute_hierarchy_action)
self.execute_menu.addAction(self.exec_actions.execute_graph_action)
self.execute_menu.addAction(self.exec_actions.clear_cache_action)
self.execute_menu.addAction(self.exec_actions.wt_recomp_action)
# Populate action data for window actions
self.app_actions.layer_manager_action.setData(parent.layer_manager)
self.app_actions.property_editor_action.setData(parent.property_editor)
self.app_actions.code_editor_action.setData(parent.code_editor)
self.app_actions.history_view_action.setData(parent.history_view)
self.app_actions.build_view_action.setData(parent.build_view)
self.app_actions.output_log_action.setData(parent.output_log)
self.app_actions.hotkey_editor_action.setData(parent.hotkey_editor)
self.app_actions.workflow_tools_action.setData(parent.workflow_tools)
# window menu
self.window_menu = self.addMenu('Window')
self.window_menu.aboutToShow.connect(self.populate_window_menu)
self.window_menu.triggered.connect(self.window_action_triggered)
self.window_menu_actions = [
self.app_actions.layer_manager_action,
self.app_actions.property_editor_action,
self.app_actions.code_editor_action,
self.app_actions.history_view_action,
self.app_actions.build_view_action,
self.app_actions.output_log_action,
self.app_actions.hotkey_editor_action,
self.app_actions.workflow_tools_action
]
self.populate_window_menu()
# Remote Menu
self.remote_menu = self.addMenu('Remote')
remote_context_action = self.remote_menu.addAction('Create Remote '
'Context')
remote_context_func = self.main_window.create_remote_context
remote_context_action.triggered.connect(remote_context_func)
if not is_standalone():
remote_context_action.setEnabled(False)
self.remote_menu.addSeparator()
self.remote_menu.addAction(self.exec_actions.enable_cmd_port_action)
self.remote_menu.addSeparator()
self.remote_menu.addAction(self.exec_actions.startup_rpc_action)
self.remote_menu.addAction(self.exec_actions.shutdown_rpc_action)
self.options_menu = self.addMenu('Options')
self.options_menu.addAction(self.app_actions.toggle_ding_action)
self.options_view_sub = self.options_menu.addMenu('View')
self.options_view_sub.setTearOffEnabled(True)
self.options_view_sub.addActions(self.view_opt_menu.actions())
# Help Menu
self.help_menu = self.addMenu('Help')
self.help_menu.setTearOffEnabled(True)
prefs_dir_action = self.help_menu.addAction('Open Prefs Dir')
prefs_dir_action.triggered.connect(self.open_prefs_dir)
config_dir_action = self.help_menu.addAction('Open Plugins Dir')
config_dir_action.triggered.connect(self.open_plugins_dir)
self.help_menu.addSeparator()
self.help_menu.addAction(self.main_window.app_actions.docs_action)
github_action = self.help_menu.addAction('GitHub')
url = 'https://github.com/nxt-dev/nxt_editor'
github_action.triggered.connect(partial(webbrowser.open_new, url))
self.help_menu.addSeparator()
del_resources = self.help_menu.addAction('Clear UI Icon Cache')
del_resources.triggered.connect(self.delete_resources_pyc)
self.help_menu.addSeparator()
# Secret Menu
self.secret_menu = self.help_menu.addMenu('Developer Options')
self.secret_menu.setTearOffEnabled(True)
test_log_action = self.secret_menu.addAction('test logging')
test_log_action.triggered.connect(self.__test_all_logging)
print_action = self.secret_menu.addAction('test print')
print_action.triggered.connect(self.__test_print)
critical_action = self.secret_menu.addAction('test remove layer')
critical_action.triggered.connect(self.__test_rm_layer)
uncaught_exception = self.secret_menu.addAction('uncaught exception')
uncaught_exception.triggered.connect(self.__force_uncaught_exception)
compile_selection = self.secret_menu.addAction('compile selection')
compile_selection.triggered.connect(self.__compile_node_code)
save_cache = self.secret_menu.addAction('save cached')
save_cache.triggered.connect(self.__save_cache_layer)
load_cache = self.secret_menu.addAction('load cached')
load_cache.triggered.connect(self.__load_cache_layer)
rpc_ping = self.secret_menu.addAction('rpc ping')
rpc_ping.triggered.connect(self.__rpc_ping)
force_kill_rpc = self.secret_menu.addAction('force kill rpc')
force_kill_rpc.triggered.connect(self.__force_kill_rpc)
# Debugger function
test_graph_action = self.secret_menu.addAction('Debugger')
test_graph_action.triggered.connect(self.__debug)
# Force redraw
force_redraw_action = self.secret_menu.addAction(
'Force Redraw')
force_redraw_action.triggered.connect(self.__force_redraw)
# Force rebuild stage
force_build_stage_action = self.secret_menu.addAction('Force Update')
force_build_stage_action.triggered.connect(self.__force_build_stage)
self.help_menu.addSeparator()
about_action = self.help_menu.addAction('About')
about_action.triggered.connect(self.about_message)
def eventFilter(self, widget, event):
if event.type() == QtCore.QEvent.Type.ShortcutOverride:
return True
return False
def populate_window_menu(self):
self.window_menu.clear()
for action in self.window_menu_actions:
widget = action.data()
action.setChecked(widget.isVisible())
self.window_menu.addAction(action)
self.window_menu.addSeparator()
for file_dict in self.main_window.open_files.values():
widget = file_dict['view']
name = file_dict['model'].top_layer.get_alias()
new_action = self.window_menu.addAction(name)
new_action.setData(widget)
def window_action_triggered(self, action=None):
if not action:
# Sometimes Qt sends us this signal with no action.
return
widget = action.data()
tab_index = self.main_window.open_files_tab_widget.indexOf(widget)
        if tab_index != -1:
self.main_window.open_files_tab_widget.setCurrentIndex(tab_index)
return
if action.isChecked():
widget.show()
widget.raise_()
else:
widget.close()
@staticmethod
def open_prefs_dir():
d = user_dir.PREF_DIR
if 'darwin' in sys.platform:
os.system('open {}'.format(d))
elif 'win' in sys.platform:
os.startfile(d)
else:
try:
os.system('xdg-open {}'.format(d))
except:
logger.exception('Failed to open user dir')
@staticmethod
def open_plugins_dir():
d = USER_PLUGIN_DIR
if 'darwin' in sys.platform:
os.system('open {}'.format(d))
elif 'win' in sys.platform:
os.startfile(d)
else:
try:
os.system('xdg-open {}'.format(d))
except:
logger.exception('Failed to open user config dir')
def about_message(self):
text = ('nxt {} \n'
'graph v{}\n'
'api v{}\n'
'editor v{}\n'
'Copyright (c) 2015-2020 '
'The nxt Authors').format(self.main_window.host_app,
GRAPH_VERSION.VERSION_STR,
API_VERSION.VERSION_STR,
EDITOR_VERSION.VERSION_STR)
message_box = QtWidgets.QMessageBox()
message_box.setWindowTitle('About nxt '
'({})'.format(EDITOR_VERSION.VERSION_STR))
message_box.setText(text)
message_box.setStandardButtons(message_box.Close)
message_box.setIcon(message_box.Icon.Information)
message_box.exec_()
@staticmethod
def delete_resources_pyc():
ui_dir = os.path.dirname(__file__)
resources_file = os.path.join(ui_dir, 'qresources.py').replace(os.sep,
'/')
resources_file_c = os.path.join(ui_dir, 'qresources.pyc').replace(os.sep,
'/')
success = False
if os.path.isfile(resources_file):
try:
os.remove(resources_file)
success = True
except:
logger.exception('Failed to delete "{}" please do so '
'manually.'.format(resources_file))
if os.path.isfile(resources_file_c):
try:
os.remove(resources_file_c)
success = True
except:
logger.exception('Failed to delete "{}" please do so '
'manually.'.format(resources_file_c))
success = False
if success:
logger.info('Cleared UI icon cache, please restart nxt.')
from . import make_resources
make_resources()
def __test_print(self):
"""prints a simple message for output log debug"""
print('Test print please ignore')
def __test_all_logging(self):
done = []
        # NOTE: logging._levelNames is a Python 2 private API; Python 3
        # renamed this mapping (logging._levelToName and friends).
        for level_num in logging._levelNames:
if not isinstance(level_num, int):
level_num = logging.getLevelName(level_num)
if level_num in done:
continue
done += [level_num]
logger.log(level_num, 'Testing logger level '
'{}'.format(logging.getLevelName(level_num)))
def __test_rm_layer(self):
nxt_object = self.parent().nxt
        stage_key = list(nxt_object._loaded_files.keys())[0]
stage = nxt_object._loaded_files[stage_key]
stage.remove_sublayer(1)
model = self.parent().model
model.update_comp_layer()
def __force_build_stage(self):
self.main_window.model.update_comp_layer(rebuild=True)
def __force_redraw(self):
view = self.parent().view
view.update_view()
    def __force_uncaught_exception(self):
        # 'foo' is intentionally undefined; this raises a NameError so
        # the uncaught-exception hook can be exercised from the dev menu.
        print(foo)
def __compile_node_code(self):
"""Test the compile of a node's compute and if it works in the console
:return:
"""
path = self.main_window.model.selection[0]
comp_layer = self.main_window.model.comp_layer
rt_layer = self.main_window.model.stage.setup_runtime_layer(comp_layer)
rt_node = rt_layer.lookup(path)
from runtime import GraphError, Console
import nxt.stage as _stage
g = {'__stage__': self.main_window.model.stage,
'STAGE': rt_layer,
'w': _stage.w,
}
func = self.main_window.model.stage.get_node_code(rt_node,
rt_layer)
console = Console(g, node_path=path)
g['func'] = func
g['self'] = rt_node
try:
console.runcode(func)
except GraphError:
pass
def __debug(self):
model = self.main_window.model
stage = model.stage
target_layer = model.target_layer
comp_layer = model.comp_layer
nxt_object = self.parent().nxt
stages = []
layers = []
for k in nxt_object._loaded_files.keys():
stages.append(nxt_object._loaded_files[k])
for stage in stages:
stage.debug = True
for l in stage._sub_layers:
layers.append(l)
return
def __load_cache_layer(self):
filt = 'nxt files (*.nxt)'
file_path = QtWidgets.QFileDialog.getOpenFileName(filter=filt)[0]
if not file_path:
return
layer_data = nxt_io.load_file_data(file_path)
model = self.main_window.model
cache_layer = nxt_layer.CacheLayer.load_from_layer_data(layer_data)
if not model.current_rt_layer:
model.current_rt_layer = nxt_layer.CompLayer()
model.current_rt_layer.cache_layer = cache_layer
def __save_cache_layer(self):
curr_rt = self.main_window.model.current_rt_layer
if not curr_rt:
logger.info("No cache data to save")
return
filt = 'nxt files (*.nxt)'
file_path = QtWidgets.QFileDialog.getSaveFileName(filter=filt)[0]
if not file_path:
return
curr_rt.cache_layer.save(file_path)
def __rpc_ping(self):
proxy = NxtClient()
proxy.is_alive()
def __force_kill_rpc(self):
proxy = NxtClient()
proxy.kill()
class OpenFilesTabWidget(QtWidgets.QTabWidget):
def __init__(self, parent=None):
super(OpenFilesTabWidget, self).__init__(parent=parent)
self.main_window = parent
self.setTabsClosable(True)
self.setMovable(True)
self.tabCloseRequested.connect(self.close_tab)
def close_tab(self, index):
model = self.widget(index).model
safe_to_close = self.main_window.validate_layers_saved(model=model)
if not safe_to_close:
self.main_window.set_waiting_cursor(False)
return
self.main_window.set_waiting_cursor(True)
self.widget(index).clear()
uid = self.widget(index).model.uid
self.parent().nxt.unload_file(uid)
tab_data = self.parent().open_files.pop(uid)
model = tab_data['model']
view = tab_data['view']
view.deleteLater()
model.deleteLater()
self.removeTab(index)
self.main_window.tab_changed.emit()
real_path = model.top_layer.real_path
if not real_path:
self.main_window.set_waiting_cursor(False)
return
pref_key = user_dir.EDITOR_CACHE.LAST_CLOSED
last_sessions = user_dir.editor_cache.get(pref_key, [])
last_sessions += [[str(real_path)]]
user_dir.editor_cache[pref_key] = last_sessions
self.main_window.set_waiting_cursor(False)
class RecentFilesMenu(QtWidgets.QMenu):
def __init__(self, action_target=None):
super(RecentFilesMenu, self).__init__('Open Recent')
self.aboutToShow.connect(self.refresh_list)
self.action_target = action_target
self.triggered.connect(self.recent_selected)
def refresh_list(self):
self.clear()
recents = user_dir.editor_cache.get(user_dir.USER_PREF.RECENT_FILES, [])
if not recents:
action = self.addAction('No recents found')
action.setEnabled(False)
for file_path in recents:
self.addAction(str(file_path))
def recent_selected(self, action):
self.action_target(action.text())
class StatusBarHandler(logging.Handler):
def __init__(self, output_log=None):
logging.Handler.__init__(self, level=logging.DEBUG)
self.output_template = "{level} | {module}: \"{message}\""
self.output_log = output_log
self.signaller = LoggingSignaler()
self.signaller.signal.connect(self.update)
    def emit(self, record):
        # Marshal the record through a Qt signal so the status bar is
        # only ever touched from the GUI thread.
        self.signaller.signal.emit(record)
def update(self, record):
out_message = self.output_template.format(level=record.levelname,
module=record.module,
message=record.getMessage())
if self.output_log:
self.output_log.showMessage(out_message)
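# Example of the template above: a record logged as
# logger.info('Saved layer') from module main_window is rendered roughly
# as: INFO | main_window: "Saved layer"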
class StartRPCThread(QtCore.QThread):
def __init__(self, main_window):
super(StartRPCThread, self).__init__()
self.main_window = main_window
def run(self):
if self.main_window.model:
self.main_window.model.processing.emit(True)
        # We set up the log file here so we're tailing it _before_ we
        # start the server up.
rpc_log = nxt_io.generate_temp_file(suffix='.nxtlog')
# Setup rpc server log tail
self.main_window.safe_stop_rpc_tailing()
self.main_window.rpc_log_tail = FileTailingThread(rpc_log)
self.main_window.handle_rpc_tailing_signals(True)
self.main_window.rpc_log_tail.start()
sh = QtLogStreamHandler.get_handler(self.main_window.new_log_signal)
try:
self.main_window.nxt._start_rpc_server(custom_stdout=True,
rpc_log_filepath=rpc_log,
socket_log=True,
stream_handler=sh)
except OSError:
logger.warning('Failed to start/connect to rpc server. Please try '
'starting the rpc server via the UI')
if self.main_window.model:
self.main_window.model.processing.emit(False)
return
remote_rpc_log_file_path = None
if not self.main_window.nxt.rpc_server:
proxy = NxtClient()
try:
remote_rpc_log_file_path = proxy.get_log_location()
except:
logger.warning('Failed to tail remote rpc server log!')
if remote_rpc_log_file_path:
self.main_window.rpc_log_tail.watch_path = remote_rpc_log_file_path
with open(remote_rpc_log_file_path, 'r') as fp:
text = fp.read()
end_pos = len(text)
self.main_window.rpc_log_tail.last_read_pos = end_pos
if self.main_window.model:
self.main_window.model.processing.emit(False)
def populate_builtins_menu(qmenu, main_window, layer=None):
"""Populates a QMenu object with actions for referencing each builtin layer.
:param qmenu: QMenu object to be filled with actions
:param main_window: nxt MainWindow
:param layer: Optional layer to reference builtin layer under, if none is
supplied the target layer is used.
:return: QMenu
"""
qmenu.clear()
stage_model = main_window.model
if not stage_model:
enable = False
idx = -1
else:
enable = True
layer = layer or stage_model.target_layer
idx = layer.layer_idx() + 1
for file_name in os.listdir(nxt_io.BUILTIN_GRAPHS_DIR):
if not file_name.endswith('.nxt'):
continue
new_action = qmenu.addAction(file_name)
path = '${var}/{file_name}'.format(var=nxt_io.BUILTIN_GRAPHS_ENV_VAR,
file_name=file_name)
if enable:
new_action.triggered.connect(partial(stage_model.reference_layer,
path, idx))
new_action.setEnabled(enable)
return qmenu
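# Typical wiring (a sketch mirroring the MenuBar usage earlier in this
# file):
#
#   builtins_menu = QtWidgets.QMenu('Reference Builtin Graph')
#   builtins_menu.aboutToShow.connect(
#       partial(populate_builtins_menu, qmenu=builtins_menu,
#               main_window=main_window))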
def nxt_execpthook(typ, value, tb):
if 'nxt' not in tb.tb_frame.f_code.co_filename:
return og_excepthook(typ, value, tb)
logger.error('NXT encountered an Uncaught exception!')
traceback.print_tb(tb)
message = ('Please copy the error details and send to an nxt '
'developer.\n'
'Save your work immediately.')
# TODO: Get the last few lines from the session log and put them here
details = ''.join(traceback.format_exception(typ, value, tb))
logger.exception(details)
style_file = QtCore.QFile(':styles/styles/dark/dark.qss')
style_file.open(QtCore.QFile.ReadOnly)
stylesheet = str(style_file.readAll())
dialog = NxtWarningDialog('Uncaught Exception!', message, details)
dialog.setStyleSheet(stylesheet)
dialog.exec_()
def catch_exceptions():
debugger_attached = 'pydevd' in sys.modules
return not debugger_attached
if sys.excepthook is not nxt_execpthook:
og_excepthook = sys.excepthook
if catch_exceptions():
sys.excepthook = nxt_execpthook
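# Note on the hook above: frames that do not originate from nxt code are
# delegated to the saved og_excepthook, so a host application's own
# exception handling keeps working; only nxt tracebacks get the dialog.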
| 41.178348 | 119 | 0.63953 |
58e00459697805d8f1e7adbc2795e9616fc70667 | 3,717 | py | Python | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | ["Apache-2.0"] | 106 | 2015-07-21T16:18:26.000Z | 2022-03-31T06:45:34.000Z | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | ["Apache-2.0"] | 21 | 2015-07-11T03:48:28.000Z | 2022-01-18T12:57:30.000Z | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | ["Apache-2.0"] | 26 | 2015-07-22T22:38:21.000Z | 2022-03-14T10:11:56.000Z | #!/usr/bin/env python3
import argparse
import os
import sys
import traceback
from lib import core, utilities, run
from lib.attributes import Attributes
from lib.database import Database
def process_arguments():
"""
Uses the argparse module to parse commandline arguments.
Returns:
Dictionary of parsed commandline arguments.
"""
parser = argparse.ArgumentParser(
description='Calculate the scores of a set of repositories.'
)
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Delete cloned repositories from the disk when done.'
)
parser.add_argument(
'-c',
'--config',
type=argparse.FileType('r'),
default='config.json',
dest='config_file',
help='Path to the configuration file.'
)
parser.add_argument(
'-m',
'--manifest',
type=argparse.FileType('r'),
default='manifest.json',
dest='manifest_file',
help='Path to the manifest file.'
)
parser.add_argument(
'-r',
'--repositories-root',
dest='repositories_root',
help='Path to the root of downloaded repositories.'
)
parser.add_argument(
'-s',
'--repositories-sample',
type=argparse.FileType('r'),
dest='repositories_sample',
help='A file containing newline-separated GHTorrent project ids'
)
parser.add_argument(
'-k',
'--key-string',
type=str,
dest='key_string',
default=None,
required=False,
help='String of attribute initials. Uppercase to persist data'
)
parser.add_argument(
'-n',
'--num-processes',
type=int,
dest='num_processes',
default=1,
required=False,
help=(
'Number of processes to spawn when processing repositories'
' from the samples file.'
)
)
parser.add_argument(
'--goldenset',
action='store_true',
dest='goldenset',
help=(
'Indicate that the repositories sample file contains projects'
' from the Golden Set.'
)
)
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
return parser.parse_args()
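# Example invocation (a sketch; the paths shown are hypothetical, the
# flags all come from the parser above):
#
#   python batch_score.py --config config.json --manifest manifest.json \
#       --repositories-root /tmp/repos \
#       --repositories-sample sample.txt --num-processes 4 --cleanup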
def main():
"""
Main execution flow.
"""
try:
args = process_arguments()
config = utilities.read(args.config_file)
manifest = utilities.read(args.manifest_file)
# TODO: Refactor
core.config = config
utilities.TOKENIZER = core.Tokenizer()
database = Database(config['options']['datasource'])
globaloptions = {
'today': config['options']['today'],
'timeout': config['options']['timeout']
}
attributes = Attributes(
manifest['attributes'], database, args.cleanup, args.key_string,
**globaloptions
)
if not os.path.exists(args.repositories_root):
os.makedirs(args.repositories_root, exist_ok=True)
table = 'reaper_results'
if args.goldenset:
table = 'reaper_goldenset'
_run = run.Run(
args.repositories_root, attributes, database,
config['options']['threshold'], args.num_processes
)
_run.run([int(line) for line in args.repositories_sample], table)
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\rCaught interrupt, killing all children...')
| 26.361702 | 76 | 0.584073 |
2325ca98e7636876fccf2eb5a9b9a1466b871466 | 12,352 | py | Python | src/loaders/load.py | asrayousuf/Eva | f652e5d398556055490c146f37e7a2d7a9d091f3 | ["Apache-2.0"] | 1 | 2019-11-06T03:30:08.000Z | 2019-11-06T03:30:08.000Z | src/loaders/load.py | asrayousuf/Eva | f652e5d398556055490c146f37e7a2d7a9d091f3 | ["Apache-2.0"] | 1 | 2019-11-18T03:09:56.000Z | 2019-11-18T03:09:56.000Z | src/loaders/load.py | asrayousuf/Eva | f652e5d398556055490c146f37e7a2d7a9d091f3 | ["Apache-2.0"] | null | null | null | """
This folder contains all util functions needed to load the dataset with
annotation.
A demo can be run with the command
python loaders/load.py
@Jaeho Bang
"""
import os
import time
import xml.etree.ElementTree as ET
import cv2
import numpy as np
import pandas as pd
from . import TaskManager
# Make this return a dictionary of label to data for the whole dataset
class Load:
def __init__(self, image_width=960, image_height=540):
self.data_dict = {}
self.label_dict = {}
self.vehicle_type_filters = ['car', 'van', 'bus', 'others']
self.speed_filters = [40, 50, 60, 65, 70]
self.intersection_filters = ["pt335", "pt342", "pt211", "pt208"]
self.color_filters = ['white', 'black', 'silver', 'red']
self.image_width = image_width
self.image_height = image_height
self.image_channels = 3
self.task_manager = TaskManager.TaskManager()
@staticmethod
def image_eval(image_str):
image_str = ' '.join(image_str.split())
image_str = image_str.replace(" ", ",")
image_str = image_str[0] + image_str[2:]
evaled_image = np.array(eval(image_str))
height = 540
width = 960
channels = 3
return evaled_image.reshape(height, width, channels)
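    # Sketch of the munging above: runs of whitespace in the stored numpy
    # text dump are collapsed to single commas so eval() can rebuild the
    # nested list; the image_str[0] + image_str[2:] step drops the one
    # stray character this introduces at index 1. The result is reshaped
    # to the fixed (540, 960, 3) frame size.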
@staticmethod
def save(filename, panda_data):
project_dir = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))) # Eva / eva
csv_folder = os.path.join(project_dir, "data", "pandas")
if os.path.exists(csv_folder) is False:
os.makedirs(csv_folder)
csv_filename = os.path.join(csv_folder, filename)
panda_data.to_csv(csv_filename, sep=",", index=None)
def load(self, dir_dict):
# we can extract speed, vehicle_type from the XML
# we need to extract color, intersection from code
train_image_dir = dir_dict['train_image']
test_image_dir = dir_dict['test_image']
train_anno_dir = dir_dict['train_anno']
labels_list = ["vehicle_type", "color", "speed", "intersection"]
if __debug__:
print("Inside load, starting image loading...")
train_img_array = self._load_images(train_image_dir)
if __debug__:
print(("Done loading train images.. shape of matrix is " + str(
train_img_array.shape)))
vehicle_type_labels, speed_labels, color_labels, intersection_labels \
= self._load_XML(train_anno_dir, train_img_array)
if __debug__:
print(("Done loading the labels.. length of labels is " + str(
len(vehicle_type_labels))))
# n_samples, height, width, channels = train_img_array.shape
# train_img_array = train_img_array.reshape(n_samples,
# height*width*channels)
if __debug__:
print(("train img array flatten is ", str(train_img_array.shape)))
data_table = list(zip(vehicle_type_labels, color_labels, speed_labels,
intersection_labels))
if __debug__:
print(("data_table shape is ", str(len(data_table))))
columns = labels_list
dt_train = pd.DataFrame(data=data_table, columns=columns)
if __debug__:
print("Done making panda table for train")
dt_test = None
if test_image_dir is not None:
test_img_list = self._load_images(test_image_dir)
if __debug__:
print(("Done loading test images.. shape of matrix is " + str(
test_img_list.shape)))
dt_test = pd.DataFrame(data=list(test_img_list), columns=['image'])
if __debug__:
print("Done making panda table for test")
return [train_img_array, dt_train, dt_test]
def _convert_speed(self, original_speed):
"""
TODO: Need to actually not use this function, because we need to
find out what the original speed values mean
TODO: However, in the meantime, we will use this extrapolation....
:param original_speed:
:return: converted_speed
"""
speed_range = [0.0, 20.0]
converted_range = [0.0, 100.0]
return original_speed * 5
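    # Worked example of the mapping above: the raw range [0.0, 20.0] is
    # extrapolated onto [0.0, 100.0], so an annotated speed of 12.0
    # converts to 12.0 * 5 = 60.0.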
def _load_XML(self, directory, images):
car_labels = []
speed_labels = []
color_labels = []
intersection_labels = []
for root, subdirs, files in os.walk(directory):
files.sort()
for file in files:
file_path = os.path.join(root, file)
if ".swp" in file_path:
continue
tree = ET.parse(file_path)
tree_root = tree.getroot()
start_frame_num = 1
start_frame = True
for frame in tree_root.iter('frame'):
curr_frame_num = int(frame.attrib['num'])
if start_frame and curr_frame_num != start_frame_num:
car_labels.append(
[None] * (curr_frame_num - start_frame_num))
speed_labels.append(
[None] * (curr_frame_num - start_frame_num))
car_per_frame = []
speed_per_frame = []
color_per_frame = []
intersection_per_frame = []
bboxes = []
for box in frame.iter('box'):
left = int(eval(box.attrib['left']))
top = int(eval(box.attrib['top']))
right = left + int(eval(box.attrib['width']))
bottom = top + int(eval(box.attrib['height']))
bboxes.append([left, top, right, bottom])
# curr_frame_num -1 comes from the fact that indexes
# start from 0 whereas the start_frame_num = 1
color_per_frame = self.task_manager.call_color(
images[curr_frame_num - 1], bboxes)
                    # if __debug__:
                    #     print("colors detected in this frame are ",
                    #           str(color_per_frame))
scene = file.replace(".xml",
"") # MVI_20011.xml -> MVI_20011
intersection_per_frame = \
self.task_manager.call_intersection(
images[curr_frame_num - 1], scene, bboxes)
for att in frame.iter('attribute'):
if (att.attrib['vehicle_type']):
car_per_frame.append(att.attrib['vehicle_type'])
if (att.attrib['speed']):
speed_per_frame.append(self._convert_speed(
float(att.attrib['speed'])))
assert (len(car_per_frame) == len(speed_per_frame))
assert (len(car_per_frame) == len(color_per_frame))
assert (len(car_per_frame) == len(intersection_per_frame))
if len(car_per_frame) == 0:
car_labels.append(None)
else:
car_labels.append(car_per_frame)
if len(speed_per_frame) == 0:
speed_labels.append(None)
else:
speed_labels.append(speed_per_frame)
if len(color_per_frame) == 0:
color_labels.append(None)
else:
color_labels.append(color_per_frame)
if len(intersection_per_frame) == 0:
intersection_labels.append(None)
else:
intersection_labels.append(intersection_per_frame)
start_frame = False
return [car_labels, speed_labels, color_labels, intersection_labels]
def _load_images(self, image_dir, downsize_rate=1, grayscale=False):
print("image directory is ", image_dir)
file_names = []
for root, subdirs, files in os.walk(image_dir):
files.sort()
for file in files:
                if '.jpg' in file:
                    file_names.append(os.path.join(root, file))
print("Number of files added: ", len(file_names))
if grayscale is False:
img_table = np.ndarray(shape=(
len(file_names), self.image_height // downsize_rate,
self.image_width // downsize_rate, self.image_channels),
dtype=np.uint8)
else:
img_table = np.ndarray(shape=(
len(file_names), self.image_height // downsize_rate,
self.image_width // downsize_rate, 1), dtype=np.uint8)
for i in range(len(file_names)):
file_name = file_names[i]
            if grayscale:
                img = cv2.imread(file_name, 0)
                # cv2.resize takes dsize as (width, height)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                # add an explicit channel axis to match the (H, W, 1) table
                img = img[:, :, np.newaxis]
            else:
                img = cv2.imread(file_name)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
            img_table[i] = img
return img_table
def load_images_nn(self, image_dir, downsize_rate=1, grayscale=False):
"""
Loading images in a non normalized form
:param image_dir:
:param downsize_rate:
:param grayscale:
:return:
"""
file_names = []
for root, subdirs, files in os.walk(image_dir):
files.sort()
for file in files:
file_names.append(os.path.join(root, file))
if grayscale is False:
img_table = np.ndarray(shape=(
len(file_names), self.image_height // downsize_rate,
self.image_width // downsize_rate, self.image_channels),
dtype=np.int16)
else:
img_table = np.ndarray(shape=(
len(file_names), self.image_height // downsize_rate,
self.image_width // downsize_rate, 1), dtype=np.int16)
for i in range(len(file_names)):
file_name = file_names[i]
            if grayscale:
                img = cv2.imread(file_name, 0)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                # grayscale reads come back as (H, W); add the channel axis
                img_table[i] = img[:, :, np.newaxis]
            else:
                img = cv2.imread(file_name, 1)
                img = cv2.resize(img, (self.image_width // downsize_rate,
                                       self.image_height // downsize_rate))
                img_table[i] = img
return img_table
class LoadTest:
def __init__(self, load):
self.load = load
def run(self):
start_time = time.time()
eva_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
train_image_dir = os.path.join(eva_dir, "data", "ua_detrac",
"small-data")
# test_image_dir = os.path.join(eva_dir, "data", "ua_detrac",
# "test_images")
test_image_dir = None
train_anno_dir = os.path.join(eva_dir, "data", "ua_detrac",
"small-annotation")
dir_dict = {"train_image": train_image_dir,
"test_image": test_image_dir,
"train_anno": train_anno_dir}
if __debug__:
print(("train image dir: " + train_image_dir))
# print("test image dir: " + test_image_dir)
print(("train annotation dir: " + train_anno_dir))
dt_train, dt_test = self.load.load(dir_dict)
Load().save("small.csv", dt_train)
if __debug__:
print(("--- Total Execution Time : %.3f seconds ---" % (
time.time() - start_time)))
print((dt_train.shape))
if test_image_dir is not None:
print((dt_test.shape))
if __name__ == "__main__":
load = Load()
load_test = LoadTest(load)
# load_test.run()
panda_table = Load().load_from_csv("small.csv")
a = 1 + 2
if __debug__:
print(("panda shape is " + str(panda_table.shape)))
| 38.6 | 79 | 0.545903 |
9c06ee2f427e8c529e15a72f61593168a7679620 | 2,236 | py | Python | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/__init__.py | tc79/python-project-template | bd2521252365d46ca3a2ba00abef1e0b4c8a1f1c | ["BSD-3-Clause"] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/__init__.py | tc79/python-project-template | bd2521252365d46ca3a2ba00abef1e0b4c8a1f1c | ["BSD-3-Clause"] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/__init__.py | tc79/python-project-template | bd2521252365d46ca3a2ba00abef1e0b4c8a1f1c | ["BSD-3-Clause"] | null | null | null |
# -*- encoding: utf-8 -*-
# {{ cookiecutter.project_name }} v{{ cookiecutter.version }}
# {{ cookiecutter.project_short_description }}
# Copyright © {{ cookiecutter.year }}, {{ cookiecutter.company }}.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the author of this software nor the names of
# contributors to this software may be used to endorse or promote
# products derived from this software without specific prior written
# consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
{{ cookiecutter.project_short_description }}
:Copyright: © {{ cookiecutter.year }}, {{ cookiecutter.company }}.
:License: BSD (see /LICENSE).
"""
__title__ = '{{ cookiecutter.project_name }}'
__version__ = '{{ cookiecutter.version }}'
__author__ = '{{ cookiecutter.full_name }}'
__license__ = '3-clause BSD'
__docformat__ = 'restructuredtext en'
__all__ = ()
# import gettext
# G = gettext.translation('{{ cookiecutter.repo_name }}', '/usr/share/locale', fallback='C')
# _ = G.gettext
| 42.188679 | 92 | 0.749106 |
e7998116ae01af13dd6c85afdcaf4b845cf2ad94 | 3,594 | py | Python | TicTacToe/tictactoe_board.py | sbeignez/Playground | 3e59888b97c988dbb072bfb7ce026af657083d96 | ["MIT"] | null | null | null | TicTacToe/tictactoe_board.py | sbeignez/Playground | 3e59888b97c988dbb072bfb7ce026af657083d96 | ["MIT"] | null | null | null | TicTacToe/tictactoe_board.py | sbeignez/Playground | 3e59888b97c988dbb072bfb7ce026af657083d96 | ["MIT"] | null | null | null |
from hmac import trans_36
from tictactoe import TicTacToe
import random
class Board:
def __init__(self, m=3, n=3, k=3, list = []):
self.cols = m
self.rows = n
if list:
self.board = list
else:
self.board = [TicTacToe.BOARD_EMPTY for _ in range(self.cols * self.rows)]
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.board == other.board
else:
return False
def __hash__(self) -> int:
return (str(self.board)).__hash__()
def reset_random(self):
self.board = [random.choice([TicTacToe.BOARD_EMPTY, TicTacToe.BOARD_X, TicTacToe.BOARD_O ]) for _ in range(self.cols * self.rows)]
def set_board(self, m=3, n=3, list = [] ):
self.board = list
self.cols = m
        self.rows = n
row1 = [0, 1, 2]
row2 = [3, 4, 5]
row3 = [6, 7, 8]
col1 = [0, 3, 6]
col2 = [1, 4, 7]
col3 = [2, 5, 8]
dia1 = [0, 4, 8]
dia2 = [2, 4, 6]
lines = [row1, row2, row3, col1, col2, col3, dia1, dia2]
def check_win(self, player):
for line in self.lines:
if self.check_line(line, player):
return True
return False
def check_line(self, line, player):
for i in line:
if self.board[i] != player:
return False
return True
rot90 = { 0: 6, 1: 3, 2: 0, 3: 7, 4: 4, 5: 1, 6: 8, 7: 5, 8: 2 }
symY = { 0: 2, 1: 1, 2: 0, 3: 5, 4: 4, 5: 3, 6: 8, 7: 7, 8: 6 }
transformations = { "rot90" : rot90, "symY" : symY}
group = [ [], [rot90], [rot90, rot90], [rot90, rot90, rot90], [symY], [symY, rot90], [symY, rot90, rot90], [symY, rot90, rot90, rot90] ]
# a, rotation 90
# b, symmetry on y axis
    # $S_{sym} = \{ 1, a, a^2, a^3, b, ba, ba^2, ba^3 \}$ (the 8 elements of the dihedral group)
def is_symmetric(board1, board2):
return any( Board.transform(board1, t).board == board2.board for t in Board.group )
def display_text(self):
print("")
print(" +" + "---+" * self.cols)
i = 0
for row in range(self.rows):
print(str(row+1).rjust(2) + "|", end="")
for col in range(self.cols):
print(" " + self.board[i]+" |", end="")
i += 1
print("\n +" + "---+" * self.cols)
print(" A B C ")
print("")
def transform_step(board, transformation):
if not (board.rows == board.cols):
return None
b = [ board.board[transformation[i]] for i in range(len(board.board))]
return Board(board.cols, board.rows, 3, b)
def transform(board, transformations):
# print("trans", end=", " )
for t in transformations:
board = Board.transform_step(board, t)
# print(board.board)
return board
def group_boards(board):
return set( Board.transform(board, trans_list) for trans_list in Board.group )
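# Sanity note (an illustrative sketch, not from the original script): the
# orbit returned by Board.group_boards contains at most 8 distinct boards and
# collapses to a single board for fully symmetric positions, e.g.
#
#   len(Board.group_boards(Board(3, 3, 3))) == 1   # the empty board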
b = Board(3,3,3)
b.reset_random()
b.display_text()
c = Board.transform(b, [Board.rot90])
b.display_text()
c.display_text()
B = [ Board.transform(b, trans_list) for trans_list in Board.group ]
print("--"*10)
for b in B:
b.display_text()
print("--"*10)
B[0].display_text()
x = Board.is_symmetric(b, B[2])
print(x)
print("--"*10)
x1 = Board(3,3,3,['O', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '])
x2 = Board(3,3,3,[' ', 'O', ' ', ' ', ' ', ' ', ' ', ' ', ' '])
x3 = Board(3,3,3,[' ', ' ', ' ', ' ', 'O', ' ', ' ', ' ', ' '])
X = Board.group_boards(x1)
print("--"*10)
for b in X:
    b.display_text()
| 28.299213 | 140 | 0.516138 |
22c727102a304d24cca378a8b5eeb3ab1d0ff003 | 5,642 | py | Python | tests/test_lstm.py | JesseTG/Sock | 97b2f76dae324708a26bb46ce466680e6e4c769e | ["BSD-3-Clause"] | null | null | null | tests/test_lstm.py | JesseTG/Sock | 97b2f76dae324708a26bb46ce466680e6e4c769e | ["BSD-3-Clause"] | null | null | null | tests/test_lstm.py | JesseTG/Sock | 97b2f76dae324708a26bb46ce466680e6e4c769e | ["BSD-3-Clause"] | null | null | null |
import pytest
import torch
from tests.marks import *
from sock.model.data import WordEmbeddings, sentence_label_pad, sentence_pad
from sock.model.nn import ContextualLSTM
@modes("cpu", "cuda")
def test_devices_are_the_same(lstm: ContextualLSTM, glove_embedding: WordEmbeddings):
assert lstm.device == glove_embedding.device
def test_create_lstm(lstm: ContextualLSTM):
assert lstm is not None
def test_has_modules(lstm: ContextualLSTM):
modules = tuple(lstm.modules())
assert modules != []
def test_has_parameters(lstm: ContextualLSTM):
parameters = tuple(lstm.parameters())
assert parameters != []
@modes("cuda", "dp")
def test_lstm_moves_all_data_to_cuda(lstm: ContextualLSTM):
for p in lstm.parameters():
assert p.is_cuda
@modes("cuda")
def test_lstm_moves_embeddings_to_cuda(lstm_cuda: ContextualLSTM):
assert lstm_cuda.embeddings.weight.is_cuda
@modes("dp")
def test_lstm_moves_embeddings_to_cuda_in_dp_mode(lstm_dp):
assert lstm_dp.module.embeddings.weight.is_cuda
@modes("cuda", "dp")
def test_lstm_needs_input_from_same_device(lstm: ContextualLSTM):
with pytest.raises(RuntimeError):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 78, 3, 1], dtype=torch.long, device="cpu")
])
lstm(encoding)
def test_lstm_evaluates(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([7, 1, 5, 78, 3, 1], dtype=torch.long, device=device)
])
result = lstm(encoding)
assert torch.is_tensor(result)
assert result.device == device
@pytest.mark.benchmark(group="test_bench_lstm_evaluates")
def test_bench_lstm_evaluates(benchmark, lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([7, 1, 5, 78, 3, 1], dtype=torch.long, device=device)
] * 1000)
result = benchmark(lstm, encoding)
assert torch.is_tensor(result)
assert result.device == device
def test_lstm_rejects_list_of_lists(lstm: ContextualLSTM):
encoding = [
[0, 1, 5, 8, 3, 1],
[1, 4, 6, 1, 9, 7],
[9, 0, 6, 9, 9, 0],
[2, 3, 6, 1, 2, 4],
]
with pytest.raises(Exception):
result = lstm(encoding)
def test_lstm_rejects_tensor(lstm: ContextualLSTM, device: torch.device):
encoding = torch.tensor([
[0, 1, 5, 8, 3, 1],
[1, 4, 6, 1, 9, 7],
[9, 0, 6, 9, 9, 0],
[2, 3, 6, 1, 2, 4],
], dtype=torch.long, device=device)
with pytest.raises(Exception):
result = lstm(encoding)
def test_lstm_evaluates_batches_of_same_length(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_evaluates_batches_of_different_length_unsorted(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7, 9, 1], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_evaluates_batches_of_different_length_in_sorted(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([1, 4, 6, 1, 9, 7, 9, 1], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4, 4], dtype=torch.long, device=device),
torch.tensor([0, 1, 5, 8, 3], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_returns_1d_float_tensor(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert result.dtype.is_floating_point
assert result.shape == torch.Size([len(encoding[0])])
def test_lstm_in_training_mode_by_default(lstm: ContextualLSTM):
assert lstm.training
def test_lstm_eval_sets_eval_mode(lstm: ContextualLSTM):
lstm.eval()
assert not lstm.training
def test_lstm_train_false_sets_eval_mode(lstm: ContextualLSTM):
lstm.train(False)
assert not lstm.training
def test_lstm_results_have_no_gradient_with_no_grad(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
with torch.no_grad():
result = lstm(encoding)
assert not result.requires_grad
def test_get_lstm_cpu(request, lstm_cpu: ContextualLSTM):
assert lstm_cpu is not None
assert type(lstm_cpu) == ContextualLSTM
assert lstm_cpu.device.type == "cpu"
| 31.171271 | 106 | 0.667494 |
9b38837e44ddf801b104146b4174410e41ef737a | 297 | py | Python | tests/basics/builtin_delattr.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | ["MIT"] | 8 | 2017-01-08T19:45:01.000Z | 2020-09-07T04:39:10.000Z | tests/basics/builtin_delattr.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | ["MIT"] | null | null | null | tests/basics/builtin_delattr.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | ["MIT"] | 2 | 2017-07-27T19:45:05.000Z | 2020-08-02T19:00:33.000Z |
# test builtin delattr
try:
delattr
except:
import sys
print("SKIP")
sys.exit()
class A: pass
a = A()
a.x = 1
print(a.x)
delattr(a, 'x')
try:
a.x
except AttributeError:
print('AttributeError')
try:
delattr(a, 'x')
except AttributeError:
print('AttributeError')
| 11.88 | 27 | 0.622896 |
e48412bb7edaeaab7561952a83cb5df97a9aeb17 | 15,044 | py | Python | ml-agents/mlagents/trainers/sac_transfer/trainer.py | ycsun2017/ml-agents | 81eaaad4b0bec6e7ba16a3bb4c003208db846984 | ["Apache-2.0"] | null | null | null | ml-agents/mlagents/trainers/sac_transfer/trainer.py | ycsun2017/ml-agents | 81eaaad4b0bec6e7ba16a3bb4c003208db846984 | ["Apache-2.0"] | null | null | null | ml-agents/mlagents/trainers/sac_transfer/trainer.py | ycsun2017/ml-agents | 81eaaad4b0bec6e7ba16a3bb4c003208db846984 | ["Apache-2.0"] | null | null | null |
# ## ML-Agent Learning (SAC)
# Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290
# and implemented in https://github.com/hill-a/stable-baselines
from collections import defaultdict
from typing import Dict, cast
import os
import numpy as np
from mlagents.trainers.policy.checkpoint_manager import ModelCheckpoint
from mlagents_envs.logging_util import get_logger
from mlagents_envs.timers import timed
from mlagents_envs.base_env import BehaviorSpec
from mlagents.trainers.buffer import BufferKey, RewardSignalUtil
from mlagents.trainers.policy import Policy
from mlagents.trainers.trainer.rl_trainer import RLTrainer
from mlagents.trainers.policy.torch_policy import TorchPolicy
from mlagents.trainers.sac_transfer.optimizer_torch import TorchSACTransferOptimizer
from mlagents.trainers.trajectory import Trajectory, ObsUtil
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.settings import TrainerSettings, SACTransferSettings
logger = get_logger(__name__)
BUFFER_TRUNCATE_PERCENT = 0.8
class SACTransferTrainer(RLTrainer):
"""
The SACTrainer is an implementation of the SAC algorithm, with support
for discrete actions and recurrent networks.
"""
def __init__(
self,
behavior_name: str,
reward_buff_cap: int,
trainer_settings: TrainerSettings,
training: bool,
load: bool,
seed: int,
artifact_path: str,
):
"""
Responsible for collecting experiences and training SAC model.
:param behavior_name: The name of the behavior associated with trainer config
:param reward_buff_cap: Max reward history to track in the reward buffer
:param trainer_settings: The parameters for the trainer.
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
:param artifact_path: The directory within which to store artifacts from this trainer.
"""
super().__init__(
behavior_name,
trainer_settings,
training,
load,
artifact_path,
reward_buff_cap,
)
self.seed = seed
        self.policy: Policy = None  # type: ignore
        self.optimizer: TorchSACTransferOptimizer = None  # type: ignore
        self.hyperparameters: SACTransferSettings = cast(
            SACTransferSettings, trainer_settings.hyperparameters
        )
self.step = 0
# Don't divide by zero
self.update_steps = 1
self.reward_signal_update_steps = 1
self.steps_per_update = self.hyperparameters.steps_per_update
self.reward_signal_steps_per_update = (
self.hyperparameters.reward_signal_steps_per_update
)
self.checkpoint_replay_buffer = self.hyperparameters.save_replay_buffer
print("using SAC transfer trainer")
print(self.hyperparameters)
def _checkpoint(self) -> ModelCheckpoint:
"""
Writes a checkpoint model to memory
Overrides the default to save the replay buffer.
"""
ckpt = super()._checkpoint()
if self.checkpoint_replay_buffer:
self.save_replay_buffer()
return ckpt
def save_model(self) -> None:
"""
Saves the final training model to memory
Overrides the default to save the replay buffer.
"""
super().save_model()
if self.checkpoint_replay_buffer:
self.save_replay_buffer()
def save_replay_buffer(self) -> None:
"""
Save the training buffer's update buffer to a pickle file.
"""
filename = os.path.join(self.artifact_path, "last_replay_buffer.hdf5")
logger.info(f"Saving Experience Replay Buffer to {filename}")
with open(filename, "wb") as file_object:
self.update_buffer.save_to_file(file_object)
def load_replay_buffer(self) -> None:
"""
Loads the last saved replay buffer from a file.
"""
filename = os.path.join(self.artifact_path, "last_replay_buffer.hdf5")
logger.info(f"Loading Experience Replay Buffer from {filename}")
with open(filename, "rb+") as file_object:
self.update_buffer.load_from_file(file_object)
logger.info(
"Experience replay buffer has {} experiences.".format(
self.update_buffer.num_experiences
)
)
def _process_trajectory(self, trajectory: Trajectory) -> None:
"""
Takes a trajectory and processes it, putting it into the replay buffer.
"""
super()._process_trajectory(trajectory)
last_step = trajectory.steps[-1]
agent_id = trajectory.agent_id # All the agents should have the same ID
agent_buffer_trajectory = trajectory.to_agentbuffer()
# Update the normalization
if self.is_training:
self.policy.update_normalization(agent_buffer_trajectory)
# Evaluate all reward functions for reporting purposes
self.collected_rewards["environment"][agent_id] += np.sum(
agent_buffer_trajectory[BufferKey.ENVIRONMENT_REWARDS]
)
for name, reward_signal in self.optimizer.reward_signals.items():
evaluate_result = (
reward_signal.evaluate(agent_buffer_trajectory) * reward_signal.strength
)
# Report the reward signals
self.collected_rewards[name][agent_id] += np.sum(evaluate_result)
# Get all value estimates for reporting purposes
(
value_estimates,
_,
value_memories,
) = self.optimizer.get_trajectory_value_estimates(
agent_buffer_trajectory, trajectory.next_obs, trajectory.done_reached
)
if value_memories is not None:
agent_buffer_trajectory[BufferKey.CRITIC_MEMORY].set(value_memories)
for name, v in value_estimates.items():
self._stats_reporter.add_stat(
f"Policy/{self.optimizer.reward_signals[name].name.capitalize()} Value",
np.mean(v),
)
# Bootstrap using the last step rather than the bootstrap step if max step is reached.
# Set last element to duplicate obs and remove dones.
if last_step.interrupted:
last_step_obs = last_step.obs
for i, obs in enumerate(last_step_obs):
agent_buffer_trajectory[ObsUtil.get_name_at_next(i)][-1] = obs
agent_buffer_trajectory[BufferKey.DONE][-1] = False
# Append to update buffer
agent_buffer_trajectory.resequence_and_append(
self.update_buffer, training_length=self.policy.sequence_length
)
if trajectory.done_reached:
self._update_end_episode_stats(agent_id, self.optimizer)
def _is_ready_update(self) -> bool:
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not _update_policy() can be run
"""
return (
self.update_buffer.num_experiences >= self.hyperparameters.batch_size
and self.step >= self.hyperparameters.buffer_init_steps
)
@timed
def _update_policy(self) -> bool:
"""
Update the SAC policy and reward signals. The reward signal generators are updated using different mini batches.
By default we imitate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times.
:return: Whether or not the policy was updated.
"""
policy_was_updated = self._update_sac_policy()
self._update_reward_signals()
return policy_was_updated
def maybe_load_replay_buffer(self):
# Load the replay buffer if load
if self.load and self.checkpoint_replay_buffer:
try:
self.load_replay_buffer()
except (AttributeError, FileNotFoundError):
logger.warning(
"Replay buffer was unable to load, starting from scratch."
)
logger.debug(
"Loaded update buffer with {} sequences".format(
self.update_buffer.num_experiences
)
)
def create_torch_policy(
self, parsed_behavior_id: BehaviorIdentifiers, behavior_spec: BehaviorSpec
) -> TorchPolicy:
"""
Creates a policy with a PyTorch backend and SAC hyperparameters
:param parsed_behavior_id:
:param behavior_spec: specifications for policy construction
:return policy
"""
policy = TorchPolicy(
self.seed,
behavior_spec,
self.trainer_settings,
condition_sigma_on_obs=True,
tanh_squash=True,
separate_critic=True,
)
self.maybe_load_replay_buffer()
return policy
def _update_sac_policy(self) -> bool:
"""
Uses update_buffer to update the policy. We sample the update_buffer and update
until the steps_per_update ratio is met.
"""
has_updated = False
self.cumulative_returns_since_policy_update.clear()
n_sequences = max(
int(self.hyperparameters.batch_size / self.policy.sequence_length), 1
)
batch_update_stats: Dict[str, list] = defaultdict(list)
while (
self.step - self.hyperparameters.buffer_init_steps
) / self.update_steps > self.steps_per_update:
logger.debug(f"Updating SAC policy at step {self.step}")
buffer = self.update_buffer
if self.update_buffer.num_experiences >= self.hyperparameters.batch_size:
sampled_minibatch = buffer.sample_mini_batch(
self.hyperparameters.batch_size,
sequence_length=self.policy.sequence_length,
)
# Get rewards for each reward
for name, signal in self.optimizer.reward_signals.items():
sampled_minibatch[RewardSignalUtil.rewards_key(name)] = (
signal.evaluate(sampled_minibatch) * signal.strength
)
update_stats = self.optimizer.update(sampled_minibatch, n_sequences)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
self.update_steps += 1
for stat, stat_list in batch_update_stats.items():
self._stats_reporter.add_stat(stat, np.mean(stat_list))
has_updated = True
if self.optimizer.bc_module:
update_stats = self.optimizer.bc_module.update()
for stat, val in update_stats.items():
self._stats_reporter.add_stat(stat, val)
        # Truncate the update buffer if necessary. Truncate more than strictly
        # needed so we do not re-truncate a large buffer at every update.
if self.update_buffer.num_experiences > self.hyperparameters.buffer_size:
self.update_buffer.truncate(
int(self.hyperparameters.buffer_size * BUFFER_TRUNCATE_PERCENT)
)
return has_updated
def _update_reward_signals(self) -> None:
"""
Iterate through the reward signals and update them. Unlike in PPO,
do it separate from the policy so that it can be done at a different
interval.
This function should only be used to simulate
http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times. Normally, the reward signal
and policy are updated in parallel.
"""
buffer = self.update_buffer
n_sequences = max(
int(self.hyperparameters.batch_size / self.policy.sequence_length), 1
)
batch_update_stats: Dict[str, list] = defaultdict(list)
while (
self.step - self.hyperparameters.buffer_init_steps
) / self.reward_signal_update_steps > self.reward_signal_steps_per_update:
# Get minibatches for reward signal update if needed
reward_signal_minibatches = {}
for name in self.optimizer.reward_signals.keys():
logger.debug(f"Updating {name} at step {self.step}")
if name != "extrinsic":
reward_signal_minibatches[name] = buffer.sample_mini_batch(
self.hyperparameters.batch_size,
sequence_length=self.policy.sequence_length,
)
update_stats = self.optimizer.update_reward_signals(
reward_signal_minibatches, n_sequences
)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
self.reward_signal_update_steps += 1
for stat, stat_list in batch_update_stats.items():
self._stats_reporter.add_stat(stat, np.mean(stat_list))
def create_sac_optimizer(self) -> TorchSACTransferOptimizer:
return TorchSACTransferOptimizer( # type: ignore
cast(TorchPolicy, self.policy), self.trainer_settings # type: ignore
) # type: ignore
def add_policy(
self, parsed_behavior_id: BehaviorIdentifiers, policy: Policy
) -> None:
"""
Adds policy to trainer.
"""
if self.policy:
logger.warning(
"Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
train adversarial games.".format(
self.__class__.__name__
)
)
self.policy = policy
self.policies[parsed_behavior_id.behavior_id] = policy
self.optimizer = self.create_sac_optimizer()
for _reward_signal in self.optimizer.reward_signals.keys():
self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
self.model_saver.register(self.policy)
self.model_saver.register(self.optimizer)
self.model_saver.initialize_or_load()
# Needed to resume loads properly
self.step = policy.get_current_step()
# Assume steps were updated at the correct ratio before
self.update_steps = int(max(1, self.step / self.steps_per_update))
self.reward_signal_update_steps = int(
max(1, self.step / self.reward_signal_steps_per_update)
)
def get_policy(self, name_behavior_id: str) -> Policy:
"""
Gets policy from trainer associated with name_behavior_id
:param name_behavior_id: full identifier of policy
"""
return self.policy
| 40.224599 | 122 | 0.644443 |
8db30592a1583a9ddc22718ab608005a3f23f411 | 3,177 | py | Python | tests/aat/api/v1/client/models/bulk_stop_packet_captures_request.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | ["Apache-2.0"] | 20 | 2019-12-04T01:28:52.000Z | 2022-03-17T14:09:34.000Z | tests/aat/api/v1/client/models/bulk_stop_packet_captures_request.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | ["Apache-2.0"] | 115 | 2020-02-04T21:29:54.000Z | 2022-02-17T13:33:51.000Z | tests/aat/api/v1/client/models/bulk_stop_packet_captures_request.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | ["Apache-2.0"] | 16 | 2019-12-03T16:41:18.000Z | 2021-11-06T04:44:11.000Z |
# coding: utf-8
"""
OpenPerf API
REST API interface for OpenPerf # noqa: E501
OpenAPI spec version: 1
Contact: support@spirent.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BulkStopPacketCapturesRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ids': 'list[str]'
}
attribute_map = {
'ids': 'ids'
}
def __init__(self, ids=None): # noqa: E501
"""BulkStopPacketCapturesRequest - a model defined in Swagger""" # noqa: E501
self._ids = None
self.discriminator = None
self.ids = ids
@property
def ids(self):
"""Gets the ids of this BulkStopPacketCapturesRequest. # noqa: E501
List of capture identifiers # noqa: E501
:return: The ids of this BulkStopPacketCapturesRequest. # noqa: E501
:rtype: list[str]
"""
return self._ids
@ids.setter
def ids(self, ids):
"""Sets the ids of this BulkStopPacketCapturesRequest.
List of capture identifiers # noqa: E501
:param ids: The ids of this BulkStopPacketCapturesRequest. # noqa: E501
:type: list[str]
"""
self._ids = ids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BulkStopPacketCapturesRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BulkStopPacketCapturesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.387931 | 86 | 0.563425 |
a219ae0c928d1bd914a24417fb7084b8b3c51e51 | 466 | py | Python | vortex/errors.py | sihrc/vortex | 4b913946c8c1743a5cd7a07b80bc7ab516ce12f2 | ["MIT"] | null | null | null | vortex/errors.py | sihrc/vortex | 4b913946c8c1743a5cd7a07b80bc7ab516ce12f2 | ["MIT"] | 6 | 2018-08-04T21:29:11.000Z | 2021-05-16T05:30:34.000Z | vortex/errors.py | sihrc/vortex | 4b913946c8c1743a5cd7a07b80bc7ab516ce12f2 | ["MIT"] | 1 | 2019-08-22T11:48:30.000Z | 2019-08-22T11:48:30.000Z |
class VortexException(Exception):
def __init__(self, message, code=400, **kwargs):
self.code = code
self.message = message
self.body = kwargs
super().__init__("{}. status={}".format(message, code))
def to_dict(self):
return {"message": self.message, "code": self.code, "body": self.body}
class UnhandledException(VortexException):
def __init__(self):
super().__init__("Internal Server Error", code=500)
| 31.066667 | 78 | 0.639485 |
fab833f38855e2d66b6fed6fb6c8fb32a5695b0e | 22 | py | Python | couch/datadog_checks/couch/__about__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | ["BSD-3-Clause"] | 1 | 2021-05-14T20:00:35.000Z | 2021-05-14T20:00:35.000Z | couch/datadog_checks/couch/__about__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | ["BSD-3-Clause"] | null | null | null | couch/datadog_checks/couch/__about__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | ["BSD-3-Clause"] | 1 | 2021-09-07T12:35:18.000Z | 2021-09-07T12:35:18.000Z |
__version__ = "2.6.1"
| 11 | 21 | 0.636364 |
a32ab0503cebb92ce647798914203ad545f98d7a | 3,524 | py | Python | examples/random_forest_importances.py | joshloyal/drforest | ab1e3f01cab36f15f1c37b82f71421cd025c901e | ["MIT"] | 2 | 2021-09-22T12:15:43.000Z | 2022-01-04T12:59:50.000Z | examples/random_forest_importances.py | joshloyal/drforest | ab1e3f01cab36f15f1c37b82f71421cd025c901e | ["MIT"] | null | null | null | examples/random_forest_importances.py | joshloyal/drforest | ab1e3f01cab36f15f1c37b82f71421cd025c901e | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from drforest.datasets import make_simulation1
from drforest.ensemble import DimensionReductionForestRegressor
from drforest.ensemble import permutation_importance
plt.rc('font', family='serif')
fontsize = 14
n_samples = 2000
n_features = 5
X, y = make_simulation1(
n_samples=n_samples, noise=1, n_features=n_features, random_state=1234)
forest = DimensionReductionForestRegressor(
n_estimators=500, store_X_y=True, n_jobs=-1,
min_samples_leaf=3, max_features=None,
random_state=42).fit(X, y)
x0 = np.zeros(n_features)
x0[:2] = np.array([-1.5, 1.5])
local_direc_x0 = forest.local_principal_direction(x0)
local_direc_x0 *= np.sign(local_direc_x0[0])
x1 = np.zeros(n_features)
x1[:2] = [0.5, -0.5]
local_direc_x1 = forest.local_principal_direction(x1)
local_direc_x1 *= np.sign(local_direc_x1[0])
#forest = RandomForestRegressor(n_estimators=500,
# min_samples_leaf=3,
# n_jobs=-1, max_features=None,
# oob_score=True,
# random_state=42).fit(X, y)
#
#forest_imp = permutation_importance(
# forest, X, y, random_state=forest.random_state)
#forest_imp /= np.sum(forest_imp)
forest_imp = forest.feature_importances_
#order = np.argsort(forest_imp)
fig, ax = plt.subplots(figsize=(18, 5), ncols=4)
def f(x, y):
r1 = x - y
r2 = x + y
return (20 * np.maximum(
np.maximum(np.exp(-2 * r1 ** 2), np.exp(-r2 ** 2)),
2 * np.exp(-0.5 * (x ** 2 + y ** 2))))
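# The response surface above mixes two narrow diagonal ridges (along x = y
# and x = -y) with a broader isotropic bump at the origin, so the locally
# relevant direction changes from point to point; this is what the local
# principal directions queried above are meant to recover.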
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
ax[0].contour(X, Y, Z, 3, colors='black', linestyles='--', levels=5, linewidths=1.5)
ax[0].imshow(Z, extent=[-3, 3, -3, 3], origin='lower', cmap='YlGnBu_r', alpha=0.5)
ax[0].scatter([-1.5, 0.5], [1.5, -0.5], color=None, edgecolor='black')
ax[0].annotate(r'(-1.5, 1.5)', (-1.5, 1.5), xytext=(-1.4, 1.6), fontname='Sans', weight='bold')
ax[0].annotate(r'(0.5, -0.5)', (0.5, -0.5), xytext=(0.6, -0.4), fontname='Sans', weight='bold')
ax[0].set_aspect('equal')
ax[1].bar(np.arange(1, n_features + 1), forest_imp, color='gray')
ax[1].set_ylabel('Importance', fontsize=fontsize)
#ax[1].set_title('Random Forest', fontsize=fontsize)
ax[1].set_xlabel(None)
ax[1].axhline(0, color='black', linestyle='-')
ax[1].set_ylim(-1, 1)
ax[1].set_xlabel('Variable', fontsize=fontsize)
ax[1].text(3.5, 0.8, 'Global', fontsize=16)
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x0]
ax[2].bar(np.arange(1, n_features + 1), local_direc_x0, color=color)
#ax[2].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[2].axhline(0, color='black', linestyle='-', lw=1)
ax[2].set_ylim(-1, 1)
ax[2].set_xlabel('Variable', fontsize=fontsize)
ax[2].text(2.5, 0.8, '$\mathbf{x}_0 = (-1.5, 1.5, 0, 0, 0)$', fontsize=12)
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x1]
ax[3].bar(np.arange(1, n_features + 1), local_direc_x1, color=color)
#ax[3].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[3].set_xlabel('Variable', fontsize=fontsize)
ax[3].invert_yaxis()
ax[3].axhline(0, color='black', linestyle='-', lw=1)
ax[3].text(2.5, 0.8, '$\mathbf{x}_0 = (0.5, -0.5, 0, 0, 0)$', fontsize=12)
ax[3].set_ylim(-1, 1)
plt.subplots_adjust(wspace=0.3, left=0.03, right=0.985)
fig.savefig('local_lpd.png', dpi=300, bbox_inches='tight')
| 36.329897 | 95 | 0.663451 |
c3dde282e9f9e170d3833590eec5a39905cf7796 | 7,119 | py | Python | tests/standard/fido2/extensions/test_hmac_secret.py | rgerganov/fido2-tests | 7881689b86a2e9ad5aa9ba2aa9c0747bd406b643 | ["Apache-2.0", "MIT"] | 3 | 2020-02-05T03:36:21.000Z | 2020-03-05T21:34:32.000Z | tests/standard/fido2/extensions/test_hmac_secret.py | antonio-fr/fido2-tests | cb3e3a66aa139b5b2cfd8e6f8cf8f5511d8931be | ["Apache-2.0", "MIT"] | null | null | null | tests/standard/fido2/extensions/test_hmac_secret.py | antonio-fr/fido2-tests | cb3e3a66aa139b5b2cfd8e6f8cf8f5511d8931be | ["Apache-2.0", "MIT"] | null | null | null |
import pytest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from fido2.ctap import CtapError
from fido2.utils import hmac_sha256, sha256
from tests.utils import FidoRequest, shannon_entropy, verify
def get_salt_params(cipher, shared_secret, salts):
enc = cipher.encryptor()
salt_enc = b""
for salt in salts:
salt_enc += enc.update(salt)
salt_enc += enc.finalize()
salt_auth = hmac_sha256(shared_secret, salt_enc)[:16]
return salt_enc, salt_auth
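# Clarifying note (an addition summarizing the CTAP2 hmac-secret extension as
# exercised by get_salt_params above): each valid salt is 32 bytes, the salts
# are AES-CBC encrypted with a zero IV under the PIN-protocol shared secret,
# and saltAuth is the first 16 bytes of HMAC-SHA256(shared_secret, salt_enc).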
salt1 = b"\xa5" * 32
salt2 = b"\x96" * 32
salt3 = b"\x03" * 32
salt4 = b"\x5a" * 16
salt5 = b"\x96" * 64
@pytest.fixture(scope="module")
def MCHmacSecret(resetDevice,):
req = FidoRequest(extensions={"hmac-secret": True}, options={"rk": True})
res = resetDevice.sendMC(*req.toMC())
setattr(res, "request", req)
return res
@pytest.fixture(scope="class")
def sharedSecret(device, MCHmacSecret):
return device.client.pin_protocol.get_shared_secret()
@pytest.fixture(scope="class")
def cipher(device, sharedSecret):
key_agreement, shared_secret = sharedSecret
return Cipher(
algorithms.AES(shared_secret), modes.CBC(b"\x00" * 16), default_backend()
)
class TestHmacSecret(object):
def test_hmac_secret_make_credential(self, MCHmacSecret):
assert MCHmacSecret.auth_data.extensions
assert "hmac-secret" in MCHmacSecret.auth_data.extensions
assert MCHmacSecret.auth_data.extensions["hmac-secret"] == True
def test_hmac_secret_info(self, info):
assert "hmac-secret" in info.extensions
def test_fake_extension(self, device):
req = FidoRequest(extensions={"tetris": True})
res = device.sendMC(*req.toMC())
def test_get_shared_secret(self, sharedSecret):
pass
@pytest.mark.parametrize("salts", [(salt1,), (salt1, salt2)])
def test_hmac_secret_entropy(self, device, MCHmacSecret, cipher, sharedSecret, salts):
print("salts:", salts)
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, salts)
req = FidoRequest(
extensions={"hmac-secret": {1: key_agreement, 2: salt_enc, 3: salt_auth}}
)
auth = device.sendGA(*req.toGA())
ext = auth.auth_data.extensions
assert ext
assert "hmac-secret" in ext
assert isinstance(ext["hmac-secret"], bytes)
assert len(ext["hmac-secret"]) == len(salts) * 32
verify(MCHmacSecret, auth, req.cdh)
dec = cipher.decryptor()
key = dec.update(ext["hmac-secret"]) + dec.finalize()
print(shannon_entropy(ext["hmac-secret"]))
if len(salts) == 1:
assert shannon_entropy(ext["hmac-secret"]) > 4.6
assert shannon_entropy(key) > 4.6
if len(salts) == 2:
assert shannon_entropy(ext["hmac-secret"]) > 5.4
assert shannon_entropy(key) > 5.4
def get_output(self, device, MCHmacSecret, cipher, sharedSecret, salts):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, salts)
req = FidoRequest(
extensions={"hmac-secret": {1: key_agreement, 2: salt_enc, 3: salt_auth}}
)
auth = device.sendGA(*req.toGA())
ext = auth.auth_data.extensions
assert ext
assert "hmac-secret" in ext
assert isinstance(ext["hmac-secret"], bytes)
assert len(ext["hmac-secret"]) == len(salts) * 32
verify(MCHmacSecret, auth, req.cdh)
dec = cipher.decryptor()
output = dec.update(ext["hmac-secret"]) + dec.finalize()
if len(salts) == 2:
return (output[0:32], output[32:64])
else:
return output
def test_hmac_secret_sanity(self, device, MCHmacSecret, cipher, sharedSecret):
output1 = self.get_output(device, MCHmacSecret, cipher, sharedSecret, (salt1,))
output12 = self.get_output(device, MCHmacSecret, cipher, sharedSecret, (salt1, salt2))
output21 = self.get_output(device, MCHmacSecret, cipher, sharedSecret, (salt2, salt1))
assert output12[0] == output1
assert output21[1] == output1
assert output21[0] == output12[1]
assert output12[0] != output12[1]
def test_missing_keyAgreement(self, device, cipher, sharedSecret):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, (salt3,))
req = FidoRequest(extensions={"hmac-secret": {2: salt_enc, 3: salt_auth}})
with pytest.raises(CtapError):
device.sendGA(*req.toGA())
def test_missing_saltAuth(self, device, cipher, sharedSecret):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, (salt3,))
req = FidoRequest(extensions={"hmac-secret": {1: key_agreement, 2: salt_enc}})
with pytest.raises(CtapError) as e:
device.sendGA(*req.toGA())
assert e.value.code == CtapError.ERR.MISSING_PARAMETER
def test_missing_saltEnc(self, device, cipher, sharedSecret):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, (salt3,))
req = FidoRequest(extensions={"hmac-secret": {1: key_agreement, 3: salt_auth}})
with pytest.raises(CtapError) as e:
device.sendGA(*req.toGA())
assert e.value.code == CtapError.ERR.MISSING_PARAMETER
def test_bad_auth(self, device, cipher, sharedSecret):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, (salt3,))
bad_auth = list(salt_auth[:])
bad_auth[len(bad_auth) // 2] = bad_auth[len(bad_auth) // 2] ^ 1
bad_auth = bytes(bad_auth)
req = FidoRequest(
extensions={"hmac-secret": {1: key_agreement, 2: salt_enc, 3: bad_auth}}
)
with pytest.raises(CtapError) as e:
device.sendGA(*req.toGA())
assert e.value.code == CtapError.ERR.EXTENSION_FIRST
@pytest.mark.parametrize("salts", [(salt4,), (salt4, salt5)])
def test_invalid_salt_length(self, device, cipher, sharedSecret, salts):
key_agreement, shared_secret = sharedSecret
salt_enc, salt_auth = get_salt_params(cipher, shared_secret, salts)
req = FidoRequest(
extensions={"hmac-secret": {1: key_agreement, 2: salt_enc, 3: salt_auth}}
)
with pytest.raises(CtapError) as e:
device.sendGA(*req.toGA())
assert e.value.code == CtapError.ERR.INVALID_LENGTH
# auth = self.testGA(
# "Send GA request with incorrect salt length %d, expect INVALID_LENGTH"
#% len(salt_enc),
# rp["id"],
# cdh,
# other={
# "extensions": {
# "hmac-secret": {1: key_agreement, 2: salt_enc, 3: salt_auth}
# }
# },
# expectedError=CtapError.ERR.INVALID_LENGTH,
# )
| 35.242574 | 94 | 0.648687 |
148bfdbc9bdc74cea8946bfe747d00ee3de5df62 | 956 | py | Python | prov_vo/migrations/0003_rename_entity_datatype.py | kristinriebe/django-prov-vo | 5bd86eb58833fe591004e6ef431b2b3deae7a62c | ["Apache-2.0"] | 1 | 2018-12-11T05:53:55.000Z | 2018-12-11T05:53:55.000Z | prov_vo/migrations/0003_rename_entity_datatype.py | kristinriebe/django-prov-vo | 5bd86eb58833fe591004e6ef431b2b3deae7a62c | ["Apache-2.0"] | null | null | null | prov_vo/migrations/0003_rename_entity_datatype.py | kristinriebe/django-prov-vo | 5bd86eb58833fe591004e6ef431b2b3deae7a62c | ["Apache-2.0"] | 1 | 2021-06-23T13:09:05.000Z | 2021-06-23T13:09:05.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-15 22:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prov_vo', '0002_parameter_entityrights'),
]
operations = [
migrations.RemoveField(
model_name='entity',
name='dataType',
),
migrations.AddField(
model_name='entity',
name='datatype',
field=models.CharField(blank=True, choices=[('vo:boolean', 'vo:boolean'), ('vo:bit', 'vo:bit'), ('vo:unsignedByte', 'vo:unsignedByte'), ('vo:short', 'vo:short'), ('vo:int', 'vo:int'), ('vo:long', 'vo:long'), ('vo:char', 'vo:char'), ('vo:unicodeChar', 'vo:unicodeChar'), ('vo:float', 'vo:float'), ('vo:double', 'vo:double'), ('vo:floatComplex', 'vo:floatComplex'), ('vo:doubleComplex', 'vo:doubleComplex')], max_length=128, null=True),
),
]
| 38.24 | 446 | 0.600418 |
52c12395a31c3beb95946b66ced5665803206452 | 1,936 | py | Python | tests/test_analyze.py | edumotya/cvdata | 9c3f6ea3520b564b3386ce1149a7ce40b4d14e7a | ["MIT"] | 15 | 2020-01-22T16:10:35.000Z | 2022-01-09T13:27:32.000Z | tests/test_analyze.py | edumotya/cvdata | 9c3f6ea3520b564b3386ce1149a7ce40b4d14e7a | ["MIT"] | 94 | 2019-11-14T14:40:33.000Z | 2022-01-10T06:38:44.000Z | tests/test_analyze.py | edumotya/cvdata | 9c3f6ea3520b564b3386ce1149a7ce40b4d14e7a | ["MIT"] | 8 | 2020-03-10T11:10:06.000Z | 2022-01-09T13:30:00.000Z |
import logging
import os
import pytest
from cvdata import analyze
# ------------------------------------------------------------------------------
# disable logging messages
logging.disable(logging.CRITICAL)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
"data_dir",
)
def test_count_labels(
data_dir,
):
"""
Test for the cvdata.analyze.count_labels() function
:param data_dir: temporary directory into which test files will be loaded
"""
annotation_format = "kitti"
annotation_file_path = os.path.join(str(data_dir), annotation_format, "kitti_1.txt")
label_counts = analyze.count_labels(annotation_file_path, annotation_format)
assert label_counts["person"] == 4
assert label_counts["truck"] == 1
assert label_counts["car"] == 1
annotation_format = "pascal"
annotation_file_path = os.path.join(str(data_dir), annotation_format, "pascal_1.xml")
label_counts = analyze.count_labels(annotation_file_path, annotation_format)
assert label_counts["person"] == 2
assert label_counts["car"] == 1
annotation_format = "darknet"
annotation_file_path = os.path.join(str(data_dir), annotation_format, "darknet_1.txt")
label_counts = analyze.count_labels(annotation_file_path, annotation_format)
assert label_counts["1"] == 2
assert label_counts["2"] == 1
assert label_counts["3"] == 1
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
"data_dir",
)
def test_count_tfrecord_examples(
data_dir,
):
"""
Test for the cvdata.analyze.count_tfrecord_examples() function
:param data_dir: temporary directory into which test files will be loaded
"""
tfrecord_dir = os.path.join(str(data_dir), "tfrecord")
example_count = analyze.count_tfrecord_examples(tfrecord_dir)
assert example_count == 100
| 32.813559 | 90 | 0.633781 |
76f099ee9bb4d5a69e6021466e1407a4f6a49372 | 8,610 | py | Python | model.py | young-geng/iColorTF | 81d151fba6a405769ed5aade845fda3e1f66a33c | ["MIT"] | 1 | 2018-03-15T15:29:11.000Z | 2018-03-15T15:29:11.000Z | model.py | young-geng/iColorTF | 81d151fba6a405769ed5aade845fda3e1f66a33c | ["MIT"] | null | null | null | model.py | young-geng/iColorTF | 81d151fba6a405769ed5aade845fda3e1f66a33c | ["MIT"] | null | null | null |
import numpy as np
import tensorflow as tf
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def pad2d(input_tensor, padding):
return tf.pad(
input_tensor, [[0, 0], [padding, padding], [padding, padding], [0, 0]],
'CONSTANT'
)
def conv2d(input_tensor, filters, kernel_size=3, strides=1,
padding=1, dilation=1):
if padding == 0:
padded = input_tensor
else:
padded = pad2d(input_tensor, padding)
return tf.layers.conv2d(
padded,
filters=filters, kernel_size=kernel_size, strides=strides,
padding='VALID', data_format='channels_last',
dilation_rate=(dilation, dilation)
)
def deconv2d(input_tensor, filters, kernel_size=3, strides=1):
# Deconvolution
return tf.layers.conv2d_transpose(
input_tensor,
filters=filters, kernel_size=kernel_size, strides=strides,
padding='SAME', data_format='channels_last'
)
def subsample2d(input_tensor, strides=2):
return tf.nn.avg_pool(
input_tensor, ksize=[1, 1, 1, 1], strides=[1, strides, strides, 1],
padding='VALID', data_format='NHWC'
) # Use average pool to simulate a subsample in height and width
def batch_norm(input_tensor, training):
return tf.layers.batch_normalization(
input_tensor, training=training
)
def relu(input_tensor):
return tf.nn.relu(input_tensor)
def conv2d_relu(*args, **kwargs):
c = conv2d(*args, **kwargs)
r = relu(c)
return c, r
def smooth_l1(input_tensor):
abs_val = tf.abs(input_tensor)
return tf.where(
tf.less_equal(abs_val, 1.0),
0.5 * tf.square(input_tensor),
abs_val - 0.5
)
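# smooth_l1 above is the Huber loss with delta = 1: quadratic (0.5 * x^2) for
# |x| <= 1 and linear (|x| - 0.5) beyond, so large ab-channel errors are
# penalized less harshly than under a plain L2 loss.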
class iColorUNet(object):
def __init__(self, data_l, groud_truth_ab, reveal_ab_mask):
self.net = AttrDict()
net = self.net
net.data_l = data_l
net.reveal_ab_mask = reveal_ab_mask
net.groud_truth_ab = groud_truth_ab
net.groud_truth_lab = tf.concat([data_l, groud_truth_ab], axis=3)
net.reveal_lab = tf.concat(
[tf.ones_like(data_l) * 60,
tf.slice(reveal_ab_mask, [0, 0, 0, 0], [-1, -1, -1, 2])],
axis=3
)
net.reveal_mask = tf.slice(
reveal_ab_mask, [0, 0, 0, 2], [-1, -1, -1, 1]
)
net.is_training = tf.placeholder_with_default(False, [])
self.build_unet()
def build_unet(self):
net = self.net
net.data_l_meansub = net.data_l - 50.0
        # Note: despite the Caffe-style layer names, all tensors here use the
        # TensorFlow channels-last (NHWC) layout.
net.bw_conv1_1 = conv2d(net.data_l_meansub, filters=64)
net.ab_conv1_1 = conv2d(net.reveal_ab_mask, filters=64)
net.conv1_1 = net.bw_conv1_1 + net.ab_conv1_1
net.relu1_1 = relu(net.conv1_1)
net.conv1_2, net.relu1_2 = conv2d_relu(net.relu1_1, filters=64)
net.conv1_2norm = batch_norm(net.relu1_2, training=net.is_training)
# Conv2
net.conv1_2norm_ss = subsample2d(net.conv1_2norm)
net.conv2_1, net.relu2_1 = conv2d_relu(net.conv1_2norm_ss, filters=128)
net.conv2_2, net.relu2_2 = conv2d_relu(net.relu2_1, filters=128)
net.conv2_2norm = batch_norm(net.relu2_2, training=net.is_training)
# Conv3
net.conv2_2norm_ss = subsample2d(net.conv2_2norm)
net.conv3_1, net.relu3_1 = conv2d_relu(net.conv2_2norm_ss, filters=256)
net.conv3_2, net.relu3_2 = conv2d_relu(net.relu3_1, filters=256)
net.conv3_3, net.relu3_3 = conv2d_relu(net.relu3_2, filters=256)
net.conv3_3norm = batch_norm(net.relu3_3, training=net.is_training)
# Conv4
net.conv3_3norm_ss = subsample2d(net.conv3_3norm)
net.conv4_1, net.relu4_1 = conv2d_relu(net.conv3_3norm_ss, filters=512)
net.conv4_2, net.relu4_2 = conv2d_relu(net.relu4_1, filters=512)
net.conv4_3, net.relu4_3 = conv2d_relu(net.relu4_2, filters=512)
net.conv4_3norm = batch_norm(net.relu4_3, training=net.is_training)
# Conv 5
net.conv5_1, net.relu5_1 = conv2d_relu(
net.conv4_3norm, filters=512, padding=2, dilation=2
)
net.conv5_2, net.relu5_2 = conv2d_relu(
net.relu5_1, filters=512, padding=2, dilation=2
)
net.conv5_3, net.relu5_3 = conv2d_relu(
net.relu5_2, filters=512, padding=2, dilation=2
)
net.conv5_3norm = batch_norm(net.relu5_3, training=net.is_training)
# Conv 6
net.conv6_1, net.relu6_1 = conv2d_relu(
net.conv5_3norm, filters=512, padding=2, dilation=2
)
net.conv6_2, net.relu6_2 = conv2d_relu(
net.relu6_1, filters=512, padding=2, dilation=2
)
net.conv6_3, net.relu6_3 = conv2d_relu(
net.relu6_2, filters=512, padding=2, dilation=2
)
net.conv6_3norm = batch_norm(net.relu6_3, training=net.is_training)
# Conv 7
net.conv7_1, net.relu7_1 = conv2d_relu(
net.conv6_3norm, filters=512
)
net.conv7_2, net.relu7_2 = conv2d_relu(
net.relu7_1, filters=512
)
net.conv7_3, net.relu7_3 = conv2d_relu(
net.relu7_2, filters=512
)
net.conv7_3norm = batch_norm(net.relu7_3, training=net.is_training)
# Conv8
net.conv3_3_short = conv2d(net.conv3_3norm, filters=256)
net.conv8_1 = deconv2d(
net.conv7_3norm, filters=256, kernel_size=4, strides=2
)
net.conv8_1_comb = net.conv8_1 + net.conv3_3_short
net.relu8_1_comb = relu(net.conv8_1_comb)
net.conv8_2, net.relu8_2 = conv2d_relu(
net.relu8_1_comb, filters=256
)
net.conv8_3, net.relu8_3 = conv2d_relu(
net.relu8_2, filters=256
)
net.conv8_3norm = batch_norm(net.relu8_3, training=net.is_training)
# Conv9
net.conv9_1 = deconv2d(
net.conv8_3norm, filters=128, kernel_size=4, strides=2
)
net.conv2_2_short = conv2d(
net.conv2_2norm, filters=128
)
net.conv9_1_comb = net.conv2_2_short + net.conv9_1
net.relu9_1_comb = relu(net.conv9_1_comb)
net.conv9_2, net.relu9_2 = conv2d_relu(
net.relu9_1_comb, filters=128
)
net.conv9_2norm = batch_norm(net.relu9_2, training=net.is_training)
# Conv10
net.conv1_2_short = conv2d(
net.conv1_2norm, filters=128
)
net.conv10_1 = deconv2d(
net.conv9_2norm, filters=128, kernel_size=4, strides=2
)
net.conv10_1_comb = net.conv1_2_short + net.conv10_1
net.relu10_1_comb = relu(net.conv10_1_comb)
net.conv10_2, net.relu10_2 = conv2d_relu(
net.relu10_1_comb, filters=128
)
net.conv10_ab = conv2d(
net.relu10_2, filters=2, kernel_size=1, padding=0
)
net.pred_ab_1 = tf.tanh(net.conv10_ab)
net.pred_ab_2 = net.pred_ab_1 * 100
net.pred_lab = tf.concat([net.data_l, net.pred_ab_2], axis=3)
net.loss_ab = tf.reduce_mean(
smooth_l1(net.pred_ab_2 - net.groud_truth_ab)
)
@property
def prediction_ab(self):
return self.net.pred_ab_2
@property
def prediction_lab(self):
return self.net.pred_lab
@property
def loss(self):
return self.net.loss_ab
@property
def is_training(self):
return self.net.is_training
@property
def groud_truth_lab(self):
return self.net.groud_truth_lab
@property
def reveal_lab(self):
return self.net.reveal_lab
@property
def reveal_mask(self):
return self.net.reveal_mask
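if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original
    # module; assumes a TF1-style graph, and the 176x176 crop size is an
    # arbitrary choice: any spatial size divisible by 8 works with the three
    # stride-2 stages above).
    data_l = tf.placeholder(tf.float32, [None, 176, 176, 1])
    gt_ab = tf.placeholder(tf.float32, [None, 176, 176, 2])
    reveal = tf.placeholder(tf.float32, [None, 176, 176, 3])
    unet = iColorUNet(data_l, gt_ab, reveal)
    train_op = tf.train.AdamOptimizer(1e-4).minimize(unet.loss)
    print(unet.prediction_ab.shape)  # expected: (?, 176, 176, 2)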
| 27.684887 | 79 | 0.566086 |
c25dfff9856298ab2af664ac926d16d6fa88eea8 | 253 | py | Python | taskqueue/__init__.py | neurodata/python-task-queue | f2b20d6b008e2c0cead418441c60d7dc07188848 | ["BSD-3-Clause"] | null | null | null | taskqueue/__init__.py | neurodata/python-task-queue | f2b20d6b008e2c0cead418441c60d7dc07188848 | ["BSD-3-Clause"] | null | null | null | taskqueue/__init__.py | neurodata/python-task-queue | f2b20d6b008e2c0cead418441c60d7dc07188848 | ["BSD-3-Clause"] | null | null | null |
from .registered_task import RegisteredTask, MockTask, PrintTask
from .taskqueue import TaskQueue, MockTaskQueue, LocalTaskQueue
from .secrets import (
QUEUE_NAME, TEST_QUEUE_NAME, QUEUE_TYPE,
PROJECT_NAME, AWS_DEFAULT_REGION
)
__version__ = '0.9.0'
| 31.625 | 64 | 0.818182 |
0513336c72cde352c087cd6cbffe9f7c6b149cd9 | 1,186 | py | Python | run/kerberom/modules/rom/_crypto/ARC4.py | zaza568/yo | 7e32382280647a0f07f74cd5fd54fb6ba68afd6e | ["OLDAP-2.4"] | 1 | 2021-10-08T17:49:57.000Z | 2021-10-08T17:49:57.000Z | run/kerberom/modules/rom/_crypto/ARC4.py | zaza568/yo | 7e32382280647a0f07f74cd5fd54fb6ba68afd6e | ["OLDAP-2.4"] | null | null | null | run/kerberom/modules/rom/_crypto/ARC4.py | zaza568/yo | 7e32382280647a0f07f74cd5fd54fb6ba68afd6e | ["OLDAP-2.4"] | 1 | 2021-06-23T13:09:05.000Z | 2021-06-23T13:09:05.000Z |
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <eddy (dot) maaalou (at) gmail (dot) com> wrote this file. As long as you
# retain this notice you can do whatever you want with this stuff. If we meet
# some day, and you think this stuff is worth it, you can buy me a beer in
# return. Fist0urs
# ----------------------------------------------------------------------------
#!/usr/bin/python
# -*- coding: utf-8 -*-
# by Fist0urs
class ARC4Cipher(object):
def __init__(self, key):
self.key = key
def encrypt(self, data):
        S = list(range(256))  # RC4 state; list() keeps item assignment working on Python 3
j = 0
out = []
for i in range(256):
j = (j + S[i] + ord( self.key[i % len(self.key)] )) % 256
S[i] , S[j] = S[j] , S[i]
i = j = 0
for char in data:
i = ( i + 1 ) % 256
j = ( j + S[i] ) % 256
S[i] , S[j] = S[j] , S[i]
out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256]))
return ''.join(out)
def decrypt(self, data):
return self.encrypt(data)
def new(key):
return ARC4Cipher(key)
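# Quick round-trip check for the RC4 implementation above (an illustrative
# sketch, not part of the original module):
#
#   cipher = new('Key')
#   ciphertext = cipher.encrypt('Plaintext')
#   assert new('Key').decrypt(ciphertext) == 'Plaintext'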
| 30.410256 | 78 | 0.43339 |
02883bcd4f62c4155c945ce3fadf1273e1942e7b | 284 | py | Python | app/user/urls.py | thein3000/recipe-app-api | b5cd78e534bf7a298c5c30a6b4dc70b7bebe5c7c | ["MIT"] | null | null | null | app/user/urls.py | thein3000/recipe-app-api | b5cd78e534bf7a298c5c30a6b4dc70b7bebe5c7c | ["MIT"] | 6 | 2020-05-15T10:53:08.000Z | 2022-02-10T14:31:30.000Z | app/user/urls.py | thein3000/recipe-app-api | b5cd78e534bf7a298c5c30a6b4dc70b7bebe5c7c | ["MIT"] | null | null | null |
from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name='create'),
path('token/', views.CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
] | 28.4 | 67 | 0.68662 |
fec8ee5ffb1b7c6a327b0731ab25d3b4909ffb54 | 9,496 | py | Python | utils/plot_utils.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | 1 | 2020-08-21T03:47:33.000Z | 2020-08-21T03:47:33.000Z | utils/plot_utils.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | null | null | null | utils/plot_utils.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | null | null | null | import datetime
import itertools
import os
from os.path import join  # used by concat_frames below

import imageio  # used by concat_frames below
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pdb import set_trace
def plot_split_count(counts_train, counts_test, path, name='split_count', save_figure=True, overwrite=True):
import matplotlib.pyplot as plt
N = len(counts_train)
train_means = [val for key, val in counts_train.items()]
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.barh(ind, train_means, width, color='r')
test_means = [val for key, val in counts_test.items()]
rects2 = ax.barh(ind + width, test_means, width, color='y')
# add some text for labels, title and axes ticks
ax.set_xlabel('Counts')
    ax.set_title('Number of label occurrences')
ax.set_yticks(ind + width / 2)
ax.set_yticklabels([key for key, val in counts_train.items()], fontsize=8)
ax.legend((rects1[0], rects2[0]), ('Train', 'Test'))
    def _autolabel(rects):
        """
        Attach a text label at the end of each bar displaying its value.
        """
        for rect in rects:
            # With barh, the count is encoded as the bar's width.
            value = rect.get_width()
            ax.text(1.05 * value, rect.get_y() + rect.get_height() / 2.,
                    '%d' % int(value),
                    ha='left', va='center')

    _autolabel(rects1)
    _autolabel(rects2)

    # save figure, if already exists then save under same name with current time stamp
    if save_figure:
        if not os.path.exists(path):
            os.makedirs(path)
        figure_path = os.path.join(path, name + '.jpg')
        if not os.path.isfile(figure_path) or overwrite:
            fig.savefig(figure_path)
        else:
            print("Figure already existed under given name. Saved with current time stamp")
            figure_path = os.path.join(path, name + '_{date:%Y-%m-%d_%H-%M-%S}.jpg'.format(date=datetime.datetime.now()))
            fig.savefig(figure_path)
    plt.close()
    return
def plot_mean(mean, path, name='mean', ylabel='loss', save_figure=True, overwrite=True):
# plots the mean and 1 sigma interval of given mean and std array.
# path is where to store and name is unique indicate of figure
fig = plt.figure()
n = len(mean)
epochs = np.arange(1, n+1, dtype=np.int32)
plt.plot(epochs, mean)
plt.xlabel('epoch')
plt.ylabel(ylabel)
# save figure, if already exists then save under same name with current time stamp
if save_figure:
if not os.path.exists(path):
os.makedirs(path)
figure_path = os.path.join(path, name + '.jpg')
if not os.path.isfile(figure_path) or overwrite:
fig.savefig(figure_path)
else:
print("Figure already existed under given name. Saved with current time stamp")
figure_path = os.path.join(path, name + '_{date:%Y-%m-%d_%H-%M-%S}.jpg'.format(date=datetime.datetime.now()))
fig.savefig(figure_path)
plt.close()
return
def plot_multiple_mean(mean, path, labels, name='multiple_mean', ylabel='accuracy', save_figure=True, overwrite=True):
# plots multiple results in one figue.
# mean and std:
# given as (M, N) array where m indicates
# the current experiment and n the epoch of the experiment
# name:
# list of names for each experiment
plt.figure()
M = mean.shape[0]
N = mean.shape[1]
epochs = np.arange(1, N+1)
    colors = plt.cm.hsv(np.linspace(0, 1, M, endpoint=False)).tolist()  # one distinct hue per experiment
for m in range(M):
plt.plot(epochs, mean[m, :], color=colors[m], label=labels[m])
plt.xlabel('Epoch')
plt.ylabel(ylabel)
# save figure, if already exists then save under same name with current time stamp
if save_figure:
if not os.path.exists(path):
os.makedirs(path)
figure_path = os.path.join(path, name + '.jpg')
if not os.path.isfile(figure_path) or overwrite:
plt.savefig(figure_path)
        else:
            print("Figure already existed under given name. Saved with current time stamp")
figure_path = os.path.join(path, name + '{date:%Y-%m-%d_%H:%M:%S}.jpg'.format(date=datetime.datetime.now()))
plt.savefig(figure_path)
plt.close()
return
def plot_confusion_matrix(cm, classes, path, name='confusion_matrix',
normalize=True,
title='Confusion matrix',
cmap=plt.cm.Blues, save_figure=True, overwrite=True):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
# cm = np.divide(cm.astype('float'), cm.sum(axis=1)[:, np.newaxis], out=np.zeros_like(cm.astype('float')), where=cm.sum(axis=1)[:, np.newaxis]!=0)
cm = np.divide(cm.astype('float'), cm.sum(axis=1)[:, np.newaxis])
else:
pass
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, fontsize=10, rotation=90)
plt.yticks(tick_marks, classes, fontsize=10)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# save figure, if already exists then save under same name with current time stamp
if save_figure:
if not os.path.exists(path):
os.makedirs(path)
figure_path = os.path.join(path, name + '.jpg')
if not os.path.isfile(figure_path) or overwrite:
plt.savefig(figure_path)
        else:
            print("Figure already existed under given name. Saved with current time stamp")
figure_path = os.path.join(path, name + '{date:%Y-%m-%d_%H:%M:%S}.jpg'.format(date=datetime.datetime.now()))
plt.savefig(figure_path)
plt.close()
return
def plot_results(mean, std, path, name, save_figure=True, overwrite=True):
# plots the mean and 1 sigma interval of given mean and std array.
# path is where to store and name is unique indicate of figure
fig = plt.figure()
n = len(mean)
epochs = np.arange(1, n+1, dtype=np.int32)
plt.errorbar(epochs, mean, std)
plt.xlabel('Epoch')
plt.ylabel('Return')
# save figure, if already exists then save under same name with current time stamp
if save_figure:
if not os.path.exists(path):
os.makedirs(path)
figure_path = os.path.join(path, name + '.jpg')
if not os.path.isfile(figure_path) or overwrite:
fig.savefig(figure_path)
else:
print("Figure already existed under given name. Saved with current time stamp")
figure_path = os.path.join(path, name + '_{date:%Y-%m-%d_%H-%M-%S}.jpg'.format(date=datetime.datetime.now()))
fig.savefig(figure_path)
plt.close()
return
def plot_multiple(mean, std, path, labels, name, save_figure=True, overwrite=True):
# plots multiple results in one figue.
# mean and std:
# given as (M, N) array where m indicates
# the current experiment and n the epoch of the experiment
# name:
# list of names for each experiment
plt.figure()
M = mean.shape[0]
N = mean.shape[1]
epochs = np.arange(1, N+1)
    colors = plt.cm.hsv(np.linspace(0, 1, M, endpoint=False)).tolist()  # one distinct hue per experiment
for m in range(M):
plt.errorbar(epochs, mean[m, :], std[m, :], color=colors[m], label=labels[m])
plt.xlabel('Epoch')
plt.ylabel('Return')
# save figure, if already exists then save under same name with current time stamp
if save_figure:
if not os.path.exists(path):
os.makedirs(path)
figure_path = os.path.join(path, name + '.jpg')
if not os.path.isfile(figure_path) or overwrite:
plt.savefig(figure_path)
else:
print("Figure already existed under given name. Saved with ccurrent time stamp")
figure_path = os.path.join(path, name + '{date:%Y-%m-%d_%H:%M:%S}.jpg'.format(date=datetime.datetime.now()))
plt.savefig(figure_path)
plt.close()
return
def save_statistics(mean, std, path, name):
if not os.path.exists(path):
os.makedirs(path)
# save mean
mean = np.asarray(mean)
np.save(os.path.join(path, name + '_mean'), mean)
# save std
std = np.asarray(std)
np.save(os.path.join(path, name + '_std'), std)
def concat_frames(video_file, outdir):
reader = imageio.get_reader(video_file)
for i, img in enumerate(reader):
if i == 0:
concat_img = img
continue
else:
concat_img = np.concatenate([concat_img, img], axis=1)
plt.imsave(join(outdir, video_file.split('.mp4')[0] + '.jpg'), concat_img)
def concat_frames_nosave(frames):
for i, img in enumerate(frames):
if i == 0:
concat_img = img
continue
else:
concat_img = np.concatenate([concat_img, img], axis=2)
return concat_img
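if __name__ == "__main__":
    # Smoke test (an added illustration; the values are made up): draw a
    # decaying loss curve and write it to ./figures/demo_mean.jpg using
    # plot_mean above.
    demo_loss = np.exp(-0.3 * np.arange(1, 11))
    plot_mean(demo_loss, path='./figures', name='demo_mean', ylabel='loss')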
| 37.09375 | 155 | 0.61984 |
af78851ae3d35243a32a9d7ac8b0ec23f0157102 | 10,276 | py | Python | src/uvm/seq/uvm_sequencer.py | rodrigomelo9/uvm-python | e3127eba2cc1519a61dc6f736d862a8dcd6fce20 | [
"Apache-2.0"
] | 140 | 2020-01-18T00:14:17.000Z | 2022-03-29T10:57:24.000Z | src/uvm/seq/uvm_sequencer.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 24 | 2020-01-18T18:40:58.000Z | 2021-03-25T17:39:07.000Z | src/uvm/seq/uvm_sequencer.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 34 | 2020-01-18T12:22:59.000Z | 2022-02-11T07:03:11.000Z | #//----------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2014 NVIDIA Corporation
#// Copyright 2019-2021 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
from typing import List
from .uvm_sequencer_param_base import UVMSequencerParamBase
from ..tlm1.uvm_sqr_connections import UVMSeqItemPullImp
from ..macros import uvm_component_utils
from ..base.uvm_globals import (uvm_check_output_args, uvm_zero_delay,
uvm_report_info, uvm_report_error)
from ..base.uvm_object_globals import *
FATAL_MSG1 = ("Item_done() called with no outstanding requests." +
" Each call to item_done() must be paired with a previous call to "
+ " get_next_item().")
class UVMSequencer(UVMSequencerParamBase):
"""
Group: Sequencer Interface
This is an interface for communicating with sequencers.
The interface is defined as::
Requests:
async def get_next_item (request: List)
async def try_next_item (request: List)
async def get (request: List)
async def peek (request: List)
Responses:
def item_done (response=None)
async def put (response: UVMSequenceItem)
Sync Control:
async def wait_for_sequences()
def has_do_available()
See `UVMSqrIfBase` for information about this interface.
"""
def __init__(self, name, parent=None):
"""
Standard component constructor that creates an instance of this class
using the given `name` and `parent`, if any.
Args:
name (str): Name of the sequencer
parent (UVMComponent): Parent component
"""
UVMSequencerParamBase.__init__(self, name, parent)
# // Variable: seq_item_export
# // This export provides access to this sequencer's implementation of the
# // sequencer interface.
self.seq_item_export = UVMSeqItemPullImp("seq_item_export", self)
self.sequence_item_requested = False
self.get_next_item_called = False
def stop_sequences(self):
"""
Tells the sequencer to kill all sequences and child sequences currently
operating on the sequencer, and remove all requests, locks and responses
that are currently queued. This essentially resets the sequencer to an
idle state.
"""
super().stop_sequences()
self.sequence_item_requested = 0
self.get_next_item_called = 0
# Empty the request fifo
if self.m_req_fifo.used():
uvm_report_info(self.get_full_name(),
"Sequences stopped. Removing request from sequencer fifo")
t = []
while self.m_req_fifo.try_get(t):
t = []
# extern virtual function string get_type_name()
async def get_next_item(self, t):
"""
Retrieves the next available item from a sequence.
Args:
t (list): Empty list into which item is appended
"""
uvm_check_output_args([t])
# req_item = None
# If a sequence_item has already been requested, then get_next_item()
# should not be called again until item_done() has been called.
if self.get_next_item_called is True:
self.uvm_report_error(self.get_full_name(),
"Get_next_item called twice without item_done or get in between", UVM_NONE)
if self.sequence_item_requested is False:
await self.m_select_sequence()
# Set flag indicating that the item has been requested to ensure that item_done or get
# is called between requests
self.sequence_item_requested = True
self.get_next_item_called = True
await self.m_req_fifo.peek(t)
async def try_next_item(self, t: List):
"""
Retrieves the next available item from a sequence if one is available.
Args:
t (List): Empty list into which item is appended
"""
if self.get_next_item_called == 1:
uvm_report_error(self.get_full_name(),
"get_next_item/try_next_item called twice without item_done or get in between",
UVM_NONE)
return
# allow state from last transaction to settle such that sequences'
# relevancy can be determined with up-to-date information
await self.wait_for_sequences()
# choose the sequence based on relevancy
selected_sequence = self.m_choose_next_request()
# return if none available
if selected_sequence == -1:
# t = None
return
# now, allow chosen sequence to resume
self.m_set_arbitration_completed(self.arb_sequence_q[selected_sequence].request_id)
seq = self.arb_sequence_q[selected_sequence].sequence_ptr
self.arb_sequence_q.delete(selected_sequence)
self.m_update_lists()
self.sequence_item_requested = True
self.get_next_item_called = 1
# give it one NBA to put a new item in the fifo
await self.wait_for_sequences()
# attempt to get the item; if it fails, produce an error and return
if not self.m_req_fifo.try_peek(t):
uvm_report_error("TRY_NEXT_BLOCKED", ("try_next_item: the selected sequence '" +
seq.get_full_name() + "' did not produce an item within an NBA delay. " +
"Sequences should not consume time between calls to start_item and finish_item. " +
"Returning null item."), UVM_NONE)
def item_done(self, item=None):
"""
Indicates that the request is completed.
Args:
item (UVMSequenceItem): Related sequence item.
"""
t = []
# Set flag to allow next get_next_item or peek to get a new sequence_item
self.sequence_item_requested = False
self.get_next_item_called = False
if self.m_req_fifo.try_get(t) is False:
self.uvm_report_fatal(self.get_full_name(), FATAL_MSG1)
else:
t = t[0]
self.m_wait_for_item_sequence_id = t.get_sequence_id()
self.m_wait_for_item_transaction_id = t.get_transaction_id()
if item is not None:
self.seq_item_export.put_response(item)
# Grant any locks as soon as possible
self.grant_queued_locks()
async def put(self, t):
"""
Sends a response back to the sequence that issued the request.
Args:
t (UVMSequenceItem): Response item.
"""
self.put_response(t)
await uvm_zero_delay()
async def get(self, t: List):
"""
Retrieves the next available item from a sequence into the given list.
Args:
t (List): List to hold the response
"""
if self.sequence_item_requested == 0:
await self.m_select_sequence()
self.sequence_item_requested = 1
await self.m_req_fifo.peek(t)
self.item_done()
async def peek(self, t: List):
"""
Gets the current request item if one is in the FIFO.
Args:
t (List): List for the output request.
"""
if self.sequence_item_requested == 0:
await self.m_select_sequence()
# Set flag indicating that the item has been requested to ensure that item_done or get
# is called between requests
self.sequence_item_requested = 1
await self.m_req_fifo.peek(t)
"""
Documented here for clarity, implemented in `UVMSequencerBase`
Task: `wait_for_sequences`
Waits for a sequence to have a new item available.
Function: `has_do_available`
Returns 1 if any sequence running on this sequencer is ready to supply
a transaction, 0 otherwise.
"""
#
# //-----------------
# // Internal Methods
# //-----------------
# // Do not use directly, not part of standard
#
# extern function void item_done_trigger(RSP item = null)
# function RSP item_done_get_trigger_data()
# return last_rsp(0)
# endfunction
# extern protected virtual function int m_find_number_driver_connections()
uvm_component_utils(UVMSequencer)
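# Illustrative counterpart (a sketch, not part of this file): a driver
# typically pulls items from this sequencer through its seq_item_port, which
# is connected to seq_item_export above. 'drive_item' is a hypothetical user
# method:
#
#     async def run_phase(self, phase):
#         while True:
#             req = []
#             await self.seq_item_port.get_next_item(req)
#             await self.drive_item(req[0])
#             self.seq_item_port.item_done()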
#//------------------------------------------------------------------------------
#// IMPLEMENTATION
#//------------------------------------------------------------------------------
#
#function string uvm_sequencer::get_type_name()
# return "uvm_sequencer"
#endfunction
#
#//-----------------
#// Internal Methods
#//-----------------
#
#// m_find_number_driver_connections
#// --------------------------------
#// Counting the number of connections is done at the end of
#// elaboration and the start of run. If the user neglects to
#// call super in one or the other, the sequencer will still
#// have the correct value
#
#function int uvm_sequencer::m_find_number_driver_connections()
# uvm_port_component_base provided_to_port_list[string]
# uvm_port_component_base seq_port_base
#
# // Check that the seq_item_pull_port is connected
# seq_port_base = seq_item_export.get_comp()
# seq_port_base.get_provided_to(provided_to_port_list)
# return provided_to_port_list.num()
#endfunction
#
#
#
#
#// item_done_trigger
#// -----------------
#
#function void uvm_sequencer::item_done_trigger(RSP item = null)
# item_done(item)
#endfunction
| 33.802632 | 99 | 0.625535 |
e2367c3b3c5d2fd1b22f5af2201ffb2770ce4a9c | 3,975 | py | Python | assignments/2018/assignment2/cs231n/gradient_check.py | dpetrini/cs231n.github.io | 7dc3be43c523889eafebdef3dc65ef35aab69d16 | [
"MIT"
] | 2 | 2021-11-04T18:35:47.000Z | 2021-11-09T01:43:36.000Z | hw2/cs682/gradient_check.py | michael940716/CS682_LCN | 32ca389b642387b637422e4383c9e709779f9b4c | [
"MIT"
] | null | null | null | hw2/cs682/gradient_check.py | michael940716/CS682_LCN | 32ca389b642387b637422e4383c9e709779f9b4c | [
"MIT"
] | null | null | null | from __future__ import print_function
from builtins import range
#from past.builtins import xrange
import numpy as np
from random import randrange
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
"""
a naive implementation of numerical gradient of f at x
- f should be a function that takes a single argument
- x is the point (numpy array) to evaluate the gradient at
"""
fx = f(x) # evaluate function value at original point
grad = np.zeros_like(x)
# iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
# evaluate function at x+h
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h # increment by h
        fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # restore
# compute the partial derivative with centered formula
grad[ix] = (fxph - fxmh) / (2 * h) # the slope
if verbose:
print(ix, grad[ix])
it.iternext() # step to next dimension
return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
"""
Compute numeric gradients for a function that operates on input
and output blobs.
We assume that f accepts several input blobs as arguments, followed by a
blob where outputs will be written. For example, f might be called like:
f(x, w, out)
where x and w are input Blobs, and the result of f will be written to out.
Inputs:
- f: function
- inputs: tuple of input blobs
- output: output blob
- h: step size
"""
numeric_diffs = []
for input_blob in inputs:
diff = np.zeros_like(input_blob.diffs)
it = np.nditer(input_blob.vals, flags=['multi_index'],
op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
orig = input_blob.vals[idx]
input_blob.vals[idx] = orig + h
f(*(inputs + (output,)))
pos = np.copy(output.vals)
input_blob.vals[idx] = orig - h
f(*(inputs + (output,)))
neg = np.copy(output.vals)
input_blob.vals[idx] = orig
diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)
it.iternext()
numeric_diffs.append(diff)
return numeric_diffs
def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
return eval_numerical_gradient_blobs(lambda *args: net.forward(),
inputs, output, h=h)
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
"""
    Sample a few random elements and only return the numerical
    gradient in those dimensions.
"""
for i in range(num_checks):
ix = tuple([randrange(m) for m in x.shape])
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h # increment by h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # reset
grad_numerical = (fxph - fxmh) / (2 * h)
grad_analytic = analytic_grad[ix]
rel_error = (abs(grad_numerical - grad_analytic) /
(abs(grad_numerical) + abs(grad_analytic)))
print('numerical: %f analytic: %f, relative error: %e'
%(grad_numerical, grad_analytic, rel_error))
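if __name__ == "__main__":
    # Quick self-check (an added illustration): for f(x) = sum(x ** 2) the
    # analytic gradient is 2 * x, so the relative errors printed by
    # grad_check_sparse should be close to zero.
    x = np.random.randn(4, 5)
    grad_check_sparse(lambda v: np.sum(v ** 2), x, 2 * x, num_checks=5)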
| 30.576923 | 78 | 0.588428 |
47e9233ad594e27e559e049bc7d0a16c1d909729 | 3,206 | py | Python | _includes/vis_kode/euklids_algoritme copy.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null | _includes/vis_kode/euklids_algoritme copy.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null | _includes/vis_kode/euklids_algoritme copy.py | Andremartiny/AndreMartiny.github.io | 54d6ebadb735bc865ee152a59d6ee964a0cf9c0c | [
"MIT"
] | null | null | null |
# def FellesFaktorAv(a, b):
# while a != b:
# a, b = max(a, b), min(a, b) # Define a to be the larger of a and b.
# a = a - b
# return a
# def EuklidsMetodeForFellesFaktorAv(a, b):
# while a!= 0:
# a, b = max(a, b), min(a, b) # Define a to be the larger of a and b.
# a = a % b
# return b
# print(EuklidsMetodeForFellesFaktorAv(5,15))
# def EM1(a, b):
# likninger = [] # Here we store all our equations as lists
# while True:
# a, b= max(a, b), min(a, b) # let a be the larger number
# helltallsdivisjon = a // b
# rest = a % b
# if rest == 0: # We continue until the common factor has been found
# break
# # Append the equation a = quotient * b + remainder
# likninger.append([a, helltallsdivisjon, b, rest])
# a = rest
# return likninger
# def LosDiofantiskLikningMedKoeffisienter(a, b):
# print(f"\nThe common factor is {FellesFaktorAv(a,b)}.\n")
# likninger = EM1(a,b)
# for koeffisienter in likninger:
# print(f"{koeffisienter[0]} = {koeffisienter[1]} · {koeffisienter[2]} + {koeffisienter[3]}\n")
# print("We now reverse the process \n\n")
# sistelikning = likninger[-1]
# reversering = [[sistelikning[-1], 1, sistelikning[0], -sistelikning[1], sistelikning[2]]]
# print(f"{reversering[-1][0]} = {reversering[-1][1]} · {reversering[-1][2]}"+ ("+" if reversering[-1][3]> 0 else "-") +f" {reversering[-1][3]} · {reversering[-1][4]}\n")
# for i in range(len(likninger)-1):
# d = reversering[-1][-2]
# r_nminus1 = likninger[-i-2][0]
# c = reversering[-1][1]
# c_n = likninger[-i-2][1]
# r_n = likninger[-i-1][0]
# reversering.append([likninger[-1][-1], d, r_nminus1, (c+d*(-c_n)), r_n])
# print(f"{reversering[-1][0]} = {reversering[-1][1]} · {reversering[-1][2]} "+ ("+" if reversering[-1][3]> 0 else "") +f" {reversering[-1][3]} · {reversering[-1][4]}\n")
def RekursivLosningAvDiofantiskLikningMedKoeffisienter(a, b):
    if a < b:
        # Swap so that a >= b, and swap the returned coefficients back so
        # they match the original argument order in a*x + b*y = sff(a, b).
        sff, x, y = RekursivLosningAvDiofantiskLikningMedKoeffisienter(b, a)
        return sff, y, x
if (b == 0):
print(f"\nStørste felles faktor er {a}\n")
# sff(a,b) = a = a * 1 + b * 0
return a, 1, 0
else:
        # By finding a solution to sff(a,b) = b * x + (a % b) * y
        # we can use the fact that (a % b) = a - (a // b) * b.
        # Hence
        # sff(a,b) = a * y + b * (x - (a // b) * y)
print(f"{a} = {b} · {a // b} + {a % b}")
sff, x, y = RekursivLosningAvDiofantiskLikningMedKoeffisienter(b, a % b)
# print(f"{sff} = {a} · {y} + {b} · ({x} - ({a // b}) · {y})")
x, y = y, (x-(a // b) * y)
print(f"{sff} = {a} · {x} + {b} · {y}")
return sff, x, y
RekursivLosningAvDiofantiskLikningMedKoeffisienter(1027, 729)
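# Sanity check (an added illustration, not in the original script): the
# returned coefficients satisfy Bezout's identity a*x + b*y = sff(a, b).
# Calling the function again re-prints the derivation above.
sff, x, y = RekursivLosningAvDiofantiskLikningMedKoeffisienter(1027, 729)
assert 1027 * x + 729 * y == sff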
# def LosningAvDiofantiskLikningMedKoeffisienter(a, b):
# if a < b:
# return LosningAvDiofantiskLikningMedKoeffisienter(b, a)
# if (b == 0):
# return a, 1, 0
# else:
# sff, x, y = LosningAvDiofantiskLikningMedKoeffisienter(b, a % b)
# x, y = y, (x - (a // b) * y)
# return sff, x, y | 41.102564 | 178 | 0.538677 |
3ea86f9ae400ebc58053648c97439d8b2e382d8d | 1,718 | py | Python | tests/testUtils.py | ivanvladimir/ShiCo | e8566896bdc212a556675a0e3a6bbab522bd8271 | [
"Apache-2.0"
] | null | null | null | tests/testUtils.py | ivanvladimir/ShiCo | e8566896bdc212a556675a0e3a6bbab522bd8271 | [
"Apache-2.0"
] | 15 | 2018-09-27T12:58:19.000Z | 2020-04-14T11:39:15.000Z | tests/testUtils.py | ivanvladimir/ShiCo | e8566896bdc212a556675a0e3a6bbab522bd8271 | [
"Apache-2.0"
] | 1 | 2020-06-03T12:43:49.000Z | 2020-06-03T12:43:49.000Z | import unittest
from shico import utils as shU
import numpy as np
class TestUtils(unittest.TestCase):
'''Tests for utils'''
@classmethod
def setUpClass(self):
windowSize = 15
self.y1 = 1960
self.y2 = 1970
self.y0 = self.y1-windowSize
self.yN = self.y1+windowSize
self.yRange = np.linspace(self.y0, self.yN)
def testJSD(self):
'''Test JSD weighting function'''
self._doTests(shU.weightJSD, 'JSD')
def testGaussian(self):
'''Test Gaussian weighting function'''
self._doTests(shU.weightGauss, 'Gaussian')
# Test with non-default C
self._doTests(lambda y1, y2: shU.weightGauss(y1, y2, c=5), 'Gaussian')
def testLinear(self):
'''Test linear weighting function'''
self._doTests(shU.weightLinear, 'Linear')
# Test with non-default A
self._doTests(lambda y1, y2: shU.weightLinear(y1, y2, a=5), 'Linear')
def _doTests(self, f, name):
''' Apply sanity checks to the given weighting function. '''
self.assertEqual(f(self.y1, self.y1), 1,
name + ' should be 1 for the same number')
self.assertEqual(f(self.y1, self.y1), 1,
name + ' should be symmetric')
self.assertGreater(f(self.y1, self.y1), 0,
name + ' should be positive')
# Test function in a range
wRange = np.array([f(self.y1, yi) for yi in self.yRange])
self.assertLessEqual(wRange.max(), 1,
name + ' should have upper bound 1')
self.assertGreaterEqual(wRange.min(), 0,
name + ' should have lower bound 0')
| 35.791667 | 78 | 0.574505 |
4f5bfd85920518554cf86cc03b71d1dfe7678f73 | 4,577 | py | Python | apphub/adversarial_training/ecc_hinge/ecc_hinge_tf.py | fastestimator/fastestimator | a8ea30c5da2d92ff8aa0de0084d10c86fb8dfd10 | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | apphub/adversarial_training/ecc_hinge/ecc_hinge_tf.py | fastestimator/fastestimator | a8ea30c5da2d92ff8aa0de0084d10c86fb8dfd10 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | apphub/adversarial_training/ecc_hinge/ecc_hinge_tf.py | fastestimator/fastestimator | a8ea30c5da2d92ff8aa0de0084d10c86fb8dfd10 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tempfile
from tensorflow.python.keras.layers import Concatenate, Conv2D, Dense, Flatten, Input, MaxPooling2D
from tensorflow.python.keras.models import Model
import fastestimator as fe
from fastestimator.dataset.data import cifair10
from fastestimator.op.numpyop.univariate import Hadamard, Normalize
from fastestimator.op.tensorop import UnHadamard
from fastestimator.op.tensorop.gradient import FGSM, Watch
from fastestimator.op.tensorop.loss import Hinge
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.metric import Accuracy
def ecc_lenet(input_shape=(32, 32, 3), classes=10, code_length=None):
inputs = Input(input_shape)
conv1 = Conv2D(32, (3, 3), activation='elu')(inputs)
pool1 = MaxPooling2D((2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), activation='elu')(pool1)
pool2 = MaxPooling2D((2, 2))(conv2)
conv3 = Conv2D(64, (3, 3), activation='elu')(pool2)
flat = Flatten()(conv3)
# Create multiple heads
code_length = code_length or max(16, 1 << (classes - 1).bit_length())
n_heads = code_length // 4
heads = [Dense(16, activation='elu')(flat) for _ in range(n_heads)]
heads2 = [Dense(code_length // n_heads, activation='tanh')(head) for head in heads]
outputs = Concatenate()(heads2)
return Model(inputs=inputs, outputs=outputs)
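# Worked numbers for the head layout above (an added note; classes=10 as in
# this file): code_length defaults to max(16, 1 << (10 - 1).bit_length())
# = max(16, 16) = 16, giving n_heads = 16 // 4 = 4 heads of 16 // 4 = 4 tanh
# units each -- one unit per bit of the 16-bit codewords expected from the
# pipeline's Hadamard op below.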
def get_estimator(epsilon=0.04,
epochs=20,
batch_size=32,
code_length=16,
train_steps_per_epoch=None,
eval_steps_per_epoch=None,
save_dir=tempfile.mkdtemp()):
# step 1
train_data, eval_data = cifair10.load_data()
test_data = eval_data.split(0.5)
pipeline = fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
test_data=test_data,
batch_size=batch_size,
ops=[
Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
Hadamard(inputs="y", outputs="y_code", n_classes=10)
])
# step 2
model = fe.build(model_fn=lambda: ecc_lenet(code_length=code_length), optimizer_fn='adam')
network = fe.Network(ops=[
Watch(inputs="x", mode=('eval', 'test')),
ModelOp(model=model, inputs="x", outputs="y_pred_code"),
Hinge(inputs=("y_pred_code", "y_code"), outputs="base_hinge"),
UpdateOp(model=model, loss_name="base_hinge"),
UnHadamard(inputs="y_pred_code", outputs="y_pred", n_classes=10, mode=('eval', 'test')),
# The adversarial attack:
FGSM(data="x", loss="base_hinge", outputs="x_adverse_hinge", epsilon=epsilon, mode=('eval', 'test')),
ModelOp(model=model, inputs="x_adverse_hinge", outputs="y_pred_adv_hinge_code", mode=('eval', 'test')),
Hinge(inputs=("y_pred_adv_hinge_code", "y_code"), outputs="adv_hinge", mode=('eval', 'test')),
UnHadamard(inputs="y_pred_adv_hinge_code", outputs="y_pred_adv_hinge", n_classes=10, mode=('eval', 'test')),
])
# step 3
traces = [
Accuracy(true_key="y", pred_key="y_pred", output_name="base_accuracy"),
Accuracy(true_key="y", pred_key="y_pred_adv_hinge", output_name="adversarial_accuracy"),
BestModelSaver(model=model, save_dir=save_dir, metric="base_hinge", save_best_mode="min", load_best_final=True)
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=train_steps_per_epoch,
eval_steps_per_epoch=eval_steps_per_epoch,
monitor_names=["adv_hinge"])
return estimator
if __name__ == "__main__":
est = get_estimator()
est.fit()
est.test()
| 44.436893 | 119 | 0.653048 |
17290ec9441e3710d21add89af52c722b1e1be64 | 24,978 | py | Python | tests/test_asg.py | ktravis/cloud-custodian | d5f61d8a09f8a37a85777b527ee87c363040fbd1 | [
"Apache-2.0"
] | null | null | null | tests/test_asg.py | ktravis/cloud-custodian | d5f61d8a09f8a37a85777b527ee87c363040fbd1 | [
"Apache-2.0"
] | null | null | null | tests/test_asg.py | ktravis/cloud-custodian | d5f61d8a09f8a37a85777b527ee87c363040fbd1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
from dateutil import zoneinfo
from .common import BaseTest
from botocore.exceptions import ClientError
from c7n.resources.asg import NotEncryptedFilter
class LaunchConfigTest(BaseTest):
def test_config_unused(self):
factory = self.replay_flight_data('test_launch_config_unused')
p = self.load_policy({
'name': 'unused-cfg',
'resource': 'launch-config',
'filters': [{'type': 'unused'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['LaunchConfigurationName'],
'CloudClusterCopy')
def test_config_delete(self):
factory = self.replay_flight_data('test_launch_config_delete')
p = self.load_policy({
'name': 'delete-cfg',
'resource': 'launch-config',
'filters': [{
'LaunchConfigurationName': 'CloudClusterCopy'}],
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['LaunchConfigurationName'],
'CloudClusterCopy')
class AutoScalingTest(BaseTest):
def get_ec2_tags(self, ec2, instance_id):
results = ec2.describe_tags(
Filters=[
{'Name': 'resource-id',
'Values': [instance_id]},
{'Name': 'resource-type',
'Values': ['instance']}])['Tags']
return {t['Key']: t['Value'] for t in results}
def test_asg_delete(self):
factory = self.replay_flight_data('test_asg_delete')
p = self.load_policy({
'name': 'asg-delete',
'resource': 'asg',
'filters': [
{'AutoScalingGroupName': 'ContainersFTW'}],
'actions': [{'type': 'delete', 'force': True}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['AutoScalingGroupName'], 'ContainersFTW')
def test_asg_non_encrypted_filter(self):
factory = self.replay_flight_data('test_asg_non_encrypted_filter')
p = self.load_policy({
'name': 'asg-encrypted-filter',
'resource': 'asg',
'filters': [{'type': 'not-encrypted'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['Unencrypted'], ['Image', 'LaunchConfig'])
def test_get_bad_snapshot_malformed(self):
operation_name = "DescribeSnapshots"
error_response = {
'Error': {
'Message': 'Invalid id: "snap-malformedsnap"',
'Code': 'InvalidSnapshotID.Malformed'}
}
e = ClientError(error_response, operation_name)
snap = NotEncryptedFilter.get_bad_snapshot(e)
self.assertEquals(snap, "snap-malformedsnap")
def test_get_bad_snapshot_notfound(self):
operation_name = "DescribeSnapshots"
error_response = {
'Error': {
'Message': "The snapshot 'snap-notfound' does not exist.",
'Code': 'InvalidSnapshot.NotFound'}
}
e = ClientError(error_response, operation_name)
snap = NotEncryptedFilter.get_bad_snapshot(e)
self.assertEquals(snap, "snap-notfound")
def test_asg_image_age_filter(self):
factory = self.replay_flight_data('test_asg_image_age_filter')
p = self.load_policy({
'name': 'asg-cfg-filter',
'resource': 'asg',
'filters': [
{'type': 'image-age',
'days': 90}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_asg_config_filter(self):
factory = self.replay_flight_data('test_asg_config_filter')
p = self.load_policy({
'name': 'asg-cfg-filter',
'resource': 'asg',
'filters': [
{'type': 'launch-config',
'key': 'ImageId',
'value': 'ami-9abea4fb'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_asg_vpc_filter(self):
factory = self.replay_flight_data('test_asg_vpc_filter')
p = self.load_policy({
'name': 'asg-vpc-filter',
'resource': 'asg',
'filters': [
{'type': 'vpc-id',
'value': 'vpc-d2d616b5'}]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['LaunchConfigurationName'], 'foo-bar')
def test_asg_tag_and_propagate(self):
factory = self.replay_flight_data('test_asg_tag')
p = self.load_policy({
'name': 'asg-tag',
'resource': 'asg',
'filters': [
{'tag:Platform': 'ubuntu'}],
'actions': [
{'type': 'tag',
'key': 'CustomerId', 'value': 'GetSome',
'propagate': True},
{'type': 'propagate-tags',
'trim': True, 'tags': ['CustomerId', 'Platform']}
]
}, session_factory=factory)
session = factory()
client = session.client('autoscaling')
# Put an orphan tag on an instance
result = client.describe_auto_scaling_groups()[
'AutoScalingGroups'].pop()
ec2 = session.client('ec2')
instance_id = result['Instances'][0]['InstanceId']
ec2.create_tags(
Resources=[instance_id],
Tags=[{'Key': 'Home', 'Value': 'Earth'}])
# Run the policy
resources = p.run()
self.assertEqual(len(resources), 1)
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: (t['Value'], t['PropagateAtLaunch'])
for t in result['Tags']}
self.assertTrue('CustomerId' in tag_map)
self.assertEqual(tag_map['CustomerId'][0], 'GetSome')
self.assertEqual(tag_map['CustomerId'][1], True)
tag_map = self.get_ec2_tags(ec2, instance_id)
self.assertTrue('CustomerId' in tag_map)
self.assertFalse('Home' in tag_map)
def test_asg_remove_tag(self):
factory = self.replay_flight_data('test_asg_remove_tag')
p = self.load_policy({
'name': 'asg-remove-tag',
'resource': 'asg',
'filters': [
{'tag:CustomerId': 'not-null'}],
'actions': [
{'type': 'remove-tag',
'key': 'CustomerId'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: (t['Value'], t['PropagateAtLaunch'])
for t in result['Tags']}
self.assertFalse('CustomerId' in tag_map)
def test_asg_mark_for_op(self):
factory = self.replay_flight_data('test_asg_mark_for_op')
p = self.load_policy({
'name': 'asg-mark-for-op',
'resource': 'asg',
'filters': [
{'tag:Platform': 'ubuntu'}],
'actions': [
{'type': 'mark-for-op', 'key': 'custodian_action',
'op': 'suspend', 'days': 1}
],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: t['Value'] for t in result['Tags']}
self.assertTrue('custodian_action' in tag_map)
self.assertTrue('suspend@' in tag_map['custodian_action'])
def test_asg_mark_for_op_hours(self):
session_factory = self.replay_flight_data('test_asg_mark_for_op_hours')
session = session_factory(region='us-east-1')
asg = session.client('autoscaling')
localtz = zoneinfo.gettz('America/New_York')
dt = datetime.now(localtz)
dt = dt.replace(year=2018, month=2, day=20, hour=12, minute=42,
second=0, microsecond=0)
policy = self.load_policy({
'name': 'asg-mark-for-op-hours',
'resource': 'asg',
'filters': [
{'tag:Service': 'absent'}
],
'actions': [
{'type': 'mark-for-op',
'op': 'delete',
'hours': 1}
],
}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
describe_auto_scaling_groups = asg.describe_auto_scaling_groups(
AutoScalingGroupNames=['marked']
)
        resource = describe_auto_scaling_groups['AutoScalingGroups'][0]
tags = [
t['Value'] for t in resource['Tags'] if t['Key'] == 'maid_status']
result = datetime.strptime(
tags[0].strip().split('@', 1)[-1], '%Y/%m/%d %H%M %Z').replace(
tzinfo=localtz)
self.assertEqual(result, dt)
def test_asg_marked_for_op_hours(self):
session_factory = self.replay_flight_data('test_asg_marked_for_op_hours')
policy = self.load_policy({
'name': 'asg-marked-for-delete',
'resource': 'asg',
'filters': [{
'type': 'marked-for-op',
'op': 'delete'
}]
}, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['AutoScalingGroupName'], 'marked')
def test_asg_rename_tag(self):
factory = self.replay_flight_data('test_asg_rename')
p = self.load_policy({
'name': 'asg-rename-tag',
'resource': 'asg',
'filters': [
{'tag:Platform': 'ubuntu'}],
'actions': [
{'type': 'rename-tag', 'source': 'Platform', 'dest': 'Linux'}
],
}, session_factory=factory)
# Fetch ASG
session = factory()
client = session.client('autoscaling')
result = client.describe_auto_scaling_groups()['AutoScalingGroups'].pop()
# Fetch instance and make sure it has tags
ec2 = session.client('ec2')
instance_id = result['Instances'][0]['InstanceId']
tag_map = self.get_ec2_tags(ec2, instance_id)
self.assertTrue('Platform' in tag_map)
self.assertFalse('Linux' in tag_map)
# Run the policy
resources = p.run()
self.assertEqual(len(resources), 1)
# Validate the ASG tag changed
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: (t['Value'], t['PropagateAtLaunch'])
for t in result['Tags']}
self.assertFalse('Platform' in tag_map)
self.assertTrue('Linux' in tag_map)
tag_map = self.get_ec2_tags(ec2, instance_id)
self.assertFalse('Platform' in tag_map)
self.assertTrue('Linux' in tag_map)
def test_asg_suspend(self):
factory = self.replay_flight_data('test_asg_suspend')
p = self.load_policy({
'name': 'asg-suspend',
'resource': 'asg',
'filters': [
{'tag:Platform': 'not-null'}],
'actions': ['suspend'],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
self.assertTrue(result['SuspendedProcesses'])
def test_asg_suspend_when_no_instances(self):
factory = self.replay_flight_data('test_asg_suspend_when_no_instances')
client = factory().client('autoscaling')
# Ensure we have a non-suspended ASG with no instances
name = 'zero-instances'
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[name])['AutoScalingGroups'].pop()
self.assertEqual(len(result['SuspendedProcesses']), 0)
self.assertEqual(len(result['Instances']), 0)
# Run policy and verify suspend occurs
p = self.load_policy({
'name': 'asg-suspend',
'resource': 'asg',
'filters': [
{'AutoScalingGroupName': name}],
'actions': ['suspend'],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[name])['AutoScalingGroups'].pop()
self.assertTrue(result['SuspendedProcesses'])
def test_asg_resume(self):
factory = self.replay_flight_data('test_asg_resume')
p = self.load_policy({
'name': 'asg-suspend',
'resource': 'asg',
'filters': [
{'tag:Platform': 'not-null'}],
'actions': [
{'type': 'resume', 'delay': 0.1}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
self.assertFalse(result['SuspendedProcesses'])
def test_asg_resize_save_to_tag(self):
factory = self.replay_flight_data('test_asg_resize_save_to_tag')
p = self.load_policy({
'name': 'asg-resize',
'resource': 'asg',
'filters': [
{'tag:CustodianUnitTest': 'not-null'}],
'actions': [
{'type': 'resize', 'min-size': 0, 'desired-size': 0,
'save-options-tag': 'OffHoursPrevious'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
# test that we set ASG size to zero
self.assertEqual(result['MinSize'], 0)
self.assertEqual(result['DesiredCapacity'], 0)
tag_map = {t['Key']: t['Value'] for t in result['Tags']}
# test that we saved state to a tag
self.assertTrue('OffHoursPrevious' in tag_map)
self.assertEqual(tag_map['OffHoursPrevious'],
'DesiredCapacity=2;MinSize=2;MaxSize=2')
def test_asg_resize_restore_from_tag(self):
factory = self.replay_flight_data('test_asg_resize_restore_from_tag')
p = self.load_policy({
'name': 'asg-resize',
'resource': 'asg',
'filters': [
{'tag:CustodianUnitTest': 'not-null'},
{'tag:OffHoursPrevious': 'not-null'}],
'actions': [
{'type': 'resize', 'restore-options-tag': 'OffHoursPrevious'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
# test that we set ASG min and desired back from 0 to 2
self.assertEqual(result['MinSize'], 2)
self.assertEqual(result['DesiredCapacity'], 2)
def test_asg_resize_to_current(self):
factory = self.replay_flight_data('test_asg_resize_to_current')
# test scenario:
# - create ASG with min=2, desired=2 running in account A
# - launch config specifies a test AMI in account B
# - remove permissions on the AMI for account A
# - kill one of the 2 running instances, wait until the ASG sees that
# - leaves min=2, desired=2, running=1 and it's unable to launch more
p = self.load_policy({
'name': 'asg-resize',
'resource': 'asg',
'filters': [
{'type': 'capacity-delta'},
{'tag:CustodianUnitTest': 'not-null'}],
'actions': [
{'type': 'resize', 'desired-size': 'current'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
# test that we changed ASG min and desired from 2 to 1
self.assertEqual(result['MinSize'], 1)
self.assertEqual(result['DesiredCapacity'], 1)
def test_asg_third_ami_filter(self):
factory = self.replay_flight_data('test_asg_invalid_third_ami')
p = self.load_policy({
'name': 'asg-invalid-filter-3ami',
'resource': 'asg',
'filters': ['invalid']}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_asg_invalid_filter_good(self):
factory = self.replay_flight_data('test_asg_invalid_filter_good')
p = self.load_policy({
'name': 'asg-invalid-filter',
'resource': 'asg',
'filters': ['invalid']
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_asg_invalid_filter_bad(self):
factory = self.replay_flight_data('test_asg_invalid_filter_bad')
p = self.load_policy({
'name': 'asg-invalid-filter',
'resource': 'asg',
'filters': ['invalid']
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
s = set([x[0] for x in resources[0]['Invalid']])
self.assertTrue('invalid-subnet' in s)
self.assertTrue('invalid-security-group' in s)
def test_asg_subnet(self):
factory = self.replay_flight_data('test_asg_subnet')
p = self.load_policy({
'name': 'asg-sub',
'resource': 'asg',
'filters': [
{'type': 'subnet',
'match-resource': True,
'key': 'tag:NetworkLocation',
'value': ''}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
sorted(resources[0]['c7n:matched-subnets']),
sorted(['subnet-65dbce1d', 'subnet-b77a4ffd', 'subnet-db9f62b2']))
def test_asg_security_group_not_matched(self):
factory = self.replay_flight_data(
'test_asg_security_group_not_matched')
p = self.load_policy({
'name': 'asg-sg',
'resource': 'asg',
'filters': [
{'type': 'security-group',
'key': 'tag:NetworkLocation',
'op': 'not-equal',
'value': ''}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['c7n:matched-security-groups'], ['sg-0b3d3377'])
def test_asg_security_group(self):
factory = self.replay_flight_data('test_asg_security_group')
p = self.load_policy({
'name': 'asg-sg',
'resource': 'asg',
'filters': [
{'type': 'security-group',
'key': 'GroupName',
'value': 'default'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['AutoScalingGroupName'], 'ContainersFTW')
def test_asg_propagate_tag_filter(self):
session = self.replay_flight_data('test_asg_propagate_tag_filter')
policy = self.load_policy({
'name': 'asg-propagated-tag-filter',
'resource': 'asg',
'filters': [{
'type': 'progagated-tags',
'keys': ['Tag01', 'Tag02', 'Tag03']}
]}, session_factory=session)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['AutoScalingGroupName'], 'c7n.asg.ec2.01')
def test_asg_propagate_tag_missing(self):
session = self.replay_flight_data('test_asg_propagate_tag_missing')
policy = self.load_policy({
'name': 'asg-propagated-tag-filter',
'resource': 'asg',
'filters': [{
'type': 'progagated-tags',
'match': False,
'keys': ['Tag01', 'Tag02', 'Tag03']}
]}, session_factory=session)
resources = policy.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
sorted([r['AutoScalingGroupName'] for r in resources]),
['c7n.asg.ec2.02', 'c7n.asg.ec2.03'])
def test_asg_not_propagate_tag_match(self):
session = self.replay_flight_data('test_asg_not_propagate_match')
policy = self.load_policy({
'name': 'asg-propagated-tag-filter',
'resource': 'asg',
'filters': [{
'type': 'progagated-tags',
'keys': ['Tag01', 'Tag02', 'Tag03'],
'propagate': False}
]}, session_factory=session)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['AutoScalingGroupName'], 'c7n-asg-np-match')
def test_asg_not_propagate_tag_missing(self):
session = self.replay_flight_data('test_asg_not_propagate_missing')
policy = self.load_policy({
'name': 'asg-propagated-tag-filter',
'resource': 'asg',
'filters': [{
'type': 'progagated-tags',
'keys': ['Tag01', 'Tag02', 'Tag03'],
'match': False,
'propagate': False}
]}, session_factory=session)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['AutoScalingGroupName'], 'c7n-asg-np-missing')
def test_asg_filter_capacity_delta_match(self):
factory = self.replay_flight_data('test_asg_filter_capacity_delta_match')
p = self.load_policy({
'name': 'asg-capacity-delta',
'resource': 'asg',
'filters': [
{'type': 'capacity-delta'},
{'tag:CustodianUnitTest': 'not-null'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_asg_filter_capacity_delta_nomatch(self):
factory = self.replay_flight_data('test_asg_filter_capacity_delta_nomatch')
p = self.load_policy({
'name': 'asg-capacity-delta',
'resource': 'asg',
'filters': [
{'type': 'capacity-delta'},
{'tag:CustodianUnitTest': 'not-null'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 0)
| 40.028846 | 83 | 0.56854 |
ba8d8f1529bb9705b02cc4d9d0567bb0141c4361 | 874 | py | Python | python/ethereum/etherscan/erc721.py | stoooops/potpourri | 07f38981766b6b4133f397109ac20ce2c6e2b76b | [
"MIT"
] | null | null | null | python/ethereum/etherscan/erc721.py | stoooops/potpourri | 07f38981766b6b4133f397109ac20ce2c6e2b76b | [
"MIT"
] | null | null | null | python/ethereum/etherscan/erc721.py | stoooops/potpourri | 07f38981766b6b4133f397109ac20ce2c6e2b76b | [
"MIT"
] | null | null | null | from typing import Dict
from potpourri.python.ethereum.etherscan.base import BaseEventDetailed
class ERC721Transfer(BaseEventDetailed):
def __init__(self, data: Dict[str, str]):
super().__init__(data=data)
assert self._input == "deprecated", f"Expected 'deprecated' value for 'input'. Got: '{self._input}'"
self._token_decimal: int = int(data["tokenDecimal"])
self._token_id: int = int(data["tokenID"])
self._token_name: str = data["tokenName"]
self._token_symbol: str = data["tokenSymbol"]
@property
def token_decimal(self) -> int:
return self._token_decimal
@property
def token_id(self) -> int:
return self._token_id
@property
def token_name(self) -> str:
return self._token_name
@property
def token_symbol(self) -> str:
return self._token_symbol
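# Usage sketch (an added illustration; the payload is hypothetical, and any
# extra fields required by the Base classes are defined elsewhere in this
# package):
#
#     transfer = ERC721Transfer(data={
#         "input": "deprecated", "tokenDecimal": "0", "tokenID": "42",
#         "tokenName": "CoolCats", "tokenSymbol": "COOL", ...})
#     transfer.token_id  # -> 42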
| 27.3125 | 108 | 0.661327 |
dfcb7d260c7aa1ff061479c85cdde80bd5c89dd4 | 23,906 | py | Python | test/lib/ufe/testAttribute.py | fractal-picture/maya-usd | f86a2e64372c85d26dabc0c1214b6f315d5224c8 | [
"Apache-2.0"
] | null | null | null | test/lib/ufe/testAttribute.py | fractal-picture/maya-usd | f86a2e64372c85d26dabc0c1214b6f315d5224c8 | [
"Apache-2.0"
] | null | null | null | test/lib/ufe/testAttribute.py | fractal-picture/maya-usd | f86a2e64372c85d26dabc0c1214b6f315d5224c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2019 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import mayaUtils
import testUtils
import usdUtils
from pxr import UsdGeom
from maya import cmds
from maya import standalone
from maya.internal.ufeSupport import ufeCmdWrapper as ufeCmd
import ufe
import os
import random
import unittest
class TestObserver(ufe.Observer):
def __init__(self):
super(TestObserver, self).__init__()
self._notifications = 0
def __call__(self, notification):
if(os.getenv('UFE_PREVIEW_VERSION_NUM', '0000') >= '2036'):
if isinstance(notification, ufe.AttributeValueChanged):
self._notifications += 1
else:
if isinstance(notification, ufe.AttributeChanged):
self._notifications += 1
@property
def notifications(self):
return self._notifications
class AttributeTestCase(unittest.TestCase):
'''Verify the Attribute UFE interface, for multiple runtimes.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
# Open top_layer.ma scene in testSamples
mayaUtils.openTopLayerScene()
random.seed()
@classmethod
def tearDownClass(cls):
# See comments in MayaUFEPickWalkTesting.tearDownClass
cmds.file(new=True, force=True)
standalone.uninitialize()
def setUp(self):
'''Called initially to set up the maya test environment'''
self.assertTrue(self.pluginsLoaded)
def assertVectorAlmostEqual(self, ufeVector, usdVector):
testUtils.assertVectorAlmostEqual(
self, ufeVector.vector, usdVector)
def assertColorAlmostEqual(self, ufeColor, usdColor):
for va, vb in zip(ufeColor.color, usdColor):
self.assertAlmostEqual(va, vb, places=6)
def runUndoRedo(self, attr, newVal, decimalPlaces=None):
oldVal = attr.get()
assert oldVal != newVal, "Undo / redo testing requires setting a value different from the current value"
ufeCmd.execute(attr.setCmd(newVal))
if decimalPlaces is not None:
self.assertAlmostEqual(attr.get(), newVal, decimalPlaces)
newVal = attr.get()
else:
self.assertEqual(attr.get(), newVal)
cmds.undo()
self.assertEqual(attr.get(), oldVal)
cmds.redo()
self.assertEqual(attr.get(), newVal)
def runTestAttribute(self, path, attrName, ufeAttrClass, ufeAttrType):
'''Engine method to run attribute test.'''
# Create the UFE/USD attribute for this test from the input path.
# Get a UFE scene item the input path in the scene.
itemPath = ufe.Path([
mayaUtils.createUfePathSegment("|transform1|proxyShape1"),
usdUtils.createUfePathSegment(path)])
ufeItem = ufe.Hierarchy.createItem(itemPath)
# Get the USD prim for this item.
usdPrim = usdUtils.getPrimFromSceneItem(ufeItem)
# Create the attributes interface for the item.
ufeAttrs = ufe.Attributes.attributes(ufeItem)
self.assertIsNotNone(ufeAttrs)
# Get the USDAttribute for the input attribute name so we can use it to
# compare to UFE.
usdAttr = usdPrim.GetAttribute(attrName)
self.assertIsNotNone(usdAttr)
# Get the attribute that matches the input name and make sure it matches
# the class type of UFE attribute class passed in.
self.assertTrue(ufeAttrs.hasAttribute(attrName))
ufeAttr = ufeAttrs.attribute(attrName)
self.assertIsInstance(ufeAttr, ufeAttrClass)
# Verify that the attribute type matches the input UFE type.
self.assertEqual(ufeAttr.type, ufeAttrType)
# Verify that the scene item the attribute was created with matches
# what is stored in the UFE attribute.
self.assertEqual(ufeAttr.sceneItem(), ufeItem)
# Verify that this attribute has a value. Note: all the attributes that
# are tested by this method are assumed to have a value.
self.assertTrue(ufeAttr.hasValue())
# Verify that the name matched what we created the attribute from.
self.assertEqual(ufeAttr.name, attrName)
# Test that the string representation of the value is not empty.
self.assertTrue(str(ufeAttr))
return ufeAttr, usdAttr
def testAttributeGeneric(self):
'''Test the Generic attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
# the Attribute base class). We use the xformOpOrder attribute which is
# an unsupported USD type, so it will be a UFE Generic attribute.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35',
attrName=UsdGeom.Tokens.xformOpOrder,
ufeAttrClass=ufe.AttributeGeneric,
ufeAttrType=ufe.Attribute.kGeneric)
# Now we test the Generic specific methods.
self.assertEqual(ufeAttr.nativeType(), usdAttr.GetTypeName().type.typeName)
def testAttributeEnumString(self):
'''Test the EnumString attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
# the Attribute base class). We use the visibility attribute which is
# an EnumString type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35',
attrName=UsdGeom.Tokens.visibility,
ufeAttrClass=ufe.AttributeEnumString,
ufeAttrType=ufe.Attribute.kEnumString)
# Now we test the EnumString specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Make sure 'inherited' is in the list of allowed tokens.
visEnumValues = ufeAttr.getEnumValues()
self.assertIn(UsdGeom.Tokens.inherited, visEnumValues)
# Change to 'invisible' and verify the return in UFE.
ufeAttr.set(UsdGeom.Tokens.invisible)
self.assertEqual(ufeAttr.get(), UsdGeom.Tokens.invisible)
# Verify that the new UFE value matches what is directly in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Change back to 'inherited' using a command.
self.runUndoRedo(ufeAttr, UsdGeom.Tokens.inherited)
def testAttributeBool(self):
'''Test the Bool attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the doubleSided attribute, which is
        # a bool type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/mesh',
attrName='doubleSided',
ufeAttrClass=ufe.AttributeBool,
ufeAttrType=ufe.Attribute.kBool)
# Now we test the Bool specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with the opposite boolean value.
ufeAttr.set(not ufeAttr.get())
        # Then make sure that the new UFE value matches what is in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr, not ufeAttr.get())
def testAttributeInt(self):
'''Test the Int attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the inputAOV attribute, which is
        # an integer type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/Base',
attrName='inputAOV',
ufeAttrClass=ufe.AttributeInt,
ufeAttrType=ufe.Attribute.kInt)
# Now we test the Int specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with a different int value.
ufeAttr.set(ufeAttr.get() + random.randint(1,5))
        # Then make sure that the new UFE value matches what is in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr, ufeAttr.get()+1)
def testAttributeFloat(self):
'''Test the Float attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the anisotropic attribute, which is
        # a float type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/Base',
attrName='anisotropic',
ufeAttrClass=ufe.AttributeFloat,
ufeAttrType=ufe.Attribute.kFloat)
# Now we test the Float specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with a different float value.
ufeAttr.set(random.random())
        # Then make sure that the new UFE value matches what is in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Python floating-point numbers are doubles. If stored in a float
# attribute, the resulting precision will be less than the original
# Python value.
self.runUndoRedo(ufeAttr, ufeAttr.get() + 1.0, decimalPlaces=6)
def _testAttributeDouble(self):
'''Test the Double attribute type.'''
        # I could not find a double attribute to test with.
pass
def testAttributeStringString(self):
'''Test the String (String) attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the filename attribute, which is
        # a string type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/BallTexture',
attrName='filename',
ufeAttrClass=ufe.AttributeString,
ufeAttrType=ufe.Attribute.kString)
# Now we test the String specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with a different string value.
# Note: this ball uses the ball8.tex
ufeAttr.set('./tex/ball7.tex')
        # Then make sure that the new UFE value matches what is in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr, 'potato')
def testAttributeStringToken(self):
'''Test the String (Token) attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the filter attribute, which is
        # a string (token) type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/BallTexture',
attrName='filter',
ufeAttrClass=ufe.AttributeString,
ufeAttrType=ufe.Attribute.kString)
# Now we test the String specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with a different string value.
# Note: this attribute is initially set to token 'Box'
ufeAttr.set('Sphere')
        # Then make sure that the new UFE value matches what is in USD.
self.assertEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr, 'Box')
def testAttributeColorFloat3(self):
'''Test the ColorFloat3 attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the emitColor attribute, which is
        # a ColorFloat3 type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/Base',
attrName='emitColor',
ufeAttrClass=ufe.AttributeColorFloat3,
ufeAttrType=ufe.Attribute.kColorFloat3)
# Now we test the ColorFloat3 specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertColorAlmostEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with some random color values.
vec = ufe.Color3f(random.random(), random.random(), random.random())
ufeAttr.set(vec)
        # Then make sure that the new UFE value matches what is in USD.
self.assertColorAlmostEqual(ufeAttr.get(), usdAttr.Get())
# The following causes a segmentation fault on CentOS 7.
# self.runUndoRedo(ufeAttr,
# ufe.Color3f(vec.r()+1.0, vec.g()+2.0, vec.b()+3.0))
# Entered as MAYA-102168.
newVec = ufe.Color3f(vec.color[0]+1.0, vec.color[1]+2.0, vec.color[2]+3.0)
self.runUndoRedo(ufeAttr, newVec)
def _testAttributeInt3(self):
'''Test the Int3 attribute type.'''
# I could not find an int3 attribute to test with.
pass
def testAttributeFloat3(self):
'''Test the Float3 attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the bumpNormal attribute, which is
        # a Float3 type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35/Looks/BallLook/Base',
attrName='bumpNormal',
ufeAttrClass=ufe.AttributeFloat3,
ufeAttrType=ufe.Attribute.kFloat3)
# Now we test the Float3 specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertVectorAlmostEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with some random values.
vec = ufe.Vector3f(random.random(), random.random(), random.random())
ufeAttr.set(vec)
        # Then make sure that the new UFE value matches what is in USD.
self.assertVectorAlmostEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr,
ufe.Vector3f(vec.x()+1.0, vec.y()+2.0, vec.z()+3.0))
def testAttributeDouble3(self):
'''Test the Double3 attribute type.'''
# Use our engine method to run the bulk of the test (all the stuff from
        # the Attribute base class). We use the xformOp:translate attribute,
        # which is a Double3 type.
ufeAttr, usdAttr = attrDict = self.runTestAttribute(
path='/Room_set/Props/Ball_35',
attrName='xformOp:translate',
ufeAttrClass=ufe.AttributeDouble3,
ufeAttrType=ufe.Attribute.kDouble3)
# Now we test the Double3 specific methods.
# Compare the initial UFE value to that directly from USD.
self.assertVectorAlmostEqual(ufeAttr.get(), usdAttr.Get())
# Set the attribute in UFE with some random values.
vec = ufe.Vector3d(random.uniform(-100, 100), random.uniform(-100, 100), random.uniform(-100, 100))
ufeAttr.set(vec)
        # Then make sure that the new UFE value matches what is in USD.
self.assertVectorAlmostEqual(ufeAttr.get(), usdAttr.Get())
self.runUndoRedo(ufeAttr,
ufe.Vector3d(vec.x()-1.0, vec.y()-2.0, vec.z()-3.0))
def testObservation(self):
'''Test Attributes observation interface.
Test both global attribute observation and per-node attribute
observation.
'''
# Create three observers, one for global attribute observation, and two
# on different UFE items.
proxyShapePathSegment = mayaUtils.createUfePathSegment(
"|transform1|proxyShape1")
path = ufe.Path([
proxyShapePathSegment,
usdUtils.createUfePathSegment('/Room_set/Props/Ball_34')])
ball34 = ufe.Hierarchy.createItem(path)
path = ufe.Path([
proxyShapePathSegment,
usdUtils.createUfePathSegment('/Room_set/Props/Ball_35')])
ball35 = ufe.Hierarchy.createItem(path)
(ball34Obs, ball35Obs, globalObs) = [TestObserver() for i in range(3)]
# Maya registers a single global observer on startup.
self.assertEqual(ufe.Attributes.nbObservers(), 1)
# No item-specific observers.
self.assertFalse(ufe.Attributes.hasObservers(ball34.path()))
self.assertFalse(ufe.Attributes.hasObservers(ball35.path()))
self.assertEqual(ufe.Attributes.nbObservers(ball34), 0)
self.assertEqual(ufe.Attributes.nbObservers(ball35), 0)
self.assertFalse(ufe.Attributes.hasObserver(ball34, ball34Obs))
self.assertFalse(ufe.Attributes.hasObserver(ball35, ball35Obs))
# No notifications yet.
self.assertEqual(ball34Obs.notifications, 0)
self.assertEqual(ball35Obs.notifications, 0)
self.assertEqual(globalObs.notifications, 0)
# Add a global observer.
ufe.Attributes.addObserver(globalObs)
self.assertEqual(ufe.Attributes.nbObservers(), 2)
self.assertFalse(ufe.Attributes.hasObservers(ball34.path()))
self.assertFalse(ufe.Attributes.hasObservers(ball35.path()))
self.assertEqual(ufe.Attributes.nbObservers(ball34), 0)
self.assertEqual(ufe.Attributes.nbObservers(ball35), 0)
self.assertFalse(ufe.Attributes.hasObserver(ball34, ball34Obs))
self.assertFalse(ufe.Attributes.hasObserver(ball35, ball35Obs))
# Add item-specific observers.
ufe.Attributes.addObserver(ball34, ball34Obs)
self.assertEqual(ufe.Attributes.nbObservers(), 2)
self.assertTrue(ufe.Attributes.hasObservers(ball34.path()))
self.assertFalse(ufe.Attributes.hasObservers(ball35.path()))
self.assertEqual(ufe.Attributes.nbObservers(ball34), 1)
self.assertEqual(ufe.Attributes.nbObservers(ball35), 0)
self.assertTrue(ufe.Attributes.hasObserver(ball34, ball34Obs))
self.assertFalse(ufe.Attributes.hasObserver(ball34, ball35Obs))
self.assertFalse(ufe.Attributes.hasObserver(ball35, ball35Obs))
ufe.Attributes.addObserver(ball35, ball35Obs)
self.assertTrue(ufe.Attributes.hasObservers(ball35.path()))
self.assertEqual(ufe.Attributes.nbObservers(ball34), 1)
self.assertEqual(ufe.Attributes.nbObservers(ball35), 1)
self.assertTrue(ufe.Attributes.hasObserver(ball35, ball35Obs))
self.assertFalse(ufe.Attributes.hasObserver(ball35, ball34Obs))
# Make a change to ball34, global and ball34 observers change.
ball34Attrs = ufe.Attributes.attributes(ball34)
ball34XlateAttr = ball34Attrs.attribute('xformOp:translate')
self.assertEqual(ball34Obs.notifications, 0)
ufeCmd.execute(ball34XlateAttr.setCmd(ufe.Vector3d(1, 2, 3)))
self.assertEqual(ball34Obs.notifications, 1)
self.assertEqual(ball35Obs.notifications, 0)
self.assertEqual(globalObs.notifications, 1)
# Undo, redo
cmds.undo()
self.assertEqual(ball34Obs.notifications, 2)
self.assertEqual(ball35Obs.notifications, 0)
self.assertEqual(globalObs.notifications, 2)
cmds.redo()
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 0)
self.assertEqual(globalObs.notifications, 3)
# Make a change to ball35, global and ball35 observers change.
ball35Attrs = ufe.Attributes.attributes(ball35)
ball35XlateAttr = ball35Attrs.attribute('xformOp:translate')
ufeCmd.execute(ball35XlateAttr.setCmd(ufe.Vector3d(1, 2, 3)))
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 1)
self.assertEqual(globalObs.notifications, 4)
# Undo, redo
cmds.undo()
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 2)
self.assertEqual(globalObs.notifications, 5)
cmds.redo()
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 3)
self.assertEqual(globalObs.notifications, 6)
# Test removeObserver.
ufe.Attributes.removeObserver(ball34, ball34Obs)
self.assertFalse(ufe.Attributes.hasObservers(ball34.path()))
self.assertTrue(ufe.Attributes.hasObservers(ball35.path()))
self.assertEqual(ufe.Attributes.nbObservers(ball34), 0)
self.assertEqual(ufe.Attributes.nbObservers(ball35), 1)
self.assertFalse(ufe.Attributes.hasObserver(ball34, ball34Obs))
ufeCmd.execute(ball34XlateAttr.setCmd(ufe.Vector3d(4, 5, 6)))
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 3)
self.assertEqual(globalObs.notifications, 7)
ufe.Attributes.removeObserver(globalObs)
self.assertEqual(ufe.Attributes.nbObservers(), 1)
ufeCmd.execute(ball34XlateAttr.setCmd(ufe.Vector3d(7, 8, 9)))
self.assertEqual(ball34Obs.notifications, 3)
self.assertEqual(ball35Obs.notifications, 3)
self.assertEqual(globalObs.notifications, 7)
    # Run last to avoid the "file new" operation disturbing other tests.
def testZAttrChangeRedoAfterPrimCreateRedo(self):
'''Redo attribute change after redo of prim creation.'''
cmds.file(new=True, force=True)
# Create a capsule, change one of its attributes.
import mayaUsd_createStageWithNewLayer
mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path('|stage1|stageShape1')
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
cmd = proxyShapeContextOps.doOpCmd(['Add New Prim', 'Capsule'])
ufeCmd.execute(cmd)
capsulePath = ufe.PathString.path('|stage1|stageShape1,/Capsule1')
capsuleItem = ufe.Hierarchy.createItem(capsulePath)
# Create the attributes interface for the item.
attrs = ufe.Attributes.attributes(capsuleItem)
self.assertIsNotNone(attrs)
self.assertTrue(attrs.hasAttribute('radius'))
radiusAttr = attrs.attribute('radius')
oldRadius = radiusAttr.get()
ufeCmd.execute(radiusAttr.setCmd(2))
newRadius = radiusAttr.get()
self.assertEqual(newRadius, 2)
self.assertNotEqual(oldRadius, newRadius)
# Undo 2x: undo attr change and prim creation.
cmds.undo()
cmds.undo()
# Redo 2x: prim creation, attr change.
cmds.redo()
cmds.redo()
# Re-create item, as its underlying prim was re-created.
capsuleItem = ufe.Hierarchy.createItem(capsulePath)
attrs = ufe.Attributes.attributes(capsuleItem)
radiusAttr = attrs.attribute('radius')
self.assertEqual(radiusAttr.get(), newRadius)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 38.495974 | 112 | 0.665523 |
f64a18f948b46bb54e0a595677b40a2c51263e4a | 4,003 | py | Python | jupyter/utils.py | perellonieto/background_check | a5b6549a62be276c7199e87e78a94a64af688ab9 | [
"MIT"
] | 4 | 2017-01-14T12:59:58.000Z | 2021-06-21T10:55:17.000Z | jupyter/utils.py | REFRAME/background_check | 7da967bad4a6d8cbc924b5301041f3c99ba39595 | [
"MIT"
] | null | null | null | jupyter/utils.py | REFRAME/background_check | 7da967bad4a6d8cbc924b5301041f3c99ba39595 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.stats import norm
def draw_in_row(fruits, sizes):
indices = np.argsort(sizes)
sizes_sorted = sizes[indices]
fruits_sorted = fruits[indices]
    # Build a list (not a lazy map) so the images can be iterated twice;
    # under Python 3 a map object would be exhausted after the first pass.
    images = [Image.open('./images/'+fruit+'.jpg') for fruit in
              fruits_sorted]
widths, heights = zip(*(i.size for i in images))
unit_width = 200
drawing_sizes = np.array(sizes_sorted*unit_width, dtype=int)
total_width = drawing_sizes.sum()+unit_width
max_height = drawing_sizes.max()
new_im = Image.new('RGB', (total_width, max_height),color=(255,255,255,0))
drawn_ball = False
x_offset = 0
for im, drawing_size in zip(images, drawing_sizes):
if not drawn_ball and drawing_size >= unit_width:
im_aux = Image.open('./images/tennis_ball.jpg')
im_aux = im_aux.resize([unit_width, unit_width])
new_im.paste(im_aux, (x_offset,0))
drawn_ball = True
x_offset += im_aux.size[0]
im = im.resize([drawing_size,drawing_size])
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
ax = plt.figure(figsize=(7,5), dpi=80)
plt.imshow(new_im)
plt.axis('off')
class NormalDistribution(object):
def __init__(self, x=None, mu=None, sigma=None):
if x is not None:
self.fit(x)
else:
if mu is not None:
self.mu = mu
if sigma is not None:
self.sigma = sigma
def fit(self, x):
self.mu = x.mean()
self.sigma = x.std()
def pdf(self,x):
return norm.pdf(x, loc=self.mu, scale=self.sigma)
def sample(self, n):
return norm.rvs(loc=self.mu, scale=self.sigma, size=n)
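# Minimal usage sketch (synthetic data, for illustration only):
#   x = np.random.randn(1000)
#   nd = NormalDistribution(x=x)           # fit mu/sigma from samples
#   dens = nd.pdf(np.linspace(-3, 3, 7))   # evaluate the density
#   draws = nd.sample(10)                  # draw new samples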
class MixtureGaussians(object):
def __init__(self, gaussians, priors=None):
self.gaussians = gaussians
if priors is None:
self.priors = np.ones(self.n_gaussians)/self.n_gaussians
else:
            # Coerce to an ndarray so priors_norm's vectorized division works
            # even when a plain Python list is passed in.
            self.priors = np.asarray(priors, dtype=float)
def add_gaussian(self, gaussian, prior=None):
self.gaussians.append(gaussian)
if prior is None:
self.priors = np.ones(self.n_gaussians)/self.n_gaussians
else:
            # np.append works whether self.priors is a list or an ndarray;
            # calling list.append on the ndarray created above would fail.
            self.priors = np.append(self.priors, prior)
@property
def n_gaussians(self):
return len(self.gaussians)
@property
def priors_norm(self):
return self.priors/np.sum(self.priors)
def pdf(self,x):
result = np.zeros_like(x, dtype=float)
for prior, gaussian in zip(self.priors_norm, self.gaussians):
result += gaussian.pdf(x)*prior
return result
def sample(self, n):
result = np.zeros(n, dtype=float)
ns = np.random.multinomial(n, self.priors_norm)
index = 0
for n_i, prior, gaussian in zip(ns, self.priors_norm, self.gaussians):
result[index:index+n_i] = gaussian.sample(n_i)
index += n_i
return result
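# Minimal usage sketch (an assumed two-component mixture):
#   mix = MixtureGaussians([NormalDistribution(mu=0.0, sigma=1.0),
#                           NormalDistribution(mu=3.0, sigma=0.5)],
#                          priors=np.array([0.7, 0.3]))
#   samples = mix.sample(100)
#   dens = mix.pdf(samples)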
def plot_confusion_matrix(cm, labels, title='Confusion matrix',
cmap=plt.cm.Blues, show_accuracy=True):
fig = plt.figure()
ax = fig.add_subplot(111)
res = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.set_aspect(1)
cb = fig.colorbar(res)
tick_marks = np.arange(len(labels))
ax.set_xticks(tick_marks)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(tick_marks)
ax.set_yticklabels(labels)
fig.tight_layout()
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
if show_accuracy:
ax.set_title("{} (acc={:2.2f}%)".format(title,
np.true_divide(100*np.diag(cm).sum(),cm.sum())))
else:
ax.set_title(title)
width, height = cm.shape
    for x in range(width):
        for y in range(height):
ax.annotate(str(cm[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
if __name__ == '__main__':
pass
| 29.007246 | 78 | 0.605546 |
84244b56a7c76a183597267b03919b199281a95c | 1,111 | py | Python | oauth_toolkit_spa/views.py | oscarychen/django-oauth-toolkit-spa | d84059f4ae63330d3cf2d13c0988dd46dddcf154 | [
"MIT"
] | 4 | 2022-01-27T21:44:40.000Z | 2022-03-14T13:27:08.000Z | oauth_toolkit_spa/views.py | oscarychen/django-oauth-toolkit-spa | d84059f4ae63330d3cf2d13c0988dd46dddcf154 | [
"MIT"
] | 1 | 2022-01-19T16:28:50.000Z | 2022-01-19T17:08:29.000Z | oauth_toolkit_spa/views.py | oscarychen/django-oauth-toolkit-cookie-refresh | d84059f4ae63330d3cf2d13c0988dd46dddcf154 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework import permissions
from .mixins import OAuthToolKitMixin
class LogIn(APIView, OAuthToolKitMixin):
'''Log in API endpoint'''
permission_classes = [permissions.AllowAny]
def post(self, request, *args, **kwargs):
return self.get_login_response(request)
class TokenRefresh(APIView, OAuthToolKitMixin):
'''Token refresh API endpoint'''
permission_classes = [permissions.AllowAny]
def post(self, request, *args, **kwargs):
return self.get_refresh_response(request)
class LogOff(APIView, OAuthToolKitMixin):
'''Log off API endpoint'''
permission_classes = [permissions.AllowAny]
def post(self, request, *args, **kwargs):
return self.get_logoff_response(request)
class LogOffEverywhere(APIView, OAuthToolKitMixin):
'''Log off any signed in sessions by revoking all refresh token and access token associated with current user'''
permission_classes = [permissions.AllowAny]
def post(self, request, *args, **kwargs):
return self.get_logoff_everywhere_response(request)
| 30.861111 | 116 | 0.737174 |
bb6fc7bcc9da1a079d81f5c405e44b7ae62bedef | 2,615 | py | Python | news.py | panliming0418/science | 1a5a04bcfadc10f71adb6025e675995b237f85af | [
"Apache-2.0"
] | null | null | null | news.py | panliming0418/science | 1a5a04bcfadc10f71adb6025e675995b237f85af | [
"Apache-2.0"
] | null | null | null | news.py | panliming0418/science | 1a5a04bcfadc10f71adb6025e675995b237f85af | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import json
def getHTMLText(url):
    '''
    Fetch the HTML document of a web page
    '''
    try:
        # Add request headers
        # (different pages may require different request headers)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Mobile Safari/537.36'
        }
        # Get the server's response
        res = requests.get(url, headers=headers)
        # Raise an exception if the status code signals failure
        res.raise_for_status()
        # Set the probable encoding of the HTML document
        res.encoding = res.apparent_encoding
        # Return the page's HTML source
        return res.text
    except Exception:
        return 'An exception occurred'
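# Note: on any failure getHTMLText returns the sentinel string
# 'An exception occurred' instead of raising, so callers should check for
# that sentinel before parsing the result.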
def main():
    # Store the scraped URLs in this list
array = []
    # Store the scraped titles in this list
titleArray = []
    # Target pages: Zhihu search results
urls = ['https://www.zhihu.com/search?q=化石收藏&utm_content=search_suggestion&type=content', 'https://www.zhihu.com/search?q=化石形成&utm_content=search_suggestion&type=content','https://www.zhihu.com/search?type=content&q=鱼化石','https://www.zhihu.com/search?type=content&q=植物化石','https://www.zhihu.com/search?q=化石复原&utm_content=search_history&type=content']
for url in urls:
demo = getHTMLText(url)
# print(demo)
        # Parse the HTML
soup = BeautifulSoup(demo, 'html.parser')
        # Find every <a> tag in the HTML that has an href attribute
a_labels = soup.find_all('a', attrs={'href': True})
        # Find the title spans
titles = soup.find_all('span', 'Highlight')
        # Collect the href value (the hyperlink) of every <a> tag
for a in a_labels:
if(str(a.get('href')).startswith('/question')):
array.append('https://www.zhihu.com' + str(a.get('href')))
for b in titles:
bb = str(b)
string2 = "<em>"
string3 = "</em>"
string4 = "</span>"
number2 = bb.find(string2)
number3 = bb.find(string3)
number4 = bb.find(string4)
result = bb[24:number2] + bb[(number2+4):number3] + bb[number3 + 5:number4]
while(result.find(string2) >= 0):
index = result.find(string2)
index2 = result.find(string3)
result = result[0:index] + result[index+4:index2] + result[index2+5:len(result)]
titleArray.append(result)
index = 0
dics = []
for a in array:
key = "url" + str(index)
url = a
topic = titleArray[index]
index = index + 1
newDic = {"topic" : topic, "url" : url}
dics.append({key:newDic})
# for a in array:
# print(a)
# for b in titleArray:
# print(b)
with open('data.json', 'w') as file:
json.dump(dics, file)
main()
| 30.057471 | 354 | 0.563671 |
2f9902bc93ef7fb4d482837412ad7ef7befd7580 | 54,442 | py | Python | configure.py | georgeslabreche/tensorflow-smartcamluvsu | dcccc9f756192c43c6d2af2d02249e518b3e0eb4 | [
"Apache-2.0"
] | 10 | 2021-04-29T16:31:02.000Z | 2021-08-10T13:17:55.000Z | configure.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 88 | 2020-11-24T08:18:10.000Z | 2022-03-25T20:28:30.000Z | configure.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 9 | 2020-11-06T22:50:15.000Z | 2021-12-30T01:45:55.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '6'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '3.1.0'
_TF_MAX_BAZEL_VERSION = '3.99.0'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD',
'tensorflow/lite/tools/benchmark/experimental/ios/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
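# Illustrative call (paths are placeholders):
#   symlink_force('/usr/bin/python3', '/tmp/python')
# behaves like `ln -sf /usr/bin/python3 /tmp/python`.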
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env {}="{}"'.format(var_name, str(var)))
def run_shell(cmd, allow_non_zero=False, stderr=None):
if stderr is None:
stderr = sys.stdout
if allow_non_zero:
try:
output = subprocess.check_output(cmd, stderr=stderr)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd, stderr=stderr)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
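# e.g. on Windows (illustrative): cygpath('C:\\Users\\me') -> 'C:/Users/me'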
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
stderr = open(os.devnull, 'wb')
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
],
stderr=stderr).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'{}]: ').format(default_python_bin_path)
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: {} cannot be found.'.format(python_bin_path))
else:
print('{} is not executable. Is it the python binary?'.format(
python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [{}]\n'.format(python_lib_paths[0]))
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"{}"'.format(python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If the chosen python_lib_path comes from a path listed in the PYTHONPATH
  # variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="{}"'.format(python_bin_path))
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with {} support?'.format(
query_item)
if not yes_reply:
yes_reply = '{} support will be enabled for TensorFlow.'.format(query_item)
if not no_reply:
no_reply = 'No {}'.format(yes_reply)
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: {}'.format(user_input_origin))
return var
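# Example (using the accepted strings listed above): with TF_NEED_CUDA=yes
# present in environ_cp,
#   get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
# returns True without prompting; when the variable is unset, the user is
# prompted ('[y/N]: ' here, since enabled_by_default is False) until a
# valid or empty answer is given.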
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
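# Worked examples (each segment is zero-padded to three digits):
#   convert_version_to_int('0.24') == 24000            # treated as '0.24.0'
#   convert_version_to_int('3.1.0') == 3001000
#   convert_version_to_int('0.26.1-homebrew') == 26001 # '-...' suffix dropped
#   convert_version_to_int('3.1.0rc1') is None         # non-numeric segment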
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(1)
stderr = open(os.devnull, 'wb')
curr_version = run_shell(['bazel', '--version'],
allow_non_zero=True,
stderr=stderr)
if curr_version.startswith('bazel '):
curr_version = curr_version.split('bazel ')[1]
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
if resolve_symlinks and os.path.islink(val):
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='21', # 21 is required for ARM64 support.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
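# Usage sketch (library path is hypothetical): the version arguments must
# use the convert_version_to_int encoding compared against above, e.g.
#   is_cuda_compatible('/usr/local/lib/libnccl.so',
#                      convert_version_to_int('10.0'),
#                      convert_version_to_int('7.6'))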
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated CUDA compute capabilities '
'you want to build with.\nYou can find the compute capability of your '
'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
'can be specified as "x.y" or "compute_xy" to include both virtual and'
' binary GPU code, or as "sm_xy" to only include the binary '
'code.\nPlease note that each additional compute capability '
'significantly increases your build time and binary size, and that '
'TensorFlow only supports compute capabilities >= 3.5 [Default is: '
'%s]: ' % default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
# We now support sm_35,sm_50,sm_60,compute_70.
sm_compute_match = re.match('(sm|compute)_?([0-9]+[0-9]+)',
compute_capability)
if not sm_compute_match:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(sm_compute_match.group(2))
if ver < 30:
print(
'ERROR: TensorFlow only supports CUDA compute'
' capabilities of sm_30 and higher. Please re-specify the list'
' of compute capabilities excluding version %s.' % ver)
all_valid = False
if ver < 35:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than sm_35. Disable XLA when running on older GPUs.')
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
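# Inputs accepted by the validation above, e.g.: "3.5,7.0" (virtual and
# binary code), "compute_70" (equivalent spelling), or "sm_75" (binary
# code only).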
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
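# Both host-compiler prompts default to the first g++/gcc that `which`
# finds on PATH; an answer such as /usr/bin/g++-8 (hypothetical path) is
# checked with os.path.exists before being written to the .bazelrc.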
def system_specific_test_config(environ_cp):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
# Each instance of --test_tag_filters or --build_tag_filters overrides all
# previous instances, so we need to build up a complete list and write a
# single list of filters for the .bazelrc file.
# Filters to use with both --test_tag_filters and --build_tag_filters
test_and_build_filters = ['-benchmark-test', '-no_oss']
# Additional filters for --test_tag_filters beyond those in
# test_and_build_filters
test_only_filters = ['-oss_serial']
if is_windows():
test_and_build_filters.append('-no_windows')
if ((environ_cp.get('TF_NEED_CUDA', None) == '1') or
(environ_cp.get('TF_NEED_ROCM', None) == '1')):
test_and_build_filters += ['-no_windows_gpu', '-no_gpu']
else:
test_and_build_filters.append('-gpu')
elif is_macos():
test_and_build_filters += ['-gpu', '-nomac', '-no_mac']
elif is_linux():
if ((environ_cp.get('TF_NEED_CUDA', None) == '1') or
(environ_cp.get('TF_NEED_ROCM', None) == '1')):
test_and_build_filters.append('-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
test_and_build_filters.append('-gpu')
# Disable tests with "v1only" tag in "v2" Bazel config, but not in "v1" config
write_to_bazelrc('test:v1 --test_tag_filters=%s' %
','.join(test_and_build_filters + test_only_filters))
write_to_bazelrc('test:v1 --build_tag_filters=%s' %
','.join(test_and_build_filters))
write_to_bazelrc(
'test:v2 --test_tag_filters=%s' %
','.join(test_and_build_filters + test_only_filters + ['-v1only']))
write_to_bazelrc('test:v2 --build_tag_filters=%s' %
','.join(test_and_build_filters + ['-v1only']))
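# Illustrative result for a CUDA build on Linux, derived from the filter
# lists above:
#   test:v2 --test_tag_filters=-benchmark-test,-no_oss,-no_gpu,-oss_serial,-v1only
#   test:v2 --build_tag_filters=-benchmark-test,-no_oss,-no_gpu,-v1only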
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
for varname in ('PREFIX', 'LIBDIR', 'INCLUDEDIR', 'PROTOBUF_INCLUDE_PATH'):
if varname in environ_cp:
write_to_bazelrc('build --define=%s=%s' % (varname, environ_cp[varname]))
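# e.g. TF_SYSTEM_LIBS="zlib curl" (space separated) is normalised by the
# code above to the sorted, comma-separated "curl,zlib".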
def is_reduced_optimize_huge_functions_available(environ_cp):
"""Check to see if the system supports /d2ReducedOptimizeHugeFunctions.
The above compiler flag is a new compiler flag introduced to the Visual Studio
compiler in version 16.4 (available in Visual Studio 2019, Preview edition
only, as of 2019-11-19). TensorFlow needs this flag to massively reduce
compile times, but until 16.4 is officially released, we can't depend on it.
See also
https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
Because it's very annoying to check this manually (to check the MSVC installed
versions, you need to use the registry, and it's not clear if Bazel will be
using that install version anyway), we expect environments that know they may
use this flag to export TF_VC_VERSION=16.4.
TODO(angerson, gunan): Remove this function when TensorFlow's minimum VS
version is upgraded to 16.4.
Arguments:
environ_cp: Environment of the current execution
Returns:
boolean, whether or not /d2ReducedOptimizeHugeFunctions is available on this
machine.
"""
return float(environ_cp.get('TF_VC_VERSION', '0')) >= 16.4
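# e.g. exporting TF_VC_VERSION=16.4 before running configure (a manual,
# user-supplied hint) makes this return True; unset or lower values keep
# the flag disabled.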
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
if is_reduced_optimize_huge_functions_available(environ_cp):
write_to_bazelrc(
'build --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions'
)
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
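# e.g. config_info_line('mkl', 'Build with MKL support.') prints a
# tab-indented line like "--config=mkl  # Build with MKL support." with the
# config name padded to 12 characters.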
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
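# find_cuda_config.py emits plain "key: value" lines on stdout, e.g.
# (versions and paths hypothetical):
#   cuda_version: 10.1
#   cuda_library_dir: /usr/local/cuda-10.1/lib64
# which the comprehension above parses into the `config` dict.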
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
# Make a copy of os.environ so it is clear when functions are getting and
# setting environment variables.
environ_cp = dict(os.environ)
try:
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
except subprocess.CalledProcessError as e:
print('Error checking bazel version: ', e.output.decode('UTF-8').strip())
raise e
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
if environ_cp.get('TF_ENABLE_XLA', '1') == '1':
write_to_bazelrc('build --config=xla')
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('ROCM_PATH')):
write_action_env_to_bazelrc('ROCM_PATH', environ_cp.get('ROCM_PATH'))
write_action_env_to_bazelrc('ROCM_ROOT', environ_cp.get('ROCM_PATH'))
if ((environ_cp.get('TF_NEED_ROCM') == '1') and
(environ_cp.get('TF_ENABLE_MLIR_GENERATED_GPU_KERNELS') == '1')):
write_to_bazelrc(
'build:rocm --define tensorflow_enable_mlir_generated_gpu_kernels=1')
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
'Invalid CUDA settings were provided %d '
'times in a row. Assuming this to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
raise UserInputError('CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(environ_cp)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('mkl_aarch64', 'Build with oneDNN support for Aarch64.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
| 36.246338 | 98 | 0.679861 |
fca0f4a99119fb440eb07c792c2cf14b29fa2529 | 3,516 | py | Python | tests/test_examples.py | jasonstrimpel/zipline | 21b4d3e2bbd32e1af5ded3e4834b007fe02bd83a | ["Apache-2.0"] | 1 | 2020-09-24T15:25:26.000Z | 2020-09-24T15:25:26.000Z | tests/test_examples.py | jasonstrimpel/zipline | 21b4d3e2bbd32e1af5ded3e4834b007fe02bd83a | ["Apache-2.0"] | null | null | null | tests/test_examples.py | jasonstrimpel/zipline | 21b4d3e2bbd32e1af5ded3e4834b007fe02bd83a | ["Apache-2.0"] | null | null | null | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from operator import itemgetter
import tarfile
import matplotlib
from nose_parameterized import parameterized
import pandas as pd
from zipline import examples
from zipline.data.bundles import register, unregister
from zipline.testing import test_resource_path
from zipline.testing.fixtures import (
WithTmpDir,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.cache import dataframe_cache
from zipline.utils.paths import update_modified_time
# Otherwise the next line sometimes complains about being run too late.
_multiprocess_can_split_ = False
matplotlib.use('Agg')
EXAMPLE_MODULES = examples.load_example_modules()
class ExamplesTests(WithTmpDir, ZiplineTestCase):
# Some columns contain values with unique ids that will not be the same
# across runs, so they are excluded from the comparison in test_example.
@classmethod
def init_class_fixtures(cls):
super(ExamplesTests, cls).init_class_fixtures()
register('test', lambda *args: None)
cls.add_class_callback(partial(unregister, 'test'))
with tarfile.open(test_resource_path('example_data.tar.gz')) as tar:
tar.extractall(cls.tmpdir.path)
cls.expected_perf = dataframe_cache(
cls.tmpdir.getpath(
'example_data/expected_perf/%s' %
pd.__version__.replace('.', '-'),
),
serialization='pickle',
)
market_data = ('SPY_benchmark.csv', 'treasury_curves.csv')
for data in market_data:
update_modified_time(
cls.tmpdir.getpath(
'example_data/root/data/' + data
)
)
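# Bumping the modified times marks the extracted benchmark/treasury CSVs
# as fresh, presumably so zipline's data layer reuses them rather than
# re-downloading during the example runs.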
@parameterized.expand(sorted(EXAMPLE_MODULES))
def test_example(self, example_name):
actual_perf = examples.run_example(
EXAMPLE_MODULES,
example_name,
# This should match the invocation in
# zipline/tests/resources/rebuild_example_data
environ={
'ZIPLINE_ROOT': self.tmpdir.getpath('example_data/root'),
},
)
expected_perf = self.expected_perf[example_name]
# Exclude positions column as the positions do not always have the
# same order
columns = [column for column in examples._cols_to_check
if column != 'positions']
assert_equal(
actual_perf[columns],
expected_perf[columns],
# There is a difference in the datetime columns in pandas
# 0.16 and 0.17 because in 16 they are object and in 17 they are
# datetime[ns, UTC]. We will just ignore the dtypes for now.
check_dtype=False,
)
# Sort positions by SID before comparing
assert_equal(
expected_perf['positions'].apply(sorted, key=itemgetter('sid')),
actual_perf['positions'].apply(sorted, key=itemgetter('sid')),
)
| 34.811881 | 76 | 0.670648 |