content stringlengths 5 1.05M |
|---|
def loaddata(path='../../data/raw/olympic_dataset.csv'):
    """Load the Olympic dataset from a CSV file.

    Args:
        path: CSV file location. Defaults to the project's raw-data path,
            so existing callers keep working.

    Returns:
        pandas.DataFrame with the file contents.
    """
    # low_memory=False reads the whole file in one pass so mixed-type
    # columns receive a single consistent dtype.
    return pd.read_csv(path, low_memory=False, encoding='utf-8')
def nocMedals(data, noc):
    """Return medal-winning rows for one National Olympic Committee.

    Args:
        data: full athlete DataFrame (must contain a 'NOC' column).
        noc: NOC code to filter on (e.g. 'USA').

    Returns:
        DataFrame restricted to `noc`, with non-medal (NaN-containing)
        rows dropped.
    """
    # BUG FIX: the original was missing the colon after the def line and
    # filtered on the undefined name `team` instead of the `noc` parameter.
    return data[data['NOC'] == noc].dropna()
def nocathletes(data, noc):
    """Return all athlete rows (medalists or not) for one NOC.

    Args:
        data: full athlete DataFrame (must contain a 'NOC' column).
        noc: NOC code to filter on.

    Returns:
        DataFrame restricted to the given NOC, NaNs kept.
    """
    # BUG FIX: the original had an unclosed parameter list and referenced
    # the undefined name `team` instead of the `noc` parameter.
    return data[data['NOC'] == noc]
def athletesMedals(data):
    """Keep only medal-winning rows (i.e. rows with no missing values)."""
    medalists = data.dropna()
    return medalists
def medalsYear(data, year):
    """Medal rows for a single Games year (NaN rows dropped)."""
    selected = data[data['Year'] == year]
    return selected.dropna()
def athletesYear(data, year):
    """All athlete rows for a single Games year, NaNs kept."""
    return data.loc[data['Year'] == year]
def nocMedalsYear(data, NOC, year):
    """Medal rows for one NOC at one Games year.

    Args:
        data: full athlete DataFrame with 'NOC' and 'Year' columns.
        NOC: NOC code to filter on.
        year: Games year to filter on.

    Returns:
        Matching rows with non-medal (NaN-containing) rows dropped.
    """
    # BUG FIX: the original filtered on the undefined name `team` instead
    # of the `NOC` parameter.
    return data[(data['NOC'] == NOC) & (data['Year'] == year)].dropna()
def nocAthletesYear(data, NOC, year):
    """All athlete rows for one NOC at one Games year (NaNs kept).

    Args:
        data: full athlete DataFrame with 'NOC' and 'Year' columns.
        NOC: NOC code to filter on.
        year: Games year to filter on.

    Returns:
        Matching rows, including athletes who did not medal.
    """
    # BUG FIX: the original filtered on the undefined name `team` instead
    # of the `NOC` parameter.
    return data[(data['NOC'] == NOC) & (data['Year'] == year)]
def getnoc(data, cname):
    """Look up the NOC code for a country (team) name.

    Args:
        data: DataFrame with 'Team' and 'NOC' columns.
        cname: team/country name to look up.

    Returns:
        The NOC code of the first matching row.

    Raises:
        IndexError: if no row matches `cname`.
    """
    # BUG FIX: the original filtered on the undefined name `country`
    # instead of the `cname` parameter.
    row = data[data['Team'] == cname]
    return row['NOC'].iloc[0]
|
""" This process performs a backup of all the application entities for the given
app ID to the local filesystem.
"""
import cPickle
import errno
import logging
import multiprocessing
import os
import random
import re
import shutil
import time
from appscale.datastore import appscale_datastore_batch
from appscale.datastore import dbconstants
from appscale.datastore import entity_utils
from appscale.datastore.zkappscale import zktransaction as zk
# The location to look at in order to verify that an app is deployed.
_SOURCE_LOCATION = '/opt/appscale/apps/'
class DatastoreBackup(multiprocessing.Process):
    """ Backs up all the entities for a set application ID.

    NOTE: this is Python 2 code (cPickle, `except X, e` syntax). The process
    acquires a ZooKeeper lock, walks the app's key range in the entity table
    in batches, and pickles each entity into size-capped files under
    BACKUP_FILE_LOCATION.
    """

    # The amount of seconds between polling to get the backup lock.
    LOCK_POLL_PERIOD = 60

    # The location where the backup files will be stored.
    BACKUP_FILE_LOCATION = "/opt/appscale/backups/"

    # The backup filename suffix.
    BACKUP_FILE_SUFFIX = ".backup"

    # The number of entities retrieved in a datastore request.
    BATCH_SIZE = 100

    # Blob entity regular expressions.
    BLOB_CHUNK_REGEX = '(.*)__BlobChunk__(.*)'
    BLOB_INFO_REGEX = '(.*)__BlobInfo__(.*)'

    # Retry sleep on datastore error in seconds.
    DB_ERROR_PERIOD = 30

    # Max backup file size in bytes.
    MAX_FILE_SIZE = 100000000 # <- 100 MB

    # Any kind that is of __*__ is private.
    PRIVATE_KINDS = '(.*)__(.*)__(.*)'

    # Any kind that is of _*_ is protected.
    PROTECTED_KINDS = '(.*)_(.*)_(.*)'

    def __init__(self, app_id, zoo_keeper, table_name, source_code=False,
                 skip_list=()):
        """ Constructor.

        Args:
            app_id: The application ID.
            zoo_keeper: ZooKeeper client.
            table_name: The database used (e.g. cassandra).
            source_code: True when a backup of the source code is requested,
                False otherwise.
            skip_list: A list of Kinds to be skipped during backup; empty list
                if none.
        """
        multiprocessing.Process.__init__(self)
        self.app_id = app_id
        self.zoo_keeper = zoo_keeper
        self.table = table_name
        self.source_code = source_code
        self.skip_kinds = skip_list
        # Entity keys are prefixed with the app id, so this is the upper
        # bound of this app's key range in the entity table.
        self.last_key = self.app_id + '\0' + dbconstants.TERMINATING_STRING
        self.backup_timestamp = time.strftime("%Y%m%d-%H%M%S")
        self.backup_dir = None
        # Sequence number and running byte count of the current backup file;
        # a new file is started whenever MAX_FILE_SIZE would be exceeded.
        self.current_fileno = 0
        self.current_file_size = 0
        self.entities_backed_up = 0
        self.db_access = None

    def stop(self):
        """ Stops the backup thread. """
        pass

    def set_filename(self):
        """ Creates a new backup filename. Also creates the backup folder if it
        doesn't exist.

        Returns:
            True on success, False otherwise.
        """
        if not self.backup_dir:
            self.backup_dir = '{0}{1}-{2}/'.format(self.BACKUP_FILE_LOCATION,
                self.app_id, self.backup_timestamp)
            try:
                os.makedirs(self.backup_dir)
                logging.info("Backup dir created: {0}".format(self.backup_dir))
            except OSError, os_error:
                # An already-existing directory is fine; no-space and
                # read-only filesystems are fatal.
                if os_error.errno == errno.EEXIST:
                    logging.warn("OSError: Backup directory already exists.")
                    logging.error(os_error.message)
                elif os_error.errno == errno.ENOSPC:
                    logging.error("OSError: No space left to create backup directory.")
                    logging.error(os_error.message)
                    return False
                elif os_error.errno == errno.EROFS:
                    logging.error("OSError: READ-ONLY filesystem detected.")
                    logging.error(os_error.message)
                    return False
            except IOError, io_error:
                logging.error("IOError while creating backup dir.")
                logging.error(io_error.message)
                return False
        # File name encodes app, timestamp, and the rolling file number.
        file_name = '{0}-{1}-{2}{3}'.format(self.app_id, self.backup_timestamp,
            self.current_fileno, self.BACKUP_FILE_SUFFIX)
        self.filename = '{0}{1}'.format(self.backup_dir, file_name)
        logging.info("Backup file: {0}".format(self.filename))
        return True

    def backup_source_code(self):
        """ Copies the source code of the app into the backup directory.

        Skips this step if the file is not found.
        """
        sourcefile = '{0}{1}.tar.gz'.format(_SOURCE_LOCATION, self.app_id)
        if os.path.isfile(sourcefile):
            try:
                shutil.copy(sourcefile, self.backup_dir)
                logging.info("Source code has been successfully backed up.")
            except shutil.Error, error:
                # Best effort: a failed source copy does not abort the backup.
                logging.error("Error: {0} while backing up source code. Skipping...".\
                    format(error))
        else:
            logging.error("Couldn't find the source code for this app. Skipping...")

    def run(self):
        """ Starts the main loop of the backup thread.

        Polls for the ZooKeeper backup lock; once acquired, performs the
        backup exactly once, releases the lock, and exits.
        """
        while True:
            logging.debug("Trying to get backup lock.")
            if self.get_backup_lock():
                logging.info("Got the backup lock.")
                self.db_access = appscale_datastore_batch.DatastoreFactory.\
                    getDatastore(self.table)
                self.set_filename()
                if self.source_code:
                    self.backup_source_code()
                self.run_backup()
                try:
                    self.zoo_keeper.release_lock_with_path(zk.DS_BACKUP_LOCK_PATH)
                except zk.ZKTransactionException, zk_exception:
                    logging.error("Unable to release zk lock {0}.".\
                        format(str(zk_exception)))
                break
            else:
                logging.info("Did not get the backup lock. Another instance may be "
                    "running.")
                # Randomized sleep staggers retries across competing instances.
                time.sleep(random.randint(1, self.LOCK_POLL_PERIOD))

    def get_backup_lock(self):
        """ Tries to acquire the lock for a datastore backup.

        Returns:
            True on success, False otherwise.
        """
        return self.zoo_keeper.get_lock_with_path(zk.DS_BACKUP_LOCK_PATH)

    def get_entity_batch(self, first_key, batch_size, start_inclusive):
        """ Gets a batch of entities to operate on.

        Args:
            first_key: The last key from a previous query.
            batch_size: The number of entities to fetch.
            start_inclusive: True if first row should be included, False
                otherwise.
        Returns:
            A list of entities.
        """
        batch = self.db_access.range_query(dbconstants.APP_ENTITY_TABLE,
            dbconstants.APP_ENTITY_SCHEMA, first_key, self.last_key,
            batch_size, start_inclusive=start_inclusive)
        if batch:
            logging.debug("Retrieved entities from {0} to {1}".
                format(batch[0].keys()[0], batch[-1].keys()[0]))
        return batch

    def dump_entity(self, entity):
        """ Dumps the entity content into a backup file.

        Args:
            entity: The entity to be backed up.
        Returns:
            True on success, False otherwise.
        """
        # Roll over to a new backup file when this entity would push the
        # current file past the size cap.
        if self.current_file_size + len(entity) > self.MAX_FILE_SIZE:
            self.current_fileno += 1
            self.set_filename()
            self.current_file_size = 0
        # Open file and write pickled batch.
        try:
            with open(self.filename, 'ab+') as file_object:
                cPickle.dump(entity, file_object, cPickle.HIGHEST_PROTOCOL)
                self.entities_backed_up += 1
                self.current_file_size += len(entity)
        except IOError as io_error:
            logging.error(
                "Encountered IOError while accessing backup file {0}".
                format(self.filename))
            logging.error(io_error.message)
            return False
        except OSError as os_error:
            logging.error(
                "Encountered OSError while accessing backup file {0}".
                format(self.filename))
            logging.error(os_error.message)
            return False
        except Exception as exception:
            logging.error(
                "Encountered an unexpected error while accessing backup file {0}".
                format(self.filename))
            logging.error(exception.message)
            return False
        return True

    def process_entity(self, entity):
        """ Verifies entity, fetches from journal if necessary and calls
        dump_entity.

        Args:
            entity: The entity to be backed up.
        Returns:
            True on success, False otherwise.
        """
        key = entity.keys()[0]
        kind = entity_utils.get_kind_from_entity_key(key)
        # Skip protected and private entities.
        if re.match(self.PROTECTED_KINDS, kind) or\
                re.match(self.PRIVATE_KINDS, kind):
            # Do not skip blob entities.
            if not re.match(self.BLOB_CHUNK_REGEX, kind) and\
                    not re.match(self.BLOB_INFO_REGEX, kind):
                logging.debug("Skipping key: {0}".format(key))
                return False
        one_entity = entity[key][dbconstants.APP_ENTITY_SCHEMA[0]]
        # Tombstones mark deleted entities; nothing to back up.
        if one_entity == dbconstants.TOMBSTONE:
            return False
        success = True
        # Retry the dump until it succeeds, sleeping between attempts.
        while True:
            try:
                if self.dump_entity(one_entity):
                    logging.debug("Backed up key: {0}".format(key))
                    success = True
                else:
                    success = False
            finally:
                if not success:
                    logging.error("Failed to backup entity. Retrying shortly...")
            if success:
                break
            else:
                time.sleep(self.DB_ERROR_PERIOD)
        return success

    def run_backup(self):
        """ Runs the backup process. Loops on the entire dataset and dumps it
        into a file.
        """
        logging.info("Backup started")
        start = time.time()
        first_key = '{0}\x00'.format(self.app_id)
        start_inclusive = True
        entities_remaining = []
        while True:
            try:
                # Fetch batch.
                entities = entities_remaining + self.get_entity_batch(first_key,
                    self.BATCH_SIZE, start_inclusive)
                # NOTE(review): this logs the batch-size cap, not the number
                # of entities actually fetched.
                logging.info("Processing {0} entities".format(self.BATCH_SIZE))
                if not entities:
                    break
                # Loop through entities retrieved and if not to be skipped,
                # process.
                skip = False
                for entity in entities:
                    first_key = entity.keys()[0]
                    kind = entity_utils.get_kind_from_entity_key(first_key)
                    logging.debug("Processing key: {0}".format(first_key))
                    index = 1
                    for skip_kind in self.skip_kinds:
                        if re.match(skip_kind, kind):
                            logging.warn("Skipping entities of kind: {0}".format(skip_kind))
                            skip = True
                            # Advance the cursor past every remaining key of
                            # this kind, and drop the kinds consumed so far
                            # from the skip list.
                            first_key = first_key[:first_key.find(skip_kind)+
                                len(skip_kind)+1] + dbconstants.TERMINATING_STRING
                            self.skip_kinds = self.skip_kinds[index:]
                            break
                        index += 1
                    if skip:
                        break
                    self.process_entity(entity)
                if not skip:
                    first_key = entities[-1].keys()[0]
                start_inclusive = False
            except dbconstants.AppScaleDBConnectionError, connection_error:
                logging.error("Error getting a batch: {0}".format(connection_error))
                time.sleep(self.DB_ERROR_PERIOD)
        del self.db_access
        time_taken = time.time() - start
        logging.info("Backed up {0} entities".format(self.entities_backed_up))
        logging.info("Backup took {0} seconds".format(str(time_taken)))
|
from networkx.drawing.nx_agraph import graphviz_layout |
"""classifier.py: do text classification by SVM on unbalanced data, using sklearn, evaluate the classifier with 5/10 cross validation"""
__author__ = "YuanSun"
from __future__ import division
import sys
import random
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
from sklearn import metrics, svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.cross_validation import train_test_split, KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from imblearn.over_sampling import SMOTE
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelBinarizer
from sklearn.cross_decomposition import CCA
def evaluate(model, X, y, n_folds):
sum_prec = 0
sum_recall = 0
sum_F1 = 0
sum_accu = 0
kf = KFold(len(y), n_folds=n_folds)
for train, test in kf:
# split data
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
sum_prec += metrics.precision_score(y_test, y_predict, pos_label=None, average='weighted')
sum_recall += metrics.recall_score(y_test, y_predict, pos_label=None, average='weighted')
sum_F1 += metrics.f1_score(y_test, y_predict, pos_label=None, average='weighted')
sum_accu += metrics.accuracy_score(y_test, y_predict)
print 'avg_precision =', sum_prec / n_folds
print 'avg_recall =', sum_recall / n_folds
print 'avg_F1 =', sum_F1 / n_folds
print 'avg_accuracy =', sum_accu / n_folds
def plot_confusion_matrix(model, X, y, labels, file_name, category_name, n_folds):
    """Cross-validate `model`, accumulate a confusion matrix over all folds,
    and save it as a PNG heat map.

    Args:
        model: estimator with fit/predict.
        X, y: feature matrix and integer class ids (assumed 1..len(labels)).
        labels: axis tick names, one per class.
        file_name: output PNG path.
        category_name: key selecting the plot title.
        n_folds: number of cross-validation folds.
    """
    titles = {
        'agent': 'Tweets for Agent',
        'agent_f': 'Forum Messages for Agent',
        'step': 'Tweets for Step',
        'step_f': 'Forum Messages for Step',
        'purpose': 'Tweets for Purpose',
        'purpose_f': 'Forum Messages for Purpose',
        'sentiment': 'Tweets for Sentiment',
        'sentiment_f': 'Forum Messages for Sentiment',
    }
    n_labels = len(labels)
    cm = np.zeros([n_labels, n_labels])
    kf = KFold(len(y), n_folds=n_folds)
    for train, test in kf:
        # split data
        X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
        model.fit(X_train, y_train)
        y_predict = model.predict(X_test)
        # Sum the fold-level confusion matrices; class ids are 1-based.
        cm += confusion_matrix(y_test, y_predict, labels=range(1, n_labels + 1))
    plt.figure()
    # BUG FIX: the original if-chain left `tit` undefined for unrecognized
    # categories, crashing later at plt.title; fall back to the raw name.
    tit = titles.get(category_name, category_name)
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    # add numbers in the plot
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - .2, i, c.astype(int), fontsize=14)
    plt.title('Confusion matrix of classifier on ' + tit)
    plt.colorbar()
    tick_marks = range(n_labels)
    plt.xticks(tick_marks, labels)
    plt.yticks(tick_marks, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(file_name, format='png')
def plot_svm(model, X, y, title, file_name):
    """Fit `model` on the first two features of X and save a filled contour
    plot of its decision regions as a PDF.

    Args:
        model: classifier with fit/predict.
        X, y: feature matrix (only columns 0-1 are used) and class labels.
        title: plot title.
        file_name: output PDF path.
    """
    # NOTE: removed the unused local `C = 1.0` from the original.
    # Only the first two features can be shown on a 2-D plot.
    X = X[:, :2]
    model.fit(X, y)
    # Evaluate the classifier on a 100x100 grid spanning the data (+ margin).
    x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
    y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    plt.title(title)
    plt.savefig(file_name, format='pdf')
def main(input_file, input_file_labels, category_name, output_path):
    """Train and evaluate an SVM on the labeled feature CSV, then write
    a confusion-matrix PNG and a decision-region PDF (Python 2).

    Args:
        input_file: CSV whose column 0 is the class id and columns 2+ are
            integer features (column 1 is skipped below).
        input_file_labels: whitespace-separated file of label names.
        category_name: category key used for plot titles and filenames.
        output_path: directory/prefix for the generated plot files.
    """
    # read labels
    df_labels = pd.read_csv(input_file_labels, sep='\s', header=None)
    labels = df_labels.values[0]
    data = pd.read_csv(input_file).values
    # Column 0 holds the class id; features start at column 2.
    X = data[:,2:].astype(np.int)
    y = data[:,0].astype(np.int)
    # SVM model; class_weight='balanced' compensates the unbalanced data.
    modelSVM = svm.SVC(kernel = 'linear', class_weight='balanced')
    print "Evaluating SVM, n_fold=5"
    evaluate(modelSVM, X, y, 5)
    print "Evaluating SVM, n_fold=10"
    evaluate(modelSVM, X, y, 10)
    print "Plotting confusion matrix for SVM ..."
    file_name = output_path + category_name + '-SVM.png'
    plot_confusion_matrix(modelSVM, X, y, labels, file_name, category_name, 5)
    print "Plotting SVM ..."
    file_name = output_path + category_name + '-plot_SVM.pdf'
    plot_svm(modelSVM, X, y, "svm", file_name)
if __name__ == '__main__':
    # CLI: classifier.py <features_csv> <labels_file> <category_name> <output_path>
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
'''
This function fits the spectral correlation to multiple lorentzians.
'''
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
'''
These functions are in the spectral domain.
'''
def fit_with_lorentzian(zeta_in, spectral_corr, params):
    """Fit the spectral correlation with a sum of Lorentzians.

    Args:
        zeta_in: energy-difference (zeta) axis values.
        spectral_corr: measured spectral correlation to fit.
        params: initial guess; 4 entries selects the two-Lorentzian model,
            7 entries the three-Lorentzian model.

    Returns:
        Optimal parameter array on success, False for a bad `params` length.
    """
    # BUG FIX: curve_fit calls the model as f(x, p1, p2, ...) with the
    # parameters unpacked, but two_lorentzian/three_lorentzian take a single
    # params sequence; bridge with a lambda so the fit can actually run.
    if len(params) == 4:
        popt, pconv = curve_fit(lambda x, *p: two_lorentzian(x, p),
                                zeta_in, spectral_corr, p0=params)
    elif len(params) == 7:
        popt, pconv = curve_fit(lambda x, *p: three_lorentzian(x, p),
                                zeta_in, spectral_corr, p0=params)
    else:
        print('Wrong number of parameters!')
        print('4 for two lorentzians and 7 for three lorentzians!')
        return False
    return popt
# return the normalized spectral correlation of two lorentzians
def two_lorentzian(zeta_in, params):
    """Normalized autocorrelation of a two-Lorentzian lineshape.

    Args:
        zeta_in: energy grid on which the lineshape is evaluated.
        params: (E, gamma, A, c) — second-peak position, common linewidth,
            second-peak amplitude, additive offset.

    Returns:
        Spectral correlation normalized to a maximum of 1.
    """
    E, gamma, A, c = params
    half_width_sq = 0.25 * gamma ** 2
    # One Lorentzian at zero plus one (scaled by A) at E.
    lineshape = (1 / (zeta_in ** 2 + half_width_sq)
                 + A / ((zeta_in - E) ** 2 + half_width_sq))
    # The spectral correlation is the autocorrelation of the lineshape.
    corr = np.correlate(lineshape, lineshape, 'full')
    corr = corr / max(corr) + c
    return corr / max(corr)
# return the normalized spectral correlation of three lorentzians
def three_lorentzian(zeta_in, params):
    """Normalized autocorrelation of a three-Lorentzian lineshape.

    Args:
        zeta_in: energy grid on which the lineshape is evaluated.
        params: (E0, E1, gamma, A0, A1, c, d) — side-peak positions, common
            linewidth, side-peak amplitudes, additive offset, overall scale.

    Returns:
        Spectral correlation normalized to max 1 and then scaled by d.
    """
    E0, E1, gamma, A0, A1, c, d = params
    half_width_sq = 0.25 * gamma ** 2
    # Central Lorentzian plus two side peaks at E0 and E1.
    lineshape = (1 / (zeta_in ** 2 + half_width_sq)
                 + A0 / ((zeta_in - E0) ** 2 + half_width_sq)
                 + A1 / ((zeta_in - E1) ** 2 + half_width_sq))
    corr = np.correlate(lineshape, lineshape, 'full')
    corr = corr / max(corr) + c
    return d * corr / max(corr)
'''
These functions are in the time domain.
'''
def fit_with_lorentzian_FFT(path_length_difference_in, interferogram, params):
    """Fit the time-domain PCFS interferogram with the Fourier transform of
    a multi-Lorentzian spectral correlation.

    Args:
        path_length_difference_in: stage path-length differences.
        interferogram: measured interferogram values.
        params: initial guess (4 or 7 entries, see lorentzian_FFT).

    Returns:
        The optimal parameter array found by curve_fit.
    """
    # BUG FIX: curve_fit unpacks the parameters into separate arguments,
    # but lorentzian_FFT expects them as one sequence; bridge with a lambda.
    popt, pconv = curve_fit(lambda x, *p: lorentzian_FFT(x, p),
                            path_length_difference_in, interferogram,
                            p0=params)
    return popt
# return the Fourier transformed interferogram of the lorentzians
def lorentzian_FFT(path_length_difference, params):
    """Interferogram (|inverse FFT|) of a multi-Lorentzian spectral
    correlation, for the span covered by the given stage positions.

    Args:
        path_length_difference: stage positions; only their min/max span is
            used to build the frequency grid.
        params: 4 entries selects the two-Lorentzian model, 7 the
            three-Lorentzian model.

    Returns:
        Absolute value of the shifted inverse FFT of the spectral
        correlation, or False for a bad parameter count.
    """
    # some constants
    eV2cm = 8065.54429
    cm2eV = 1 / eV2cm
    # create a zeta_eV according to the input path_length_difference
    N = 4097  # number of grids we generate
    span = max(path_length_difference) - min(path_length_difference)
    delta = span / (N - 1)
    zeta_eV = np.fft.fftshift(np.fft.fftfreq(N, delta)) * cm2eV * 1000  # in meV
    # the spectral correlation using the params given on the zeta_eV
    if len(params) == 4:
        spectral_corr = two_lorentzian(zeta_eV, params)
    elif len(params) == 7:
        spectral_corr = three_lorentzian(zeta_eV, params)
    else:
        print('Wrong number of parameters!')
        print('4 for two lorentzians and 7 for three lorentzians!')
        return False
    # then FFT the spectral_corr back to the Fourier (time) domain
    return np.abs(np.fft.fftshift(np.fft.ifft(spectral_corr)))
def do_fft(ys, dw):
    """FFT of `ys` scaled by the grid spacing `dw`, with the conjugate axis
    returned in ascending order.

    Returns:
        (ts, fys): sorted frequency-axis values and matching transform values.
    """
    transformed = np.fft.fft(ys) * dw
    axis = np.fft.fftfreq(ys.size, d=dw / (2 * np.pi))
    order = np.argsort(axis)
    return axis[order], transformed[order]
'''
The autocorrelation of a spectrum (the spectral correlation) is the Fourier
transform of the absolute square of the Fourier transform of the spectrum.
Thus the absolute square of the Fourier transform of the spectrum is exactly
the PCFS interferogram in the time domain, so we only need to fit that
absolute square to the measured PCFS interferogram.
'''
def lorentzian_FT(t, gamma, w):
    """Fourier transform of a Lorentzian line: a complex oscillation at
    frequency `w` decaying in |t| at rate gamma/2."""
    phase = -1j * w * t
    decay = -0.5 * gamma * np.abs(t)
    return np.exp(phase + decay)
def sum_lorentzian(ks, ws, gammas, cs):
    """Weighted sum of normalized Lorentzians at `ks`.

    Args:
        ks: evaluation grid.
        ws: peak centers.
        gammas: full widths.
        cs: peak weights.

    Raises:
        ValueError: if the three parameter lists differ in length.
    """
    if not (len(ws) == len(gammas) == len(cs)):
        raise ValueError('The length of gammas, ws, and cs should be equal')
    total = 0
    for gamma, w, c in zip(gammas, ws, cs):
        total = total + c * 0.5 * gamma / np.pi / ((ks - w) ** 2 + 0.25 * gamma ** 2)
    return total
def sq_fft_sum_lorentzian(t, ws, gammas, cs):
    """PCFS interferogram model: |sum of Lorentzian Fourier transforms|^2.

    Args:
        t: time (path-length difference) values.
        ws, gammas, cs: peak centers, widths, and weights.

    Raises:
        ValueError: if the three parameter lists differ in length.
    """
    if not (len(ws) == len(gammas) == len(cs)):
        raise ValueError('The length of gammas, ws, and cs should be equal')
    total = 0
    for gamma, w, c in zip(gammas, ws, cs):
        total = total + c * lorentzian_FT(t, gamma, w)
    return np.abs(total) ** 2
def get_params(u):
    """Split a flat fit-parameter vector into (ws, gammas, cs).

    The vector is laid out as [omegas[1:], gammas, cs]; omega_0 = 0 is
    implied and is prepended here, and all entries are forced positive.

    Args:
        u: flat sequence of length 3*n - 1.

    Returns:
        Tuple (ws, gammas, cs), each of length n.

    Raises:
        ValueError: if len(u) is not of the form 3*n - 1.
    """
    if len(u) % 3 != 2:
        # BUG FIX: replaced an uninformative transliterated error message
        # with one describing the expected layout.
        raise ValueError('expected a parameter vector of length 3*n - 1: '
                         '[omegas[1:], gammas, cs] with omega_0 = 0 implied')
    # [omegas, gammas, cs]
    u = np.abs(u)
    # The first center frequency is pinned at zero.
    u = np.concatenate([[0], u])
    n = len(u) // 3
    ws = u[:n]
    gammas = u[n:2 * n]
    cs = u[2 * n:]
    return ws, gammas, cs
def fit_pcfs_wrapper(t, *u):
    """curve_fit adapter: unpack the flat parameter vector via get_params and
    evaluate the squared-FFT Lorentzian-sum interferogram model at `t`."""
    ws, gammas, cs = get_params(u)
    return sq_fft_sum_lorentzian(t, ws, gammas, cs)
def monoexp(x, a, b):
    """Mono-exponential model: b * exp(a * x)."""
    return np.exp(a * x) * b
def find_tau(x, y):
    """Return the x at which y is closest to 1/e (a crude decay constant)."""
    closest = np.argmin(np.abs(y - np.exp(-1)))
    return x[closest]
def square(t, *u):
    """Interferogram model over a flat parameter vector laid out as
    [gammas, ws, cs] (note: a different order than get_params uses)."""
    n = int(len(u) // 3)
    gammas = np.abs(u[:n])
    ws = u[n:2 * n]
    cs = np.abs(u[2 * n:])
    total = 0
    for gamma, w, c in zip(gammas, ws, cs):
        total = total + c * lorentzian_FT(t, gamma, w)
    return np.abs(total) ** 2
if __name__ == '__main__':
    # Load the measured PCFS interferogram for one quantum dot.
    path = 'C:\\Users\\weiwei\\Downloads\\DotE_run_one'
    dotID = path.split('\\')[-1]
    ts = np.loadtxt(path+'\\path_length_time.dat')
    ys = np.loadtxt(path+'\\interferogram.dat')
    # Trim unreliable points from both ends of the trace before fitting.
    ind = 7
    ind_end = -10
    ts_fit = ts[ind:ind_end]
    ys_fit = ys[ind:ind_end]
    # plt.plot(ts_fit,ys_fit,'-x')
    # plt.show()
    # Initial guess: 2 side peaks -> 3*2+2 = 8 free parameters, laid out as
    # [omegas[1:], gammas, cs] (omega_0 = 0 is implied by get_params).
    nsidepeak = 2
    us = np.zeros(3*nsidepeak+2)
    us[:nsidepeak] = [0.5,1]
    us[nsidepeak:-nsidepeak-1]=[0.04,0.04,3]
    us[-nsidepeak-1:] = [0.1,0.1,0.1]
    res = curve_fit(fit_pcfs_wrapper, ts_fit, ys_fit, p0=us)
    ws, gammas, cs = get_params(res[0])
    print('ws', ws)
    delta_w = ws[1:]-ws[0]
    delta_w = delta_w/2/np.pi*4.13567 # convert energy difference to meV
    print('delta w', delta_w)
    print('gamma', gammas)
    print('tau',1/gammas[0])
    print('c', cs)
    print('res', res[1])
    # grid = np.linspace(0,100,1000)
    # ys_total = sq_fft_sum_lorentzian(grid,ws,gammas,cs)
    # ys_coherent = ys_total
    # ys_ind = ys_coherent[1:-1]
    # ts_ind = grid[1:-1]
    # ys_after = ys_ind-ys_coherent[2:]
    # ys_before = ys_ind-ys_coherent[:-2]
    # ts_decay = ts_ind[(ys_after>0)*(ys_before>0)]
    # # ts_decay = np.concatenate([[0],ts_decay])
    # ys_decay = sq_fft_sum_lorentzian(ts_decay,ws,gammas,cs)
    # plt.semilogy(ts_decay,ys_decay,'x')
    # plt.semilogy(ts[ind:], sq_fft_sum_lorentzian(ts[ind:],ws,gammas,cs), "-", c='orange')
    # plt.show()
    #
    # decay_p = curve_fit(monoexp, ts_decay,ys_decay,[-0.2,1])[0]
    # ind = 3
    # Plot the fitted model over the raw interferogram data.
    plt.plot(ts[ind:], sq_fft_sum_lorentzian(ts[ind:],ws,gammas,cs), "-",c = 'orange',lw = 2, label = r'Fitted with three lorentzians $\Delta =$'+str(delta_w[0])[:5]+' mev')#+' and '+str(delta_w[1])[:6]+' meV')
    plt.plot(ts[ind:], ys[ind:], 'x',c = 'grey',label = 'Raw data')
    # plt.plot(ts[ind:],monoexp(ts[ind:],*decay_p), '--',c = 'r',label = r'Envelope decay $T_2/2 = $' + str(-1/decay_p[0])[:4]+' ps')
    # plt.xlim([0,60])
    plt.xlabel('Path length difference [ps]')
    plt.ylabel(r'$g^{(2)}_{cross} - g^2_{auto}$')
    plt.legend()
    plt.title(dotID+' PCFS interferogram averaged')
    plt.show()
    # Reconstruct and plot the fitted spectral correlation in energy space.
    ks = np.linspace(-2.5,2.5,2**11)
    y = sum_lorentzian(ks,ws,gammas,cs)/max(sum_lorentzian(ks,ws,gammas,cs))
    y =np.abs(np.fft.fftshift(np.fft.fft(np.abs(np.fft.fft(y))**2)))
    y = y/max(y)
    x = ks/2/np.pi*4.13567 # convert energy difference to meV
    plt.plot(x,y)
    plt.ylabel(r'Normalized $p(\zeta)$')
    plt.xlabel(r'$\zeta$ [meV]')
    plt.title(dotID+' Fitted Spectral Correlation')
    plt.show()
    # # play in the spectral domain
    #
    # mirror_interf = np.concatenate([ys[::-1][:-ind-1],ys[ind:]])
    # mirror_time = np.concatenate([-ts[::-1][:-ind-1],ts[ind:]])
    # mirror_stage_pos = mirror_time*2.997/100 # in cm
    # n = len(mirror_stage_pos)
    # delta = (max(mirror_stage_pos)-min(mirror_stage_pos))/(n-1)
    # ks = np.linspace(min(mirror_stage_pos),max(mirror_stage_pos),n)
    # #some constants
    # eV2cm = 8065.54429
    # cm2eV = 1 / eV2cm
    # zeta_eV = np.fft.fftshift(np.fft.fftfreq(n, delta)) * cm2eV * 1000 # in meV
    # spectral_correlation = np.abs(np.fft.fftshift(np.fft.fft(mirror_interf)))
    # spectral_correlation = spectral_correlation/max(spectral_correlation)
    # y = sum_lorentzian(ks,ws,gammas,cs)/max(sum_lorentzian(ks,ws,gammas,cs))
    # y =np.abs(np.fft.fftshift(np.fft.fft(np.abs(np.fft.fft(y))**2)))
    # y = y/max(y)
    # x = ks/2/np.pi*4.13567
    # plt.plot(zeta_eV,spectral_correlation,'-')
    # # plt.plot(x,y,'r')
    # plt.xlim(-2,2)
    # plt.ylabel(r'Normalized $p(\zeta)$')
    # plt.xlabel(r'$\zeta$ [meV]')
    # plt.title(dotID+' Spectral Correlation')
    # plt.show()
    # plt.show()
    # u = [0,0.1,1,0.06,0.06,1,1,0.1,0.1]
    # n = 3
    # us = curve_fit(square,zeta_eV,spectral_correlation,p0=u)[0]
    # gammas = us[:n]
    # ws = us[n:2*n]
    # cs = us[2*n:]
    # print('ws',ws)
    # delta_w = ws[1:]-ws[0]
    # delta_w = delta_w/2/np.pi*4.13567 # convert energy difference to meV
    # print('delta w', delta_w)
    # print('gamma', gammas)
    # print('c', cs)
    # print('res', res[1])
    # plt.plot(zeta_eV,square(zeta_eV,*us))
    # plt.plot(zeta_eV,spectral_correlation)
    # plt.show()
|
import pickle
import sys
# Very large compute graphs require that Python recurses the pointer chain
# to depths upward of 10000 (Python default is only 1000)
# (pickle walks the graph recursively when saving/loading below).
sys.setrecursionlimit(50000)
def saveGraph(graph, path):
    """Pickle `graph` to `path`.

    Warns (but still proceeds) when the filename lacks the conventional
    .ccg extension.
    """
    extension = path.split('.')[-1]
    if extension != 'ccg':
        print('WARN: Catamount graphs should save to .ccg filenames')
    # Hacky way to store graphs for now
    # TODO: Change this to a protobuf implementation
    with open(path, 'wb') as outfile:
        pickle.dump(graph, outfile)
def loadGraph(path):
    """Unpickle and return a graph previously written by saveGraph.

    Warns (but still proceeds) when the filename lacks the conventional
    .ccg extension.
    """
    if path.split('.')[-1] != 'ccg':
        print('WARN: Catamount graphs should have .ccg filenames')
    # Hacky way to load pickled graphs for now
    with open(path, 'rb') as infile:
        return pickle.load(infile)
|
import os
from xu.compa.xhash import Xash, XashList
def path2Xash(path):
    """Decompose a filesystem path into Xash(name, extension, folder)."""
    folder, base = os.path.split(path)
    name, ext = os.path.splitext(base)
    return Xash(name, ext, folder)
def createXash(name: str, categoryId: str, tagList: dict, ext=""):
    """Build a xash string "<categoryId><name>--<tag>--<tag>...<ext>".

    Each tag is its key joined with the value(s) by '#'.
    NOTE(review): for list values the key and the first item are concatenated
    with no '#' between them (unlike the scalar branch) — confirm whether
    that asymmetry is intended.
    """
    tags = []
    for key, value in tagList.items():
        if isinstance(value, list):
            tags.append(key + "#".join(value))
        else:
            tags.append(key + "#" + value)
    prefix = categoryId + name + "--" + "--".join(tags)
    return prefix + ext if ext != "" else prefix
def newXashId(category: str, xashList: XashList = None):
    """Generate a bracketed id of the form "[<category>#<number>]".

    Without a list the number is 1; otherwise it is the count of existing
    entries for this category in `xashList` (or 1 when there are none).
    """
    if xashList is None:
        return '[' + category + '#1]'
    existing = xashList.findWithCategory(category)
    number = str(len(existing)) if len(existing) > 0 else '1'
    return "[%s#%s]" % (category, number)
|
# coding = utf-8
# time : 2017/12/3 22:50
# __author__ = 'lzrture'
import requests
from lxml import etree
import time
from retrying import retry
import xlwt
class CountyLeader:
    """Scraper for county-level leader profiles on ldzl.people.com.cn,
    aggregating the results into an Excel sheet via xlwt."""

    def __init__(self):
        # Desktop Chrome UA so the site serves the regular HTML pages.
        self.headers = {
            "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/62.0.3202.94 Safari/537.36"
        }

    def get_province_urllist(self):
        """Build the list of per-province county-leader index URLs."""
        pro_start_url = "http://ldzl.people.com.cn/dfzlk/front/xian{}.htm"
        print("*"*100)
        pro_url_list = []
        # Page ids of each province's index page.
        # 1357 (Shandong) was removed: etree.HTML kept raising on that page.
        page_nums = (35,219,351,465,580,650,810,926,1028,1150,1245,1515,1692,
            1809,1946,2089,2213,2279,2482,2580,2726,2807,2925,3026,3078,3106)
        for num in page_nums:
            pro_url_list.append(pro_start_url.format(num))
        return pro_url_list

    @retry(stop_max_attempt_number=3)
    def parse_province_urllist(self,pro_url):
        """Fetch one province index page (retried up to 3 times).

        Returns:
            Raw HTML bytes of the page.
        """
        response = requests.get(url=pro_url,headers=self.headers,timeout=5)
        # An AssertionError on a bad status triggers the @retry decorator.
        assert response.status_code == 200
        pro_html = response.content
        # print(pro_html)
        return pro_html

    def parse_province_content(self,pro_html_content):
        """Extract every leader-profile URL from a province index page."""
        le_start_url = "http://ldzl.people.com.cn/dfzlk/front/{}"
        all_leader_url = []
        pro_content =etree.HTML(pro_html_content)
        leader_url_list = pro_content.xpath("//div[@class='zlk_list']/ol/li/em/a/@href|//div[@class='zlk_list']/ol/li/i/a/@href")
        print("当前省份共有%d位领导" %len(leader_url_list))
        print("正在获取领导人信息")
        [all_leader_url.append(le_start_url.format(leurl)) for leurl in leader_url_list]
        return all_leader_url

    @retry(stop_max_attempt_number=6)
    def get_leader_content(self,all_leader_url):
        """Fetch each leader profile page and extract its fields.

        Returns:
            List of dicts with title/name/gender/birth/address/degree/profile
            (fields default to None when missing on the page).
        """
        leader_list = []
        for leader_url in all_leader_url:
            leader = {}
            leader_response = requests.get(url=leader_url,headers=self.headers,timeout=30)
            # assert leader_response.status_code == 200
            leader_content = leader_response.content
            try:
                leader_html = etree.HTML(leader_content)
            except Exception as e:
                print(e)
                # leader_html = None
            # NOTE(review): if etree.HTML raises, `leader_html` keeps the
            # previous iteration's value (or is unbound on the first one) —
            # the following xpath calls would then use stale data or raise.
            leader['title'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/span/text()")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/span/text()")) > 0 else None
            # print(leader['title'])
            leader['name'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd//em/text()")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd//em/text()")) > 0 else None
            # print(leader['name'])
            leader['gender'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[1]")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[1]")) > 0 else None
            # print(leader['gender'])
            leader['birth'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[3]")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[3]")) > 0 else None
            # print(leader['birth'])
            leader['address'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[5]")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[5]")) > 0 else None
            # print(leader['address'])
            leader['degree'] = leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[7]")[0] \
                if len(leader_html.xpath("//div[@class='fl p2j_text_center title_2j']//li//dd/p/text()[7]")) > 0 else None
            # print(leader['degree'])
            leader['profile'] = leader_html.xpath("//div[@class='p2j_text']/p/text()")\
                if len(leader_html.xpath("//div[@class='p2j_text']/p/text()")) > 0 else None
            leader_list.append(leader)
            # time.sleep(1)
        return leader_list

    def save_leader_content(self,leader_list):
        """Append one row per leader to the worksheet (self.ws), advancing
        the running row counter self.file_num."""
        print(leader_list)
        for people in leader_list:
            self.ws.write(self.file_num,0,people["name"])
            self.ws.write(self.file_num,1,people["title"])
            self.ws.write(self.file_num,2,people["gender"])
            self.ws.write(self.file_num,3,people["birth"])
            self.ws.write(self.file_num,4,people["address"])
            self.ws.write(self.file_num,5,people["degree"])
            self.ws.write(self.file_num,6,people["profile"])
            self.file_num = self.file_num + 1
        # with open("领导人简历.csv", "a", encoding="utf-8") as f:
        #     for temp in leader_list:
        #         f.write(json.dumps(temp,ensure_ascii=False))
        #         f.write('\n')

    def run(self):
        # 1. Build the list of province-level URLs.
        print(">>>>>>>>>>>>构建省份链接<<<<<<<<<<<<<")
        pro_url_list = self.get_province_urllist()
        self.wb = xlwt.Workbook()
        self.file_num = 0
        self.ws = self.wb.add_sheet('leader', cell_overwrite_ok=True)
        for pro_url in pro_url_list:
            print("正在解析:%s" % pro_url)
            pro_html_content = self.parse_province_urllist(pro_url) # 2. Fetch the province index page.
            time.sleep(2)
            all_leader_url = self.parse_province_content(pro_html_content) # 3. Extract each leader's profile URL.
            leader_content = self.get_leader_content(all_leader_url) # 4. Fetch and parse each profile.
            self.save_leader_content(leader_content) # 5. Save the rows.
            # Save after every province so a crash loses at most one province.
            self.wb.save("456.xls")
if __name__ == '__main__':
    # Scrape every province and write the aggregated sheet to 456.xls.
    county = CountyLeader()
    county.run()
"""Module containing functions for calling sambamba."""
from .util import run_command
def sambamba_sort(input_bam,
                  output_bam,
                  threads=1,
                  tmp_dir=None,
                  log_path=None):
    """Sorts a bam file using sambamba.

    Parameters
    ----------
    input_bam : Path
        Path to the input (unsorted) bam file.
    output_bam : Path
        Path for the output (sorted) bam file.
    threads : int
        Number of threads to use.
    tmp_dir : Path
        Temporary directory to use; defaults to '_tmp' next to the output.
    log_path : Path
        Path to the log file.
    """
    # Default the scratch directory to a sibling of the output file.
    if tmp_dir is None:
        tmp_dir = output_bam.parent / '_tmp'
    command = [
        'sambamba', 'sort',
        '-o', str(output_bam),
        '--tmpdir=' + str(tmp_dir),
        '-t', str(threads),
        str(input_bam),
    ]
    run_command(args=command, log_path=log_path)
|
# Read six numbers from stdin and count how many are strictly positive.
lista = []
positivos = 0
for _ in range(6):
    valor = float(input())
    lista.append(valor)
    if valor > 0:
        positivos += 1
print(f"{positivos} valores positivos")
|
from networkx.algorithms.operators.all import *
from networkx.algorithms.operators.binary import *
from networkx.algorithms.operators.product import *
from networkx.algorithms.operators.unary import *
|
from tsutils.menu.closable_embed_base import ClosableEmbedMenuBase
from padinfo.view.awakening_help import AwakeningHelpView
from padinfo.view.experience_curve import ExperienceCurveView
from padinfo.view.id_traceback import IdTracebackView
class ClosableEmbedMenu(ClosableEmbedMenuBase):
    # Maps each view's VIEW_TYPE identifier to the view class this menu can
    # render; the base class dispatches on this mapping.
    view_types = {
        AwakeningHelpView.VIEW_TYPE: AwakeningHelpView,
        IdTracebackView.VIEW_TYPE: IdTracebackView,
        ExperienceCurveView.VIEW_TYPE: ExperienceCurveView,
    }
|
# coding: utf-8
import logging
import os
# Cardpay sandbox endpoint and terminal settings, each overridable via an
# environment variable of the same name.
# SECURITY NOTE(review): terminal codes and passwords are hardcoded as
# defaults below; acceptable only if these are shared sandbox credentials —
# confirm they are not production secrets.
CARDPAY_API_URL = os.getenv('CARDPAY_API_URL', 'https://sandbox.cardpay.com')
TERMINAL_CURRENCY = os.getenv('TERMINAL_CURRENCY', 'USD')
PAYMENTPAGE_TERMINAL_CODE = os.getenv('PAYMENTPAGE_TERMINAL_CODE', '18397')
PAYMENTPAGE_PASSWORD = os.getenv('PAYMENTPAGE_PASSWORD', 'FpK2cy143POj')
GATEWAY_TERMINAL_CODE = os.getenv('GATEWAY_TERMINAL_CODE', '18833')
GATEWAY_PASSWORD = os.getenv('GATEWAY_PASSWORD', 'pzQf529Wa0AV')
GATEWAY_POSTPONED_TERMINAL_CODE = os.getenv("GATEWAY_POSTPONED_TERMINAL_CODE", "18399")
GATEWAY_POSTPONED_PASSWORD = os.getenv("GATEWAY_POSTPONED_PASSWORD", "jehE149L7bHU")
EMAILS_DOMAIN = os.getenv("EMAILS_DOMAIN", "mailinator.com")
# Any non-empty DEBUG_MODE value enables INFO-level logging below.
DEBUG_MODE = bool(os.getenv('DEBUG_MODE', ''))
# httpbin echo endpoints used as payment-flow redirect targets in tests.
SUCCESS_URL = 'https://httpbin.org/get?result=success'
DECLINE_URL = 'https://httpbin.org/get?result=decline'
CANCEL_URL = 'https://httpbin.org/get?result=cancel'
INPROCESS_URL = 'https://httpbin.org/get?result=inprocess'
if DEBUG_MODE:
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def create_logger(name):
    """Return the logging.Logger registered under `name`."""
    return logging.getLogger(name)
|
import autograd.numpy as np
import scipy.interpolate
from autograd import primitive
from scipy.sparse import csr_matrix
import wh
# Euler-Mascheroni constant (kept for reference; not used below).
__euler_mascheroni = 0.57721566490153286060651209008240243104215933593992
__sparse_fmt = csr_matrix
__interp1d_kind = 'linear'
# Keep every k-th tabulated column; 1 keeps the full table.
__gtilde_subsample = 1
__gtilde_pickle_fn = 'VBHP/gtilde.pkl'
__gtilde_csv_fn = 'VBHP/gtilde.csv'
# Precomputed table; rows are: -log(-z) grid, gtilde values, gtilde gradients.
_gtilde_table = wh.load(__gtilde_pickle_fn)
# Subsample the columns but always keep the last grid point.
isub = list(range(0, _gtilde_table.shape[1]-1, __gtilde_subsample)) + [_gtilde_table.shape[1]-1]
_gtilde_table = _gtilde_table[:,isub]
_gtilde_neglogz, _gtilde_value, _grad_gtilde_value =_gtilde_table
assert not np.isinf(min(_gtilde_neglogz))
# Limit values as z -> 0 (i.e. -log(-z) -> -inf): gtilde -> 0, gradient -> 2.
# NOTE(review): the gradient limit of 2 is asserted against the table below;
# it is taken on trust from how the table was generated.
_gtilde_neglogz_0, _gtilde_value_0, _grad_gtilde_value_0 = -np.inf, 0.0, 2
_gtilde_neglogz_range = (min(_gtilde_neglogz),max(_gtilde_neglogz))
# The grid must start at its minimum and agree with the z -> 0 limits.
imin = np.argmin(_gtilde_neglogz)
assert imin == 0
assert np.allclose(_gtilde_value_0, _gtilde_value[imin])
assert np.allclose(_grad_gtilde_value_0, _grad_gtilde_value[imin])
# Interpolators over -log(-z); queries below the grid get the limit value,
# queries above return NaN (caught by asserts in gtilde/grad_gtilde).
_gtilde_interp = scipy.interpolate.interp1d(_gtilde_neglogz, _gtilde_value, fill_value=(_gtilde_value_0, np.nan), bounds_error=False, kind=__interp1d_kind)
_grad_gtilde_interp = scipy.interpolate.interp1d(_gtilde_neglogz, _grad_gtilde_value, fill_value=(_grad_gtilde_value_0, np.nan), bounds_error=False, kind=__interp1d_kind)
def gtilde(z):
    """Evaluate gtilde at non-positive z by interpolating the precomputed table."""
    assert isinstance(z, np.ndarray)
    assert np.all(z <= 0.0)
    neglog = np.log(-z)
    assert np.all(neglog <= _gtilde_neglogz_range[1]), (min(neglog), max(neglog), _gtilde_neglogz_range)
    out = _gtilde_interp(neglog)
    # Patch the points the interpolator cannot represent: z == 0 takes the
    # limit value, and points below the tabulated range are zero.
    out[z == 0] = _gtilde_value_0
    out[neglog < _gtilde_neglogz[0]] = 0.0
    assert np.all(~np.isnan(out).flatten())
    return out
def grad_gtilde(z):
    """Evaluate d(gtilde)/dz at non-positive z via the gradient interpolator."""
    assert np.all(z <= 0.0)
    neglog = np.log(-z)
    assert np.all(neglog <= _gtilde_neglogz_range[1]), (min(neglog), max(neglog), _gtilde_neglogz_range)
    out = _grad_gtilde_interp(neglog)
    # z == 0 maps to -inf on the neglog grid; substitute the limit value.
    out[z == 0] = _grad_gtilde_value_0
    assert not np.any(np.isnan(out).flatten()), (np.min(z), np.max(z), np.min(neglog), np.max(neglog))
    return out
@primitive
def gtilde_ad(z):
    """Autograd-aware wrapper around gtilde; its gradient is registered below."""
    return gtilde(z)
def make_grad_gtilde_ad(ans, z):
    # Gradient factory for autograd's old defgrad API: return a function that
    # multiplies the incoming adjoint by d(gtilde)/dz evaluated at z.
    def gradient_product(g):
        return g * grad_gtilde(z)
    return gradient_product
gtilde_ad.defgrad(make_grad_gtilde_ad)
|
# coding=utf-8
import unittest,time
from selenium import webdriver
from shadon.log import logger
from shadon.global_control import Global_control
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
class Login(unittest.TestCase):
    '''Login tests for eelly.com (衣联网), desktop and WAP sites.'''
    def setUp(self):
        '''Start a headless Chrome session and reset per-test state.'''
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        self.driver = webdriver.Chrome(executable_path='C:\python36\Scripts\chromedriver.exe',options=chrome_options)
        # self.driver = webdriver.Chrome(options=chrome_options)  # headless Chrome without an explicit driver path
        self.base_url = "https://www.eelly.com/"
        self.driver.implicitly_wait(10)
        # Tracks whether the test reached its assertion; if it never did,
        # tearDown marks the run as failed and the system mails the owners.
        self.judge = False
        # Instance of the imported helper class so its methods can be called.
        self.Ins = Global_control()
    def login(self):
        '''Log in on the desktop site and assert the landing-page title.'''
        logger.info('开始调用login方法')
        self.driver.get(self.base_url)
        # self.driver.implicitly_wait(2)
        self.driver.fullscreen_window()
        logger.info(self.driver.title)
        # driver.find_element_by_xpath(".//*[@id='js_login_info']/div[2]/a[1]").click()
        self.driver.find_element_by_xpath(".//*[@class='login-link-wrap']/a[1]").click()
        self.driver.find_element_by_name("account_login").send_keys("yl_7d912872")
        self.driver.find_element_by_name("password").send_keys("1Q2W3e4rzz.")
        self.driver.find_element_by_name("submit_login").submit()
        logger.info("login......")
        WebDriverWait(self.driver,30,1).until(EC.visibility_of_element_located((By.XPATH,".//*[@id='sitenav']/div/div[2]/a")))
        # time.sleep(3)
        try:
            self.judge = True
            if self.driver.title !='衣联网,服装批发市场新的领航者,广州十三行,杭州四季青2018新款品牌男装女装批发':
                logger.info(self.driver.title)
            self.assertEqual(u"衣联网,服装批发市场新的领航者,广州十三行,杭州四季青2018新款品牌男装女装批发", self.driver.title)
            logger.info('pc 登陆成功')
        except BaseException:
            logger.info("断言失败")
            Global_control.Run_result = False
            # Make sure the screenshot directory exists before saving into it.
            self.Ins.screen_shot()
            self.driver.get_screenshot_as_file(Global_control.Screen_path + "/" + "衣联网登录断言失败"+ ".png")
            # Bug fix: `raise "<str>"` is a TypeError in Python 3 — wrap the
            # original message in a real exception instead.
            raise AssertionError("测试出现错误,需要发送邮件")
    def tearDown(self):
        '''Close the browser; flag the run as failed if no assertion was reached.'''
        if self.judge != True:
            logger.info("login test is False")
            # Extra guard: the script may have died before reaching its
            # assertion without the framework raising anything.
            Global_control.Run_result = False
            # Make sure the screenshot directory exists before saving into it.
            self.Ins.screen_shot()
            self.driver.get_screenshot_as_file(Global_control.Screen_path + "/" + "衣联网登录失败"+ ".png")
        self.driver.quit()
    def test_demo(self):
        # Entry point for the desktop flow; page methods are called in order.
        '''login》登录衣联网成功'''
        Login.login(self)
    def test_wap(self):
        '''Log in on the WAP (mobile) site and check the member page appears.'''
        logger.info('开始wap 站点的login方法')
        self.driver.get("https://m.eelly.com/member/login.html?returnUrl=%252Fmember%252Fview.html")
        logger.info(self.driver.title)
        self.driver.find_element_by_name("LoginForm[username]").send_keys("yl_7d912872")
        self.driver.find_element_by_name("LoginForm[password]").send_keys("1Q2W3e4rzz.")
        self.driver.find_element_by_id("J_submit").click()
        time.sleep(2)
        logger.info(self.driver.title)
        try:
            self.driver.find_element_by_id("J_elyMobilePage").is_displayed()
            logger.info("wap 登陆成功:")
            self.judge = True
        except BaseException:
            logger.info("wap 登陆失败")
if __name__ == "__main__":
    # Bug fix: loadTestsFromTestCase() needs the TestCase class itself;
    # passing the unbound method raised TypeError. Load the single intended
    # test by name instead.
    suite = unittest.TestLoader().loadTestsFromName("test_demo", Login)
    unittest.TextTestRunner(verbosity=2).run(suite)
from aoc_wim.aoc2019 import q24
# 5x5 starting grid for the AoC 2019 day-24 example ('#' = bug, '.' = empty).
test_bugs = """\
....#
#..#.
#..##
..#..
#....
"""
def test_example_b():
    # Part B (recursive grids): after 10 minutes the example holds 99 bugs.
    result = q24.part_b(test_bugs, t=10)
    assert result == 99
|
import asyncio
import datetime
import logging
import ssl
from typing import List
import aiohttp
import pytz
from bitpanda import enums
from bitpanda.Pair import Pair
from bitpanda.subscriptions import Subscription, SubscriptionMgr
logger = logging.getLogger(__name__)
class BitpandaClient(object):
    """Async REST + websocket client for the Bitpanda exchange.

    Every REST helper returns ``{"status_code": int, "response": str}``;
    callers are responsible for JSON-decoding the response text. The
    ``account/*`` endpoints require the ``api_key`` given at construction.
    """
    REST_API_URI = "https://api.exchange.bitpanda.com/public/v1/"
    def __init__(self, certificate_path: str = None, api_key: str = None, api_trace_log: bool = False) -> None:
        """Store credentials and build the TLS context; no I/O happens here."""
        self.api_key = api_key
        # When True, aiohttp request tracing is attached to the session.
        self.api_trace_log = api_trace_log
        # Created lazily by _get_rest_session().
        self.rest_session = None
        # TLS context pinned to the CA bundle at certificate_path.
        self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        self.ssl_context.load_verify_locations(certificate_path)
        # Groups of subscriptions; each group gets its own SubscriptionMgr.
        self.subscription_sets = []
    async def get_currencies(self) -> dict:
        """List currencies supported by the exchange (public)."""
        return await self._create_get("currencies")
    async def get_account_balances(self) -> dict:
        """Fetch account balances (private)."""
        return await self._create_get("account/balances", headers=self._get_header_api_key())
    async def get_account_fees(self) -> dict:
        """Fetch the account fee tier (private)."""
        return await self._create_get("account/fees", headers=self._get_header_api_key())
    async def get_account_orders(self, from_timestamp: datetime.datetime = None, to_timestamp: datetime.datetime = None,
                                 pair: Pair = None, with_cancelled_and_rejected: str = None,
                                 with_just_filled_inactive: str = None,
                                 max_page_size: str = None, cursor: str = None) -> dict:
        """Fetch account orders, optionally filtered by time window and pair."""
        params = BitpandaClient._clean_request_params({
            "from": from_timestamp,
            "to": to_timestamp,
            "instrument_code": pair,
            "with_cancelled_and_rejected": with_cancelled_and_rejected,
            "with_just_filled_inactive": with_just_filled_inactive,
            "max_page_size": max_page_size,
            "cursor": cursor,
        })
        # Timestamps are sent as UTC ISO-8601; the key is simply absent when
        # the caller did not supply one (EAFP).
        try:
            params["from"] = params["from"].astimezone(pytz.utc).isoformat()
        except KeyError:
            pass
        try:
            params["to"] = params["to"].astimezone(pytz.utc).isoformat()
        except KeyError:
            pass
        return await self._create_get("account/orders", params=params, headers=self._get_header_api_key())
    async def get_account_order(self, order_id: str) -> dict:
        """Fetch a single order by id (private)."""
        return await self._create_get("account/orders/" + order_id, headers=self._get_header_api_key())
    async def get_account_order_trades(self, order_id: str) -> dict:
        """Fetch the trades belonging to one order (private)."""
        return await self._create_get("account/orders/" + order_id + "/trades", headers=self._get_header_api_key())
    async def get_account_trades(self, from_timestamp: datetime.datetime = None, to_timestamp: datetime.datetime = None,
                                 pair: Pair = None, max_page_size: str = None, cursor: str = None) -> dict:
        """Fetch account trades, optionally filtered by time window and pair."""
        params = BitpandaClient._clean_request_params({
            "from": from_timestamp,
            "to": to_timestamp,
            "instrument_code": pair,
            "max_page_size": max_page_size,
            "cursor": cursor,
        })
        # Same UTC ISO-8601 conversion as get_account_orders.
        try:
            params["from"] = params["from"].astimezone(pytz.utc).isoformat()
        except KeyError:
            pass
        try:
            params["to"] = params["to"].astimezone(pytz.utc).isoformat()
        except KeyError:
            pass
        return await self._create_get("account/trades", params=params, headers=self._get_header_api_key())
    async def get_account_trade(self, trade_id: str) -> dict:
        """Fetch a single trade by id (private)."""
        return await self._create_get("account/trades/" + trade_id, headers=self._get_header_api_key())
    async def get_account_trading_volume(self) -> dict:
        """Fetch the account's trading volume (private)."""
        return await self._create_get("account/trading-volume", headers=self._get_header_api_key())
    async def create_market_order(self, pair: Pair, side: enums.OrderSide, amount: str) -> dict:
        """Place a MARKET order (private)."""
        data = {
            "instrument_code": str(pair),
            "side": side.value,
            "type": "MARKET",
            "amount": amount
        }
        return await self._create_post("account/orders", data=data, headers=self._get_header_api_key())
    async def create_limit_order(self, pair: Pair, side: enums.OrderSide, amount: str, limit_price: str) -> dict:
        """Place a LIMIT order (private)."""
        data = {
            "instrument_code": str(pair),
            "side": side.value,
            "type": "LIMIT",
            "amount": amount,
            "price": limit_price
        }
        return await self._create_post("account/orders", data=data, headers=self._get_header_api_key())
    async def create_stop_limit_order(self, pair: Pair, side: enums.OrderSide, amount: str, limit_price: str,
                                      stop_price: str) -> dict:
        """Place a STOP (stop-limit) order triggered at stop_price (private)."""
        data = {
            "instrument_code": str(pair),
            "side": side.value,
            "type": "STOP",
            "amount": amount,
            "price": limit_price,
            "trigger_price": stop_price
        }
        return await self._create_post("account/orders", data=data, headers=self._get_header_api_key())
    async def delete_account_orders(self, pair: Pair = None) -> dict:
        """Cancel all open orders, optionally restricted to one pair (private)."""
        params = BitpandaClient._clean_request_params({
            "instrument_code": pair,
        })
        return await self._create_delete("account/orders", params=params, headers=self._get_header_api_key())
    async def delete_account_order(self, order_id: str) -> dict:
        """Cancel a single order by id (private)."""
        return await self._create_delete("account/orders/" + order_id, headers=self._get_header_api_key())
    async def get_candlesticks(self, pair: Pair, unit: enums.TimeUnit, period: str, from_timestamp: datetime.datetime,
                               to_timestamp: datetime.datetime) -> dict:
        """Fetch candlesticks for a pair over [from_timestamp, to_timestamp] (public)."""
        params = {
            "unit": unit.value,
            "period": period,
            "from": from_timestamp.astimezone(pytz.utc).isoformat(),
            "to": to_timestamp.astimezone(pytz.utc).isoformat(),
        }
        return await self._create_get("candlesticks/" + str(pair), params=params)
    async def get_instruments(self) -> dict:
        """List tradable instruments (public)."""
        return await self._create_get("instruments")
    async def get_order_book(self, pair: Pair, level: str = None) -> dict:
        """Fetch the order book for a pair at the given depth level (public)."""
        params = BitpandaClient._clean_request_params({
            "level": level,
        })
        return await self._create_get("order-book/" + str(pair), params=params)
    async def get_time(self) -> dict:
        """Fetch the exchange server time (public)."""
        return await self._create_get("time")
    def compose_subscriptions(self, subscriptions: List[Subscription]) -> None:
        """Queue a group of websocket subscriptions; started by start_subscriptions()."""
        self.subscription_sets.append(subscriptions)
    async def start_subscriptions(self) -> None:
        """Run one SubscriptionMgr per composed group until the first failure.

        When any manager task raises, remaining tasks are cancelled and the
        exception is logged. Raises if no subscriptions were composed.
        """
        if len(self.subscription_sets):
            done, pending = await asyncio.wait(
                [asyncio.create_task(SubscriptionMgr(subscriptions, self.api_key, self.ssl_context).run()) for
                 subscriptions in self.subscription_sets],
                return_when=asyncio.FIRST_EXCEPTION
            )
            for task in done:
                try:
                    task.result()
                except Exception as e:
                    logger.exception(f"Unrecoverable exception occurred while processing messages: {e}")
                    logger.info("All websockets scheduled for shutdown")
            for task in pending:
                if not task.cancelled():
                    task.cancel()
        else:
            raise Exception("ERROR: There are no subscriptions to be started.")
    async def close(self) -> None:
        """Close the shared aiohttp session, if one was created."""
        session = self._get_rest_session()
        if session is not None:
            await session.close()
    async def _create_get(self, resource: str, params: dict = None, headers: dict = None) -> dict:
        return await self._create_rest_call(enums.RestCallType.GET, resource, None, params, headers)
    async def _create_post(self, resource: str, data: dict = None, params: dict = None, headers: dict = None) -> dict:
        return await self._create_rest_call(enums.RestCallType.POST, resource, data, params, headers)
    async def _create_delete(self, resource: str, params: dict = None, headers: dict = None) -> dict:
        return await self._create_rest_call(enums.RestCallType.DELETE, resource, None, params, headers)
    async def _create_rest_call(self, rest_call_type: enums.RestCallType, resource: str, data: dict = None,
                                params: dict = None, headers: dict = None) -> dict:
        """Dispatch one REST call and return {"status_code", "response"} (raw text)."""
        if rest_call_type == enums.RestCallType.GET:
            rest_call = self._get_rest_session().get(BitpandaClient.REST_API_URI + resource, json=data, params=params,
                                                     headers=headers, ssl=self.ssl_context)
        elif rest_call_type == enums.RestCallType.POST:
            rest_call = self._get_rest_session().post(BitpandaClient.REST_API_URI + resource, json=data, params=params,
                                                      headers=headers, ssl=self.ssl_context)
        elif rest_call_type == enums.RestCallType.DELETE:
            rest_call = self._get_rest_session().delete(BitpandaClient.REST_API_URI + resource, json=data,
                                                        params=params, headers=headers, ssl=self.ssl_context)
        else:
            raise Exception(f"Unsupported REST call type {rest_call_type}.")
        logger.debug(f"> resource [{resource}], params [{params}], headers [{headers}], data [{data}]")
        async with rest_call as response:
            status_code = response.status
            response_text = await response.text()
            logger.debug(f"<: status [{status_code}], response [{response_text}]")
            return {
                "status_code": status_code,
                "response": response_text
            }
    def _get_rest_session(self) -> aiohttp.ClientSession:
        """Return the shared aiohttp session, creating it on first use."""
        if self.rest_session is not None:
            return self.rest_session
        if self.api_trace_log:
            trace_config = aiohttp.TraceConfig()
            trace_config.on_request_start.append(BitpandaClient._on_request_start)
            trace_config.on_request_end.append(BitpandaClient._on_request_end)
            trace_configs = [trace_config]
        else:
            trace_configs = None
        self.rest_session = aiohttp.ClientSession(trace_configs=trace_configs)
        return self.rest_session
    def _get_header_api_key(self):
        """Build the Bearer authorization header for private endpoints."""
        header = {
            "Authorization": "Bearer " + self.api_key
        }
        return header
    @staticmethod
    def _clean_request_params(params: dict) -> dict:
        """Drop None values and stringify the rest for use as query params."""
        res = {}
        for key, value in params.items():
            if value is not None:
                res[key] = str(value)
        return res
    # NOTE(review): these two are registered above as plain functions
    # (BitpandaClient._on_request_start), so aiohttp passes the session object
    # into the `self` slot — it works, but @staticmethod would be clearer.
    async def _on_request_start(self, trace_config_ctx, params) -> None:
        logger.debug(f"> Context: {trace_config_ctx}")
        logger.debug(f"> Params: {params}")
    async def _on_request_end(self, trace_config_ctx, params) -> None:
        logger.debug(f"< Context: {trace_config_ctx}")
        logger.debug(f"< Params: {params}")
|
from pymongo import MongoClient
from lib import DadosAbertos
import schedule
import time
import os
import sys
def coleta():
    """Collect deputy data from the Dados Abertos API and store it in MongoDB."""
    # MongoDB hostname (the docker-compose service name).
    mongo_host = 'mongodb'
    connection = MongoClient(mongo_host, 27017)
    database = connection['projetoDep']
    deputados_table = database['deputados']
    # Client for the public open-data API.
    api = DadosAbertos()
    deputados = api.deputados()
    # Ids of deputies already stored in the collection.
    stored_ids = [doc['_id'] for doc in deputados_table.find({}, {"_id": 1})]
    # Speech count per stored deputy id.
    discursos_por_id = {
        dep_id: len(api.deputado_discursos(dep_id)) for dep_id in stored_ids
    }
    registros = [
        {
            '_id': dep['id'],
            'Nome': dep['nome'],
            'Partido': dep['siglaPartido'],
            'Foto': dep['urlFoto'],
            'qtd_discursos': discursos_por_id[dep['id']],
        }
        for dep in deputados
    ]
    # NOTE(review): insert_many raises on duplicate _id values, so a second
    # scheduled run against a populated collection will fail — confirm intended.
    resultado = deputados_table.insert_many(registros)
    print(resultado)
# Schedule the collection job: daily at 10:30 plus every minute at second 17.
schedule.every().day.at("10:30").do(coleta)
schedule.every().minute.at(":17").do(coleta)
while True:
    schedule.run_pending()
    time.sleep(1)
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
from airflow.hooks.base_hook import BaseHook
import psycopg2
# Default task arguments shared by every task in this DAG.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": datetime(2018, 10, 14),
    "email": ["bansalshray@gmail.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# Pull the production DB password from the Airflow connection store so it is
# not hardcoded in the DAG file.
SIXTHMAN_PROD = BaseHook.get_connection("sixthman_prod")
SIXTHMAN_CONN_PASSWORD = SIXTHMAN_PROD.password
# Daily player-load job; catchup disabled so missed runs are not backfilled.
dag = DAG("nba_load_players", default_args=default_args, schedule_interval=timedelta(days=1), catchup=False)
# Runs the Node ingest script with the DB connection string in its environment.
t1 = BashOperator(
    task_id="nba_load_players_task",
    pool="nba_load_players",
    bash_command=f"DATABASE_API_CONNECTION=postgres://sixthman:{SIXTHMAN_CONN_PASSWORD}@sixthman-prod.cbdmxavtswxu.us-west-1.rds.amazonaws.com:5432/sixthman node /usr/local/airflow/src/ingestJobs/loadPlayerData.js",
    retries=3,
    dag=dag
)
|
from floodsystem.geo import rivers_with_station
from floodsystem.geo import stations_by_river
from floodsystem.stationdata import build_station_list
def run():
    """Task 1D: report rivers that have at least one monitoring station."""
    stations = build_station_list()
    # Idiom fix: the original copied the names into a set and back into a
    # list by hand; de-duplicate and alphabetise in one expression.
    rivers = sorted(set(rivers_with_station(stations)))
    print("Rivers with a station: ", len(rivers))
    result = rivers[0:10]
    print("First 10 rivers having a station: ", result)
    rstations = stations_by_river(stations)
    # NOTE(review): indexing with 0/1/2 assumes stations_by_river returns an
    # integer-indexable container; confirm against its definition.
    aire = rstations[0]
    cam = rstations[1]
    thames = rstations[2]
    print("stations on river aire: ", aire)
    print("stations on river cam: ", cam)
    print("stations on river thames: ", thames)
# Script entry point: print the task banner, then run the report.
if __name__ == "__main__":
    print("*** Task 1D: CUED Part IA Flood Warning System ***")
    run()
import re
def url_validator(url: str) -> bool:
    """
    Validate the given string as a URL.

    :param str url: string to validate
    :return: True if the string is a valid URL
    :rtype: bool
    :raises ValueError: if the string is empty, not a str, or not a valid URL
    """
    _valid = False
    if url and isinstance(url, str) and re.match(r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', url):
        _valid = True
    if not _valid:
        raise ValueError('invalid url: {}'.format(url))
    # Bug fix: the original fell off the end and returned None even for valid
    # URLs, despite documenting a boolean return.
    return _valid
|
# Bug fix: the flask.ext.* proxy namespace was removed in Flask 1.0; import
# the extension package (Flask-API) directly instead.
import os

from flask_api import FlaskAPI

# Serve the prebuilt UI bundle located at ../../ui relative to this file.
static_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../ui/"))
app = FlaskAPI("NewsBuddy", static_folder=static_path, static_url_path="")
import unittest
from app.models import Quotes
from app import db
class QuotesTest(unittest.TestCase):
    """Unit tests covering the behaviour of the Quotes model class."""

    def setUp(self):
        """Build a fresh Quotes object before each test runs."""
        self.new_quote = Quotes("David", "Let it all work out..")

    def test_instance(self):
        """The object created in setUp must be an instance of Quotes."""
        self.assertTrue(isinstance(self.new_quote, Quotes))
|
import discord
import sys
import os
import io
import json
import aiohttp
import ezjson
from discord.ext import commands
class League_Of_Legends:
    """Discord cog exposing League of Legends profile lookups via the Riot API."""

    def __init__(self, bot):
        self.bot = bot
        # Riot API key lives in data/apikeys.json under the "lolapi" key.
        with open('data/apikeys.json') as f:
            lol = json.load(f)
        self.token = lol.get("lolapi")

    @commands.command()
    async def lolprofile(self, ctx, *, name=None):
        """Show a summoner's profile and their top three champion masteries."""
        if name is None:
            await ctx.send("Oops! Enter your summoner name like this: `*lolprofile [summoner name]`")
            return
        try:
            # Look up the summoner and send the basic profile embed.
            async with aiohttp.ClientSession() as session:
                async with session.get(f'https://na1.api.riotgames.com/lol/summoner/v3/summoners/by-name/{name}?api_key={self.token}') as resp:
                    resp = await resp.json()
            color = discord.Color(value=0x00ff00)
            em = discord.Embed(color=color, title=resp['name'])
            em.add_field(name='Summoner Level', value=resp['summonerLevel'])
            em.add_field(name='ID', value=resp['id'])
            em.set_thumbnail(url=f"http://ddragon.leagueoflegends.com/cdn/6.24.1/img/profileicon/{resp['profileIconId']}.png")
            await ctx.send(embed=em)
            # Bug fix: aiohttp.clientSession -> aiohttp.ClientSession
            # (the lowercase name raised AttributeError at runtime).
            async with aiohttp.ClientSession() as session:
                async with session.get(f"https://na1.api.riotgames.com//lol/champion-mastery/v3/champion-masteries/by-summoner/{resp['id']}?api_key={self.token}") as r:
                    r = await r.json()
            color = discord.Color(value=0x00ff00)
            em = discord.Embed(color=color, title='Champion Masteries')
            # The top-three mastery blocks were copy-pasted three times; loop
            # instead. Extracting champion_id also fixes the nested same-quote
            # f-strings, which were a SyntaxError before Python 3.12.
            for mastery in r[:3]:
                champion_id = mastery['championId']
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'https://na1.api.riotgames.com/lol/static-data/v3/champions/{champion_id}?api_key={self.token}') as response:
                        response = await response.json()
                em.add_field(name='Champion', value=response['name'])
                em.add_field(name='Level', value=mastery['championLevel'])
                totalpoints = int(mastery['championPointsSinceLastLevel']) + int(mastery['championPointsUntilNextLevel'])
                em.add_field(name='Progress', value=f"{mastery['championPointsSinceLastLevel']}/{totalpoints}")
                em.add_field(name='Total Points Earned', value=mastery['championPoints'])
                em.add_field(name='Tokens Earned', value=mastery['tokensEarned'])
                if mastery['chestGranted'] is True:
                    cheststatus = 'Granted'
                else:
                    cheststatus = 'Not Granted'
                em.add_field(name='Chest Granted', value=cheststatus)
            await ctx.send(embed=em)
        except KeyError as e:
            print(f"KeyError: {e}")
            await ctx.send("Error finding your profile. Maybe check your name?")
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(League_Of_Legends(bot))
|
import numpy as np
import cv2
try:
xrange
except NameError:
xrange = range
class PatchGenerator:
    """Cuts an image into fixed-size square patches and stitches them back.

    Patches are laid on a regular grid of `stepsize`; the last row/column is
    shifted back inside the image so every patch is full-sized (edge patches
    therefore overlap their neighbours).
    """
    def __init__(self, stepsize, imsize, winsize=None):
        """
        :param stepsize: stride (and patch side length) in pixels
        :param imsize: image shape, (H, W) or (H, W, C)
        :param winsize: patch window size; defaults to stepsize
        """
        self.stepsize = stepsize
        self.imsize = imsize
        if len(self.imsize) == 2:
            # Treat 2-D images as single-channel.
            self.imsize = (imsize[0], imsize[1], 1)
        if winsize is None:
            self.winsize = self.stepsize
        else:
            self.winsize = winsize
        # Different step/window sizes have not been debugged, hence this assert.
        assert self.stepsize == self.winsize
        self.coords = self.generate_coords()

    def generate_coords(self):
        """Return {index: [x1, x2, y1, y2]} patch bounds covering the image."""
        coords = {}
        count = 0
        # `range` works on both Python 2 and 3, so the module-level xrange
        # shim is not needed here.
        for x in range(0, self.imsize[0], self.winsize):
            for y in range(0, self.imsize[1], self.winsize):
                # Shift the last row/column back so the patch stays in bounds.
                if (y + self.winsize) > self.imsize[1]:
                    y -= (y + self.winsize) - self.imsize[1]
                if (x + self.winsize) > self.imsize[0]:
                    x -= (x + self.winsize) - self.imsize[0]
                x, y = np.clip(x, 0, self.imsize[0] - self.stepsize), np.clip(y, 0, self.imsize[1] - self.stepsize)
                coords[count] = [x, x + self.stepsize, y, y + self.stepsize]
                count += 1
        return coords

    def create(self, img, mask=None, nonzero=False, standarize=False, coords=None):
        """Cut img (and optional mask) into patches.

        :param img: image array, (H, W) or (H, W, C)
        :param mask: optional mask with the same spatial shape as img
        :param nonzero: if True, keep only patches whose mask has nonzero pixels
        :param standarize: if True, z-score each image patch over its nonzero pixels
        :param coords: optional coordinate map; defaults to self.coords
        :return: (img_patches, mask_patches, kept_coords)
        """
        self.img_patches = []
        self.mask_patches = []
        self.img_org = img
        if coords is None:
            coords = self.coords
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
        if mask is not None:
            assert mask.shape[0:2] == img.shape[0:2]
        if mask is not None and len(mask.shape) == 2:
            mask = mask[:, :, np.newaxis]
        keep_coords = {}
        for i, key in enumerate(coords.keys()):
            x1, x2, y1, y2 = coords[key]
            has_nonzero = True
            if mask is not None:
                m = mask[x1:x2, y1:y2, :]
                if nonzero:
                    if m.shape[-1] > 1:
                        has_nonzero = (m[:, :, :] > 0).any()
                    else:
                        has_nonzero = (m > 0).any()
                if has_nonzero:
                    keep_coords[key] = coords[key]
                    self.mask_patches.append(m)
            if has_nonzero:
                if standarize:
                    # Z-score using statistics of the nonzero pixels only.
                    im = img[x1:x2, y1:y2, :].astype('float32')
                    im0 = im[im > 0]
                    mean, std = im0.mean(), im0.std()
                    im = (im - mean) / std
                    self.img_patches.append(im)
                else:
                    self.img_patches.append(img[x1:x2, y1:y2, :])
        self.img_patches = np.asarray(self.img_patches)
        self.mask_patches = np.asarray(self.mask_patches)
        return self.img_patches, self.mask_patches, keep_coords

    def reconstruct(self, patches, resize=None):
        """Stitch patches (in self.coords order) back into a full-size image."""
        self.img_re = np.zeros((self.imsize[0], self.imsize[1], patches.shape[-1]), dtype=patches.dtype)
        for i, key in enumerate(self.coords.keys()):
            x1, x2, y1, y2 = self.coords[key]
            item = patches[i]
            if resize is not None:
                item = cv2.resize(item, resize)
            if len(item.shape) == 2:
                # Bug fix: was np.newaxsi, which raised AttributeError.
                item = item[:, :, np.newaxis]
            self.img_re[x1:x2, y1:y2, :] = item
        return self.img_re

    def get_coords(self):
        """Return the patch coordinate map built at construction time."""
        return self.coords
def tests():
    """Smoke test: patch a 900x900 RGB image with 512-pixel patches."""
    image = np.zeros((900, 900, 3))
    generator = PatchGenerator(512, image.shape)
    for box in generator.get_coords().values():
        print(box, box[1] - box[0], box[3] - box[2])
    patches, _, _ = generator.create(image)
    print(patches.shape)
# Run the smoke tests when executed as a script.
if __name__=='__main__':
    tests()
|
import re
import asyncio
# Live client connections: task -> (reader, writer); entries are removed and
# the writer closed by the done-callback in server_handler.
all_clients = {}
# Matches an absolute-form proxy request URI: http://host[:port]/path
re_http_forward_proxy = re.compile(
    r'^http://([^:/]+)(?::([^/]*))?/(.*)')
async def read_http_header(reader):
    """Read raw bytes up to and including the blank line ending an HTTP header.

    Returns None if the peer closes the connection before the header ends.
    """
    collected = b''
    while True:
        line = await reader.readline()
        if not line:
            return
        collected += line
        if line == b'\r\n':
            return collected
def remore_useless_header(header):
    """Drop hop-by-hop 'Proxy-Connection' lines from a list of header lines."""
    return [line for line in header
            if not line.lower().startswith('proxy-connection:')]
async def get_request_info_from_header(reader):
    """Parse the request head from *reader*.

    :return: (header_bytes, tunnel_mode, (host, port)); header_bytes is the
             rewritten origin-form header used for plain (non-CONNECT) proxying.
    :raises ValueError: on a truncated or malformed request. (Bug fix: the
             original used a bare ``raise`` with no active exception, which
             produces RuntimeError instead of a meaningful error.)
    """
    header = await read_http_header(reader)
    if not header:
        raise ValueError('connection closed before the header was complete')
    header_items = header.decode().split('\r\n')
    method_args = header_items[0].split(' ')
    method = method_args[0]
    uri = method_args[1]
    tunnel_mode = (method == 'CONNECT')
    print(method, uri)
    if tunnel_mode:
        # CONNECT host:port — tunnel raw bytes, no header rewriting.
        remote_host = uri.split(':')
        host = remote_host[0]
        port = int(remote_host[1])
    else:
        m = re_http_forward_proxy.match(uri)
        if not m:
            raise ValueError('unsupported request URI: {!r}'.format(uri))
        host = m.group(1)
        port_str = m.group(2)
        port = int(port_str) if port_str else 80
        # Rewrite the absolute-form URI to origin-form and strip proxy headers.
        method_args[1] = '/' + m.group(3)
        header_items[0] = ' '.join(method_args)
        header_items = remore_useless_header(header_items)
    new_header = '\r\n'.join(header_items).encode()
    return new_header, tunnel_mode, (host, port)
async def relay_stream(read1, write1, read2, write2):
    """Pump bytes in both directions until each side reaches EOF."""
    async def relay(reader, writer):
        while True:
            line = await reader.read(1024)
            if len(line) == 0:
                break
            writer.write(line)
            await writer.drain()
    # Bug fix: asyncio.wait() no longer accepts bare coroutines (deprecated
    # in 3.8, removed in 3.11) — wrap both directions in tasks explicitly.
    await asyncio.wait([
        asyncio.ensure_future(relay(read1, write2)),
        asyncio.ensure_future(relay(read2, write1)),
    ])
async def server_handler_impl(reader, writer):
    # Parse the client's request and open the upstream connection; on any
    # failure just drop the client (server_handler's done-callback closes the
    # client writer).
    try:
        header, tunnel_mode, remote_host = \
            await get_request_info_from_header(reader)
        peer_reader, peer_writer = \
            await asyncio.open_connection(*remote_host)
    except Exception:
        return
    try:
        if tunnel_mode:
            # CONNECT: acknowledge, then blindly relay bytes both ways.
            writer.write(b'HTTP/1.1 200 Connection established\r\n\r\n')
            await writer.drain()
        else:
            # Plain HTTP: forward the rewritten origin-form header first.
            peer_writer.write(header)
            await peer_writer.drain()
        await relay_stream(reader, writer, peer_reader, peer_writer)
    finally:
        peer_writer.close()
async def server_handler(reader, writer):
    """Wrap the per-client handler in a task and close the writer when done."""
    task = asyncio.ensure_future(server_handler_impl(reader, writer))
    all_clients[task] = (reader, writer)

    def client_done(finished):
        # Drop the bookkeeping entry and release the client connection.
        del all_clients[finished]
        writer.close()

    task.add_done_callback(client_done)
async def server_loop(host, port):
    # Swallow OSError-based transport errors (connection resets etc.) so they
    # do not reach the event loop's default exception handler.
    def exception_handler(loop, context):
        if 'exception' in context:
            exception = context['exception']
            if isinstance(exception, OSError):
                return
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(exception_handler)
    # Accept clients forever; each connection is handled by server_handler.
    server = await asyncio.start_server(server_handler, host, port)
    await server.serve_forever()
# Run the proxy on localhost:9000 when executed as a script.
if __name__ == '__main__':
    asyncio.run(server_loop('127.0.0.1', 9000))
|
class CtrlBase(object):
    """
    Base class for MVC controllers.

    Subclasses only need to implement get_view(), which names the file
    (relative to the site_root) used to build the response.
    """
    def get_view(self):
        """
        :return: The filename (relative to the site_root) of the view
        """
        raise NotImplementedError("CtrlBase.get_view()")
class SimpleCtrl(CtrlBase):
    """
    A simple controller wrapper. Best not to use, controllers should set their views themselves.
    """
    def __init__(self, view, members):
        """
        :param view: the view to be requested by the controller
        :param members: dict of attributes copied onto the controller
        """
        super(SimpleCtrl, self).__init__()
        self.view = view
        # Bug fix: dict.iteritems() only exists on Python 2; items() works on
        # both interpreters.
        for name, value in members.items():
            self.__setattr__(name, value)
    def get_view(self):
        return self.view
"""
The stack of controllers, used with nested controllers, semi-deprecated.
Use with push_ctrl and pop_ctrl
"""
ctrl_stack = []
"""
The current controller, used by push_ctrl and pop_ctrl
"""
curr_ctrl = None
def push_ctrl(new_ctrl):
    """
    Save the current controller on the stack and make *new_ctrl* active.

    :param new_ctrl: the controller to activate
    :return: the new controller, for chaining purposes
    """
    global curr_ctrl
    previous = curr_ctrl
    if previous is not None:
        ctrl_stack.append(previous)
    curr_ctrl = new_ctrl
    return new_ctrl
def pop_ctrl():
    """
    Discard the current controller and restore the previous one.

    :return: the newly-active controller, or None if the stack was empty
    """
    global curr_ctrl
    curr_ctrl = ctrl_stack.pop() if len(ctrl_stack) else None
    return curr_ctrl
"""
The controller to set be used as an initial controller. Do not use directly, se set_init_ctrl().
Set from routable pages that want to invoke a controller.
"""
init_ctrl = None
def set_init_ctrl(ctrl):
    """
    Schedule *ctrl* to be invoked after the script has finished executing.

    :param ctrl: The controller to be used.
    """
    global init_ctrl
    init_ctrl = ctrl
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/fosslc/freeseer/
import ConfigParser
import os
class Config:
'''
This class is responsible for reading/writing settings to/from a config file.
'''
    def __init__(self, configdir):
        '''
        Initialize settings from a configfile.

        Defaults are set first, then overridden by freeseer.conf inside
        *configdir* (created with defaults if it does not exist). Finally the
        recording directory is created if needed.
        '''
        # Get the user's home directory
        self.userhome = os.path.expanduser('~')
        # Config location
        self.configdir = configdir
        self.configfile = os.path.abspath("%s/freeseer.conf" % self.configdir)
        # Set default settings
        self.videodir = os.path.abspath('%s/Videos/' % self.userhome)
        self.presentations_file = os.path.abspath('%s/presentations.db' % self.configdir)
        self.resolution = '0x0' # no scaling for video
        self.videosrc = 'desktop'
        self.videodev = 'none'
        # Capture region corners; 0,0/0,0 means the whole desktop.
        self.start_x = 0
        self.start_y = 0
        self.end_x = 0
        self.end_y = 0
        self.audiosrc = 'none'
        self.audiofb = 'False'
        # Global hotkeys for starting and stopping a recording.
        self.key_rec = 'Ctrl+Shift+R'
        self.key_stop = 'Ctrl+Shift+E'
        self.auto_hide = True
        self.delay_recording = 0
        # NOTE(review): 'recoding' is a long-standing typo for 'recording';
        # the attribute names are kept because the config file uses them.
        self.enable_video_recoding = True
        self.enable_audio_recoding = True
        self.enable_streaming = 'False'
        self.streaming_resolution = '0x0' #no scaling for streaming
        self.streaming_mount = 'stream.ogv'
        self.streaming_port = '8000'
        self.streaming_password = 'hackme'
        self.streaming_url = '127.0.0.1'
        # Map of resolution names to the actual resolution (both stream and record)
        # Names should include all options available in the GUI
        self.resmap = { '240p':'320x240',
                        '360p':'480x360',
                        '480p':'640x480',
                        '720p':'1280x720',
                        '1080p':'1920x1080' }
        # Read in the config file
        self.readConfig()
        # Make the recording directory
        try:
            os.makedirs(self.videodir)
        except OSError:
            print('Video directory exists.')
def readConfig(self):
'''
Read in settings from config file if exists.
If the config file does not exist create one and set some defaults.
'''
config = ConfigParser.ConfigParser()
try:
config.readfp(open(self.configfile))
# Config file does not exist, create a default
except IOError:
self.writeConfig()
return
# Config file exists, read in the settings
try:
self.videodir = config.get('Global', 'video_directory')
self.resolution = config.get('Global', 'resolution')
self.videosrc = config.get('lastrun', 'video_source')
self.videodev = config.get('lastrun', 'video_device')
self.start_x = config.get('lastrun', 'area_start_x')
self.start_y = config.get('lastrun', 'area_start_y')
self.end_x = config.get('lastrun', 'area_end_x')
self.end_y = config.get('lastrun', 'area_end_y')
self.audiosrc = config.get('lastrun', 'audio_source')
self.audiofb = config.get('lastrun', 'audio_feedback')
self.auto_hide = config.getboolean('lastrun', 'auto_hide')
self.delay_recording = config.get('lastrun', 'delay_recording')
self.enable_streaming = config.getboolean('lastrun', 'enable_streaming')
self.enable_video_recoding = config.getboolean('lastrun','enable_video_recoding')
self.enable_audio_recoding = config.getboolean('lastrun','enable_audio_recoding')
self.streaming_resolution = config.get('Global','streaming_resolution')
self.streaming_mount = config.get('lastrun','streaming_mount')
self.streaming_port = config.get('lastrun','streaming_port')
self.streaming_password = config.get('lastrun','streaming_password')
self.streaming_url = config.get('lastrun','streaming_url')
except:
print('Corrupt config found, creating a new one.')
self.writeConfig()
def writeConfig(self):
'''
Write settings to a config file.
'''
config = ConfigParser.ConfigParser()
# Set config settings
config.add_section('Global')
config.set('Global', 'video_directory', self.videodir)
config.set('Global', 'resolution', self.resolution)
config.set('Global','streaming_resolution',self.streaming_resolution)
config.add_section('lastrun')
config.set('lastrun', 'video_source', self.videosrc)
config.set('lastrun', 'video_device', self.videodev)
config.set('lastrun', 'area_start_x', self.start_x)
config.set('lastrun', 'area_start_y', self.start_y)
config.set('lastrun', 'area_end_x', self.end_x)
config.set('lastrun', 'area_end_y', self.end_y)
config.set('lastrun', 'audio_source', self.audiosrc)
config.set('lastrun', 'audio_feedback', self.audiofb)
config.set('lastrun', 'auto_hide', self.auto_hide)
config.set('lastrun', 'delay_recording', self.delay_recording)
config.set('lastrun', 'enable_streaming', self.enable_streaming)
config.set('lastrun','enable_video_recoding',self.enable_video_recoding)
config.set('lastrun','enable_audio_recoding',self.enable_audio_recoding)
config.set('lastrun','streaming_mount',self.streaming_mount)
config.set('lastrun','streaming_port',self.streaming_port)
config.set('lastrun','streaming_password',self.streaming_password)
config.set('lastrun','streaming_url',self.streaming_url)
# Make sure the config directory exists before writing to the configfile
try:
os.makedirs(self.configdir)
except OSError:
pass # directory exists.
# Save default settings to new config file
with open(self.configfile, 'w') as configfile:
config.write(configfile)
# Config class test code
# Smoke test: build a Config rooted at ~/.freeseer/ (creating the config file
# and video directory if missing) and print where recordings will be stored.
if __name__ == "__main__":
    config = Config(os.path.abspath(os.path.expanduser('~/.freeseer/')))
    print('\nTesting freeseer config file')
    print('Video Directory at %s' % config.videodir)
    print('Test complete!')
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib # pylint: disable=import-error
def is_available_on_pypi(module_name, module_version):
    """Return True if `module_version` of `module_name` is published on PyPI.

    Queries the (legacy) PyPI XML-RPC API over the network; the second
    argument to package_releases requests hidden releases as well.
    """
    pypi = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
    released_versions = pypi.package_releases(module_name, True)
    return module_version in released_versions
|
import matplotlib.pyplot as plt
import cv2
import numpy as np
from skimage.feature import hog
from skimage import data, exposure
class Feature:
    """
    Extract and visualize image features for the image at `path`.

    Methods cover three classic descriptors: HOG, SIFT and LBP. hog() and
    lbp() display matplotlib figures; sift() writes 'sift_keypoints.jpg'
    to the working directory and returns the keypoints/descriptors.
    """

    def __init__(self, path):
        # Path to the image file each extractor reads with cv2.imread().
        self.path = path

    def hog(self):
        """Compute and display the Histogram of Oriented Gradients."""
        image = cv2.imread(self.path)
        # `hog` here is the module-level skimage.feature.hog, not this method.
        # NOTE(review): `multichannel=True` is deprecated in scikit-image
        # >= 0.19 (replaced by `channel_axis=-1`) -- confirm installed version.
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualize=True, multichannel=True)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
        ax1.axis('off')
        ax1.imshow(image, cmap=plt.cm.gray)
        ax1.set_title('Input image')
        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        plt.show()

    def sift(self):
        """Detect SIFT keypoints, save a visualization, return (kp, des).

        Bug fix: the original computed the descriptors and then discarded
        them; they are now returned (backward compatible -- callers that
        ignored the previous None return are unaffected).
        """
        # The file-level `import numpy`/`import cv2` already cover this
        # method; the redundant function-local re-imports were removed.
        img = cv2.imread(self.path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        sift = cv2.SIFT_create()
        kp = sift.detect(gray, None)
        img = cv2.drawKeypoints(gray, kp, img,
                                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite('sift_keypoints.jpg', img)
        kp, des = sift.compute(gray, kp)
        return kp, des

    def lbp(self):
        """Compute and display the Local Binary Pattern image."""
        def get_pixel(img, center, x, y):
            # 1 if the neighbour at (x, y) is >= the center pixel, else 0.
            new_value = 0
            try:
                if img[x][y] >= center:
                    new_value = 1
            except IndexError:
                # Neighbour falls past the bottom/right edge of the image.
                # NOTE(review): negative indices (top/left borders) wrap to
                # the opposite edge instead of raising -- border LBP values
                # therefore use wrapped neighbours; confirm this is intended.
                pass
            return new_value

        # Function for calculating LBP at one pixel: threshold the 8
        # neighbours against the center and pack the bits into 0..255.
        def lbp_calculated_pixel(img, x, y):
            center = img[x][y]
            val_ar = []
            # top_left
            val_ar.append(get_pixel(img, center, x - 1, y - 1))
            # top
            val_ar.append(get_pixel(img, center, x - 1, y))
            # top_right
            val_ar.append(get_pixel(img, center, x - 1, y + 1))
            # right
            val_ar.append(get_pixel(img, center, x, y + 1))
            # bottom_right
            val_ar.append(get_pixel(img, center, x + 1, y + 1))
            # bottom
            val_ar.append(get_pixel(img, center, x + 1, y))
            # bottom_left
            val_ar.append(get_pixel(img, center, x + 1, y - 1))
            # left
            val_ar.append(get_pixel(img, center, x, y - 1))
            # Convert the binary neighbour pattern to a decimal value.
            power_val = [1, 2, 4, 8, 16, 32, 64, 128]
            val = 0
            for i in range(len(val_ar)):
                val += val_ar[i] * power_val[i]
            return val

        img_bgr = cv2.imread(self.path, 1)
        height, width, _ = img_bgr.shape
        # Convert the BGR image to gray: LBP works on a single channel.
        img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        # Output array with the same height and width as the input image.
        img_lbp = np.zeros((height, width), np.uint8)
        for i in range(0, height):
            for j in range(0, width):
                img_lbp[i, j] = lbp_calculated_pixel(img_gray, i, j)
        plt.imshow(img_bgr)
        plt.show()
        plt.imshow(img_lbp, cmap="gray")
        plt.show()
        print("LBP Program is finished")
# Demo: run each feature extractor on a sample image.
# Bug fix: use a raw string so the backslashes in the Windows path are not
# treated as escape sequences ('\p', '\h', '\i' are invalid escapes and only
# survive by CPython's deprecated fallback behavior).
f1 = Feature(r'F:\periocular recognition\hog\im_patch.jpg')
f1.hog()
f1.sift()
f1.lbp()
|
from nmigen import *
from nmigen.hdl.rec import *
from utils import bitcount
from vga_timing import VGATiming
class VGALayout(Layout):
    """Record layout for a VGA bus: sync pulses, a visibility flag, beam
    coordinates wide enough for a w-by-h visible area, and 1-bit RGB."""

    def __init__(self, w: int, h: int):
        fields = [
            ("hsync", 1),
            ("vsync", 1),
            ("visible", 1),
            ("x", bitcount(w)),
            ("y", bitcount(h)),
            ("r", 1),
            ("g", 1),
            ("b", 1),
        ]
        super().__init__(fields)
class VGABus(Record):
    """A VGA signal bundle whose field widths are derived from the visible
    resolution (vx, vy) of the supplied timing."""

    def __init__(self, timing: VGATiming):
        layout = VGALayout(timing.vx, timing.vy)
        super().__init__(layout)
        # Keep the timing object so downstream logic can query geometry.
        self.timing = timing

    def forward(self, m: Module, src):
        """Drive this bus from `src`.

        hsync/vsync/visible are forwarded combinationally, while the beam
        coordinates and color bits are registered in the 'px' clock domain.
        NOTE(review): this presumably adds one pixel-clock cycle of latency
        on the data path relative to the sync path -- confirm against the
        modules consuming this bus.
        """
        m.d.comb += [
            self.hsync.eq(src.hsync),
            self.vsync.eq(src.vsync),
            self.visible.eq(src.visible),
        ]
        m.d.px += [
            self.x.eq(src.x),
            self.y.eq(src.y),
            self.r.eq(src.r),
            self.g.eq(src.g),
            self.b.eq(src.b),
        ]
|
import copy
import math
import os
class SUTDHMM:
    """
    First-order Hidden Markov Model sequence tagger.

    Training data is '(word label)' lines; blank lines separate sentences
    and are expanded into dummy (#DUM#) tokens labelled STOP/START. Words
    seen fewer than `k` times are mapped to the special #UNK# word.
    Supports Viterbi decoding, forward-backward / max-marginal decoding,
    and a structured-perceptron style retraining loop.
    """

    def __init__(self, k=1, special_word='#UNK#', dummy_word='#DUM#', pre_prob=None, default_emission=0.0):
        """Set up empty count tables and model parameters.

        k -- words with frequency < k are replaced by special_word
        pre_prob -- optional pre-seeded label probability dict
        default_emission -- emission probability for unseen (label, word) pairs
        """
        # 2 layer dictionary depth-0 key is the label, depth-1 key is the word
        self.emission_params = {}
        self.y_count = {}
        self.y_labels = []
        self.x_words = [special_word, dummy_word]
        self.x_given_y_count = {}
        # 2 layer dictionary depth-0 key is the (i-1)-label, depth-1 key is the i-label
        self.y_given_prev_y_count = {}
        self.transition_params = {}
        # Bug fix: the original used a mutable default argument
        # (pre_prob={}), which would be shared and mutated across every
        # instance constructed without the argument.
        self.label_prob = {} if pre_prob is None else pre_prob
        self.special_word = special_word
        self.dummy_word = dummy_word
        self.default_emission = default_emission
        if k > 0:
            self.k = k
        else:
            self.k = 1
        self.tokens_list = []
        self.perceptron_trained = False

    def load_data(self, raw_string=None, data_filename=None):
        """Load '(word label)' tokens from a raw string or a file.

        Blank lines are expanded into '#DUM# STOP' / '#DUM# START' boundary
        tokens. Returns (tokens_list, y_labels, x_words, label_prob).
        Raises Exception when neither input is provided.
        """
        tokens_list = []
        if raw_string is not None:
            # Bug fix: this branch emitted the boundary tokens as START then
            # STOP -- the reverse of the file branch below. A blank line must
            # END the previous sentence (STOP) and then BEGIN the next one
            # (START), otherwise transition_params['START'] never points at
            # real labels when training from a string.
            data = os.linesep.join([s if s else self.dummy_word + ' STOP\n' +
                                    self.dummy_word + ' START' for s in raw_string.splitlines()])
            tokens_list = list(
                map(lambda x: x.rsplit(' ', 1), data.split('\n')))
        elif data_filename is not None:
            # The `with` block closes the file; the original's extra
            # f.close() inside it was redundant and has been removed.
            with open(data_filename) as f:
                data = f.read()
            data = os.linesep.join([s if s else self.dummy_word + ' STOP\n' + self.dummy_word + ' START'
                                    for s in data.splitlines()])
            tokens_list = list(
                map(lambda x: x.rsplit(' ', 1), data.split('\n')))
        else:
            raise Exception('No Data Input Provided!')
        # Replace rare words (frequency < k) with the special #UNK# word.
        token_freq = {}
        for token in tokens_list:
            if token[0] not in token_freq:
                token_freq[token[0]] = 1
            else:
                token_freq[token[0]] += 1
        for i in range(len(tokens_list)):
            if token_freq[tokens_list[i][0]] < self.k:
                tokens_list[i][0] = self.special_word
        # Register every observed word and label.
        for token in tokens_list:
            if token[0] not in self.x_words:
                self.x_words.append(token[0])
            if token[1] not in self.y_labels:
                self.y_labels.append(token[1])
        # initialise counts and emission params for newly seen labels
        for label in self.y_labels:
            if label not in self.emission_params:
                self.y_count[label] = 0
                self.x_given_y_count[label] = {}
                self.emission_params[label] = {}
                for word in self.x_words:
                    self.x_given_y_count[label][word] = 0
                    self.emission_params[label][word] = 0
        # initialise count and transition params for newly seen labels
        for label in self.y_labels:
            if label not in self.transition_params:
                self.y_given_prev_y_count[label] = {}
                self.transition_params[label] = {}
                for next_label in self.y_labels:
                    self.y_given_prev_y_count[label][next_label] = 0
                    self.transition_params[label][next_label] = 0
        # Count label occurrences and derive marginal label probabilities.
        all_labels = list(map(lambda x: x[1], tokens_list))
        self.y_count = {}
        for label in all_labels:
            if label not in self.y_count:
                self.y_count[label] = 1
            else:
                self.y_count[label] += 1
        for label in self.y_count:
            self.label_prob[label] = float(
                self.y_count[label]) / len(all_labels)
        self.tokens_list += tokens_list
        return self.tokens_list, self.y_labels, self.x_words, self.label_prob

    def calculate_emission(self):
        """Estimate P(word | label) by maximum likelihood over the loaded tokens."""
        for token in self.tokens_list:
            self.x_given_y_count[token[1]][token[0]] += 1
        self.emission_params = copy.deepcopy(self.x_given_y_count)
        for label in self.emission_params:
            for word in self.emission_params[label]:
                if self.x_given_y_count[label][word] == 0:
                    # Unseen pair: fall back to the configured default.
                    self.emission_params[label][word] = self.default_emission
                else:
                    self.emission_params[label][word] = float(
                        self.x_given_y_count[label][word]) / self.y_count[label]
        return self.emission_params

    def get_emission_param(self, label: str, word: str):
        """Return P(word | label), mapping unknown words to special_word."""
        if word not in self.x_words:
            word = self.special_word
        return self.emission_params[label][word]

    def predict_label_using_emission(self, word: str):
        """Return the label with the highest emission probability for `word`
        (None if every label scores 0)."""
        score = 0.0
        predicted_label = None
        for label in self.y_labels:
            label_score = self.get_emission_param(label, word)
            if label_score > score:
                predicted_label = label
                score = label_score
        return predicted_label

    def calculate_transition(self):
        """Estimate P(label_i | label_{i-1}) by maximum likelihood."""
        ordered_labels_list = list(map(lambda x: x[1], self.tokens_list))
        for idx, label in enumerate(ordered_labels_list):
            if idx < len(ordered_labels_list) - 1:
                next_label = ordered_labels_list[idx + 1]
                self.y_given_prev_y_count[label][next_label] += 1
        # calculate trans_params
        trans_params = copy.deepcopy(self.y_given_prev_y_count)
        for given_label in trans_params:
            if given_label == ordered_labels_list[-1]:
                # The very last label has no successor, so it contributes one
                # fewer "given" event than its raw count.
                adjusted_count = self.y_count[given_label] - 1
            else:
                adjusted_count = self.y_count[given_label]
            for label in trans_params[given_label]:
                trans_params[given_label][label] /= float(adjusted_count)
        self.transition_params = trans_params
        return self.transition_params

    def train(self, raw_string=None, input_filename=None):
        """Load data and estimate emission and transition parameters."""
        self.load_data(raw_string=raw_string, data_filename=input_filename)
        self.calculate_emission()
        self.calculate_transition()

    def clean_input_data(self, input_data: str):
        """Split `input_data` into words, mapping unknown words to special_word."""
        data = input_data.split()
        for idx, word in enumerate(data):
            if word not in self.x_words:
                data[idx] = self.special_word
        return data

    def viterbi(self, sentence: str):
        '''pre-requisite: train must be run before this function

        Returns (optimal_label_sequence, probability) for `sentence`.'''
        observed_words = self.clean_input_data(sentence)
        cache = [{}]
        # first layer: transitions out of START times first-word emissions
        for l in self.y_labels:
            trans_param = self.transition_params['START'][l]
            emission_param = (self.emission_params[l][observed_words[0]]
                              if observed_words[0] in self.emission_params[l]
                              else self.emission_params[l][self.special_word])
            cache[0][l] = {"chance": trans_param * emission_param, "prev": None}
        # handle middle layers
        for i in range(1, len(observed_words)):
            cache.append({})
            for l in self.y_labels:
                max_prob = -math.inf
                max_prev_l = None
                for prev_l in self.y_labels:
                    trans_param = self.transition_params[prev_l][l]
                    emission_param = (self.emission_params[l][observed_words[i]]
                                      if observed_words[i] in self.emission_params[l]
                                      else self.emission_params[l][self.special_word])
                    prob = cache[i - 1][prev_l]['chance'] * \
                        trans_param * emission_param
                    if prob > max_prob:
                        max_prob = prob
                        max_prev_l = prev_l
                cache[i][l] = {'chance': max_prob, 'prev': max_prev_l}
        # handle the end layer: transitions into STOP
        cache.append({})
        max_end_prob = -math.inf
        max_end_l = None
        for l in self.y_labels:
            trans_param = self.transition_params[l]['STOP']
            end_prob = cache[len(observed_words) -
                             1][l]['chance'] * trans_param
            if end_prob > max_end_prob:
                max_end_prob = end_prob
                max_end_l = l
        cache[len(observed_words)]['STOP'] = {
            'chance': max_end_prob, 'prev': max_end_l}
        # backtrack for optimal path
        optimal_prob = cache[len(observed_words)]['STOP']['chance']
        previous_l = cache[len(observed_words)]['STOP']['prev']
        optimal = [previous_l]
        # Bug fix: the original assigned the backtracked label to a dead
        # variable `previous` instead of updating `previous_l`, so every
        # step backtracked from the same column of the final label.
        for i in range(len(observed_words) - 1, 0, -1):
            previous_l = cache[i][previous_l]['prev']
            optimal.insert(0, previous_l)
        return (optimal, optimal_prob)

    def fwd_bwd(self, sentence: str):
        """Run the forward-backward algorithm; returns (forward, backward)
        lists of per-position label score dicts."""
        observed_words = self.clean_input_data(sentence)
        # forward part
        forward = []
        prev_forward = {}
        for i, word in enumerate(observed_words):
            curr_forward = {}
            for l in self.y_labels:
                prev_f_sum = 0
                if i == 0:
                    trans_prob = self.transition_params['START'][l]
                    prev_f_sum = trans_prob
                else:
                    for prev_l in self.y_labels:
                        trans_prob = self.transition_params[prev_l][l]
                        prev_f_sum += prev_forward[prev_l] * trans_prob
                emission_prob = (self.emission_params[l][word]
                                 if word in self.emission_params[l]
                                 else self.emission_params[l][self.special_word])
                curr_forward[l] = emission_prob * prev_f_sum
            forward.append(curr_forward)
            prev_forward = copy.deepcopy(curr_forward)
        forward_prob = 0
        for l in self.y_labels:
            trans_prob = self.transition_params[l]['STOP']
            forward_prob += curr_forward[l] * trans_prob
        # backward part (iterate the sentence in reverse)
        backward = []
        prev_backward = {}
        for i, word in enumerate(observed_words[::-1]):
            curr_backward = {}
            for l in self.y_labels:
                curr_backward[l] = 0
                if i == 0:
                    # Base case: beta is just the transition into STOP.
                    # (A dead emission lookup in the original was removed.)
                    curr_backward[l] = self.transition_params[l]['STOP']
                else:
                    for next_l in self.y_labels:
                        trans_prob = self.transition_params[l][next_l]
                        emm_prob = (self.emission_params[l][word]
                                    if word in self.emission_params[l]
                                    else self.emission_params[l][self.special_word])
                        curr_backward[l] += trans_prob * \
                            emm_prob * prev_backward[next_l]
            backward.insert(0, curr_backward)
            prev_backward = copy.deepcopy(curr_backward)
        backward_prob = 0
        for l in self.y_labels:
            trans_prob = self.transition_params['START'][l]
            emm_prob = (self.emission_params[l][observed_words[0]]
                        if observed_words[0] in self.emission_params[l]
                        else self.emission_params[l][self.special_word])
            backward_prob += trans_prob * emm_prob * curr_backward[l]
        return forward, backward

    def max_marginal(self, sentence: str):
        """Decode each position independently with max-marginal scores."""
        forward_p, backward_p = self.fwd_bwd(sentence)
        predictions = []
        for i in range(len(forward_p)):
            product_p = {l: forward_p[i][l] * backward_p[i][l]
                         for l in self.y_labels}
            predictions.append(max(product_p, key=product_p.get))
        return predictions

    def update_params(self, sentence: str, new_labels: list, old_labels: list):
        """Perceptron-style update: move counts from old_labels to new_labels
        and recompute the affected transition/emission entries."""
        for i in range(len(old_labels)):
            self.y_count[old_labels[i]] -= 1
            self.y_count[new_labels[i]] += 1
        for i in range(1, len(old_labels)):
            self.y_given_prev_y_count[old_labels[i - 1]][old_labels[i]] -= 1
            self.y_given_prev_y_count[new_labels[i - 1]][new_labels[i]] += 1
            self.transition_params[old_labels[i - 1]][old_labels[i]] = float(
                self.y_given_prev_y_count[old_labels[i - 1]][old_labels[i]]) / self.y_count[old_labels[i - 1]]
            self.transition_params[new_labels[i - 1]][new_labels[i]] = float(
                self.y_given_prev_y_count[new_labels[i - 1]][new_labels[i]]) / self.y_count[new_labels[i - 1]]
        observed_words = sentence.split()
        for i, word in enumerate(observed_words):
            self.x_given_y_count[old_labels[i]][word] -= 1
            self.x_given_y_count[new_labels[i]][word] += 1
            self.emission_params[old_labels[i]][word] = float(
                self.x_given_y_count[old_labels[i]][word]) / self.y_count[old_labels[i]]
            self.emission_params[new_labels[i]][word] = float(
                self.x_given_y_count[new_labels[i]][word]) / self.y_count[new_labels[i]]

    def train_perceptron(self, raw_string=None, input_filename=None, num_iteration=20):
        """Train the HMM, then iteratively re-decode each training sentence
        with Viterbi and update the parameters toward the decoded labels."""
        self.train(raw_string=raw_string, input_filename=input_filename)
        # Rebuild the raw sentences and their gold label sequences from the
        # token stream, using START/STOP boundary tokens as delimiters.
        sentence_list = []
        old_labels_for_sentences = []
        tmp_sentence = ''
        tmp_labels = []
        for token in self.tokens_list:
            if token[1] != 'START' and token[1] != 'STOP':
                tmp_sentence += token[0] + ' '
                tmp_labels.append(token[1])
            elif token[1] == 'START':
                tmp_sentence = ''
                tmp_labels = []
            elif token[1] == 'STOP':
                tmp_sentence = tmp_sentence[:-1]
                sentence_list.append(tmp_sentence)
                old_labels_for_sentences.append(tmp_labels)
        for j in range(num_iteration):
            for i, sentence in enumerate(sentence_list):
                old_labels = old_labels_for_sentences[i]
                new_labels, chance = self.viterbi(sentence)
                self.update_params(
                    sentence=sentence, old_labels=old_labels, new_labels=new_labels)
                old_labels_for_sentences[i] = new_labels
        self.perceptron_trained = True

    def predict_perceptron(self, sentence: str):
        """Viterbi-decode `sentence`; requires train_perceptron() first."""
        if not self.perceptron_trained:
            raise Exception('Perceptron Not Trained Yet')
        else:
            return self.viterbi(sentence)
|
import os
import signal
import subprocess
from time import sleep
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
from pymodbus.constants import Defaults
class SimpleModbusSlave:
    """
    Launches a Modbus TCP slave (server) as a subprocess and drives it
    through a pymodbus client. stop_server() must be called to reap the
    sudo-spawned server process.
    """

    def __init__(self, ip='0.0.0.0', port=502, discrete_inputs=10, coils=10, input_registers=10, holding_registers=10):
        # Server bind address/port and the sizes of each register bank.
        self.ip = ip
        self.port = port
        self.discrete_inputs = discrete_inputs
        self.coils = coils
        self.input_registers = input_registers
        self.holding_registers = holding_registers

    def start_server(self):
        """Start the server subprocess, wait for it to bind, then connect a client."""
        # NOTE(review): this only resolves to '<two dirs up>/pymodbus/servers.py'
        # because abspath() collapses the bogus 'servers.py..' path component;
        # os.path.join(os.path.dirname(...), ...) would be more robust.
        servers = os.path.abspath(os.path.realpath(__file__) + '../../../pymodbus/servers.py')
        # (A dead `command` shell-string local from the original was removed;
        # the argument-list Popen below is what actually runs.)
        self.process = subprocess.Popen(['sudo', 'python2', servers, '-i', self.ip, '-p', str(self.port)])
        # Give the server time to start listening before connecting.
        sleep(4)
        self.client = ModbusClient(self.ip, port=self.port)
        self.client.connect()

    def stop_server(self):
        """Kill the server subprocess (spawned via sudo, so kill via sudo too)."""
        subprocess.call(['sudo', 'kill', '-s', 'SIGKILL', str(self.process.pid)])

    def write_coil(self, address, value, unit=Defaults.UnitId):
        """Write a single coil on the slave."""
        self.client.write_coil(address, value, unit=unit)

    def write_holdingregister(self, address, value, unit=Defaults.UnitId):
        """Write a single holding register on the slave."""
        self.client.write_register(address, value, unit=unit)
|
import json
from project.api.models import User
from project import db
from project.tests.base import BaseTestCase
import datetime
from project.tests.utils import add_user
class TestUserService(BaseTestCase):
    """Tests for the Users Service."""

    # ------------------------------------------------------------------
    # Private helpers that factor out the seed/login/POST boilerplate the
    # original repeated in nearly every test. Per-test behavior unchanged.
    # ------------------------------------------------------------------

    def _seed_admin(self):
        """Create the 'test' user and promote it to admin."""
        add_user('test', 'test@test.com', 'test')
        user = User.query.filter_by(email='test@test.com').first()
        user.admin = True
        db.session.commit()

    def _login(self):
        """Log the seeded 'test' user in and return the login response."""
        return self.client.post(
            '/auth/login',
            data=json.dumps(dict(
                email='test@test.com',
                password='test'
            )),
            content_type='application/json'
        )

    def _auth_headers(self, resp_login):
        """Build the Authorization header dict from a login response."""
        token = json.loads(resp_login.data.decode())['auth_token']
        return dict(Authorization='Bearer ' + token)

    def _post_users(self, headers, **payload):
        """POST `payload` as JSON to /users with the given headers."""
        return self.client.post(
            '/users',
            data=json.dumps(payload),
            content_type='application/json',
            headers=headers
        )

    def test_users(self):
        """Ensure the /ping route behaves correctly."""
        response = self.client.get('/ping')
        data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 200)
        self.assertIn('pong!', data['message'])
        self.assertIn('success', data['status'])

    def test_add_user(self):
        """Ensure a new user can be added to the database."""
        self._seed_admin()
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com',
                password='test'
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 201)
            self.assertIn('michael@realpython.com was added!', data['message'])
            self.assertIn('success', data['status'])

    def test_add_user_invalid_json(self):
        """Ensure error is thrown if the JSON object is empty."""
        self._seed_admin()
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(headers)
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn('Invalid payload.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_invalid_json_keys(self):
        """
        Ensure error is thrown if the JSON object does not have a username key.
        """
        self._seed_admin()
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(
                headers,
                email='michael@realpython.com',
                password='test'
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn('Invalid payload.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_invalid_json_keys_no_password(self):
        """
        Ensure error is thrown if the JSON object does not have a password key.
        """
        self._seed_admin()
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com'
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn('Invalid payload.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_duplicate_email(self):
        """Ensure error is thrown if the email already exists."""
        self._seed_admin()
        with self.client:
            headers = self._auth_headers(self._login())
            # First insert succeeds; the identical one below must fail.
            self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com',
                password='test'
            )
            response = self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com',
                password='test'
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn(
                'Sorry. That email already exists.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_inactive(self):
        """Ensure an inactive user cannot add users."""
        add_user('test', 'test@test.com', 'test')
        # Deactivate the user before it tries to use the API.
        user = User.query.filter_by(email='test@test.com').first()
        user.active = False
        db.session.commit()
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com',
                password='test'
            )
            data = json.loads(response.data.decode())
            self.assertTrue(data['status'] == 'error')
            self.assertTrue(
                data['message'] == 'Something went wrong. Please contact us.')
            self.assertEqual(response.status_code, 401)

    def test_add_user_not_admin(self):
        """Ensure a non-admin user cannot add users."""
        add_user('test', 'test@test.com', 'test')
        with self.client:
            headers = self._auth_headers(self._login())
            response = self._post_users(
                headers,
                username='michael',
                email='michael@realpython.com',
                password='test'
            )
            data = json.loads(response.data.decode())
            self.assertTrue(data['status'] == 'error')
            self.assertTrue(
                data['message'] == 'You do not have permission to do that.')
            self.assertEqual(response.status_code, 401)

    def test_single_user(self):
        """Ensure get single user behaves correctly."""
        user = add_user('michael', 'michael@realpython.com', 'test..123')
        with self.client:
            response = self.client.get(f'/users/{user.id}')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertTrue('created_at' in data['data'])
            self.assertIn('michael', data['data']['username'])
            self.assertIn('michael@realpython.com', data['data']['email'])
            self.assertIn('success', data['status'])

    def test_single_user_no_id(self):
        """Ensure error is thrown if an id is not provided."""
        with self.client:
            response = self.client.get('/users/blah')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 404)
            self.assertIn('User does not exist', data['message'])
            self.assertIn('fail', data['status'])

    def test_single_user_incorrect_id(self):
        """Ensure error is thrown if the id does not exist."""
        with self.client:
            response = self.client.get('/users/999')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 404)
            self.assertIn('User does not exist', data['message'])
            self.assertIn('fail', data['status'])

    def test_all_users(self):
        """Ensure get all users behaves correctly (newest first)."""
        created = datetime.datetime.utcnow() + datetime.timedelta(-30)
        add_user('michael', 'michael@realpython.com', 'test..123', created)
        add_user('fletcher', 'fletcher@realpython.com', 'test..123')
        with self.client:
            response = self.client.get('/users')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertEqual(len(data['data']['users']), 2)
            self.assertTrue('created_at' in data['data']['users'][0])
            self.assertTrue('created_at' in data['data']['users'][1])
            self.assertIn('michael', data['data']['users'][1]['username'])
            self.assertIn(
                'michael@realpython.com', data['data']['users'][1]['email'])
            self.assertIn('fletcher', data['data']['users'][0]['username'])
            self.assertIn(
                'fletcher@realpython.com', data['data']['users'][0]['email'])
            self.assertIn('success', data['status'])
|
import os
import posixpath
import errno
import json
import resource
import sys
import shutil
import textwrap
import urllib.parse
import urllib.request
import warnings
import logging
# External modules
import click
import yaml
# We import botocore here so we can catch when the user tries to
# access AWS without having their credentials configured and provide
# a friendly error message. Apart from that, flintrock.py should
# not really know anything about EC2 or boto since that is delegated
# to ec2.py.
import botocore
# Flintrock modules
from . import ec2
from .exceptions import (
UsageError,
UnsupportedProviderError,
NothingToDo,
Error)
from flintrock import __version__
from .services import HDFS, Spark # TODO: Remove this dependency.
# PyInstaller sets sys.frozen on bundled executables; detect that here so we
# can locate bundled data files correctly.
FROZEN = getattr(sys, 'frozen', False)
if FROZEN:
    # Inside a PyInstaller bundle, data files live in the unpack dir
    # sys._MEIPASS rather than next to this source file.
    THIS_DIR = sys._MEIPASS
else:
    THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Module-level logger for this file.
logger = logging.getLogger('flintrock.flintrock')
def format_message(*, message: str, indent: int=4, wrap: int=70):
    """
    Format a lengthy message for printing to screen: strip common leading
    whitespace, re-wrap to `wrap` columns, then indent every line.
    """
    dedented = textwrap.dedent(text=message)
    wrapped = textwrap.fill(dedented, width=wrap)
    return textwrap.indent(wrapped, prefix=' ' * indent)
def option_name_to_variable_name(option: str):
    """
    Convert an option name like `--ec2-user` to the Python name it gets mapped to,
    like `ec2_user`.
    """
    without_prefix = option.replace('--', '', 1)
    return without_prefix.replace('-', '_')
def variable_name_to_option_name(variable: str):
    """
    Convert a variable name like `ec2_user` to the Click option name it gets mapped to,
    like `--ec2-user`.
    """
    return '--{}'.format(variable.replace('_', '-'))
def option_requires(
        *,
        option: str,
        conditional_value=None,
        requires_all: tuple = (),
        requires_any: tuple = (),
        scope: dict):
    """
    Raise an exception if an option's requirements are not met.

    The option's requirements are checked only if the option has a "truthy" value
    (i.e. it's not a "falsy" value like '', None, or False), and if its value is
    equal to conditional_value, if conditional_value is not None.

    requires_all: Every option in this sequence must be defined.
    requires_any: At least one option in this sequence must be defined.

    This function looks for values by converting the option names to their
    corresponding variable names (e.g. --option-a becomes option_a) and looking them
    up in the provided scope.

    Bug fix: the defaults are now immutable tuples instead of the shared
    mutable `[]` defaults the function originally declared (the classic
    mutable-default-argument pitfall); callers passing lists are unaffected.
    """
    option_value = scope[option_name_to_variable_name(option)]
    if option_value and \
            (conditional_value is None or option_value == conditional_value):
        if requires_all:
            for required_option in requires_all:
                required_name = option_name_to_variable_name(required_option)
                if required_name not in scope or not scope[required_name]:
                    raise UsageError(
                        "Error: Missing option \"{missing_option}\" is required by "
                        "\"{option}{space}{conditional_value}\"."
                        .format(
                            missing_option=required_option,
                            option=option,
                            space=' ' if conditional_value is not None else '',
                            conditional_value=conditional_value if conditional_value is not None else ''))
        if requires_any:
            for required_option in requires_any:
                required_name = option_name_to_variable_name(required_option)
                if required_name in scope and scope[required_name] is not None:
                    break
            else:
                # No early break above: none of the alternatives were set.
                raise UsageError(
                    "Error: \"{option}{space}{conditional_value}\" requires at least "
                    "one of the following options to be set: {at_least}"
                    .format(
                        option=option,
                        space=' ' if conditional_value is not None else '',
                        conditional_value=conditional_value if conditional_value is not None else '',
                        at_least=', '.join(['"' + ra + '"' for ra in requires_any])))
def mutually_exclusive(*, options: list, scope: dict):
    """
    Raise an exception if more than one of the provided options is specified.

    This function looks for values by converting the option names to their
    corresponding variable names (e.g. --option-a becomes option_a) and looking them
    up in the provided scope.
    """
    mutually_exclusive_names = {option_name_to_variable_name(o) for o in options}
    # Collect the truthy options in scope-iteration order so the error
    # message is deterministic (the original popped from a set).
    used_options = [
        name for name, value in scope.items()
        if name in mutually_exclusive_names and value]
    if len(used_options) > 1:
        bad_option1, bad_option2 = used_options[0], used_options[1]
        raise UsageError(
            "Error: \"{option1}\" and \"{option2}\" are mutually exclusive.\n"
            "  {option1}: {value1}\n"
            "  {option2}: {value2}"
            .format(
                option1=variable_name_to_option_name(bad_option1),
                value1=scope[bad_option1],
                option2=variable_name_to_option_name(bad_option2),
                value2=scope[bad_option2]))
def get_config_file() -> str:
    """
    Return the full path to Flintrock's default configuration file
    (config.yaml inside the Click per-user application directory).
    """
    return os.path.join(
        click.get_app_dir(app_name='Flintrock'),
        'config.yaml')
def configure_log(debug: bool):
    """
    Attach a stdout handler to the 'flintrock' logger.

    In debug mode, log at DEBUG with a timestamped, module-tagged format;
    otherwise log bare messages at INFO.
    """
    flintrock_logger = logging.getLogger('flintrock')
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    if debug:
        logger_level = logging.DEBUG
        message_format = '%(asctime)s - flintrock.%(module)-9s - %(levelname)-5s - %(message)s'
    else:
        logger_level = logging.INFO
        message_format = '%(message)s'
    flintrock_logger.setLevel(logger_level)
    stdout_handler.setFormatter(logging.Formatter(message_format))
    flintrock_logger.addHandler(stdout_handler)
def build_hdfs_download_url(ctx, param, value):
    """
    Click callback: substitute the selected --hdfs-version into the
    download URL template's {v} placeholder.
    """
    return value.format(v=ctx.params['hdfs_version'])
def build_spark_download_url(ctx, param, value):
    """
    Click callback: substitute the selected --spark-version into the
    download URL template's {v} placeholder.
    """
    return value.format(v=ctx.params['spark_version'])
def validate_download_source(url):
    """
    Sanity-check a Spark/Hadoop download URL before launch.

    Warns when the URL points at the Apache mirror redirector (slow,
    unreliable, recent releases only), then performs a HEAD-like probe by
    opening the URL; raises Error if the server responds with an HTTP error.
    """
    # Best-effort guess of which product the URL serves, for messaging only.
    if 'spark' in url:
        software = 'Spark'
    elif 'hadoop' in url:
        software = 'Hadoop'
    else:
        software = 'software'
    parsed_url = urllib.parse.urlparse(url)
    if parsed_url.netloc == 'www.apache.org' and parsed_url.path == '/dyn/closer.lua':
        logger.warning(
            "Warning: "
            "Downloading {software} from an Apache mirror. Apache mirrors are "
            "often slow and unreliable, and typically only serve the most recent releases. "
            "We strongly recommend you specify a custom download source. "
            "For more background on this issue, please see: https://github.com/nchammas/flintrock/issues/238"
            .format(
                software=software,
            )
        )
    try:
        # Fix: close the response instead of leaking the connection; we only
        # care whether the URL is reachable, not about its body.
        with urllib.request.urlopen(url):
            pass
    except urllib.error.HTTPError as e:
        raise Error(
            "Error: Could not access {software} download. Maybe try a more recent release?\n"
            "  - Automatically redirected to: {url}\n"
            "  - HTTP error: {code}"
            .format(
                software=software,
                url=e.url,
                code=e.code,
            )
        )
@click.group()
@click.option(
    '--config',
    help="Path to a Flintrock configuration file.",
    default=get_config_file())
@click.option('--provider', default='ec2', type=click.Choice(['ec2']))
@click.version_option(version=__version__)
# TODO: implement some solution like in https://github.com/pallets/click/issues/108
@click.option('--debug/--no-debug', default=False, help="Show debug information.")
@click.pass_context
def cli(cli_context, config, provider, debug):
    """
    Flintrock

    A command-line tool for launching Apache Spark clusters.
    """
    # Stash the provider on the shared context object so every subcommand
    # can read it.
    cli_context.obj['provider'] = provider
    if os.path.isfile(config):
        with open(config) as f:
            config_raw = yaml.safe_load(f)
            # A `debug` key in the config file takes precedence over the
            # --debug CLI flag (unless the flag is set and the key is falsy).
            debug = config_raw.get('debug') or debug
            # Feed config-file values to Click as per-command defaults.
            config_map = config_to_click(normalize_keys(config_raw))
            cli_context.default_map = config_map
    else:
        # Only error out when the user explicitly pointed at a missing file;
        # a missing *default* config is fine.
        if config != get_config_file():
            raise FileNotFoundError(errno.ENOENT, 'No such file', config)
    configure_log(debug=debug)
@cli.command()
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--install-hdfs/--no-install-hdfs', default=False)
@click.option('--hdfs-version', default='2.8.5')
@click.option('--hdfs-download-source',
              help="URL to download Hadoop from.",
              default='https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-{v}/hadoop-{v}.tar.gz',
              show_default=True,
              callback=build_hdfs_download_url)
@click.option('--install-spark/--no-install-spark', default=True)
@click.option('--spark-executor-instances', default=1,
              help="How many executor instances per worker.")
@click.option('--spark-version',
              # Don't set a default here because it will conflict with
              # the config file if the git commit is set.
              # See: https://github.com/nchammas/flintrock/issues/190
              # default=,
              help="Spark release version to install.")
@click.option('--spark-download-source',
              help="URL to download a release of Spark from.",
              default='https://www.apache.org/dyn/closer.lua?action=download&filename=spark/spark-{v}/spark-{v}-bin-hadoop2.7.tgz',
              show_default=True,
              callback=build_spark_download_url)
@click.option('--spark-git-commit',
              help="Git commit to build Spark from. "
                   "Set to 'latest' to build Spark from the latest commit on the "
                   "repository's default branch.")
@click.option('--spark-git-repository',
              help="Git repository to clone Spark from.",
              default='https://github.com/apache/spark',
              show_default=True)
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-key-name')
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-instance-type', default='m5.medium', show_default=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
# We set some of these defaults to empty strings because of boto3's parameter validation.
# See: https://github.com/boto/boto3/issues/400
@click.option('--ec2-availability-zone', default='')
@click.option('--ec2-ami')
@click.option('--ec2-user')
@click.option('--ec2-security-group', 'ec2_security_groups',
              multiple=True,
              help="Additional security groups names to assign to the instances. "
                   "You can specify this option multiple times.")
@click.option('--ec2-spot-price', type=float)
@click.option('--ec2-spot-request-duration', default='7d',
              help="Duration a spot request is valid (e.g. 3d 2h 1m).")
@click.option('--ec2-min-root-ebs-size-gb', type=int, default=30)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-subnet-id', default='')
@click.option('--ec2-instance-profile-name', default='')
@click.option('--ec2-placement-group', default='')
@click.option('--ec2-tenancy', default='default')
@click.option('--ec2-ebs-optimized/--no-ec2-ebs-optimized', default=False)
@click.option('--ec2-instance-initiated-shutdown-behavior', default='stop',
              type=click.Choice(['stop', 'terminate']))
@click.option('--ec2-user-data',
              type=click.File(mode='r', encoding='utf-8'),
              help="Path to EC2 user data script that will run on instance launch.")
@click.option('--ec2-tag', 'ec2_tags',
              callback=ec2.cli_validate_tags,
              multiple=True,
              help="Additional tags (e.g. 'Key,Value') to assign to the instances. "
                   "You can specify this option multiple times.")
@click.pass_context
def launch(
        cli_context,
        cluster_name,
        num_slaves,
        install_hdfs,
        hdfs_version,
        hdfs_download_source,
        install_spark,
        spark_executor_instances,
        spark_version,
        spark_git_commit,
        spark_git_repository,
        spark_download_source,
        assume_yes,
        ec2_key_name,
        ec2_identity_file,
        ec2_instance_type,
        ec2_region,
        ec2_availability_zone,
        ec2_ami,
        ec2_user,
        ec2_security_groups,
        ec2_spot_price,
        ec2_spot_request_duration,
        ec2_min_root_ebs_size_gb,
        ec2_vpc_id,
        ec2_subnet_id,
        ec2_instance_profile_name,
        ec2_placement_group,
        ec2_tenancy,
        ec2_ebs_optimized,
        ec2_instance_initiated_shutdown_behavior,
        ec2_user_data,
        ec2_tags):
    """
    Launch a new cluster.
    """
    provider = cli_context.obj['provider']
    services = []
    # NOTE: option_requires()/mutually_exclusive() read option values via
    # locals(), so the local variable names here must stay in sync with the
    # CLI option names declared above. Do not rename these locals.
    option_requires(
        option='--install-hdfs',
        requires_all=['--hdfs-version'],
        scope=locals())
    option_requires(
        option='--install-spark',
        requires_any=[
            '--spark-version',
            '--spark-git-commit'],
        scope=locals())
    mutually_exclusive(
        options=[
            '--spark-version',
            '--spark-git-commit'],
        scope=locals())
    # Spark needs to know what Hadoop version it's paired with, even when
    # HDFS itself is not being installed.
    option_requires(
        option='--install-spark',
        requires_all=[
            '--hdfs-version'],
        scope=locals())
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-key-name',
            '--ec2-identity-file',
            '--ec2-instance-type',
            '--ec2-region',
            '--ec2-ami',
            '--ec2-user'],
        scope=locals())
    # The subnet is required for non-default VPCs because EC2 does not
    # support user-defined default subnets.
    # See: https://forums.aws.amazon.com/thread.jspa?messageID=707417
    # https://github.com/mitchellh/packer/issues/1935#issuecomment-111235752
    option_requires(
        option='--ec2-vpc-id',
        requires_all=['--ec2-subnet-id'],
        scope=locals())
    check_external_dependency('ssh-keygen')
    if install_hdfs:
        validate_download_source(hdfs_download_source)
        hdfs = HDFS(
            version=hdfs_version,
            download_source=hdfs_download_source,
        )
        services += [hdfs]
    if install_spark:
        if spark_version:
            # Install a packaged Spark release.
            validate_download_source(spark_download_source)
            spark = Spark(
                spark_executor_instances=spark_executor_instances,
                version=spark_version,
                hadoop_version=hdfs_version,
                download_source=spark_download_source,
            )
        elif spark_git_commit:
            # Build Spark from source at a specific commit.
            logger.warning(
                "Warning: Building Spark takes a long time. "
                "e.g. 15-20 minutes on an m5.xlarge instance on EC2.")
            if spark_git_commit == 'latest':
                # Resolve 'latest' to a concrete SHA via the GitHub API.
                spark_git_commit = get_latest_commit(spark_git_repository)
                logger.info("Building Spark at latest commit: {c}".format(c=spark_git_commit))
            spark = Spark(
                spark_executor_instances=spark_executor_instances,
                git_commit=spark_git_commit,
                git_repository=spark_git_repository,
                hadoop_version=hdfs_version,
            )
        services += [spark]
    if provider == 'ec2':
        cluster = ec2.launch(
            cluster_name=cluster_name,
            num_slaves=num_slaves,
            services=services,
            assume_yes=assume_yes,
            key_name=ec2_key_name,
            identity_file=ec2_identity_file,
            instance_type=ec2_instance_type,
            region=ec2_region,
            availability_zone=ec2_availability_zone,
            ami=ec2_ami,
            user=ec2_user,
            security_groups=ec2_security_groups,
            spot_price=ec2_spot_price,
            spot_request_duration=ec2_spot_request_duration,
            min_root_ebs_size_gb=ec2_min_root_ebs_size_gb,
            vpc_id=ec2_vpc_id,
            subnet_id=ec2_subnet_id,
            instance_profile_name=ec2_instance_profile_name,
            placement_group=ec2_placement_group,
            tenancy=ec2_tenancy,
            ebs_optimized=ec2_ebs_optimized,
            instance_initiated_shutdown_behavior=ec2_instance_initiated_shutdown_behavior,
            user_data=ec2_user_data,
            tags=ec2_tags)
    else:
        raise UnsupportedProviderError(provider)
    print("Cluster master: {}".format(cluster.master_host))
    print("Login with: flintrock login {}".format(cluster.name))
def get_latest_commit(github_repository: str):
    """
    Get the latest commit on the default branch of a repository hosted on GitHub.
    """
    parsed_url = urllib.parse.urlparse(github_repository)
    repo_domain = parsed_url.netloc
    repo_path = parsed_url.path.strip('/')
    if repo_domain != 'github.com':
        raise UsageError(
            "Error: Getting the latest commit is only supported "
            "for repositories hosted on GitHub. "
            "Provided repository domain was: {d}".format(d=repo_domain))
    # The GitHub API lists commits newest-first, so the first entry is the tip.
    commits_url = "https://api.github.com/repos/{rp}/commits".format(rp=repo_path)
    try:
        with urllib.request.urlopen(commits_url) as response:
            commits = json.loads(response.read().decode('utf-8'))
            return commits[0]['sha']
    except Exception as e:
        raise Exception(
            "Could not get latest commit for repository: {r}"
            .format(r=repo_path)) from e
@cli.command()
@click.argument('cluster-name')
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def destroy(cli_context, cluster_name, assume_yes, ec2_region, ec2_vpc_id):
    """
    Destroy a cluster.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=['--ec2-region'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
    else:
        raise UnsupportedProviderError(provider)
    if not assume_yes:
        # Show the cluster and ask for confirmation before destroying it.
        cluster.print()
        click.confirm(
            text="Are you sure you want to destroy this cluster?",
            abort=True)
    logger.info("Destroying {c}...".format(c=cluster.name))
    cluster.destroy()
@cli.command()
@click.argument('cluster-name', required=False)
@click.option('--master-hostname-only', is_flag=True, default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def describe(
        cli_context,
        cluster_name,
        master_hostname_only,
        ec2_region,
        ec2_vpc_id):
    """
    Describe an existing cluster.

    Leave out the cluster name to find all Flintrock-managed clusters.

    The output of this command is both human- and machine-friendly. Full cluster
    descriptions are output in YAML.
    """
    provider = cli_context.obj['provider']
    # Human-readable description of where we searched (e.g. "in region x").
    search_area = ""
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=['--ec2-region'],
        scope=locals())
    if cluster_name:
        cluster_names = [cluster_name]
    else:
        # An empty list means "find all clusters".
        cluster_names = []
    if provider == 'ec2':
        search_area = "in region {r}".format(r=ec2_region)
        clusters = ec2.get_clusters(
            cluster_names=cluster_names,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
    else:
        raise UnsupportedProviderError(provider)
    if cluster_name:
        # A specific cluster was requested; show just that one.
        cluster = clusters[0]
        if master_hostname_only:
            logger.info(cluster.master_host)
        else:
            cluster.print()
    else:
        if master_hostname_only:
            for cluster in sorted(clusters, key=lambda x: x.name):
                logger.info("{}: {}".format(cluster.name, cluster.master_host))
        else:
            logger.info("Found {n} cluster{s}{space}{search_area}.".format(
                n=len(clusters),
                s='' if len(clusters) == 1 else 's',
                space=' ' if search_area else '',
                search_area=search_area))
            if clusters:
                logger.info('---')
                for cluster in sorted(clusters, key=lambda x: x.name):
                    cluster.print()
# TODO: Provide different command or option for going straight to Spark Shell. (?)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def login(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
    """
    Login to the master of an existing cluster.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-identity-file',
            '--ec2-user'],
        scope=locals())
    # SSH is required to open the interactive session.
    check_external_dependency('ssh')
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
    else:
        raise UnsupportedProviderError(provider)
    # TODO: Check that master up first and error out cleanly if not
    #       via ClusterInvalidState.
    cluster.login(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def start(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
    """
    Start an existing, stopped cluster.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-identity-file',
            '--ec2-user'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
    else:
        raise UnsupportedProviderError(provider)
    # Verify the cluster is in a startable state before doing any work.
    cluster.start_check()
    logger.info("Starting {c}...".format(c=cluster_name))
    cluster.start(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.pass_context
def stop(cli_context, cluster_name, ec2_region, ec2_vpc_id, assume_yes):
    """
    Stop an existing, running cluster.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=['--ec2-region'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
    else:
        raise UnsupportedProviderError(provider)
    # Verify the cluster is in a stoppable state before prompting the user.
    cluster.stop_check()
    if not assume_yes:
        cluster.print()
        click.confirm(
            text="Are you sure you want to stop this cluster?",
            abort=True)
    logger.info("Stopping {c}...".format(c=cluster_name))
    cluster.stop()
    logger.info("{c} is now stopped.".format(c=cluster_name))
@cli.command(name='add-slaves')
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.option('--ec2-spot-price', type=float)
@click.option('--ec2-spot-request-duration', default='7d',
              help="Duration a spot request is valid (e.g. 3d 2h 1m).")
@click.option('--ec2-min-root-ebs-size-gb', type=int, default=30)
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-tag', 'ec2_tags',
              callback=ec2.cli_validate_tags,
              multiple=True,
              help="Additional tags (e.g. 'Key,Value') to assign to the instances. "
                   "You can specify this option multiple times.")
@click.pass_context
def add_slaves(
        cli_context,
        cluster_name,
        num_slaves,
        ec2_region,
        ec2_vpc_id,
        ec2_identity_file,
        ec2_user,
        ec2_spot_price,
        ec2_spot_request_duration,
        ec2_min_root_ebs_size_gb,
        ec2_tags,
        assume_yes):
    """
    Add slaves to an existing cluster.

    Flintrock will configure new slaves based on information queried
    automatically from the master.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-identity-file',
            '--ec2-user'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
        # Provider-specific launch parameters, forwarded to add_slaves() as
        # keyword arguments below.
        provider_options = {
            'min_root_ebs_size_gb': ec2_min_root_ebs_size_gb,
            'spot_price': ec2_spot_price,
            'spot_request_duration': ec2_spot_request_duration,
            'tags': ec2_tags
        }
    else:
        raise UnsupportedProviderError(provider)
    if cluster.num_masters == 0:
        # New slaves are configured from the master, so there must be one.
        raise Error(
            "Cannot add slaves to cluster '{c}' since it does not "
            "appear to have a master."
            .format(
                c=cluster_name))
    # Pull the cluster's service configuration (manifest) off the master so
    # the new slaves match the existing setup.
    cluster.load_manifest(
        user=user,
        identity_file=identity_file)
    cluster.add_slaves_check()
    if provider == 'ec2':
        cluster.add_slaves(
            user=user,
            identity_file=identity_file,
            num_slaves=num_slaves,
            assume_yes=assume_yes,
            **provider_options)
@cli.command(name='remove-slaves')
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-user')
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.pass_context
def remove_slaves(
        cli_context,
        cluster_name,
        num_slaves,
        ec2_region,
        ec2_vpc_id,
        ec2_user,
        ec2_identity_file,
        assume_yes):
    """
    Remove slaves from an existing cluster.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-user',
            '--ec2-identity-file'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
    else:
        raise UnsupportedProviderError(provider)
    if num_slaves > cluster.num_slaves:
        # Cap the request at the actual slave count rather than erroring out.
        logger.warning(
            "Warning: Cluster has {c} slave{cs}. "
            "You asked to remove {n} slave{ns}."
            .format(
                c=cluster.num_slaves,
                cs='' if cluster.num_slaves == 1 else 's',
                n=num_slaves,
                ns='' if num_slaves == 1 else 's'))
        num_slaves = cluster.num_slaves
    if not assume_yes:
        cluster.print()
        click.confirm(
            text=("Are you sure you want to remove {n} slave{s} from this cluster?"
                  .format(
                      n=num_slaves,
                      s='' if num_slaves == 1 else 's')),
            abort=True)
    logger.info("Removing {n} slave{s}..."
                .format(
                    n=num_slaves,
                    s='' if num_slaves == 1 else 's'))
    cluster.remove_slaves(
        user=user,
        identity_file=identity_file,
        num_slaves=num_slaves)
@cli.command(name='run-command')
@click.argument('cluster-name')
@click.argument('command', nargs=-1)
@click.option('--master-only', help="Run on the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def run_command(
        cli_context,
        cluster_name,
        command,
        master_only,
        ec2_region,
        ec2_vpc_id,
        ec2_identity_file,
        ec2_user):
    """
    Run a shell command on a cluster.

    Examples:

        flintrock run-command my-cluster 'touch /tmp/flintrock'
        flintrock run-command my-cluster -- yum install -y package

    Flintrock will return a non-zero code if any of the cluster nodes raises an error
    while running the command.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-identity-file',
            '--ec2-user'],
        scope=locals())
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
    else:
        raise UnsupportedProviderError(provider)
    # Verify the cluster is in a state that can accept commands.
    cluster.run_command_check()
    logger.info("Running command on {target}...".format(
        target="master only" if master_only else "cluster"))
    cluster.run_command(
        command=command,
        master_only=master_only,
        user=user,
        identity_file=identity_file)
@cli.command(name='copy-file')
@click.argument('cluster-name')
@click.argument('local_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('remote_path', type=click.Path())
@click.option('--master-only', help="Copy to the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
              type=click.Path(exists=True, dir_okay=False),
              help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.option('--assume-yes/--no-assume-yes', default=False, help="Prompt before large uploads.")
@click.pass_context
def copy_file(
        cli_context,
        cluster_name,
        local_path,
        remote_path,
        master_only,
        ec2_region,
        ec2_vpc_id,
        ec2_identity_file,
        ec2_user,
        assume_yes):
    """
    Copy a local file up to a cluster.

    This will copy the file to the same path on each node of the cluster.

    Examples:

        flintrock copy-file my-cluster /tmp/file.102.txt /tmp/file.txt
        flintrock copy-file my-cluster /tmp/spark-defaults.conf /tmp/

    Flintrock will return a non-zero code if any of the cluster nodes raises an error.
    """
    provider = cli_context.obj['provider']
    # locals() is the lookup scope for option_requires(); keep local names
    # in sync with the option names above.
    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=[
            '--ec2-region',
            '--ec2-identity-file',
            '--ec2-user'],
        scope=locals())
    # We assume POSIX for the remote path since Flintrock
    # only supports clusters running CentOS / Amazon Linux.
    if not posixpath.basename(remote_path):
        # remote_path ends in '/': treat it as a directory and keep the
        # local file's name.
        remote_path = posixpath.join(remote_path, os.path.basename(local_path))
    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
        user = ec2_user
        identity_file = ec2_identity_file
    else:
        raise UnsupportedProviderError(provider)
    cluster.copy_file_check()
    if not assume_yes and not master_only:
        # Warn before uploading more than ~1 MB in aggregate across all nodes.
        file_size_bytes = os.path.getsize(local_path)
        num_nodes = len(cluster.slave_ips) + 1  # TODO: cluster.num_nodes
        total_size_bytes = file_size_bytes * num_nodes
        if total_size_bytes > 10 ** 6:
            logger.warning("WARNING:")
            logger.warning(
                format_message(
                    message="""\
                        You are trying to upload {total_size} bytes ({size} bytes x {count}
                        nodes in {cluster}). Depending on your upload bandwidth, this may take
                        a long time.
                        You may be better off uploading this file to a storage service like
                        Amazon S3 and downloading it from there to the cluster using
                        `flintrock run-command ...`.
                        """.format(
                            size=file_size_bytes,
                            count=num_nodes,
                            cluster=cluster_name,
                            total_size=total_size_bytes),
                    wrap=60))
            click.confirm(
                text="Are you sure you want to continue?",
                default=True,
                abort=True)
    logger.info("Copying file to {target}...".format(
        target="master only" if master_only else "cluster"))
    cluster.copy_file(
        local_path=local_path,
        remote_path=remote_path,
        master_only=master_only,
        user=user,
        identity_file=identity_file)
def normalize_keys(obj):
    """
    Used to map keys from config files to Python parameter names.

    Recursively replaces '-' with '_' in every dict key (e.g. 'num-slaves'
    becomes 'num_slaves'); non-dict values are returned unchanged.
    """
    # isinstance() instead of `type(obj) != dict`: idiomatic, and also
    # normalizes dict subclasses (e.g. OrderedDict) instead of passing them
    # through untouched.
    if not isinstance(obj, dict):
        return obj
    return {k.replace('-', '_'): normalize_keys(v) for k, v in obj.items()}
def config_to_click(config: dict) -> dict:
    """
    Convert a dictionary of configurations loaded from a Flintrock config file
    to a dictionary that Click can use to set default options.
    """
    # Flatten per-service settings into 'service_key' names
    # (e.g. spark.version -> spark_version).
    service_configs = {}
    if 'services' in config:
        for service in config['services']:
            service_settings = config['services'][service]
            if service_settings:
                for key, value in service_settings.items():
                    service_configs[service + '_' + key] = value
    # Prefix EC2 provider settings with 'ec2_'.
    ec2_configs = {
        'ec2_' + key: value
        for key, value in config['providers']['ec2'].items()}
    # `launch` gets everything; later updates win on key collisions, matching
    # launch < ec2 < services precedence.
    launch_defaults = {}
    launch_defaults.update(config['launch'])
    launch_defaults.update(ec2_configs)
    launch_defaults.update(service_configs)
    click_map = {'launch': launch_defaults}
    # Every other command only needs the EC2 settings.
    for command in (
            'describe',
            'destroy',
            'login',
            'start',
            'stop',
            'add-slaves',
            'remove-slaves',
            'run-command',
            'copy-file'):
        click_map[command] = ec2_configs
    return click_map
@cli.command()
@click.option('--locate', is_flag=True, default=False,
              help="Don't open an editor. "
                   "Just open the folder containing the configuration file.")
@click.pass_context
def configure(cli_context, locate):
    """
    Configure Flintrock's defaults.

    This will open Flintrock's configuration file in your default YAML editor so
    you can set your defaults.
    """
    config_file = get_config_file()
    if not os.path.isfile(config_file):
        # First run: seed the config from the bundled template.
        logger.info("Initializing config file from template...")
        os.makedirs(os.path.dirname(config_file), exist_ok=True)
        shutil.copyfile(
            src=os.path.join(THIS_DIR, 'config.yaml.template'),
            dst=config_file)
        os.chmod(config_file, mode=0o644)
    launch_status = click.launch(config_file, locate=locate)
    if launch_status == 0:
        return
    raise Error(
        "Flintrock could not launch an application to {action} "
        "the config file at '{location}'. You may want to manually "
        "find and edit this file."
        .format(
            action="locate" if locate else "edit",
            location=config_file
        )
    )
def flintrock_is_in_development_mode() -> bool:
    """
    Check if Flintrock was installed in development mode.

    Use this function to toggle behavior that only Flintrock developers should
    see.
    """
    # This esoteric technique was pulled from pip: an .egg-link file on
    # sys.path marks an editable ("pip install -e") install.
    # See: https://github.com/pypa/pip/pull/3258/files#diff-ab583908279e865537dec218246edcfcR310
    return any(
        os.path.isfile(os.path.join(path_item, 'Flintrock.egg-link'))
        for path_item in sys.path)
def set_open_files_limit(desired_limit):
    """
    On POSIX systems, set the open files limit to the desired number, unless
    it is already equal to or higher than that.

    Setting a high limit enables Flintrock to launch or interact with really
    large clusters.

    Background discussion: https://github.com/nchammas/flintrock/issues/81
    """
    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft_limit >= desired_limit:
        # Current limit already suffices; nothing to do.
        return
    if desired_limit > hard_limit:
        # We can't exceed the hard limit without privileges; warn and cap.
        warnings.warn(
            "Flintrock cannot set the open files limit to {desired} "
            "because the OS hard limit is {hard}. Going with {hard}. "
            "You may have problems launching or interacting with "
            "really large clusters."
            .format(
                desired=desired_limit,
                hard=hard_limit),
            category=RuntimeWarning,
            stacklevel=2)
    new_soft_limit = min(desired_limit, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft_limit, hard_limit))
def check_external_dependency(executable_name: str):
    """
    Raise an Error if the named executable cannot be found on the PATH.
    """
    # shutil.which() returns None when the executable is not on the PATH.
    if shutil.which(executable_name) is not None:
        return
    raise Error(
        "Error: Flintrock could not find '{executable}' on your PATH. "
        "Flintrock needs this executable to carry out the operation you "
        "requested. Please install it and try again."
        .format(
            executable=executable_name
        )
    )
def main() -> int:
    """
    CLI entry point.

    Returns a shell exit code on handled failures (0 for "nothing to do",
    2 for usage errors, 1 for other Flintrock errors); falls through
    (implicitly returning None) on success.
    """
    # Starting in Python 3.7, deprecation warnings are shown by default. We
    # don't want to show these to end-users.
    # See: https://docs.python.org/3/library/warnings.html#default-warning-filter
    if not flintrock_is_in_development_mode():
        warnings.simplefilter(action='ignore', category=DeprecationWarning)
    # Raise the open-files limit so large clusters don't exhaust file handles.
    set_open_files_limit(4096)
    try:
        try:
            # We pass in obj so we can add attributes to it, like provider, which
            # get shared by all commands.
            # See: http://click.pocoo.org/6/api/#click.Context
            cli(obj={})
        except botocore.exceptions.NoCredentialsError:
            # Translate boto's credential error into a friendlier message,
            # handled by the outer `except Error` below.
            raise Error(
                "Flintrock could not find your AWS credentials. "
                "You can fix this by providing your credentials "
                "via environment variables or by creating a shared "
                "credentials file.\n"
                "For more information see:\n"
                "  * https://boto3.readthedocs.io/en/latest/guide/configuration.html#environment-variables\n"
                "  * https://boto3.readthedocs.io/en/latest/guide/configuration.html#shared-credentials-file"
            )
    except NothingToDo as e:
        print(e)
        return 0
    except UsageError as e:
        print(e, file=sys.stderr)
        return 2
    except Error as e:
        print(e, file=sys.stderr)
        return 1
|
from __future__ import division
import copy
import tensorflow as tf
def tri_vec_shape(N):
    """Shape (as a 1-element list) of a vector holding the lower triangle of an N x N matrix."""
    num_tri_entries = N * (N + 1) // 2
    return [num_tri_entries]
def init_list(init, dims):
    """
    Build a nested list with the given dimensions, every leaf slot set from
    `init`.

    If `init` is callable it is invoked once per leaf, so each leaf gets a
    fresh value; otherwise the same `init` object is placed in every leaf.
    """
    if len(dims) == 1:
        # Base case: one flat row of leaves.
        return [init() if callable(init) else init for _ in range(dims[0])]
    # Recursive case: dims[0] independent sub-lists of the remaining shape.
    return [init_list(init, dims[1:]) for _ in range(dims[0])]
def ceil_divide(dividend, divisor):
    """
    Integer ceiling division: smallest integer >= dividend / divisor.

    Bug fix: this module has `from __future__ import division`, so the
    original `/` performed true division and returned a non-integral float
    whenever (dividend + divisor - 1) was not a multiple of divisor
    (e.g. ceil_divide(4, 2) evaluated to 2.5). Floor division gives the
    intended exact integer result.
    """
    return (dividend + divisor - 1) // divisor
def log_cholesky_det(chol):
    """Log-determinant of a positive-definite matrix given its Cholesky factor `chol`."""
    # det(A) = prod(diag(L))^2, so log det(A) = 2 * sum(log(diag(L))).
    chol_diag = tf.diag_part(chol)
    return 2 * tf.reduce_sum(tf.log(chol_diag))
def diag_mul(mat1, mat2):
    """Diagonal of mat1 @ mat2, computed without forming the full product."""
    mat2_t = tf.transpose(mat2)
    return tf.reduce_sum(mat1 * mat2_t, 1)
def logsumexp(vals, dim=None):
    """
    Numerically stable log(sum(exp(vals))) along dimension `dim`
    (over all elements when dim is None).

    Uses the standard log-sum-exp trick: subtract the max before
    exponentiating to avoid overflow, then add it back outside the log.
    """
    m = tf.reduce_max(vals, dim)
    if dim is None:
        # Global reduction: m is a scalar and broadcasts directly.
        return m + tf.log(tf.reduce_sum(tf.exp(vals - m), dim))
    else:
        # Axis reduction: reinsert the reduced axis so m broadcasts
        # against vals.
        return m + tf.log(tf.reduce_sum(tf.exp(vals - tf.expand_dims(m, dim)), dim))
def mat_square(mat):
    """Return mat @ mat^T."""
    mat_t = tf.transpose(mat)
    return tf.matmul(mat, mat_t)
def get_flags():
    """Define and return the tf.app.flags FLAGS object shared by the
    training scripts (optimisation, sampling, kernel and device settings)."""
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    # --- optimisation / training-loop settings ---
    flags.DEFINE_integer('batch_size', 100, 'Batch size. '
                         'Must divide evenly into the dataset sizes.')
    flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
    flags.DEFINE_integer('n_epochs', 10000, 'Number of passes through the data')
    flags.DEFINE_integer('n_inducing', 240, 'Number of inducing points')
    flags.DEFINE_integer('display_step', 500, 'Display progress every FLAGS.display_step iterations')
    # --- Monte Carlo sample counts ---
    flags.DEFINE_integer('mc_train', 100, 'Number of Monte Carlo samples used to compute stochastic gradients')
    flags.DEFINE_integer('mc_test', 100, 'Number of Monte Carlo samples for predictions')
    flags.DEFINE_string('optimizer', "adagrad", 'Optimizer')
    # --- kernel / model settings ---
    flags.DEFINE_boolean('is_ard', True, 'Using ARD kernel or isotropic')
    flags.DEFINE_float('lengthscale', 10, 'Initial lengthscale')
    flags.DEFINE_integer('var_steps', 50, 'Number of times spent optimizing the variational objective.')
    flags.DEFINE_integer('loocv_steps', 50, 'Number of times spent optimizing the LOOCV objective.')
    flags.DEFINE_float('opt_growth', 0.0, 'Percentage to grow the number of each optimizations.')
    flags.DEFINE_integer('num_components', 1, 'Number of mixture components on posterior')
    flags.DEFINE_string('kernel', 'rbf', 'kernel')
    # --- device / arccosine-kernel settings ---
    flags.DEFINE_string('device_name', 'gpu0', 'Device name')
    flags.DEFINE_integer('kernel_degree', 0, 'Degree of arccosine kernel')
    flags.DEFINE_integer('kernel_depth', 1, 'Depth of arcosine kernel')
    return FLAGS
|
#!/usr/local/bin/python3
import collections
class Point(collections.namedtuple("_Point", ["x", "y", "name"])):
    """A labelled grid coordinate."""

    def distance_to(self, x, y):
        """Manhattan distance from this point to (x, y)."""
        dx = abs(self.x - x)
        dy = abs(self.y - y)
        return dx + dy
points = []
# Parse one "x, y" coordinate per line; points are named A, B, C, ... in file order.
with open("input.txt") as f:
    for line in f.readlines():
        [x, y] = map(int, line.strip().split(", "))
        name = chr(ord('A') + len(points))
        points.append(Point(x, y, name))
# Bounding box of all points, padded by one cell on each side.
min_x = min(points, key=lambda p: p.x).x - 1
min_y = min(points, key=lambda p: p.y).y - 1
max_x = max(points, key=lambda p: p.x).x + 1
max_y = max(points, key=lambda p: p.y).y + 1
width = max_x - min_x
height = max_y - min_y
print("x: %d to %d -> width of %d" % (min_x, max_x, width))
print("y: %d to %d -> height of %d" % (min_y, max_y, height))
# Number of grid cells whose unique closest point is the given point.
point_counts = collections.defaultdict(int)
grid = []
# Points whose claimed region touches the grid border (their area is infinite).
edge_points = set()
# grid[x][y] holds the owning Point of each cell (None until assigned).
for x in range(width):
    grid.append([None] * height)
def print_grid():
    """Render the grid row by row: owner initials (uppercase on the point's
    own cell, lowercase elsewhere), blanks for edge-owned or unclaimed cells."""
    for y in range(height):
        cells = []
        for x in range(width):
            owner = grid[x][y]
            if not owner or owner in edge_points:
                cells.append(" ")
            elif owner.x == x + min_x and owner.y == y + min_y:
                cells.append(owner.name)
            else:
                cells.append(owner.name.lower())
        print("".join(cells))
# Mark each point's own cell on the grid.
for p in points:
    grid[p.x - min_x][p.y - min_y] = p
print_grid()
print("--------")
# Assign every cell to its unique nearest point; ties claim no owner.
for x in range(min_x, max_x):
    for y in range(min_y, max_y):
        closest_point = min(points, key=lambda p:p.distance_to(x, y))
        second_closest_point = min([p for p in points if p != closest_point], key=lambda p:p.distance_to(x, y))
        if closest_point.distance_to(x, y) != second_closest_point.distance_to(x, y):
            point_counts[closest_point] += 1
            grid[x - min_x][y - min_y] = closest_point
print_grid()
print("--------")
# Any point owning a border cell extends to infinity; collect those points.
for x in range(width):
    if grid[x][0]:
        edge_points.add(grid[x][0])
    if grid[x][-1]:
        edge_points.add(grid[x][-1])
for y in range(height):
    if grid[0][y]:
        edge_points.add(grid[0][y])
    if grid[-1][y]:
        edge_points.add(grid[-1][y])
print_grid()
print("--------")
# Drop infinite regions and report the largest finite region size.
for point in edge_points:
    del point_counts[point]
print(max(point_counts.values()))
|
#!/usr/bin/env python3
# encoding: utf-8
""" Configuration parser for reading, writing and manipulating
.ini files. This implementation features some customisations
such as sorting of keys, and convenience functions that
wrap load() and save() operations.
"""
from configparser import SafeConfigParser
# Signal is used in class methods write_config() at the bottom
# of this file. Signal lock forces exection to conclude before
# other Signal operations can execute.
#
# from facil.threadutils import Signal
# with Signal._lock():
# config.write_config(a,b,c)
from .threadutils import Signal
class TestConfig(SafeConfigParser):
    """Config parser bound to a single .ini file.

    Extends SafeConfigParser with typed getters that fall back to
    registered defaults, optional key sorting on write, and load()/save()
    convenience wrappers around the bound filename.
    """
    def __init__(self, filename, _DEFAULTS=None):
        """Create a parser bound to `filename`.

        _DEFAULTS is a list of (section, option, value) tuples consulted by
        get_safe() when an option is missing. The default is None rather
        than a mutable [] to avoid state shared across instances.
        """
        self.filename = filename
        self._DEFAULTS = _DEFAULTS if _DEFAULTS is not None else []
        SafeConfigParser.__init__(self)
        self.load()
        # Future use. Example of upgrading a deprecated key/val:
        # upgrade from deprecated "currency" to "quote_currency"
        if self.has_option("forex", "currency"):
            self.set("forex", "quote_currency", self.get_string("forex", "currency"))
            self.remove_option("forex", "currency")
            self.save()

    def init_defaults(self, defaults):
        """Add any missing default values.

        `defaults` is a list of (section, option, value) tuples.
        """
        for (sect, opt, default) in defaults:
            self._default(sect, opt, default)

    def save(self, sort=False):
        """Save the config to the .ini file, optionally sorting keys."""
        with open(self.filename, 'w') as configfile:
            self.write(configfile, sort, space_around_delimiters=True)

    def load(self):
        """(Re)load the config from the .ini file (missing file is ignored)."""
        self.read(self.filename)

    def get_safe(self, sect, opt):
        """Get a value without raising.

        On a missing section/option, materialise the registered default (if
        any) into the file and return it; otherwise return "".
        """
        try:
            return self.get(sect, opt)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Missing sections/options raise
        # ordinary Exception subclasses, so catch those only.
        except Exception:
            for (dsect, dopt, default) in self._DEFAULTS:
                if dsect == sect and dopt == opt:
                    self._default(sect, opt, default)
                    return default
            return ""

    def get_bool(self, sect, opt):
        """Get a boolean value from config (True only for the string "True")."""
        return self.get_safe(sect, opt) == "True"

    def get_string(self, sect, opt):
        """Get a string value from config."""
        return self.get_safe(sect, opt)

    def get_int(self, sect, opt):
        """Get an int value from config (0 when unparsable)."""
        vstr = self.get_safe(sect, opt)
        try:
            return int(vstr)
        except ValueError:
            return 0

    def get_float(self, sect, opt):
        """Get a float value from config (0.0 when unparsable)."""
        vstr = self.get_safe(sect, opt)
        try:
            return float(vstr)
        except ValueError:
            return 0.0

    def _default(self, section, option, default, sort=False):
        """Create a default option (and its section) if it does not yet exist,
        persisting the file when something was added."""
        if not self.has_section(section):
            self.add_section(section)
        if not self.has_option(section, option):
            self.set(section, option, default)
            self.save(sort)

    def write(self, fp, sort=False, space_around_delimiters=True):
        """Write an .ini-format representation of the configuration state.

        If `space_around_delimiters` is True (the default), delimiters
        between keys and values are surrounded by spaces. Note that comments
        in the original configuration file are not preserved when writing
        the configuration back.
        """
        if space_around_delimiters:
            d = " {} ".format(self._delimiters[0])
        else:
            d = self._delimiters[0]
        if self._defaults:
            self._write_section(fp, self.default_section,
                                self._defaults.items(), d)
        for section in self._sections:
            self._write_section(fp, section,
                                self._sections[section].items(), d, sort)

    def _write_section(self, fp, section_name, section_items, delimiter, sort):
        """Write a single section to the specified `fp`, optionally key-sorted."""
        fp.write("[{}]\n".format(section_name))
        if sort:
            section_items = sorted(section_items)
        for key, value in section_items:
            value = self._interpolation.before_write(self, section_name, key, value)
            if value is not None or not self._allow_no_value:
                # Continuation lines are indented so the parser reads them back.
                value = delimiter + str(value).replace('\n', '\n\t')
            else:
                value = ""
            fp.write("{}{}\n".format(key, value))
        fp.write("\n")

    ##
    # functions for micro-managing .ini file options
    #
    def write_config_setting(self, section, option, value):
        """Write a single setting to the ini file under the Signal lock."""
        with Signal._lock:
            # (removed an unused read of the previous value)
            self.set(section, option, str(value))
            self.save()

    def toggle_setting(self, alternatives, section, option, direction):
        """Cycle a setting through `alternatives` by `direction` steps
        (falling back to the first alternative for unknown values) and
        return the new value."""
        with Signal._lock:
            setting = self.get_string(section, option)
            try:
                newindex = (alternatives.index(setting) + direction) % len(alternatives)
            except ValueError:
                newindex = 0
            self.set(section, option, alternatives[newindex])
            self.save()
            return alternatives[newindex]
|
# Generated by Django 2.1.7 on 2019-05-07 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional integer ``session_id`` field to analytics.ObjectViewed."""

    dependencies = [
        ('analytics', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='objectviewed',
            name='session_id',
            # Nullable/blank so existing rows need no backfill.
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
from pyschedule import Scenario, solvers, plotters
def plot(S):
    """Solve scenario S with the MIP solver and plot it; report if infeasible."""
    if not solvers.mip.solve(S):
        print('no solution exists')
        return
    # %matplotlib inline
    plotters.matplotlib.plot(S, task_colors=task_colors, fig_size=(10, 5))
if __name__ == "__main__":
    S = Scenario('bike_paint_shop', horizon=10)
    Alice = S.Resource('Alice')
    Bob = S.Resource('Bob')
    # Two paint jobs (2 slots each) followed by post-processing tasks.
    green_paint, red_paint = S.Task('green_paint', length=2), S.Task('red_paint', length=2)
    green_post, red_post = S.Task('green_post'), S.Task('red_post')
    # Painting precedes post-processing; red needs a one-slot drying gap.
    S += green_paint < green_post, red_paint + 1 <= red_post
    # green_paint += Alice|Bob
    # green_post += Alice|Bob
    #
    # red_paint += Alice|Bob
    # red_post += Alice|Bob
    S.clear_solution()
    S.use_makespan_objective()
    task_colors = { green_paint : '#A1D372',
                    green_post : '#A1D372',
                    red_paint : '#EB4845',
                    red_post : '#EB4845',
                    S['MakeSpan'] : '#7EA7D8'}
    # First remove the old resource to task assignments
    # green_paint -= Alice|Bob
    # green_post -= Alice|Bob
    # red_paint -= Alice|Bob
    # red_post -= Alice|Bob
    # Add new shared ones: the same alternative resource must do both
    # the paint task and its matching post task.
    green_resource = Alice|Bob
    green_paint += green_resource
    green_post += green_resource
    red_resource = Alice|Bob
    red_paint += red_resource
    red_post += red_resource
    # Only one bike can be in the paint shop at a time.
    Paint_Shop = S.Resource('Paint_Shop')
    red_paint += Paint_Shop
    green_paint += Paint_Shop
    # Both workers take lunch between slots 3 and 5.
    Lunch = S.Task('Lunch')
    Lunch += {Alice, Bob}
    S += Lunch > 3, Lunch < 5
    task_colors[Lunch] = '#7EA7D8'
    S += red_paint > 2
    #Alice is a morning bird
    S += Alice['length'][:3] >= 3
    print(S)
    # plot(S)
    s = S.solution()
    # BUG FIX: was `print s` — Python 2 print-statement syntax, which is a
    # SyntaxError under Python 3 (the rest of this file uses print()).
    print(s)
|
from keckdrpframework.primitives.base_primitive import BasePrimitive
from kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_reader, \
kcwi_fits_writer
import os
class CorrectIllumination(BasePrimitive):
    """Correct a frame for illumination using the best available master flat.

    The precondition searches the processing table for a master flat in
    priority order: internal (MFLAT), then twilight (MTWIF), then dome
    (MDOME). ``_perform`` applies the flat to the science frame and, when
    present, to the companion ``_obj`` and ``_sky`` frames, then writes the
    flat-corrected ("intf") products.

    NOTE(review): the original docstring said "Subtract master bias frame",
    which does not match what this primitive does.
    """
    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.logger = context.pipeline_logger

    def _pre_condition(self):
        """
        Checks if we can correct illumination based on the processing table.

        Sets ``action.args.master_flat`` to the matching master flat
        filename when one is found.
        :return: True when a master flat of any accepted type exists
        """
        self.action.args.master_flat = None
        self.logger.info("Checking precondition for CorrectIllumination")
        # first check for internal flat
        target_type = 'MFLAT'
        tab = self.context.proctab.n_proctab(frame=self.action.args.ccddata,
                                             target_type=target_type,
                                             nearest=True)
        if len(tab) <= 0:
            # next look for twilight flat
            target_type = 'MTWIF'
            tab = self.context.proctab.n_proctab(frame=self.action.args.ccddata,
                                                 target_type=target_type,
                                                 nearest=True)
            if len(tab) <= 0:
                # finally look for dome flat
                target_type = 'MDOME'
                tab = self.context.proctab.n_proctab(
                    frame=self.action.args.ccddata,
                    target_type=target_type,
                    nearest=True)
                if len(tab) <= 0:
                    precondition = False
                else:
                    precondition = True
            else:
                precondition = True
        else:
            precondition = True
        self.logger.info("pre condition got %d %s flats, expected 1"
                         % (len(tab), target_type))
        if precondition:
            # Derive the master flat filename from the originating frame name,
            # e.g. <stem>_mflat.fits / <stem>_mtwif.fits / <stem>_mdome.fits.
            self.action.args.master_flat = tab['OFNAME'][0].split('.')[0] + \
                '_' + target_type.lower() + ".fits"
        return precondition

    def _perform(self):
        """Apply the master flat to the science (and obj/sky) frames and
        write out the flat-corrected products."""
        # Header keyword to update
        key = 'FLATCOR'
        keycom = 'flat corrected?'
        # obj, sky companion frames (set below only if present on disk)
        obj = None
        sky = None
        self.logger.info("Correcting Illumination")
        if self.action.args.master_flat:
            mflat = kcwi_fits_reader(
                os.path.join(os.path.dirname(self.action.args.name),
                             self.config.instrument.output_directory,
                             self.action.args.master_flat))[0]
            # do the correction
            # NOTE(review): this multiplies by the master flat image —
            # presumably the master flat stores a normalized/inverse
            # response; confirm against the flat-building primitive.
            self.action.args.ccddata.data *= mflat.data
            # update header keywords
            self.action.args.ccddata.header[key] = (True, keycom)
            self.action.args.ccddata.header['MFFILE'] = (
                self.action.args.master_flat, "Master flat filename")
            # check for obj, sky observations (only taken with the nod-and-
            # shuffle mask and more than one open shutter position)
            if self.action.args.nasmask and self.action.args.numopen > 1:
                ofn = self.action.args.ccddata.header['OFNAME']
                objfn = ofn.split('.')[0] + '_obj.fits'
                full_path = os.path.join(
                    os.path.dirname(self.action.args.name),
                    self.config.instrument.output_directory, objfn)
                if os.path.exists(full_path):
                    obj = kcwi_fits_reader(full_path)[0]
                    # correction
                    obj.data *= mflat.data
                    # update header
                    obj.header[key] = (True, keycom)
                    obj.header['MFFILE'] = (
                        self.action.args.master_flat, 'Master flat filename')
                else:
                    obj = None
                skyfn = ofn.split('.')[0] + '_sky.fits'
                full_path = os.path.join(
                    os.path.dirname(self.action.args.name),
                    self.config.instrument.output_directory, skyfn)
                if os.path.exists(full_path):
                    sky = kcwi_fits_reader(full_path)[0]
                    # correction
                    sky.data *= mflat.data
                    # update header
                    sky.header[key] = (True, keycom)
                    sky.header['MFFILE'] = (
                        self.action.args.master_flat, 'Master flat filename')
                else:
                    sky = None
        else:
            # No flat available: record that no correction was applied.
            self.logger.error("No master flat found, "
                              "cannot correct illumination.")
            self.action.args.ccddata.header[key] = (False, keycom)
        log_string = CorrectIllumination.__module__
        self.action.args.ccddata.header['HISTORY'] = log_string
        # write out intf image
        kcwi_fits_writer(self.action.args.ccddata,
                         table=self.action.args.table,
                         output_file=self.action.args.name,
                         output_dir=self.config.instrument.output_directory,
                         suffix="intf")
        self.context.proctab.update_proctab(frame=self.action.args.ccddata,
                                            suffix="intf")
        self.context.proctab.write_proctab()
        # check for obj, sky images
        if obj is not None:
            kcwi_fits_writer(obj, output_file=self.action.args.name,
                             output_dir=self.config.instrument.output_directory,
                             suffix="objf")
        if sky is not None:
            kcwi_fits_writer(sky, output_file=self.action.args.name,
                             output_dir=self.config.instrument.output_directory,
                             suffix="skyf")
        self.logger.info(log_string)
        return self.action.args
    # END: class CorrectIllumination()
|
"""
Stein Variational Gradient Descent implementation.
"""
import torch
import utils
import math
import numpy as np
def get_diff_sq(a, b):
    """
    Computes squared pairwise differences between a and b.
    :param a: tensor a (n x d)
    :param b: tensor b (m x d)
    :return: n x m tensor of squared pairwise distances
    """
    # ||a_i - b_j||^2 = -2 <a_i, b_j> + ||a_i||^2 + ||b_j||^2
    cross = a.matmul(b.t())
    sq_norms_a = a.matmul(a.t()).diag().unsqueeze(1)
    sq_norms_b = b.matmul(b.t()).diag().unsqueeze(0)
    return -2 * cross + sq_norms_a + sq_norms_b
def K_RBF(x, y):
    """
    Computes the RBF kernel of x and y using the median heuristic:
    bandwidth h = sqrt(0.5 * median(diff_sq) / log(n + 1)).
    :param x: tensor x
    :param y: tensor y
    :return: rbf kernel (x, y)
    """
    # Squared pairwise distances, inlined:
    # ||x_i - y_j||^2 = -2 <x_i, y_j> + ||x_i||^2 + ||y_j||^2
    sq_dists = (-2 * x.matmul(y.t())
                + x.matmul(x.t()).diag().unsqueeze(1)
                + y.matmul(y.t()).diag().unsqueeze(0))
    # Median heuristic for the kernel bandwidth.
    bandwidth = torch.sqrt(0.5 * torch.median(sq_dists) / math.log(x.size(0) + 1.))
    return torch.exp(-sq_dists / bandwidth ** 2 / 2)
def svgd(current_particles, model, img, iter=20, lr=0.01):
    """
    Stein Variational Gradient Descent procedure.
    :param current_particles: initial particles
    :param model: vae model for probability computations
    :param img: datapoints
    :param iter: number of iterations
    :param lr: learning rate
    :return: particles after SVGD optimisation
    """
    final_particles = current_particles.clone().detach().requires_grad_(True)
    optimizer = torch.optim.Adam([final_particles], lr=lr)
    for i in range(0, iter):
        # Fresh detached leaf each iteration so gradients are taken w.r.t.
        # the current iterate only, not the whole optimisation history.
        particles = final_particles.detach().requires_grad_(True)
        pred = model.dec_forward(particles)
        log_pxz = utils.log_pxz(pred, img, particles)
        # Driving (score) term: d/dz log p(x, z) per particle.
        grad_z = torch.autograd.grad(torch.sum(log_pxz), particles)[0]
        # Kernel matrix; second argument is detached so the kernel gradient
        # below flows through the first argument only.
        K_zz = K_RBF(particles, particles.detach())
        # Repulsive term keeping particles spread apart.
        grad_K = -torch.autograd.grad(torch.sum(K_zz), particles)[0]
        # Stein variational direction, averaged over all particles.
        phi = (K_zz.detach().matmul(grad_z) + grad_K) / particles.size(0)
        optimizer.zero_grad()
        # Ascend phi by handing Adam its negative as the "gradient".
        final_particles.grad = -phi
        optimizer.step()
    return final_particles
def svgd_batched(nr_particles, batch_size, current_particles, model, img, iter=20, lr=0.01):
    """
    Stein Variational Gradient Descent procedure for batches.
    Please note that RBF kernel bandwidth is computed on per-batch level.
    :param nr_particles: number of particles per datapoint
    :param batch_size: number of datapoints
    :param current_particles: initial particles -> particles per datapoint 1, particles per datapoint 2, ...
    :param model: vae model for probability computations
    :param img: datapoints -> datapoint 1 x nr_particles, datapoint 2 x nr_particles, ....
    :param iter: number of iterations
    :param lr: learning rate
    :return: particles after SVGD optimisation
    """
    # Block-diagonal 0/1 mask so kernel interactions only couple particles
    # belonging to the same datapoint.
    mask = torch.from_numpy(np.kron(np.eye(batch_size), np.ones((nr_particles, nr_particles)))).float().to(img.device)
    final_particles = current_particles.clone().detach().requires_grad_(True)
    optimizer = torch.optim.Adam([final_particles], lr=lr)
    for i in range(0, iter):
        # Fresh detached leaf each iteration (see svgd()).
        particles = final_particles.detach().requires_grad_(True)
        pred = model.dec_forward(particles)
        log_pxz = utils.log_pxz(pred, img, particles)
        grad_z = torch.autograd.grad(torch.sum(log_pxz), particles)[0]
        # Masked kernel: zero across different datapoints. NOTE: the
        # bandwidth median is still computed over the whole batch.
        K_zz = K_RBF(particles, particles.detach()) * mask
        grad_K = -torch.autograd.grad(torch.sum(K_zz), particles)[0]
        # Normalise by particles-per-datapoint, not the full batch size.
        phi = (K_zz.detach().matmul(grad_z) + grad_K) / nr_particles
        optimizer.zero_grad()
        final_particles.grad = -phi
        optimizer.step()
    return final_particles
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Ryan Collins <rlcollins@g.harvard.edu>
# Distributed under terms of the MIT license.
"""
Extract & classify trio allele counts for all complete sets of non-null genotypes
"""
import argparse
import sys
from collections import defaultdict
import pysam
import pybedtools as pbt
from math import floor, ceil
GTs_to_skip = './. None/None 0/None None/0'.split()
sex_chroms = 'X Y chrX chrY'.split()
def get_GT(record, ID):
    """
    Extract a single sample's genotype from VCF record as an "a/b" string
    ('None/None' for a null genotype).
    """
    genotype = record.samples[ID]['GT']
    if genotype is None:
        return 'None/None'
    return '/'.join(str(allele) for allele in genotype)
def get_AC(GT):
    """
    Convert a genotype string to an allele count string for a single
    sample ('NA' for a fully null genotype).
    """
    if GT == 'None/None':
        return 'NA'
    allele_sum = sum(int(allele) for allele in GT.split('/'))
    return str(allele_sum)
def gather_parent_cnvs(vcf, fa, mo):
    """
    Create BEDTools of the father's and mother's CNVs for coverage-based
    inheritance checks. Multiallelic MCNV records are converted to DEL/DUP
    intervals relative to copy number 2.
    """
    # chrom, start, end, cnv type, allele count
    cnv_format = '{0}\t{1}\t{2}\t{3}\t{4}\n'
    fa_cnvs = ''
    mo_cnvs = ''
    for record in vcf:
        # Do not include variants from sex chromosomes
        if record.chrom in sex_chroms:
            continue
        # Process biallelic CNVs
        if record.info['SVTYPE'] in 'DEL DUP'.split() \
        and 'MULTIALLELIC' not in record.filter:
            # Father
            fa_ac = get_AC(get_GT(record, fa))
            if fa_ac != 'NA':
                if int(fa_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], fa_ac)
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_AC(get_GT(record, mo))
            if mo_ac != 'NA':
                if int(mo_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], mo_ac)
                    mo_cnvs = mo_cnvs + new_cnv
        # Process multiallelic CNVs: copy number below 2 becomes a DEL,
        # above 2 a DUP, with the copy-number delta as the count column.
        if record.info['SVTYPE'] == 'MCNV' and 'MULTIALLELIC' in record.filter:
            # Father (second field of the genotype carries the copy state)
            fa_ac = get_GT(record, fa).split('/')[1]
            if fa_ac != 'None':
                fa_ac = int(fa_ac)
                if fa_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - fa_ac))
                    fa_cnvs = fa_cnvs + new_cnv
                elif fa_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(fa_ac - 2))
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_GT(record, mo).split('/')[1]
            if mo_ac != 'None':
                mo_ac = int(mo_ac)
                if mo_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - mo_ac))
                    mo_cnvs = mo_cnvs + new_cnv
                elif mo_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(mo_ac - 2))
                    mo_cnvs = mo_cnvs + new_cnv
    fa_cnvs = pbt.BedTool(fa_cnvs, from_string=True)
    mo_cnvs = pbt.BedTool(mo_cnvs, from_string=True)
    return fa_cnvs, mo_cnvs
def get_blacklist_hits(vcf, pro, blacklist = None):
    """
    Return the IDs of the proband's non-reference variants that have a
    breakpoint inside a blacklisted region (empty list when no blacklist
    is supplied).
    """
    breakpoint_format = '{0}\t{1}\t{2}\t{3}\n'
    breakpoints = ''
    for record in vcf:
        if 'MULTIALLELIC' not in record.filter:
            # Second field of the proband genotype; skip null/missing calls.
            pro_ac = get_GT(record, pro).split('/')[1]
            if pro_ac != 'None' and pro_ac != 'NA':
                if int(pro_ac) > 0:
                    # One 1-bp interval per breakpoint (start and stop),
                    # both tagged with the variant ID.
                    new_bp1 = breakpoint_format.format(record.chrom, str(record.pos),
                                                       str(record.pos + 1), record.id)
                    new_bp2 = breakpoint_format.format(record.chrom, str(record.stop),
                                                       str(record.stop + 1), record.id)
                    breakpoints = breakpoints + new_bp1 + new_bp2
    sv_bt = pbt.BedTool(breakpoints, from_string=True)
    if blacklist is not None:
        # u=True/wa=True: report each breakpoint interval once if it
        # overlaps any blacklist feature.
        sv_bt_bl = sv_bt.intersect(blacklist, u=True, wa=True)
        bl_ids = list(set([f[3] for f in sv_bt_bl]))
    else:
        bl_ids = []
    return bl_ids
def classify_trio_AC(ACs, record):
    """
    Classify the inheritance pattern of a single variant from trio allele
    counts.

    ACs is an ordered list of allele-count strings [proband, father,
    mother]; 'NA' marks a missing genotype. `record` is used only for the
    error message. Raises ValueError when the counts fit no expected
    pattern.
    """
    # Any missing genotype makes the trio unclassifiable
    if any(c == 'NA' for c in ACs):
        return 'INCOMPLETE'
    ACs = [int(c) for c in ACs]
    # (removed several unused whole-trio tallies from the original)
    pro_ac = ACs[0]
    parent_acs = ACs[1:3]
    n_homref_parents = parent_acs.count(0)
    n_het_parents = parent_acs.count(1)
    n_homalt_parents = parent_acs.count(2)
    if pro_ac == 2 and n_homref_parents > 0:
        # Hom-alt child but at least one parent carries no alt allele
        return 'APPARENT_DE_NOVO_HOM'
    elif pro_ac == 1 and n_homref_parents == 2:
        # Het child with two hom-ref parents
        return 'APPARENT_DE_NOVO_HET'
    elif pro_ac == 0 and n_homalt_parents > 0:
        return 'UNTRANSMITTED_HOM'
    elif pro_ac == 0 and n_het_parents > 0:
        return 'UNTRANSMITTED_HET'
    elif pro_ac == 2 and (n_het_parents + n_homalt_parents) == 2:
        return 'MENDELIAN_CHILD_HOM'
    elif pro_ac == 1 and n_homref_parents < 2:
        return 'MENDELIAN_CHILD_HET'
    else:
        error_message = 'Trio ACs {0} for site {1} do not fit expectations'.format(ACs, record.id)
        raise ValueError(error_message)
def reclassify_cnv_label(record, label, ACs, fa_cnvs, mo_cnvs, min_cov=0.5):
    """
    Attempt to reclassify inheritance labels for CNVs based on parent CNV
    coverage: an APPARENT_DE_NOVO call becomes MENDELIAN_CHILD when either
    parent's same-type CNVs cover at least `min_cov` of the child CNV.
    """
    bedline = '{0}\t{1}\t{2}\n'
    child_cnv = pbt.BedTool(bedline.format(record.chrom,
                                           str(record.pos),
                                           str(record.stop)),
                            from_string=True)
    cnvtype = record.info['SVTYPE']
    cnvlen = record.info['SVLEN']
    # For small child CNVs (<1kb), only compare against parent CNVs under
    # 10kb so a single huge parent CNV does not mask a small de novo event.
    if cnvlen < 1000:
        fa_cov_dat = child_cnv.coverage(fa_cnvs.filter(lambda x: x[3] == cnvtype and x.length < 10000))
        mo_cov_dat = child_cnv.coverage(mo_cnvs.filter(lambda x: x[3] == cnvtype and x.length < 10000))
    else:
        fa_cov_dat = child_cnv.coverage(fa_cnvs.filter(lambda x: x[3] == cnvtype))
        mo_cov_dat = child_cnv.coverage(mo_cnvs.filter(lambda x: x[3] == cnvtype))
    # Field 7 of bedtools coverage output = fraction of the child CNV covered.
    fa_cov = float([f[6] for f in fa_cov_dat][0])
    mo_cov = float([f[6] for f in mo_cov_dat][0])
    if fa_cov >= min_cov or mo_cov >= min_cov:
        label = label.replace('APPARENT_DE_NOVO', 'MENDELIAN_CHILD')
    return label
# def _breakpoints_in_blacklist(record, blacklist=None):
# """
# Check whether variant breakpoints fall in blacklisted regions
# """
# if blacklist is None:
# return False
# else:
# bedline = '{0}\t{1}\t{2}\n'
# bp1 = pbt.BedTool(bedline.format(record.chrom,
# str(record.pos),
# str(record.pos + 1)),
# from_string=True)
# bp2 = pbt.BedTool(bedline.format(record.chrom,
# str(record.stop),
# str(record.stop + 1)),
# from_string=True)
# if len(bp1.intersect(blacklist)) > 0 \
# or len(bp2.intersect(blacklist)) > 0:
# return True
# else:
# return False
def gather_info(vcf, fout, pro, fa, mo, fa_cnvs, mo_cnvs, no_header = False,
                mvr_out = None, qual_out = None, qual_step=50,
                blacklisted_ids = None):
    """
    Tally trio inheritance classes for every autosomal, biallelic variant
    in `vcf` and write one counts line (plus optional MVR coordinates and
    QUAL-stratified counts) per proband.

    :param vcf: iterable of VCF records
    :param fout: open handle for the main counts table
    :param pro, fa, mo: proband / father / mother sample IDs
    :param fa_cnvs, mo_cnvs: parent CNV BedTools (see gather_parent_cnvs)
    :param no_header: suppress header lines when True
    :param mvr_out: optional handle for Mendelian-violation coordinates
    :param qual_out: optional handle for QUAL-stratified counts
    :param qual_step: QUAL bin width
    :param blacklisted_ids: variant IDs excluded from the QUAL analysis
    """
    # Treat a missing blacklist as empty so the membership test below
    # cannot raise TypeError (default is None).
    if blacklisted_ids is None:
        blacklisted_ids = []
    CNVs = 'DEL DUP'.split()
    labels = 'INCOMPLETE APPARENT_DE_NOVO_HET APPARENT_DE_NOVO_HOM ' + \
             'MENDELIAN_CHILD_HET MENDELIAN_CHILD_HOM ' + \
             'UNTRANSMITTED_HET UNTRANSMITTED_HOM'
    labels = labels.split()
    qual_labels = 'APPARENT_DE_NOVO_HET MENDELIAN_CHILD_HET'.split()
    svtypes = 'ALL DEL DUP INS INV CPX CTX BND'.split()
    qual_bins = [i for i in range(0, 1000, qual_step)]
    # Prep dictionary for counting hets & MVRs by SVTYPE
    counts = {}
    for label in labels:
        for svtype in svtypes:
            for filt in 'PASS FAIL'.split():
                key = '_'.join([svtype, filt, label])
                if key not in counts:
                    counts[key] = 0
    # Prep dictionary for counting child hets by QUAL score.
    # BUG FIX: the original tested `key not in counts.keys()` — the wrong
    # dict — which only worked because the two key namespaces never
    # collide; initialise qual_counts directly.
    qual_counts = {}
    for label in qual_labels:
        for svtype in svtypes:
            for minQual in qual_bins:
                key = '_'.join([svtype, str(minQual),
                                str(minQual + qual_step), label])
                qual_counts[key] = 0
    # Write header to output file(s)
    if not no_header:
        keys = list(counts.keys())
        header = '{0}\t{1}\n'.format('#PROBAND', '\t'.join(keys))
        fout.write(header)
        if mvr_out is not None:
            mvr_header = '#chr\tstart\tend\tVID\tSVTYPE\tPROBAND\tINH\tFILTER\n'
            mvr_out.write(mvr_header)
        if qual_out is not None:
            qual_keys = qual_counts.keys()
            qual_header = '{0}\t{1}\n'.format('#PROBAND', '\t'.join(qual_keys))
            qual_out.write(qual_header)
    trio_samples = [pro, fa, mo]
    for record in vcf:
        # #Do not include UNRESOLVED variants
        # if 'UNRESOLVED' in record.info.keys() \
        # or 'UNRESOLVED_TYPE' in record.info.keys() \
        # or 'UNRESOLVED' in record.filter:
        #     continue
        # #Only consider PASS variants
        # if 'PASS' not in record.filter:
        #     continue
        # Do not include variants from sex chromosomes
        if record.chrom in sex_chroms:
            continue
        # Do not include multiallelic variants
        if 'MULTIALLELIC' in record.info.keys() \
        or 'MULTIALLELIC' in record.filter \
        or len(record.alts) > 1:
            continue
        # Get GTs for trio
        GTs = [get_GT(record, ID) for ID in trio_samples]
        # Skip sites that are completely reference or null
        if len([g for g in GTs if g == '0/0' or g in GTs_to_skip]) == 3:
            continue
        # Convert to ACs and classify the inheritance pattern
        ACs = [get_AC(g) for g in GTs]
        label = classify_trio_AC(ACs, record)
        # Correct inheritance label for apparently de novo heterozygous CNVs
        if record.info['SVTYPE'] in CNVs \
        and label == 'APPARENT_DE_NOVO_HET':
            label = reclassify_cnv_label(record, label, ACs, fa_cnvs, mo_cnvs)
        # Add counts to dict, as appropriate (any non-PASS filter => FAIL)
        svtype = record.info['SVTYPE']
        filts = ','.join([f for f in record.filter])
        if filts == 'PASS':
            rfilt = 'PASS'
        else:
            rfilt = 'FAIL'
        counts['ALL_{0}_{1}'.format(rfilt, label)] += 1
        counts['{0}_{1}_{2}'.format(svtype, rfilt, label)] += 1
        # Add counts to QUAL dict, as appropriate, only for non-blacklisted
        # PASS variants with SR support.
        # BUG FIX: guard EVIDENCE being absent — `'SR' in None` raised
        # TypeError in the original.
        if label in qual_labels \
        and filts == 'PASS' \
        and 'SR' in (record.info.get('EVIDENCE') or ()) \
        and record.id not in blacklisted_ids:
            qual = int(record.qual)
            # Half-open bin [floor, ceil) around the QUAL score
            qual_floor = str(qual_step * floor((qual - 1) / qual_step))
            qual_ceil = str(qual_step * ceil(qual / qual_step))
            qual_counts['ALL_{0}_{1}_{2}'.format(qual_floor, qual_ceil, label)] += 1
            qual_counts['{0}_{1}_{2}_{3}'.format(svtype, qual_floor, qual_ceil, label)] += 1
        # Write MVR coordinates to file, if optioned.
        # BUG FIX: the original discarded the result of .split(), so the
        # `label not in ...` test below did substring matching on a plain
        # string (which only worked by coincidence for these labels).
        labels_to_skip_coords = ('INCOMPLETE MENDELIAN_CHILD_HET ' +
                                 'MENDELIAN_CHILD_HOM UNTRANSMITTED_HET').split()
        if mvr_out is not None \
        and label not in labels_to_skip_coords:
            # Insertions are reported as a 1-bp interval at the insertion site
            if record.info['SVTYPE'] == 'INS':
                mvr_record_vals = [record.chrom, record.pos, record.pos+1,
                                   record.id, record.info['SVTYPE'], pro, label,
                                   rfilt]
            else:
                mvr_record_vals = [record.chrom, record.pos, record.stop,
                                   record.id, record.info['SVTYPE'], pro, label,
                                   rfilt]
            mvr_newline = '\t'.join(str(i) for i in mvr_record_vals)
            mvr_out.write(mvr_newline + '\n')
    # Write het and MVR counts to file
    newline = '{0}\t{1}\n'.format(pro, '\t'.join(str(i) for i in counts.values()))
    fout.write(newline)
    # Write qual counts to file.
    # BUG FIX: the original wrote unconditionally and crashed with
    # AttributeError whenever qual_out was left at its default of None.
    if qual_out is not None:
        newline_qual = '{0}\t{1}\n'.format(pro, '\t'.join(str(i) for i in qual_counts.values()))
        qual_out.write(newline_qual)
#Main block
def main():
    """Parse command-line arguments and run the trio inheritance analysis."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('vcf', help='Input vcf.')
    parser.add_argument('fout', help='Output stats file (supports "stdout").')
    parser.add_argument('pro', help='Proband sample ID.')
    parser.add_argument('fa', help='Father sample ID.')
    parser.add_argument('mo', help='Mother sample ID.')
    parser.add_argument('--coordinates', help='File to write out MVR coordinates.',
                        default = None, dest = 'mvr_out')
    parser.add_argument('--qual-out', help='File to write out de novo analysis ' +
                        ' stratified by QUAL score.',
                        default = None, dest = 'qual_out')
    parser.add_argument('--qual-bin-step', default=50, type=int,
                        help='Size of QUAL score bins.', dest='qual_bin_size')
    parser.add_argument('--qual-blacklist', default=None, dest='blacklist',
                        help='Blacklist BED file to apply during QUAL score ' +
                        'stratified analysis of de novo rates.')
    parser.add_argument('--no-header', help='Do not write header line.',
                        action = 'store_true', default = False)
    args = parser.parse_args()
    vcf = pysam.VariantFile(args.vcf)
    if args.fout in '- stdout'.split():
        fout = sys.stdout
    else:
        fout = open(args.fout, 'w')
    if args.mvr_out is not None:
        mvrfout = open(args.mvr_out, 'w')
    else:
        mvrfout = None
    if args.qual_out is not None:
        qualfout = open(args.qual_out, 'w')
    else:
        qualfout = None
    # Each pass below consumes the VariantFile iterator, so the VCF is
    # reopened before every pass.
    fa_cnvs, mo_cnvs = gather_parent_cnvs(vcf, args.fa, args.mo)
    vcf = pysam.VariantFile(args.vcf)
    bl_hit_ids = get_blacklist_hits(vcf, args.pro, args.blacklist)
    vcf = pysam.VariantFile(args.vcf)
    gather_info(vcf, fout, args.pro, args.fa, args.mo, fa_cnvs, mo_cnvs,
                no_header = args.no_header, mvr_out = mvrfout,
                qual_out = qualfout, qual_step = args.qual_bin_size,
                blacklisted_ids = bl_hit_ids)
    # NOTE(review): mvrfout/qualfout are not explicitly closed here; they
    # are released only at interpreter exit.
    fout.close()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import smtplib # send mail
import socket # get host name
import commands
import datetime
import base64
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import (
Mail, Attachment, FileContent, FileName,
FileType, Disposition, ContentId)
"""
---------------------------------------MAIN----------------------------------------
Notify when some task has finished.
Dependencies:
sudo dpkg-reconfigure tzdata, set the correct timezone through terminal.
-----------------------------------------------------------------------------------
"""
def send_email(from_addr="lfmuller@inf.ufrgs.br",
               to_addr_list=["<USER>@gmail.com"],
               cc_addr_list=[""],
               subject="", message="",
               login="<USER>@gmail.com",
               password="<PWD>",
               smtpserver="smtp.gmail.com:587"):
    """Send a plain-text notification email over SMTP with STARTTLS."""
    headers = [
        'From: %s\n' % from_addr,
        'To: %s\n' % ','.join(to_addr_list),
        'Cc: %s\n' % ','.join(cc_addr_list),
        'Subject: %s\n\n' % subject,
    ]
    payload = ''.join(headers) + message
    server = smtplib.SMTP(smtpserver)
    server.starttls()
    server.login(login, password)
    # sendmail() returns a dict of refused recipients (empty on success).
    refused = server.sendmail(from_addr, to_addr_list, payload)
    if refused:
        print("An error occurred when sending the notification email address")
    server.quit()
def send_notification_by_sendgrid(s_subject="", s_message="",):
    """
    Send notification using SendGrid services (free tier - 100 requests).
    Attaches ./out_classif.log when present; failures to attach or send
    are reported as warnings rather than raised.
    :param s_subject: email subject line
    :param s_message: plain-text email body
    :return:
    """
    message = Mail(
        from_email='lfmuller@inf.ufrgs.br',
        to_emails='<USER>@gmail.com',
        subject=s_subject,
        plain_text_content=s_message)
    try:
        file_path = './out_classif.log'
        with open(file_path, 'rb') as f:
            data = f.read()
            f.close()
        # SendGrid requires attachment content to be base64-encoded text.
        encoded = base64.b64encode(data).decode()
        attachment = Attachment()
        attachment.file_content = FileContent(encoded)
        # NOTE(review): MIME type is 'application/pdf' but the attachment
        # is named .txt and contains a log file — confirm intended type.
        attachment.file_type = FileType('application/pdf')
        attachment.file_name = FileName('out_classif.txt')
        attachment.disposition = Disposition('attachment')
        message.attachment = attachment
    except Exception as e:
        print("Warning: file - out_classif.log - was not located to attach to the notification message.")
    try:
        # API key comes from the environment; missing/invalid keys fall
        # through to the warning below instead of crashing the caller.
        sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        response = sg.send(message)
        print(response.status_code)
        print(response.body)
        print(response.headers)
    except Exception as e:
        print("Warning: you don't seem to have (or not correctly) configured your "
              "Sendgrid API Key (for details check the readme.md at project root dir), "
              "you will not receive a notification by email.")
def send_notification_end_of_execution(s_command_line, fname_script, t_start, t_end):
    """
    Send an email notification with the parameters used when the
    classification is over and results are ready.
    :param s_command_line: iterable of command-line argument strings
    :param fname_script: name of the finished script (used in the subject)
    :param t_start: start time (seconds)
    :param t_end: end time (seconds)
    :return:
    """
    timestamp = datetime.datetime.now()
    str_date = "{:%B %d, %Y}".format(timestamp)
    # Subject: "<host> -> <script> finished: <date> ---Total exec time: ..."
    str_mail_subject = str(socket.gethostname()) + ' -> ' + fname_script + \
                       ' finished: ' + str_date + ' ' \
                       + "---Total exec time: {} seconds".format(t_end - t_start)
    # Body: one argument per line plus the concatenated *.log output.
    # NOTE(review): the `commands` module is Python 2 only (removed in
    # Python 3; subprocess.getoutput is the replacement there).
    str_message_email = "".join(str(v + "\n") for v in s_command_line) \
                        + "\n OUTPUT LOG: \n" + commands.getoutput('cat *.log')
    send_notification_by_sendgrid(s_subject=str_mail_subject, s_message=str_message_email)
|
#coding:utf-8
import os
from cactus.tests.deployment import BaseDeploymentTestCase, DummyDeploymentEngine, DummySite, DummyUI, DummyFile
class FileChangedTestCase(BaseDeploymentTestCase):
    """Deployment tests verifying that uploads happen only for changed files."""

    def setUp(self):
        super(FileChangedTestCase, self).setUp()
        self.ui = DummyUI()
        self.site = DummySite(self.test_dir, self.ui)
        self.engine = DummyDeploymentEngine(self.site)
        # Seed the site with a single page so exactly one file gets deployed.
        page_path = os.path.join(self.test_dir, "123.html")
        with open(page_path, "w") as handle:
            handle.write("Hello!")

    def test_file_unchanged(self):
        """
        Test that we don't attempt to deploy unchanged files
        """
        class UnchangedFile(DummyFile):
            def remote_changed(self):
                super(UnchangedFile, self).remote_changed()
                return False

        self.engine.FileClass = UnchangedFile
        self.engine.deploy()
        created = self.engine.created_files
        self.assertEqual(1, len(created))
        deployed = created[0]
        # The remote was consulted once, but no upload happened.
        self.assertEqual(1, deployed.remote_changed_calls)
        self.assertEqual(0, deployed.do_upload_calls)

    def test_file_changed(self):
        """
        Test that we deploy files that changed
        """
        class ChangedFile(DummyFile):
            def remote_changed(self):
                super(ChangedFile, self).remote_changed()
                return True

        self.engine.FileClass = ChangedFile
        self.engine.deploy()
        created = self.engine.created_files
        self.assertEqual(1, len(created))
        deployed = created[0]
        # The remote was consulted once and the file was uploaded once.
        self.assertEqual(1, deployed.remote_changed_calls)
        self.assertEqual(1, deployed.do_upload_calls)
|
#!/bin/sh
# Supervisor loop: keep the bot running, restarting it whenever it exits.
while true
do
    # Launch the bot; this blocks until the process terminates (crash or exit).
    ./rocket-bot.py
    # Brief pause before restarting to avoid a tight respawn loop.
    sleep 1
done
|
#!/usr/bin/env python
from cyverse_irods.CyRODS import CyVerseiRODS
from datetime import datetime
from os import path, walk
import argparse
# Command-line interface for the CyVerse/iRODS upload helper.
ap = argparse.ArgumentParser(description="CyVerse/iRODS interaction")
ap.add_argument("--upload", action='store_true')
ap.add_argument("--localsource")
ap.add_argument("--remotedestination")
ap.add_argument("--user")
ap.add_argument("--password")
ap.add_argument("--timestamp", action="store_true")
ap.add_argument("--output")
# Base URL of anonymously readable files in the CyVerse data store.
anon_prefix = "https://de.cyverse.org/anon-files/iplant/home/"
args = ap.parse_args()
# Credentials are optional; forwarded to the connection only when both given.
kwargs = {}
if args.user and args.password:
    kwargs["user"] = args.user
    kwargs["password"] = args.password
# initialize connection
conn = CyVerseiRODS(**kwargs)
# Grant anonymous read access so generated hubs are publicly viewable.
default_perm = {
    "type" : "read",
    "name" : "anonymous",
    "zone" : conn.KWARGS["zone"]
}
# generate timestamp, if relevant
if args.timestamp or not args.remotedestination:
    timestamp = datetime.utcnow().strftime("_%y%m%dT%H%M%S")
    if not args.remotedestination:
        # No destination given: derive one from the user name + timestamp.
        args.remotedestination = args.user + "_hub" + timestamp
    else:
        args.remotedestination = args.remotedestination + timestamp
# Remember the bare subdirectory name before prefixing the user directory.
remote_subdir = args.remotedestination
args.remotedestination = conn.user_dir + "/" + args.remotedestination
# UCSC has a "hub.txt"
# JBrowse has a "myHub/trackList.json"
ucsc_specific = remote_subdir + "/hub.txt"
jbrowse_specific = remote_subdir
# NOTE(review): kwargs["user"] raises KeyError when --user/--password were not
# both supplied -- confirm anonymous runs are expected to fail here.
data_url = anon_prefix + kwargs["user"] + "/"
hubtype=""
title=""
header="Link to generated {} hub:"
filesize = 0
# find where the files REALLY live
# Replace the archive's extension with the "_files/" directory it extracts to.
args.localsource = ".".join(args.localsource.split('.')[:-1]) + "_files/"
# ensure we have a valid archive
if path.isfile(args.localsource + "myHub/hub.txt"):
    # UCSC-style hub: the entry point is hub.txt.
    data_url = data_url + ucsc_specific
    args.localsource = args.localsource + "myHub/"
    hubtype="UCSC"
    url="http://genome.ucsc.edu/cgi-bin/hgHubConnect?hgHub_do_redirect=on&hgHubConnect.remakeTrackHub=on&hgHub_do_firstDb=1&hubClear={}"
elif path.isfile(args.localsource + "myHub/trackList.json"):
    # JBrowse-style hub: the entry point is trackList.json.
    args.localsource = args.localsource + "myHub/"
    data_url = data_url + jbrowse_specific
    url="https://de.cyverse.org/anon-files/iplant/home/shared/G-OnRamp_hubs/JBrowse-1.12.3/index.html?data={}"
    hubtype = "JBrowse"
elif path.isfile(args.localsource + "myHub"):
    # myHub being a FILE (not a directory) indicates an imported history.
    raise OSError("ERROR: myHub is a file, suggesting that this is an imported history, which does not work with G-OnRamp archive creators")
else:
    raise OSError("Neither '{}' nor '{}' found.".format(args.localsource + "myHub/hub.txt", args.localsource + "myHub/trackList.json"))
# get filesize
# Walk the hub directory and total up all file sizes in bytes.
for dirpath, dirnames, filenames in walk(args.localsource):
    for f in filenames:
        fpath = path.join(dirpath, f)
        filesize += path.getsize(fpath)
print("bytes: {}".format(filesize))
sizestring = "<b>{}</b> {}"
# Pick a human-readable unit using decimal (SI) steps: GB, MB, kB, B.
# gigs
magnitude = 0.0
unit = ""
if int(filesize / 1000000000):
    magnitude = filesize/1000000000
    unit = "GB"
elif int(filesize / 1000000):
    magnitude = filesize/1000000
    unit = "MB"
elif int(filesize / 1000):
    magnitude = filesize/1000
    unit = "kB"
else:
    magnitude = filesize
    unit = "B"
sizestring = sizestring.format(magnitude, unit)
# upload
if args.upload:
    if args.localsource is None:
        # BUG FIX: this called `parser.error(...)`, but the ArgumentParser
        # instance is named `ap`; `parser` is undefined and would raise
        # NameError instead of printing the intended usage error.
        ap.error("--upload requires --localsource, --remotedestination is optional")
    else:
        # Echo the call being made, then recursively upload the hub directory
        # with anonymous-read permissions.
        print("conn.recursive_upload(args.localsource, args.remotedestination, default_perm)\n{}\n{}\n{}\n\n".format(args.localsource, args.remotedestination, default_perm))
        conn.recursive_upload(args.localsource, args.remotedestination, default_perm)
header = header.format(hubtype)
url = url.format(data_url)
# generate link
# Template for the result page; placeholders: title, header, hub URL,
# link text (user/subdir), and the formatted size string.
html_content = str('''
<!DOCTYPE html>\
<html lang="en">\
<head>\
<title>{}</title>\
</head>\
<body>\
<h1>{}</h1>\
<a href="{}">Generated Hub at {}</a>\
<h3>Hub size:</h3> <span style="color:blue">{}</span>\
</body>\
</html>''')
# generate HTML file
if args.output:
    with open(args.output, "w") as file:
        file.write(html_content.format(title, header, url, kwargs["user"] + "/" + remote_subdir, sizestring))
|
from src.grid.grid import Grid
from pyomo.environ import *
def create_model(grid: Grid, normalize: bool = False):
    """
    Build and return a Pyomo ConcreteModel for the OPF problem.

    grid -- Grid object specifying the topology and parameters of the grid.
            Load power bounds are specified in the grid.loads[i].p_max
    normalize -- Whether to normalize the variables in the model. Preferably, keep it True.
    """
    model = ConcreteModel()
    # Index sets over buses and branches.
    model.nodes = Set(initialize=range(grid.n_nodes))
    model.lines = Set(initialize=range(grid.n_lines))
    # Decision variables: nodal voltages, nodal power injections, line currents.
    model.V_nodes = Var(model.nodes)
    model.P_nodes = Var(model.nodes)
    model.I_lines = Var(model.lines)
    model.norm_factor = grid.ref_voltage if normalize else 1
    # Power balance at every node: P_n == -V_n * sum_m Y[n, m] * V_m.
    model.power_balance = ConstraintList()
    for i, _ in enumerate(grid.nodes):
        voltage = model.V_nodes[i]
        injection = -voltage * sum(grid.Y[i, j] * model.V_nodes[j]
                                   for j in range(grid.n_nodes))
        model.power_balance.add(model.P_nodes[i] == injection)
    # Line currents: I_l == g_l * (V_from - V_to), bounded by the line limits.
    model.lines_current = ConstraintList()
    for l, branch in enumerate(grid.lines):
        i_from = grid.nodes.index(branch.node_from)
        i_to = grid.nodes.index(branch.node_to)
        flow = branch.g * (model.V_nodes[i_from] - model.V_nodes[i_to])
        model.lines_current.add(model.I_lines[l] == flow)
        model.I_lines[l].setlb(branch.i_min / model.norm_factor)
        model.I_lines[l].setub(branch.i_max / model.norm_factor)
    # Nodal constraints: voltage, power, and current limits per node.
    model.nodal_voltage = ConstraintList()
    model.nodal_power = ConstraintList()
    model.nodal_currents = ConstraintList()
    model.utilities_list = []
    model.generators_power = ConstraintList()
    for i, node in enumerate(grid.nodes):
        model.V_nodes[i].setlb(node.v_min / model.norm_factor)
        model.V_nodes[i].setub(node.v_max / model.norm_factor)
        # Power bounds combine the generator limits with the demand limits.
        p_lo = max(node.p_min, node.p_demand_min)
        p_hi = min(node.p_max, node.p_demand_max)
        model.P_nodes[i].setlb(p_lo / model.norm_factor ** 2)
        model.P_nodes[i].setub(p_hi / model.norm_factor ** 2)
        # Nodal current limits expressed as linear constraints P >= i_min*V, P <= i_max*V.
        model.nodal_currents.add(model.P_nodes[i] - node.i_min / model.norm_factor * model.V_nodes[i] >= 0)
        model.nodal_currents.add(node.i_max / model.norm_factor * model.V_nodes[i] - model.P_nodes[i] >= 0)
        model.utilities_list.append(model.P_nodes[i] * node.utility_coef)
    # Objective: maximize total nodal utility.
    model.utility = Objective(sense=maximize, expr=sum(model.utilities_list))
    return model
|
import numpy
import random
from tqdm import tqdm
from NeuralNetworks.Model.losses import loss_from_function
class Sequential:
    """
    A minimal sequential neural-network container.

    Layers are stacked in the order they are add()ed; compile() wires them
    together, fit() trains with (mini-)batch gradient descent, and predict()
    runs a forward pass over a single sample.
    """

    def __init__(self):
        # Ordered list of layers; lr is overwritten by compile().
        self.layers = list()
        self.lr = 0.01

    def add(self, layer):
        """Append `layer` to the end of the network."""
        self.layers.append(layer)

    def compile(self, lr, loss='mse'):
        """
        Finalize the network: store hyper-parameters and initialize layers.

        :param lr: learning rate used during back-propagation.
        :param loss: loss-function name resolved by loss_from_function.
        """
        # store learning rate
        self.lr = lr
        # store loss function
        self.loss = loss
        # init weight matrices and store previous layers
        previous_layer = None
        for layer in self.layers:
            layer.init(previous_layer)
            previous_layer = layer

    def back_propagate_gradients(self, error_gradients, network_input):
        """Average the accumulated gradients and back-propagate through all layers."""
        # sum and divide the error gradient
        error_gradient_batch = (1 / len(error_gradients)) * numpy.sum(error_gradients, axis=0)
        # backward and update weights
        for layer in reversed(self.layers):
            # The first layer's "previous output" is the raw network input.
            if layer.previous_layer is None:
                previous_layer_output = network_input
            else:
                previous_layer_output = layer.previous_layer.layer_output
            error_gradient_batch = layer.backward(self.lr, error_gradient_batch, previous_layer_output)

    def fit(self, train_x, train_y, batch_size=1, epochs=1, verbose=1, test_accuracy=None):
        """
        Train the network.

        :param train_x: iterable of input samples.
        :param train_y: iterable of target samples (same length as train_x).
        :param batch_size: samples whose gradients are averaged per weight update.
        :param epochs: number of passes over the training data.
        :param verbose: >= 1 shows a tqdm progress bar with the running loss.
        :param test_accuracy: optional (x_test, y_test) pair for end-of-epoch accuracy.
        """
        # Cache for updating accuracy fast
        self.test_accuracy = test_accuracy
        # go through all epochs
        for epoch in range(epochs):
            loss_array = []
            error_gradients = []
            if verbose >= 1:
                pbar = tqdm(total=len(train_x), desc="Epoch: {}".format(epoch), ncols=128)
                pbar_counter = 0
                update_every = len(train_x) / 100
            # go through all batches
            for (batch_x, batch_y) in zip(train_x, train_y):
                # convert into 2d column vectors
                network_input = numpy.array(batch_x, ndmin=2).T
                network_target = numpy.array(batch_y, ndmin=2).T
                # feed forward all layers
                network_layer_forward = network_input
                for layer in self.layers:
                    network_layer_forward = layer.forward(network_layer_forward)
                network_predicted = network_layer_forward
                # loss from output layer
                error_gradient, loss_metric = loss_from_function(self.loss, network_target, network_predicted)
                loss_array.append(loss_metric)
                # In mini-batch gradient descent, the cost function (and therefore
                # gradient) is averaged over a small number of samples.
                error_gradients.append(error_gradient)
                if len(error_gradients) == batch_size:
                    self.back_propagate_gradients(error_gradients, network_input)
                    error_gradients = []
                if verbose >= 1:
                    # Refresh the bar roughly every 1% of the samples.
                    if pbar_counter % update_every == 0:
                        loss = "{:.4f}".format(numpy.mean(loss_array))
                        pbar.set_postfix(loss=loss, test_accuracy="--.--%")
                        pbar.update(update_every)
                    pbar_counter += 1
            # propagate last batch if anything remains
            if len(error_gradients) > 0:
                self.back_propagate_gradients(error_gradients, network_input)
                error_gradients = []
            # test accuracy in the end of the epoch
            if test_accuracy is not None and verbose >= 1:
                accuracy = self.model_test_accuracy()
                loss = "{:.4f}".format(numpy.mean(loss_array))
                pbar.set_postfix(loss=loss, test_accuracy=accuracy)
            if verbose >= 1:
                pbar.close()

    def predict(self, test_x):
        """Run a forward pass on one sample; return the raw output column vector."""
        # convert into a 2d column vector
        network_input = numpy.array(test_x, ndmin=2).T
        # feed forward all layers
        feed_forward = network_input
        for layer in self.layers:
            feed_forward = layer.forward(feed_forward)
        network_predicted = feed_forward
        return network_predicted

    def model_test_accuracy(self):
        """Return classification accuracy on the stored (x_test, y_test) as 'NN.NN%'."""
        (x_test, y_test) = self.test_accuracy
        # evaluate
        test_score = []
        for (batch_x, batch_y) in zip(x_test, y_test):
            outputs = self.predict(batch_x)
            predicted_label = numpy.argmax(outputs)
            correct_label = numpy.argmax(batch_y)
            # BUG FIX: this list was previously appended to under the
            # undefined name `test_sNeuralNetworks`, raising NameError
            # the first time accuracy was computed.
            if predicted_label == correct_label:
                test_score.append(1)
            else:
                test_score.append(0)
        test_score_array = numpy.asarray(test_score)
        accuracy = "{0:.2f}%".format(100.0 * (test_score_array.sum() / test_score_array.size))
        return accuracy
|
#!/usr/bin/env python
from board import SCL, SDA
import busio
import adafruit_ssd1306
# create I2C interface
# create I2C interface on the board's SCL/SDA pins
i2c = busio.I2C(SCL, SDA)
# create OLED driver for a 128x32 SSD1306 panel on that bus
disp = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# clear display: zero the framebuffer, then push it to the panel
disp.fill(0)
disp.show()
|
from httpx import Response
from ..exceptions import HTTPException
def http_status_handler(response: Response) -> None:
    """Raise an HTTPException for any response whose status code is not 200."""
    code = response.status_code
    if code == 200:
        return
    # Known error codes map to a human-readable detail; unknown ones get None.
    details = {
        400: 'Bad request',
        403: 'Forbidden',
        404: 'Page not found',
        422: 'Validation error'
    }
    raise HTTPException(status_code=code, detail=details.get(code))
|
#!/usr/bin/env python
import sys
from pubsubcommon import *
def callback(body, message):
    """Write the received message body to stdout, then ack it on the broker."""
    line = 'Message received was: %s\n' % (body, )
    sys.stdout.write(line)
    # Acknowledge so the broker does not redeliver this message.
    message.ack()
# Reconnect-forever consumer loop: if the connection drops, both `with`
# blocks exit and a fresh connection/consumer pair is built on the next pass.
while True:
    with kombu.Connection(**conn_dict) as conn:
        with conn.Consumer(queue, callbacks=[callback]):
            # Block on the broker event loop indefinitely (no timeout).
            for _ in kombu.eventloop(conn, timeout=None):
                pass
|
from zeroSumGA import PlanetWarsProblem, PlanetWarsTimeLine
from pygga import PyGGA
from build import zeroSumBuilder
from argparse import ArgumentParser
class zeroSumGaParser(object):
    """
    Command-line parser for the zero-sum GA runner.

    NOTE: instantiating this class does NOT return a zeroSumGaParser object:
    __new__ deliberately returns the parsed argparse.Namespace, so
    `args = zeroSumGaParser()` yields the parsed options directly.
    """
    def __new__(cls):
        # IDIOM FIX: __new__ receives the class as its first argument, so the
        # parameter is conventionally named `cls` (it was misleadingly named
        # `self`). It is unused because a Namespace is returned instead of an
        # instance.
        argparser = ArgumentParser()
        argparser.add_argument('-generations', default=10, type=int)
        argparser.add_argument('-population_size', default=10, type=int)
        argparser.add_argument('-tournament_size', default=2, type=int)
        argparser.add_argument('-elitism_percentage', default=10.0, type=float)
        argparser.add_argument('-crossover_probability', default=1.0, type=float)
        argparser.add_argument('-mutation_probability', default=0.5, type=float)
        # Reads sys.argv; exits with a usage message on unrecognized options.
        args = argparser.parse_args()
        return args
if __name__ == '__main__':
    # Build the native components, parse CLI options, then run the GA.
    zeroSumBuilder()
    args = zeroSumGaParser()
    # PyGGA is configured from the parsed Namespace; calling the resulting
    # object runs the evolution and returns the best individual.
    pygga = PyGGA(problem = PlanetWarsProblem(),
                  generations=args.generations,
                  population_size=args.population_size,
                  tournament_size=args.tournament_size,
                  elitism_percentage=args.elitism_percentage,
                  crossover_probability=args.crossover_probability,
                  mutation_probability=args.mutation_probability,
                  timeline = PlanetWarsTimeLine)
    optimus = pygga()
    optimus()
    print(f"{optimus}")
|
from flask import Flask
from .config import configs
from .KV import kv
from .db import db
from .user_helper import login_manager
def create_app(env):
    """
    Application factory: build and configure the Flask app for `env`.

    :param env: key into `configs` selecting the configuration object.
    :return: the configured Flask application.
    """
    app = Flask(__name__, template_folder="../templates", static_folder="../static")
    app.config.from_object(configs[env])
    # Bind the shared extension objects to this app instance.
    login_manager.init_app(app)
    kv.init_app(app)
    db.init_app(app)
    # Blueprints are imported here (not at module top) to avoid circular
    # imports with the extension objects defined in this package.
    from .main import main_bp
    app.register_blueprint(main_bp)
    from .admin import admin_bp
    app.register_blueprint(admin_bp)
    from .user import user_bp
    app.register_blueprint(user_bp)
    return app
|
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from agents import (
ModelBasedAgent,
AbstractFeatureProvider,
ViewsFeaturesProvider,
Model
)
from reco_gym import Configuration
# Default configuration for the logistic-regression polynomial agent.
logreg_poly_args = {
    'num_products': 10,
    'random_seed': np.random.randint(2 ** 31 - 1),
    # Degree of the polynomial feature expansion.
    'poly_degree': 2,
    # Whether to weight training samples with Inverse Propensity Scores.
    'with_ips': False,
    # Should deltas (rewards) be used to calculate weights?
    # If delta should not be used as a IPS numerator, than `1.0' is used.
    'ips_numerator_is_delta': False,
    # Should clipping be used to calculate Inverse Propensity Score?
    'ips_with_clipping': False,
    # Clipping value that limits the value of Inverse Propensity Score.
    'ips_clipping_value': 10,
    # scikit-learn LogisticRegression solver settings.
    'solver': 'lbfgs',
    'max_iter': 5000,
}
class LogregPolyModelBuilder(AbstractFeatureProvider):
    """
    Builds a logistic-regression model over polynomial features of
    (view counts, one-hot action), keeping only first-degree terms and
    cross-terms that involve exactly one action factor.
    """
    def __init__(self, config):
        super(LogregPolyModelBuilder, self).__init__(config)

    def build(self):
        """Train the model and return (features_provider, model)."""
        class LogisticRegressionPolyFeaturesProvider(ViewsFeaturesProvider):
            """
            Logistic Regression Polynomial Feature Provider
            """
            def __init__(self, config, poly, poly_selection_flags):
                super(LogisticRegressionPolyFeaturesProvider, self).__init__(config)
                self.poly = poly
                self.poly_selection_flags = poly_selection_flags
                # Pre-built (num_products x 2*num_products) matrix: the right
                # half is a fixed one-hot action block; the left half is
                # filled with view features per observation.
                self.features_with_actions = np.zeros((self.config.num_products, 2 * self.config.num_products))
                ixs = np.array(range(self.config.num_products))
                self.features_with_actions[ixs, self.config.num_products + ixs] = 1

            def features(self, observation):
                features_with_actions = self.features_with_actions.copy()
                features_with_actions[:, :self.config.num_products] = super().features(observation)
                # Expand to polynomial features, keep only the selected terms.
                return self.poly.transform(features_with_actions)[:, self.poly_selection_flags]

        class LogisticRegressionModel(Model):
            """
            Logistic Regression Model
            """
            def __init__(self, config, logreg):
                super(LogisticRegressionModel, self).__init__(config)
                self.logreg = logreg

            def act(self, observation, features):
                # Greedy action: product with the highest predicted probability.
                action_proba = self.logreg.predict_proba(features)[:, 1]
                action = np.argmax(action_proba)
                ps_all = np.zeros(self.config.num_products)
                ps_all[action] = 1.0
                return {
                    **super().act(observation, features),
                    **{
                        'a': action,
                        'ps': 1.0,
                        'ps-a': ps_all,
                    },
                }

        features, actions, deltas, pss = self.train_data()
        logreg = LogisticRegression(
            solver = self.config.solver,
            max_iter = self.config.max_iter,
            random_state = self.config.random_seed
        )
        # One-hot encode the taken actions and append them to the features.
        action_vector = np.zeros((features.shape[0], self.config.num_products))
        action_vector[np.arange(features.shape[0]), actions] = 1
        features_with_actions = np.append(features, action_vector, axis = 1)
        poly = PolynomialFeatures(self.config.poly_degree)
        features_poly = poly.fit_transform(features_with_actions)
        # Keep pure first-degree terms plus terms with exactly one action factor.
        only_first_degree = np.sum(poly.powers_, axis = 1) == 1
        only_with_actions = np.sum(poly.powers_[:, self.config.num_products:], axis = 1) == 1
        feature_selection_flags = only_first_degree | only_with_actions
        if self.config.with_ips:
            # Inverse Propensity Score weighting of the training samples.
            ips_numerator = deltas if self.config.ips_numerator_is_delta else 1.0
            weights = ips_numerator / pss
            if self.config.ips_with_clipping:
                # NOTE(review): clipping recomputes weights from `deltas / pss`
                # regardless of `ips_numerator_is_delta` -- confirm this
                # asymmetry is intentional.
                weights = np.minimum(deltas / pss, self.config.ips_clipping_value)
            lr = logreg.fit(features_poly[:, feature_selection_flags], deltas, weights)
        else:
            lr = logreg.fit(features_poly[:, feature_selection_flags], deltas)
        return (
            LogisticRegressionPolyFeaturesProvider(self.config, poly, feature_selection_flags),
            LogisticRegressionModel(self.config, lr)
        )
class LogregPolyAgent(ModelBasedAgent):
    """
    Logistic Regression Polynomial Agent

    Thin ModelBasedAgent wrapper that plugs in LogregPolyModelBuilder,
    configured by `logreg_poly_args` by default.
    """
    def __init__(self, config = Configuration(logreg_poly_args)):
        super(LogregPolyAgent, self).__init__(
            config,
            LogregPolyModelBuilder(config)
        )
|
import collections
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
# This shortcut creates a class named "Balance" with 2 attributes:
# "income" & "outcome" (lists of pending incoming/outgoing Guzis).
Balance = collections.namedtuple('Balance', 'income outcome')
class GuziCreator:
    """
    Factory helpers building the string identifiers of Guzis and Guzas.

    Identifier format: "<ISO date>-<owner id>-guzi<4-digit index>" (resp. guza).
    Both helpers are static: they are always invoked on the class itself
    (e.g. GuziCreator.create_guzi(user, date, i)), so they are now marked
    @staticmethod instead of relying on the previous implicit-self signatures
    that only worked because calls went through the class.
    """

    @staticmethod
    def create_guzi(user, date, index):
        """Return the identifier of `user`'s `index`-th Guzi created on `date`."""
        return date.isoformat() + "-" + user.id + "-guzi{:04d}".format(index)

    @staticmethod
    def create_guza(user, date, index):
        """Return the identifier of `user`'s `index`-th Guza created on `date`."""
        return date.isoformat() + "-" + user.id + "-guza{:04d}".format(index)
class SpendableEntity:
    """Interface for entities able to receive and spend Guzis (User, Company)."""

    def pay(self, guzis):
        """Receive the given list of Guzis."""
        raise NotImplementedError

    def spend_to(self, target, amount):
        """Transfer `amount` Guzis from this entity to `target`."""
        raise NotImplementedError
class User(SpendableEntity):
    """
    A Guzi user.

    Attributes:
        id: unique identifier, embedded in every Guzi/Guza this user creates.
        birthdate: date of birth, used by age().
        guzi_wallet: spendable Guzi identifiers.
        guza_wallet: spendable Guza identifiers.
        total_accumulated: Guzis definitively acquired; drives daily creation.
        guza_trashbin: outdated Guzas.
        balance: Balance(income, outcome) of pending Guzis.
    """
    def __init__(self, id, birthdate):
        self.id = id
        self.birthdate = birthdate
        self.guzi_wallet = []
        self.guza_wallet = []
        self.total_accumulated = []
        self.guza_trashbin = []
        self.balance = Balance([], [])

    def daily_guzis(self):
        """
        Return the number of Guzis (and Guzas) the user should earn each day:
        cube root of total_accumulated size, plus one.
        """
        return int(len(self.total_accumulated) ** (1/3) + 1)

    def age(self, date=None):
        """
        Return User's age at given date (defaults to today).

        BUG FIX: the default used to be `date=date.today()`, which is
        evaluated once at class-definition time and therefore goes stale in
        any long-running process; it is now computed per call.
        """
        if date is None:
            # Local import: the parameter shadows the module-level `date` name.
            from datetime import date as date_cls
            date = date_cls.today()
        years = relativedelta(date, self.birthdate).years
        if years < 0:
            raise ValueError("Date must be after user birth date {}".format(self.birthdate))
        return years

    def outdate(self, guzis):
        """
        Outdate the given Guzis
        If one or more given Guzis are not in any wallet, raise an error
        Add given Guzis to total_accumulated
        Add given Guzas to guza_trashbin
        """
        invalid_guzis = [g for g in guzis
                         if g not in self.guzi_wallet + self.guza_wallet]
        if len(invalid_guzis) > 0:
            raise ValueError("guzi(s) {} is/are invalid".format(invalid_guzis))
        for guzi in guzis:
            if self._is_guzi(guzi):
                del self.guzi_wallet[self.guzi_wallet.index(guzi)]
                self.total_accumulated.append(guzi)
            if self._is_guza(guzi):
                del self.guza_wallet[self.guza_wallet.index(guzi)]
                self.guza_trashbin.append(guzi)

    def pay(self, guzis):
        """
        Add given guzis to User balance income
        """
        for guzi in guzis:
            self.balance.income.append(guzi)

    def spend_to(self, target, amount):
        """
        Spend given amount of Guzis to given User target
        if amount is < 0 or too expansive, raise an error
        """
        if amount < 0:
            raise ValueError("Cannot spend negative amount")
        if amount > len(self.guzi_wallet):
            raise ValueError("User cannot pay this amount")
        if target is self:
            # Paying oneself accumulates the Guzis directly.
            self.total_accumulated += self.guzi_wallet[:amount]
        else:
            target.pay(self.guzi_wallet[:amount])
        del self.guzi_wallet[:amount]

    def give_guzas_to(self, target, amount):
        """
        give amount of Guzas to given Company target
        if amount is < 0 or too expansive, raise an error
        """
        if amount < 0:
            raise ValueError("Cannot give negative amount")
        if amount > len(self.guza_wallet):
            raise ValueError("User cannot give this amount")
        if not isinstance(target, Company):
            raise ValueError("Can only give Guzas to Company, not {}".format(type(target)))
        target.add_guzas(self.guza_wallet[:amount])
        # Given Guzas count as outcome in the balance.
        for g in self.guza_wallet[:amount]:
            self.balance.outcome.append(g)
        del self.guza_wallet[:amount]

    def check_balance(self):
        """
        Check the balance state
        If the balance income is greater than outcome,
        add the bonus income to the total_accumulated
        """
        while len(self.balance.income) > len(self.balance.outcome):
            guzi = self.balance.income[-1]
            # Remove last element to add it to total_accumulated
            del self.balance.income[-1]
            self.total_accumulated.append(guzi)

    def check_outdated_guzis(self, date):
        """
        Pass through every User's Guzis and add outdated ones
        (>30 days old) to User's total_accumulated
        """
        guzis_to_outdate = []
        for guzi in self.guzi_wallet + self.guza_wallet:
            # extract the date from the first 10 characters (YYYY-MM-DD)
            creation_date = date.fromisoformat(guzi[:10])
            if date - creation_date >= timedelta(days=30):
                guzis_to_outdate.append(guzi)
        self.outdate(guzis_to_outdate)

    def create_daily_guzis(self, date):
        """
        Create daily Guzis for User.
        Daily_Guzis = (total_accumulated)^(1/3) + 1
        Each Guzi has a specific format :
        <date>-<owner_id>-guzi<guzi_index>"
        <date> : 2010-04-18
        <guzi_index> : 4 digits index ("0001", "0342")
        """
        number_of_guzis_to_add = self.daily_guzis()
        for i in range(number_of_guzis_to_add):
            self.guzi_wallet.append(GuziCreator.create_guzi(self, date, i))
            self.guza_wallet.append(GuziCreator.create_guza(self, date, i))

    def _is_guzi(self, guzi):
        # Identifier format ends in "guziNNNN" / "guzaNNNN".
        return guzi[-8:-4] == "guzi"

    def _is_guza(self, guzi):
        return guzi[-8:-4] == "guza"
class Company(SpendableEntity):
    """
    A company: spends Guzas given by users; Guzis paid TO the company are
    forwarded directly to engaged users/founders via the engaged strategy.
    """
    def __init__(self, id, founders):
        self.id = id
        self.guzi_wallet = []
        # BUG FIX: spend_to() reads self.total_accumulated when the company
        # pays itself; it was never initialized and raised AttributeError.
        self.total_accumulated = []
        self.engaged_strategy = DefaultEngagedStrategy(founders)

    def add_guzas(self, guzas):
        """
        add_guzas is called from User to give the Company Guzas it will then
        be able to spend.
        Raises ValueError if any given Guza was already given before.
        """
        for guza in guzas:
            if guza in self.guzi_wallet:
                raise ValueError("guza {} already given".format(guza))
        self.guzi_wallet += guzas

    def spend_to(self, target, amount):
        """
        Spend given amount of Guzas to given User target
        if amount is < 0 or too expansive, raise an error
        """
        if amount < 0:
            raise ValueError("Cannot spend negative amount")
        if amount > len(self.guzi_wallet):
            raise ValueError("User cannot pay this amount")
        if target is self:
            self.total_accumulated += self.guzi_wallet[:amount]
        else:
            target.pay(self.guzi_wallet[:amount])
        del self.guzi_wallet[:amount]

    def add_engaged(self, user, times):
        """Register `user` as engaged `times` times (delegates to the strategy)."""
        self.engaged_strategy.add_engaged(user, times)

    def add_founder(self, user, times):
        """Register `user` as founder `times` times (delegates to the strategy)."""
        self.engaged_strategy.add_founder(user, times)

    def pay(self, guzis):
        """
        When a User or a Company pays a Company, the paied Guzis don't stay in
        any Company wallet, it goes directly to Company's engaged users balance.
        """
        self.engaged_strategy.pay(guzis)
class DefaultEngagedStrategy:
    """
    DefaultEngagedStrategy gives Guzis to users fully in arrived order
    Example :
    - Add User1 3 times
    - Add User2 1 time
    - Add User3 5 times
    - Add User1 2 times (yes, User1 again)
    Then, when pay is called for 5 Guzis :
    - Firstly, User1 gets 3 Guzis
    - Secondly, User2 gets 1 Guzi
    - Finaly, User3 gets 1 Gusi
    Then, when pay is called again for 5 Guzis
    - User3 gets 4 Guzis
    - User 1 gets 1 Guzi
    (See test test_pay_should_pay_in_arrival_and_times_order for details)
    If a Company want a user to get daily engaged, it must add him daily
    """
    def __init__(self, founders):
        """
        At least one founder is necessary, instead where would the profit paied
        Guzis go ? Company CAN NOT KEEP ANY PAID GUZI, so it needs to send them
        to any user : the founder(s)
        """
        if len(founders) == 0:
            raise ValueError("At least one founder is necessary to a company")
        self.users = {}
        self.engaged_users = []
        self.founders = []
        for f in founders:
            self.add_founder(f, 1)
        self.founders_index = 0

    def add_engaged(self, user, times):
        """
        Here we store users once in users dict
        and only store ids in engaged_users, to avoid big memory use
        """
        self.users[user.id] = user
        for t in range(times):
            self.engaged_users.append(user.id)

    def add_founder(self, user, times):
        """
        founders only get profit. If engaged keep arriving, they always firstly
        get paid. If only every engaged has got his engagement filled, then the
        founders get the profit.
        The difference here is that they never leave. While there is profit,
        they keep earning with a loop).
        """
        self.users[user.id] = user
        for t in range(times):
            self.founders.append(user.id)

    def pay(self, guzis):
        """Distribute each Guzi in `guzis`, one at a time, in arrival order."""
        for g in guzis:
            self._pay_guzi(g)

    def _pay_guzi(self, guzi):
        # Engaged users are served first; founders share the rest round-robin.
        if len(self.engaged_users) == 0:
            self.users[self.founders[self.founders_index]].pay([guzi])
            self.founders_index += 1
            self.founders_index %= len(self.founders)
        else:
            # BUG FIX: pay() expects a LIST of Guzis; passing the bare string
            # made User.pay iterate it character by character.
            self.users[self.engaged_users[0]].pay([guzi])
            del self.engaged_users[0]
|
#!/usr/bin/env python3
import urwid
import urwid
def menu_button(caption, callback):
    """Create a clickable button wired to `callback`, highlighted on focus."""
    btn = urwid.Button(caption)
    urwid.connect_signal(btn, 'click', callback)
    wrapped = urwid.AttrMap(btn, None, focus_map='reversed')
    return wrapped
def sub_menu(caption, choices):
    """Return a button labelled `caption`... that opens a nested menu of `choices`."""
    contents = menu(caption, choices)
    def open_menu(button):
        # `top` is the module-level CascadingBoxes container.
        return top.open_box(contents)
    return menu_button([caption, u'...'], open_menu)
def menu(title, choices):
    """Build a ListBox menu: a title, a divider, then the choice widgets."""
    rows = [urwid.Text(title), urwid.Divider()]
    rows += choices
    walker = urwid.SimpleFocusListWalker(rows)
    return urwid.ListBox(walker)
def item_chosen(button):
    """Show a confirmation box for the chosen item with an Ok (exit) button."""
    response = urwid.Text([u'You chose ', button.label, u'\n'])
    done = menu_button(u'Ok', exit_program)
    top.open_box(urwid.Filler(urwid.Pile([response, done])))
def exit_program(button):
    """Terminate the urwid main loop (and hence the program)."""
    raise urwid.ExitMainLoop()
# Static menu hierarchy shown at program start.
menu_top = menu(u'Main Menu', [
    sub_menu(u'Applications', [
        sub_menu(u'Accessories', [
            menu_button(u'Text Editor', item_chosen),
            menu_button(u'Terminal', item_chosen),
        ]),
    ]),
    sub_menu(u'System', [
        sub_menu(u'Preferences', [
            menu_button(u'Appearance', item_chosen),
        ]),
        menu_button(u'Lock Screen', item_chosen),
    ]),
])
class CascadingBoxes(urwid.WidgetPlaceholder):
    """Container that stacks opened menus as diagonally offset overlay boxes."""

    # Maximum number of overlay levels kept on screen at once.
    max_box_levels = 4

    def __init__(self, box):
        super(CascadingBoxes, self).__init__(urwid.SolidFill(u'/'))
        self.box_level = 0
        self.open_box(box)

    def open_box(self, box):
        """Push `box` on top of the current widget as a framed overlay."""
        self.original_widget = urwid.Overlay(urwid.LineBox(box),
            self.original_widget,
            align='center', width=('relative', 80),
            valign='middle', height=('relative', 80),
            min_width=24, min_height=8,
            # Offset each level a bit so the stack "cascades" diagonally.
            left=self.box_level * 3,
            right=(self.max_box_levels - self.box_level - 1) * 3,
            top=self.box_level * 2,
            bottom=(self.max_box_levels - self.box_level - 1) * 2)
        self.box_level += 1

    def keypress(self, size, key):
        """ESC pops the topmost box (never the root); other keys pass through."""
        if key == 'esc' and self.box_level > 1:
            self.original_widget = self.original_widget[0]
            self.box_level -= 1
        else:
            return super(CascadingBoxes, self).keypress(size, key)
# Root container; referenced by sub_menu/item_chosen via this module-level name.
top = CascadingBoxes(menu_top)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
|
from callbench.callables import *
# Module-level callables exercised by the benchmarks.
obj = Callable()
meth = obj.meth          # bound method
umeth = Callable.meth    # function accessed via the class
try:
    # The fast variants only exist on builds that provide them; absence is fine.
    fastmeth = obj.fastmeth
    ufastmeth = Callable.fastmeth
except AttributeError:
    pass
def have_PEP_580(obj):
    """Report whether flag bit 2048 (used here as the PEP 580 marker) is set on type(obj).__flags__."""
    flags = type(obj).__flags__
    return (flags & 2048) != 0
def have_PEP_590(obj):
    """Report whether flag bit 2 (used here as the PEP 590 marker) is set on type(obj).__flags__."""
    flags = type(obj).__flags__
    return (flags & 2) != 0
|
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
def test_record():
    """setitem_field on record and tuple arrays: string, None and integer positions."""
    array1 = awkward1.Array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}], checkvalid=True).layout
    assert awkward1.tolist(array1) == [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]
    array2 = array1.setitem_field("z", awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array2) == [{"x": 1, "y": 1.1, "z": []}, {"x": 2, "y": 2.2, "z": [1]}, {"x": 3, "y": 3.3, "z": [2, 2]}]
    # A None position appends under the next numeric field name ("2" here).
    array3 = array1.setitem_field(None, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [{"x": 1, "y": 1.1, "2": []}, {"x": 2, "y": 2.2, "2": [1]}, {"x": 3, "y": 3.3, "2": [2, 2]}]
    array3 = array1.setitem_field(0, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [{"x": 1, "y": 1.1, "0": []}, {"x": 2, "y": 2.2, "0": [1]}, {"x": 3, "y": 3.3, "0": [2, 2]}]
    # Same operations on tuples (positional records).
    array1 = awkward1.Array([(1, 1.1), (2, 2.2), (3, 3.3)], checkvalid=True).layout
    assert awkward1.tolist(array1) == [(1, 1.1), (2, 2.2), (3, 3.3)]
    array2 = array1.setitem_field("z", awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array2) == [{"0": 1, "1": 1.1, "z": []}, {"0": 2, "1": 2.2, "z": [1]}, {"0": 3, "1": 3.3, "z": [2, 2]}]
    array3 = array1.setitem_field(None, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [(1, 1.1, []), (2, 2.2, [1]), (3, 3.3, [2, 2])]
    # Integer positions insert at that slot; out-of-range (100) appends at the end.
    array3 = array1.setitem_field(0, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [([], 1, 1.1), ([1], 2, 2.2), ([2, 2], 3, 3.3)]
    array3 = array1.setitem_field(1, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [(1, [], 1.1), (2, [1], 2.2), (3, [2, 2], 3.3)]
    array3 = array1.setitem_field(100, awkward1.Array([[], [1], [2, 2]], checkvalid=True).layout)
    assert awkward1.tolist(array3) == [(1, 1.1, []), (2, 2.2, [1]), (3, 3.3, [2, 2])]
def test_withfield():
    """awkward1.withfield and __setitem__ add fields (auto-named, named, scalar)."""
    base = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], checkvalid=True)
    what = awkward1.Array([1.1, 2.2, 3.3], checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(base, what)) == [{"x": 1, "1": 1.1}, {"x": 2, "1": 2.2}, {"x": 3, "1": 3.3}]
    assert awkward1.tolist(awkward1.withfield(base, what, where="y")) == [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]
    base["z"] = what
    assert awkward1.tolist(base) == [{"x": 1, "z": 1.1}, {"x": 2, "z": 2.2}, {"x": 3, "z": 3.3}]
    # A scalar is broadcast to every record.
    base["q"] = 123
    assert awkward1.tolist(base) == [{"x": 1, "z": 1.1, "q": 123}, {"x": 2, "z": 2.2, "q": 123}, {"x": 3, "z": 3.3, "q": 123}]
    # withfield also works on a single record.
    base = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], checkvalid=True)[2]
    assert awkward1.tolist(awkward1.withfield(base, 100, "y")) == {"x": 3, "y": 100}
def test_regulararray():
    """withfield on RegularArrays: length-1 inner lists broadcast, matching lengths zip."""
    content = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    recordarray = awkward1.layout.RecordArray({"x": content})
    regulararray = awkward1.Array(awkward1.layout.RegularArray(recordarray, 3), checkvalid=True)
    # Size-1 sublists broadcast against size-3 sublists.
    content2 = awkward1.layout.NumpyArray(numpy.array([100, 200, 300]))
    regulararray2 = awkward1.Array(awkward1.layout.RegularArray(content2, 1), checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(regulararray, regulararray2, "y")) == [[{"x": 0.0, "y": 100}, {"x": 1.1, "y": 100}, {"x": 2.2, "y": 100}], [{"x": 3.3, "y": 200}, {"x": 4.4, "y": 200}, {"x": 5.5, "y": 200}], [{"x": 6.6, "y": 300}, {"x": 7.7, "y": 300}, {"x": 8.8, "y": 300}]]
    # Same-size sublists pair element by element.
    content2 = awkward1.layout.NumpyArray(numpy.array([100, 200, 300, 400, 500, 600, 700, 800, 900]))
    regulararray2 = awkward1.Array(awkward1.layout.RegularArray(content2, 3), checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(regulararray, regulararray2, "y")) == [[{"x": 0.0, "y": 100}, {"x": 1.1, "y": 200}, {"x": 2.2, "y": 300}], [{"x": 3.3, "y": 400}, {"x": 4.4, "y": 500}, {"x": 5.5, "y": 600}], [{"x": 6.6, "y": 700}, {"x": 7.7, "y": 800}, {"x": 8.8, "y": 900}]]
    # The same two behaviors via 2-d NumPy arrays.
    content2 = awkward1.Array(awkward1.layout.NumpyArray(numpy.array([[100], [200], [300]])), checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(regulararray, content2, "y")) == [[{"x": 0.0, "y": 100}, {"x": 1.1, "y": 100}, {"x": 2.2, "y": 100}], [{"x": 3.3, "y": 200}, {"x": 4.4, "y": 200}, {"x": 5.5, "y": 200}], [{"x": 6.6, "y": 300}, {"x": 7.7, "y": 300}, {"x": 8.8, "y": 300}]]
    content2 = awkward1.Array(awkward1.layout.NumpyArray(numpy.array([[100, 200, 300], [400, 500, 600], [700, 800, 900]])), checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(regulararray, content2, "y")) == [[{"x": 0.0, "y": 100}, {"x": 1.1, "y": 200}, {"x": 2.2, "y": 300}], [{"x": 3.3, "y": 400}, {"x": 4.4, "y": 500}, {"x": 5.5, "y": 600}], [{"x": 6.6, "y": 700}, {"x": 7.7, "y": 800}, {"x": 8.8, "y": 900}]]
def test_listarray():
    """withfield on a variable-length list of records, including an empty
    sublist; also checks broadcasting of per-list scalars and that a plain
    Python list is accepted as the new field."""
    one = awkward1.Array([[{"x": 1}, {"x": 2}, {"x": 3}], [], [{"x": 4}, {"x": 5}]], checkvalid=True)
    two = awkward1.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]], checkvalid=True)
    # Matching jagged structure: element-wise pairing.
    assert awkward1.tolist(awkward1.withfield(one, two, "y")) == [[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}], [], [{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}]]
    # One value per outer list, broadcast into each sublist (200 is consumed
    # by the empty sublist).
    three = awkward1.Array([100, 200, 300], checkvalid=True)
    assert awkward1.tolist(awkward1.withfield(one, three, "y")) == [[{"x": 1, "y": 100}, {"x": 2, "y": 100}, {"x": 3, "y": 100}], [], [{"x": 4, "y": 300}, {"x": 5, "y": 300}]]
    # Same broadcast, but from a raw Python list instead of an awkward Array.
    assert awkward1.tolist(awkward1.withfield(one, [100, 200, 300], "y")) == [[{"x": 1, "y": 100}, {"x": 2, "y": 100}, {"x": 3, "y": 100}], [], [{"x": 4, "y": 300}, {"x": 5, "y": 300}]]
|
# Copyright (c) 2017 SUSE LLC
import logging
import re
import subprocess
# Run a command, and return the result in string format, stripped. Return None if command fails.
def _run_cmd(cmd_array):
try:
return subprocess.check_output(cmd_array).decode("utf-8").strip()
except subprocess.CalledProcessError as c:
logging.warning('Command {} return error code [{}]:'.format(c.cmd, c.returncode))
return None
def get_repo():
    """Returns the current git repo; or 'Unknown repo' if there is an error."""
    url = _run_cmd(['git', 'ls-remote', '--get-url', 'origin'])
    if url is None:
        return 'Unknown repo'
    return url
def get_branch():
    """Returns the current git branch; or 'Unknown branch' if there is an error."""
    name = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    if name is None:
        return 'Unknown branch'
    return name
def get_hash():
    """Returns the current git commit hash; or 'Unknown commit hash' if there is an error."""
    sha = _run_cmd(['git', 'rev-parse', '--verify', 'HEAD'])
    if sha is None:
        return 'Unknown commit hash'
    return sha
def file_is_dirty(file_path):
    """If a file is new, modified, or deleted in git's tracking return True. False otherwise.

    Fixes over the previous version:
    - `_run_cmd` returns None on failure; passing None to `re` raised TypeError.
    - `re.match` only tested the *first* line of `git status` output (which is
      "On branch ..." in the human-readable format), so the filename line was
      never examined; `re.search` with MULTILINE scans every line.
    - The path is now `re.escape`d so regex metacharacters in filenames
      (e.g. dots, brackets) cannot corrupt the pattern.
    """
    file_status_msg = _run_cmd(['git', 'status', '--untracked-files=all', str(file_path)])
    if file_status_msg is None:
        # git failed (e.g. not a repository): report the file as clean.
        return False
    # git outputs filename on a line prefixed by whitespace if the file is new/modified/deleted
    if re.search(r'^\s*' + re.escape(str(file_path)) + '$', file_status_msg, re.MULTILINE):
        return True
    return False
def branch_is_dirty():
    """
    If any files are new, modified, or deleted in git's tracking return True. False otherwise.
    """
    status = _run_cmd(['git', 'status', '--untracked-files=all', '--porcelain'])
    # --porcelain returns no output if no changes
    return bool(status)
|
import FWCore.ParameterSet.Config as cms
#------------------------------------------------
# AlCaReco filtering for pi0 calibration:
#------------------------------------------------
# Sequence wrapping the regional rechit producer used for the pi0
# calibration stream; consumed by the AlCaReco configuration.
from Calibration.EcalAlCaRecoProducers.alCaPi0HLTRegRecHits_cfi import *
seqAlcastreamEcalPi0 = cms.Sequence(alCaPi0RegRecHits)
|
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for custom Jupyter Widgets in Colab."""
import IPython as _IPython
# Known widget-manager builds keyed by Jupyter widgets version; the value is
# the content hash embedded in the CDN URL of the manager module.
_supported_widgets_versions = {
    '5.0.0a': 'bf763107cca07459',
}
_default_version = '5.0.0a'
# URL of the currently installed manager module, or None when disabled.
_installed_url = None
def enable_custom_widget_manager(version=_default_version):
  """Enables a Jupyter widget manager which supports custom widgets.
  This will enable loading the required code from third party websites.
  Args:
    version: The version of Jupyter widgets for which support will be enabled.
  """
  version_hash = _supported_widgets_versions.get(version)
  if version_hash is None:
    raise ValueError(
        'Unknown widgets version: {version}'.format(version=version))
  manager_url = (
      'https://ssl.gstatic.com/colaboratory-static/widgets/'
      'colab-cdn-widget-manager/{version_hash}/manager.min.js').format(
          version_hash=version_hash)
  _install_custom_widget_manager(manager_url)
def disable_custom_widget_manager():
  """Disable support for custom Jupyter widgets."""
  # Passing None uninstalls the display hook and clears the manager URL.
  _install_custom_widget_manager(None)
def _install_custom_widget_manager(url):
  """Install a custom Jupyter widget manager.
  Args:
    url: The URL to an ES6 module which implements the custom widget manager
      interface or None to disable third-party widget support.
  """
  global _installed_url
  currently_installed = bool(_installed_url)
  # Register/unregister the display hook only on enable/disable transitions,
  # so re-enabling with a different URL does not double-register the hook.
  if url and not currently_installed:
    _IPython.get_ipython().display_pub.register_hook(_widget_display_hook)
  elif currently_installed and not url:
    _IPython.get_ipython().display_pub.unregister_hook(_widget_display_hook)
  _installed_url = url
_WIDGET_MIME_TYPE = 'application/vnd.jupyter.widget-view+json'
def _widget_display_hook(msg):
  """Display hook to enable custom widget manager info in the display item."""
  # Pass the message through untouched unless a manager is installed and the
  # message actually carries widget view data.
  if not _installed_url:
    return msg
  content = msg.get('content', {})
  if not content:
    return msg
  if not content.get('data', {}).get(_WIDGET_MIME_TYPE):
    return msg
  # Annotate the widget metadata with the manager URL so the frontend knows
  # where to load the custom manager from.
  widget_metadata = content.setdefault('metadata', {}).setdefault(
      _WIDGET_MIME_TYPE, {})
  widget_metadata['colab'] = {'custom_widget_manager': {'url': _installed_url,}}
  return msg
|
# -*- coding: utf-8 -*-
"""
Basic example for python-highcharts
All datasets need to input using "add_data_set" method
Highchart options can be either set by "set_options" method as showing here or
construct a option dictionary object and input using "set_dict_options" method
"""
from highcharts import Highchart # import highchart library
H = Highchart() # setup highchart instance
data = list(range(1,20))
data2 = list(range(20,1,-1)) # generate some random datasets
"""
Each dataset needs to input using add_data_set and add_data_from_jsonp (not recommended) methods
1. add_data_set(data, series_type="line", name=None, **kwargs)
1. data is the dataset for chart
2. series_type (default: "line") is the type of plot this dataset will be presented
3. name is the variable name of dateset(default: Series X) used in python
4. kwargs are for parameters in series or plotOptions
(for detail please ref to highcharts API: http://api.highcharts.com/highcharts#)
2. add_data_from_jsonp(data_src, data_name='json_data', series_type="line", name=None, **kwargs)
add dataset from the data_src using jsonp. It is converted to jquery function "$.getJSON" in javascript environment
1. data_src is the url (https) for the dataset
2. data_name is the variable name of dataset. This name is used for javascript environment (not in python)
3. series_type( default: "line") is the type of plot this dataset will be presented
4. kwargs are for parameters in series or plotOptions
(for detail please ref to highcharts API: http://api.highcharts.com/highcharts#)
"""
H.add_data_set(data2,'line')
H.add_data_set(data, 'line',
marker={
'states': {
'hover': {
'enabled': True,
'fillColor': 'white',
'lineColor': 'red',
'lineWidth': 2
}
}
},
events={
'click': "function (event) { alert(this.name + ' clicked\\n' + 'Alt: ' + event.altKey + '\\n' + \
'Control: ' + event.ctrlKey + '\\n' + 'Shift: ' + event.shiftKey + '\\n');}"},
dashStyle='ShortDash'
)
"""
Set up highchart options using
1. set_options method:
set_options(option_type, option_dict)
option_type is the keyword for highchart options
option_dict is (python) dict for option settings
(for option details please ref to highcharts API: http://api.highcharts.com/highcharts#)
"""
H.set_options('chart', {'resetZoomButton': {'relativeTo': 'plot', 'position': {'x': 0, 'y': -30}}})
H.set_options('xAxis', {'events': {'afterBreaks': 'function(e){return}'}})
H.set_options('tooltip', {'formatter': 'default_tooltip'})
H.set_options('xAxis', {'events': {'pointBreak': 'function(e){return}'}})
H.set_options('chart', {'style': {'fontFamily': 'Lucida Grande, sans-serif', "fontfontSize": '12px'}})
H.set_options('chart', {'style': {"fontfontSize": '22px'}})
H.set_options('chart', {'resetZoomButton': {'position': {'x': 10}}})
H.set_options('chart', {'resetZoomButton': {'relativeTo': 'chart'}})
"""
Set up highchart options using
2. set_dict_options method:
set_dict_options(options)
option is a (python) dict for options settings
The way to use this method is very similar to the options object as on highcharts docs:
http://www.highcharts.com/docs/getting-started/how-to-set-options
1. construct option (python) dict similar to the option object in javascript
2. input option dict using set_dict_options
(for all the option details please ref to highcharts API: http://api.highcharts.com/highcharts#)
"""
options = {
'xAxis':{
'plotBands':
[{'color': '#FCFFC5', 'from': 2, 'to': 4},
{'color': '#FCFFC5', 'from': 6, 'to': 8},
{'color': '#FCFFC5', 'from': 10, 'to': 12}]
}
}
H.set_dict_options(options) # input option object using set_dict_options method
H # showing the chart on ipython
H.save_file('highcharts') # save result as .html file with input name (and location path) |
from datetime import timedelta
from django.shortcuts import reverse
from provider import constants
from provider.views import CaptureViewBase, AuthorizeViewBase, RedirectViewBase
from provider.views import AccessTokenViewBase, OAuthError
from provider.utils import now
from provider.oauth2 import forms
from provider.oauth2 import models
from provider.oauth2 import backends
class CaptureView(CaptureViewBase):
    """
    Implementation of :class:`provider.views.Capture`.
    """
    def validate_scopes(self, scope_list):
        # NOTE(review): a set is always a subset of an iterable of its own
        # elements, so this check is a tautology and every scope list passes.
        # Presumably this should validate against the provider's configured
        # scopes -- confirm the intended source of truth before changing.
        return set(scope_list).issubset(scope_list)
    def get_redirect_url(self, request):
        # Next step of the authorization flow.
        return reverse('oauth2:authorize')
class AuthorizeView(AuthorizeViewBase):
    """
    Implementation of :class:`provider.views.Authorize`.
    """
    def get_request_form(self, client, data):
        # Form validating the incoming authorization request parameters.
        return forms.AuthorizationRequestForm(data, client=client)
    def get_authorization_form(self, request, client, data, client_data):
        # Form presented to the user to approve the requested scopes.
        return forms.AuthorizationForm(data)
    def get_client(self):
        # Returns None for unknown client ids so the caller can reject the
        # request instead of raising.
        try:
            return models.Client.objects.get(client_id=self.client_id)
        except models.Client.DoesNotExist:
            return None
    def get_redirect_url(self, request):
        return reverse('oauth2:redirect')
    def has_authorization(self, request, client, scope_list):
        """Return True when the user's prior approval already covers scope_list."""
        if client.auto_authorize:
            # Trusted client: never prompt the user.
            return True
        if client.authorize_every_time:
            # Client explicitly requires a fresh prompt on every request.
            return False
        authclient_mgr = models.AuthorizedClient.objects
        auth = authclient_mgr.check_authorization_scope(request.user,
                                                        client,
                                                        scope_list)
        return bool(auth)
    def save_authorization(self, request, client, form, client_data):
        """Persist the user's approval and create the authorization grant.

        Returns the grant code, or None when the form did not produce a grant.
        """
        scope_list = {s for s in form.cleaned_data['scope']}
        models.AuthorizedClient.objects.set_authorization_scope(request.user,
                                                                client,
                                                                scope_list)
        grant = form.save(user=request.user,
                          client=client,
                          redirect_uri=client_data.get('redirect_uri', ''))
        if grant is None:
            return None
        # NOTE(review): these three fields are already passed to form.save()
        # above; the re-assignment and second save() look redundant -- confirm
        # form.save() persists them before simplifying.
        grant.user = request.user
        grant.client = client
        grant.redirect_uri = client_data.get('redirect_uri', '')
        grant.save()
        return grant.code
class RedirectView(RedirectViewBase):
    """
    Implementation of :class:`provider.views.Redirect`
    """
    # The base class behavior is sufficient; no customization required.
    pass
class AccessTokenView(AccessTokenViewBase):
    """
    Implementation of :class:`provider.views.AccessToken`.
    .. note:: This implementation does provide all default grant types defined
        in :attr:`provider.views.AccessToken.grant_types`. If you
        wish to disable any, you can override the :meth:`get_handler` method
        *or* the :attr:`grant_types` list.
    """
    # Client authentication backends, tried in order until one succeeds.
    authentication = (
        backends.BasicClientBackend,
        backends.RequestParamsClientBackend,
        backends.PublicPasswordBackend,
        backends.PublicClientBackend,
    )
    def get_authorization_code_grant(self, request, data, client):
        """Validate an authorization-code grant request.

        Returns the grant on success, or the form's errors on failure.
        """
        form = forms.AuthorizationCodeGrantForm(data, client=client)
        if not form.is_valid():
            return form.errors
        return form.cleaned_data.get('grant')
    def get_refresh_token_grant(self, refresh_token, scope, client):
        """Resolve a refresh-token grant to its RefreshToken instance."""
        form = forms.RefreshTokenGrantForm(
            refresh_token=refresh_token, scope=scope, client=client).clean()
        return form['refresh_token']
    def get_password_grant(self, username, password, scope, client):
        """Validate a resource-owner password grant."""
        return forms.PasswordGrantForm(username, password, scope, client=client).clean()
    def get_access_token(self, request, user, scope, client):
        """Return an existing scoped access token, creating one if needed."""
        try:
            # Attempt to fetch an existing access token.
            at = models.AccessToken.objects.get_scoped_token(
                user, client, scope)
        except models.AccessToken.DoesNotExist:
            # None found... make a new one!
            at = self.create_access_token(request, user, scope, client)
            if client.client_type != constants.PUBLIC:
                # Confidential clients also get a refresh token.
                self.create_refresh_token(request, user, scope, at, client)
        return at
    def create_access_token(self, request, user, scope, client):
        """Create and return a new AccessToken carrying each requested scope.

        Fix: accepts ``request`` so the signature matches the call in
        :meth:`get_access_token`; the previous 3-argument definition shifted
        every argument by one (``user`` received the request object) and
        raised TypeError for the trailing argument.
        """
        at = models.AccessToken.objects.create(
            user=user,
            client=client,
        )
        for s in scope:
            at.scope.add(s)
        return at
    def create_refresh_token(self, request, user, scope, access_token, client):
        """Create and return a RefreshToken bound to ``access_token``.

        Fix: accepts ``request`` (correct arity) to match the call in
        :meth:`get_access_token`; the previous 4-argument definition raised
        TypeError when invoked with five arguments.
        """
        return models.RefreshToken.objects.create(
            user=user,
            access_token=access_token,
            client=client,
        )
    def invalidate_grant(self, grant):
        """Expire (or delete, per settings) a consumed authorization grant."""
        if constants.DELETE_EXPIRED:
            grant.delete()
        else:
            # Backdate the expiry instead of deleting so history is retained.
            grant.expires = now() - timedelta(days=1)
            grant.save()
    def invalidate_refresh_token(self, rt):
        """Mark (or delete, per settings) a used refresh token."""
        if constants.DELETE_EXPIRED:
            rt.delete()
        else:
            rt.expired = True
            rt.save()
    def invalidate_access_token(self, at):
        """Expire (or delete, per settings) an access token."""
        if constants.DELETE_EXPIRED:
            at.delete()
        else:
            at.expires = now() - timedelta(days=1)
            at.save()
|
#%%
import os
import glob
import itertools
import re
import numpy as np
import pandas as pd
import collections
import skbio
import git
#%%
# Find project parental directory (git repo root)
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define data directory
datadir = f"{homedir}/data/processed_sequencing/20210507_lacI_negctrl_library_mapping/"
# Define output dir
outputdir = f"{homedir}/data/barcodes/20210507_lacI_negctrl_library_mapping/"
# List fastq.gz file (takes the first negctrl file found)
fastq_file = glob.glob(f"{datadir}*negctrl*.fastq.gz")[0]
#%%
# Read the ordered negative-control sequences (TSV with a "variant" column)
df_neg = pd.read_csv(
    f"{homedir}/data/extra/neg_ctrl_sequences.tsv", delimiter="\t"
)
# Assign column with sequence length
df_neg = df_neg.assign(seq_len=df_neg["variant"].apply(len))
#%%
# Use skbio to have a generator to iterate over fastq
seqs = skbio.io.read(
    fastq_file,
    format="fastq",
    # NOTE(review): skbio's `verify` expects a boolean; the string "false"
    # is truthy, so verification is likely still enabled -- confirm intent.
    verify="false",
    variant="illumina1.8",
)
# Set counter
counter = 0
# Define number of samples (only used by the commented-out islice below)
n_samples = 10000
print("reading sequences into memory...")
# Initialize list to save sequence objects
seq_list = list()
# Iterate over sequences
for seq in seqs: #itertools.islice(seqs, n_samples):
    if counter % 10000 == 0:
        print(f"reading seq #{counter}")
    # Extract sequence information
    seq_id = seq.metadata["id"]
    sequence = str(skbio.DNA(sequence=seq, validate=False))
    # Append to list
    seq_list.append([seq_id, sequence])
    # Update counter
    counter += 1
# Initialize dataframe to save sequences
names = ["id", "sequence"]
df_seq = pd.DataFrame.from_records(seq_list, columns=names)
# Add sequence length to dataframe
df_seq["seq_len"] = df_seq.sequence.apply(len)
#%%
print("filtering sequences...")
# Define primer sequences flanking the region of interest
s100_rev = "TACTTTTGATTGCTGTGCCC"
s100_fwd = "ATAACACGGCACGAATAAGC"
# Search for these sequences in every read
# Initialize arrays to save primer match positions (NaN when not found)
fwd_pos = np.empty(len(df_seq))
rev_pos = np.empty(len(df_seq))
# Loop through sequences
for i, seq in df_seq.iterrows():
    # Search primer sequences
    fwd_re = re.search(s100_fwd, seq["sequence"])
    rev_re = re.search(s100_rev, seq["sequence"])
    # Save start position of each match
    if bool(fwd_re):
        fwd_pos[i] = fwd_re.span()[0]
    else:
        fwd_pos[i] = np.nan
    if bool(rev_re):
        rev_pos[i] = rev_re.span()[0]
    else:
        rev_pos[i] = np.nan
# Add columns to dataframe
df_seq = df_seq.assign(fwd_prim=fwd_pos, rev_prim=rev_pos)
# Reset index
df_seq.reset_index(inplace=True, drop=True)
#%%
# Filter out sequences without both primer sequences (NaN positions)
df_seq = df_seq.dropna()
# Compute distance between primers
df_seq = df_seq.assign(
    primer_dist = df_seq["fwd_prim"] - df_seq["rev_prim"]
)
# Reset index
df_seq.reset_index(inplace=True, drop=True)
#%%
# Keep only sequences with the expected primer spacing (primer_dist == 170)
df_seq = df_seq[df_seq["primer_dist"] == 170]
# Reset index
df_seq.reset_index(inplace=True, drop=True)
#%%
# Define clone binding site (reverse complement of the cloning-site sequence)
clone = str(
    skbio.DNA("gctagcCAATGCGGgagctc".upper()).reverse_complement()
)
# Initialize array to save clone position
clone_pos = np.zeros(len(df_seq))
# Loop through sequences
for i, seq in df_seq.iterrows():
    # Search clone sequence
    clone_re = re.search(str(clone), seq["sequence"])
    # Save position (NaN when not found)
    if bool(clone_re):
        clone_pos[i] = clone_re.span()[0]
    else:
        clone_pos[i] = np.nan
# Add column to dataframe
df_seq = df_seq.assign(clone=clone_pos)
# Compute clone distance relative to the reverse primer
clone_dist = (
    (df_seq["clone"] - df_seq["rev_prim"])
    + len(clone)
)
# Select sequences where the clone site directly abuts the reverse primer
# and sits at the expected read position (20)
df_seq = df_seq[(clone_dist == 0) & (clone_pos == 20)]
# Reset index
df_seq.reset_index(inplace=True, drop=True)
#%%
print("Mapping sequence and barcode...")
# Initialize dataframe to save sequences and barcodes
df_neg_map = pd.DataFrame([])
# Extract negative control sequence (150 bp starting at read position 60)
df_neg_map["sequence"] = df_seq["sequence"].apply(lambda x: x[60: 60+150])
# Extract barcodes (first 20 bp of the read)
df_neg_map["barcode"] = df_seq["sequence"].apply(lambda x: x[0:20])
#%%
print("Mapping sequences to TWIST order...")
# Boolean mask marking reads whose sequence is in the ordered variant list
neg_idx = np.array([False] * len(df_neg_map))
# Loop through each of the sequences
for i, seq in df_neg_map.iterrows():
    # Compute reverse complement of sequence
    s = str(skbio.DNA(seq["sequence"]).reverse_complement())
    # Find if sequence is included in list
    if s in df_neg["variant"].values:
        neg_idx[i] = True
# Filter out sequences that are not exactly as in Guillaume's list
df_neg_map = df_neg_map[neg_idx]
# Reset index
df_neg_map.reset_index(inplace=True, drop=True)
print(f"Number of sequence: {len(df_neg_map)}")
#%%
print("Counting reads for each sequence/barcode pair")
# Generate df with sequence/barcode pairs and their counts
df_counts = df_neg_map.value_counts().reset_index(name="counts")
# Keep only pairs observed at least 3 times
df_counts = df_counts[df_counts["counts"] >= 3]
#%%
# Write resulting barcode/count table to disk
print("writing barcode list into memory")
df_counts.to_csv(f"{outputdir}negctrl_barcodes_counts.csv", index=False)
print("Done! Barcodes filtered and quantified")
# %%
|
from django.urls import path
from .views import HomePageView, AboutPageView, StatsPageView
# URL routes for this app: each path maps directly to a class-based view;
# the `name` argument allows templates and views to reverse() these URLs.
urlpatterns = [
    path('', HomePageView.as_view(), name='home'),
    path('about/', AboutPageView.as_view(), name='about'),
    path('stats/', StatsPageView.as_view(), name='stats'),
]
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the `ide` app.

    Creates the Project, TemplateProject, BuildResult, ResourceFile,
    ResourceIdentifier and SourceFile tables plus their unique constraints.
    NOTE: the `uuid` defaults below are literal values frozen at generation
    time (standard for auto-generated South migrations); runtime defaults
    live on the model, not here.
    """
    def forwards(self, orm):
        """Create all `ide` tables and unique constraints."""
        # Adding model 'Project'
        db.create_table(u'ide_project', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('version_def_name', self.gf('django.db.models.fields.CharField')(default='APP_RESOURCES', max_length=50)),
        ))
        db.send_create_signal(u'ide', ['Project'])
        # Adding unique constraint on 'Project', fields ['owner', 'name']
        db.create_unique(u'ide_project', ['owner_id', 'name'])
        # Adding model 'TemplateProject'
        db.create_table(u'ide_templateproject', (
            (u'project_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['ide.Project'], unique=True, primary_key=True)),
            ('template_kind', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
        ))
        db.send_create_signal(u'ide', ['TemplateProject'])
        # Adding model 'BuildResult'
        db.create_table(u'ide_buildresult', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='builds', to=orm['ide.Project'])),
            ('uuid', self.gf('django.db.models.fields.CharField')(default='8277f892d4d84a69ba21c3989a02c61c', max_length=32)),
            ('state', self.gf('django.db.models.fields.IntegerField')(default=1)),
            ('started', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('finished', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'ide', ['BuildResult'])
        # Adding model 'ResourceFile'
        db.create_table(u'ide_resourcefile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='resources', to=orm['ide.Project'])),
            ('file_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('kind', self.gf('django.db.models.fields.CharField')(max_length=9)),
        ))
        db.send_create_signal(u'ide', ['ResourceFile'])
        # Adding unique constraint on 'ResourceFile', fields ['project', 'file_name']
        db.create_unique(u'ide_resourcefile', ['project_id', 'file_name'])
        # Adding model 'ResourceIdentifier'
        db.create_table(u'ide_resourceidentifier', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('resource_file', self.gf('django.db.models.fields.related.ForeignKey')(related_name='identifiers', to=orm['ide.ResourceFile'])),
            ('resource_id', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('character_regex', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal(u'ide', ['ResourceIdentifier'])
        # Adding unique constraint on 'ResourceIdentifier', fields ['resource_file', 'resource_id']
        db.create_unique(u'ide_resourceidentifier', ['resource_file_id', 'resource_id'])
        # Adding model 'SourceFile'
        db.create_table(u'ide_sourcefile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='source_files', to=orm['ide.Project'])),
            ('file_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal(u'ide', ['SourceFile'])
        # Adding unique constraint on 'SourceFile', fields ['project', 'file_name']
        db.create_unique(u'ide_sourcefile', ['project_id', 'file_name'])
    def backwards(self, orm):
        """Drop everything created by forwards(), constraints first."""
        # Removing unique constraint on 'SourceFile', fields ['project', 'file_name']
        db.delete_unique(u'ide_sourcefile', ['project_id', 'file_name'])
        # Removing unique constraint on 'ResourceIdentifier', fields ['resource_file', 'resource_id']
        db.delete_unique(u'ide_resourceidentifier', ['resource_file_id', 'resource_id'])
        # Removing unique constraint on 'ResourceFile', fields ['project', 'file_name']
        db.delete_unique(u'ide_resourcefile', ['project_id', 'file_name'])
        # Removing unique constraint on 'Project', fields ['owner', 'name']
        db.delete_unique(u'ide_project', ['owner_id', 'name'])
        # Deleting model 'Project'
        db.delete_table(u'ide_project')
        # Deleting model 'TemplateProject'
        db.delete_table(u'ide_templateproject')
        # Deleting model 'BuildResult'
        db.delete_table(u'ide_buildresult')
        # Deleting model 'ResourceFile'
        db.delete_table(u'ide_resourcefile')
        # Deleting model 'ResourceIdentifier'
        db.delete_table(u'ide_resourceidentifier')
        # Deleting model 'SourceFile'
        db.delete_table(u'ide_sourcefile')
    # Frozen ORM state used by South to reconstruct the models at this point
    # in history (auto-generated; do not edit by hand).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'ide.buildresult': {
            'Meta': {'object_name': 'BuildResult'},
            'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': u"orm['ide.Project']"}),
            'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "'7d2901ebedec4f708e706c6424a71e73'", 'max_length': '32'})
        },
        u'ide.project': {
            'Meta': {'unique_together': "(('owner', 'name'),)", 'object_name': 'Project'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'version_def_name': ('django.db.models.fields.CharField', [], {'default': "'APP_RESOURCES'", 'max_length': '50'})
        },
        u'ide.resourcefile': {
            'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
            'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['ide.Project']"})
        },
        u'ide.resourceidentifier': {
            'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
            'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': u"orm['ide.ResourceFile']"}),
            'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'ide.sourcefile': {
            'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
            'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': u"orm['ide.Project']"})
        },
        u'ide.templateproject': {
            'Meta': {'object_name': 'TemplateProject', '_ormbases': [u'ide.Project']},
            u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
            'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
        }
    }
    complete_apps = ['ide']
from django.core.validators import BaseValidator
from django.utils.translation import ugettext_lazy as _
class ProbablyLengthValidator(BaseValidator):
    """Validator comparing a value's length against `limit_value`."""
    # NOTE(review): the message reads "has at %(limit_value)s characters" --
    # likely a missing word ("at least"/"at most"); confirm intended wording.
    message = _('Ensure this value has at %(limit_value)s characters (it has %(show_value)d).')
    code = 'probably_length'
    def compare(self, a, b):
        # `a` is the cleaned length, `b` is limit_value. A truthy result makes
        # BaseValidator raise. `a not in b` implies limit_value is a
        # collection of acceptable lengths -- TODO confirm against callers.
        return a not in b
    def clean(self, x):
        # Validate the length of the value, not the value itself.
        return len(x)
class IsDigitValidator(BaseValidator):
    """Validator that rejects values containing any non-digit character."""
    message = _('Ensure this value has only digit characters.')
    code = 'id_digit'
    def __init__(self, limit_value=None, message=None):
        # limit_value is unused by compare(); the default only allows the
        # validator to be constructed without arguments.
        super(IsDigitValidator, self).__init__(limit_value, message)
    def compare(self, cleaned, limit):
        # BaseValidator raises when compare() is truthy, hence the negation.
        return not cleaned.isdigit()
class ControlNumberValidation(BaseValidator):
    """Validator delegating to the value's own `is_valid_control` attribute."""
    message = _('Verify the correct data.')
    code = 'control_number'
    def __init__(self, limit_value=None, message=None):
        # limit_value is unused by compare(); the default only allows the
        # validator to be constructed without arguments.
        super(ControlNumberValidation, self).__init__(limit_value, message)
    def compare(self, value, limit):
        # Truthy result signals a validation failure to BaseValidator.
        return not value.is_valid_control
|
"""
Departures
http://doc.navitia.io/#departures
Also known as /departures service.
This endpoint retrieves a list of departures from a specific datetime of a selected object. Departures are ordered chronologically in ascending order as:
url Result
/coverage/{region_id}/{resource_path}/departures List of the next departures, multi-route oriented, only time sorted (no grouped by stop_point/route here)
/coverage/{lon;lat}/coords/{lon;lat}/departures List of the next departures, multi-route oriented, only time sorted (no grouped by stop_point/route here)
"""
import os
def departures(client, collection_name=None, object_id=None, coords=None, region=None, extra_params=None, verbose=False):
    """Build and send a /departures request.

    Either `coords` (a "lon;lat" string) or a region + collection/object pair
    must be supplied; `region` falls back to `client.region` when omitted.

    Fixes over the previous version:
    - URL segments are joined with "/" instead of os.path.join, which would
      emit backslashes on Windows.
    - The unhelpful `ValueError("of correct type")` now states what is
      actually missing; "specifiy" typos in messages are corrected.

    Raises:
        ValueError: on conflicting or missing location arguments.
    """
    if coords and region:
        raise ValueError(
            "Cannot specify both coords and region, you must choose one.")
    if coords:
        # TODO: check coords format
        # /coverage/{lon;lat}/coords/{lon;lat}/departures
        url = "/".join(["coverage", coords, "coords", coords, "departures"])
    else:
        # /coverage/{region_id}/{resource_path}/departures
        # First choose region: explicit argument beats the client's default.
        if region:
            if not isinstance(region, str):
                raise ValueError("Region must be a string")
            used_region = region
        elif hasattr(client, 'region'):
            used_region = client.region
        else:
            raise ValueError(
                "You must specify coords or region, either here or in client")
        # /coverage/{region_id}/{collection_name}/{object_id}/departures
        if not object_id or not collection_name:
            raise ValueError(
                "Both collection_name and object_id are required when coords is not given")
        url = "/".join(["coverage", used_region,
                        collection_name, object_id, "departures"])
    return client._get(url=url, extra_params=extra_params, verbose=verbose)
|
# -*- coding: utf-8 -*-
import numpy as np
def get_field(self, args=None):
    """Get the value of variables stored in Solution.

    Parameters
    ----------
    self : Solution
        a Solution object
    args : dict
        dict of selected entries: maps an axis name (a key of self.axis)
        to the indices to keep along that axis

    Returns
    -------
    field : ndarray
        an ndarray of field values, sliced along each requested axis

    Notes
    -----
    Fixes over the previous version: the docstring typos ("Parameterss",
    "an Solution") are corrected, and the axis lookup is only performed for
    keys actually present in `args` -- previously it ran for every named
    axis and could raise IndexError when an axis length was absent from the
    field's shape even though that axis was never selected.
    """
    if args is None:
        args = dict()
    field = self.field
    if self.axis is not None:
        for key in self.axis:
            if key in args:
                # Locate the array dimension whose length matches this named
                # axis, then slice it down to the requested indices.
                # (Shapes are re-inspected each pass because np.take changes
                # the shape when indices is a scalar.)
                ax = np.where(np.array(field.shape) == self.axis[key])[0][0]
                field = np.take(field, indices=args[key], axis=ax)
    return field
|
from django.apps import AppConfig
class DriversConfig(AppConfig):
    """Django application configuration for the ``drivers`` app."""
    name = 'drivers'
|
'''
<Solution notes (translated)>
Search, O(logN): the input being sorted ascending made it clear this is a
binary-search problem.
Worked through cases on paper to figure out how to bisect and what the three
if-branches should do.
Could not see how to solve it with the bisect module, so the loop is
implemented by hand.
'''
n = int(input())
data = list(map(int, input().split()))
length = len(data)
# debug
# print(n)
# print(data)
result = -1
# bisect
start = 0
end = length - 1
# Binary search for a fixed point: an index i such that data[i] == i.
while start <= end:
    mid = (start + end) // 2
    # print(start, mid, end)
    if data[mid] == mid:
        result = mid
        break
    elif data[mid] < mid:
        start = mid + 1 # go right
    else:
        end = mid - 1 # go left
print(result)
'''
<Answer>
# 이진 탐색 소스코드 구현(재귀 함수)
def binary_search(array, start, end):
if start > end:
return None
mid = (start + end) // 2
# 고정점을 찾은 경우 인덱스 반환
if array[mid] == mid:
return mid
# 중간점이 가리키는 위치의 값보다 중간점이 작은 경우 왼쪽 확인
elif array[mid] > mid:
return binary_search(array, mid + 1, end)
n = int(input())
array = list(map(int, input().split()))
# 이진 탐색(Binary Search) 수행
index = binary_search(array, 0, n - 1)
# 고정점이 없는 경우 -1 출력
if index == None:
print(-1)
# 고정점이 있는 경우 해당 인덱스 출력
else:
print(index)
''' |
# -*- coding: utf-8 -*-
"""
Python toolboxes for math functions, personalized plots and other things.
Notes
-----
Developed for Python 3.6.1
@author: d-bouvier (bouvierdamien@gmail.com)
"""
__author__ = "Damien Bouvier"
__maintainer__ = "Damien Bouvier"
__version__ = "0.3"
__author_email__ = 'bouvierdamien@gmail.com'
from . import mathbox
from . import savebox
from . import plotbox
from . import utilities
__all__ = ['mathbox', 'savebox', 'plotbox', 'utilities']
|
from django.conf.urls import patterns, include, url
# Route table for the article app, mapping URL regexes to dotted view paths.
# NOTE(review): ``patterns()`` with string view references is a pre-1.10
# Django idiom; this module targets an old Django release.
urlpatterns = patterns('',
    url(r'^all/$', 'article.views.articles'),
    url(r'^get/(?P<article_id>\d+)/$','article.views.article'),
    url(r'^like/(?P<article_id>\d+)/$','article.views.like_article'),
    url(r'^create/$','article.views.create'),
)
|
# Generated by Django 4.0.2 on 2022-04-02 06:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``drone`` model field ``admin_id`` to ``admin``."""

    dependencies = [
        ('stats', '0006_alter_drone_admin_id_alter_drone_flight_and_more'),
    ]

    operations = [
        migrations.RenameField(
            model_name='drone',
            old_name='admin_id',
            new_name='admin',
        ),
    ]
|
import numpy as np
class Layer_Dense:
    """Fully connected layer: ``output = inputs @ weights + biases``.

    Optional L1/L2 regularization strengths are stored at construction and
    their gradients are added to ``dweights``/``dbiases`` in ``backward``.
    """

    def __init__(
        self,
        n_inputs,
        n_neurons,
        weight_regulaizer_l1=0,
        weight_regulaizer_l2=0,
        bias_regularizer_l1=0,
        bias_regularizer_l2=0,
    ):
        # Small random weights, zero biases (row vector broadcast over batch).
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        # Remember regularization strengths for the backward pass.
        self.weight_regulaizer_l1 = weight_regulaizer_l1
        self.weight_regulaizer_l2 = weight_regulaizer_l2
        self.bias_regularizer_l1 = bias_regularizer_l1
        self.bias_regularizer_l2 = bias_regularizer_l2

    def forward(self, inputs, training):
        """Affine transform; the inputs are kept for the backward pass."""
        self.inputs = inputs
        self.output = np.dot(inputs, self.weights) + self.biases

    def backward(self, dvalues):
        """Backpropagate gradients and add regularization terms."""
        # d(inputs @ W)/dW = inputs; d(+b)/db = 1, so biases get the column sums.
        self.dweights = np.dot(self.inputs.T, dvalues)
        self.dbiases = np.sum(dvalues, axis=0, keepdims=True)
        # L1 contributes sign(param) (with sign treated as +1 at zero),
        # L2 contributes 2 * lambda * param.
        if self.weight_regulaizer_l1 > 0:
            self.dweights += self.weight_regulaizer_l1 * np.where(
                self.weights < 0, -1.0, 1.0
            )
        if self.weight_regulaizer_l2 > 0:
            self.dweights += 2 * self.weight_regulaizer_l2 * self.weights
        if self.bias_regularizer_l1 > 0:
            self.dbiases += self.bias_regularizer_l1 * np.where(
                self.biases < 0, -1.0, 1.0
            )
        if self.bias_regularizer_l2 > 0:
            self.dbiases += 2 * self.bias_regularizer_l2 * self.biases
        # d(inputs @ W)/d(inputs) = W.
        self.dinputs = np.dot(dvalues, self.weights.T)

    def get_parameters(self):
        """Return (weights, biases) currently held by the layer."""
        return self.weights, self.biases

    def set_parameters(self, weights, biases):
        """Overwrite the layer's weights and biases."""
        self.weights = weights
        self.biases = biases
class Layer_Dropout:
    """Inverted dropout: active during training, pass-through otherwise."""

    def __init__(self, rate):
        # Store the *keep* probability rather than the drop rate.
        self.rate = 1 - rate

    def forward(self, inputs, training):
        """Apply (or skip) the dropout mask, remembering the inputs."""
        self.inputs = inputs
        if training:
            # Scale by 1/keep_prob so the expected activation is unchanged.
            mask = np.random.binomial(1, self.rate, size=inputs.shape) / self.rate
            self.binary_mask = mask
            self.output = inputs * mask
        else:
            self.output = inputs.copy()

    def backward(self, dvalues):
        """The gradient of dropout is the same mask applied to dvalues."""
        self.dinputs = dvalues * self.binary_mask
class Layer_Input:
    """Pseudo-layer that feeds raw samples to the first trainable layer."""
    def forward(self, inputs, training):
        # No transformation: simply expose the data as this layer's output.
        self.output = inputs
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM
import logging
# MDM-AP (debug module) register addresses used by KL25Z.init() below.
MDM_STATUS = 0x01000000  # status register; bit 2 is checked for flash security
MDM_CTRL = 0x01000004  # control register; written 1/0 during the unlock loop
MDM_IDR = 0x010000fc  # identification register; expected to read 0x001c0020
class KL25Z(CortexM):
    """Debug support for the Freescale/NXP KL25Z target."""

    def __init__(self, transport):
        CortexM.__init__(self, transport)
        # Page size used for auto-incrementing memory accesses.
        self.auto_increment_page_size = 0x400

    def init(self):
        """Initialize the target, unlocking it first if flash security is set."""
        CortexM.init(self, False)
        # check for flash security
        val = self.transport.readAP(MDM_IDR)
        if val != 0x001c0020:
            logging.error("KL25Z: bad flash ID")
        val = self.transport.readAP(MDM_STATUS)
        if (val & (1 << 2)):
            # Bit 2 set -> device is secured; attempt an unlock under reset.
            logging.warning("KL25Z secure state: will try to unlock")
            self.transport.assertReset(True)
            while True:
                # Write 1 to MDM_CTRL and poll status bit 0 until the
                # request is acknowledged.
                # NOTE(review): presumably bit 0 = mass-erase acknowledge;
                # confirm against the Kinetis MDM-AP documentation.
                self.transport.writeAP(MDM_CTRL, 1)
                val = self.transport.readAP(MDM_STATUS)
                logging.info(val)
                if (val & 1):
                    break
            while True:
                # Clear MDM_CTRL and wait until it reads back as zero.
                self.transport.writeAP(MDM_CTRL, 0)
                val = self.transport.readAP(MDM_CTRL)
                if (val == 0):
                    break
        logging.info("KL25Z not in secure state")
        self.halt()
        self.setupFPB()
|
import time
from datetime import datetime, timedelta
from pymysql import connect
from ..general import Log
class MySql:
    """Static helpers that run queries on short-lived pymysql connections."""

    @staticmethod
    def execute_query(environment_configuration, query, wait_time=0):
        """Run *query*, polling until the first cell is non-NULL or *wait_time* seconds pass.

        Returns the fetched rows for SELECTs, or ``lastrowid`` for INSERTs.
        NOTE(review): queries arrive as fully interpolated strings — callers
        must ensure values are trusted/escaped (SQL-injection risk).
        """
        Log.info('Sending "%s" query to the "%s" database' % (query, environment_configuration['database']))
        start_time = datetime.now()
        rtn = []
        while True:
            # NOTE(review): a fresh connection is opened on every poll
            # iteration — presumably so each attempt sees newly committed
            # data; confirm before refactoring.
            conn = connect(user=environment_configuration['user'], password=environment_configuration['password'], host=environment_configuration['server'], database=environment_configuration['database'])
            cursor = conn.cursor()
            cursor.execute(query)
            row = cursor.fetchone()
            # Stop when a non-NULL first cell appears or the wait budget is spent.
            if (row and (row[0] is not None)) or (datetime.now() - start_time > timedelta(seconds=wait_time)):
                break
            conn.close()
            time.sleep(0.1)
        Log.info("Execution time = %s" % str(datetime.now() - start_time))
        # Drain the remaining rows of the last (still open) cursor.
        while row:
            rtn.append(row)
            row = cursor.fetchone()
        query = query.lower()
        if query.startswith('update') or query.startswith('delete') or query.startswith('insert'):
            conn.commit()
            if query.startswith('insert'):
                # For INSERTs the caller gets the new row id instead of rows.
                rtn = cursor.lastrowid
        conn.close()
        return rtn
    @staticmethod
    def bulk_insert(environment_configuration, queries):
        """Run all *queries* on one connection/commit; return their lastrowids."""
        Log.info('Using bulk insert into the database')
        out = list()
        conn = connect(user=environment_configuration['user'], password=environment_configuration['password'], host=environment_configuration['server'], database=environment_configuration['database'])
        cursor = conn.cursor()
        for query in queries:
            cursor.execute(query)
            out.append(cursor.lastrowid)
        conn.commit()
        conn.close()
        return out
class MySQLTable:
    """Helper bound to one table; subclasses supply ``table_name`` and ``environment``."""

    @property
    def table_name(self):
        raise NotImplementedError

    @property
    def environment(self):
        raise NotImplementedError

    def update_column(self, column_name, value, condition):
        """Set one column for every row matching *condition*."""
        MySql.execute_query(
            self.environment,
            f"UPDATE {self.table_name} SET {column_name}={value} WHERE {condition}",
        )

    def delete(self, condition=None):
        """Delete rows matching *condition* (or every row when omitted)."""
        query = f"DELETE FROM {self.table_name}"
        if condition:
            query += f" WHERE {condition}"
        MySql.execute_query(self.environment, query)

    def select_value(self, column_name, condition, wait_time=1):
        """Return the first cell of the first matching row, or None."""
        rows = MySql.execute_query(
            self.environment,
            f"SELECT {column_name} FROM {self.table_name} WHERE {condition}",
            wait_time,
        )
        if not rows:
            Log.info("SQl result is None")
            return None
        Log.info("SQl result: %s" % rows[0][0])
        return rows[0][0]

    def select_values(self, column_name, condition=None, wait_time=1):
        """Return the first column of all matching rows."""
        query = f"SELECT {column_name} FROM {self.table_name}"
        if condition:
            query += f" WHERE {condition}"
        rows = MySql.execute_query(self.environment, query, wait_time)
        if rows:
            rows = [row[0] for row in rows]
            Log.info("SQl result: %s" % rows)
        return rows

    def select_values_by_a_custom_condition(self, column_name, condition, wait_time=1):
        """Like select_values, but *condition* is a raw SQL suffix (JOIN, ORDER BY, ...)."""
        rows = MySql.execute_query(
            self.environment,
            f"SELECT {column_name} FROM {self.table_name} {condition}",
            wait_time,
        )
        if rows:
            rows = [row[0] for row in rows]
            Log.info("SQl result: %s" % rows)
        return rows

    def prepare_queries_for_insert_from_dict(self, data):
        """Build one INSERT statement per row dict in *data*."""
        out_queries = []
        for row in data:
            cols = ",".join(row.keys())
            vals = ",".join(str(v) for v in row.values())
            out_queries.append(
                "INSERT INTO %s (%s) VALUE(%s)" % (self.table_name, cols, vals)
            )
        return out_queries

    def insert_values(self, column_values):
        """Insert a single row dict; return the new row id."""
        query = self.prepare_queries_for_insert_from_dict([column_values])[0]
        return MySql.execute_query(self.environment, query)

    def bulk_insert(self, data):
        """Insert many row dicts in one transaction; return their ids."""
        return MySql.bulk_insert(
            self.environment, self.prepare_queries_for_insert_from_dict(data)
        )

    def delete_all_data(self):
        """Remove every row from the table."""
        return MySql.execute_query(self.environment, f'DELETE FROM {self.table_name}')
|
from django.contrib.auth.decorators import user_passes_test
from django.views.generic.simple import direct_to_template
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template import RequestContext, loader
from google.appengine.ext import db
from projects.models import Project, ProjectURL
from projects.forms import ProjectForm, ProjectURLForm
@user_passes_test(lambda u: u.is_staff)
def project_list(request):
    """Staff-only: render the list of all projects ordered by "-name"."""
    projects = Project.all().order("-name")
    return direct_to_template(request, "project_list.html", extra_context={
        "page": projects,
        "nav": {"selected": "projects", "subnav": "list"},
    })
@user_passes_test(lambda u: u.is_staff)
def project_add(request):
    """Staff-only: create a project; redirect to the list on success."""
    # Bound form on POST, unbound otherwise (``request.POST or None`` idiom).
    form = ProjectForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse("projects-list"))
    return direct_to_template(request, "project_add.html", extra_context={
        "form": form,
        "nav": {"selected": "projects",},
    })
@user_passes_test(lambda u: u.is_staff)
def project_edit(request, pk):
    """Staff-only: edit the project with datastore key ``pk``."""
    form = ProjectForm(request.POST or None, instance=Project.get(pk))
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse("projects-list"))
    return direct_to_template(request, "project_edit.html", extra_context={
        "form": form,
        "nav": {"selected": "projects",},
    })
@user_passes_test(lambda u: u.is_staff)
def project_url_add(request, pk):
    """Staff-only: attach a new URL to the project with key ``pk``."""
    project = Project.get(pk)
    form = ProjectURLForm(request.POST or None)
    if form.is_valid():
        # Save deferred so the parent project can be set before persisting.
        obj = form.save(commit=False)
        obj.project = project
        obj.save()
        return HttpResponseRedirect(reverse("projects-list"))
    return direct_to_template(request, "project_url_add.html", extra_context={
        "form": form,
        "project": project,
        "nav": {"selected": "projects",},
    })
@user_passes_test(lambda u: u.is_staff)
def project_url_edit(request, pk, url):
    """Staff-only: edit URL ``url`` belonging to the project with key ``pk``."""
    url = ProjectURL.get(url)
    project = Project.get(pk)
    form = ProjectURLForm(request.POST or None, instance=url)
    if form.is_valid():
        # Save deferred so the parent project can be (re)set before persisting.
        obj = form.save(commit=False)
        obj.project = project
        obj.save()
        return HttpResponseRedirect(reverse("projects-list"))
    return direct_to_template(request, "project_url_edit.html", extra_context={
        "form": form,
        "project": project,
        "nav": {"selected": "projects",},
    })
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Third Party Stuff
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
# steelrumors Stuff
from steelrumors.base.models import UUIDModel
class UserManager(BaseUserManager):
    """Manager for the email-keyed ``User`` model below."""
    use_in_migrations = True

    def _create_user(self, email, username, password, is_staff, is_superuser, **extra_fields):
        """Creates and saves a User with the given email and password.
        """
        email = self.normalize_email(email)
        user = self.model(email=email, username=username, is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, username, password=None, **extra_fields):
        """Create a regular account (no staff/superuser flags)."""
        return self._create_user(email, username, password, False, False, **extra_fields)

    def create_superuser(self, email, username, password, **extra_fields):
        """Create an account with both staff and superuser flags set."""
        return self._create_user(email, username, password, True, True, **extra_fields)
@python_2_unicode_compatible
class User(AbstractBaseUser, UUIDModel, PermissionsMixin):
    """Custom user model that authenticates by email instead of username."""
    username = models.CharField(_('username'), max_length=30)
    first_name = models.CharField(_('First Name'), max_length=120, blank=True)
    last_name = models.CharField(_('Last Name'), max_length=120, blank=True)
    # Email is the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(_('email address'), unique=True, db_index=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text='Designates whether the user can log into this admin site.')
    is_active = models.BooleanField('active', default=True,
        help_text='Designates whether this user should be treated as '
                  'active. Unselect this instead of deleting accounts.')
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    bio = models.TextField(_('bio data'), blank=True)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    objects = UserManager()

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        ordering = ('-date_joined', )

    def __str__(self):
        # String form is the UUID primary key, not a human-readable name.
        return str(self.id)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '{} {}'.format(self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name.strip()
|
from datetime import datetime
class SingleResult:
    """Result data of a single earthquake event.

    In the USGS REST API a range search (time/location) yields a
    **FeatureCollection** (GeoJSON), while a search by event ID yields a
    single **Feature**. This class wraps that single-event result and
    exposes convenience accessors over its "properties" and "geometry"
    members.

    Example:
    ::
        event_id = "usc000lvb5"
        single_result = EarthquakeQuery.search_by_event_id(event_id=event_id)
        single_result.get_raw_json()
    """

    def __init__(self, result_json):
        """Store the query result exactly as received.

        :param result_json: the query result (parsed GeoJSON Feature)
        """
        self.json_raw = result_json

    def _properties(self):
        # Internal shortcut to the GeoJSON "properties" mapping.
        return self.json_raw["properties"]

    def get_raw_json(self) -> str:
        """Return the stored result untouched."""
        return self.json_raw

    def get_raw_properties(self) -> dict:
        """Return the "properties" mapping (mag, title, place, ...)."""
        return self._properties()

    def get_raw_geometry(self) -> dict:
        """Return the "geometry" mapping (type and coordinates)."""
        return self.json_raw["geometry"]

    def get_coordinates(self) -> list:
        """Return the coordinates array locating the earthquake source."""
        return self.json_raw["geometry"]["coordinates"]

    def get_magnitude(self) -> float:
        """Return the magnitude (size of the earthquake source)."""
        return self._properties()["mag"]

    def get_title(self) -> str:
        """Return the short descriptive title, e.g. "M 6.5 - 32km W of Sola, Vanuatu"."""
        return self._properties()["title"]

    def get_location_string(self) -> str:
        """Return the place description, e.g. '32km W of Sola, Vanuatu'."""
        return self._properties()["place"]

    def get_epoch_time(self) -> int:
        """Return the event time as an epoch timestamp (milliseconds)."""
        return self._properties()["time"]

    def get_datetime(self) -> datetime:
        """Return the event time converted to a Python datetime."""
        # The API reports milliseconds; fromtimestamp expects seconds.
        return datetime.fromtimestamp(self._properties()["time"] / 1000.0)

    def get_webpage_url(self) -> str:
        """Return the USGS web page URL for this event."""
        return self._properties()["url"]
|
import logging
from . import OrController
class TriggerController(OrController):
    """A controller for triggering actions.

    The interface mirrors how OpenRAVE controllers are used, so trigger-style
    devices can be driven the same way as motion controllers.
    """

    def __init__(self, namespace, controller_name, simulated=False):
        """
        :param namespace: ROS namespace of the controller
        :param controller_name: name of the trigger controller
        :param simulated: when True, no ROS client is created and
            ``Trigger`` is a no-op
        """
        self.logger = logging.getLogger(__name__)
        self.namespace = namespace
        self.controller_name = controller_name
        self._current_cmd = None
        self.simulated = simulated
        if not simulated:
            # Imported lazily so simulated use works without ROS installed.
            from ros_control_client_py import TriggerClient
            self.controller_client = TriggerClient(namespace, controller_name)
        self.logger.info("Trigger Controller initialized")

    def Trigger(self, timeout=None):
        """Trigger the controlled action.

        :param timeout: if not None and >= 0, block until the trigger
            completes or the timeout elapses
        :type timeout: double or None
        :raises TriggerFailed: if a previous trigger is still in progress
        """
        # BUG FIX: the simulated branch previously fell through via 'pass';
        # make the no-op explicit with an early return.
        if self.simulated:
            return
        if not self.IsDone():
            from ros_control_client_py import TriggerFailed
            # BUG FIX: the original message embedded source indentation in
            # the literal via a backslash line continuation.
            raise TriggerFailed(
                "Trigger action already in progress and cannot be preempted",
                None)
        self._current_cmd = self.controller_client.execute()
        if timeout is not None and timeout >= 0.0:
            self._current_cmd.result(timeout)

    def IsDone(self):
        """True when simulated, never triggered, or the last trigger finished."""
        return (self.simulated or
                self._current_cmd is None or
                self._current_cmd.done())
|
from typing import Union
import numpy as np
from pandas import DataFrame, Series
from radvel.kepler import rv_drive
from radvel.orbit import timetrans_to_timeperi
from scipy.stats import norm, truncnorm
from ephemere import constants as const
def get_tp_param(planet: Series, n_samples: int = 0) -> Union[np.ndarray, float]:
    """
    Get time of periastron from time of transit or time of periastron of a given planet
    (with parameters stored in a series representing a row of the archive dataframe)
    :param planet: Series with planet info (row from archive dataframe)
    :type planet: Series
    :param n_samples: Number of samples to draw, defaults to 0
    :type n_samples: int, optional
    :return: Single tp value or n_samples sample values
    :rtype: Union[np.ndarray, float]
    :raises ValueError: If neither tp or tc parameters are available
    """
    has_tp = not np.isnan(planet[const.TP_KEY])
    has_tc = not np.isnan(planet[const.TC_KEY])
    # If both are NaN, we can't obtain tp
    if not (has_tp or has_tc):
        raise ValueError(
            f"Both {const.TP_KEY} and {const.TC_KEY} are NaN f for {planet['pl_name']}"
        )
    if (planet[const.TRANSIT_FLAG] and has_tc) or not has_tp:
        # For transiting planets, Tc is usually better constained -> try first
        if n_samples > 0:
            tc = draw_param(planet, const.TC_KEY, n_samples)
        else:
            tc = planet[const.TC_KEY]
        # This assumes that other params are already distributions if they need to be
        # Convert transit time to periastron time using P, e and omega.
        tp = timetrans_to_timeperi(
            tc, planet[const.PER_KEY], planet[const.ECC_KEY], planet[const.OMEGA_KEY]
        )
    else:
        # Use the catalogued periastron time directly (drawn if requested).
        if n_samples > 0:
            tp = draw_param(planet, const.TP_KEY, n_samples)
        else:
            tp = planet[const.TP_KEY]
    return tp
def draw_param(planet: Series, key: str, ndraws: int) -> np.ndarray:
    """
    Draw parameter values based on planet parameters and uncertainties.
    :param planet: Pandas series with planet parameters.
    :type planet: Series
    :param key: Key of the parameter to draw
    :type key: str
    :param ndraws: Number of draws.
    :type ndraws: int
    :return: Array of Monte-Carlo draws for the parameter.
    :rtype: np.ndarray
    :raises ValueError: If the parameter value or its error is NaN
    """
    # NOTE: Maybe astropy uncertainties could do the job, but would need truncated normal
    # (related issue: https://github.com/astropy/astropy/issues/12886)
    # Bottom line: can implement custom distribution, but what we have does the job for now
    pval = planet[key]
    err = get_param_error(planet, key)
    if np.any(np.isnan([pval, err])):
        raise ValueError(f"Value and error for {key} must not be NaN")
    if key in [const.ECC_KEY, const.PER_KEY, const.K_KEY]:
        # Truncated normal if unphysical below 0
        # Eccentricity is additionally capped at 1; truncnorm takes its
        # bounds in standardized units, i.e. (bound - loc) / scale.
        upper = (1.0 - pval) / err if key == const.ECC_KEY else np.inf
        a, b = (0 - pval) / err, upper
        dist = truncnorm(a, b, loc=pval, scale=err)
    else:
        dist = norm(loc=pval, scale=err)
    return dist.rvs(ndraws)
def get_param_error(planet: Series, pkey: str) -> float:
    """
    Average the upper and lower uncertainties of one parameter.
    The archive stores them under the ``<pkey>err1`` and ``<pkey>err2`` keys.
    :param planet: Series with planet parameter info
    :type planet: Series
    :param pkey: Key of the parameter for which we want the error
    :type pkey: str
    :return: Mean of the absolute upper/lower uncertainties
    :rtype: float
    """
    errors = [abs(planet[f"{pkey}err{i}"]) for i in (1, 2)]
    return np.mean(errors)
def get_orbit_params(planet: Series, n_samples: int = 0) -> Series:
    """
    Get parameters that are required to compute RV curve from archive row
    :param planet: Archive row with planet info stored in a pandas series
    :type planet: Series
    :param n_samples: Number of samples to draw (use best value if 0), defaults to 0
    :type n_samples: int, optional
    :return: Pandas series with planet orbit parameter, either with single values or with an array for each
    :rtype: Series
    :raises ValueError: Raises error if parameter value or error in NaN
    """
    orbpars = planet.copy()
    # Check that parameters are not missing
    special_cases = [const.TP_KEY]  # Parameters that we do not check directly
    regular_params = [p for p in const.ORB_KEYS if p not in special_cases]
    for pkey in regular_params:
        if np.isnan(orbpars[pkey]):
            raise ValueError(f"Parameter {pkey} for {orbpars['pl_name']} is NaN")
        # Draw parameters from normal (or truncated normal if P, ecc, K)
        # distribution to account for uncertainty
        err = get_param_error(orbpars, pkey)
        # Only replace the best value with draws when a usable error exists.
        if n_samples > 0 and err != 0.0 and not np.isnan(err):
            orbpars[pkey] = draw_param(orbpars, pkey, n_samples)
    # TP might come from transit time, so handle separately
    # We use orpars, so uncertainties from draw_param are propagated
    orbpars[const.TP_KEY] = get_tp_param(orbpars, n_samples=n_samples)
    # Keep only keys that we use to calculate orbit
    orbpars = orbpars[const.ORB_KEYS]
    # If some parameters have scalars and others have array,
    # Repeat scalars to arrays of same length
    try:
        # .str.len() gives NaN for scalar entries and the length for arrays,
        # so the NaN mask identifies the scalars to broadcast.
        lvals = orbpars.str.len()
        plen = int(lvals.max())  # All non-zero should be max
        scalar_mask = lvals.isna()
        orbpars[scalar_mask] = orbpars[scalar_mask].apply(lambda x: np.full(plen, x))
    except AttributeError:
        # if all scalars, nothing to do (just filter to dict)
        pass
    return orbpars
def rv_model_from_samples(rv_samples: np.ndarray) -> np.ndarray:
    """
    Get RV median model and 1-sigma envelope from many sample RV curves.

    :param rv_samples: Input RV samples with shape (n_samples, n_pts)
        (samples along axis 0, matching the percentile call below —
        BUG FIX: the previous docstring stated the transposed shape)
    :type rv_samples: np.ndarray
    :return: Array of shape (n_pts, 2): first column is the median model,
        second column is the symmetrized 1-sigma uncertainty
    :rtype: np.ndarray
    """
    # Median and the 16th/84th percentiles over the sample axis give the
    # model curve and its 1-sigma envelope.
    rv_16th, rv_med, rv_84th = np.percentile(rv_samples, [16, 50, 84], axis=0)
    rv_err_lo = rv_med - rv_16th
    rv_err_hi = rv_84th - rv_med
    # Collapse the (possibly asymmetric) envelope into one error bar per point.
    rv_err = np.mean([rv_err_hi, rv_err_lo], axis=0)
    return np.array([rv_med, rv_err]).T
def get_rv_signal(
    t: np.ndarray, params: Series, return_samples: bool = False
) -> np.ndarray:
    """
    Get RV signal from orbit parameters of a planet
    :param t: Time points where we calculate the model
    :type t: np.ndarray
    :param params: Orbit parameters, either as floats or arrays
    :type params: Series
    :param return_samples: Return all samples when using arrays of parameters, defaults to False
    :type return_samples: bool, optional
    :return: Single RV curve (if parameters are scalar), all RV samples (if return_samples=True)
    or RV curve with model (if return_samples=False and one array per parameter)
    :rtype: np.ndarray
    :raises TypeError: Raises TypeError if param has a mix of scalar and array elements
    """
    t = np.atleast_1d(t)
    # Each entry must be either a scalar (single curve) or an array of draws.
    scalar_mask = params.apply(np.isscalar)
    if scalar_mask.any() and not scalar_mask.all():
        raise TypeError(
            "params must contain only scalars or only arrays, not a mix of both"
        )
    is_scalar = scalar_mask.all()
    # If not scalar, dataframe will be more convenient
    if not is_scalar:
        params = DataFrame(params.to_dict())
    # Make sure parameters are properly ordered for radvel
    # Keep after dict conversion because dicts are unordered
    params = params[const.ORB_KEYS]
    if is_scalar:
        orbel = params.to_list()
        rv = rv_drive(t, orbel)
        return rv
    # Store RVs for each parameter sample in n_samples x len(t) array
    rv_samples = np.empty((len(params), len(t)))
    for i, pseries in params.iterrows():
        orbel = pseries.to_list()
        rv_samples[i] = rv_drive(t, orbel)
    if return_samples:
        return rv_samples
    else:
        # Collapse the draws into a median curve plus 1-sigma error column.
        return rv_model_from_samples(rv_samples)
|
from sqlalchemy import Column, Integer, String, Date, DateTime, ForeignKey, func, Text, Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
import datetime
Base = declarative_base()
class Person(Base):
    """A registered member: credentials, profile data, and authored content."""
    __tablename__ = 'Persons'
    PersonID = Column(String(50), primary_key = True)
    FirstName = Column(String(50))
    LastName = Column(String(50))
    Password = Column(String(20))
    Department = Column(String(20))
    Position = Column(String(20))
    Office = Column(String(20))
    PhoneNumber = Column(String(20))
    Email = Column(String(50))
    IsAdmin = Column(Boolean)
    Confirmed = Column(Boolean)
    Skill1 = Column(String(20))
    Skill2 = Column(String(20))
    Skill3 = Column(String(20))
    Interest1 = Column(String(20))
    Interest2 = Column(String(20))
    # Reverse relationships to content this person created or contributed to.
    Campaigns = relationship("Campaign", primaryjoin= "Campaign.Creator == Person.PersonID", backref="Person")
    ContributedTo = relationship("Contribution", primaryjoin= "Contribution.ContributorID == Person.PersonID", backref="Contributor")
    Ventures = relationship("Venture", primaryjoin = "Venture.CreatorID == Person.PersonID", backref="Creator")
    Comments = relationship('Comment', primaryjoin = 'Comment.Author == Person.PersonID', backref='Commentator')
    ChallengesCreated = relationship('Challenge', primaryjoin = 'Challenge.Creator == Person.PersonID', backref = 'Initiator')
    Discussions = relationship('Discussion', primaryjoin = 'Discussion.CreatorID == Person.PersonID', backref = 'Creator')
    DiscussionEntries = relationship('DiscussionEntry', primaryjoin = 'DiscussionEntry.Author == Person.PersonID', backref='Commentator')

    def get_monthly_contribution(self):
        """Sum of this person's contributions made during the current month."""
        # BUG FIX: the accumulator was named ``sum``, shadowing the builtin;
        # use the builtin over a filtered generator instead.
        today = datetime.datetime.now()
        return sum(
            c.Contribution
            for c in self.ContributedTo
            if c.SubTime.year == today.year and c.SubTime.month == today.month
        )

    def __init__(self, PersonID, FirstName, LastName, Password, Email):
        """New accounts start unconfirmed and without admin rights."""
        self.PersonID = PersonID
        self.FirstName = FirstName
        self.LastName = LastName
        self.Password = Password
        self.Email = Email
        self.IsAdmin = False
        self.Confirmed = False

    # NOTE(review): the methods below look like Flask-Login user hooks —
    # confirm against the consuming framework.
    def is_authenticated(self):
        # Only confirmed accounts count as authenticated.
        return self.Confirmed

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.PersonID

    def __repr__(self):
        return self.FirstName
class Challenge(Base):
    """A challenge posted by a person; discussions respond to it."""
    __tablename__ = "Challenges"
    Creator = Column(String(50), ForeignKey('Persons.PersonID'))
    ChallengeName = Column(String(250), primary_key = True)
    DateMade = Column(DateTime)
    Discussions = relationship('Discussion', primaryjoin = "Challenge.ChallengeName == Discussion.ChallengeName", backref = "RespondingTo")

    def __init__(self, ChallengeName, Creator, DateMade):
        self.ChallengeName = ChallengeName
        self.Creator = Creator
        self.DateMade = DateMade

    def getNumDiscussions(self):
        # Number of discussions attached to this challenge.
        return len(self.Discussions)
class Discussion(Base):
    """A discussion thread opened under a challenge."""
    __tablename__ = "Discussions"
    # Composite primary key: challenge + topic + creator.
    ChallengeName = Column(String(250), ForeignKey('Challenges.ChallengeName'), primary_key = True)
    Topic = Column(String(250), primary_key = True)
    CreatorID = Column(String(50), ForeignKey('Persons.PersonID'), primary_key = True, autoincrement = False)
    DateCreated = Column(DateTime)
    Description = Column(Text)
    Entries = relationship('DiscussionEntry', primaryjoin = 'DiscussionEntry.ParentPost == Discussion.Topic', backref='Discussion')

    def __init__(self, ChallengeName, Topic, CreatorID, DateCreated, Description):
        self.ChallengeName = ChallengeName
        self.Topic = Topic
        self.CreatorID = CreatorID
        self.DateCreated = DateCreated
        self.Description = Description
class DiscussionEntry(Base):
    """A single reply inside a discussion thread."""
    __tablename__ = 'DiscussionEntries'
    Key = Column(Integer, primary_key = True)
    ParentPost = Column(String(50), ForeignKey('Discussions.Topic'), primary_key = True)
    Author = Column(String(20), ForeignKey('Persons.PersonID'), primary_key = True)
    SubTime = Column(DateTime, primary_key = True)
    Content = Column(Text)

    def __init__(self, Topic, Author, Content):
        self.ParentPost = Topic
        self.Author = Author
        # Submission time is stamped at construction.
        self.SubTime = datetime.datetime.now()
        self.Content = Content
class Venture(Base):
    """A venture created by a person (title, blurb, backer count)."""
    __tablename__ = 'Ventures'
    Title = Column(String(50), primary_key = True)
    ShortDesc = Column(String(300))
    Backers = Column(Integer)
    CreatorID = Column(String(50), ForeignKey('Persons.PersonID'))
class Campaign(Base):
    """A fundraising campaign with contributions and comments attached."""
    __tablename__ = 'Campaigns'
    CampaignTitle = Column(String(50), primary_key = True)
    Description = Column(String(300))
    DatePosted = Column(DateTime)
    Creator = Column(String(50), ForeignKey('Persons.PersonID'))
    IndividualContributions = relationship("Contribution", primaryjoin= "Contribution.CampaignName == Campaign.CampaignTitle", backref="ContributionTarget")
    Comments = relationship('Comment', primaryjoin = 'Comment.ParentPost == Campaign.CampaignTitle', backref='TopicCampaign')

    def __init__(self, CampaignTitle, Description, DatePosted, Creator):
        self.CampaignTitle = CampaignTitle
        self.Description = Description
        self.DatePosted = DatePosted
        self.Creator = Creator

    def getContributionSum(self):
        """Total amount contributed across all individual contributions."""
        # BUG FIX: the accumulator was named ``sum``, shadowing the builtin;
        # use the builtin over a generator instead.
        return sum(c.Contribution for c in self.IndividualContributions)

    def getNumBackers(self):
        """Number of distinct contributors to this campaign."""
        temp = [instance.ContributorID for instance in self.IndividualContributions]
        return len(set(temp))
class Contribution(Base):
    """A single monetary contribution by a person to a campaign."""
    __tablename__ = 'Contributions'
    # Composite key: contributor + campaign + submission time.
    ContributorID = Column(String(50), ForeignKey('Persons.PersonID'), primary_key = True)
    CampaignName = Column(String(20), ForeignKey('Campaigns.CampaignTitle'), primary_key = True)
    Contribution = Column(Integer)
    SubTime = Column(DateTime, primary_key = True)

    def __init__(self, ContributorID, CampaignName, Contribution, SubTime):
        self.ContributorID = ContributorID
        self.CampaignName = CampaignName
        self.Contribution = Contribution
        self.SubTime = SubTime
class Comment(Base):
    """A comment posted by a person on a campaign."""
    __tablename__ = 'Comments'
    Key = Column(Integer, primary_key = True)
    ParentPost = Column(String(50), ForeignKey('Campaigns.CampaignTitle'), primary_key = True)
    Author = Column(String(20), ForeignKey('Persons.PersonID'), primary_key = True)
    SubTime = Column(DateTime, primary_key = True)
    Content = Column(Text)

    def __init__(self, ParentPost, Author, Content):
        self.ParentPost = ParentPost
        self.Author = Author
        # Submission time is stamped at construction.
        self.SubTime = datetime.datetime.now()
        self.Content = Content
import discord
from discord.ext import commands
from cogs.utils.dataIO import fileIO, dataIO
from cogs.utils.chat_formatting import box
from cogs.utils import checks
from __main__ import send_cmd_help
import logging
import os
log = logging.getLogger("red.streetcred")
class StreetCred:
    """Score/upvote tracking cog for Red-DiscordBot.

    Original code by Squid-Plugins, modded by Mak-and-Cheese.
    Scores are persisted to data/streetcred/scores.json after every mutation.
    """
    def __init__(self, bot):
        self.bot = bot
        self.scores = fileIO("data/streetcred/scores.json", "load")
        self.settings = fileIO("data/streetcred/settings.json", 'load')
        # Backfill any fields missing from previously saved score entries.
        for key in self.scores.keys():
            self._add_entry(key)
    def process_scores(self, member, is_downvote):
        """Add CRED_YIELD points to *member*'s score (subtract on a downvote)."""
        member_id = member.id
        if not is_downvote:
            score_to_add = self.settings["CRED_YIELD"]
        else:
            score_to_add = -(self.settings["CRED_YIELD"])
        if member_id not in self.scores:
            self._add_entry(member_id)
        self.scores[member_id]["score"] += score_to_add
        fileIO("data/streetcred/scores.json", "save", self.scores)
    def _give_upvote(self, member, is_remove):
        """Credit the voter for giving an upvote (or undo it on removal)."""
        member_id = member.id
        # BUGFIX: the entry must exist before reading "upvotes_given"; previously
        # this line ran before the membership check and raised KeyError for
        # members with no record yet.
        if member_id not in self.scores:
            self._add_entry(member_id)
        upvotes_given = self.scores[member_id]["upvotes_given"]
        if not is_remove:
            score_to_add = self.settings["UPVOTING_YIELD"]
        else:
            score_to_add = -(self.settings["UPVOTING_YIELD"])
        self.scores[member_id]["upvotes_given"] += score_to_add
        # NOTE(review): `x % 1 == 0` is always true for whole numbers, so this
        # fires whenever upvotes_given > 0 — confirm the intended threshold.
        if upvotes_given % 1 == 0 and upvotes_given > 0:
            self.process_scores(member, False)
            self.scores[member_id]["upvotes_given"] = 0
        fileIO("data/streetcred/scores.json", "save", self.scores)
    def _process_upvote(self, member, upvote):
        """Adjust *member*'s received-upvote counter by *upvote* (+1/-1)."""
        member_id = member.id
        if member_id not in self.scores:
            self._add_entry(member_id)
        self.scores[member_id]["upvotes"] += upvote
        fileIO("data/streetcred/scores.json", "save", self.scores)
    def _process_downvote(self, member, downvote):
        """Adjust *member*'s received-downvote counter by *downvote* (+1/-1)."""
        member_id = member.id
        if member_id not in self.scores:
            self._add_entry(member_id)
        self.scores[member_id]["downvotes"] += downvote
        fileIO("data/streetcred/scores.json", "save", self.scores)
    def _add_entry(self, member):
        """Create, or backfill missing fields of, the score record for a member id."""
        member_id = member
        if member_id in self.scores:
            if "score" not in self.scores.get(member_id, {}):
                self.scores[member_id]["score"] = 0
            if "upvotes" not in self.scores.get(member_id, {}):
                self.scores[member_id]["upvotes"] = 0
            if "downvotes" not in self.scores.get(member_id, {}):
                self.scores[member_id]["downvotes"] = 0
            if "upvotes_given" not in self.scores.get(member_id, {}):
                self.scores[member_id]["upvotes_given"] = 0
        else:
            self.scores[member_id] = {}
            self.scores[member_id]["score"] = 0
            self.scores[member_id]["upvotes"] = 0
            self.scores[member_id]["downvotes"] = 0
            self.scores[member_id]["upvotes_given"] = 0
        fileIO("data/streetcred/scores.json", "save", self.scores)
    def _add_reason(self, member_id, reason):
        """Prepend *reason* to the member's reason list, keeping at most five."""
        if reason.lstrip() == "":
            return
        if member_id in self.scores:
            if "reasons" in self.scores.get(member_id, {}):
                old_reasons = self.scores[member_id].get("reasons", [])
                new_reasons = [reason] + old_reasons[:4]
                self.scores[member_id]["reasons"] = new_reasons
            else:
                self.scores[member_id]["reasons"] = [reason]
        else:
            self.scores[member_id] = {}
            self.scores[member_id]["reasons"] = [reason]
    def _fmt_reasons(self, reasons):
        """Format reasons as a numbered code block; return None when empty."""
        if len(reasons) == 0:
            return None
        ret = "```Latest Reasons:\n"
        for num, reason in enumerate(reasons):
            ret += "\t" + str(num + 1) + ") " + str(reason) + "\n"
        return ret + "```"
    @commands.command(pass_context=True)
    async def streetcred(self, ctx):
        """Checks a user's streetcred, requires @ mention
        Example: !streetcred @Red"""
        if len(ctx.message.mentions) != 1:
            await send_cmd_help(ctx)
            return
        member = ctx.message.mentions[0]
        if self.scores.get(member.id, 0) != 0:
            member_dict = self.scores[member.id]
            await self.bot.say(member.name + " has " +
                               str(member_dict["score"]) + " points.")
            reasons = self._fmt_reasons(member_dict.get("reasons", []))
            if reasons:
                await self.bot.send_message(ctx.message.author, reasons)
        else:
            await self.bot.say(member.name + " has no street cred!")
    @commands.command(pass_context=True)
    async def upvotes(self, ctx):
        """Checks a user's upvote ratio, requires @ mention
        Example: !upvotes @Red"""
        if len(ctx.message.mentions) != 1:
            await send_cmd_help(ctx)
            return
        member = ctx.message.mentions[0]
        # BUGFIX: the same condition was duplicated on both sides of an `or`.
        if self.scores.get(member.id, 0) != 0:
            member_dict = self.scores[member.id]
            await self.bot.say(member.name + " has " +
                               str(member_dict["upvotes"]) + " upvotes and " +
                               str(member_dict["downvotes"]) + " downvotes.")
            reasons = self._fmt_reasons(member_dict.get("reasons", []))
            if reasons:
                await self.bot.send_message(ctx.message.author, reasons)
        else:
            await self.bot.say(member.name + " has no upvotes/downvotes.")
    @commands.command(pass_context=True)
    async def credlb(self, ctx, decending: bool=True):
        """Prints the streetcred leaderboard
        Example:
        leaderboard - displays scores top - bottom
        leaderboard False - displays scores bottom to top"""
        server = ctx.message.server
        member_ids = [m.id for m in server.members]
        # Only rank members of this server that have a score record.
        karma_server_members = [key for key in self.scores.keys()
                                if key in member_ids]
        log.debug("Maki-Cogs server members:\n\t{}".format(
            karma_server_members))
        names = list(map(lambda mid: discord.utils.get(server.members, id=mid),
                         karma_server_members))
        log.debug("Names:\n\t{}".format(names))
        scores = list(map(lambda mid: self.scores[mid]["score"],
                          karma_server_members))
        log.debug("Scores:\n\t{}".format(scores))
        upvotes = list(map(lambda mid: self.scores[mid]["upvotes"],
                           karma_server_members))
        log.debug("Upvotes:\n\t{}".format(upvotes))
        downvotes = list(map(lambda mid: self.scores[mid]["downvotes"],
                             karma_server_members))
        log.debug("Downvotes:\n\t{}".format(downvotes))
        # Sort by score and keep the top ten.
        body = sorted(zip(names, scores, upvotes, downvotes), key=lambda tup: tup[1],
                      reverse=decending)[:10]
        karmaboard = ""
        place = 1
        for entry in body:
            karmaboard += str(place).rjust(2) + ". "
            karmaboard += str(entry[0]) + "\n"
            karmaboard += "\t\t" + "score: " + str(entry[1]).rjust(5) + " | "
            karmaboard += "+" + str(entry[2]).rjust(5) + " | "
            karmaboard += "-" + str(entry[3]).rjust(5) + "\n"
            place += 1
        if karmaboard != "":
            await self.bot.say(box(karmaboard, lang="py"))
        else:
            await self.bot.say("There are no entries.")
    @commands.group(pass_context=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def credset(self, ctx):
        """Manage streetcred settings"""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
    @credset.command(name="upemote", pass_context=True, no_pm=True)
    async def _msgvote_upemoji(self, ctx, emoji):
        """Set the upvote emote"""
        emoji = str(self.fix_custom_emoji(ctx.message.server, emoji))
        self.settings["UP_EMOTE"] = emoji
        fileIO('data/streetcred/settings.json', 'save', self.settings)
        await self.bot.say("Upvote emoji set to: " + emoji)
    @credset.command(name="downemote", pass_context=True, no_pm=True)
    async def _msgvote_downemoji(self, ctx, emoji):
        """Set the downvote emote"""
        emoji = str(self.fix_custom_emoji(ctx.message.server, emoji))
        self.settings["DN_EMOTE"] = emoji
        fileIO('data/streetcred/settings.json', 'save', self.settings)
        await self.bot.say("Downvote emoji set to: " + emoji)
    @credset.command(pass_context=True, name="respond")
    async def _streetcredset_respond(self, ctx):
        """Toggles if bot will respond when points get added/removed"""
        # BUGFIX: with pass_context=True the callback receives ctx; the
        # parameter was missing, so every invocation raised TypeError.
        if self.settings['RESPOND_ON_POINT']:
            await self.bot.say("Responses disabled.")
        else:
            await self.bot.say('Responses enabled.')
        self.settings['RESPOND_ON_POINT'] = \
            not self.settings['RESPOND_ON_POINT']
        fileIO('data/streetcred/settings.json', 'save', self.settings)
    @credset.command(pass_context=True, name="upyield")
    async def _streetcredset_yield(self, ctx, kpu: int):
        """Amount of streetcred per upvote
        Example: yield 1"""
        self.settings["CRED_YIELD"] = kpu
        await self.bot.say("streetcred is now set to {} per upvote.".format(kpu))
        fileIO('data/streetcred/settings.json', 'save', self.settings)
    @credset.command(pass_context=True, name="upbonus")
    async def _streetcredset_upvotebonus(self, ctx, kpu: float):
        """The bonus the user gets upon upvoting
        Example: upvotebonus 0.2"""
        self.settings["UPVOTING_YIELD"] = kpu
        await self.bot.say("Upvote bonus is now set to {} per upvote.".format(kpu))
        fileIO('data/streetcred/settings.json', 'save', self.settings)
    def fix_custom_emoji(self, server, emoji):
        """Resolve a "<:name:id>" string to the server's custom emoji object."""
        if emoji[:2] != "<:":
            return emoji
        return [r for r in server.emojis if r.name == emoji.split(':')[1]][0]
    async def on_reaction_add(self, reaction, user):
        """Score the message author (and credit the voter) when a vote emote is added."""
        if user == self.bot.user:
            return
        if reaction.emoji == self.fix_custom_emoji(reaction.message.server, self.settings["UP_EMOTE"]):
            self.process_scores(reaction.message.author, False)
            self._process_upvote(reaction.message.author, 1)
            self._give_upvote(user, False)
        elif reaction.emoji == self.fix_custom_emoji(reaction.message.server, self.settings["DN_EMOTE"]):
            self.process_scores(reaction.message.author, True)
            self._process_downvote(reaction.message.author, 1)
    async def on_reaction_remove(self, reaction, user):
        """Reverse the scoring applied in on_reaction_add."""
        if user == self.bot.user:
            return
        if reaction.emoji == self.fix_custom_emoji(reaction.message.server, self.settings["UP_EMOTE"]):
            self.process_scores(reaction.message.author, True)
            self._process_upvote(reaction.message.author, -1)
            self._give_upvote(user, True)
        elif reaction.emoji == self.fix_custom_emoji(reaction.message.server, self.settings["DN_EMOTE"]):
            self.process_scores(reaction.message.author, False)
            self._process_downvote(reaction.message.author, -1)
def check_folder():
    """Create the data/streetcred directory on first run."""
    folder = "data/streetcred"
    if not os.path.exists(folder):
        print("Creating data/streetcred folder...")
        os.makedirs(folder)
def check_file():
    """Create default scores.json and settings.json when they do not exist."""
    scores = {}
    settings = {"RESPOND_ON_POINT": True, "CRED_YIELD": 1, "UPVOTING_YIELD": 0.2, "UP_EMOTE": "\ud83d\udc4d", "DN_EMOTE": "\ud83d\udc4e"}
    f = "data/streetcred/scores.json"
    if not fileIO(f, "check"):
        print("Creating default streetcred's scores.json...")
        fileIO(f, "save", scores)
    f = "data/streetcred/settings.json"
    if not fileIO(f, "check"):
        # BUGFIX: the message previously (and wrongly) said scores.json.
        print("Creating default streetcred's settings.json...")
        fileIO(f, "save", settings)
def setup(bot):
    # Red cog entry point: ensure the data files exist, then register the cog.
    check_folder()
    check_file()
    bot.add_cog(StreetCred(bot))
|
#!/usr/bin/env python
# coding:utf-8
# Author: Alejandro Nolla - z0mbiehunt3r
# Purpose: Example for detecting language using a stopwords based approach
# Created: 15/05/13
try:
from nltk import wordpunct_tokenize
from nltk.corpus import stopwords
except ImportError:
print('[!] You need to install nltk (http://nltk.org/index.html)')
# ----------------------------------------------------------------------
def _calculate_languages_ratios(text):
    """
    Score *text* against every stopword list shipped with nltk.

    The result looks like {'french': 2, 'spanish': 4, 'english': 0}: for each
    language, the number of that language's unique stopwords seen in the text.

    @param text: Text whose language want to be detected
    @type text: str
    @return: Dictionary with languages and unique stopwords seen in analyzed text
    @rtype: dict
    """
    # wordpunct_tokenize splits punctuation into separate tokens, e.g.
    # "That's" -> ['That', "'", 's'], so stopwords still match cleanly.
    words = {token.lower() for token in wordpunct_tokenize(text)}
    return {
        language: len(words & set(stopwords.words(language)))
        for language in stopwords.fileids()
    }
# ----------------------------------------------------------------------
def detect_language(text):
    """
    Guess the language of *text* using a stopwords-based approach: count how
    many unique stopwords of each language appear, and return the top scorer.

    @param text: Text whose language want to be detected
    @type text: str
    @return: Most scored language guessed
    @rtype: str
    """
    ratios = _calculate_languages_ratios(text)
    return max(ratios, key=ratios.get)
if __name__ == '__main__':
    # Demo: run detection on a long sample passage; should print the guessed
    # language name (for this text, "english").
    text = '''
There's a passage I got memorized. Ezekiel 25:17. "The path of the righteous man is beset on all sides\
by the inequities of the selfish and the tyranny of evil men. Blessed is he who, in the name of charity\
and good will, shepherds the weak through the valley of the darkness, for he is truly his brother's keeper\
and the finder of lost children. And I will strike down upon thee with great vengeance and furious anger\
those who attempt to poison and destroy My brothers. And you will know I am the Lord when I lay My vengeance\
upon you." Now... I been sayin' that shit for years. And if you ever heard it, that meant your ass. You'd\
be dead right now. I never gave much thought to what it meant. I just thought it was a cold-blooded thing\
to say to a motherfucker before I popped a cap in his ass. But I saw some shit this mornin' made me think\
twice. See, now I'm thinking: maybe it means you're the evil man. And I'm the righteous man. And Mr.\
9mm here... he's the shepherd protecting my righteous ass in the valley of darkness. Or it could mean\
you're the righteous man and I'm the shepherd and it's the world that's evil and selfish. And I'd like\
that. But that shit ain't the truth. The truth is you're the weak. And I'm the tyranny of evil men.\
But I'm tryin', Ringo. I'm tryin' real hard to be the shepherd.
'''
    language = detect_language(text)
    print(language)
def solution(l, t):
    # Find a contiguous run of l summing to t.
    # NOTE(review): this prints results in several branches instead of
    # returning them, and maps values back to indices via l.index(), which
    # only finds the FIRST occurrence of a value — verify against the
    # intended contract before relying on the output.
    # Your code here
    c = 0
    ra = 0
    m = set(l)       # used only to detect duplicate values in l
    s = 0
    d = 0
    if len(m) != len(l):
        # l contains duplicates: use rs(), which tracks positions explicitly.
        if sum(l) == t:
            ra = ([0,len(l)-1])
        else:
            for i in l:
                if ra == 0:
                    ra = rs(l[d:], t, l, d)
                d += 1
            print(ra)
    else:
        # all values unique: rec() returns the boundary VALUES of the run.
        for i in l:
            if ra == 0:
                ra = rec(l[c:], t)
            c += 1
    if ra == 0:
        return [-1, -1]
    else:
        print([l.index(ra[0]), l.index(ra[1])])
def rec(l,t):
    """Scan prefix sums of l; if some prefix sums to t, return the pair
    [first value, last value] of that prefix (last match wins), else 0."""
    total = 0
    result = 0
    for idx, value in enumerate(l):
        total += value
        if total == t:
            result = [l[0], l[idx]]
    return result
def rs(l,t,fl,d):
    """Prefix-sum search over l left-padded with d zeros; on a match returns
    (index of l's first value in the padded list, end index), else 0.
    Note: fl is accepted for call-compatibility but unused (as in the original)."""
    padded = [0] * d + l
    total = 0
    result = 0
    for idx, value in enumerate(padded):
        total += value
        if total == t:
            start = padded.index(l[0])
            result = (start, idx)
    return result
# Smoke test: prints the matching index pair (or returns [-1, -1]) for the sample list.
solution([6, 4, 1, 1, 1], 3)
def solution(total_lambs):
    """Return the absolute difference between the lengths of two payout
    sequences capped by total_lambs: Fibonacci-like (stingy) versus powers
    of two (generous). Inputs >= 10**9 short-circuit to 0."""
    if total_lambs >= 10**9:
        return 0
    # Generous payouts: 1, 2, 4, ... stop once the running total exceeds the cap.
    powers = []
    power_total = 0
    k = 0
    while k <= total_lambs:
        payout = 2 ** k
        powers.append(payout)
        power_total += payout
        if power_total > total_lambs:
            break
        k += 1
    # Stingy payouts: Fibonacci sequence seeded with 1, 1 (running total starts at 2).
    fibs = [1, 1]
    fib_total = 2
    i = 2
    while i <= total_lambs:
        fibs.append(fibs[i - 1] + fibs[i - 2])
        fib_total += int(fibs[i])
        if fib_total > total_lambs:
            break
        i += 1
    return abs(len(fibs) - len(powers))
|
import re
from collections import Counter
# Read the puzzle input: one "<lo>-<hi> <char>: <password>" record per line,
# split into (policy, password) pairs on the colon.
with open("puzzles/day2/puzzle_input.txt") as f:
    text = f.read()
data = [row.split(":") for row in text.split("\n")]
def checksum(check, password):
    """Return True when *password* satisfies the "<lo>-<hi> <char>" policy
    in *check*: the character must occur between lo and hi times, inclusive.
    """
    # Raw string avoids the invalid-escape-sequence warning for \d.
    m = re.search(r"(\d+)-(\d+) ([a-z])", check)
    lower, higher, char = m.groups()
    count = Counter(password).get(char, 0)
    return int(lower) <= count <= int(higher)
# Puzzle 1 answer: the number of passwords that satisfy their policy.
solution = [checksum(check, password) for check, password in data]
print(f"answer of puzzle 1 is:", Counter(solution).get(True))
|
# Copyright © 2021 to 2022 IOTIC LABS LTD. info@iotics.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/iotics-host-lib/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def get_version():
    """Return the package version taken from the VERSION environment variable.

    Reading an environment variable is acceptable here only because this
    module is deployed via Docker rather than packaged; for a packaged module
    the value would be re-read on every install from sources (*.tar.gz).

    Raises ValueError when VERSION is unset or empty.
    """
    version = os.environ.get('VERSION')
    if version:
        return version
    raise ValueError('The VERSION environment variable must be set and not empty')
if __name__ == '__main__':
    # Delegate to setuptools, injecting the build-time version.
    setup(version=get_version())
|
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
'''
Accepted
'''
class Solution:
    """LeetCode 226: invert (mirror) a binary tree in place."""
    def invertTreeHelper(self, root):
        # Guard clause instead of nesting: nothing to do for an empty subtree.
        if root is None:
            return
        # Swap this node's children, then recursively invert both subtrees.
        root.left, root.right = root.right, root.left
        self.invertTree(root.left)
        self.invertTree(root.right)
    def invertTree(self, root: TreeNode) -> TreeNode:
        self.invertTreeHelper(root)
        return root
|
#!/bin/python3
import sys
'''
Function
greatestCommonDivisor(n1,n2)
Parameters
n1 => larger number
n2 => smaller number
'''
def greatestCommonDivisor(n1,n2):
    """Euclid's algorithm, iteratively: gcd of n1 (larger) and n2 (smaller)."""
    while n2:
        n1, n2 = n2, n1 % n2
    return n1
'''
Function
sumOfMultiples(a,n)
Parameters
a => Number whose Sum Of Multiples we've to Find
n => It's the number of Multiples of a
'''
def sumOfMultiples(a,n):
    """Return a + 2a + ... + na: the sum of the first n multiples of a
    (arithmetic series, n*(n+1)/2 scaled by a)."""
    return a * n * (n + 1) // 2
# Driver: for each test case, sum all multiples of 3 or 5 below N using
# inclusion-exclusion (add multiples of 3 and 5, subtract multiples of the lcm).
t = int(input().strip())
for a0 in range(t):
    N = int(input().strip())
    n1 = 3
    n2 = 5
    gcd = greatestCommonDivisor(max(n1,n2),min(n1,n2))
    lcm = (n1*n2)//gcd
    # Now we've to find the sum of multiples of n1,n2,gcd respectively below N
    # So let's see how many multiples can be there of each n1,n2,gcd
    mul_n1 = (N-1)//n1
    mul_n2 = (N-1)//n2
    mul_lcm = (N-1)//lcm
    ans = sumOfMultiples(n1,mul_n1) + sumOfMultiples(n2,mul_n2) - sumOfMultiples(lcm,mul_lcm)
    print(ans)
|
import itertools
from cartographer.utils.version import get_default_version
class PostedDocument(object):
    """This is a reader for JSON API Documents"""
    def __init__(self, json_data, version=None):
        self.json_data = json_data
        self.version = get_default_version() if version is None else version
    def data(self):
        """The document's primary data as PostedResource(s); list in, list out."""
        payload = self.json_data["data"]
        if isinstance(payload, list):
            return [PostedResource(item, self) for item in payload]
        return PostedResource(payload, self)
    def all_resource_json_data(self):
        """Iterate the raw JSON of every resource: primary data plus "included"."""
        payload = self.json_data["data"]
        primary = payload if isinstance(payload, list) else [payload]
        chained = itertools.chain(primary)
        if "included" in self.json_data:
            chained = itertools.chain(chained, self.json_data["included"])
        return chained
    def find_resource_by_type_and_id(self, resource_type, resource_id):
        """Return the PostedResource matching (type, id) anywhere in the document, else None."""
        for raw in self.all_resource_json_data():
            if raw.get("type") == resource_type and raw.get("id") == resource_id:
                return PostedResource(raw, self)
        return None
class PostedResource(object):
    """Represents a single object in a JSON API Document"""
    def __init__(self, json_data, document):
        self.json_data = json_data
        self.document = document
    def resource_type(self):
        return self.json_data.get("type")
    def attributes(self):
        """The resource's attributes object ({} when absent)."""
        return self.json_data.get("attributes", {})
    def attribute(self, name):
        """A single attribute value, or None when missing."""
        return self.attributes().get(name, None)
    def assert_type(self, resource_type, exception=Exception):
        """Raise *exception* unless this resource has the given type; returns self for chaining."""
        actual = self.resource_type()
        if actual != resource_type:
            raise exception("Expected a " + resource_type + ", but got a " + actual)
        return self
    def relationships(self):
        return self.json_data.get("relationships", {})
    def relationship(self, relationship_name):
        """The named relationship as a PostedRelationship, or None when absent."""
        rels = self.relationships()
        if relationship_name in rels:
            return PostedRelationship(rels[relationship_name], self.document)
        return None
    def relationship_id(self, relationship_name):
        rel = self.relationship(relationship_name)
        return rel.relationship_id() if rel else None
    def related_resource(self, relationship_name):
        rel = self.relationship(relationship_name)
        return rel.resource() if rel else None
class PostedRelationship(object):
    """Represents a named relationship in a JSON API Document"""
    def __init__(self, json_data, document):
        self.json_data = json_data
        self.document = document
    def relationship_id(self):
        """The relationship's linkage as PostedRelationshipID(s); None when there is no "data" key."""
        if "data" not in self.json_data:
            return None
        datum = self.json_data["data"]
        if isinstance(datum, list):
            return [PostedRelationshipID(d, self.document) for d in datum]
        return PostedRelationshipID(datum, self.document)
    def resource(self):
        """Resolve the linkage to full resource(s) looked up in the parent document."""
        linkage = self.relationship_id()
        if isinstance(linkage, list):
            return [one.related_resource() for one in linkage]
        return linkage.related_resource()
class PostedRelationshipID(object):
    """Represents the actual linkage data (type/id pair) of a relationship in a JSON API Document"""
    def __init__(self, json_data, document):
        self.json_data = json_data
        self.document = document
    def resource_type(self):
        return self.json_data.get("type")
    def resource_id(self):
        return self.json_data.get("id")
    def assert_type(self, resource_type, exception=Exception):
        """Raise *exception* unless this linkage has the given type; returns self for chaining."""
        actual = self.resource_type()
        if actual != resource_type:
            raise exception("Expected a " + resource_type + ", but got a " + actual)
        return self
    def related_resource(self):
        """Look up the full resource this linkage points at within the parent document."""
        return self.document.find_resource_by_type_and_id(self.resource_type(), self.resource_id())
|
import random
# Module-level game state: secret target number, remaining points, and a
# direction flag (com) set by score() after each guess.
print("\nGuess-the-number\n")
name = input("Enter your name:\n")
comp = random.randrange(100)
points = 10
com = 0
def score():
    """Compare the global guess to the target; set the direction flag and deduct a point."""
    global guess,com,points
    # com == 1 means the guess was too high; com == 0 means too low.
    if guess> comp:
        com = 1
        points -= 1
    elif guess < comp:
        com= 0
        points -= 1
def file_create():
    """Persist the final score to <name>.txt, append a "name:points" line to
    base.txt, then load the per-player record into the global `final`."""
    global final
    content = str({name:points})
    try:
        # "xt" (exclusive create) fails when the file already exists.
        f = open(f"{name}.txt","xt")
        f.write(content)
        f.close()
    except FileExistsError:
        f = open(f"{name}.txt","at")
        f.write(content)
        f.close()
    finally:
        try:
            a = open("base.txt","xt")
            # BUGFIX: previously wrote a set object ({f"..."}) here, which
            # raised TypeError and fell through to the append branch.
            a.write(f"{name}:{points}\n")
            a.close()
        except FileExistsError:
            a = open("base.txt","at")
            a.write(f"{name}:{points}\n")
            a.close()
        f = open(f"{name}.txt")
        final = f.read()
        f.close()
def main():
    """Game loop: read guesses until points run out or the number is found."""
    global guess,final,com
    guess = int(input("Enter a number between 1 and 100:\n"))
    if guess > 100 or guess <0:
        raise ValueError("Please enter number between 1 and 100")
    a = True
    while a:
        score()
        # com == 1: last guess was too high, so prompt for a lower number.
        if com == 1:
            guess = int(input(f"Enter a number below {guess}\n"))
        else:
            guess = int(input(f"Enter a number above {guess}\n"))
        if points == 0:
            print("You lost the game!")
            a = False
        if guess == comp:
            print("Won!")
            a = False
            file_create()
            print(f"You won by {points} points\nYour records are {final}")
if __name__ == "__main__":
    # Run the game only when executed directly.
    main()
|
import pytest # type: ignore
from helpers import mock_legacy_venv, run_pipx_cli
def test_upgrade(pipx_temp_env, capsys):
    # Upgrading a package that is not installed fails (truthy exit status);
    # after a successful install, the upgrade succeeds (falsy exit status).
    assert run_pipx_cli(["upgrade", "pycowsay"])
    assert not run_pipx_cli(["install", "pycowsay"])
    assert not run_pipx_cli(["upgrade", "pycowsay"])
@pytest.mark.parametrize("metadata_version", [None, "0.1"])
def test_upgrade_legacy_venv(pipx_temp_env, capsys, metadata_version):
    # Same as test_upgrade, but the venv is downgraded to a legacy metadata
    # format first; upgrade must still succeed.
    assert run_pipx_cli(["upgrade", "pycowsay"])
    assert not run_pipx_cli(["install", "pycowsay"])
    mock_legacy_venv("pycowsay", metadata_version=metadata_version)
    assert not run_pipx_cli(["upgrade", "pycowsay"])
def test_upgrade_suffix(pipx_temp_env, capsys):
    # With a suffixed install, only the suffixed name is upgradable; the
    # bare name fails.
    name = "pycowsay"
    suffix = "_a"
    assert not run_pipx_cli(["install", name, f"--suffix={suffix}"])
    assert run_pipx_cli(["upgrade", f"{name}"])
    assert not run_pipx_cli(["upgrade", f"{name}{suffix}"])
@pytest.mark.parametrize("metadata_version", ["0.1"])
def test_upgrade_suffix_legacy_venv(pipx_temp_env, capsys, metadata_version):
    # Suffixed install downgraded to legacy metadata: the suffixed name must
    # still upgrade, the bare name must still fail.
    name = "pycowsay"
    suffix = "_a"
    assert not run_pipx_cli(["install", name, f"--suffix={suffix}"])
    mock_legacy_venv(f"{name}{suffix}", metadata_version=metadata_version)
    assert run_pipx_cli(["upgrade", f"{name}"])
    assert not run_pipx_cli(["upgrade", f"{name}{suffix}"])
|
"""
Copyright 2021 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from queue import Queue, Empty
import socket
from threading import Thread, Lock
from typing import List, Tuple
from util import get_logger
logger = get_logger(__name__.split(".", 1)[-1])
class Scanner:
    @staticmethod
    def scan(host_port: List[Tuple[str, int]], num_workers: int, timeout: float) -> List[Tuple[str, int]]:
        """Probe every (host, port) pair concurrently and return the reachable ones.

        num_workers == 0 means one worker per pair; in any case no more
        workers than there are pairs are started.
        """
        work = Queue()
        result_lock = Lock()
        reachable: List[Tuple[str, int]] = []
        for pair in host_port:
            work.put_nowait(pair)
        worker_count = len(host_port) if num_workers == 0 else min(num_workers, len(host_port))
        for _ in range(worker_count):
            ScanWorker(work, reachable, result_lock, timeout).start()
        work.join()
        return reachable
class ScanWorker(Thread):
    """Worker thread: pops (host, port) pairs off the queue, attempts a TCP
    connection to each, and records reachable pairs in the shared list."""
    def __init__(self, queue: Queue, alive_host_list: List[Tuple[str, int]], list_lock: Lock, timeout: float):
        super().__init__()
        self.queue = queue
        self.alive_host_list = alive_host_list
        self.list_lock = list_lock
        self.timeout = timeout
    def run(self):
        # BUGFIX: the loop previously tested `self.queue.not_empty`, which is
        # a threading.Condition object and therefore always truthy; the loop
        # only ever terminated via the Empty exception. Loop explicitly and
        # let Empty signal exhaustion.
        while True:
            try:
                host, port = self.queue.get_nowait()
            except Empty:
                return
            try:
                # A successful connect means the port is open; close immediately.
                socket.create_connection((host, port), timeout=self.timeout).close()
                with self.list_lock:
                    self.alive_host_list.append((host, port))
            except Exception as ex:
                logger.debug(host + ":" + str(port) + " - " + str(ex))
                continue
            finally:
                # Always acknowledge the item so queue.join() can complete.
                self.queue.task_done()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/mozman/svgwrite/blob/master/examples/ltattrie/text_font_size.py
# pip install svgwrite
import svgwrite
# http://www.w3.org/TR/2008/REC-CSS2-20080411/fonts.html#font-size-props
def create_svg(name):
    """Write an SVG file called *name* demonstrating a range of text font sizes."""
    svg_size = 900
    font_size = 20
    title = name + ': Example of text font_sizes'
    # Point sizes shown in the demonstration column.
    sample = (8, 10, 12, 15, 20, 30, 40, 50)
    dwg = svgwrite.Drawing(name, (svg_size, svg_size), debug=True)
    # background will be white.
    dwg.add(dwg.rect(insert=(0, 0), size=('100%', '100%'), fill='white'))
    # give the name of the example and a title.
    y = font_size + 5
    dwg.add(dwg.text(title, insert=(0, y), font_family="serif", font_size=font_size, fill='black'))
    for i, item in enumerate(sample):
        # font_size has many properties and adjustments which can be done. See the web page listed
        # above for the complete description.
        y += item + 10
        dwg.add(dwg.text(
            "font_size='" + str(item) + "'",
            insert=(font_size, y),
            font_family="serif",
            font_size=item,
            fill='black'
        ))
    y += font_size + 10
    dwg.add(dwg.text(
        'Since svg fonts are usually vectors, font_size can be very large.',
        insert=(0, y),
        font_family="serif",
        font_size=font_size,
        fill='black'
    ))
    # Show just the top of the single letter 'f'. The whole letter will not fit in the view area.
    y += font_size + 10
    dwg.add(dwg.text(
        'Enlarged small parts of a character are like looking through a microscope. (font_size=4000)',
        insert=(0, y),
        font_family="serif",
        font_size=font_size,
        fill='black'
    ))
    # Note the insert value of x is a negative number which is actually outside the view area.
    y += 2800 + 10
    dwg.add(dwg.text('f', insert=(-1100, y), font_family="serif", font_size=4000, fill='black'))
    dwg.save()
if __name__ == '__main__':
    import sys
    # BUGFIX: rstrip('.py') strips any trailing '.', 'p', 'y' CHARACTERS, not
    # the suffix (e.g. 'copy.py' -> 'co'); remove the extension as a suffix.
    prog_name = sys.argv[0]
    if prog_name.endswith('.py'):
        prog_name = prog_name[:-len('.py')]
    create_svg(prog_name + '.svg')
|
# Find the lowest fixed monthly payment, in multiples of 10, that pays off
# `balance` within 12 months at the given annual interest rate (a classic
# intro-CS credit-card exercise).
balance = 235
annualInterestRate = 0.25
remainingBalance = balance
monthlyInterestRate = annualInterestRate/12.0
mmp = 0
totalPaid = 0
while remainingBalance > 0:
    # Try the next candidate payment and re-simulate the year from scratch.
    remainingBalance = balance
    month = 0
    mmp += 10
    while month < 12 and remainingBalance > 0:
        month += 1
        remainingBalance = remainingBalance - mmp
        # Interest accrues on the balance remaining after the payment.
        remainingBalance = remainingBalance + (remainingBalance * monthlyInterestRate)
print("Lowest Payment: " + str(mmp))
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import initializers
from keras.engine.topology import InputSpec
from keras import backend as K
from ..utils.caps_utils import mixed_shape
from .. import probability_transformations as prob_trans
from ..capsule import Module
from .. import constraints, regularizers
# Todo: think about adding epsilon as parameter
class NearestCompetition(Module):
def __init__(self,
use_for_loop=True,
signal_regularizer=None,
diss_regularizer=None,
**kwargs):
self.use_for_loop = use_for_loop
self.output_regularizers = [regularizers.get(signal_regularizer),
regularizers.get(diss_regularizer)]
# be sure to call this at the end
super(NearestCompetition, self).__init__(module_input=True,
module_output=True,
support_sparse_signal=True,
support_full_signal=True,
**self._del_module_args(**kwargs))
def _build(self, input_shape):
if input_shape[0][1] != self.proto_number:
raise ValueError('The capsule number provided by input_shape is not equal the self.proto_number: '
'input_shape[0][1]=' + str(input_shape[0][1]) + ' != ' +
'self.proto_number=' + str(self.proto_number) + ". Maybe you forgot to call a routing"
" module.")
if input_shape[1][1] != self.proto_number:
raise ValueError('The prototype number provided by input_shape is not equal the self.proto_number: '
'input_shape[1][1]=' + str(input_shape[1][1]) + ' != ' +
'self.proto_number=' + str(self.proto_number))
if len(input_shape[1]) != 2:
raise ValueError("The dissimilarity vector must be of length two (batch, dissimilarities per prototype). "
"You provide: " + str(len(input_shape[1])) + ". Maybe you forgot to call a routing "
"module.")
self.input_spec = [InputSpec(shape=(None,) + tuple(input_shape[0][1:])),
InputSpec(shape=(None,) + tuple(input_shape[1][1:]))]
def _build_sparse(self, input_shape):
self._build(input_shape)
def _call(self, inputs, **kwargs):
if self.proto_number == self.capsule_number:
return inputs
else:
signals = inputs[0]
diss = inputs[1]
signal_shape = mixed_shape(signals)
if self.use_for_loop:
diss_stack = []
signals_stack = []
sub_idx = None
with K.name_scope('for_loop'):
for p in self._proto_distrib:
with K.name_scope('compute_slices'):
diss_ = diss[:, p[0]:(p[-1]+1)]
signals_ = K.reshape(signals[:, p[0]:(p[-1]+1), :],
[signal_shape[0] * len(p)] + list(signal_shape[2:]))
with K.name_scope('competition'):
if len(p) > 1:
with K.name_scope('competition_indices'):
argmin_idx = K.argmin(diss_, axis=-1)
if sub_idx is None:
sub_idx = K.arange(0, signal_shape[0], dtype=argmin_idx.dtype)
argmin_idx = argmin_idx + len(p) * sub_idx
with K.name_scope('dissimilarity_competition'):
diss_stack.append(K.expand_dims(K.gather(K.flatten(diss_), argmin_idx), -1))
with K.name_scope('signal_competition'):
signals_stack.append(K.gather(signals_, argmin_idx))
else:
diss_stack.append(diss_)
signals_stack.append(signals_)
diss = K.concatenate(diss_stack, 1)
with K.name_scope('signal_concatenation'):
signals = K.concatenate(signals_stack, 1)
signals = K.reshape(signals, [signal_shape[0], self.capsule_number] + list(signal_shape[2:]))
else:
with K.name_scope('dissimilarity_preprocessing'):
# extend if it is not equally distributed
if not self._equally_distributed:
# permute to first dimension is prototype (protos x batch)
diss = K.permute_dimensions(diss, [1, 0])
# gather regarding extension (preparing for reshape to block)
diss = K.gather(diss, self._proto_extension)
# permute back (max_proto_number x (max_proto_number * batch))
diss = K.permute_dimensions(diss, [1, 0])
# reshape to block form
diss = K.reshape(diss, [signal_shape[0] * self.capsule_number, self._max_proto_number_in_capsule])
with K.name_scope('competition_indices'):
# get minimal idx in each class and batch for element selection in diss and signals
argmin_idx = K.argmin(diss, axis=-1)
argmin_idx = argmin_idx + self._max_proto_number_in_capsule * \
K.arange(0, signal_shape[0] * self.capsule_number, dtype=argmin_idx.dtype)
with K.name_scope('dissimilarity_competition'):
# get minimal values in the form (batch x capsule)
diss = K.gather(K.flatten(diss), argmin_idx)
diss = K.reshape(diss, [signal_shape[0], self.capsule_number])
with K.name_scope('signal_preprocessing'):
# apply the same steps as above for signals
# get signals in: (batch x protos x dim1 x ... x dimN) --> out: (batch x capsule x dim1 x ... x dimN)
# extend if is not equally distributed
if not self._equally_distributed:
signals = K.permute_dimensions(signals, [1, 0] + list(range(2, len(signal_shape))))
signals = K.gather(signals, self._proto_extension)
signals = K.permute_dimensions(signals, [1, 0] + list(range(2, len(signal_shape))))
signals = K.reshape(signals,
[signal_shape[0] * self.capsule_number * self._max_proto_number_in_capsule]
+ list(signal_shape[2:]))
with K.name_scope('signal_competition'):
signals = K.gather(signals, argmin_idx)
signals = K.reshape(signals, [signal_shape[0], self.capsule_number] + list(signal_shape[2:]))
return {0: signals, 1: diss}
def _call_sparse(self, inputs, **kwargs):
    """Sparse signals need no special treatment here; defer to the dense path."""
    outputs = self._call(inputs, **kwargs)
    return outputs
def _compute_output_shape(self, input_shape):
    """Replace the prototype axis (axis 1) by the capsule count on both outputs.

    input_shape is a pair of shape tuples: (signal_shape, diss_shape).
    """
    signal_shape, diss_shape = list(input_shape[0]), list(input_shape[1])
    signal_shape[1] = self.capsule_number
    diss_shape[1] = self.capsule_number
    return [tuple(signal_shape), tuple(diss_shape)]
def _compute_output_shape_sparse(self, input_shape):
    """The sparse variant shares the dense output-shape logic."""
    return self._compute_output_shape(input_shape)
def get_config(self):
    """Return the serializable configuration, merged over the base Module config.

    Keys produced here take precedence over identically-named base keys,
    matching the original dict-concatenation order.
    """
    merged = dict(super(NearestCompetition, self).get_config())
    merged.update({'use_for_loop': self.use_for_loop,
                   'signal_regularizer': regularizers.serialize(self.output_regularizers[0]),
                   'diss_regularizer': regularizers.serialize(self.output_regularizers[1])})
    return merged
class GibbsCompetition(Module):
    """Fuse the prototypes of each capsule into a single output via a
    Gibbs/Boltzmann weighting of their dissimilarities.

    Per capsule, competition coefficients are a max-stabilized softmax over
    the negated dissimilarities scaled by a trainable, non-negative inverse
    temperature ``beta`` (one scalar per capsule).  The capsule's output
    signal and dissimilarity are the convex combinations of its prototypes'
    signals and dissimilarities under those coefficients.

    NOTE(review): ``_proto_distrib``, ``_proto_extension``,
    ``_capsule_extension``, ``_equally_distributed`` and
    ``_max_proto_number_in_capsule`` come from the base ``Module`` class
    outside this file; comments on them below are inferred — confirm
    against the base class.
    """
    # Todo: Test of Gibbs with scaling
    def __init__(self,
                 beta_initializer='ones',
                 beta_regularizer=None,
                 beta_constraint='NonNeg',
                 use_for_loop=True,
                 signal_regularizer=None,
                 diss_regularizer=None,
                 **kwargs):
        # Trainable inverse-temperature settings; the NonNeg default keeps
        # beta >= 0 so the Gibbs weighting stays well defined.
        self.beta_initializer = initializers.get(beta_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.beta = None  # created in _build() via add_weight
        self.use_for_loop = use_for_loop
        self.output_regularizers = [regularizers.get(signal_regularizer),
                                    regularizers.get(diss_regularizer)]
        # be sure to call this at the end
        super(GibbsCompetition, self).__init__(module_input=True,
                                               module_output=True,
                                               support_sparse_signal=True,
                                               support_full_signal=True,
                                               **self._del_module_args(**kwargs))

    def _build(self, input_shape):
        """Validate the (signals, diss) input shapes and create ``beta``.

        input_shape[0]: signal shape (batch, proto_number, dim1, ..., dimN)
        input_shape[1]: dissimilarity shape (batch, proto_number)
        """
        if not self.built:
            if input_shape[0][1] != self.proto_number:
                raise ValueError('The capsule number provided by input_shape is not equal the self.proto_number: '
                                 'input_shape[0][1]=' + str(input_shape[0][1]) + ' != ' +
                                 'self.capsule_number=' + str(self.proto_number) + ". Maybe you forgot to call a "
                                 "routing module.")
            if input_shape[1][1] != self.proto_number:
                raise ValueError('The prototype number provided by input_shape is not equal the self.proto_number: '
                                 'input_shape[1][1]=' + str(input_shape[1][1]) + ' != ' +
                                 'self.proto_number=' + str(self.proto_number))
            if len(input_shape[1]) != 2:
                raise ValueError("The dissimilarity vector must be of length two (batch, dissimilarities per "
                                 "prototype). You provide: " + str(len(input_shape[1])) + ". Maybe you forgot to call "
                                 "a routing module.")
            # one inverse temperature per capsule
            self.beta = self.add_weight(shape=(self.capsule_number,),
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint,
                                        name='beta')
            self.input_spec = [InputSpec(shape=(None,) + tuple(input_shape[0][1:])),
                               InputSpec(shape=(None,) + tuple(input_shape[1][1:]))]

    def _build_sparse(self, input_shape):
        """Sparse inputs share the dense build logic."""
        self._build(input_shape)

    def _call(self, inputs, **kwargs):
        """Compete prototypes within each capsule.

        inputs[0]: signals (batch, proto_number, dim1, ..., dimN)
        inputs[1]: diss    (batch, proto_number)
        Returns {0: signals, 1: diss} with proto_number replaced by
        capsule_number on axis 1.
        """
        if self.proto_number == self.capsule_number:
            # exactly one prototype per capsule: nothing to compete over
            return inputs
        else:
            signals = inputs[0]
            diss = inputs[1]
            signal_shape = None
            # signal.shape: (batch, proto_num, caps_dim1, ..., caps_dimN)
            if self.input_spec[0].ndim > 3:
                # flatten the capsule dimensions so batch_dot works on rank-3 tensors
                signal_shape = mixed_shape(signals)
                signals = K.reshape(signals, signal_shape[0:2] + (-1,))
            if not self._equally_distributed:
                if self.use_for_loop:
                    # per-capsule slices in a Python loop; no padding tricks needed
                    signals_stack = []
                    diss_stack = []
                    with K.name_scope('for_loop'):
                        for i, p in enumerate(self._proto_distrib):
                            # p: prototype indices of capsule i (assumed
                            # contiguous — the slicing below relies on it)
                            with K.name_scope('compute_slices'):
                                diss_ = diss[:, p[0]:(p[-1]+1)]
                                signals_ = signals[:, p[0]:(p[-1] + 1), :]
                            if len(p) > 1:
                                with K.name_scope('competition_probabilities'):
                                    # Gibbs coefficients: neg-softmax of the
                                    # beta-scaled dissimilarities
                                    coefficients = prob_trans.NegSoftmax(axis=-1, max_stabilization=True)(
                                        diss_ * self.beta[i])
                                with K.name_scope('signal_competition'):
                                    signals_stack.append(K.expand_dims(K.batch_dot(coefficients, signals_, [1, 1]), 1))
                                with K.name_scope('dissimilarity_competition'):
                                    diss_stack.append(K.batch_dot(coefficients, diss_, [1, 1]))
                            else:
                                # single prototype passes through unchanged
                                signals_stack.append(signals_)
                                diss_stack.append(diss_)
                    signals = K.concatenate(signals_stack, axis=1)
                    diss = K.concatenate(diss_stack, axis=-1)
                else:
                    # Vectorized path: pad every capsule up to
                    # _max_proto_number_in_capsule slots.  Duplicated slots in
                    # the extension are redirected to an appended all-zero
                    # column so they receive zero probability mass.
                    extension_idx = []
                    for i in self._proto_extension:
                        if i not in extension_idx:
                            extension_idx.append(i)
                        else:
                            # duplicate: point at the extra zero column
                            # appended below (index max+1)
                            extension_idx.append(max(self._proto_extension)+1)
                    batch_size = K.shape(signals)[0] if signal_shape is None else signal_shape[0]
                    # reshape to block
                    with K.name_scope('competition_probabilities'):
                        with K.name_scope('neg_softmax'):
                            with K.name_scope('coefficients'):
                                beta = K.gather(self.beta, self._capsule_extension)
                                coefficients = -diss * beta
                            # max stabilization
                            coefficients = coefficients - K.max(coefficients, axis=-1, keepdims=True)
                            coefficients = K.exp(coefficients)
                        # zero column catches the padded (duplicate) slots
                        coefficients = K.concatenate([coefficients,
                                                      K.zeros_like(coefficients[:, 0:1])], axis=-1)
                        coefficients = K.transpose(coefficients)
                        coefficients = K.gather(coefficients, extension_idx)
                        coefficients = K.transpose(coefficients)
                        coefficients = K.reshape(coefficients,
                                                 [batch_size, self.capsule_number,
                                                  self._max_proto_number_in_capsule])
                        # could never be a zero division: each capsule keeps at
                        # least one real slot and exp(.) > 0 there
                        with K.name_scope('normalization_constant'):
                            constant = K.sum(coefficients, axis=-1, keepdims=True)
                        probs = coefficients / constant
                    with K.name_scope('dissimilarity_preprocessing'):
                        # gather diss into the padded block layout:
                        # (batch, capsule, max_proto_in_capsule)
                        diss = K.transpose(diss)
                        diss = K.gather(diss, self._proto_extension)
                        diss = K.transpose(diss)
                        diss = K.reshape(diss,
                                         [batch_size, self.capsule_number, self._max_proto_number_in_capsule])
                    with K.name_scope('dissimilarity_competition'):
                        diss = K.squeeze(K.batch_dot(probs, K.expand_dims(diss), [2, 2]), -1)
                    with K.name_scope('signal_preprocessing'):
                        # same padded block layout for the flattened signals
                        signals = K.permute_dimensions(signals, [1, 0, 2])
                        signals = K.gather(signals, self._proto_extension)
                        signals = K.permute_dimensions(signals, [1, 0, 2])
                        signals = K.reshape(signals,
                                            [batch_size, self.capsule_number, self._max_proto_number_in_capsule, -1])
                    with K.name_scope('signal_competition'):
                        signals = K.batch_dot(probs, signals, [2, 2])
            else:
                # equally distributed: a plain reshape into
                # (batch, capsule, protos_per_capsule) blocks suffices
                batch_size = K.shape(signals)[0] if signal_shape is None else signal_shape[0]
                diss = K.reshape(diss, [batch_size, self.capsule_number, self._max_proto_number_in_capsule])
                with K.name_scope('competition_probabilities'):
                    coefficients = prob_trans.NegSoftmax(axis=-1, max_stabilization=True)(
                        diss * K.expand_dims(self.beta, -1))
                with K.name_scope('signal_competition'):
                    signals = K.reshape(signals,
                                        [batch_size, self.capsule_number, self._max_proto_number_in_capsule, -1])
                    signals = K.batch_dot(coefficients, signals, [2, 2])
                with K.name_scope('dissimilarity_competition'):
                    diss = K.squeeze(K.batch_dot(coefficients, K.expand_dims(diss), [2, 2]), -1)
            if self.input_spec[0].ndim > 3:
                # restore the original capsule dimensions
                signals = K.reshape(signals, [signal_shape[0], self.capsule_number] + list(signal_shape[2:]))
            return {0: signals, 1: diss}

    def _call_sparse(self, inputs, **kwargs):
        """Sparse signals share the dense competition logic."""
        return self._call(inputs, **kwargs)

    def _compute_output_shape(self, input_shape):
        """Axis 1 (prototypes) becomes the capsule count on both outputs."""
        signals = list(input_shape[0])
        diss = list(input_shape[1])
        signals[1] = self.capsule_number
        diss[1] = self.capsule_number
        return [tuple(signals), tuple(diss)]

    def _compute_output_shape_sparse(self, input_shape):
        """Sparse variant shares the dense shape logic."""
        return self._compute_output_shape(input_shape)

    def get_config(self):
        """Serializable configuration; later (local) keys win over base keys."""
        config = {'beta_initializer': initializers.serialize(self.beta_initializer),
                  'beta_regularizer': regularizers.serialize(self.beta_regularizer),
                  'beta_constraint': constraints.serialize(self.beta_constraint),
                  'use_for_loop': self.use_for_loop,
                  'signal_regularizer': regularizers.serialize(self.output_regularizers[0]),
                  'diss_regularizer': regularizers.serialize(self.output_regularizers[1])}
        super_config = super(GibbsCompetition, self).get_config()
        return dict(list(super_config.items()) + list(config.items()))
class Competition(Module):
    """Fuse the prototypes of each capsule into one output using a
    configurable probability transformation of the dissimilarities.

    Per capsule, competition coefficients are obtained by applying
    ``probability_transformation`` (default ``'neg_softmax'``) to the
    prototype dissimilarities; the capsule output is the corresponding
    convex combination of its prototype signals and dissimilarities.

    NOTE(review): ``_proto_distrib``, ``_equally_distributed`` and
    ``_max_proto_number_in_capsule`` come from the base ``Module`` class
    outside this file; comments on them are inferred — confirm against
    the base class.
    """
    def __init__(self,
                 probability_transformation='neg_softmax',
                 signal_regularizer=None,
                 diss_regularizer=None,
                 **kwargs):
        # callable mapping dissimilarities -> competition probabilities
        self.probability_transformation = prob_trans.get(probability_transformation)
        self.output_regularizers = [regularizers.get(signal_regularizer),
                                    regularizers.get(diss_regularizer)]
        # be sure to call this at the end
        super(Competition, self).__init__(module_input=True,
                                          module_output=True,
                                          support_sparse_signal=True,
                                          support_full_signal=True,
                                          **self._del_module_args(**kwargs))

    def _build(self, input_shape):
        """Validate the (signals, diss) input shapes; no weights are created.

        input_shape[0]: signal shape (batch, proto_number, dim1, ..., dimN)
        input_shape[1]: dissimilarity shape (batch, proto_number)
        """
        if input_shape[0][1] != self.proto_number:
            raise ValueError('The capsule number provided by input_shape is not equal the self.proto_number: '
                             'input_shape[0][1]=' + str(input_shape[0][1]) + ' != ' +
                             'self.capsule_number=' + str(self.proto_number) + ". Maybe you forgot to call a routing"
                             " module.")
        if input_shape[1][1] != self.proto_number:
            raise ValueError('The prototype number provided by input_shape is not equal the self.proto_number: '
                             'input_shape[1][1]=' + str(input_shape[1][1]) + ' != ' +
                             'self.proto_number=' + str(self.proto_number))
        if len(input_shape[1]) != 2:
            raise ValueError("The dissimilarity vector must be of length two (batch, dissimilarities per prototype). "
                             "You provide: " + str(len(input_shape[1])) + ". Maybe you forgot to call a routing "
                             "module.")
        self.input_spec = [InputSpec(shape=(None,) + tuple(input_shape[0][1:])),
                           InputSpec(shape=(None,) + tuple(input_shape[1][1:]))]

    def _build_sparse(self, input_shape):
        """Sparse inputs share the dense build logic."""
        self._build(input_shape)

    def _call(self, inputs, **kwargs):
        """Compete prototypes within each capsule.

        inputs[0]: signals (batch, proto_number, dim1, ..., dimN)
        inputs[1]: diss    (batch, proto_number)
        Returns {0: signals, 1: diss} with proto_number replaced by
        capsule_number on axis 1.
        """
        if self.proto_number == self.capsule_number:
            # exactly one prototype per capsule: nothing to compete over
            return inputs
        else:
            signals = inputs[0]
            diss = inputs[1]
            signal_shape = None
            # signal.shape: (batch, proto_num, caps_dim1, ..., caps_dimN)
            if self.input_spec[0].ndim > 3:
                # flatten the capsule dimensions so batch_dot works on rank-3 tensors
                signal_shape = mixed_shape(signals)
                signals = K.reshape(signals, signal_shape[0:2] + (-1,))
            if not self._equally_distributed:
                # we can't define this without a for loop (due to the probability transformation)
                signals_stack = []
                diss_stack = []
                with K.name_scope('for_loop'):
                    for p in self._proto_distrib:
                        # p: prototype indices of one capsule (assumed
                        # contiguous — the slicing below relies on it)
                        with K.name_scope('compute_slices'):
                            diss_ = diss[:, p[0]:(p[-1]+1)]
                            signals_ = signals[:, p[0]:(p[-1] + 1), :]
                        if len(p) > 1:
                            with K.name_scope('competition_probabilities'):
                                coefficients = self.probability_transformation(diss_)
                            with K.name_scope('signal_competition'):
                                signals_stack.append(K.expand_dims(K.batch_dot(coefficients, signals_, [1, 1]), 1))
                            with K.name_scope('dissimilarity_competition'):
                                diss_stack.append(K.batch_dot(coefficients, diss_, [1, 1]))
                        else:
                            # single prototype passes through unchanged
                            signals_stack.append(signals_)
                            diss_stack.append(diss_)
                signals = K.concatenate(signals_stack, axis=1)
                diss = K.concatenate(diss_stack, axis=-1)
            else:
                # equally distributed: a plain reshape into
                # (batch, capsule, protos_per_capsule) blocks suffices
                batch_size = K.shape(signals)[0] if signal_shape is None else signal_shape[0]
                diss = K.reshape(diss, [batch_size, self.capsule_number, self._max_proto_number_in_capsule])
                with K.name_scope('competition_probabilities'):
                    coefficients = self.probability_transformation(diss)
                with K.name_scope('signal_competition'):
                    signals = K.reshape(signals,
                                        [batch_size, self.capsule_number, self._max_proto_number_in_capsule, -1])
                    signals = K.batch_dot(coefficients, signals, [2, 2])
                with K.name_scope('dissimilarity_competition'):
                    diss = K.squeeze(K.batch_dot(coefficients, K.expand_dims(diss), [2, 2]), -1)
            if self.input_spec[0].ndim > 3:
                # restore the original capsule dimensions
                signals = K.reshape(signals, [signal_shape[0], self.capsule_number] + list(signal_shape[2:]))
            return {0: signals, 1: diss}

    def _call_sparse(self, inputs, **kwargs):
        """Sparse signals share the dense competition logic."""
        return self._call(inputs, **kwargs)

    def _compute_output_shape(self, input_shape):
        """Axis 1 (prototypes) becomes the capsule count on both outputs."""
        signals = list(input_shape[0])
        diss = list(input_shape[1])
        signals[1] = self.capsule_number
        diss[1] = self.capsule_number
        return [tuple(signals), tuple(diss)]

    def _compute_output_shape_sparse(self, input_shape):
        """Sparse variant shares the dense shape logic."""
        return self._compute_output_shape(input_shape)

    def get_config(self):
        """Serializable configuration; later (local) keys win over base keys."""
        config = {'probability_transformation': prob_trans.serialize(self.probability_transformation),
                  'signal_regularizer': regularizers.serialize(self.output_regularizers[0]),
                  'diss_regularizer': regularizers.serialize(self.output_regularizers[1])}
        super_config = super(Competition, self).get_config()
        return dict(list(super_config.items()) + list(config.items()))
|
# More experimentation with strings in Python.
# Use of the %-formatter to interpolate values into strings.
# FIX: the original used bare `print x` statements, which are a syntax
# error on Python 3; single-argument print(...) calls below behave
# identically on both Python 2 and Python 3.
x = "There are %d types of programmers." % 2

language = "python"
fakes = "dont"
y = "Those who know %s and those who %s." % (language, fakes)

print(x)
print(y)

# %r embeds the repr() of the value (quotes included), %s its str().
print("I said: %r." % x)
print("I also said: '%s'." % y)

hilarious = False
joke_evaluation = "Isn't that true?! %r"
print(joke_evaluation % hilarious)

w = "This is the left side of..."
e = "a string with a right side."
print(w + e)
#!/usr/bin/env python
"""Plot depth for a set of heterozygous calls relative to quality and allele ratio.
Used to help identify cutoff for filtering false positives by comparing
distribution to true positives.
Usage:
plot_depth_ratio.py <VCF file of het calls> '<Plot title>'
"""
import os
import sys
import vcf
import prettyplotlib as ppl
import matplotlib.pyplot as plt
def main(in_file, title):
    """Produce a quality histogram and a depth/ratio scatter plot for in_file."""
    depth_vals, ratio_vals, qual_vals = get_ad_depth(in_file)
    plot_qual_hist(qual_vals, in_file)
    plot_depth_ratios(depth_vals, ratio_vals, qual_vals, in_file, title)
def plot_depth_ratios(depths, ratios, quals, in_file, title):
out_file = "%s-depthratios.png" % os.path.splitext(in_file)[0]
fig, ax = plt.subplots(1)
for ds, rs, qualrange in _group_ratios_by_qual(depths, ratios, quals):
print qualrange, len(ds)
ppl.scatter(ax, x=depths, y=ratios, label=qualrange)
ppl.legend(ax, title="Quality score range")
ax.set_title(title)
ax.set_xlabel("Depth")
ax.set_ylabel("Variant/Total ratio")
fig.savefig(out_file)
def _group_ratios_by_qual(depths, ratios, quals, ranges=None):
    """Partition parallel (depth, ratio) lists into quality-score bins.

    Yields one (depths_in_bin, ratios_in_bin, "start-end") triple per
    half-open bin [start, end).  `ranges` generalizes the previously
    hard-coded bin edges (two commented-out alternatives existed in the
    original); the default preserves the original (0, 250), (250, 500) bins.
    """
    if ranges is None:
        ranges = [(0, 250), (250, 500)]
    for qs, qe in ranges:
        cur_ds = []
        cur_rs = []
        for d, r, q in zip(depths, ratios, quals):
            if qs <= q < qe:
                cur_ds.append(d)
                cur_rs.append(r)
        yield cur_ds, cur_rs, "%s-%s" % (qs, qe)
def plot_qual_hist(quals, in_file):
    """Save a 100-bin histogram of quality scores below 500 next to in_file."""
    capped = [score for score in quals if score < 500.0]
    out_file = "%s-hist.png" % os.path.splitext(in_file)[0]
    fig, ax = plt.subplots(1)
    ppl.hist(ax, [capped], bins=100)
    fig.savefig(out_file)
def get_ad_depth(in_file):
    """Collect parallel (depth, alt ratio, quality) lists from a VCF file.

    Only samples carrying a two-element AD (ref, alt) field with nonzero
    total are kept.  Depth is taken from the record INFO DP, capped at 500.
    """
    depths = []
    ratios = []
    quals = []
    with open(in_file) as in_handle:
        for rec in vcf.Reader(in_handle):
            for sample in rec.samples:
                # samples without allele-depth data raise AttributeError
                try:
                    ad = sample["AD"]
                except AttributeError:
                    ad = []
                if len(ad) == 2:
                    ref_count, alt_count = sample["AD"]
                    total = ref_count + alt_count
                    if total > 0:
                        depths.append(min(rec.INFO["DP"], 500))
                        ratios.append(alt_count / float(total))
                        quals.append(rec.QUAL)
    return depths, ratios, quals
if __name__ == "__main__":
main(*sys.argv[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.