content stringlengths 5 1.05M |
|---|
"""
Comparing ANN and linear model for evaluating model biases, differences, and
other thresholds using explainable AI for historical data (and SMOOTHED data)
Author : Zachary M. Labe
Date : 19 May 2021
Version : 1 - adds extra class (#8), but tries the MMean
"""
### Import packages
import matplotlib.pyplot as plt
import numpy as np
import sys
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.cartocolors.qualitative as cc
from sklearn.metrics import accuracy_score
import scipy.stats as sts
import cmasher as cmr
import cmocean
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib
### Plotting defaults
matplotlib.rc('savefig', facecolor='black')
matplotlib.rc('axes', edgecolor='darkgrey')
matplotlib.rc('xtick', color='darkgrey')
matplotlib.rc('ytick', color='darkgrey')
matplotlib.rc('axes', labelcolor='darkgrey')
matplotlib.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
variablesall = ['T2M','P','SLP']
variablesall = ['T2M']
pickSMILEall = [[]]
ridge_penalty = [0,0.1]
lab = ['LINEAR','LINEAR-L$_{2}$=0.1','ANN-L$_{2}$=0.1','ANN(smooth)-L$_{2}$=0.1']
for va in range(len(variablesall)):
for m in range(len(pickSMILEall)):
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
directorydata = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
directoryfigure = '/Users/zlabe/Documents/Projects/ModelBiasesANN/Dark_Figures/'
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
###############################################################################
###############################################################################
modelGCMs = ['CanESM2','MPI','CSIRO-MK3.6','KNMI-ecearth',
'GFDL-CM3','GFDL-ESM2M','LENS']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = variablesall[va]
reg_name = 'Arctic'
timeper = 'historical'
###############################################################################
###############################################################################
pickSMILE = pickSMILEall[m]
if len(pickSMILE) >= 1:
lenOfPicks = len(pickSMILE) + 1 # For random class
else:
lenOfPicks = len(modelGCMs) + 1 # For random class
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
rm_merid_mean = False
rm_annual_mean = False
###############################################################################
###############################################################################
rm_ensemble_mean = False
rm_observational_mean = False
###############################################################################
###############################################################################
calculate_anomalies = False
if calculate_anomalies == True:
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
window = 0
ensTypeExperi = 'ENS'
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'
# integer = 5 # random noise value to add/subtract from each grid point
sizeOfTwin = 1 # number of classes to add to other models
###############################################################################
###############################################################################
if ensTypeExperi == 'ENS':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
yearsall = np.arange(1950+window,2019+1,1)
ravelmodeltime = False
ravel_modelens = True
elif ensTypeExperi == 'GCM':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
yearsall = np.arange(1950+window,2019+1,1)
ravelmodeltime = False
ravel_modelens = True
###############################################################################
###############################################################################
numOfEns = 16
if len(modelGCMs) == 6:
lensalso = False
elif len(modelGCMs) == 7:
lensalso = True
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks
###############################################################################
###############################################################################
lrpRule = 'z'
normLRP = True
###############################################################################
modelGCMsNames = np.append(modelGCMs,['MMmean'])
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
typeOfAnalysis = 'issueWithExperiment'
# Experiment #1
if rm_ensemble_mean == True:
if window > 1:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-3'
# Experiment #4
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-4'
# Experiment #5
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-8'
# Experiment #9
if rm_ensemble_mean == False:
if window > 1:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-9'
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
sys.exit('Wrong parameters selected to analyze')
### Select how to save files
if land_only == True:
saveData = timeper + '_' + seasons[0] + '_LAND' + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
typemask = 'LAND'
elif ocean_only == True:
saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
typemask = 'OCEAN'
else:
saveData = timeper + '_' + seasons[0] + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
typemask = 'GLOBAL'
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Select how to save files for ANN
if land_only == True:
saveDataANN = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveDataANN = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveDataANN = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveDataANN)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Select how to save files for smoothed ANN
if land_only == True:
saveDataANNs = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_SMOOTHER_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveDataANNs = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_SMOOTHER_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveDataANNs = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_SMOOTHER_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveDataANNs)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Read in confidence for linear model
linear_lab = np.load(directorydata + 'PlotLINEARLabels_' + saveData + '.npz')['arr_0']
linear_conf = np.load(directorydata + 'PlotLINEARConfidence_' + saveData + '.npz')['arr_0']
ann_lab = np.int_(np.genfromtxt(directorydata + 'obsLabels_' + saveDataANN + '.txt'))
ann_conf = np.genfromtxt(directorydata + 'obsConfid_' + saveDataANN + '.txt')
ann_labs = np.int_(np.genfromtxt(directorydata + 'obsLabels_' + saveDataANNs + '.txt'))
ann_confs = np.genfromtxt(directorydata + 'obsConfid_' + saveDataANNs + '.txt')
### Combine data together
pred = [linear_lab[0],linear_lab[1],ann_lab,ann_labs]
conf = [linear_conf[0],linear_conf[1],ann_conf,ann_confs]
###############################################################################
###############################################################################
###############################################################################
def adjust_spines(ax, spines):
    """Show only the requested axes spines, offset outward; hide the rest.

    Spines whose names appear in *spines* are pushed 5 points away from the
    plot area; every other spine is made invisible.  Tick marks are kept
    only on the sides whose spine remains visible.
    """
    shown = set(spines)
    for name, spine in ax.spines.items():
        if name in shown:
            spine.set_position(('outward', 5))  # float the spine off the axes
        else:
            spine.set_color('none')  # hide it entirely
    # Ticks survive only on sides whose spine is still drawn.
    for side, axis in (('left', ax.yaxis), ('bottom', ax.xaxis)):
        if side in shown:
            axis.set_ticks_position(side)
        else:
            axis.set_ticks([])
### Begin plot
fig = plt.figure(figsize=(9,5))
for r in range(len(lab)*2):
ax = plt.subplot(2,4,r+1)
if r < 4:
obsout = conf[r]
label = np.argmax(obsout,axis=1)
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('darkgrey')
ax.spines['bottom'].set_color('darkgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='darkgrey')
# ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.3)
color = cmr.infinity(np.linspace(0.00,1,len(modelGCMsNames)))
ctest = []
for i,c in zip(range(len(modelGCMsNames)),color):
if i == 7:
c = 'w'
else:
c = c
plt.plot(yearsall,obsout[:,i],color=c,linewidth=0.2,
label=r'\textbf{%s}' % modelGCMsNames[i],zorder=1,
clip_on=False,alpha=0)
plt.scatter(yearsall,obsout[:,i],color=c,s=9,zorder=12,
clip_on=False,alpha=0.2,edgecolors='none')
ctest.append(c)
for yr in range(yearsall.shape[0]):
la = label[yr]
if i == la:
plt.scatter(yearsall[yr],obsout[yr,i],color=c,s=9,zorder=12,
clip_on=False,alpha=1,edgecolors='none')
low = lowess(np.nanmax(obsout,axis=1),yearsall,frac=1/3)
plt.plot(yearsall,low[:,1],linestyle='-',linewidth=0.5,color='darkgrey')
if r == 1:
leg = plt.legend(shadow=False,fontsize=6,loc='upper center',
bbox_to_anchor=(1.2,-1.35),fancybox=True,ncol=4,frameon=False,
handlelength=0,handletextpad=0)
for line,text in zip(leg.get_lines(), leg.get_texts()):
text.set_color(line.get_color())
if r == 0:
plt.yticks(np.arange(0,1.01,0.1),map(str,np.round(np.arange(0,1.01,0.1),2)),size=3)
else:
plt.yticks(np.arange(0,1.01,0.1),map(str,np.round(np.arange(0,1.01,0.1),2)),size=3)
ax.set_yticklabels([])
plt.xticks(np.arange(1950,2030+1,20),map(str,np.arange(1950,2030+1,20)),size=5)
plt.xlim([1950,2020])
plt.ylim([0,1.0])
if r == 0:
if land_only == True:
plt.ylabel(r'\textbf{Confidence [%s-%s-LAND-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=23)
elif ocean_only == True:
plt.ylabel(r'\textbf{Confidence [%s-%s-OCEAN-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=23)
else:
plt.ylabel(r'\textbf{Confidence [%s-%s-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=23)
plt.title(r'\textbf{%s}' % (lab[r]),color='w',fontsize=10)
else:
obspred = pred[r-4]
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('darkgrey')
ax.spines['bottom'].set_color('darkgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='darkgrey')
ax.yaxis.grid(zorder=1,color='w',alpha=0.35)
x=np.arange(1950,2019+1,1)
for cct in range(len(obspred)):
if obspred[cct] == 0:
col = ctest[0]
elif obspred[cct] == 1:
col = ctest[1]
elif obspred[cct] == 2:
col = ctest[2]
elif obspred[cct] == 3:
col = ctest[3]
elif obspred[cct] == 4:
col = ctest[4]
elif obspred[cct] == 5:
col = ctest[5]
elif obspred[cct] == 6:
col = ctest[6]
elif obspred[cct] == 7:
col = ctest[7]
plt.scatter(x[cct],obspred[cct],color=col,s=9,clip_on=False,
edgecolor='none',linewidth=0.4,zorder=10)
plt.xticks(np.arange(1950,2030+1,20),map(str,np.arange(1950,2030+1,20)),size=5)
if r-4 == 0:
plt.yticks(np.arange(0,lenOfPicks+1,1),modelGCMsNames,size=3)
else:
plt.yticks(np.arange(0,lenOfPicks+1,1),modelGCMsNames,size=3)
ax.set_yticklabels([])
plt.xlim([1950,2020])
plt.ylim([0,lenOfPicks-1])
if r-4 == 0:
if land_only == True:
plt.ylabel(r'\textbf{Prediction [%s-%s-LAND-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=7.5)
elif ocean_only == True:
plt.ylabel(r'\textbf{Prediction [%s-%s-OCEAN-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=7.5)
else:
plt.ylabel(r'\textbf{Prediction [%s-%s-%s]' % (seasons[0],variq,reg_name),color='darkgrey',fontsize=6,labelpad=7.5)
# plt.tight_layout()
# plt.subplots_adjust(bottom=0.15)
plt.savefig(directoryfigure + '%s_LinearANNsmoothcomparison_%s.png' % (typeOfAnalysis,saveDataANN),dpi=300) |
import calendar
from datetime import datetime
import decimal
import xml.etree.ElementTree as xmlET
def dparse(*dstr):
    """Convert date strings to a datetime (Pandas date-parser hook).

    Accepts ('YYYY',), ('YYYY', 'MM') or ('YYYY', 'MM', 'DD').  A bare year
    resolves to December 31 of that year; a year/month pair resolves to the
    last day of that month; a full triple is used as given.

    :param list[str] dstr: year, month, day or year, month or year
    :returns: datetime
    :rtype: datetime
    """
    parts = [int(p) for p in dstr]
    if len(parts) == 1:
        # Year only: treat the annual value as December ...
        parts.append(12)
    if len(parts) == 2:
        # ... and year/month as the final day of that month.
        parts.append(calendar.monthrange(parts[0], parts[1])[1])
    return datetime(*parts)
# def dparse(yr, mo, dy, hr, minute, sec):
# # Date parser for working with the date format from PRMS files
#
# # Convert to integer first
# yr, mo, dy, hr, minute, sec = [int(x) for x in [yr, mo, dy, hr, minute, sec]]
#
# dt = datetime.datetime(yr, mo, dy, hr, minute, sec)
# return dt
def read_xml(filename):
    """Parse *filename* as XML and return the root of the tree.

    :param str filename: XML filename
    :returns: root element of the parsed document
    :rtype: xmlET.Element
    """
    return xmlET.parse(filename).getroot()
def float_to_str(f):
    """Render the float *f* in plain decimal notation, never scientific.

    Python's repr() switches to exponent form for very small or large
    floats; routing the value through a high-precision Decimal context
    avoids both the exponent and false extra precision.

    :param float f: number
    :returns: string representation of the float
    :rtype: str
    """
    # From: https://stackoverflow.com/questions/38847690/convert-float-to-string-without-scientific-notation-and-false-precision
    # 20 significant digits comfortably covers an IEEE-754 double.
    ctx = decimal.Context(prec=20)
    return format(ctx.create_decimal(repr(f)), 'f')
|
### Grey-level histogram preview: ask the user to pick an image file, then
### plot its pixel-value histogram together with the (rescaled) CDF.
import cv2
import numpy as np
from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *

# Hidden Tk root window, used only to host the file-open dialog.
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
img = cv2.imread(root.filename)
root.destroy()

# Histogram over all pixels (colour channels pooled together by flatten()).
hist,bins = np.histogram(img.flatten(),256,[0,256])
cdf = hist.cumsum()
# Rescale the CDF so its peak matches the histogram's tallest bin, letting
# both curves share one y-axis.
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = 'b')
plt.hist(img.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
from __future__ import print_function
import os
import json
import numpy as np
import dask
import toolz as tz
import functools
from .io import IoN5, IoHDF5 # IoDVID
def load_input(io, offset, context, output_shape, padding_mode='reflect'):
    """Read the input region for one inference block, padded by *context*.

    The requested region is ``[offset - context, offset + output_shape +
    context)`` along every axis.  Any part of that region falling outside
    the volume is filled with ``np.pad`` (default: reflect padding), so the
    result always has shape ``output_shape + 2 * context``.

    :param io: volume wrapper exposing ``.shape`` and ``.read(bb)``
    :param offset: per-axis start of the output block
    :param context: per-axis halo added on both sides of the block
    :param output_shape: per-axis size of the output block
    :param str padding_mode: any mode accepted by ``np.pad``
    :returns: np.ndarray holding the (possibly padded) input data
    """
    ndim = len(offset)
    starts = [off - context[i] for i, off in enumerate(offset)]
    stops = [off + output_shape[i] + context[i] for i, off in enumerate(offset)]
    shape = io.shape
    # We pad the input volume if necessary; remember how much each side
    # needs so the read below can be clipped to the volume.
    pad_left = None
    pad_right = None
    # check for padding to the left
    if any(start < 0 for start in starts):
        pad_left = tuple(abs(start) if start < 0 else 0 for start in starts)
        starts = [max(0, start) for start in starts]
    # check for padding to the right
    if any(stop > shape[i] for i, stop in enumerate(stops)):
        pad_right = tuple(stop - shape[i] if stop > shape[i] else 0
                          for i, stop in enumerate(stops))
        stops = [min(shape[i], stop) for i, stop in enumerate(stops)]
    bb = tuple(slice(start, stop) for start, stop in zip(starts, stops))
    data = io.read(bb)
    # Pad if the request exceeded the volume on either side.  The zero
    # fallbacks are sized to the actual dimensionality (the previous
    # hard-coded (0, 0, 0) silently truncated pad_width for >3D volumes).
    if pad_left is not None or pad_right is not None:
        pad_left = (0,) * ndim if pad_left is None else pad_left
        pad_right = (0,) * ndim if pad_right is None else pad_right
        pad_width = tuple((pl, pr) for pl, pr in zip(pad_left, pad_right))
        data = np.pad(data, pad_width, mode=padding_mode)
    return data
def run_inference_n5(prediction,
                     preprocess,
                     postprocess,
                     raw_path,
                     save_file,
                     offset_list,
                     input_shape,
                     output_shape,
                     input_key,
                     target_keys,
                     padding_mode='reflect',
                     num_cpus=5,
                     log_processed=None,
                     channel_order=None):
    """Run block-wise inference reading from and writing to N5 datasets.

    Thin convenience wrapper around :func:`run_inference` that opens the
    N5 input/output wrappers, forwards all parameters, and closes the
    wrappers afterwards.

    :param prediction: callable running the network on one input block
    :param preprocess: callable applied to each block before prediction
    :param postprocess: optional callable applied after prediction
    :param str raw_path: path to the N5 container with the raw data
    :param str save_file: path to the (existing) N5 output container
    :param offset_list: per-block output offsets
    :param input_shape: network input block shape
    :param output_shape: network output block shape
    :param input_key: dataset key of the raw data
    :param target_keys: output dataset key or tuple of keys
    :param str padding_mode: np.pad mode for out-of-bounds reads
    :param int num_cpus: number of dask worker threads
    :param log_processed: optional path logging completed offsets
    :param channel_order: optional channel layout forwarded to the writer
    """
    assert os.path.exists(raw_path)
    assert os.path.exists(save_file)
    if isinstance(target_keys, str):
        target_keys = (target_keys,)
    # The N5 IO/Wrapper needs iterables as keys
    # so we wrap the input key in a list.
    # Note that this is not the case for the hdf5 wrapper,
    # which just takes a single key.
    io_in = IoN5(raw_path, [input_key])
    io_out = IoN5(save_file, target_keys, channel_order=channel_order)
    run_inference(prediction, preprocess, postprocess, io_in, io_out,
                  offset_list, input_shape, output_shape, padding_mode=padding_mode,
                  num_cpus=num_cpus, log_processed=log_processed)
    # This is not necessary for n5 datasets
    # which do not need to be closed, but we leave it here for
    # reference when using other (hdf5) io wrappers
    io_in.close()
    io_out.close()
def run_inference_h5(prediction,
                     preprocess,
                     postprocess,
                     raw_path,
                     save_file,
                     offset_list,
                     input_shape,
                     output_shape,
                     input_key,
                     target_keys,
                     padding_mode='reflect',
                     num_cpus=5,
                     log_processed=None,
                     channel_order=None):
    """Run block-wise inference reading from and writing to HDF5 files.

    Thin convenience wrapper around :func:`run_inference` that opens the
    HDF5 input/output wrappers, forwards all parameters, and closes the
    wrappers afterwards (closing matters for hdf5, unlike n5).

    :param prediction: callable running the network on one input block
    :param preprocess: callable applied to each block before prediction
    :param postprocess: optional callable applied after prediction
    :param str raw_path: path to the HDF5 file with the raw data
    :param str save_file: path to the (existing) HDF5 output file
    :param offset_list: per-block output offsets
    :param input_shape: network input block shape
    :param output_shape: network output block shape
    :param input_key: dataset key of the raw data
    :param target_keys: output dataset key or tuple of keys
    :param str padding_mode: np.pad mode for out-of-bounds reads
    :param int num_cpus: number of dask worker threads
    :param log_processed: optional path logging completed offsets
    :param channel_order: optional channel layout forwarded to the writer
    """
    assert os.path.exists(raw_path)
    assert os.path.exists(save_file)
    if isinstance(target_keys, str):
        target_keys = (target_keys,)
    # The IO wrappers take iterables of keys, so wrap the single input key.
    io_in = IoHDF5(raw_path, [input_key])
    io_out = IoHDF5(save_file, target_keys, channel_order=channel_order)
    run_inference(prediction, preprocess, postprocess, io_in, io_out,
                  offset_list, input_shape, output_shape, padding_mode=padding_mode,
                  num_cpus=num_cpus, log_processed=log_processed)
    # hdf5 files must be closed to flush pending writes.
    io_in.close()
    io_out.close()
def run_inference(prediction,
                  preprocess,
                  postprocess,
                  io_in,
                  io_out,
                  offset_list,
                  input_shape,
                  output_shape,
                  padding_mode='reflect',
                  num_cpus=5,
                  log_processed=None):
    """Block-wise inference driver built as a lazy dask task graph.

    For every offset in *offset_list* the pipeline is:
    log -> load (with halo/padding) -> preprocess -> predict ->
    crop-to-volume -> optional postprocess -> write.  All blocks are
    composed lazily and executed at the end on a thread pool.

    :param prediction: callable running the network on one input block
    :param preprocess: callable applied to each loaded block
    :param postprocess: optional callable taking (output, bounding_box)
    :param io_in: input volume wrapper with ``.shape`` and ``.read``
    :param io_out: output wrapper with ``.write``
    :param offset_list: per-block output offsets
    :param input_shape: network input block shape (>= output_shape)
    :param output_shape: network output block shape
    :param str padding_mode: np.pad mode for out-of-bounds reads
    :param int num_cpus: number of dask worker threads
    :param log_processed: optional file path; completed offsets appended
    """
    assert callable(prediction)
    assert callable(preprocess)
    assert len(output_shape) == len(input_shape)
    n_blocks = len(offset_list)
    print("Starting prediction...")
    print("For %i number of blocks" % n_blocks)
    # the additional context requested in the input: half the difference
    # between input and output shape on each side.
    # NOTE(review): assumes input_shape[i] - output_shape[i] is even and
    # non-negative -- confirm with the network architectures used.
    context = np.array([input_shape[i] - output_shape[i]
                        for i in range(len(input_shape))]) / 2
    context = context.astype('uint32')
    shape = io_in.shape

    @dask.delayed
    def load_offset(offset):
        # Read the input block plus halo (padded at volume borders).
        return load_input(io_in, offset, context, output_shape,
                          padding_mode=padding_mode)

    # Wrap the user callables so they become graph nodes.
    preprocess = dask.delayed(preprocess)
    predict = dask.delayed(prediction)
    if postprocess is not None:
        postprocess = dask.delayed(postprocess)

    @dask.delayed(nout=2)
    def verify_shape(offset, output):
        # crop if necessary: blocks at the volume border may extend past
        # the dataset and must be trimmed before writing.
        # output axis 0 is the channel axis; spatial axes follow.
        stops = [off + outs for off, outs in zip(offset, output.shape[1:])]
        if any(stop > dim_size for stop, dim_size in zip(stops, shape)):
            bb = ((slice(None),) +
                  tuple(slice(0, dim_size - off if stop > dim_size else None)
                        for stop, dim_size, off in zip(stops, shape, offset)))
            output = output[bb]
        output_bounding_box = tuple(slice(off, off + outs)
                                    for off, outs in zip(offset, output_shape))
        return output, output_bounding_box

    @dask.delayed
    def write_output(output, output_bounding_box):
        io_out.write(output, output_bounding_box)
        return 1

    @dask.delayed
    def log(off):
        # Record each offset before processing so interrupted runs can be
        # resumed by skipping already-logged blocks.
        if log_processed is not None:
            with open(log_processed, 'a') as log_f:
                log_f.write(json.dumps(off) + ', ')
        return off

    # iterate over all the offsets, get the input data and predict
    results = []
    for offset in offset_list:
        output = tz.pipe(offset, log, load_offset, preprocess, predict)
        output_crop, output_bounding_box = verify_shape(offset, output)
        if postprocess is not None:
            output_crop = postprocess(output_crop, output_bounding_box)
        result = write_output(output_crop, output_bounding_box)
        results.append(result)

    # NOTE(review): the `get=` keyword was deprecated/removed in newer dask
    # releases in favour of `scheduler=` -- this pins an old dask API.
    get = functools.partial(dask.threaded.get, num_workers=num_cpus)
    # NOTE: Because dask.compute doesn't take an argument, but rather an
    # arbitrary number of arguments, computing each in turn, the output of
    # dask.compute(results) is a tuple of length 1, with its only element
    # being the results list. If instead we pass the results list as *args,
    # we get the desired container of results at the end.
    success = dask.compute(*results, get=get)
    print('Ran {0:} jobs'.format(sum(success)))
|
from asyncio import get_event_loop
from typing import Any, Coroutine, Optional
from inspect import iscoroutinefunction
from .src import IPSWME, SHSH2, SWSCAN, Jailbreak, Pallas
class Client:
    """
    Main class of PyApple.
    """

    def __init__(self, *args, **kwargs):  # needs to be reworked
        super().__init__(*args, **kwargs)

    @classmethod
    def sync(cls):
        """Build a client whose coroutine API can be called synchronously.

        Returns a regular ``cls()`` instance whose coroutine methods are
        wrapped so they run on the current event loop, and whose underlying
        ``request`` method closes the HTTP session after every call.
        """
        client = cls()
        request_func = getattr(client, "request")

        async def session_closer(*args: Any, **kwargs: Any) -> Any:
            # Delegate to the original request, but always close the
            # session afterwards -- a sync client has no context manager
            # to perform the cleanup.
            try:
                return await request_func(*args, **kwargs)
            finally:
                if client.session:
                    await client.session.close()

        def to_sync(func: Coroutine):
            # Wrap a coroutine function so a plain call drives it to
            # completion on the event loop (pass-through if a loop is
            # already running, e.g. inside another coroutine).
            def wrapper(*args: Any, **kwargs: Any):
                loop = get_event_loop()
                if loop.is_running():
                    return func(*args, **kwargs)
                return loop.run_until_complete(func(*args, **kwargs))
            return wrapper

        # Remove the async-context-manager surface and the sync()
        # constructor from this instance.
        # NOTE(review): the setattr/del pairs shadow then delete instance
        # attributes; the class-level definitions remain untouched --
        # presumably intentional, verify against SocketBase/ABC usage.
        setattr(client, "__aenter__", None)
        setattr(client, "__aexit__", None)
        setattr(client, "sync", None)
        del client.__aenter__
        del client.__aexit__
        del client.sync
        # Collect every coroutine method still exposed on the instance ...
        needs_to_be_changed = [
            fname
            for fname in dir(client)
            if iscoroutinefunction(getattr(client, fname))
        ]
        # ... re-route request through the session-closing wrapper, then
        # convert each coroutine method to a blocking one.
        client.__setattr__("request", session_closer)
        for fname in needs_to_be_changed:
            client.__setattr__(fname, to_sync(getattr(client, fname)))
        return client
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 14:33:59 2019
@author: SpaceMeerkat
A script for testing the trained CAE with synthetic moment maps
"""
# =============================================================================
# Import relevant packages
# =============================================================================
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import torch
from tqdm import tqdm
import sys
sys.path.append("../utils/")
from model_int import CAE
from functions import return_cube, synthetic_plotter
from rcparams import rcparams
rcparams()
# =============================================================================
# Setup paths
# =============================================================================
model_path = '/path/to/saves/models/'
image_path = '/path/to/image/save/directory/'
# =============================================================================
# Setup the model
# =============================================================================
def apply_dropout(m):
    """Switch dropout layers back into train mode for Monte-Carlo dropout.

    Intended for ``model.apply(apply_dropout)`` after ``model.train(False)``:
    every ``torch.nn.Dropout`` submodule is flipped into training mode (so
    test-time forward passes sample dropout masks) while all other layers
    stay in eval mode.

    :param m: a submodule visited by ``torch.nn.Module.apply``
    """
    # isinstance() is the idiomatic type check (the original compared
    # type(m) == torch.nn.Dropout, which is brittle against subclassing).
    if isinstance(m, torch.nn.Dropout):
        m.train(True)
model = CAE()
model.load_state_dict(torch.load(model_path))
model = model.cpu()
model.train(False)
model.apply(apply_dropout)
print("Model cast to CPU")
# =============================================================================
# Open the testing data
# =============================================================================
index = np.array([100])
num_cores = 14
pool = mp.Pool(num_cores,maxtasksperchild=100)
results = list(pool.imap(return_cube,index))
pool.close()
batch = np.array([r[0] for r in results])
target = np.array([r[1:] for r in results])
batch = torch.tensor(batch).to(torch.float)
batch[batch!=batch]=0
target = torch.tensor(target).to(torch.float)
pos = target[:,0]
ah = target[:,-2]
mom0s, mom1s = batch[:,0,:,:].unsqueeze(1), batch[:,1,:,:].unsqueeze(1)
print("Test data created")
# =============================================================================
# Begin the testing procedure
# =============================================================================
predictions = []
errors = []
temp_pred = []
for _ in tqdm(range(1000)): #10000
prediction1 = model.test_encode(mom0s,mom1s,pos)
prediction1 = prediction1.detach().numpy()
temp_pred.append(prediction1)
temp_pred = np.vstack(temp_pred)
mean_pred = np.mean(temp_pred,0)
predictions.append(mean_pred)
errors.append(np.sum(np.abs(temp_pred-mean_pred[None,:]),0)/len(temp_pred))
errors = np.vstack(errors)
fig, axs = plt.subplots(1, 4, figsize=(30,7.5))
axs[0].hist(np.rad2deg(temp_pred[:,1]),bins=50,density=True,color='b')
axs[0].set_xlim(10,90)
axs[1].hist(temp_pred[:,2],bins=50,density=True,color='b')
axs[1].set_xlim(0.1,0.35)
axs[2].hist(temp_pred[:,4],bins=50,density=True,color='b')
axs[2].set_xlim(50,500)
axs[3].hist(temp_pred[:,3],bins=50,density=True,color='b')
axs[3].set_xlim(0.01,0.8)
axs[0].set(ylabel= 'Probability density')
axs[0].set(xlabel= r'i $(^{\circ})$')
axs[1].set(xlabel= r'$a_{I}$')
axs[2].set(xlabel= r'$V_{max} \, sin(i) \, (km\,s^{-1})$')
axs[3].set(xlabel= r'$V_{scale}$')
plt.tight_layout()
plt.savefig(image_path + 'param_dists.png')
# =============================================================================
# Put the medians back into the network to get "out" images
# =============================================================================
predictions = torch.tensor(mean_pred).to(torch.float).unsqueeze(0).unsqueeze(0).unsqueeze(0)
batch_size = predictions.shape[0]
### Create the auxiliary arrays
l = torch.arange(0 - 63/2., (63/2.)+1)
yyy, xxx, zzz = torch.meshgrid(l,l,l)
xxx, yyy, zzz = xxx.repeat(batch_size,1,1,1), yyy.repeat(batch_size,1,1,1), zzz.repeat(batch_size,1,1,1)
xxx = xxx.to(torch.float)
yyy = yyy.to(torch.float)
zzz = zzz.to(torch.float)
BRIGHTNESS, VELOCITY, vmax = CAE(xxx,yyy,zzz).test_images(mom0s, mom1s, predictions[:,:,:,0],
predictions[:,:,:,1], predictions[:,:,:,2],
predictions[:,:,:,3], predictions[:,:,:,4],
predictions[:,:,:,0]*0 + 1, shape=64)
predictions = predictions.squeeze(0).squeeze(0).numpy()
mom0s, mom1s = mom0s.squeeze(1).numpy(), mom1s.squeeze(1).numpy()
BRIGHTNESS, VELOCITY = BRIGHTNESS.squeeze(1).numpy(), VELOCITY.squeeze(1).numpy()
synthetic_plotter(np.array([32]), mom0s, mom1s, BRIGHTNESS,VELOCITY,predictions,errors,0,image_path+'synthetic.pdf')
# =============================================================================
# End of script
# =============================================================================
|
__all__ = [
'Socket',
'device',
'terminate',
]
import curio
import curio.traps
from . import (
SocketBase,
errors,
terminate as _terminate,
)
from .constants import (
AF_SP,
AF_SP_RAW,
NN_DONTWAIT,
)
async def device(sock1, sock2=None):
"""Re-implement nn_device without threads."""
def test_fd(sock, fd_name):
try:
getattr(sock.options, fd_name)
except errors.ENOPROTOOPT:
return False
else:
return True
async def forward(s1, s2):
while True:
try:
with await s1.recvmsg() as message:
await s2.sendmsg(message)
except errors.EBADF:
break
errors.asserts(
sock1.options.nn_domain == AF_SP_RAW,
'expect raw socket: %r', sock1,
)
errors.asserts(
sock2 is None or sock2.options.nn_domain == AF_SP_RAW,
'expect raw socket: %r', sock2,
)
if sock2 is None:
await forward(sock1, sock1)
return
async with curio.TaskGroup() as group:
okay = False
if test_fd(sock1, 'nn_rcvfd') and test_fd(sock2, 'nn_sndfd'):
await group.spawn(forward(sock1, sock2))
okay = True
if test_fd(sock2, 'nn_rcvfd') and test_fd(sock1, 'nn_sndfd'):
await group.spawn(forward(sock2, sock1))
okay = True
if not okay:
raise AssertionError('incorrect direction: %r, %r', sock1, sock2)
await group.join()
#
# Note about the hack:
#
# After a file descriptor (specifically, nn_sndfd, and nn_rcvfd) is
# added to curio's event loop, it can't to detect when file descriptor
# is closed. As a result, __transmit will be blocked forever on waiting
# the file descriptor becoming readable.
#
# To address this issue, before we close the socket, we will get the
# curio kernel object, and mark the blocked tasks as ready manually.
#
async def terminate():
    """Close all nanomsg sockets, first manually unblocking every curio task
    that is waiting on one of "our" file descriptors (see the hack note
    above).
    """
    # HACK: Mark tasks as ready before close sockets.
    kernel = await curio.traps._get_kernel()
    # Make a copy before modifying it: unregister() mutates the selector map.
    items = tuple(kernel._selector.get_map().items())
    for fd, key in items:
        # Only touch descriptors this module registered (wrapped in Fd).
        if isinstance(fd, Fd):
            rtask, wtask = key.data
            _mark_ready(kernel, rtask)
            _mark_ready(kernel, wtask)
            kernel._selector.unregister(fd)
    # Now we may close sockets.
    _terminate()
class Socket(SocketBase):
    """Nanomsg socket with curio-compatible async send/recv methods.

    Blocking is implemented by issuing non-blocking transmits (NN_DONTWAIT)
    and suspending the curio task on the socket's event fd between EAGAINs
    (see __transmit).
    """

    def __init__(self, *, domain=AF_SP, protocol=None, socket_fd=None):
        super().__init__(domain=domain, protocol=protocol, socket_fd=socket_fd)
        # Fields for tracking info for the close-socket hack.
        # Each entry is a (curio kernel, Fd) pair recorded while a task is
        # blocked inside __transmit; close() uses it to wake those tasks.
        self.__kernels_fds = [] # Allow duplications.

    async def __aenter__(self):
        return super().__enter__()

    async def __aexit__(self, *exc_info):
        return super().__exit__(*exc_info)  # XXX: Would this block?

    def close(self):
        # HACK: Mark tasks as ready before close the socket.
        for kernel, fd in self.__kernels_fds:
            try:
                key = kernel._selector.get_key(fd)
            except KeyError:
                # Already unregistered (e.g. by terminate()).
                continue
            rtask, wtask = key.data
            _mark_ready(kernel, rtask)
            _mark_ready(kernel, wtask)
            kernel._selector.unregister(fd)
        # Now we may close the socket.
        super().close()

    async def send(self, message, size=None, flags=0):
        """Async send; suspends the task until the socket is writable."""
        return await self.__transmit(
            self.options.nn_sndfd,
            self._send,
            (message, size, flags | NN_DONTWAIT),
        )

    async def recv(self, message=None, size=None, flags=0):
        """Async recv; suspends the task until the socket is readable."""
        return await self.__transmit(
            self.options.nn_rcvfd,
            self._recv,
            (message, size, flags | NN_DONTWAIT),
        )

    async def sendmsg(self, message, flags=0):
        """Async zero-copy send of a Message object."""
        return await self.__transmit(
            self.options.nn_sndfd,
            self._sendmsg,
            (message, flags | NN_DONTWAIT),
        )

    async def recvmsg(self, message=None, flags=0):
        """Async zero-copy receive of a Message object."""
        return await self.__transmit(
            self.options.nn_rcvfd,
            self._recvmsg,
            (message, flags | NN_DONTWAIT),
        )

    async def __transmit(self, eventfd, transmit, args):
        """Retry the non-blocking `transmit` call, waiting on `eventfd`
        after each EAGAIN, until it succeeds or the socket is closed."""
        while True:
            # It's closed while we were blocked.
            if self.fd is None:
                raise errors.EBADF
            try:
                return transmit(*args)
            except errors.EAGAIN:
                pass
            # Wrap eventfd so that terminate() may find it.
            eventfd = Fd(eventfd)
            pair = (await curio.traps._get_kernel(), eventfd)
            self.__kernels_fds.append(pair)
            try:
                await curio.traps._read_wait(eventfd)
            finally:
                self.__kernels_fds.remove(pair)
# A wrapper class for separating out "our" file descriptors.
class Fd(int):
    """int subclass marking event fds registered by this module, so that
    terminate() can tell them apart in the kernel's selector map."""
    pass
def _mark_ready(kernel, task):
if task is None:
return
kernel._ready.append(task)
task.next_value = None
task.next_exc = None
task.state = 'READY'
task.cancel_func = None
|
import logging
from celery import shared_task
from django.conf import settings
from django.contrib.auth import get_user_model
from .utils import (
change_user_password as _change_user_password,
create_user as _create_user,
update_user as _update_user,
)
logger = logging.getLogger('kompassi')
@shared_task(ignore_result=True)
def create_user(user_pk, password):
    """Celery task: look up the user by pk and delegate to utils.create_user."""
    user = get_user_model().objects.get(pk=user_pk)
    _create_user(user, password)
@shared_task(ignore_result=True)
def update_user(user_pk):
    """Celery task: look up the user by pk and delegate to utils.update_user."""
    user = get_user_model().objects.get(pk=user_pk)
    _update_user(user)
@shared_task(ignore_result=True)
def change_user_password(user_pk, new_password):
    """Celery task: look up the user by pk and delegate to
    utils.change_user_password."""
    user = get_user_model().objects.get(pk=user_pk)
    _change_user_password(user, new_password)
|
import re

# Demo of the four matching entry points on a compiled pattern:
# an ASCII letter, then any one character, then a literal 'a'.
pattern = re.compile(r"([A-Za-z]).([a])")
text = 'Search this inside ot this text please!'

a = pattern.search(text)
print(a.group())  # Sea

b = pattern.findall(text)
print(b)  # [('S', 'a'), ('l', 'a')]  (the captured groups of each match)

c = pattern.fullmatch(text)
print(c)  # None -- the pattern does not cover the whole string

d = pattern.match(text)
print(d)  # <re.Match object; span=(0, 3), match='Sea'> -- matches at position 0
          # (the original "# None" comment here was wrong: match() only
          # anchors at the start, and 'Sea' matches there)
|
from collections import OrderedDict
from enum import Enum, auto
from threading import RLock
from typing import (
TYPE_CHECKING,
Callable,
Generic,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
NoReturn,
Optional,
)
from typing import OrderedDict as OrderedDictType
from typing import Set, Tuple, TypeVar, Union, cast, overload
if TYPE_CHECKING:
    # We can only import Protocol if TYPE_CHECKING because it's a development
    # dependency, and is not available at runtime.
    from typing_extensions import Protocol

    class HasGettableStringKeys(Protocol):
        """Structural type for header sources: anything exposing keys() and
        string-keyed item access."""

        def keys(self) -> Iterator[str]:
            ...

        def __getitem__(self, key: str) -> str:
            ...
__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
# Key type
_KT = TypeVar("_KT")
# Value type
_VT = TypeVar("_VT")
# Default type
_DT = TypeVar("_DT")
ValidHTTPHeaderSource = Union[
"HTTPHeaderDict",
Mapping[str, str],
Iterable[Tuple[str, str]],
"HasGettableStringKeys",
]
class _Sentinel(Enum):
    # Sentinel distinguishing "no default passed" from an explicit default of
    # None in getlist().
    not_passed = auto()
def ensure_can_construct_http_header_dict(
    potential: object,
) -> Optional[ValidHTTPHeaderSource]:
    """Narrow `potential` to a type HTTPHeaderDict can be built from, or
    return None when it cannot serve as a header source."""
    if isinstance(potential, HTTPHeaderDict):
        return potential
    if isinstance(potential, Mapping):
        # Full runtime checking of a Mapping's contents is expensive; for
        # typechecking purposes, any Mapping is assumed to be well-shaped.
        return cast(Mapping[str, str], potential)
    if isinstance(potential, Iterable):
        # Same reasoning as for Mapping: trust the element types.
        return cast(Iterable[Tuple[str, str]], potential)
    if hasattr(potential, "keys") and hasattr(potential, "__getitem__"):
        # Duck-typed: looks like it supports keys() plus item access.
        return cast("HasGettableStringKeys", potential)
    return None
class RecentlyUsedContainer(Generic[_KT, _VT], MutableMapping[_KT, _VT]):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.
    :param dispose_func:
        Callback invoked every time an item is evicted from the container;
        called as ``dispose_func(value)``.
    """

    # Insert-ordered mapping; least-recently-used entries sit at the front.
    _container: OrderedDictType[_KT, _VT]
    _maxsize: int
    dispose_func: Optional[Callable[[_VT], None]]
    # Guards all access to _container.
    lock: RLock

    def __init__(
        self, maxsize: int = 10, dispose_func: Optional[Callable[[_VT], None]] = None
    ) -> None:
        super().__init__()
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = OrderedDict()
        self.lock = RLock()

    def __getitem__(self, key: _KT) -> _VT:
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key: _KT, value: _VT) -> None:
        evicted_item = None
        with self.lock:
            # Possibly evict the existing value of 'key'
            try:
                # If the key exists, we'll overwrite it, which won't change the
                # size of the pool. Because accessing a key should move it to
                # the end of the eviction line, we pop it out first.
                evicted_item = key, self._container.pop(key)
                self._container[key] = value
            except KeyError:
                # When the key does not exist, we insert the value first so that
                # evicting works in all cases, including when self._maxsize is 0
                self._container[key] = value
                if len(self._container) > self._maxsize:
                    # If we didn't evict an existing value, and we've hit our maximum
                    # size, then we have to evict the least recently used item from
                    # the beginning of the container.
                    evicted_item = self._container.popitem(last=False)

        # After releasing the lock on the pool, dispose of any evicted value.
        if evicted_item is not None and self.dispose_func:
            _, evicted_value = evicted_item
            self.dispose_func(evicted_value)

    def __delitem__(self, key: _KT) -> None:
        with self.lock:
            value = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self) -> int:
        with self.lock:
            return len(self._container)

    def __iter__(self) -> NoReturn:
        raise NotImplementedError(
            "Iteration over this class is unlikely to be threadsafe."
        )

    def clear(self) -> None:
        """Remove all entries, disposing of each retained value."""
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(self._container.values())
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self) -> Set[_KT]:  # type: ignore[override]
        """Snapshot of current keys (as a set, not a live view)."""
        with self.lock:
            return set(self._container.keys())
class HTTPHeaderDictItemView(Set[Tuple[str, str]]):
    """
    HTTPHeaderDict is unusual for a Mapping[str, str] in that it has two modes
    of address.

    Direct item access returns all values for a name, concatenated:

    >>> d['X-Header-Name']
    'Value1, Value2, Value3'

    Iterating over items, however, yields one distinct pair per header line:

    >>> list(d.items())
    [
        ('X-Header-Name', 'Value1'),
        ('X-Header-Name', 'Value2'),
        ('X-Header-Name', 'Value3')
    ]

    This view satisfies the MutableMapping ABC's items() contract while
    providing that nonstandard iteration: duplicate keys appear, ordered by
    time of first insertion.
    """

    _headers: "HTTPHeaderDict"

    def __init__(self, headers: "HTTPHeaderDict") -> None:
        self._headers = headers

    def __len__(self) -> int:
        # Count header lines (duplicates included) without building a list.
        return sum(1 for _ in self._headers.iteritems())

    def __iter__(self) -> Iterator[Tuple[str, str]]:
        yield from self._headers.iteritems()

    def __contains__(self, item: object) -> bool:
        if not (isinstance(item, tuple) and len(item) == 2):
            return False
        name, value = item
        if not (isinstance(name, str) and isinstance(value, str)):
            return False
        return self._headers._has_value_for_header(name, value)
class HTTPHeaderDict(MutableMapping[str, str]):
    """
    A ``dict`` like container for storing HTTP Headers.

    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.

    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.

    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.

    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.

    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """

    # Maps lowercased field name -> [first-seen cased name, value, value, ...]
    _container: MutableMapping[str, List[str]]

    def __init__(self, headers: Optional[ValidHTTPHeaderSource] = None, **kwargs: str):
        super().__init__()
        self._container = {}  # 'dict' is insert-ordered in Python 3.7+
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)

    def __setitem__(self, key: str, val: str) -> None:
        """Set the field, replacing any existing values for it."""
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        self._container[key.lower()] = [key, val]

    def __getitem__(self, key: str) -> str:
        """Return all values for the field, joined with ', '."""
        val = self._container[key.lower()]
        return ", ".join(val[1:])

    def __delitem__(self, key: str) -> None:
        del self._container[key.lower()]

    def __contains__(self, key: object) -> bool:
        if isinstance(key, str):
            return key.lower() in self._container
        return False

    def __eq__(self, other: object) -> bool:
        # Two header sets are equal when, after merging duplicates, they hold
        # the same case-insensitive name/value pairs.
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return False
        else:
            other_as_http_header_dict = type(self)(maybe_constructable)
            return {k.lower(): v for k, v in self.itermerged()} == {
                k.lower(): v for k, v in other_as_http_header_dict.itermerged()
            }

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __len__(self) -> int:
        # Number of distinct (case-insensitive) field names.
        return len(self._container)

    def __iter__(self) -> Iterator[str]:
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]

    def discard(self, key: str) -> None:
        """Delete the field if present; no error when it is missing."""
        try:
            del self[key]
        except KeyError:
            pass

    def add(self, key: str, val: str) -> None:
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.

        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        key_lower = key.lower()
        new_vals = [key, val]
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        if new_vals is not vals:
            # A list already existed for this field; append the new value.
            vals.append(val)

    def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError(
                f"extend() takes at most 1 positional arguments ({len(args)} given)"
            )
        other = args[0] if len(args) >= 1 else ()

        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key, val in other.items():
                self.add(key, val)
        elif isinstance(other, Iterable):
            other = cast(Iterable[Tuple[str, str]], other)
            for key, value in other:
                self.add(key, value)
        elif hasattr(other, "keys") and hasattr(other, "__getitem__"):
            # THIS IS NOT A TYPESAFE BRANCH
            # In this branch, the object has a `keys` attr but is not a Mapping or any of
            # the other types indicated in the method signature. We do some stuff with
            # it as though it partially implements the Mapping interface, but we're not
            # doing that stuff safely AT ALL.
            for key in other.keys():
                self.add(key, other[key])

        for key, value in kwargs.items():
            self.add(key, value)

    @overload
    def getlist(self, key: str) -> List[str]:
        ...

    @overload
    def getlist(self, key: str, default: _DT) -> Union[List[str], _DT]:
        ...

    def getlist(
        self, key: str, default: Union[_Sentinel, _DT] = _Sentinel.not_passed
    ) -> Union[List[str], _DT]:
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            if default is _Sentinel.not_passed:
                # _DT is unbound; empty list is instance of List[str]
                return []
            # _DT is bound; default is instance of _DT
            return default
        else:
            # _DT may or may not be bound; vals[1:] is instance of List[str], which
            # meets our external interface requirement of `Union[List[str], _DT]`.
            return vals[1:]

    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist

    # Backwards compatibility for http.cookiejar
    get_all = getlist

    def __repr__(self) -> str:
        return f"{type(self).__name__}({dict(self.itermerged())})"

    def _copy_from(self, other: "HTTPHeaderDict") -> None:
        # Bulk-copy, preserving the cased names stored in `other`.
        for key in other:
            val = other.getlist(key)
            self._container[key.lower()] = [key, *val]

    def copy(self) -> "HTTPHeaderDict":
        """Return a copy with independent underlying storage."""
        clone = type(self)()
        clone._copy_from(self)
        return clone

    def iteritems(self) -> Iterator[Tuple[str, str]]:
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val

    def itermerged(self) -> Iterator[Tuple[str, str]]:
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ", ".join(val[1:])

    def items(self) -> HTTPHeaderDictItemView:  # type: ignore[override]
        """View of (name, value) pairs, one per header line (duplicates kept)."""
        return HTTPHeaderDictItemView(self)

    def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
        if header_name in self:
            return potential_value in self._container[header_name.lower()][1:]
        return False
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import time
import boto3
import logging
import cfnresponse
from botocore.exceptions import ClientError
sm_client = boto3.client('sagemaker')
logger = logging.getLogger(__name__)
def delete_apps(domain_id):
    """Delete every KernelGateway app of a SageMaker Studio domain and wait
    until all deletions have completed.

    No-op when the domain cannot be described (e.g. it no longer exists).
    """
    # Use the module logger consistently (the original mixed `logging.info`
    # with the configured `logger`).
    logger.info(f'Start deleting apps for domain id: {domain_id}')

    try:
        sm_client.describe_domain(DomainId=domain_id)
    except ClientError:
        # Narrowed from a bare `except:` — only swallow API errors, which
        # here mean the domain is gone or not visible.
        logger.info(f'Cannot retrieve {domain_id}')
        return

    # Request deletion of every KernelGateway app not already deleted.
    for p in sm_client.get_paginator('list_apps').paginate(DomainIdEquals=domain_id):
        for a in p['Apps']:
            if a['AppType'] == 'KernelGateway' and a['Status'] != 'Deleted':
                sm_client.delete_app(DomainId=a['DomainId'],
                                     UserProfileName=a['UserProfileName'],
                                     AppType=a['AppType'],
                                     AppName=a['AppName'])

    # Poll until no KernelGateway app remains active.
    while True:
        apps = 0
        for p in sm_client.get_paginator('list_apps').paginate(DomainIdEquals=domain_id):
            apps += len([a['AppName'] for a in p['Apps']
                         if a['AppType'] == 'KernelGateway' and a['Status'] != 'Deleted'])
        logger.info(f'Number of active KernelGateway apps: {apps}')
        if not apps:
            # Unlike the original loop, don't sleep one extra time once the
            # count has reached zero.
            break
        time.sleep(5)

    logger.info(f'KernelGateway apps for {domain_id} deleted')
def lambda_handler(event, context):
    """CloudFormation custom-resource handler.

    On Create/Update the physical resource id becomes the Studio DomainId;
    on Delete all KernelGateway apps of that domain are removed first.
    A response is ALWAYS sent back to CloudFormation — otherwise the stack
    operation would hang until its timeout.
    """
    response_data = {}
    physical_resource_id = event.get('PhysicalResourceId')
    try:
        if event['RequestType'] in ['Create', 'Update']:
            physical_resource_id = event.get('ResourceProperties')['DomainId']
        elif event['RequestType'] == 'Delete':
            delete_apps(physical_resource_id)
        cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data, physicalResourceId=physical_resource_id)
    except Exception as exception:
        # Broadened from ClientError: a KeyError (e.g. missing
        # ResourceProperties) previously escaped uncaught, so no FAILED
        # response was sent and CloudFormation hung.
        logger.error(exception)
        cfnresponse.send(event, context, cfnresponse.FAILED, response_data, physicalResourceId=physical_resource_id, reason=str(exception))
import pandas as pd
class IterateWrapper:
    """Callable proxy that fans a method call out over a collection of
    DataFrames and gathers the per-frame results."""

    def __init__(self, iter_df, key):
        self.iter_df = iter_df  # iterable of DataFrames
        self.key = key          # name of the method to invoke on each frame

    def __call__(self, *args, **kwargs):
        gathered = DataFrameArray()
        for frame in self.iter_df:
            bound_method = getattr(frame, self.key)
            gathered.append(bound_method(*args, **kwargs))
        return gathered
class DataFrameArray(list):
    """A list of pandas DataFrames that broadcasts item access and (via
    __getattr__) method calls across all members.

    Use ``.get(i)`` for plain positional access; ``[]`` is broadcast to the
    member frames.
    """

    def get(self, *keys):
        """Positional (list) access, bypassing the broadcasting __getitem__."""
        return super().__getitem__(*keys)

    def _apply(self, method, *args, **kwargs):
        """Apply ``method(df, *args, **kwargs)`` to every member frame."""
        return DataFrameArray([
            method(df, *args, **kwargs) for df in self
        ])

    @property
    def index(self):
        """Concatenation of all member indexes.

        BUG FIX: ``pd.concat`` rejects bare Index objects (it only accepts
        Series/DataFrame), so the indexes are joined with ``Index.append``.
        """
        indexes = [df.index for df in self]
        if not indexes:
            return pd.Index([])
        return indexes[0].append(indexes[1:])

    @property
    def columns(self):
        """Columns of the first member (assumed shared), or None when empty."""
        return self.get(0).columns if len(self) > 0 else None

    def __getitem__(self, *keys):
        # Broadcast indexing to every member DataFrame.
        return DataFrameArray(
            df.__getitem__(*keys) for df in self
        )

    def __getattr__(self, key):
        # Any unknown attribute is treated as a DataFrame method name and
        # wrapped so the call fans out over the members.
        return IterateWrapper(self, key)
from time import sleep

# Smoke-test script: print "Test" roughly ten times per second, forever.
# Stop with Ctrl+C.
while True:
    sleep(0.1)
    print("Test")
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_news.items import SoccerNewsItem
import scrapy_news.url_selector as url_selector
#to run
#scrapy crawl metro
class MetroSpider(scrapy.Spider):
    """Spider for soccer news articles on metro.co.uk.

    Run with: scrapy crawl metro
    """
    name = 'metro'
    allowed_domains = ['metro.co.uk']
    source = 'Metro'
    start_urls = url_selector.get_urls(source)

    def parse(self, response):
        """Extract a SoccerNewsItem from one article page."""
        url = response.url
        datetime = response.css(".post-date ::text").extract_first()
        headline = response.css(".post-title ::text").extract_first()
        subhead = ""
        # BUG FIX: extract_first() returns None when the selector matches
        # nothing, which previously raised AttributeError on .strip().
        author = response.css(".author ::text").extract_first(default='').strip()
        body_text = " ".join(response.css('.article-body p ::text').extract())
        rel_lst = response.css('.zopo-title span ::text').extract()
        vid_text = " ".join(response.css("p.vjs-no-js ::text").extract())
        mor_text = " ".join(response.css(".mor-link ::text").extract())
        # Strip "related article" titles (grouped in runs of three), inline
        # video fallback text and "more" links out of the article body.
        for i in range(0, len(rel_lst), 3):
            i_text = " ".join(rel_lst[i:i + 3])
            body_text = body_text.replace(i_text, "")
        body_text = body_text.replace(vid_text, "")
        body_text = body_text.replace(mor_text, "")
        notice = SoccerNewsItem(
            headline=headline, subhead=subhead,
            author=author, body_text=body_text,
            url=url, datetime=datetime,
            source=self.name)
        yield notice
#Twt
#https://metro.co.uk/2018/07/04/manchester-united-boss-jose-mourinho-tells-friends-keen-xherdan-shaqiri-7684184/
#Instagram
#More than one video
#https://metro.co.uk/2018/08/01/manchester-united-transfer-chief-flies-spain-seal-35million-yerry-mina-deal-7787869/
|
import redis
class RedisClient:
    """Thin wrapper around a connection to the local redis-server."""

    def __init__(self, db=0):
        '''
        Init the RedisClient instance. It should be the direct contact with
        the redis-server on this machine.

        Args:
            db (int): index of the redis logical database to use
        '''
        self.db = db
        # BUG FIX: the db argument was previously stored but never passed to
        # the connection.
        self.redis = redis.Redis(db=db)

    def ping(self) -> bool:
        """Return True when the server answers, False when unreachable."""
        try:
            return self.redis.ping()
        except redis.exceptions.ConnectionError:
            return False

    def set(self, key, value, expiration=None) -> bool:
        """Store key=value with an optional TTL in seconds.

        BUG FIX: the expiration argument was previously ignored and no
        result was returned.
        """
        return self.redis.set(name=key, value=value, ex=expiration)

    def get(self, key):
        """Return the value stored at key (bytes), or None when missing.

        BUG FIX: the lookup result was previously not returned (and the
        ``-> bool`` annotation was wrong).
        """
        return self.redis.get(name=key)

    def flushall(self) -> bool:
        """Delete every key in every database."""
        return self.redis.flushall()

    def flushdb(self) -> bool:
        """Delete every key in the current database.

        BUG FIX: ``flushdb`` was previously only referenced, never called.
        """
        return self.redis.flushdb()
|
#!usr/bin/python
# -*- coding: utf-8 -*-
import random
import os
import numpy as np
import pandas as pd
from pathlib import Path
from functools import partial
import torch
from torch import nn
import warnings
from fastai.torch_core import defaults
from fastai import vision
from fastai.data_block import CategoryList, FloatList
from fastai.basic_train import Learner
from fastai.vision.learner import model_meta, _default_meta
from pyronear.datasets import OpenFire
from pyronear import models
# Disable warnings from fastai using deprecated functions for PyTorch>=1.3
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
# Add split meta data since fastai doesn't have mobilenet
model_meta[models.mobilenet_v2] = lambda m: (m[0][17], m[1])
def set_seed(seed):
    """Seed every source of randomness used during training, for
    reproducibility.

    Args:
        seed (int): seed shared by the python, numpy and torch RNGs
    """
    # Python RNG and hash randomization
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = f"{seed}"
    # Numpy / PyTorch CPU and GPU generators
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every device on multi-GPU setups
    # Trade cudnn autotuning for deterministic kernels
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
class CustomBCELogitsLoss(nn.BCEWithLogitsLoss):
    """BCEWithLogitsLoss that reshapes the target to (N, 1) so it lines up
    with single-logit model output."""

    def forward(self, x, target):
        # Reshape output tensor for BCELoss
        column_target = target.view(-1, 1)
        return super().forward(x, column_target)
def main(args):
    """Train a fire-classification model on OpenFire with fastai.

    Builds the databunch from the train/valid splits, creates the model and
    learner, then fits with the one-cycle policy and saves a checkpoint.

    Args:
        args: parsed command-line namespace (see the argparse setup below)
    """
    if args.deterministic:
        set_seed(42)

    # Set device
    if args.device is None:
        if torch.cuda.is_available():
            args.device = 'cuda:0'
        else:
            args.device = 'cpu'
    defaults.device = torch.device(args.device)

    # Aggregate path and labels into list for fastai ImageDataBunch
    fnames, labels, is_valid = [], [], []
    dataset = OpenFire(root=args.data_path, train=True, download=True)
    for sample in dataset.data:
        fnames.append(dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(False)
    dataset = OpenFire(root=args.data_path, train=False, download=True)
    for sample in dataset.data:
        fnames.append(dataset._images.joinpath(sample['name']).relative_to(dataset.root))
        labels.append(sample['target'])
        is_valid.append(True)
    df = pd.DataFrame.from_dict(dict(name=fnames, label=labels, is_valid=is_valid))

    # Split train and valid sets
    il = vision.ImageList.from_df(df, path=args.data_path).split_from_df('is_valid')
    # Encode labels (floats for binary/BCE, categories otherwise)
    il = il.label_from_df(cols='label', label_cls=FloatList if args.binary else CategoryList)
    # Set transformations
    il = il.transform(vision.get_transforms(), size=args.resize)
    # Create the Databunch
    data = il.databunch(bs=args.batch_size, num_workers=args.workers).normalize(vision.imagenet_stats)
    # Metric
    metric = partial(vision.accuracy_thresh, thresh=0.5) if args.binary else vision.error_rate

    # Create model
    model = models.__dict__[args.model](imagenet_pretrained=args.pretrained,
                                        num_classes=data.c, lin_features=args.lin_feats,
                                        concat_pool=args.concat_pool, bn_final=args.bn_final,
                                        dropout_prob=args.dropout_prob)

    # Create learner
    learner = Learner(data, model,
                      wd=args.weight_decay,
                      loss_func=CustomBCELogitsLoss() if args.binary else nn.CrossEntropyLoss(),
                      metrics=metric)

    # Form layer group for optimization
    # NOTE(review): meta['split'] expects a dict entry, but the mobilenet_v2
    # registration above stores a lambda — confirm fastai's model_meta shape.
    meta = model_meta.get(args.model, _default_meta)
    learner.split(meta['split'])
    # Freeze model's head
    if args.pretrained:
        learner.freeze()

    if args.resume:
        learner.load(args.resume)
    if args.unfreeze:
        learner.unfreeze()

    learner.fit_one_cycle(args.epochs, max_lr=slice(None, args.lr, None),
                          div_factor=args.div_factor, final_div=args.final_div_factor)

    learner.save(args.checkpoint)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and launch training.
    import argparse
    parser = argparse.ArgumentParser(description='PyroNear Classification Training with Fastai',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Input / Output
    parser.add_argument('--data-path', default='./data', help='dataset root folder')
    parser.add_argument('--checkpoint', default='checkpoint', type=str, help='name of output file')
    parser.add_argument('--resume', default=None, help='checkpoint name to resume from')
    # Architecture
    parser.add_argument('--model', default='resnet18', type=str, help='model architecture')
    parser.add_argument("--concat-pool", dest="concat_pool",
                        help="replaces AdaptiveAvgPool2d with AdaptiveConcatPool2d",
                        action="store_true")
    parser.add_argument('--lin-feats', default=512, type=int,
                        help='number of nodes in intermediate head layers')
    parser.add_argument("--bn-final", dest="bn_final",
                        help="adds a batch norm layer after last FC",
                        action="store_true")
    parser.add_argument('--dropout-prob', default=0.5, type=float, help='dropout rate of last FC layer')
    parser.add_argument("--binary", dest="binary",
                        help="should the task be considered as binary Classification",
                        action="store_true")
    parser.add_argument("--pretrained", dest="pretrained",
                        help="use ImageNet pre-trained parameters",
                        action="store_true")
    # Device
    parser.add_argument('--device', default=None, help='device')
    parser.add_argument("--deterministic", dest="deterministic",
                        help="should the training be performed in deterministic mode",
                        action="store_true")
    # Loader
    parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
    parser.add_argument('-s', '--resize', default=224, type=int, help='image size after resizing')
    parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                        help='number of data loading workers')
    # Optimizer
    parser.add_argument('--lr', default=3e-3, type=float, help='maximum learning rate')
    parser.add_argument('--epochs', default=10, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float,
                        metavar='W', help='weight decay',
                        dest='weight_decay')
    parser.add_argument("--unfreeze", dest="unfreeze", help="should all layers be unfrozen",
                        action="store_true")
    # Scheduler
    parser.add_argument('--div-factor', default=25., type=float,
                        help='div factor of OneCycle policy')
    parser.add_argument('--final-div-factor', default=1e4, type=float,
                        help='final div factor of OneCycle policy')
    args = parser.parse_args()

    main(args)
|
from __future__ import annotations
import abc
import asyncio
import dataclasses
import functools
import io
import os.path
import pickle
import shlex
import threading
import uuid
from typing import Callable, TypeVar, Union, Any, Dict, Optional, Sequence, cast, Tuple
import cloudpickle
import fabric
import paramiko.ssh_exception
from meadowgrid import ServerAvailableFolder
from meadowgrid.agent import run_one_job
from meadowgrid.aws_integration import _get_default_region_name
from meadowgrid.config import MEADOWGRID_INTERPRETER
from meadowgrid.coordinator_client import (
_add_deployments_to_job,
_create_py_function,
_make_valid_job_id,
_pickle_protocol_for_deployed_interpreter,
_string_pairs_from_dict,
)
from meadowgrid.deployed_function import (
CodeDeployment,
InterpreterDeployment,
MeadowGridFunction,
VersionedCodeDeployment,
VersionedInterpreterDeployment,
)
from meadowgrid.ec2_alloc import allocate_ec2_instances
from meadowgrid.grid import _get_id_name_function, _get_friendly_name
from meadowgrid.grid_task_queue import (
get_results,
worker_loop,
create_queues_and_add_tasks,
)
from meadowgrid.meadowgrid_pb2 import (
Job,
JobToRun,
ProcessState,
PyCommandJob,
PyFunctionJob,
ServerAvailableInterpreter,
)
from meadowgrid.resource_allocation import Resources
_T = TypeVar("_T")
_U = TypeVar("_U")
# if num_concurrent_tasks isn't specified, by default, launch total_num_tasks *
# _DEFAULT_CONCURRENT_TASKS_FACTOR workers
_DEFAULT_CONCURRENT_TASKS_FACTOR = 0.5
async def _retry(
function: Callable[[], _T],
exception_types: Exception,
max_num_attempts: int = 3,
delay_seconds: float = 1,
) -> _T:
i = 0
while True:
try:
return function()
except exception_types as e: # type: ignore
i += 1
if i >= max_num_attempts:
raise
else:
print(f"Retrying on error: {e}")
await asyncio.sleep(delay_seconds)
@dataclasses.dataclass(frozen=True)
class Deployment:
    """Specifies how the remote environment for a job is set up."""

    # Interpreter (possibly versioned) the job runs under
    interpreter: Union[InterpreterDeployment, VersionedInterpreterDeployment]
    # Where the job's code comes from; None is replaced by an empty
    # ServerAvailableFolder (see _add_defaults_to_deployment)
    code: Union[CodeDeployment, VersionedCodeDeployment, None] = None
    # Extra environment variables for the job process
    environment_variables: Optional[Dict[str, str]] = None
def _add_defaults_to_deployment(
    deployment: Optional[Deployment],
) -> Tuple[
    Union[InterpreterDeployment, VersionedInterpreterDeployment],
    Union[CodeDeployment, VersionedCodeDeployment],
    Dict[str, str],
]:
    """Normalize an optional Deployment into an (interpreter, code,
    environment variables) triple, substituting defaults where unset."""
    if deployment is None:
        # Nothing specified: meadowgrid's own interpreter, an empty folder
        # deployment, and no extra environment variables.
        return (
            ServerAvailableInterpreter(interpreter_path=MEADOWGRID_INTERPRETER),
            ServerAvailableFolder(),
            {},
        )
    interpreter = deployment.interpreter
    code = deployment.code or ServerAvailableFolder()
    env_vars = deployment.environment_variables or {}
    return interpreter, code, env_vars
class Host(abc.ABC):
    """Abstract execution target that can run a single meadowgrid job."""

    @abc.abstractmethod
    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Run the job to completion and return its (unpickled) result."""
        pass
@dataclasses.dataclass(frozen=True)
class LocalHost(Host):
    """Runs the job in-process via the local meadowgrid agent."""

    async def run_job(self, job_to_run: JobToRun) -> Any:
        initial_update, continuation = await run_one_job(job_to_run)

        final_state = initial_update.process_state
        # Only await the continuation when the job actually started running.
        if (
            final_state.state == ProcessState.ProcessStateEnum.RUNNING
            and continuation is not None
        ):
            final_state = (await continuation).process_state

        if final_state.state != ProcessState.ProcessStateEnum.SUCCEEDED:
            # TODO make better error messages
            raise ValueError(f"Error: {final_state.state}")
        return pickle.loads(final_state.pickled_result)
@dataclasses.dataclass(frozen=True)
class SshHost(Host):
    """Runs a job on a remote machine over SSH using fabric, assuming a
    meadowgrid environment is installed there (see build_meadowgrid_amis.md).
    """

    address: str
    # these options are forwarded directly to Fabric
    fabric_kwargs: Optional[Dict[str, Any]] = None

    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Upload the serialized job, run it remotely via meadowrun, and
        return the unpickled result; remote io files are cleaned up in all
        cases."""
        with fabric.Connection(
            self.address, **(self.fabric_kwargs or {})
        ) as connection:
            job_io_prefix = ""

            try:
                # assumes that meadowgrid is installed in /meadowgrid/env as per
                # build_meadowgrid_amis.md. Also uses the default working_folder, which
                # should (but doesn't strictly need to) correspond to
                # agent._set_up_working_folder

                # try the first command 3 times, as this is when we actually try to
                # connect to the remote machine.
                home_result = await _retry(
                    lambda: connection.run("echo $HOME"),
                    cast(Exception, paramiko.ssh_exception.NoValidConnectionsError),
                )
                if not home_result.ok:
                    raise ValueError(
                        "Error getting home directory on remote machine "
                        + home_result.stdout
                    )

                remote_working_folder = f"{home_result.stdout.strip()}/meadowgrid"
                mkdir_result = connection.run(f"mkdir -p {remote_working_folder}/io")
                if not mkdir_result.ok:
                    raise ValueError(
                        "Error creating meadowgrid directory " + mkdir_result.stdout
                    )

                job_io_prefix = f"{remote_working_folder}/io/{job_to_run.job.job_id}"

                # serialize job_to_run and send it to the remote machine
                with io.BytesIO(
                    job_to_run.SerializeToString()
                ) as job_to_run_serialized:
                    connection.put(
                        job_to_run_serialized, remote=f"{job_io_prefix}.job_to_run"
                    )

                # fabric doesn't have any async APIs, which means that in order to run
                # more than one fabric command at the same time, we need to have a
                # thread per fabric command. We use an asyncio.Future here to make the
                # API async, so from the user perspective, it feels like this function
                # is async

                # fabric is supposedly not threadsafe, but it seems to work as long as
                # more than one connection is not being opened at the same time:
                # https://github.com/fabric/fabric/pull/2010/files
                result_future: asyncio.Future = asyncio.Future()
                event_loop = asyncio.get_running_loop()

                def run_and_wait() -> None:
                    # Runs on a worker thread; hands the outcome back to the
                    # event loop via call_soon_threadsafe.
                    try:
                        # use meadowrun to run the job
                        returned_result = connection.run(
                            "/meadowgrid/env/bin/meadowrun "
                            f"--job-id {job_to_run.job.job_id} "
                            f"--working-folder {remote_working_folder} "
                            # TODO this flag should only be passed in if we were
                            # originally using an EC2AllocHost
                            f"--needs-deallocation"
                        )
                        event_loop.call_soon_threadsafe(
                            lambda r=returned_result: result_future.set_result(r)
                        )
                    except Exception as e2:
                        event_loop.call_soon_threadsafe(
                            lambda e2=e2: result_future.set_exception(e2)
                        )

                threading.Thread(target=run_and_wait).start()
                result = await result_future

                # TODO consider using result.tail, result.stdout

                # see if we got a normal return code
                if result.return_code != 0:
                    raise ValueError(f"Process exited {result.return_code}")

                # fetch and deserialize the final ProcessState written remotely
                with io.BytesIO() as result_buffer:
                    connection.get(f"{job_io_prefix}.process_state", result_buffer)
                    result_buffer.seek(0)
                    process_state = ProcessState()
                    process_state.ParseFromString(result_buffer.read())

                if process_state.state == ProcessState.ProcessStateEnum.SUCCEEDED:
                    job_spec_type = job_to_run.job.WhichOneof("job_spec")
                    # we must have a result from functions, in other cases we can
                    # optionally have a result
                    if job_spec_type == "py_function" or process_state.pickled_result:
                        return pickle.loads(process_state.pickled_result)
                    else:
                        return None
                else:
                    # TODO we should throw a better exception
                    raise ValueError(f"Running remotely failed: {process_state}")
            finally:
                if job_io_prefix:
                    remote_paths = " ".join(
                        [
                            f"{job_io_prefix}.job_to_run",
                            f"{job_io_prefix}.state",
                            f"{job_io_prefix}.result",
                            f"{job_io_prefix}.process_state",
                            f"{job_io_prefix}.initial_process_state",
                        ]
                    )
                    try:
                        # -f so that we don't throw an error on files that don't
                        # exist
                        connection.run(f"rm -f {remote_paths}")
                    except Exception as e:
                        print(
                            f"Error cleaning up files on remote machine: "
                            f"{remote_paths} {e}"
                        )

                # TODO also clean up log files?
@dataclasses.dataclass(frozen=True)
class EC2AllocHost(Host):
    """A placeholder for a host that will be allocated/created by ec2_alloc.py"""

    logical_cpu_required: int
    memory_gb_required: float
    interruption_probability_threshold: float
    region_name: Optional[str] = None
    private_key_filename: Optional[str] = None

    async def run_job(self, job_to_run: JobToRun) -> Any:
        """Allocate exactly one EC2 instance, then run the job on it over SSH."""
        region = self.region_name or await _get_default_region_name()
        allocated = await allocate_ec2_instances(
            Resources(self.memory_gb_required, self.logical_cpu_required, {}),
            1,
            self.interruption_probability_threshold,
            region,
        )
        if len(allocated) != 1:
            raise ValueError(f"Asked for one host, but got back {len(allocated)}")

        ssh_kwargs: Dict[str, Any] = {"user": "ubuntu"}
        if self.private_key_filename:
            ssh_kwargs["connect_kwargs"] = {
                "key_filename": self.private_key_filename
            }

        address, job_ids = next(iter(allocated.items()))
        if len(job_ids) != 1:
            raise ValueError(f"Asked for one job allocation but got {len(job_ids)}")

        # Kind of weird that we're changing the job_id here, but okay as long as
        # job_id remains mostly an internal concept
        job_to_run.job.job_id = job_ids[0]

        return await SshHost(address, ssh_kwargs).run_job(job_to_run)
@dataclasses.dataclass(frozen=True)
class EC2AllocHosts:
    """
    A placeholder for a set of hosts that will be allocated/created by ec2_alloc.py
    """

    # per-task resource requirements passed to allocate_ec2_instances
    logical_cpu_required_per_task: int
    memory_gb_required_per_task: float
    # NOTE(review): presumably a spot-interruption threshold forwarded to
    # allocate_ec2_instances -- confirm units/range with ec2_alloc.py
    interruption_probability_threshold: float
    # defaults to half the number of total tasks
    num_concurrent_tasks: Optional[int] = None
    region_name: Optional[str] = None
    private_key_filename: Optional[str] = None
async def run_function(
    function: Callable[..., _T],
    host: Host,
    deployment: Optional[Deployment] = None,
    args: Optional[Sequence[Any]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
) -> _T:
    """
    Same as run_function_async, but runs on a remote machine, specified by "host".
    Connects to the remote machine over SSH via the fabric library
    https://www.fabfile.org/ fabric_kwargs are passed directly to fabric.Connection().
    The remote machine must have meadowgrid installed as per build_meadowgrid_amis.md
    """
    raw_job_id, nice_name, serialized_function = _get_id_name_function(function)
    interpreter, code, env_vars = _add_defaults_to_deployment(deployment)

    wrapped_function = _create_py_function(
        MeadowGridFunction.from_pickled(serialized_function, args, kwargs),
        _pickle_protocol_for_deployed_interpreter(),
    )
    job = Job(
        job_id=_make_valid_job_id(raw_job_id),
        job_friendly_name=_make_valid_job_id(nice_name),
        environment_variables=_string_pairs_from_dict(env_vars),
        result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
        py_function=wrapped_function,
    )
    _add_deployments_to_job(job, code, interpreter)

    # TODO figure out what to do about the [0], which is there for dropping effects
    job_result = await host.run_job(JobToRun(job=job))
    return job_result[0]
async def run_command(
    args: Union[str, Sequence[str]],
    host: Host,
    deployment: Optional[Deployment] = None,
) -> None:
    """
    Runs the specified command on a remote machine. See run_function_remote for more
    details on requirements for the remote host.
    """
    if isinstance(args, str):
        args = shlex.split(args)

    # this is kind of a silly way to get a friendly name--treat the first three
    # elements of args as if they're paths and take the last part of each path
    nice_name = "-".join(os.path.basename(part) for part in args[:3])

    interpreter, code, env_vars = _add_defaults_to_deployment(deployment)

    job = Job(
        job_id=_make_valid_job_id(str(uuid.uuid4())),
        job_friendly_name=_make_valid_job_id(nice_name),
        environment_variables=_string_pairs_from_dict(env_vars),
        result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
        py_command=PyCommandJob(command_line=args),
    )
    _add_deployments_to_job(job, code, interpreter)

    await host.run_job(JobToRun(job=job))
async def run_map(
    function: Callable[[_T], _U],
    args: Sequence[_T],
    hosts: EC2AllocHosts,
    deployment: Optional[Deployment] = None,
) -> Sequence[_U]:
    """Equivalent to map(function, args), but runs distributed."""

    if not hosts.num_concurrent_tasks:
        # as documented on EC2AllocHosts: default to half the number of tasks
        num_concurrent_tasks = len(args) // 2 + 1
    else:
        # never request more workers than there are tasks
        num_concurrent_tasks = min(hosts.num_concurrent_tasks, len(args))

    region_name = hosts.region_name or await _get_default_region_name()

    # the first stage of preparation, which happens concurrently:

    # 1. get hosts
    allocated_hosts_future = asyncio.create_task(
        allocate_ec2_instances(
            Resources(
                hosts.memory_gb_required_per_task,
                hosts.logical_cpu_required_per_task,
                {},
            ),
            num_concurrent_tasks,
            hosts.interruption_probability_threshold,
            region_name,
        )
    )

    # 2. create SQS queues and add tasks to the request queue
    queues_future = asyncio.create_task(create_queues_and_add_tasks(region_name, args))

    # 3. prepare some variables for constructing the worker jobs
    friendly_name = _make_valid_job_id(_get_friendly_name(function))
    interpreter, code, environment_variables = _add_defaults_to_deployment(deployment)
    environment_variables = _string_pairs_from_dict(environment_variables)
    pickle_protocol = _pickle_protocol_for_deployed_interpreter()
    fabric_kwargs: Dict[str, Any] = {"user": "ubuntu"}
    if hosts.private_key_filename:
        fabric_kwargs["connect_kwargs"] = {"key_filename": hosts.private_key_filename}

    # now wait for 1 and 2 to complete:
    request_queue_url, result_queue_url = await queues_future
    allocated_hosts = await allocated_hosts_future

    # Now we will run worker_loop jobs on the hosts we got:

    # every worker runs the same partial, pulling tasks from the request queue
    # and pushing results to the result queue
    pickled_worker_function = cloudpickle.dumps(
        functools.partial(
            worker_loop, function, request_queue_url, result_queue_url, region_name
        ),
        protocol=pickle_protocol,
    )

    worker_tasks = []
    worker_id = 0
    for public_address, worker_job_ids in allocated_hosts.items():
        for worker_job_id in worker_job_ids:
            job = Job(
                job_id=worker_job_id,
                job_friendly_name=friendly_name,
                environment_variables=environment_variables,
                result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
                py_function=PyFunctionJob(
                    pickled_function=pickled_worker_function,
                    # each worker receives its host address and a unique id
                    pickled_function_arguments=pickle.dumps(
                        ([public_address, worker_id], {}), protocol=pickle_protocol
                    ),
                ),
            )
            _add_deployments_to_job(job, code, interpreter)
            worker_tasks.append(
                asyncio.create_task(
                    SshHost(public_address, fabric_kwargs).run_job(JobToRun(job=job))
                )
            )
            worker_id += 1

    # finally, wait for results:
    results = await get_results(result_queue_url, region_name, len(args))

    # not really necessary except interpreter will complain...
    await asyncio.gather(*worker_tasks)

    return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils import setup
def readfile(file_path):
    """Return the text contents of *file_path*, resolved relative to this file.

    FIX: the file is now decoded as UTF-8 explicitly -- with the platform
    default encoding (e.g. cp1252 on Windows) reading README.rst for the
    long_description can fail on non-ASCII characters. ``io.open`` is used so
    the explicit encoding also works under the Python 2.7 support this
    package's classifiers declare.
    """
    import io
    dir_path = os.path.dirname(os.path.abspath(__file__))
    with io.open(os.path.join(dir_path, file_path), 'r', encoding='utf-8') as f:
        return f.read()
# Package metadata for htmldammit; the long description is loaded from
# README.rst at build time via readfile().
setup(
    name='htmldammit',
    version='0.2.0a0',
    description=('Make every effort to properly decode HTML,'
                 ' because HTML is unicode, dammit!'),
    long_description=readfile('README.rst'),
    author='Tal Einat',
    author_email='taleinat@gmail.com',
    url='https://github.com/taleinat/htmldammit',
    packages=['htmldammit', 'htmldammit.integrations'],
    # sources live under src/ (the "src layout")
    package_dir={'': 'src'},
    install_requires=[
        'six',
        'beautifulsoup4',
    ],
    license='MIT',
    keywords='htmldammit HTML unicode',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
class WrappedDictionary(object):
    """An object whose attributes are mirrored into a backing dict.

    Attribute reads fall through to the backing dict; attribute writes update
    both the instance and the dict. ``+`` merges a plain dict or another
    WrappedDictionary into this instance in place and returns it.
    """

    def __init__(self):
        # BUG FIX: bypass our own __setattr__ here. The original did
        # ``self.__dict = {}``, which routed through __setattr__ and recorded
        # the backing dict inside itself ({'_WrappedDictionary__dict': {...}}),
        # polluting repr() and iteration with a self-reference.
        object.__setattr__(self, '_WrappedDictionary__dict', {})

    def __getattr__(self, item):
        # Only called for names not found through normal lookup.
        # BUG FIX: translate KeyError to AttributeError so hasattr() and
        # getattr(obj, name, default) behave correctly for missing keys.
        try:
            return self.__dict[item]
        except KeyError:
            raise AttributeError(item)

    def __setattr__(self, key, value):
        object.__setattr__(self, key, value)
        self.__dict[key] = value

    def __repr__(self):
        return str(self.__dict)

    def __add__(self, other):
        if isinstance(other, WrappedDictionary):
            # BUG FIX: dict.update() cannot consume a WrappedDictionary (its
            # __getattr__ raised KeyError when update() probed for .keys), so
            # merge the other wrapper's backing dict directly.
            self.__dict.update(other.__dict)
        elif isinstance(other, dict):
            self.__dict.update(other)
        else:
            raise TypeError("Cannot add type '{}'".format(type(other)))
        # BUG FIX: the original fell off the end, making ``a + b`` evaluate to
        # None; return self so the merged object is usable.
        return self
# -*- coding: utf-8 -*-
import os
import unittest
from manolo_scraper.spiders.osce import OSCESpider
from utils import fake_response_from_file
class TestOsceSpider(unittest.TestCase):
    """Checks that OSCESpider extracts visitor records from a saved page."""

    def setUp(self):
        self.spider = OSCESpider()

    def test_parse_item(self):
        fixture = os.path.join('data/osce', '18-08-2015.html')
        response = fake_response_from_file(fixture, meta={'date': u'18/08/2015'})
        items = self.spider.parse(response)

        first = next(items)
        expected = [
            ('full_name', u'Silvia Sousa Cristofol'),
            ('time_start', u'16:38'),
            ('institution', u'osce'),
            ('id_document', u'CARNET DE EXTRANJERIA'),
            ('id_number', u'000904735'),
            ('entity', u'everis'),
            ('reason', u'REUNIÓN DE TRABAJO'),
            ('host_name', u'Isabel Rosario Vega Palomino'),
            ('title', u'[Ninguno]'),
            ('office', u'Sala de Espera'),
            ('time_end', None),
            ('date', u'2015-08-18'),
        ]
        for field_name, value in expected:
            self.assertEqual(first.get(field_name), value)

        # the first item was consumed above, so add it back to the count
        number_of_items = 1 + sum(1 for _ in items)
        self.assertEqual(number_of_items, 15)
|
def sunday(x, n):
    """Advance a weekday counter across the month lengths in ``n``.

    ``x`` is the starting counter value; each month advances it by the
    month's length modulo 7, and it is folded back into 1..7 whenever it
    exceeds 7. Returns ``[final_counter, hits]`` where ``hits`` counts the
    months at which the counter was divisible by 7.
    """
    day_code = x
    hits = 0
    for days_in_month in n:
        day_code += days_in_month % 7
        if day_code % 7 == 0:
            hits += 1
        if day_code > 7:
            day_code -= 7
    return [day_code, hits]
if __name__ == '__main__':
    # Project Euler 19: count month-starts that land on a Sunday, 1901-2000.
    ans = 0
    day = 2  # starting weekday code carried into 1901 (see sunday())
    for i in range(1901, 2001):
        print(day)
        months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        # Gregorian leap-year rule. BUG FIX: the original duplicated the
        # sunday() call in byte-identical if/else branches; only the February
        # adjustment needs to be conditional.
        if (i % 4 == 0 and i % 100 != 0) or i % 400 == 0:
            months[1] = 29
        result = sunday(day, months)
        day = result[0]
        ans += result[1]
    print(ans)
|
"""
EXERCÍCIO 011: Pintando Parede
Faça um programa que leia a largura e a altura de uma parede em metros,
calcule a sua área e a quantidade de tinta necessária para pintá-la,
sabendo que cada litro de tinta, pinta uma área de 2 m².
"""
larg = float(input('Largura da parede: '))
alt = float(input('Altura da parede: '))
area = larg * alt
tinta = area / 2
print('Sua parede tem as dimensões de {} x {} e sua área é de {} m².'.format(larg, alt, area))
print('Para pintar essa parede, você precisará de {} litros de tinta.'.format(tinta))
|
import pandas as pd
import numpy as np
import random
# NOTE: the CSV paths are raw strings. FIX: in the original plain literals,
# sequences like '\w' were invalid escape sequences (a DeprecationWarning
# today, a SyntaxError in future Python versions); the raw strings have the
# exact same value.

# Reading data set
df1 = pd.read_csv(r'.\data\wordsDataSet.csv')
# Removed Count column as it was not required.
# FIX: passing axis positionally to drop() was deprecated and removed in
# pandas 2.0; the 'columns' keyword is equivalent.
df1 = df1.drop(columns='count')
# Added length column to remove words with small length
df1['length'] = df1.word.str.len()
df1 = df1[df1.length > 6.0]

df2 = pd.read_csv(r'.\data\CountriesName.csv')
# Removing irrelevant columns
df2.drop(columns=['Region', 'Population', 'Area (sq. mi.)', 'Pop. Density (per sq. mi.)',
                  'Coastline (coast/area ratio)', 'Net migration',
                  'Infant mortality (per 1000 births)', 'GDP ($ per capita)',
                  'Literacy (%)', 'Phones (per 1000)', 'Arable (%)', 'Crops (%)',
                  'Other (%)', 'Climate', 'Birthrate', 'Deathrate', 'Agriculture',
                  'Industry', 'Service'], inplace=True)

# Removing irrelevant columns
df3 = pd.read_csv(r'.\data\ColorNames.csv')
df3.drop(columns=['hex_24_bit', 'red_8_bit', 'green_8_bit', 'blue_8_bit',
                  'hue_degrees', 'hsl_s', 'hsl_l'], inplace=True)

# To keep track of used words.
# NOTE(review): nothing in this file ever appends to usedWord, so the
# "already used" check in get_word never rejects anything -- confirm whether
# callers are expected to update it.
usedWord = np.empty(1, str)

# Converting the DataFrames to numpy arrays of single-column rows
wordArray = df1[['word']].to_numpy()
countriesArray = df2[['Country']].to_numpy()
colorArray = df3[['name']].to_numpy()
def get_word(category):
    """Return a random, not-yet-used word (lower-cased) for a category.

    category: 1 -> dictionary words, 2 -> country names, 3 -> color names.
    Returns None for any other category (matching the original fall-through).

    FIX: the original repeated the same draw-and-retry loop verbatim for all
    three categories; the pools are now selected from a mapping and the loop
    exists once.
    """
    pools = {1: wordArray, 2: countriesArray, 3: colorArray}
    pool = pools.get(category)
    if pool is None:
        return None
    word = ''.join(random.choice(pool))
    while word in usedWord:
        # Generating another word as the fetched word has been used before
        word = ''.join(random.choice(pool))
    return word.lower()
|
import os
from subprocess import Popen, PIPE
from tempfile import TemporaryDirectory
_temp_dir = None
source_path = ""
def reset_checkout():
    """Create (once) a temporary repo checkout and sync it to a clean state."""
    global _temp_dir, source_path

    if _temp_dir is None:
        _temp_dir = TemporaryDirectory()
        source_path = _temp_dir.name

    def run_quietly(command, working_dir):
        # Run a command, discarding its output. Failures are intentionally
        # not checked, matching the original fire-and-forget behaviour.
        proc = Popen(command, stdout=PIPE, stderr=PIPE, cwd=working_dir)
        proc.communicate()

    run_quietly(
        [
            'repo', 'init',
            '-u', 'git://github.com/couchbase/manifest',
            '-m', 'python_tools/patch_via_gerrit/testsuite.xml',
        ],
        source_path,
    )

    # Clean up any stale manifest changes
    run_quietly(
        ['git', 'reset', '--hard', 'origin/master'],
        os.path.join(source_path, '.repo', 'manifests'),
    )

    run_quietly(['repo', 'sync', '-j4'], source_path)
|
from .settings_common import *
DEBUG = False
# This is handy for debugging problems that *only* happen when Debug = False,
# because exceptions are printed directly to the log/console when they happen.
# Just don't leave it on!
# DEBUG_PROPAGATE_EXCEPTIONS = True

# The base location, on disk, where we want to store our generated assets
MEDIA_ROOT = 'perma/assets/generated/'

# Schedule celerybeat jobs.
# These will be added to CELERYBEAT_SCHEDULE in settings.utils.post_processing
CELERY_BEAT_JOB_NAMES = [
    'update-stats',
    'send-links-to-internet-archive',
    'delete-links-from-internet-archive',
    'send-js-errors',
    'run-next-capture',
    'verify_webrecorder_api_available',
    'sync_subscriptions_from_perma_payments',
    'cache_playback_status_for_new_links',
]

# logging
LOGGING['handlers']['file']['filename'] = '/var/log/perma/perma.log'

# use separate subdomain for user content
MEDIA_URL = '//perma-archives.org/media/'

# Our sorl thumbnail settings
# We only use this redis config in prod. dev envs use the local db.
THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore'
THUMBNAIL_REDIS_HOST = 'localhost'
THUMBNAIL_REDIS_PORT = '6379'

# caching
CACHES["default"] = {
    "BACKEND": "django_redis.cache.RedisCache",
    "LOCATION": "redis://127.0.0.1:6379/0",
    "OPTIONS": {
        "CLIENT_CLASS": "django_redis.client.DefaultClient",
        "IGNORE_EXCEPTIONS": True,  # since this is just a cache, we don't want to show errors if redis is offline for some reason
    }
}

# security settings
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
# trust the load balancer's X-Forwarded-Proto header when deciding whether a
# request arrived over HTTPS (standard Django setting for proxied deployments)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# subscription packages
# NOTE(review): rate_ratio is presumably a multiplier applied to a base price
# elsewhere (perma_payments) -- confirm before editing these tiers.
TIERS['Individual'] = [
    {
        'period': 'monthly',
        'link_limit': 10,
        'rate_ratio': 1
    },{
        'period': 'monthly',
        'link_limit': 100,
        'rate_ratio': 2.5
    },{
        'period': 'monthly',
        'link_limit': 500,
        'rate_ratio': 10
    }
]
|
# -*- coding=utf-8 -*-
"""
自编码器的学习目标是 使用少量稀疏的高阶特征重构输入:
1. 限制中间隐层节点的数量,比如让中级那隐含层节点的数量小于输入/输出节点的数量,就相当于一个降维
再给中间隐含层的权重加一个 L1 正则,则可以根据隐含系数控制隐含节点的稀疏程度,惩罚系数越大,学到的特征组合越稀疏,实际使用(非零权重)
的特征数量越少。
2. 如果给数据加入噪声,那么就是 Denoising Auto Encoder(去噪自编码器)
唯有学习数据频繁出现的模式和结构,将无规律的噪声略去,才可以复原数据。
常使用的噪声是 加性高斯噪声(Additive Gaussian Noise)
Hinton 提出的 DBN 模型有多个隐含层,每个隐含层都是限制性玻尔兹曼机 RBM。
下面实现 去噪自编码机(Denoising AutoEncoder)
"""
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# 如果深度学习模型的权重初始化得太小,那信号将在每层间传递时逐渐缩小而难以产生作用
# 如果权重初始化得太大,那么信号将在每层间传递时逐渐放大并导致发散和失效
# 使用 Xavier Initialization 作为参数初始化方法
# Xavier 初始化器做的事情是: 让深度学习模型的权重被初始化得不大不小,正好合适。
# 从数学的角度分析,Xavier 就是让权重满足 0 均值,同时方差为 2/(n_in+n_out), 分布可以用均匀分布或者高斯分布。
def xavier_init(fan_in, fan_out, constant=1):
    """Return a (fan_in, fan_out) weight tensor with Xavier/Glorot init.

    Samples from U(-b, +b) with b = constant * sqrt(6 / (fan_in + fan_out)),
    giving zero mean and variance 2 / (fan_in + fan_out).
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    low = -bound
    # BUG FIX: the original also negated the upper bound
    # (``high = -constant * ...``), collapsing the uniform range to a single
    # point instead of sampling over (-bound, +bound).
    high = bound
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)
class AdditiveGaussianNoiseAutoEncoder(object):
    """Denoising autoencoder with additive Gaussian noise on the input.

    n_input: number of input features
    n_hidden: number of hidden-layer units (a single hidden layer is used)
    transfer_function: hidden-layer activation, defaults to softplus
    optimizer: TensorFlow optimizer, defaults to Adam
    scale: Gaussian noise coefficient used during training; inside the class
        the scale is a placeholder so it can be fed on every run
    """

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # Add noise to the input, multiply by the hidden-layer weights w1,
        # then add the bias b1 and apply the activation.
        # BUG FIX: the noise must use the ``self.scale`` placeholder -- the
        # original multiplied by the Python constant ``scale``, so the value
        # fed for self.scale in every sess.run was silently ignored.
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        # Reconstruction layer: no activation, just hidden @ w2 + b2.
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # Squared-error reconstruction cost.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        """Create w1 (Xavier), b1, w2, b2 (zeros) as TF variables."""
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        # BUG FIX: b1 must have n_hidden entries. The original used
        # ``self.hidden``, which does not even exist when this method runs
        # (it is only assigned later in __init__), so construction raised
        # AttributeError.
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        """Train on one batch X and return the batch cost."""
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        """Return the cost on X without triggering a training step
        (used to evaluate on held-out data after training)."""
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        """Return the hidden-layer output (the learned high-level features)."""
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        """Run only the reconstruction layer on a hidden representation."""
        if hidden is None:
            # BUG FIX: np.random.normal's ``size`` must be a shape; the
            # original passed the TF variable self.weights['b1'], which is not
            # a valid size. Sample a random hidden vector of the right width.
            hidden = np.random.normal(size=self.n_hidden)
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        """Run the full encode -> decode pipeline on X."""
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        """Return the current value of the hidden-layer weights w1."""
        return self.sess.run(self.weights['w1'])

    def getBias(self):
        """Return the current value of the hidden-layer biases b1."""
        return self.sess.run(self.weights['b1'])
# Load the MNIST data set.
# BUG FIX: the loader function is ``input_data.read_data_sets``; the original
# called ``input_data.read_data.read_data_sets``, which raises AttributeError.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def standard_scale(X_train, X_test):
    """Standardize both splits using statistics fitted on the training split."""
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of ``batch_size`` samples from data."""
    highest_start = len(data) - batch_size
    start = np.random.randint(0, highest_start)
    return data[start:start + batch_size]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
# BUG FIX: ``mnits`` was a typo for ``mnist`` (NameError at runtime).
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1  # print the cost once every epoch

autoEncoder = AdditiveGaussianNoiseAutoEncoder(
    n_input=784,
    n_hidden=200,
    transfer_function=tf.nn.softplus,
    # BUG FIX: the original used a full-width ideographic period
    # (``tf.train。AdamOptimizer``), which is a SyntaxError.
    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    scale=0.01)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = autoEncoder.partial_fit(batch_xs)
        # accumulate this batch's per-sample contribution to the epoch cost
        avg_cost += cost / n_samples * batch_size
    if epoch % display_step == 0:
        # '%04d' zero-pads the epoch number; the original '%40d' (pad to
        # width 40) was almost certainly a transposed-digit typo.
        print('Epoch: ', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(avg_cost))

print("Total cost: " + str(autoEncoder.calc_total_cost(X_test)))
|
from __future__ import print_function
import logging
from eosetl_airflow.build_load_dag import build_load_dag
from eosetl_airflow.variables import read_load_dag_vars
logging.basicConfig()
# DEBUG so that DAG-construction details show up in the scheduler logs.
logging.getLogger().setLevel(logging.DEBUG)

# airflow DAG
# Must be assigned at module level so the Airflow scheduler discovers it when
# scanning this file.
DAG = build_load_dag(
    dag_id='eos_load_dag',
    chain='eos',
    **read_load_dag_vars(
        var_prefix='eos_',
        # cron schedule: daily at 15:30
        schedule_interval='30 15 * * *'
    )
)
|
#
# Copyright (c) 2021 Nitric Technologies Pty Ltd.
#
# This file is part of Nitric Python 3 SDK.
# See https://github.com/nitrictech/python-sdk for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import List, AsyncIterator, Union, Any, Tuple
from grpclib import GRPCError
from nitric.api.const import MAX_SUB_COLLECTION_DEPTH
from nitric.api.exception import exception_from_grpc_error
from nitricapi.nitric.document.v1 import (
DocumentServiceStub,
Collection as CollectionMessage,
Key as KeyMessage,
Expression as ExpressionMessage,
ExpressionValue,
Document as DocumentMessage,
)
from nitric.utils import new_default_channel, _dict_from_struct, _struct_from_dict
NIL_DOC_ID = ""
class CollectionDepthException(Exception):
    """The max depth of document sub-collections has been exceeded."""
@dataclass(frozen=True, order=True)
class DocumentRef:
    """A reference to a document in a collection."""

    # Client wrapper that owns the gRPC stub used by get/set/delete.
    _documents: Documents
    # The collection that contains this document.
    parent: CollectionRef
    # The document's unique id within its collection.
    id: str

    def collection(self, name: str) -> CollectionRef:
        """
        Return a reference to a sub-collection of this document.

        This is currently only supported to one level of depth.
        e.g. Documents().collection('a').doc('b').collection('c').doc('d') is valid,
        Documents().collection('a').doc('b').collection('c').doc('d').collection('e') is invalid (1 level too deep).
        """
        current_depth = self.parent.sub_collection_depth()
        if current_depth >= MAX_SUB_COLLECTION_DEPTH:
            # Collection nesting is only supported to a maximum depth.
            raise CollectionDepthException(
                f"sub-collections supported to a depth of {MAX_SUB_COLLECTION_DEPTH}, "
                f"attempted to create new collection with depth {current_depth + 1}"
            )
        return CollectionRef(_documents=self._documents, name=name, parent=self)

    async def get(self) -> Document:
        """Retrieve the contents of this document, if it exists."""
        try:
            response = await self._documents._stub.get(key=_doc_ref_to_wire(self))
            return _document_from_wire(documents=self._documents, message=response.document)
        except GRPCError as grpc_err:
            # Surface gRPC failures as the SDK's own exception types.
            raise exception_from_grpc_error(grpc_err)

    async def set(self, content: dict):
        """
        Set the contents of this document.

        If the document exists it will be updated, otherwise a new document will be created.
        """
        try:
            await self._documents._stub.set(
                key=_doc_ref_to_wire(self),
                content=_struct_from_dict(content),
            )
        except GRPCError as grpc_err:
            raise exception_from_grpc_error(grpc_err)

    async def delete(self):
        """Delete this document, if it exists."""
        try:
            await self._documents._stub.delete(
                key=_doc_ref_to_wire(self),
            )
        except GRPCError as grpc_err:
            raise exception_from_grpc_error(grpc_err)
def _document_from_wire(documents: Documents, message: DocumentMessage) -> Document:
    """Build a Document (ref + content) from its protobuf message."""
    return Document(
        _ref=_doc_ref_from_wire(documents=documents, message=message.key),
        content=_dict_from_struct(message.content),
    )
def _doc_ref_to_wire(ref: DocumentRef) -> KeyMessage:
    """Convert a DocumentRef into its protobuf Key message."""
    collection_message = _collection_to_wire(ref.parent)
    return KeyMessage(id=ref.id, collection=collection_message)
def _doc_ref_from_wire(documents: Documents, message: KeyMessage) -> DocumentRef:
    """Convert a protobuf Key message back into a DocumentRef."""
    parent = _collection_from_wire(documents=documents, message=message.collection)
    return DocumentRef(_documents=documents, id=message.id, parent=parent)
def _collection_to_wire(ref: CollectionRef) -> CollectionMessage:
    """Convert a CollectionRef into its protobuf Collection message."""
    if not ref.is_sub_collection():
        return CollectionMessage(name=ref.name)
    # Sub-collections also serialize their parent document reference.
    parent_message = _doc_ref_to_wire(ref.parent) if ref.parent else None
    return CollectionMessage(name=ref.name, parent=parent_message)
def _collection_from_wire(documents: Documents, message: CollectionMessage) -> CollectionRef:
    """Convert a protobuf Collection message back into a CollectionRef."""
    if message.parent:
        parent = _doc_ref_from_wire(documents=documents, message=message.parent)
    else:
        parent = None
    return CollectionRef(_documents=documents, name=message.name, parent=parent)
@dataclass(frozen=True, order=True)
class CollectionRef:
    """A reference to a collection of documents."""

    # Client wrapper that owns the gRPC stub used for document operations.
    _documents: Documents
    name: str
    # Parent document for sub-collections; None for a top-level collection.
    parent: Union[DocumentRef, None] = field(default_factory=lambda: None)

    def doc(self, doc_id: str) -> DocumentRef:
        """Return a reference to a document in the collection."""
        return DocumentRef(_documents=self._documents, parent=self, id=doc_id)

    def collection(self, name: str) -> CollectionGroupRef:
        """
        Return a reference to a sub-collection of this document.

        This is currently only supported to one level of depth.
        e.g. Documents().collection('a').collection('b').doc('c') is valid,
        Documents().collection('a').doc('b').collection('c').collection('d') is invalid (1 level too deep).
        """
        current_depth = self.sub_collection_depth()
        if current_depth >= MAX_SUB_COLLECTION_DEPTH:
            # Collection nesting is only supported to a maximum depth.
            raise CollectionDepthException(
                f"sub-collections supported to a depth of {MAX_SUB_COLLECTION_DEPTH}, "
                f"attempted to create new collection with depth {current_depth + 1}"
            )
        return CollectionGroupRef(_documents=self._documents, name=name, parent=self)

    def query(
        self,
        paging_token: Any = None,
        limit: int = 0,
        expressions: Union[Expression, List[Expression]] = None,
    ) -> QueryBuilder:
        """Return a query builder scoped to this collection."""
        return QueryBuilder(
            documents=self._documents,
            collection=self,
            paging_token=paging_token,
            limit=limit,
            # accept either a single expression or a list of them
            expressions=[expressions] if isinstance(expressions, Expression) else expressions,
        )

    def sub_collection_depth(self) -> int:
        """Return the depth of this collection, which is a count of the parents above this collection."""
        if not self.is_sub_collection():
            return 0
        else:
            # self.parent is a DocumentRef, so step through it to the
            # collection that contains that document.
            return self.parent.parent.sub_collection_depth() + 1

    def is_sub_collection(self):
        """Return True if this collection is a sub-collection of a document in another collection."""
        return self.parent is not None
@dataclass(frozen=True, order=True)
class CollectionGroupRef:
    """A reference to a collection group."""

    # Client wrapper that owns the gRPC stub used for document operations.
    _documents: Documents
    name: str
    # Parent collection; None when this group refers to a top-level collection.
    parent: Union[CollectionRef, None] = field(default_factory=lambda: None)

    def query(
        self,
        paging_token: Any = None,
        limit: int = 0,
        expressions: Union[Expression, List[Expression]] = None,
    ) -> QueryBuilder:
        """Return a query builder scoped to this collection."""
        return QueryBuilder(
            documents=self._documents,
            collection=self.to_collection_ref(),
            paging_token=paging_token,
            limit=limit,
            # accept either a single expression or a list of them
            expressions=[expressions] if isinstance(expressions, Expression) else expressions,
        )

    def sub_collection_depth(self) -> int:
        """Return the depth of this collection group, which is a count of the parents above this collection."""
        if not self.is_sub_collection():
            return 0
        else:
            return self.parent.sub_collection_depth() + 1

    def is_sub_collection(self):
        """Return True if this collection is a sub-collection of a document in another collection."""
        return self.parent is not None

    def to_collection_ref(self):
        """Return this collection group as a collection ref."""
        return CollectionRef(
            self._documents,
            self.name,
            DocumentRef(
                self._documents,
                self.parent,
                NIL_DOC_ID,
            ),
        )

    @staticmethod
    def from_collection_ref(collectionRef: CollectionRef, documents: Documents) -> CollectionGroupRef:
        """Return a collection ref as a collection group."""
        if collectionRef.parent is not None:
            return CollectionGroupRef(
                documents,
                collectionRef.name,
                CollectionGroupRef.from_collection_ref(
                    collectionRef.parent,
                    documents,
                ),
            )
        # BUG FIX: the original had no base case, so a top-level collection
        # (parent is None) fell through and implicitly returned None, losing
        # the reference entirely; return a parentless group instead.
        return CollectionGroupRef(documents, collectionRef.name)
class Operator(Enum):
    """Valid query expression operators."""

    # The .value strings are what gets sent on the wire (see
    # Expression._to_wire, which serializes operator.value).
    less_than = "<"
    greater_than = ">"
    less_than_or_equal = "<="
    greater_than_or_equal = ">="
    equals = "=="
    starts_with = "startsWith"
class _ExpressionBuilder:
    """Builder for creating query expressions using magic methods."""

    def __init__(self, operand):
        # operand: the document property name the expressions will filter on.
        self._operand = operand

    # The comparison dunders intentionally return Expression objects rather
    # than booleans. (A side effect of defining __eq__ is that Python sets
    # __hash__ to None, so builder instances are unhashable.)
    def __eq__(self, other) -> Expression:
        return Expression(self._operand, Operator.equals, other)

    def __lt__(self, other) -> Expression:
        return Expression(self._operand, Operator.less_than, other)

    def __le__(self, other) -> Expression:
        return Expression(self._operand, Operator.less_than_or_equal, other)

    def __gt__(self, other) -> Expression:
        return Expression(self._operand, Operator.greater_than, other)

    def __ge__(self, other) -> Expression:
        return Expression(self._operand, Operator.greater_than_or_equal, other)

    # Named equivalents of the operator overloads above, for callers that
    # prefer explicit method calls.
    def eq(self, other) -> Expression:
        return self == other

    def lt(self, other) -> Expression:
        return self < other

    def le(self, other) -> Expression:
        return self <= other

    def gt(self, other) -> Expression:
        return self > other

    def ge(self, other) -> Expression:
        return self >= other

    def starts_with(self, match) -> Expression:
        return Expression(self._operand, Operator.starts_with, match)
def condition(name: str) -> _ExpressionBuilder:
    """Return an expression builder for the document property ``name``.

    The builder's magic methods construct Expression objects, e.g.
    ``condition('first_name') == 'john'`` is equivalent to
    ``Expression('first_name', '==', 'john')``.
    Supported operations are ==, <, >, <=, >=, .starts_with()
    """
    return _ExpressionBuilder(name)
@dataclass(order=True)
class Expression:
    """Query expressions, representing a boolean operation used for query filters."""

    # document property the filter applies to
    operand: str
    # an Operator, or its string value (normalized in __post_init__)
    operator: Union[Operator, str]
    value: Union[str, int, float, bool]

    def __post_init__(self):
        if isinstance(self.operator, str):
            # Convert string operators to their enum values
            self.operator = Operator(self.operator)

    def _value_to_expression_value(self):
        """Return an ExpressionValue message representation of the value of this expression."""
        if isinstance(self.value, str):
            return ExpressionValue(string_value=self.value)
        # Check bool before numbers, because booleans are numbers.
        if isinstance(self.value, bool):
            return ExpressionValue(bool_value=self.value)
        if isinstance(self.value, int):
            return ExpressionValue(int_value=self.value)
        if isinstance(self.value, float):
            return ExpressionValue(double_value=self.value)
        # NOTE(review): any other value type falls through and returns None
        # here -- confirm whether that should raise instead.

    def _to_wire(self):
        """Return the Expression protobuf message representation of this expression."""
        return ExpressionMessage(
            operand=self.operand,
            operator=self.operator.value,
            value=self._value_to_expression_value(),
        )

    def __str__(self):
        return "{0} {1} {2}".format(self.operand, self.operator.name, self.value)
@dataclass(frozen=True, order=True)
class Document:
    """A single document together with the metadata required to locate it."""

    _ref: DocumentRef
    content: dict

    @property
    def id(self):
        """Return the unique id of this document."""
        ref = self._ref
        return ref.id

    @property
    def collection(self) -> CollectionRef:
        """Return a reference to the collection that contains this document."""
        ref = self._ref
        return ref.parent

    @property
    def ref(self):
        """Return the DocumentRef pointing at this document."""
        return self._ref
@dataclass(frozen=True, order=True)
class QueryResultsPage:
    """Represents a page of results from a query.

    Attributes:
        paging_token: opaque token for fetching the next page; None when exhausted.
        documents: the documents returned in this page.
    """

    # `Any` (typing) — the original annotated this with the builtin `any` function.
    paging_token: Any = field(default=None)
    documents: List[Document] = field(default_factory=list)

    def has_more_pages(self) -> bool:
        """Return False if the paging token is None or empty (both represent no more pages)."""
        return bool(self.paging_token)
class QueryBuilder:
    """Document query builder for retrieving documents from a collection based on filters."""

    _documents: Documents
    _collection: CollectionRef
    _paging_token: Any
    _limit: int
    _expressions: List[Expression]

    def __init__(
        self,
        documents: Documents,
        collection: CollectionRef,
        paging_token: Any = None,
        limit: int = 0,
        expressions: List[Expression] = None,
    ):
        """Construct a new QueryBuilder.

        :param documents: the Documents client used to execute the query.
        :param collection: the collection to query.
        :param paging_token: token from a previously fetched page, if resuming.
        :param limit: maximum number of results; 0 means unlimited.
        :param expressions: initial filter expressions, if any.
        """
        self._documents = documents
        self._collection = collection
        self._paging_token = paging_token
        self._limit = limit  # default of 0 means unlimited.
        if expressions is None:
            # Fresh list per builder; a shared default list would leak filters
            # between instances.
            self._expressions = []
        else:
            self._expressions = expressions

    def _flat_expressions(self, expressions) -> List[Expression]:
        """Process possible inputs for .where() into a flattened list of expressions."""
        if isinstance(expressions, tuple) and len(expressions) == 3 and isinstance(expressions[0], str):
            # handle the special case where an expression was passed in as its component arguments.
            # e.g. .where('age', '>', 30) instead of .where(condition('age') > 30)
            return [Expression(*expressions)]
        if isinstance(expressions, Expression):
            # when a single expression is received, wrap in a list and return it
            return [expressions]
        else:
            # flatten lists of lists into single dimension list of expressions
            exps = []
            for exp in expressions:
                exps = exps + self._flat_expressions(exp)
            return exps

    def where(
        self,
        *expressions: Union[
            Expression, List[Expression], Union[str, Operator, int, bool, Tuple[str, Union[str, Operator], Any]]
        ],
    ) -> QueryBuilder:
        """
        Add a filter expression to the query.

        :param expressions: a single expression or a set of expression args or a variadic/tuple/list of expressions.

        Examples
        --------
        .where('age', '>', 20)
        .where(condition('age') > 20)
        .where(condition('age').gt(20))
        .where(
            condition('age') > 20,
            condition('age') < 50,
        )
        .where(
            [
                condition('age') > 20,
                condition('age') < 50,
            ]
        )
        .where(
            ('age', '>', 20),
            ('age', '<', 50),
        )
        """
        for expression in self._flat_expressions(expressions):
            self._expressions.append(expression)
        return self

    def page_from(self, token) -> QueryBuilder:
        """
        Set the paging token for the query.

        Used when requesting subsequent pages from a query.
        """
        self._paging_token = token
        return self

    def limit(self, limit: int) -> QueryBuilder:
        """Set the maximum number of results returned by this query (0 = unlimited)."""
        if limit is None or not isinstance(limit, int) or limit < 0:
            raise ValueError("limit must be a positive integer or 0 for unlimited.")
        self._limit = limit
        return self

    def _expressions_to_wire(self) -> List[ExpressionMessage]:
        """Return this query's expressions as a list of their protobuf message representation."""
        return [expression._to_wire() for expression in self._expressions]

    async def stream(self) -> AsyncIterator[Document]:
        """Return all query results as a stream."""
        # Paging is incompatible with streaming: the stream already yields all results.
        if self._paging_token is not None:
            raise ValueError("page_from() should not be used with streamed queries.")
        try:
            async for result in self._documents._stub.query_stream(
                collection=_collection_to_wire(self._collection),
                expressions=self._expressions_to_wire(),
                limit=self._limit,
            ):
                yield _document_from_wire(documents=self._documents, message=result.document)
        except GRPCError as grpc_err:
            raise exception_from_grpc_error(grpc_err)

    async def fetch(self) -> QueryResultsPage:
        """
        Fetch a single page of results.

        If a page has been fetched previously, a token can be provided via page_from(), to fetch the subsequent pages.
        """
        try:
            results = await self._documents._stub.query(
                collection=_collection_to_wire(self._collection),
                expressions=self._expressions_to_wire(),
                limit=self._limit,
                paging_token=self._paging_token,
            )
            return QueryResultsPage(
                # Normalize an empty wire token to None so has_more_pages() is False.
                paging_token=results.paging_token if results.paging_token else None,
                documents=[
                    _document_from_wire(documents=self._documents, message=result) for result in results.documents
                ],
            )
        except GRPCError as grpc_err:
            raise exception_from_grpc_error(grpc_err)

    def __eq__(self, other):
        # Equality is defined via repr, which captures collection, paging, filters and limit.
        return self.__repr__() == other.__repr__()

    def __str__(self):
        repr_str = "from {0}".format(str(self._collection))
        if self._paging_token:
            repr_str += ", paging token {0}".format(str(self._paging_token))
        if len(self._expressions):
            repr_str += ", where " + " and ".join([str(exp) for exp in self._expressions])
        # Bug fix: was `self._limit != 1`, which wrongly reported "limit to 0
        # results" for the default unlimited query and hid a limit of 1.
        if self._limit:
            repr_str += ", limit to {0} results".format(self._limit)
        return "Query({0})".format(repr_str)

    def __repr__(self):
        repr_str = "Documents.collection({0}).query()".format(self._collection)
        if self._paging_token:
            repr_str += ".page_from({0})".format(self._paging_token)
        if len(self._expressions):
            repr_str += "".join([".where({0})".format(str(exp)) for exp in self._expressions])
        # Bug fix: was `self._limit != 1` (see __str__).
        if self._limit:
            repr_str += ".limit({0})".format(self._limit)
        return repr_str
class Documents(object):
    """
    Nitric client for interacting with document collections.

    This client insulates application code from stack specific event operations or SDKs.
    """

    _stub: DocumentServiceStub

    def __init__(self):
        """Construct a Nitric Document Client."""
        self._channel = new_default_channel()
        self._stub = DocumentServiceStub(channel=self._channel)

    def __del__(self):
        # Close the channel when this client is destroyed.
        # getattr guard: if __init__ raised before `_channel` was assigned,
        # this finalizer must not raise AttributeError.
        if getattr(self, "_channel", None) is not None:
            self._channel.close()

    def collection(self, name: str) -> CollectionRef:
        """Return a reference to a document collection."""
        return CollectionRef(_documents=self, name=name)
# Entry-point lookup defaults — presumably the module/attribute the runtime
# loads the application from; TODO confirm against the loader code.
module_name = "main"
variable_name = "app"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
    """This class implements the logic necessary for operating on partitions
    with a Pandas backend. This logic is specific to Pandas."""

    def __init__(
        self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
    ):
        """Wrap a partition manager plus axis metadata.

        Args:
            block_partitions_object: BaseFrameManager holding the partitioned data.
            index: row labels for the frame.
            columns: column labels for the frame.
            dtypes: optional pre-computed dtypes; seeds the dtype cache so
                `.dtypes` needs no remote compute.
            is_transposed: whether the blocks are stored transposed.
        """
        assert isinstance(block_partitions_object, BaseFrameManager)
        self.data = block_partitions_object
        self.index = index
        self.columns = columns
        if dtypes is not None:
            self._dtype_cache = dtypes
        # Stored as int (0/1) so it can be used directly in axis arithmetic (^).
        self._is_transposed = int(is_transposed)
    # Index, columns and dtypes objects

    # Cached pandas.Series of per-column dtypes; computed lazily on first access.
    _dtype_cache = None

    def _get_dtype(self):
        """Compute (and cache) the dtypes of each column via a map-reduce."""
        if self._dtype_cache is None:

            def dtype_builder(df):
                # Reduce step: collapse per-partition dtypes to a common type.
                return df.apply(lambda row: find_common_type(row.values), axis=0)

            map_func = self._prepare_method(
                self._build_mapreduce_func(lambda df: df.dtypes)
            )
            reduce_func = self._build_mapreduce_func(dtype_builder)
            # For now we will use a pandas Series for the dtypes.
            if len(self.columns) > 0:
                self._dtype_cache = (
                    self._full_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
                )
            else:
                self._dtype_cache = pandas.Series([])
            # reset name to None because we use "__reduced__" internally
            self._dtype_cache.name = None
        return self._dtype_cache

    dtypes = property(_get_dtype)
    def compute_index(self, axis, data_object, compute_diff=True):
        """Computes the index after a number of rows have been removed.

        Note: In order for this to be used properly, the indexes must not be
        changed before you compute this.

        Args:
            axis: The axis to extract the index from.
            data_object: The new data object to extract the index from.
            compute_diff: True to use `self` to compute the index from self
                rather than data_object. This is used when the dimension of the
                index may have changed, but the deleted rows/columns are
                unknown.

        Returns:
            A new pandas.Index object.
        """

        def pandas_index_extraction(df, axis):
            # Pull row labels (axis 0) or column labels (axis 1) from a partition.
            if not axis:
                return df.index
            else:
                try:
                    return df.columns
                except AttributeError:
                    # Series-like partition data has no columns attribute.
                    return pandas.Index([])

        index_obj = self.index if not axis else self.columns
        old_blocks = self.data if compute_diff else None
        new_indices = data_object.get_indices(
            axis=axis,
            index_func=lambda df: pandas_index_extraction(df, axis),
            old_blocks=old_blocks,
        )
        # With compute_diff, `new_indices` index into the old labels; without
        # it, they already are the new labels.
        return index_obj[new_indices] if compute_diff else new_indices
def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
    # Lazily-populated caches for the row and column labels.
    _index_cache = None
    _columns_cache = None

    def _get_index(self):
        # Getter behind the `index` property below.
        return self._index_cache

    def _get_columns(self):
        # Getter behind the `columns` property below.
        return self._columns_cache

    def _set_index(self, new_index):
        # First assignment only coerces; later assignments must preserve length.
        if self._index_cache is None:
            self._index_cache = ensure_index(new_index)
        else:
            new_index = self._validate_set_axis(new_index, self._index_cache)
            self._index_cache = new_index

    def _set_columns(self, new_columns):
        # First assignment only coerces; later assignments must preserve length.
        if self._columns_cache is None:
            self._columns_cache = ensure_index(new_columns)
        else:
            new_columns = self._validate_set_axis(new_columns, self._columns_cache)
            self._columns_cache = new_columns

    columns = property(_get_columns, _set_columns)
    index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
    def numeric_function_clean_dataframe(self, axis):
        """Preprocesses numeric functions to clean dataframe and pick numeric indices.

        Args:
            axis: '0' if columns and '1' if rows.

        Returns:
            Tuple with return value(if any), indices to apply func to & cleaned Manager.
        """
        # NOTE(review): the axis wording above is inherited; confirm 0=columns
        # against callers before relying on it.
        result = None
        query_compiler = self
        # If no numeric columns and over columns, then return empty Series
        if not axis and len(self.index) == 0:
            result = pandas.Series(dtype=np.int64)

        nonnumeric = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if not is_numeric_dtype(dtype)
        ]
        if len(nonnumeric) == len(self.columns):
            # If over rows and no numeric columns, return this
            if axis:
                result = pandas.Series([np.nan for _ in self.index])
            else:
                result = pandas.Series([0 for _ in self.index])
        else:
            # Drop non-numeric columns so the numeric function applies cleanly.
            query_compiler = self.drop(columns=nonnumeric)
        return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(prefix) + str(x))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(prefix) + str(x))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(x) + str(suffix))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(x) + str(suffix))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
    # END Metadata modification methods

    # Copy
    # For copy, we don't want a situation where we modify the metadata of the
    # copies if we end up modifying something here. We copy all of the metadata
    # to prevent that.
    def copy(self):
        """Return a copy with independently-copied axis metadata.

        Note: the dtype cache is shared with the original (passed uncopied);
        the data object and both label Indexes are copied.
        """
        return self.__constructor__(
            self.data.copy(),
            self.index.copy(),
            self.columns.copy(),
            self._dtype_cache,
            self._is_transposed,
        )

    # END Copy
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
    def concat(self, axis, other, **kwargs):
        """Concatenates two objects together.

        Args:
            axis: The axis index object to join (0 for columns, 1 for index).
            other: The other_index to concat with.

        Returns:
            Concatenated objects.
        """
        # Thin wrapper: appending handles both single objects and lists.
        return self._append_list_of_managers(other, axis, **kwargs)
    def _append_list_of_managers(self, others, axis, **kwargs):
        """Append `others` to this query compiler along `axis`.

        Args:
            others: a query compiler or list of query compilers to append.
            axis: 0 to append rows, 1 to append columns.

        Returns:
            New QueryCompiler with the concatenated data and labels.
        """
        if not isinstance(others, list):
            others = [others]
        if self._is_transposed:
            # If others are transposed, we handle that behavior correctly in
            # `copartition`, but it is not handled correctly in the case that `self` is
            # transposed.
            return (
                self.transpose()
                ._append_list_of_managers(
                    [o.transpose() for o in others], axis ^ 1, **kwargs
                )
                .transpose()
            )
        assert all(
            isinstance(other, type(self)) for other in others
        ), "Different Manager objects are being used. This is not allowed"
        sort = kwargs.get("sort", None)
        join = kwargs.get("join", "outer")
        ignore_index = kwargs.get("ignore_index", False)
        # Align partitions along the opposite axis before concatenating.
        new_self, to_append, joined_axis = self.copartition(
            axis ^ 1,
            others,
            join,
            sort,
            force_repartition=any(obj._is_transposed for obj in [self] + others),
        )
        new_data = new_self.concat(axis, to_append)
        if axis == 0:
            # The indices will be appended to form the final index.
            # If `ignore_index` is true, we create a RangeIndex that is the
            # length of all of the index objects combined. This is the same
            # behavior as pandas.
            new_index = (
                self.index.append([other.index for other in others])
                if not ignore_index
                else pandas.RangeIndex(
                    len(self.index) + sum(len(other.index) for other in others)
                )
            )
            return self.__constructor__(new_data, new_index, joined_axis)
        else:
            # The columns will be appended to form the final columns.
            new_columns = self.columns.append([other.columns for other in others])
            return self.__constructor__(new_data, joined_axis, new_columns)
    def _join_list_of_managers(self, others, **kwargs):
        """Column-wise join of `self` with a list of other query compilers.

        Args:
            others: list of query compilers to join with.

        Returns:
            New QueryCompiler with joined data, index, and suffixed columns.
        """
        assert isinstance(
            others, list
        ), "This method is for lists of QueryCompiler objects only"
        assert all(
            isinstance(other, type(self)) for other in others
        ), "Different Manager objects are being used. This is not allowed"
        # Uses join's default value (though should not revert to default)
        how = kwargs.get("how", "left")
        sort = kwargs.get("sort", False)
        lsuffix = kwargs.get("lsuffix", "")
        rsuffix = kwargs.get("rsuffix", "")
        new_self, to_join, joined_index = self.copartition(
            0,
            others,
            how,
            sort,
            force_repartition=any(obj._is_transposed for obj in [self] + others),
        )
        new_data = new_self.concat(1, to_join)
        # This stage is to efficiently get the resulting columns, including the
        # suffixes.
        if len(others) == 1:
            others_proxy = pandas.DataFrame(columns=others[0].columns)
        else:
            others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
        self_proxy = pandas.DataFrame(columns=self.columns)
        # Empty proxy frames let pandas compute the suffixed column labels
        # without touching any real data.
        new_columns = self_proxy.join(
            others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
        ).columns
        return self.__constructor__(new_data, joined_index, new_columns)

    # END Append/Concat/Join

    # Copartition
    def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
        """Copartition two QueryCompiler objects.

        Args:
            axis: The axis to copartition along.
            other: The other Query Compiler(s) to copartition against.
            how_to_join: How to manage joining the index object ("left", "right", etc.)
            sort: Whether or not to sort the joined index.
            force_repartition: Whether or not to force the repartitioning. By default,
                this method will skip repartitioning if it is possible. This is because
                reindexing is extremely inefficient. Because this method is used to
                `join` or `append`, it is vital that the internal indices match.

        Returns:
            A tuple (left query compiler, right query compiler list, joined index).
        """
        if isinstance(other, type(self)):
            other = [other]
        # Labels to align on: rows for axis 0, columns otherwise.
        index_obj = (
            [o.index for o in other] if axis == 0 else [o.columns for o in other]
        )
        joined_index = self._join_index_objects(
            axis ^ 1, index_obj, how_to_join, sort=sort
        )
        # We have to set these because otherwise when we perform the functions it may
        # end up serializing this entire object.
        left_old_idx = self.index if axis == 0 else self.columns
        right_old_idxes = index_obj
        # Start with this and we'll repartition the first time, and then not again.
        reindexed_self = self.data
        reindexed_other_list = []

        def compute_reindex(old_idx):
            """Create a function based on the old index and axis.

            Args:
                old_idx: The old index/columns

            Returns:
                A function that will be run in each partition.
            """

            def reindex_partition(df):
                # Restore real labels, reindex against the joined labels, then
                # reset internal labels to a positional RangeIndex.
                if axis == 0:
                    df.index = old_idx
                    new_df = df.reindex(index=joined_index)
                    new_df.index = pandas.RangeIndex(len(new_df.index))
                else:
                    df.columns = old_idx
                    new_df = df.reindex(columns=joined_index)
                    new_df.columns = pandas.RangeIndex(len(new_df.columns))
                return new_df

            return reindex_partition

        for i in range(len(other)):
            # If the indices are equal we can skip partitioning so long as we are not
            # forced to repartition. See note above about `force_repartition`.
            if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
                reindex_left = None
            else:
                reindex_left = self._prepare_method(compute_reindex(left_old_idx))
            if right_old_idxes[i].equals(joined_index) and not force_repartition:
                reindex_right = None
            else:
                reindex_right = compute_reindex(right_old_idxes[i])
            reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
                axis,
                other[i].data,
                reindex_left,
                reindex_right,
                other[i]._is_transposed,
            )
            reindexed_other_list.append(reindexed_other)
        return reindexed_self, reindexed_other_list, joined_index
    # Data Management Methods
    def free(self):
        """In the future, this will hopefully trigger a cleanup of this object.
        """
        # TODO create a way to clean up this object.
        # Currently a deliberate no-op placeholder.
        return

    # END Data Management Methods
    # To/From Pandas
    def to_pandas(self):
        """Converts Modin DataFrame to Pandas DataFrame.

        Returns:
            Pandas DataFrame of the QueryCompiler.
        """
        df = self.data.to_pandas(is_transposed=self._is_transposed)
        if df.empty:
            # Preserve metadata for empty frames: dtypes when columns exist,
            # otherwise just the axis labels.
            if len(self.columns) != 0:
                df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
            else:
                df = pandas.DataFrame(columns=self.columns, index=self.index)
        else:
            # Sanity check: partition data must agree with cached axis lengths.
            ErrorMessage.catch_bugs_and_request_email(
                len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
            )
            df.index = self.index
            df.columns = self.columns
        return df
    @classmethod
    def from_pandas(cls, df, block_partitions_cls):
        """Build a QueryCompiler from a pandas DataFrame.

        Args:
            cls: DataManger object to convert the DataFrame to.
            df: Pandas DataFrame object.
            block_partitions_cls: BlockParitions object to store partitions

        Returns:
            Returns QueryCompiler containing data from the Pandas DataFrame.
        """
        new_index = df.index
        new_columns = df.columns
        new_dtypes = df.dtypes
        new_data = block_partitions_cls.from_pandas(df)
        return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
    # END To/From Pandas

    # To NumPy
    def to_numpy(self):
        """Converts Modin DataFrame to NumPy Array.

        Returns:
            NumPy Array of the QueryCompiler.
        """
        arr = self.data.to_numpy(is_transposed=self._is_transposed)
        # Sanity check: materialized shape must agree with cached axis lengths.
        ErrorMessage.catch_bugs_and_request_email(
            len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
        )
        return arr

    # END To NumPy
    # Inter-Data operations (e.g. add, sub)
    # These operations require two DataFrames and will change the shape of the
    # data if the index objects don't match. An outer join + op is performed,
    # such that columns/rows that don't have an index on the other DataFrame
    # result in NaN values.
    def _inter_manager_operations(self, other, how_to_join, func):
        """Inter-data operations (e.g. add, sub).

        Args:
            other: The other Manager for the operation.
            how_to_join: The type of join to join to make (e.g. right, outer).
            func: binary function applied to each pair of aligned partitions.

        Returns:
            New QueryCompiler with new data and index.
        """
        reindexed_self, reindexed_other_list, joined_index = self.copartition(
            0, other, how_to_join, sort=False
        )
        # unwrap list returned by `copartition`.
        reindexed_other = reindexed_other_list[0]
        new_columns = self._join_index_objects(
            0, other.columns, how_to_join, sort=False
        )
        # There is an interesting serialization anomaly that happens if we do
        # not use the columns in `inter_data_op_builder` from here (e.g. if we
        # pass them in). Passing them in can cause problems, so we will just
        # use them from here.
        self_cols = self.columns
        other_cols = other.columns

        def inter_data_op_builder(left, right, func):
            left.columns = self_cols
            right.columns = other_cols
            # We reset here to make sure that the internal indexes match. We aligned
            # them in the previous step, so this step is to prevent mismatches.
            left.index = pandas.RangeIndex(len(left.index))
            right.index = pandas.RangeIndex(len(right.index))
            result = func(left, right)
            result.columns = pandas.RangeIndex(len(result.columns))
            return result

        new_data = reindexed_self.inter_data_operation(
            1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
        )
        return self.__constructor__(new_data, joined_index, new_columns)
    def _inter_df_op_handler(self, func, other, **kwargs):
        """Helper method for inter-manager and scalar operations.

        Args:
            func: The function to use on the Manager/scalar.
            other: The other Manager/scalar.

        Returns:
            New QueryCompiler with new data and index.
        """
        axis = kwargs.get("axis", 0)
        # Normalize axis ("index"/"columns" or 0/1) via pandas itself.
        axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
        if isinstance(other, type(self)):
            # If this QueryCompiler is transposed, copartition can sometimes fail to
            # properly co-locate the data. It does not fail if other is transposed, so
            # if this object is transposed, we will transpose both and do the operation,
            # then transpose at the end.
            if self._is_transposed:
                return (
                    self.transpose()
                    ._inter_manager_operations(
                        other.transpose(), "outer", lambda x, y: func(x, y, **kwargs)
                    )
                    .transpose()
                )
            return self._inter_manager_operations(
                other, "outer", lambda x, y: func(x, y, **kwargs)
            )
        else:
            return self._scalar_operations(
                axis, other, lambda df: func(df, other, **kwargs)
            )
    def binary_op(self, op, other, **kwargs):
        """Perform an operation between two objects.

        Note: The list of operations is as follows:
            - add
            - eq
            - floordiv
            - ge
            - gt
            - le
            - lt
            - mod
            - mul
            - ne
            - pow
            - rfloordiv
            - rmod
            - rpow
            - rsub
            - rtruediv
            - sub
            - truediv
            - __and__
            - __or__
            - __xor__

        Args:
            op: The operation. See list of operations above
            other: The object to operate against.

        Returns:
            A new QueryCompiler object.
        """
        # Resolve the named operation to the pandas.DataFrame method of the same name.
        func = getattr(pandas.DataFrame, op)
        return self._inter_df_op_handler(func, other, **kwargs)
    def clip(self, lower, upper, **kwargs):
        """Clip values at the given bounds (delegates to pandas.DataFrame.clip).

        Args:
            lower: lower clip threshold (scalar or list-like).
            upper: upper clip threshold (scalar or list-like).

        Returns:
            A new QueryCompiler with clipped values.
        """
        kwargs["upper"] = upper
        kwargs["lower"] = lower
        axis = kwargs.get("axis", 0)
        func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
        if is_list_like(lower) or is_list_like(upper):
            df = self._map_across_full_axis(axis, func)
            return self.__constructor__(df, self.index, self.columns)
        # NOTE(review): `lower or upper` selects `upper` when lower is 0/None;
        # the bounds themselves are carried in kwargs, but confirm that
        # _scalar_operations never uses this value beyond type dispatch.
        return self._scalar_operations(axis, lower or upper, func)
    def update(self, other, **kwargs):
        """Uses other manager to update corresponding values in this manager.

        Args:
            other: The other manager.

        Returns:
            New QueryCompiler with updated data and index.
        """
        assert isinstance(
            other, type(self)
        ), "Must have the same QueryCompiler subclass to perform this operation"

        def update_builder(df, other, **kwargs):
            # This is because of a requirement in Arrow
            df = df.copy()
            df.update(other, **kwargs)
            return df

        return self._inter_df_op_handler(update_builder, other, **kwargs)
    def where(self, cond, other, **kwargs):
        """Gets values from this manager where cond is true else from other.

        Args:
            cond: Condition on which to evaluate values.
            other: replacement source; a QueryCompiler or a scalar/Series.

        Returns:
            New QueryCompiler with updated data and index.
        """
        assert isinstance(
            cond, type(self)
        ), "Must have the same QueryCompiler subclass to perform this operation"
        if isinstance(other, type(self)):
            # Note: Currently we are doing this with two maps across the entire
            # data. This can be done with a single map, but it will take a
            # modification in the `BlockPartition` class.
            # If this were in one pass it would be ~2x faster.
            # TODO (devin-petersohn) rewrite this to take one pass.
            def where_builder_first_pass(cond, other, **kwargs):
                return cond.where(cond, other, **kwargs)

            def where_builder_second_pass(df, new_other, **kwargs):
                return df.where(new_other.eq(True), new_other, **kwargs)

            first_pass = cond._inter_manager_operations(
                other, "left", where_builder_first_pass
            )
            final_pass = self._inter_manager_operations(
                first_pass, "left", where_builder_second_pass
            )
            return self.__constructor__(final_pass.data, self.index, self.columns)
        else:
            axis = kwargs.get("axis", 0)
            # Rather than serializing and passing in the index/columns, we will
            # just change this index to match the internal index.
            if isinstance(other, pandas.Series):
                other.index = pandas.RangeIndex(len(other.index))

            def where_builder_series(df, cond):
                # Align internal labels positionally before applying `where`.
                if axis == 0:
                    df.index = pandas.RangeIndex(len(df.index))
                    cond.index = pandas.RangeIndex(len(cond.index))
                else:
                    df.columns = pandas.RangeIndex(len(df.columns))
                    cond.columns = pandas.RangeIndex(len(cond.columns))
                return df.where(cond, other, **kwargs)

            reindexed_self, reindexed_cond, a = self.copartition(
                axis, cond, "left", False
            )
            # Unwrap from list given by `copartition`
            reindexed_cond = reindexed_cond[0]
            new_data = reindexed_self.inter_data_operation(
                axis, lambda l, r: where_builder_series(l, r), reindexed_cond
            )
            return self.__constructor__(new_data, self.index, self.columns)

    # END Inter-Data operations
    # Single Manager scalar operations (e.g. add to scalar, list of scalars)
    def _scalar_operations(self, axis, scalar, func):
        """Handler for mapping scalar operations across a Manager.

        Args:
            axis: The axis index object to execute the function on.
            scalar: The scalar value to map.
            func: The function to use on the Manager with the scalar.

        Returns:
            A new QueryCompiler with updated data and new index.
        """
        if isinstance(scalar, (list, np.ndarray, pandas.Series)):
            new_index = self.index if axis == 0 else self.columns

            def list_like_op(df):
                # Restore real labels inside the partition so the list-like
                # operand aligns by label rather than by position.
                if axis == 0:
                    df.index = new_index
                else:
                    df.columns = new_index
                return func(df)

            new_data = self._map_across_full_axis(
                axis, self._prepare_method(list_like_op)
            )
            if axis == 1 and isinstance(scalar, pandas.Series):
                # A row-wise Series operand can introduce new columns.
                new_columns = self.columns.union(
                    [label for label in scalar.index if label not in self.columns]
                )
            else:
                new_columns = self.columns
            return self.__constructor__(new_data, self.index, new_columns)
        else:
            # True scalars apply independently per partition.
            return self._map_partitions(self._prepare_method(func))

    # END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
if self._is_transposed:
return (
self.transpose()
.reindex(axis=axis ^ 1, labels=labels, **kwargs)
.transpose()
)
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builer(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
    """Removes all levels from index and sets a default level_0 index.

    Returns:
        A new QueryCompiler with updated data and reset index.
    """
    drop = kwargs.get("drop", False)
    new_index = pandas.RangeIndex(len(self.index))
    if not drop:
        if isinstance(self.index, pandas.MultiIndex):
            # Each level of the MultiIndex becomes a leading data column.
            # TODO (devin-petersohn) ensure partitioning is properly aligned
            new_column_names = pandas.Index(self.index.names)
            new_columns = new_column_names.append(self.columns)
            # zip(*self.index) turns the list of index tuples into one
            # row per level; transposing makes one column per level.
            index_data = pandas.DataFrame(list(zip(*self.index))).T
            result = self.data.from_pandas(index_data).concat(1, self.data)
            return self.__constructor__(result, new_index, new_columns)
        else:
            # pandas naming convention: use the index's own name, else
            # "index", else "level_0" when a column named "index" exists.
            new_column_name = (
                self.index.name
                if self.index.name is not None
                else "index"
                if "index" not in self.columns
                else "level_0"
            )
            new_columns = self.columns.insert(0, new_column_name)
            result = self.insert(0, new_column_name, self.index)
            return self.__constructor__(result.data, new_index, new_columns)
    else:
        # The copies here are to ensure that we do not give references to
        # this object for the purposes of updates.
        return self.__constructor__(
            self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
        )
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
    """Transposes this QueryCompiler.

    Returns:
        Transposed new QueryCompiler.
    """
    # Only the block layout is transposed here; the data inside each block
    # stays as-is and the toggled ``is_transposed`` flag tells later
    # operations to account for that.
    transposed_blocks = self.data.transpose(*args, **kwargs)
    return self.__constructor__(
        transposed_blocks,
        self.columns,
        self.index,
        is_transposed=self._is_transposed ^ 1,
    )
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler, which will be handled in the front end.
def _full_reduce(self, axis, map_func, reduce_func=None):
    """Apply function that will reduce the data to a Pandas Series.

    Args:
        axis: 0 for columns and 1 for rows. Default is 0.
        map_func: Callable function to map the dataframe.
        reduce_func: Callable function to reduce the dataframe. If none,
            then apply map_func twice.
    Return:
        A new QueryCompiler object containing the results from map_func and
        reduce_func.
    """
    reduce_func = map_func if reduce_func is None else reduce_func
    # Two-phase reduction: map over every block, then combine the partial
    # results across the full axis.
    reduced = self.data.map_across_blocks(map_func).map_across_full_axis(
        axis, reduce_func
    )
    if axis == 0:
        return self.__constructor__(
            reduced, index=["__reduced__"], columns=self.columns
        )
    return self.__constructor__(
        reduced, index=self.index, columns=["__reduced__"]
    )
def _build_mapreduce_func(self, func, **kwargs):
def _map_reduce_func(df):
series_result = func(df, **kwargs)
if kwargs.get("axis", 0) == 0 and isinstance(series_result, pandas.Series):
# In the case of axis=0, we need to keep the shape of the data
# consistent with what we have done. In the case of a reduction, the
# data for axis=0 should be a single value for each column. By
# transposing the data after we convert to a DataFrame, we ensure that
# the columns of the result line up with the columns from the data.
# axis=1 does not have this requirement because the index already will
# line up with the index of the data based on how pandas creates a
# DataFrame from a Series.
return pandas.DataFrame(series_result).T
return pandas.DataFrame(series_result)
return _map_reduce_func
def count(self, **kwargs):
    """Counts the number of non-NaN objects for each column or row.

    Return:
        A new QueryCompiler object containing counts of non-NaN objects from each
        column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().count(**kwargs)
    # Map phase counts per partition; reduce phase sums the partial counts.
    return self._full_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.count, **kwargs),
        self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs),
    )
def dot(self, other):
    """Computes the matrix multiplication of self and other.

    Args:
        other: The other query compiler or other array-like to matrix
            multiply with self.
    Returns:
        Returns the result of the matrix multiply.
    """
    if self._is_transposed:
        return self.transpose().dot(other).transpose()

    # ``other`` is bound as a default argument so the closure carries its
    # value even when shipped to remote partitions.
    def map_func(df, other=other):
        if isinstance(other, pandas.DataFrame):
            other = other.squeeze()
        result = df.squeeze().dot(other)
        if is_list_like(result):
            return pandas.DataFrame(result)
        else:
            # Scalar result (e.g. series . series); wrap for uniform shape.
            return pandas.DataFrame([result])

    if isinstance(other, BaseQueryCompiler):
        if len(self.columns) > 1 and len(other.columns) == 1:
            # If self is DataFrame and other is a series, we take the transpose
            # to copartition along the columns.
            new_self = self
            other = other.transpose()
            axis = 1
            new_index = self.index
        elif len(self.columns) == 1 and len(other.columns) > 1:
            # If self is series and other is a Dataframe, we take the transpose
            # to copartition along the columns.
            new_self = self.transpose()
            axis = 1
            new_index = self.index
        elif len(self.columns) == 1 and len(other.columns) == 1:
            # If both are series, then we copartition along the rows.
            new_self = self
            axis = 0
            new_index = ["__reduce__"]
        # NOTE(review): if both operands have multiple columns, none of the
        # branches above runs and ``axis``/``new_index`` are unbound below
        # (NameError). Also "__reduce__" differs from the "__reduced__"
        # marker used elsewhere in this file -- confirm intent.
        new_self, list_of_others, _ = new_self.copartition(
            axis, other, "left", False
        )
        other = list_of_others[0]
        reduce_func = self._build_mapreduce_func(
            pandas.DataFrame.sum, axis=axis, skipna=False
        )
        new_data = new_self.groupby_reduce(axis, other, map_func, reduce_func)
    else:
        if len(self.columns) == 1:
            # Series-like self: the whole product reduces to a single row.
            axis = 0
            new_index = ["__reduce__"]
        else:
            axis = 1
            new_index = self.index
        new_data = self.data.map_across_full_axis(axis, map_func)
    return self.__constructor__(new_data, index=new_index, columns=["__reduced__"])
def max(self, **kwargs):
    """Returns the maximum value for each column or row.

    Return:
        A new QueryCompiler object with the maximum values from each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().max(**kwargs)
    # max is associative, so map and reduce use the same function.
    return self._full_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.max, **kwargs),
    )
def mean(self, **kwargs):
    """Returns the mean for each numerical column or row.

    Return:
        A new QueryCompiler object containing the mean from each numerical column or
        row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().mean(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    axis = kwargs.get("axis", 0)
    # mean = sum / count, computed as two distributed reductions.
    sums = self.sum(**kwargs)
    counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
    # Align orientations before dividing, in case both reductions came
    # back transposed.
    if sums._is_transposed and counts._is_transposed:
        sums = sums.transpose()
        counts = counts.transpose()
    result = sums.binary_op("truediv", counts, axis=axis)
    # NOTE(review): the final transpose for axis=0 restores the reduced
    # orientation produced by _full_reduce -- confirm against its layout.
    return result.transpose() if axis == 0 else result
def min(self, **kwargs):
    """Returns the minimum from each column or row.

    Return:
        A new QueryCompiler object with the minimum value from each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().min(**kwargs)
    # min is associative, so map and reduce use the same function.
    return self._full_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.min, **kwargs),
    )
def _process_sum_prod(self, func, **kwargs):
    """Calculates the sum or product of the DataFrame.

    Args:
        func: pandas reduction to apply (``pandas.DataFrame.sum`` or
            ``pandas.DataFrame.prod``).
    Return:
        A new QueryCompiler object with the sum or prod of the object.
    """
    axis = kwargs.get("axis", 0)

    def sum_prod_builder(df, **kwargs):
        return func(df, **kwargs)

    builder = self._build_mapreduce_func(sum_prod_builder, **kwargs)
    # With min_count > 1 the reduction must see the whole axis at once
    # (presumably to count non-NA values globally), so the two-phase
    # map/reduce path cannot be used.
    if kwargs.get("min_count", 0) <= 1:
        return self._full_reduce(axis, builder)
    return self._full_axis_reduce(axis, builder)
def prod(self, **kwargs):
    """Returns the product of each numerical column or row.

    Return:
        A new QueryCompiler object with the product of each numerical column or row.
    """
    if not self._is_transposed:
        return self._process_sum_prod(pandas.DataFrame.prod, **kwargs)
    kwargs["axis"] = kwargs.get("axis", 0) ^ 1
    return self.transpose().prod(**kwargs)
def sum(self, **kwargs):
    """Returns the sum of each numerical column or row.

    Return:
        A new QueryCompiler object with the sum of each numerical column or row.
    """
    if not self._is_transposed:
        return self._process_sum_prod(pandas.DataFrame.sum, **kwargs)
    kwargs["axis"] = kwargs.get("axis", 0) ^ 1
    return self.transpose().sum(**kwargs)
def _process_all_any(self, func, **kwargs):
    """Calculates if any or all the values are true.

    Return:
        A new QueryCompiler object containing boolean values or boolean.
    """
    # Normalize axis=None to 0 and pin the resolved value back into kwargs
    # so the builder sees the same axis used for the reduction.
    axis = kwargs.get("axis", 0)
    if axis is None:
        axis = 0
    kwargs["axis"] = axis
    return self._full_reduce(axis, self._build_mapreduce_func(func, **kwargs))
def all(self, **kwargs):
    """Returns whether all the elements are true, potentially over an axis.

    Return:
        A new QueryCompiler object containing boolean values or boolean.
    """
    if self._is_transposed:
        # BUG FIX: previously ``bool_only`` was forced to False
        # unconditionally here, while the sibling ``any`` only overrides it
        # for axis=1 (the axis pandas ignores it on). Match ``any`` so
        # axis=0 calls still honor a caller-supplied bool_only.
        if kwargs.get("axis", 0) == 1:
            # Pandas ignores on axis=1
            kwargs["bool_only"] = False
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().all(**kwargs)
    return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
def any(self, **kwargs):
    """Returns whether any the elements are true, potentially over an axis.

    Return:
        A new QueryCompiler object containing boolean values or boolean.
    """
    if not self._is_transposed:
        return self._process_all_any(
            lambda df, **kwargs: df.any(**kwargs), **kwargs
        )
    if kwargs.get("axis", 0) == 1:
        # Pandas ignores on axis=1
        kwargs["bool_only"] = False
    kwargs["axis"] = kwargs.get("axis", 0) ^ 1
    return self.transpose().any(**kwargs)
# END Full Reduce operations
# Map partitions operations
# These operations are operations that apply a function to every partition.
def _map_partitions(self, func, new_dtypes=None):
    # Apply ``func`` independently to every block partition; labels are
    # unchanged, and the caller may supply the dtypes of the result.
    mapped = self.data.map_across_blocks(func)
    return self.__constructor__(mapped, self.index, self.columns, new_dtypes)
def abs(self):
    # Element-wise absolute value; dtypes are preserved, so reuse a copy.
    func = self._prepare_method(pandas.DataFrame.abs)
    return self._map_partitions(func, new_dtypes=self.dtypes.copy())

def applymap(self, func):
    # Element-wise application of ``func`` to every partition.
    remote_func = self._prepare_method(pandas.DataFrame.applymap, func=func)
    return self._map_partitions(remote_func)

def invert(self):
    # Element-wise ``~`` (boolean/bitwise inversion).
    remote_func = self._prepare_method(pandas.DataFrame.__invert__)
    return self._map_partitions(remote_func)

def isin(self, **kwargs):
    # Membership test; the result is always boolean-typed.
    func = self._prepare_method(pandas.DataFrame.isin, **kwargs)
    new_dtypes = pandas.Series(
        [np.dtype("bool") for _ in self.columns], index=self.columns
    )
    return self._map_partitions(func, new_dtypes=new_dtypes)

def isna(self):
    # NaN/None mask; the result is always boolean-typed.
    func = self._prepare_method(pandas.DataFrame.isna)
    new_dtypes = pandas.Series(
        [np.dtype("bool") for _ in self.columns], index=self.columns
    )
    return self._map_partitions(func, new_dtypes=new_dtypes)
def memory_usage(self, axis=0, **kwargs):
    """Returns the memory usage of each column.

    Returns:
        A new QueryCompiler object containing the memory usage of each column.
    """
    if self._is_transposed:
        return self.transpose().memory_usage(axis=1, **kwargs)

    def memory_usage_builder(df, **kwargs):
        # ``axis`` is consumed here because pandas' memory_usage has no
        # axis parameter of its own.
        axis = kwargs.pop("axis")
        # We have to manually change the orientation of the data within the
        # partitions because memory_usage does not take in an axis argument
        # and always does it along columns.
        if axis:
            df = df.T
        result = df.memory_usage(**kwargs)
        return result

    def sum_memory_usage(df, **kwargs):
        # Reduce phase: combine per-partition usages into per-label totals.
        axis = kwargs.pop("axis")
        return df.sum(axis=axis)

    # Even though memory_usage does not take in an axis argument, we have to
    # pass in an axis kwargs for _build_mapreduce_func to properly arrange
    # the results.
    map_func = self._build_mapreduce_func(memory_usage_builder, axis=axis, **kwargs)
    reduce_func = self._build_mapreduce_func(sum_memory_usage, axis=axis, **kwargs)
    return self._full_reduce(axis, map_func, reduce_func)
def negative(self, **kwargs):
    # Element-wise unary minus.
    func = self._prepare_method(pandas.DataFrame.__neg__, **kwargs)
    return self._map_partitions(func)

def notna(self):
    # Inverse NaN mask; the result is always boolean-typed.
    func = self._prepare_method(pandas.DataFrame.notna)
    new_dtypes = pandas.Series(
        [np.dtype("bool") for _ in self.columns], index=self.columns
    )
    return self._map_partitions(func, new_dtypes=new_dtypes)

def round(self, **kwargs):
    # Rounding keeps numeric dtypes, so the cached dtypes stay valid.
    func = self._prepare_method(pandas.DataFrame.round, **kwargs)
    return self._map_partitions(func, new_dtypes=self._dtype_cache)
# END Map partitions operations
# String map partition operations
def _str_map_partitions(self, func, new_dtypes=None, **kwargs):
    # Lift a ``Series.str`` operation to per-partition DataFrames: squeeze
    # the single-column frame to a Series, apply the str function, and box
    # the result back into a frame.
    def str_op_builder(df, **kwargs):
        return func(df.squeeze().str, **kwargs).to_frame()

    prepared = self._prepare_method(str_op_builder, **kwargs)
    return self._map_partitions(prepared, new_dtypes=new_dtypes)
# Each helper below forwards one ``pandas.Series.str`` operation to the
# partitions via _str_map_partitions. String-returning ops keep the existing
# dtypes; predicate ops declare a bool result dtype.
def str_split(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.split, new_dtypes=self.dtypes, **kwargs
    )

def str_rsplit(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.rsplit, new_dtypes=self.dtypes, **kwargs
    )

def str_get(self, i):
    return self._str_map_partitions(
        pandas.Series.str.get, new_dtypes=self.dtypes, i=i
    )

def str_join(self, sep):
    return self._str_map_partitions(
        pandas.Series.str.join, new_dtypes=self.dtypes, sep=sep
    )

def str_contains(self, pat, **kwargs):
    kwargs["pat"] = pat
    # Predicate: result is boolean.
    new_dtypes = pandas.Series([bool])
    return self._str_map_partitions(
        pandas.Series.str.contains, new_dtypes=new_dtypes, **kwargs
    )

def str_replace(self, pat, repl, **kwargs):
    kwargs["pat"] = pat
    kwargs["repl"] = repl
    return self._str_map_partitions(
        pandas.Series.str.replace, new_dtypes=self.dtypes, **kwargs
    )
def str_repeats(self, repeats):
    """Repeats each string element ``repeats`` times (Series.str.repeat)."""
    # BUG FIX: the pandas str accessor method is named ``repeat``; there is
    # no ``pandas.Series.str.repeats`` attribute, so the previous reference
    # raised AttributeError as soon as this method was called.
    return self._str_map_partitions(
        pandas.Series.str.repeat, new_dtypes=self.dtypes, repeats=repeats
    )
# Padding/justify/slice helpers: all keep the existing (string) dtypes.
def str_pad(self, width, **kwargs):
    kwargs["width"] = width
    return self._str_map_partitions(
        pandas.Series.str.pad, new_dtypes=self.dtypes, **kwargs
    )

def str_center(self, width, **kwargs):
    kwargs["width"] = width
    return self._str_map_partitions(
        pandas.Series.str.center, new_dtypes=self.dtypes, **kwargs
    )

def str_ljust(self, width, **kwargs):
    kwargs["width"] = width
    return self._str_map_partitions(
        pandas.Series.str.ljust, new_dtypes=self.dtypes, **kwargs
    )

def str_rjust(self, width, **kwargs):
    kwargs["width"] = width
    return self._str_map_partitions(
        pandas.Series.str.rjust, new_dtypes=self.dtypes, **kwargs
    )

def str_zfill(self, width):
    return self._str_map_partitions(
        pandas.Series.str.zfill, new_dtypes=self.dtypes, width=width
    )

def str_wrap(self, width, **kwargs):
    kwargs["width"] = width
    return self._str_map_partitions(
        pandas.Series.str.wrap, new_dtypes=self.dtypes, **kwargs
    )

def str_slice(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.slice, new_dtypes=self.dtypes, **kwargs
    )

def str_slice_replace(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.slice_replace, new_dtypes=self.dtypes, **kwargs
    )

def str_count(self, pat, **kwargs):
    kwargs["pat"] = pat
    # Counting matches yields integers.
    new_dtypes = pandas.Series([int])
    # We have to pass in a lambda because pandas.Series.str.count does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.count(**kwargs), new_dtypes=new_dtypes
    )

def str_startswith(self, pat, **kwargs):
    kwargs["pat"] = pat
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.startswith does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.startswith(**kwargs), new_dtypes=new_dtypes
    )

def str_endswith(self, pat, **kwargs):
    kwargs["pat"] = pat
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.endswith does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.endswith(**kwargs), new_dtypes=new_dtypes
    )

def str_findall(self, pat, **kwargs):
    kwargs["pat"] = pat
    # We have to pass in a lambda because pandas.Series.str.findall does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.findall(**kwargs), new_dtypes=self.dtypes
    )

def str_match(self, pat, **kwargs):
    kwargs["pat"] = pat
    return self._str_map_partitions(
        pandas.Series.str.match, new_dtypes=self.dtypes, **kwargs
    )

def str_len(self):
    # Lengths are integers.
    new_dtypes = pandas.Series([int])
    return self._str_map_partitions(pandas.Series.str.len, new_dtypes=new_dtypes)

def str_strip(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.strip, new_dtypes=self.dtypes, **kwargs
    )

def str_rstrip(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.rstrip, new_dtypes=self.dtypes, **kwargs
    )

def str_lstrip(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.lstrip, new_dtypes=self.dtypes, **kwargs
    )

def str_partition(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.partition, new_dtypes=self.dtypes, **kwargs
    )

def str_rpartition(self, **kwargs):
    return self._str_map_partitions(
        pandas.Series.str.rpartition, new_dtypes=self.dtypes, **kwargs
    )
# Case/search/classification helpers: same per-partition delegation
# pattern; is* predicates declare a bool result dtype.
def str_lower(self):
    # We have to pass in a lambda because pandas.Series.str.lower does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.lower(), new_dtypes=self.dtypes
    )

def str_upper(self):
    # We have to pass in a lambda because pandas.Series.str.upper does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.upper(), new_dtypes=self.dtypes
    )

def str_find(self, sub, **kwargs):
    kwargs["sub"] = sub
    return self._str_map_partitions(
        pandas.Series.str.find, new_dtypes=self.dtypes, **kwargs
    )

def str_rfind(self, sub, **kwargs):
    kwargs["sub"] = sub
    return self._str_map_partitions(
        pandas.Series.str.rfind, new_dtypes=self.dtypes, **kwargs
    )

def str_index(self, sub, **kwargs):
    kwargs["sub"] = sub
    return self._str_map_partitions(
        pandas.Series.str.index, new_dtypes=self.dtypes, **kwargs
    )

def str_rindex(self, sub, **kwargs):
    kwargs["sub"] = sub
    return self._str_map_partitions(
        pandas.Series.str.rindex, new_dtypes=self.dtypes, **kwargs
    )

def str_capitalize(self):
    # We have to pass in a lambda because pandas.Series.str.capitalize does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.capitalize(), new_dtypes=self.dtypes
    )

def str_swapcase(self):
    # We have to pass in a lambda because pandas.Series.str.swapcase does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.swapcase(), new_dtypes=self.dtypes
    )

def str_normalize(self, form):
    return self._str_map_partitions(
        pandas.Series.str.normalize, new_dtypes=self.dtypes, form=form
    )

def str_translate(self, table, **kwargs):
    kwargs["table"] = table
    return self._str_map_partitions(
        pandas.Series.str.translate, new_dtypes=self.dtypes, **kwargs
    )

def str_isalnum(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isalnum does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isalnum(), new_dtypes=new_dtypes
    )

def str_isalpha(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isalpha does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isalpha(), new_dtypes=new_dtypes
    )

def str_isdigit(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isdigit does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isdigit(), new_dtypes=new_dtypes
    )

def str_isspace(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isspace does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isspace(), new_dtypes=new_dtypes
    )

def str_islower(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.islower does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.islower(), new_dtypes=new_dtypes
    )

def str_isupper(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isupper does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isupper(), new_dtypes=new_dtypes
    )

def str_istitle(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.istitle does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.istitle(), new_dtypes=new_dtypes
    )

def str_isnumeric(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isnumeric does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isnumeric(), new_dtypes=new_dtypes
    )

def str_isdecimal(self):
    new_dtypes = pandas.Series([bool])
    # We have to pass in a lambda because pandas.Series.str.isdecimal does not exist for python2
    return self._str_map_partitions(
        lambda str_series: str_series.isdecimal(), new_dtypes=new_dtypes
    )
# END String map partitions operations
# Map partitions across select indices
def astype(self, col_dtypes, **kwargs):
    """Converts columns dtypes to given dtypes.

    Args:
        col_dtypes: Dictionary of {col: dtype,...} where col is the column
            name and dtype is a numpy dtype.
    Returns:
        DataFrame with updated dtypes.
    """
    # Group indices to update by dtype for less map operations
    dtype_indices = {}
    columns = col_dtypes.keys()
    numeric_indices = list(self.columns.get_indexer_for(columns))
    # Create Series for the updated dtypes
    new_dtypes = self.dtypes.copy()
    for i, column in enumerate(columns):
        dtype = col_dtypes[column]
        if (
            not isinstance(dtype, type(self.dtypes[column]))
            or dtype != self.dtypes[column]
        ):
            # Only record the dtype when it actually differs.
            dtype_indices.setdefault(dtype, []).append(numeric_indices[i])
            # Update the new dtype series to the proper pandas dtype
            try:
                new_dtype = np.dtype(dtype)
            except TypeError:
                new_dtype = dtype
            # Normalize 32-bit results the caller did not explicitly ask
            # for to the stable 64-bit dtypes.
            if dtype != np.int32 and new_dtype == np.int32:
                new_dtype = np.dtype("int64")
            elif dtype != np.float32 and new_dtype == np.float32:
                new_dtype = np.dtype("float64")
            new_dtypes[column] = new_dtype
    # Update partitions for each dtype that is updated
    new_data = self.data
    for dtype in dtype_indices.keys():
        # BUG FIX: bind ``dtype`` as a default argument. The closure may be
        # serialized/executed after the loop variable has advanced (late
        # binding), which would cast the wrong columns. Also renamed the
        # inner function so it no longer shadows this method's name.
        def astype_builder(df, internal_indices=[], dtype=dtype):
            return df.astype(
                {df.columns[ind]: dtype for ind in internal_indices}
            )

        new_data = new_data.apply_func_to_select_indices(
            0, astype_builder, dtype_indices[dtype], keep_remaining=True
        )
    return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
# END Map partitions across select indices
# Column/Row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def _full_axis_reduce(self, axis, func, alternate_index=None):
    """Applies a reduction that requires knowledge of an entire axis.

    Args:
        axis: axis to apply the function to.
        func: Function to reduce the Manager by. This function takes in a Manager.
        alternate_index: optional labels for the non-reduced axis of the
            result, used instead of this compiler's own index/columns.
    Return:
        A new QueryCompiler holding the reduced data.
    """
    reduced = self.data.map_across_full_axis(axis, func)
    if axis == 0:
        cols = self.columns if alternate_index is None else alternate_index
        return self.__constructor__(reduced, index=["__reduced__"], columns=cols)
    idx = self.index if alternate_index is None else alternate_index
    return self.__constructor__(reduced, index=idx, columns=["__reduced__"])
def first_valid_index(self):
    """Returns index of first non-NaN/NULL value.

    Return:
        Scalar of index name.
    """
    # It may be possible to incrementally check each partition, but this
    # computation is fairly cheap.
    def builder(df):
        # Positional labels so the per-column answers are comparable
        # across partitions.
        df.index = pandas.RangeIndex(len(df.index))
        return df.apply(lambda col: col.first_valid_index())

    func = self._build_mapreduce_func(builder)
    # The smallest positional answer across all columns is the first valid
    # row; `to_pandas()` is just for a single value and `squeeze` makes it
    # a scalar.
    position = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
    return self.index[position]
def idxmax(self, **kwargs):
    """Returns the first occurrence of the maximum over requested axis.

    Returns:
        A new QueryCompiler object containing the maximum of each column or axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmax(**kwargs)
    axis = kwargs.get("axis", 0)
    labels = self.index if axis == 0 else self.columns

    # Assign this compiler's labels inside the partitions so pandas
    # reports real labels rather than internal positional ones.
    def idxmax_builder(df, **kwargs):
        if axis == 0:
            df.index = labels
        else:
            df.columns = labels
        return df.idxmax(**kwargs)

    return self._full_axis_reduce(
        axis, self._build_mapreduce_func(idxmax_builder, **kwargs)
    )
def idxmin(self, **kwargs):
    """Returns the first occurrence of the minimum over requested axis.

    Returns:
        A new QueryCompiler object containing the minimum of each column or axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmin(**kwargs)
    axis = kwargs.get("axis", 0)
    labels = self.index if axis == 0 else self.columns

    # Assign this compiler's labels inside the partitions so pandas
    # reports real labels rather than internal positional ones.
    def idxmin_builder(df, **kwargs):
        if axis == 0:
            df.index = labels
        else:
            df.columns = labels
        return df.idxmin(**kwargs)

    return self._full_axis_reduce(
        axis, self._build_mapreduce_func(idxmin_builder, **kwargs)
    )
def last_valid_index(self):
    """Returns index of last non-NaN/NULL value.

    Return:
        Scalar of index name.
    """
    def builder(df):
        df.index = pandas.RangeIndex(len(df.index))
        return df.apply(lambda col: col.last_valid_index())

    func = self._build_mapreduce_func(builder)
    # The largest positional answer across all columns is the last valid
    # row; `to_pandas()` is just for a single value and `squeeze` makes it
    # a scalar.
    position = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
    return self.index[position]
def median(self, **kwargs):
    """Returns median of each column or row.

    Returns:
        A new QueryCompiler object containing the median of each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().median(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    return self._full_axis_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.median, **kwargs),
    )
def nunique(self, **kwargs):
    """Returns the number of unique items over each column or row.

    Returns:
        A new QueryCompiler object of ints indexed by column or index names.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().nunique(**kwargs)
    return self._full_axis_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.nunique, **kwargs),
    )
def quantile_for_single_value(self, **kwargs):
    """Returns quantile of each column or row.

    Returns:
        A new QueryCompiler object containing the quantile of each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().quantile_for_single_value(**kwargs)
    axis = kwargs.get("axis", 0)
    q = kwargs.get("q", 0.5)
    # This path handles a single scalar quantile only; list-valued ``q``
    # is presumably routed elsewhere -- the assert enforces the contract.
    assert type(q) is float

    def quantile_builder(df, **kwargs):
        try:
            return pandas.DataFrame.quantile(df, **kwargs)
        except ValueError:
            # Partitions whose slice raises (presumably no numeric data)
            # contribute an empty result instead of failing the whole op.
            return pandas.Series()

    func = self._build_mapreduce_func(quantile_builder, **kwargs)
    result = self._full_axis_reduce(axis, func)
    # Relabel the reduced axis with the quantile value itself, matching
    # the pandas result shape.
    if axis == 0:
        result.index = [q]
    else:
        result.columns = [q]
    return result
def skew(self, **kwargs):
    """Returns skew of each column or row.

    Returns:
        A new QueryCompiler object containing the skew of each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().skew(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    return self._full_axis_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.skew, **kwargs),
    )
def std(self, **kwargs):
    """Returns standard deviation of each column or row.

    Returns:
        A new QueryCompiler object containing the standard deviation of each column
        or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().std(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    return self._full_axis_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.std, **kwargs),
    )
def var(self, **kwargs):
    """Returns variance of each column or row.

    Returns:
        A new QueryCompiler object containing the variance of each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().var(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    return self._full_axis_reduce(
        kwargs.get("axis", 0),
        self._build_mapreduce_func(pandas.DataFrame.var, **kwargs),
    )
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def _full_axis_reduce_along_select_indices(self, func, axis, index):
    """Reduce Manager along select indices using a function that needs the full axis.

    Args:
        func: Callable that reduces the dimension of the object and requires full
            knowledge of the entire axis.
        axis: 0 for columns and 1 for rows.
        index: labels to restrict the reduction to.
    Returns:
        The raw partition-manager result (not yet wrapped in a QueryCompiler).
    """
    # Translate the requested labels into positional indices on the
    # opposite axis.
    labels = self.index if axis else self.columns
    numeric_indices = [pos for pos, name in enumerate(labels) if name in index]
    return self.data.apply_func_to_select_indices_along_full_axis(
        axis, func, numeric_indices
    )
def describe(self, **kwargs):
    """Generates descriptive statistics.

    Returns:
        DataFrame object containing the descriptive statistics of the DataFrame.
    """
    # Use pandas to calculate the correct columns: an empty frame with our
    # schema tells us which columns survive describe (e.g. numeric-only
    # selection) without touching the distributed data.
    new_columns = (
        pandas.DataFrame(columns=self.columns)
        .astype(self.dtypes)
        .describe(**kwargs)
        .columns
    )

    def describe_builder(df, internal_indices=[], **kwargs):
        # ``internal_indices`` selects this partition's slice of the
        # surviving columns; the default list is only read, never mutated.
        return df.iloc[:, internal_indices].describe(**kwargs)

    # Apply describe and update indices, columns, and dtypes
    func = self._prepare_method(describe_builder, **kwargs)
    new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
    new_index = self.compute_index(0, new_data, False)
    return self.__constructor__(new_data, new_index, new_columns)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def _map_across_full_axis(self, axis, func):
    # Thin passthrough to the partition manager's full-axis map.
    return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
    # Cumulative ops need the whole axis in order, so map across the full
    # axis; labels and the cached dtypes are carried over unchanged.
    prepared = self._prepare_method(func, **kwargs)
    new_data = self._map_across_full_axis(kwargs.get("axis", 0), prepared)
    return self.__constructor__(
        new_data, self.index, self.columns, self._dtype_cache
    )
def cummax(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummax(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummin(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumsum(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cumsum(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cumprod(self, **kwargs):
    """Cumulative product along an axis."""
    if not self._is_transposed:
        return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
    # Transposed: flip the axis, recurse on the base orientation, flip back.
    kwargs["axis"] = kwargs.get("axis", 0) ^ 1
    return self.transpose().cumprod(**kwargs).transpose()
def diff(self, **kwargs):
    """First discrete difference of values along an axis."""
    if not self._is_transposed:
        target_axis = kwargs.get("axis", 0)
        prepared = self._prepare_method(pandas.DataFrame.diff, **kwargs)
        diffed = self._map_across_full_axis(target_axis, prepared)
        # Shape is preserved, so the existing labels carry over.
        return self.__constructor__(diffed, self.index, self.columns)
    # Transposed: flip the axis, recurse on the base orientation, flip back.
    kwargs["axis"] = kwargs.get("axis", 0) ^ 1
    return self.transpose().diff(**kwargs).transpose()
def eval(self, expr, **kwargs):
    """Returns a new QueryCompiler with expr evaluated on columns.

    Args:
        expr: The string expression to evaluate.

    Returns:
        A new QueryCompiler with new columns after applying expr.
    """
    columns = self.index if self._is_transposed else self.columns
    index = self.columns if self._is_transposed else self.index

    # Make a copy of columns and eval on the copy to determine if result type is
    # series or not
    columns_copy = pandas.DataFrame(columns=self.columns)
    columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
    expect_series = isinstance(columns_copy, pandas.Series)

    def eval_builder(df, **kwargs):
        # pop the `axis` parameter because it was needed to build the mapreduce
        # function but it is not a parameter used by `eval`.
        kwargs.pop("axis", None)
        # Partitions carry positional labels; restore the real column names so
        # the expression can reference columns by name.
        df.columns = columns
        result = df.eval(expr, inplace=False, **kwargs)
        return result

    func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
    new_data = self._map_across_full_axis(1, func)

    if expect_series:
        # A Series result collapses into a single column named after the Series.
        new_columns = [columns_copy.name]
        new_index = index
    else:
        new_columns = columns_copy.columns
        new_index = self.index
    return self.__constructor__(new_data, new_index, new_columns)
def mode(self, **kwargs):
    """Returns a new QueryCompiler with modes calculated for each label along given axis.

    Returns:
        A new QueryCompiler with modes calculated.
    """
    axis = kwargs.get("axis", 0)

    def mode_builder(df, **kwargs):
        result = df.mode(**kwargs)
        # We return a dataframe with the same shape as the input to ensure
        # that all the partitions will be the same shape
        if not axis and len(df) != len(result):
            # Pad columns
            append_values = pandas.DataFrame(
                columns=result.columns, index=range(len(result), len(df))
            )
            result = pandas.concat([result, append_values], ignore_index=True)
        elif axis and len(df.columns) != len(result.columns):
            # Pad rows
            append_vals = pandas.DataFrame(
                columns=range(len(result.columns), len(df.columns)),
                index=result.index,
            )
            result = pandas.concat([result, append_vals], axis=1)
        return pandas.DataFrame(result)

    func = self._prepare_method(mode_builder, **kwargs)
    new_data = self._map_across_full_axis(axis, func)

    new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
    new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
    if self._dtype_cache is not None:
        # Copy before relabeling: assigning to `.index` mutates the Series in
        # place, and the cached dtypes of `self` must not be corrupted by
        # building the result object.
        new_dtypes = self._dtype_cache.copy()
        new_dtypes.index = new_columns
    else:
        new_dtypes = None
    # The padding added in mode_builder is all-NaN; drop it from the result.
    return self.__constructor__(
        new_data, new_index, new_columns, new_dtypes
    ).dropna(axis=axis, how="all")
def fillna(self, **kwargs):
    """Replaces NaN values with the value or method provided.

    Returns:
        A new QueryCompiler with null values filled.
    """
    axis = kwargs.get("axis", 0)
    value = kwargs.get("value")
    method = kwargs.get("method", None)
    limit = kwargs.get("limit", None)
    # `method`/`limit` propagate fills, which needs the whole row/column.
    full_axis = method is not None or limit is not None
    if isinstance(value, dict):
        value = kwargs.pop("value")

        if axis == 0:
            index = self.columns
        else:
            index = self.index
        # Re-key the fill values by position so each partition can look up
        # its own slice.
        value = {
            idx: value[key] for key in value for idx in index.get_indexer_for([key])
        }

        def fillna_dict_builder(df, func_dict=None):
            # Default is None instead of a mutable {} to avoid the shared
            # default-argument pitfall.
            if func_dict is None:
                func_dict = {}
            # We do this to ensure that no matter the state of the columns we get
            # the correct ones.
            func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
            return df.fillna(value=func_dict, **kwargs)

        if full_axis:
            new_data = self.data.apply_func_to_select_indices_along_full_axis(
                axis, fillna_dict_builder, value, keep_remaining=True
            )
        else:
            new_data = self.data.apply_func_to_select_indices(
                axis, fillna_dict_builder, value, keep_remaining=True
            )
        return self.__constructor__(new_data, self.index, self.columns)
    else:
        func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
        if full_axis:
            new_data = self._map_across_full_axis(axis, func)
            return self.__constructor__(new_data, self.index, self.columns)
        else:
            return self._map_partitions(func)
def quantile_for_list_of_values(self, **kwargs):
    """Returns Manager containing quantiles along an axis for numeric columns.

    Returns:
        QueryCompiler containing quantiles of original QueryCompiler along an axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().quantile_for_list_of_values(**kwargs)
    axis = kwargs.get("axis", 0)
    q = kwargs.get("q")
    numeric_only = kwargs.get("numeric_only", True)
    # This method is only for list-like `q` (scalar q goes elsewhere).
    assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))

    if numeric_only:
        new_columns = self.numeric_columns()
    else:
        # Datetime/timedelta columns also support quantiles when
        # numeric_only is off.
        new_columns = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
        ]
    if axis:
        # If along rows, then drop the nonnumeric columns, record the index, and
        # take transpose. We have to do this because if we don't, the result is all
        # in one column for some reason.
        nonnumeric = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if not is_numeric_dtype(dtype)
        ]
        query_compiler = self.drop(columns=nonnumeric)
        new_columns = query_compiler.index
    else:
        query_compiler = self

    def quantile_builder(df, **kwargs):
        result = df.quantile(**kwargs)
        return result.T if axis == 1 else result

    func = query_compiler._prepare_method(quantile_builder, **kwargs)
    q_index = pandas.Float64Index(q)
    new_data = query_compiler._map_across_full_axis(axis, func)

    # This took a long time to debug, so here is the rundown of why this is needed.
    # Previously, we were operating on select indices, but that was broken. We were
    # not correctly setting the columns/index. Because of how we compute `to_pandas`
    # and because of the static nature of the index for `axis=1` it is easier to
    # just handle this as the transpose (see `quantile_builder` above for the
    # transpose within the partition) than it is to completely rework other
    # internal methods. Basically we are returning the transpose of the object for
    # correctness and cleanliness of the code.
    if axis == 1:
        q_index = new_columns
        new_columns = pandas.Float64Index(q)
    result = self.__constructor__(new_data, q_index, new_columns)
    return result.transpose() if axis == 1 else result
def query(self, expr, **kwargs):
    """Query columns of the QueryCompiler with a boolean expression.

    Args:
        expr: Boolean expression to query the columns with.

    Returns:
        QueryCompiler containing the rows where the boolean expression is satisfied.
    """
    columns = self.columns

    def query_builder(df, **kwargs):
        # This is required because of an Arrow limitation
        # TODO revisit for Arrow error
        df = df.copy()
        df.index = pandas.RangeIndex(len(df))
        # Restore real column labels so `expr` can reference them by name.
        df.columns = columns
        df.query(expr, inplace=True, **kwargs)
        # Revert to positional labels before handing back to the partitions.
        df.columns = pandas.RangeIndex(len(df.columns))
        return df

    func = self._prepare_method(query_builder, **kwargs)
    new_data = self._map_across_full_axis(1, func)
    # Query removes rows, so we need to update the index
    new_index = self.compute_index(0, new_data, True)
    return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
def rank(self, **kwargs):
    """Computes numerical rank along axis. Equal values are set to the average.

    Returns:
        QueryCompiler containing the ranks of the values along an axis.
    """
    axis = kwargs.get("axis", 0)
    # Ranking along rows (axis=1) is forced to numeric-only.
    numeric_only = True if axis else kwargs.get("numeric_only", False)
    func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
    new_data = self._map_across_full_axis(axis, func)
    # Since we assume no knowledge of internal state, we get the columns
    # from the internal partitions.
    if numeric_only:
        new_columns = self.compute_index(1, new_data, True)
    else:
        new_columns = self.columns
    # rank always produces float results.
    new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
    return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
def sort_index(self, **kwargs):
    """Sorts the data with respect to either the columns or the indices.

    Returns:
        QueryCompiler containing the data sorted by columns or indices.
    """
    axis = kwargs.pop("axis", 0)
    if self._is_transposed:
        return self.transpose().sort_index(axis=axis ^ 1, **kwargs).transpose()
    index = self.columns if axis else self.index

    # sort_index can have ascending be None and behaves as if it is False.
    # sort_values cannot have ascending be None. Thus, the following logic is to
    # convert the ascending argument to one that works with sort_values
    ascending = kwargs.pop("ascending", True)
    if ascending is None:
        ascending = False
    kwargs["ascending"] = ascending

    def sort_index_builder(df, **kwargs):
        # Restore the real labels on the partition before sorting so the
        # sort is by the user-facing index/columns.
        if axis:
            df.columns = index
        else:
            df.index = index
        return df.sort_index(axis=axis, **kwargs)

    func = self._prepare_method(sort_index_builder, **kwargs)
    new_data = self._map_across_full_axis(axis, func)
    # The new labels are the old ones, sorted the same way as the data.
    if axis:
        new_columns = pandas.Series(self.columns).sort_values(**kwargs)
        new_index = self.index
    else:
        new_index = pandas.Series(self.index).sort_values(**kwargs)
        new_columns = self.columns
    return self.__constructor__(
        new_data, new_index, new_columns, self.dtypes.copy(), self._is_transposed
    )
# END Map across rows/columns
# Head/Tail/Front/Back
def head(self, n):
    """Returns the first n rows.

    Args:
        n: Integer containing the number of rows to return.

    Returns:
        QueryCompiler containing the first n rows of the original QueryCompiler.
    """
    # Negative n means "all but the last |n| rows", clamped at zero.
    if n < 0:
        n = max(0, len(self.index) + n)
    if not self._is_transposed:
        return self.__constructor__(
            self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
        )
    # For a transposed manager the physical rows are the logical columns, so
    # undo the transpose, take along axis 1, and transpose back. The logical
    # index is already correct and only needs slicing; keep the transposed
    # flag so the data is not physically reoriented by this manager.
    return self.__constructor__(
        self.data.transpose().take(1, n).transpose(),
        self.index[:n],
        self.columns,
        self._dtype_cache,
        self._is_transposed,
    )
def tail(self, n):
    """Returns the last n rows.

    Args:
        n: Integer containing the number of rows to return.

    Returns:
        QueryCompiler containing the last n rows of the original QueryCompiler.
    """
    # Negative n means "all but the first |n| rows", clamped at zero.
    if n < 0:
        n = max(0, len(self.index) + n)
    if not self._is_transposed:
        # NOTE(review): when n == 0, `self.index[-n:]` is the full index —
        # confirm `take(0, 0)` matches that before relying on tail(0).
        return self.__constructor__(
            self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache
        )
    # See head for an explanation of the transposed behavior.
    return self.__constructor__(
        self.data.transpose().take(1, -n).transpose(),
        self.index[-n:],
        self.columns,
        self._dtype_cache,
        self._is_transposed,
    )
def front(self, n):
    """Returns the first n columns.

    Args:
        n: Integer containing the number of columns to return.

    Returns:
        QueryCompiler containing the first n columns of the original QueryCompiler.
    """
    # Slice the cached dtypes to match the kept columns (or keep None).
    new_dtypes = (
        self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]
    )
    # See head for an explanation of the transposed behavior
    if self._is_transposed:
        result = self.__constructor__(
            self.data.transpose().take(0, n).transpose(),
            self.index,
            self.columns[:n],
            new_dtypes,
            self._is_transposed,
        )
    else:
        result = self.__constructor__(
            self.data.take(1, n), self.index, self.columns[:n], new_dtypes
        )
    return result
def back(self, n):
    """Returns the last n columns.

    Args:
        n: Integer containing the number of columns to return.

    Returns:
        QueryCompiler containing the last n columns of the original QueryCompiler.
    """
    # NOTE(review): when n == 0, `[-n:]` slices are the FULL index/dtypes,
    # so back(0) appears to return everything rather than nothing — confirm
    # against `take` semantics before relying on the n == 0 case.
    new_dtypes = (
        self._dtype_cache if self._dtype_cache is None else self._dtype_cache[-n:]
    )
    # See head for an explanation of the transposed behavior
    if self._is_transposed:
        result = self.__constructor__(
            self.data.transpose().take(0, -n).transpose(),
            self.index,
            self.columns[-n:],
            new_dtypes,
            self._is_transposed,
        )
    else:
        result = self.__constructor__(
            self.data.take(1, -n), self.index, self.columns[-n:], new_dtypes
        )
    return result
# End Head/Tail/Front/Back
# __getitem__ methods
def getitem_column_array(self, key, numeric=False):
    """Get column data for target labels.

    Args:
        key: Target labels by which to retrieve data.
        numeric: A boolean representing whether or not the key passed in represents
            the numeric index or the named index.

    Returns:
        A new QueryCompiler.
    """
    if self._is_transposed:
        # Columns of a transposed manager are physical rows.
        return (
            self.transpose()
            .getitem_row_array(self.columns.get_indexer_for(key))
            .transpose()
        )
    # Convert to list for type checking
    if not numeric:
        numeric_indices = self.columns.get_indexer_for(key)
    else:
        numeric_indices = key
    result = self.data.mask(col_indices=numeric_indices)
    # We can't just set the columns to key here because there may be
    # multiple instances of a key.
    new_columns = self.columns[numeric_indices]
    if self._dtype_cache is not None:
        new_dtypes = self.dtypes[numeric_indices]
    else:
        new_dtypes = None
    return self.__constructor__(result, self.index, new_columns, new_dtypes)
def getitem_row_array(self, key):
    """Get row data for target labels.

    Args:
        key: Target numeric indices by which to retrieve data.

    Returns:
        A new QueryCompiler.
    """
    if self._is_transposed:
        # Rows of a transposed manager are physical columns.
        return self.transpose().getitem_column_array(key, numeric=True).transpose()
    masked = self.data.mask(row_indices=key)
    # Slice the existing index rather than assigning `key` directly, since
    # `key` may contain repeated positions.
    sliced_index = self.index[key]
    return self.__constructor__(masked, sliced_index, self.columns, self._dtype_cache)
def setitem(self, axis, key, value):
    """Set the row/column defined by `key` to the `value` provided.

    Args:
        axis: 0 to set a column, 1 to set a row.
        key: The label of the column (axis=0) or row (axis=1) to set.
        value: The value to assign.

    Returns:
        A new QueryCompiler
    """

    def setitem(df, internal_indices=None):
        # Default is None instead of a mutable [] to avoid the shared
        # default-argument pitfall.
        if internal_indices is None:
            internal_indices = []

        def _setitem():
            if len(internal_indices) == 1:
                if axis == 0:
                    df[df.columns[internal_indices[0]]] = value
                else:
                    df.iloc[internal_indices[0]] = value
            else:
                if axis == 0:
                    df[df.columns[internal_indices]] = value
                else:
                    df.iloc[internal_indices] = value

        try:
            _setitem()
        except ValueError:
            # TODO: This is a workaround for a pyarrow serialization issue
            # (retry the assignment on a writable copy).
            df = df.copy()
            _setitem()
        return df

    if axis == 0:
        numeric_indices = list(self.columns.get_indexer_for([key]))
    else:
        numeric_indices = list(self.index.get_indexer_for([key]))
    prepared_func = self._prepare_method(setitem)
    if is_list_like(value):
        # List-like values must be aligned across the whole axis.
        new_data = self.data.apply_func_to_select_indices_along_full_axis(
            axis, prepared_func, numeric_indices, keep_remaining=True
        )
    else:
        new_data = self.data.apply_func_to_select_indices(
            axis, prepared_func, numeric_indices, keep_remaining=True
        )
    return self.__constructor__(new_data, self.index, self.columns)
# END __getitem__ methods
# Drop/Dropna
# This will change the shape of the resulting data.
def dropna(self, **kwargs):
    """Returns a new QueryCompiler with null values dropped along given axis.

    Return:
        a new QueryCompiler
    """
    axis = kwargs.get("axis", 0)
    subset = kwargs.get("subset", None)
    thresh = kwargs.get("thresh", None)
    how = kwargs.get("how", "any")
    # We need to subset the axis that we care about with `subset`. This
    # will be used to determine the number of values that are NA.
    if subset is not None:
        if not axis:
            compute_na = self.getitem_column_array(subset)
        else:
            compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
    else:
        compute_na = self

    # Normalize axis to a list so dropping along both axes is supported.
    if not isinstance(axis, list):
        axis = [axis]
    # We are building this dictionary first to determine which columns
    # and rows to drop. This way we do not drop some columns before we
    # know which rows need to be dropped.
    if thresh is not None:
        # Count the number of NA values and specify which are higher than
        # thresh.
        drop_values = {
            ax ^ 1: compute_na.isna().sum(axis=ax ^ 1).to_pandas().squeeze()
            > thresh
            for ax in axis
        }
    else:
        # `how` selects "any"/"all" reduction over the NA mask.
        drop_values = {
            ax
            ^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1).to_pandas().squeeze()
            for ax in axis
        }

    if 0 not in drop_values:
        drop_values[0] = None

    if 1 not in drop_values:
        drop_values[1] = None

        # NOTE(review): inside this branch drop_values[1] was just set to
        # None, so the comprehension arms below never execute for axis 1 —
        # confirm whether this branch structure is intended.
        rm_from_index = (
            [obj for obj in compute_na.index[drop_values[1]]]
            if drop_values[1] is not None
            else None
        )
        rm_from_columns = (
            [obj for obj in compute_na.columns[drop_values[0]]]
            if drop_values[0] is not None
            else None
        )
    else:
        rm_from_index = (
            compute_na.index[drop_values[1]] if drop_values[1] is not None else None
        )
        rm_from_columns = (
            compute_na.columns[drop_values[0]]
            if drop_values[0] is not None
            else None
        )

    return self.drop(index=rm_from_index, columns=rm_from_columns)
def drop(self, index=None, columns=None):
    """Remove row data for target index and columns.

    Args:
        index: Target index to drop.
        columns: Target columns to drop.

    Returns:
        A new QueryCompiler.
    """
    if index is None and columns is None:
        return self.copy()
    if self._is_transposed:
        # On a transposed manager, dropping rows means dropping physical
        # columns and vice versa.
        return self.transpose().drop(index=columns, columns=index).transpose()
    if index is None:
        new_index = self.index
        idx_numeric_indices = None
    else:
        # Positions to KEEP: the full positional range minus dropped labels.
        idx_numeric_indices = pandas.RangeIndex(len(self.index)).drop(
            self.index.get_indexer_for(index)
        )
        new_index = self.index[~self.index.isin(index)]
    if columns is None:
        new_columns = self.columns
        new_dtypes = self._dtype_cache
        col_numeric_indices = None
    else:
        col_numeric_indices = pandas.RangeIndex(len(self.columns)).drop(
            self.columns.get_indexer_for(columns)
        )
        new_columns = self.columns[~self.columns.isin(columns)]
        if self._dtype_cache is not None:
            new_dtypes = self.dtypes.drop(columns)
        else:
            new_dtypes = None
    new_data = self.data.mask(
        row_indices=idx_numeric_indices, col_indices=col_numeric_indices
    )
    return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
# END Drop/Dropna
# Insert
# This method changes the shape of the resulting data. In Pandas, this
# operation is always inplace, but this object is immutable, so we just
# return a new one from here and let the front end handle the inplace
# update.
def insert(self, loc, column, value):
    """Insert new column data.

    Args:
        loc: Insertion index.
        column: Column labels to insert.
        value: Dtype object values to insert.

    Returns:
        A new PandasQueryCompiler with new data inserted.
    """
    if is_list_like(value):
        # TODO make work with another querycompiler object as `value`.
        # This will require aligning the indices with a `reindex` and ensuring that
        # the data is partitioned identically.
        if isinstance(value, pandas.Series):
            value = value.reindex(self.index)
        value = list(value)

    def insert(df, internal_indices=None):
        # Default is None instead of a mutable [] to avoid the shared
        # default-argument pitfall.
        if internal_indices is None:
            internal_indices = []
        internal_idx = int(internal_indices[0])
        # Temporarily switch to positional labels so the insert is
        # label-agnostic, then restore the original index.
        old_index = df.index
        df.index = pandas.RangeIndex(len(df.index))
        df.insert(internal_idx, internal_idx, value, allow_duplicates=True)
        df.columns = pandas.RangeIndex(len(df.columns))
        df.index = old_index
        return df

    new_data = self.data.apply_func_to_select_indices_along_full_axis(
        0, insert, loc, keep_remaining=True
    )
    new_columns = self.columns.insert(loc, column)
    return self.__constructor__(new_data, self.index, new_columns)
# END Insert
# UDF (apply and agg) methods
# There is a wide range of behaviors that are supported, so a lot of the
# logic can get a bit convoluted.
def apply(self, func, axis, *args, **kwargs):
    """Apply func across given axis.

    Args:
        func: The function to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    # Dispatch on the type of `func`: callable, dict of funcs, or list-like.
    if callable(func):
        return self._callable_func(func, axis, *args, **kwargs)
    elif isinstance(func, dict):
        return self._dict_func(func, axis, *args, **kwargs)
    elif is_list_like(func):
        return self._list_like_func(func, axis, *args, **kwargs)
    else:
        # NOTE(review): unsupported `func` types silently fall through and
        # return None — presumably unreachable from the frontend; confirm
        # before changing to an explicit error.
        pass
def _post_process_apply(self, result_data, axis, try_scale=True):
    """Recompute the index after applying function.

    Args:
        result_data: a BaseFrameManager object.
        axis: Target axis along which function was applied.
        try_scale: When True, first try to read labels from the partitions
            themselves, falling back to length-based labels on IndexError.

    Returns:
        A new PandasQueryCompiler.
    """
    if try_scale:
        try:
            internal_index = self.compute_index(0, result_data, True)
        except IndexError:
            internal_index = self.compute_index(0, result_data, False)
        try:
            internal_columns = self.compute_index(1, result_data, True)
        except IndexError:
            internal_columns = self.compute_index(1, result_data, False)
    else:
        internal_index = self.compute_index(0, result_data, False)
        internal_columns = self.compute_index(1, result_data, False)
    if not axis:
        index = internal_index
        # We check if the two columns are the same length because if
        # they are the same length, `self.columns` is the correct index.
        # However, if the operation resulted in a different number of columns,
        # we must use the derived columns from `self.compute_index()`.
        if len(internal_columns) != len(self.columns):
            columns = internal_columns
        else:
            columns = self.columns
    else:
        columns = internal_columns
        # See above explanation for checking the lengths of columns
        if len(internal_index) != len(self.index):
            index = internal_index
        else:
            index = self.index
    return self.__constructor__(result_data, index, columns)
def _dict_func(self, func, axis, *args, **kwargs):
    """Apply function to certain indices across given axis.

    Args:
        func: Dict mapping labels to the function to apply at that label.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    if "axis" not in kwargs:
        kwargs["axis"] = axis

    if axis == 0:
        index = self.columns
    else:
        index = self.index
    # Re-key the dict by position so each partition can locate its part.
    func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}

    def dict_apply_builder(df, func_dict=None):
        # Default is None instead of a mutable {} to avoid the shared
        # default-argument pitfall.
        if func_dict is None:
            func_dict = {}
        # Sometimes `apply` can return a `Series`, but we require that internally
        # all objects are `DataFrame`s.
        return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))

    result_data = self.data.apply_func_to_select_indices_along_full_axis(
        axis, dict_apply_builder, func, keep_remaining=False
    )
    full_result = self._post_process_apply(result_data, axis)
    return full_result
def _list_like_func(self, func, axis, *args, **kwargs):
    """Apply list-like function across given axis.

    Args:
        func: The function to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    func_prepared = self._prepare_method(
        # Wrap in a DataFrame since apply with a list of funcs may return
        # a Series and internals require DataFrames.
        lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))
    )
    new_data = self._map_across_full_axis(axis, func_prepared)
    # When the function is list-like, the function names become the index/columns
    new_index = (
        [f if isinstance(f, string_types) else f.__name__ for f in func]
        if axis == 0
        else self.index
    )
    new_columns = (
        [f if isinstance(f, string_types) else f.__name__ for f in func]
        if axis == 1
        else self.columns
    )
    return self.__constructor__(new_data, new_index, new_columns)
def _callable_func(self, func, axis, *args, **kwargs):
    """Apply callable functions across given axis.

    Args:
        func: The functions to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """

    def callable_apply_builder(df, axis=0):
        # Restore real labels on the applied-over axis and positional labels
        # on the other, so `func` sees the labels it expects.
        if not axis:
            df.index = index
            df.columns = pandas.RangeIndex(len(df.columns))
        else:
            df.columns = index
            df.index = pandas.RangeIndex(len(df.index))

        result = df.apply(func, axis=axis, *args, **kwargs)
        return result

    index = self.index if not axis else self.columns
    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
    result_data = self._map_across_full_axis(axis, func_prepared)
    return self._post_process_apply(result_data, axis)
# END UDF
# Manual Partitioning methods (e.g. merge, groupby)
# These methods require some sort of manual partitioning due to their
# nature. They require certain data to exist on the same partition, and
# after the shuffle, there should be only a local map required.
def _manual_repartition(self, axis, repartition_func, **kwargs):
    """This method applies all manual partitioning functions.

    Args:
        axis: The axis to shuffle data along.
        repartition_func: The function used to repartition data.

    Returns:
        A `BaseFrameManager` object.
    """
    prepared = self._prepare_method(repartition_func, **kwargs)
    return self.data.manual_shuffle(axis, prepared)
def groupby_reduce(
    self,
    by,
    axis,
    groupby_args,
    map_func,
    map_args,
    reduce_func=None,
    reduce_args=None,
    numeric_only=True,
):
    """Group by `by` and aggregate with a map/reduce pair of functions.

    Args:
        by: Query compiler holding the grouping keys (its `.data` is used).
        axis: Axis to group along.
        groupby_args: Keyword args forwarded to `DataFrame.groupby`.
        map_func: Aggregation applied per partition.
        map_args: Keyword args for `map_func`.
        reduce_func: Aggregation combining mapped partitions; when None,
            `map_func` is reused for the reduce step.
        reduce_args: Keyword args for `reduce_func`.
        numeric_only: Whether only numeric columns survive the aggregation
            (axis 0 only).

    Returns:
        A new PandasQueryCompiler with the aggregated result.
    """

    def _map(df, other):
        # Keep the group keys as a regular column (reset_index) so the
        # reduce step can re-group on df.columns[0].
        return map_func(
            df.groupby(by=other.squeeze(), axis=axis, **groupby_args), **map_args
        ).reset_index(drop=False)

    if reduce_func is not None:

        def _reduce(df):
            return reduce_func(
                df.groupby(by=df.columns[0], axis=axis, **groupby_args),
                **reduce_args
            )

    else:

        def _reduce(df):
            return map_func(
                df.groupby(by=df.columns[0], axis=axis, **groupby_args), **map_args
            )

    new_data = self.data.groupby_reduce(axis, by.data, _map, _reduce)
    if axis == 0:
        new_columns = (
            self.columns if not numeric_only else self.numeric_columns(True)
        )
        new_index = self.compute_index(axis, new_data, False)
    else:
        new_columns = self.compute_index(axis, new_data, False)
        new_index = self.index
    return self.__constructor__(new_data, new_index, new_columns)
def groupby_agg(self, by, axis, agg_func, groupby_args, agg_args):
    """Group by `by` and apply `agg_func` to each group along `axis`.

    Args:
        by: Labels or mapping used as group keys.
        axis: Axis to group along.
        agg_func: Aggregation applied to each grouped partition.
        groupby_args: Keyword args forwarded to `DataFrame.groupby`.
        agg_args: Keyword args forwarded to `agg_func`.

    Returns:
        A new PandasQueryCompiler with the aggregated result.
    """
    remote_index = self.index if not axis else self.columns

    def groupby_agg_builder(df):
        if not axis:
            df.index = remote_index
            df.columns = pandas.RangeIndex(len(df.columns))
            # We need to be careful that our internal index doesn't overlap with the
            # groupby values, otherwise we return an incorrect result. We
            # temporarily modify the columns so that we don't run into correctness
            # issues.
            if all(b in df for b in by):
                df = df.add_prefix("_")
        else:
            df.columns = remote_index
            df.index = pandas.RangeIndex(len(df.index))

        def compute_groupby(df):
            grouped_df = df.groupby(by=by, axis=axis, **groupby_args)
            try:
                result = agg_func(grouped_df, **agg_args)
                # This will set things back if we changed them (see above).
                if axis == 0 and not is_numeric_dtype(result.columns.dtype):
                    result.columns = [int(col[1:]) for col in result]
            # This happens when the partition is filled with non-numeric data and a
            # numeric operation is done. We need to build the index here to avoid issues
            # with extracting the index.
            except DataError:
                result = pandas.DataFrame(index=grouped_df.size().index)
            return result

        try:
            return compute_groupby(df)
        # This will happen with Arrow buffer read-only errors. We don't want to copy
        # all the time, so this will try to fast-path the code first.
        except ValueError:
            return compute_groupby(df.copy())

    func_prepared = self._prepare_method(lambda df: groupby_agg_builder(df))
    result_data = self._map_across_full_axis(axis, func_prepared)
    if axis == 0:
        index = self.compute_index(0, result_data, False)
        columns = self.compute_index(1, result_data, True)
    else:
        index = self.compute_index(0, result_data, True)
        columns = self.compute_index(1, result_data, False)
    # If the result is a Series, this is how `compute_index` returns the columns.
    if len(columns) == 0 and len(index) != 0:
        return self._post_process_apply(result_data, axis, try_scale=True)
    else:
        return self.__constructor__(result_data, index, columns)
# END Manual Partitioning methods
# Get_dummies
def get_dummies(self, columns, **kwargs):
    """Convert categorical variables to dummy variables for certain columns.

    Args:
        columns: The columns to convert; None means all non-numeric columns.

    Returns:
        A new QueryCompiler.
    """
    cls = type(self)
    # `columns` as None does not mean all columns, by default it means only
    # non-numeric columns.
    if columns is None:
        columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
    # If we aren't computing any dummies, there is no need for any
    # remote compute.
    if len(columns) == 0:
        return self.copy()
    elif not is_list_like(columns):
        columns = [columns]

    # We have to do one of two things in order to ensure the final columns
    # are correct. Our first option is to map over the data and assign the
    # columns in a separate pass. That is what we have chosen to do here.
    # This is not as efficient, but it requires less information from the
    # lower layers and does not break any of our internal requirements. The
    # second option is that we assign the columns as a part of the
    # `get_dummies` call. This requires knowledge of the length of each
    # partition, and breaks some of our assumptions and separation of
    # concerns.
    def set_columns(df, columns):
        df.columns = columns
        return df

    set_cols = self.columns
    columns_applied = self._map_across_full_axis(
        1, lambda df: set_columns(df, set_cols)
    )
    # In some cases, we are mapping across all of the data. It is more
    # efficient if we are mapping over all of the data to do it this way
    # than it would be to reuse the code for specific columns.
    if len(columns) == len(self.columns):

        def get_dummies_builder(df):
            # `df` may be None for empty partitions; propagate None unchanged
            # (made explicit — the previous implicit fallthrough returned
            # None silently).
            if df is None:
                return None
            if df.empty:
                return pandas.DataFrame([])
            return pandas.get_dummies(df, **kwargs)

        func = self._prepare_method(lambda df: get_dummies_builder(df))
        new_data = columns_applied.map_across_full_axis(0, func)
        untouched_data = None
    else:

        def get_dummies_builder(df, internal_indices=None):
            # Default is None instead of a mutable [] (shared-default pitfall).
            if internal_indices is None:
                internal_indices = []
            return pandas.get_dummies(
                df.iloc[:, internal_indices], columns=None, **kwargs
            )

        numeric_indices = list(self.columns.get_indexer_for(columns))
        new_data = columns_applied.apply_func_to_select_indices_along_full_axis(
            0, get_dummies_builder, numeric_indices, keep_remaining=False
        )
        untouched_data = self.drop(columns=columns)
    # Since we set the columns in the beginning, we can just extract them
    # here. There is fortunately no required extra steps for a correct
    # column index.
    final_columns = self.compute_index(1, new_data, False)
    # If we mapped over all the data we are done. If not, we need to
    # prepend the `new_data` with the raw data from the columns that were
    # not selected.
    if len(columns) != len(self.columns):
        new_data = untouched_data.data.concat(1, new_data)
        final_columns = untouched_data.columns.append(pandas.Index(final_columns))
    return cls(new_data, self.index, final_columns)
# END Get_dummies
# Indexing
def view(self, index=None, columns=None):
    """Create a lazily-masked view over the given positional index/columns.

    Args:
        index: Positional row indices to keep (None keeps all).
        columns: Positional column indices to keep (None keeps all).

    Returns:
        A PandasQueryCompilerView that masks the data lazily on access.
    """
    if self._is_transposed:
        return self.transpose().view(columns=index, index=columns)
    # Build label -> physical-position maps so the view can mask on access.
    index_map_series = pandas.Series(np.arange(len(self.index)), index=self.index)
    column_map_series = pandas.Series(
        np.arange(len(self.columns)), index=self.columns
    )
    if index is not None:
        index_map_series = index_map_series.iloc[index]
    if columns is not None:
        column_map_series = column_map_series.iloc[columns]
    return PandasQueryCompilerView(
        self.data,
        index_map_series.index,
        column_map_series.index,
        self._dtype_cache,
        index_map_series,
        column_map_series,
    )
def write_items(self, row_numeric_index, col_numeric_index, broadcasted_items):
    """Write `broadcasted_items` into the given numeric row/column positions.

    Note: unlike most methods here, this mutates `self.data` in place rather
    than returning a new QueryCompiler.
    """

    def iloc_mut(partition, row_internal_indices, col_internal_indices, item):
        # Copy first so the original partition object is never mutated.
        partition = partition.copy()
        partition.iloc[row_internal_indices, col_internal_indices] = item
        return partition

    mutated_blk_partitions = self.data.apply_func_to_indices_both_axis(
        func=iloc_mut,
        row_indices=row_numeric_index,
        col_indices=col_numeric_index,
        mutate=True,
        item_to_distribute=broadcasted_items,
    )
    self.data = mutated_blk_partitions
def global_idx_to_numeric_idx(self, axis, indices):
    """Convert global labels to positional (numeric) indices along an axis.

    Note: this function involves making copies of the index in memory.

    Args:
        axis: Axis to extract indices ("row", "col" or "columns").
        indices: Indices to convert to numerical.

    Returns:
        An Index object.
    """
    # NOTE(review): validation via assert is stripped under `python -O`;
    # an unrecognized axis then falls through and returns None.
    assert axis in ["row", "col", "columns"]
    if axis == "row":
        return pandas.Index(
            pandas.Series(np.arange(len(self.index)), index=self.index)
            .loc[indices]
            .values
        )
    elif axis in ["col", "columns"]:
        return pandas.Index(
            pandas.Series(np.arange(len(self.columns)), index=self.columns)
            .loc[indices]
            .values
        )
def enlarge_partitions(self, new_row_labels=None, new_col_labels=None):
    """Append empty rows and/or columns labeled with the given labels.

    Args:
        new_row_labels: Labels for appended rows, or None for none.
        new_col_labels: Labels for appended columns, or None for none.

    Returns:
        A new QueryCompiler with the enlarged partitions.
    """
    # Guard against None: the previous code called len(None) and raised
    # TypeError whenever one of the default arguments was left out.
    num_new_rows = len(new_row_labels) if new_row_labels is not None else 0
    num_new_cols = len(new_col_labels) if new_col_labels is not None else 0
    new_data = self.data.enlarge_partitions(num_new_rows, num_new_cols)
    concated_index = (
        self.index.append(type(self.index)(new_row_labels))
        if new_row_labels
        else self.index
    )
    concated_columns = (
        self.columns.append(type(self.columns)(new_col_labels))
        if new_col_labels
        else self.columns
    )
    return self.__constructor__(new_data, concated_index, concated_columns)
class PandasQueryCompilerView(PandasQueryCompiler):
    """
    This class represent a view of the PandasQueryCompiler

    In particular, the following constraints are broken:
    - (len(self.index), len(self.columns)) != self.data.shape

    Note:
        The constraint will be satisfied when we get the data
    """

    def __init__(
        self,
        block_partitions_object,
        index,
        columns,
        dtypes=None,
        index_map_series=None,
        columns_map_series=None,
    ):
        """
        Args:
            block_partitions_object: The underlying (unmasked) partition manager.
            index: User-facing row index of the view.
            columns: User-facing column index of the view.
            dtypes: Optional cached dtypes for the view.
            index_map_series: a Pandas Series Object mapping user-facing index to
                numeric index.
            columns_map_series: a Pandas Series Object mapping user-facing index to
                numeric index.
        """
        assert index_map_series is not None
        assert columns_map_series is not None
        assert index.equals(index_map_series.index)
        assert columns.equals(columns_map_series.index)
        self.index_map = index_map_series
        self.columns_map = columns_map_series
        PandasQueryCompiler.__init__(
            self, block_partitions_object, index, columns, dtypes
        )

    @property
    def __constructor__(self):
        """Return parent object when getting the constructor."""
        return PandasQueryCompiler

    def _get_data(self):
        """Mask the parent data down to this view's rows/columns on access.

        Returns:
            A BaseFrameManager object.
        """
        masked_data = self.parent_data.mask(
            row_indices=self.index_map.values, col_indices=self.columns_map.values
        )
        return masked_data

    def _set_data(self, new_data):
        """Note this setter will be called by the
        `super(PandasQueryCompiler).__init__` function
        """
        self.parent_data = new_data

    # `data` masks lazily on read and stores the unmasked manager on write.
    data = property(_get_data, _set_data)

    def global_idx_to_numeric_idx(self, axis, indices):
        # NOTE(review): this returns `.loc[indices].index` (the selected
        # labels), whereas the parent returns the mapped positional values —
        # confirm this asymmetry is intended for views.
        assert axis in ["row", "col", "columns"]
        if axis == "row":
            return self.index_map.loc[indices].index
        elif axis in ["col", "columns"]:
            return self.columns_map.loc[indices].index
|
from PyQuantum.Common.Matrix import *
import numpy as np
# import pandas as pd
from scipy.sparse import identity, kron, csc_matrix
class Hamiltonian(Matrix):
    """Hamiltonian over the five basis states |00>, |01>, |10>, |11>, |0t>.

    The matrix couples |10> and |0t> with strength ``cavity.g``; diagonal
    entries are multiples of ``cavity.wc``.

    NOTE(review): ``Matrix.__init__`` is never invoked here, and
    ``print_states`` relies on ``cprint`` being provided by the star-import
    from ``PyQuantum.Common.Matrix`` — confirm both.
    """

    # -----------------------------------------------------------------------------------------------------------------
    # ---------------------------------------------------- INIT -------------------------------------------------------
    # -----------------------------------------------------------------------------------------------------------------
    def __init__(self, capacity, cavity, RWA=True, iprint=False, iprint_symb=False):
        self.capacity = capacity
        self.cavity = cavity

        # Basis ordering: |00>, |01>, |10>, |11>, |0t>
        self.states = ['00', '01', '10', '11', '0t']

        wc = cavity.wc
        g = cavity.g
        dense = [
            [0,  0,  0,  0,      0],
            [0, wc,  0,  0,      0],
            [0,  0, wc,  0,      g],
            [0,  0,  0,  2 * wc, 0],
            [0,  0,  g,  0,      wc],
        ]
        self.size = len(dense)
        # Store sparsely; the matrix is mostly zeros.
        self.data = csc_matrix(dense)

    # -----------------------------------------------------------------------------------------------------------------
    # ---------------------------------------------------- PRINT STATES -----------------------------------------------
    # -----------------------------------------------------------------------------------------------------------------
    def print_states(self):
        """Print the basis state labels, one per line."""
        cprint("Basis:\n", "green")
        for state in self.states:
            print(state)
        print()

    def print(self):
        """Print the dense form of the Hamiltonian row by row."""
        for row in self.data.toarray():
            print(row)
"""
@Author: yshhuang@foxmail.com
@Date: 2020-07-23 16:13:08
@LastEditors: yshhuang@foxmail.com
@LastEditTime: 2020-07-27 17:47:49
@FilePath: /d2l-zh/srcnn/model.py
"""
from mxnet.gluon import nn
from mxnet import initializer
class SrCnn(nn.Sequential):
    """SRCNN super-resolution network (9-1-5 convolutional architecture).

    Fix: the original added its three Conv2D layers inside ``initialize``,
    so every call to ``initialize`` (e.g. with ``force_reinit=True``)
    appended three more layers to the network.  The topology is now built
    exactly once, at construction time.
    """

    def __init__(self, prefix=None, params=None):
        super().__init__(prefix=prefix, params=params)
        # Patch extraction/representation: 9x9 kernels, 64 feature maps.
        self.add(nn.Conv2D(kernel_size=9,
                           channels=64, activation='relu'))
        # Non-linear mapping: 1x1 kernels, 32 feature maps.
        self.add(nn.Conv2D(kernel_size=1,
                           channels=32, activation='relu'))
        # Reconstruction back to a 3-channel image.
        self.add(nn.Conv2D(kernel_size=5, channels=3))

    def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False, force_reinit=False):
        """Initialize parameters; the layer stack already exists."""
        return super().initialize(init=init, ctx=ctx, verbose=verbose, force_reinit=force_reinit)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-07 05:21
from __future__ import unicode_literals
from django.db import migrations
def create_system_financial_account(apps, schema_editor):
    """Ensure the single system-owned 'paypal_deposit' account exists.

    The historical model is fetched through ``apps`` because the live
    ``FinancialAccount`` class may be newer than this migration expects.
    """
    financial_account = apps.get_model("crowdsourcing", "FinancialAccount")
    financial_account.objects.get_or_create(is_system=True, type="paypal_deposit")
class Migration(migrations.Migration):
    """Data migration: seeds the system 'paypal_deposit' financial account."""

    dependencies = [
        ('crowdsourcing', '0011_auto_20151221_1618'),
    ]
    operations = [
        # Forward-only data migration; no reverse function is provided.
        migrations.RunPython(create_system_financial_account),
    ]
|
# -*- coding: utf-8 -*-
"""Regression test for issue #49."""
from __future__ import unicode_literals
import unittest
import os.path
from xml.etree import ElementTree as ET
from statik.generator import generate
class TestForAdditionalPathSegmentInjection(unittest.TestCase):
    """Regression test for issue #49 (non-root base path injection)."""

    def test_issue(self):
        project_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'data-non-root-base'
        )
        output_data = generate(project_dir, in_memory=True)

        # The browse listing and both album pages must be generated.
        self.assertIn('albums', output_data)
        albums = output_data['albums']
        for section in ('browse', 'test-album1', 'test-album2'):
            self.assertIn(section, albums)
            self.assertIn('index.html', albums[section])

        html = ET.fromstring(albums['browse']['index.html'])
        self.assertEqual('Browse Albums', html.find('head/title').text.strip())

        album_els = html.findall('body/ul/li/a')
        self.assertEqual(2, len(album_els))
        # Links must carry the non-standard base path prefix.
        expected = [
            ('Test Album 2', '/non/standard/albums/test-album2/'),
            ('Test Album 1', '/non/standard/albums/test-album1/'),
        ]
        for el, (title, href) in zip(album_els, expected):
            self.assertEqual(title, el.text.strip())
            self.assertEqual(href, el.attrib['href'])
|
import datetime
from PySide2 import QtWidgets, QtGui, QtCore
import fseutil
from fseutil.etc.images_base64 import OFR_LOGO_1_PNG
from fseutil.etc.images_base64 import OFR_LOGO_2_PNG
from fseutil.gui.layout.main import Ui_MainWindow
from fseutil.gui.logic.dialog_0101_adb_datasheet_1 import Dialog as Dialog0101
from fseutil.gui.logic.dialog_0102_bs9999_datasheet_1 import Dialog as Dialog0102
from fseutil.gui.logic.dialog_0103_bs9999_merging_flow import Dialog0103 as Dialog0103
from fseutil.gui.logic.dialog_0111_pd7974_heat_detector_activation import Dialog0111 as Dialog0111
from fseutil.gui.logic.dialog_0401_br187_parallel_simple import Dialog0401 as Dialog0401
from fseutil.gui.logic.dialog_0402_br187_perpendicular_simple import Dialog0402 as Dialog0402
from fseutil.gui.logic.dialog_0403_br187_parallel_complex import Dialog0403 as Dialog0403
from fseutil.gui.logic.dialog_0404_br187_perpendicular_complex import Dialog0404 as Dialog0404
from fseutil.gui.logic.dialog_0405_thermal_radiation_extreme import Dialog0405 as Dialog0405
from fseutil.gui.logic.dialog_0601_naming_convention import Dialog0601 as Dialog0601
from fseutil.gui.logic.dialog_0602_pd7974_flame_height import Dialog0602 as Dialog0602
# Optional licence key: fseutil.__key__ is only present in some builds,
# so fall back to None when the module is absent.
try:
    from fseutil.__key__ import key
    KEY = key()
except ModuleNotFoundError:
    KEY = None
class MainWindow(QtWidgets.QMainWindow):
    """Main window of the FSE Toolbox: a fixed-size launcher for tool dialogs."""

    def __init__(self):
        # ui setup
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # window properties
        self.setWindowTitle('OFR Fire Safety Engineering Utility Tools')
        self.statusBar().setSizeGripEnabled(False)
        # Lock the window at the size the designer layout produced.
        self.setFixedSize(self.width(), self.height())

        # signals
        self.init_buttons()

        # default values
        self.ui.label_big_name.setText('FSE Toolbox')
        self.ui.label_version.setText('Version ' + fseutil.__version__)
        self.ui.label_version.setStyleSheet('color: grey;')
        self.ui.label_version.setStatusTip('Version ' + fseutil.__version__)
        self.ui.label_version.setToolTip('Version ' + fseutil.__version__)
        self.init_logos()  # logo
        self.ui.dialog_error = QtWidgets.QErrorMessage(self)
        self.ui.dialog_error.setWindowTitle('Message')

    def init_logos(self):
        """Set the window icon and the clickable logo from embedded base64 PNGs."""
        ba = QtCore.QByteArray.fromBase64(OFR_LOGO_1_PNG)
        pix_map = QtGui.QPixmap()
        pix_map.loadFromData(ba)
        self.setWindowIcon(pix_map)
        ba = QtCore.QByteArray.fromBase64(OFR_LOGO_2_PNG)
        pix_map = QtGui.QPixmap()
        pix_map.loadFromData(ba)
        self.ui.label_logo.setPixmap(pix_map)
        # tips
        self.ui.label_logo.setToolTip('Click to go to ofrconsultants.com')
        self.ui.label_logo.setStatusTip('Click to go to ofrconsultants.com')
        # signals — plain attribute override (not a Qt connection) to make
        # the label respond to mouse clicks.
        self.ui.label_logo.mousePressEvent = self.label_logo_mousePressEvent

    def label_logo_mousePressEvent(self, event=None):
        """Open the OFR website when the logo label is clicked."""
        if event:
            QtGui.QDesktopServices.openUrl(QtCore.QUrl("https://ofrconsultants.com/"))

    def init_buttons(self):
        """Connect every tool button to its dialog class via activate_app."""
        self.ui.pushButton_0101_adb2_datasheet_1.clicked.connect(lambda: self.activate_app(Dialog0101))
        self.ui.pushButton_0102_bs9999_datasheet_1.clicked.connect(lambda: self.activate_app(Dialog0102))
        self.ui.pushButton_0103_merging_flow.clicked.connect(lambda: self.activate_app(Dialog0103))
        self.ui.pushButton_0111_heat_detector_activation.clicked.connect(lambda: self.activate_app(Dialog0111))
        self.ui.pushButton_0401_br187_parallel_simple.clicked.connect(lambda: self.activate_app(Dialog0401))
        self.ui.pushButton_0402_br187_perpendicular_simple.clicked.connect(lambda: self.activate_app(Dialog0402))
        self.ui.pushButton_0403_br187_parallel_complex.clicked.connect(lambda: self.activate_app(Dialog0403))
        self.ui.pushButton_0404_br187_perpendicular_complex.clicked.connect(lambda: self.activate_app(Dialog0404))
        self.ui.pushButton_0405_thermal_radiation_extreme.clicked.connect(lambda: self.activate_app(Dialog0405))
        self.ui.pushButton_0601_naming_convention.clicked.connect(lambda: self.activate_app(Dialog0601))
        self.ui.pushButton_0602_pd7974_flame_height.clicked.connect(lambda: self.activate_app(Dialog0602))

    def activate_app(self, app_):
        """Instantiate and show a dialog class; run it modally when supported."""
        app_ = app_(self)
        app_.show()
        try:
            app_.exec_()
        except AttributeError:
            # Windows without exec_() (non-dialog widgets) are just shown.
            pass
        return app_
|
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description.

    Fix: the file is now read with an explicit UTF-8 encoding so the build
    result does not depend on the machine's locale default.
    """
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# This call to setup() does all the work of building/registering the package.
setup(
    name="hdfs-lmdc",
    version="2.0.7",
    description="Esta biblioteca tem como objetivo generalizar funções da integração entre HDFS e Python utilizando HDFS3 ou JavaWrapper",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/LMDC-UFF/hdfs-python",
    author="LMDC-UFF",
    author_email="opensource@lmdc.uff.br",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["hdfs_lmdc"],
    include_package_data=True,
    # NOTE(review): pillow is pinned to the old 6.2.2 release — confirm the
    # pin is still required before relying on it.
    install_requires=["hdfs3", "pillow==6.2.2"],
    entry_points={
        "console_scripts": [
            "hdfs-lmdc=hdfs_lmdc.demo:main",
        ]
    },
)
import random
from math import floor
def Generate(GUI):
    """Paint the maze border and start recursive division of the interior.

    Args:
        GUI: Grid widget exposing COL/ROW and the restatGrid, paintGrid and
            colorCase(row, col, color) operations.
    """
    height = GUI.COL
    width = GUI.ROW
    GUI.restatGrid()
    GUI.paintGrid()
    # Outline the playfield: top and bottom rows, then left and right columns.
    for col in range(width):
        GUI.colorCase(0, col, "dimgray")
        GUI.colorCase(height - 1, col, "dimgray")
    for row in range(height):
        GUI.colorCase(row, 0, "dimgray")
        GUI.colorCase(row, width - 1, "dimgray")
    divide(width - 1, height - 1, 1, 1, GUI)
def divide(ax,ay,zx,zy,GUI):
    """Recursive-division step over the cell region (zx, zy)–(ax, ay).

    Paints "white" split lines and "dimgray" fills via
    GUI.colorCase(row, col, color).
    """
    dx = ax - zx
    dy = ay - zy
    # Base case: region too thin to split further.
    if dx < 2 or dy < 2:
        if dx > 1:
            for x in range(zx, ax):
                GUI.colorCase(zy,x,"dimgray")
        elif dy > 1:
            # NOTE(review): this loop stops at ay-1 while the horizontal
            # counterpart above runs all the way to ax — confirm the
            # asymmetry is intentional and not an off-by-one.
            for y in range(zy, ay-1):
                GUI.colorCase(y,zx,"dimgray")
        return
    # Split along the longer dimension; ties broken randomly.
    isVertical = 1 if dy > dx else (0 if dx > dy else random.randrange(2))
    # Random split point; the upper bound is shifted by one when isVertical.
    xp = random.randrange(zx, ax-(isVertical))
    yp = random.randrange(zy, ay-(isVertical))
    if isVertical:
        # Draw a horizontal line at row yp, then recurse above and below it.
        for i in range(xp,ax):
            GUI.colorCase(yp,i,"white")
        divide(ax,ay, zx, yp, GUI)
        divide(ax,yp,zx, zy, GUI)
    else:
        # Draw a vertical line at column xp, then recurse on both sides.
        for i in range (yp,ay):
            GUI.colorCase(i,xp,"white")
        divide(ax,ay,xp+1, zy,GUI)
        divide(xp,ay,zx, zy,GUI)
import json
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import train_test_split, ShuffleSplit
from scipy import stats
from typing import Union, List, Tuple, Sequence, Dict, Any, Optional, Collection
from copy import copy
from pathlib import Path
import pickle as pkl
import logging
import random
import lmdb
from scipy.spatial.distance import pdist, squareform
plt.rcParams['figure.dpi'] = 300

# 21-token alphabet: the gap character '-' followed by the 20 standard
# amino acids; AA_IDX/IDX_AA are the forward and inverse lookups.
AA = list("-ACDEFGHIKLMNPQRSTVWY")
AA_IDX = {AA[i]:i for i in range(len(AA))}
IDX_AA = {i:AA[i].upper() for i in range(len(AA))}
# from CbAS' util.py
BLOSUM = np.array([
[3.9029,0.6127,0.5883,0.5446,0.8680,0.7568,0.7413,1.0569,0.5694,0.6325,0.6019,0.7754,0.7232,0.4649,0.7541,1.4721,0.9844,0.4165,0.5426,0.9365],
[0.6127,6.6656,0.8586,0.5732,0.3089,1.4058,0.9608,0.4500,0.9170,0.3548,0.4739,2.0768,0.6226,0.3807,0.4815,0.7672,0.6778,0.3951,0.5560,0.4201],
[0.5883,0.8586,7.0941,1.5539,0.3978,1.0006,0.9113,0.8637,1.2220,0.3279,0.3100,0.9398,0.4745,0.3543,0.4999,1.2315,0.9842,0.2778,0.4860,0.3690],
[0.5446,0.5732,1.5539,7.3979,0.3015,0.8971,1.6878,0.6343,0.6786,0.3390,0.2866,0.7841,0.3465,0.2990,0.5987,0.9135,0.6948,0.2321,0.3457,0.3365],
[0.8680,0.3089,0.3978,0.3015,19.5766,0.3658,0.2859,0.4204,0.3550,0.6535,0.6423,0.3491,0.6114,0.4390,0.3796,0.7384,0.7406,0.4500,0.4342,0.7558],
[0.7568,1.4058,1.0006,0.8971,0.3658,6.2444,1.9017,0.5386,1.1680,0.3829,0.4773,1.5543,0.8643,0.3340,0.6413,0.9656,0.7913,0.5094,0.6111,0.4668],
[0.7413,0.9608,0.9113,1.6878,0.2859,1.9017,5.4695,0.4813,0.9600,0.3305,0.3729,1.3083,0.5003,0.3307,0.6792,0.9504,0.7414,0.3743,0.4965,0.4289],
[1.0569,0.4500,0.8637,0.6343,0.4204,0.5386,0.4813,6.8763,0.4930,0.2750,0.2845,0.5889,0.3955,0.3406,0.4774,0.9036,0.5793,0.4217,0.3487,0.3370],
[0.5694,0.9170,1.2220,0.6786,0.3550,1.1680,0.9600,0.4930,13.5060,0.3263,0.3807,0.7789,0.5841,0.6520,0.4729,0.7367,0.5575,0.4441,1.7979,0.3394],
[0.6325,0.3548,0.3279,0.3390,0.6535,0.3829,0.3305,0.2750,0.3263,3.9979,1.6944,0.3964,1.4777,0.9458,0.3847,0.4432,0.7798,0.4089,0.6304,2.4175],
[0.6019,0.4739,0.3100,0.2866,0.6423,0.4773,0.3729,0.2845,0.3807,1.6944,3.7966,0.4283,1.9943,1.1546,0.3711,0.4289,0.6603,0.5680,0.6921,1.3142],
[0.7754,2.0768,0.9398,0.7841,0.3491,1.5543,1.3083,0.5889,0.7789,0.3964,0.4283,4.7643,0.6253,0.3440,0.7038,0.9319,0.7929,0.3589,0.5322,0.4565],
[0.7232,0.6226,0.4745,0.3465,0.6114,0.8643,0.5003,0.3955,0.5841,1.4777,1.9943,0.6253,6.4815,1.0044,0.4239,0.5986,0.7938,0.6103,0.7084,1.2689],
[0.4649,0.3807,0.3543,0.2990,0.4390,0.3340,0.3307,0.3406,0.6520,0.9458,1.1546,0.3440,1.0044,8.1288,0.2874,0.4400,0.4817,1.3744,2.7694,0.7451],
[0.7541,0.4815,0.4999,0.5987,0.3796,0.6413,0.6792,0.4774,0.4729,0.3847,0.3711,0.7038,0.4239,0.2874,12.8375,0.7555,0.6889,0.2818,0.3635,0.4431],
[1.4721,0.7672,1.2315,0.9135,0.7384,0.9656,0.9504,0.9036,0.7367,0.4432,0.4289,0.9319,0.5986,0.4400,0.7555,3.8428,1.6139,0.3853,0.5575,0.5652],
[0.9844,0.6778,0.9842,0.6948,0.7406,0.7913,0.7414,0.5793,0.5575,0.7798,0.6603,0.7929,0.7938,0.4817,0.6889,1.6139,4.8321,0.4309,0.5732,0.9809],
[0.4165,0.3951,0.2778,0.2321,0.4500,0.5094,0.3743,0.4217,0.4441,0.4089,0.5680,0.3589,0.6103,1.3744,0.2818,0.3853,0.4309,38.1078,2.1098,0.3745],
[0.5426,0.5560,0.4860,0.3457,0.4342,0.6111,0.4965,0.3487,1.7979,0.6304,0.6921,0.5322,0.7084,2.7694,0.3635,0.5575,0.5732,2.1098,9.8322,0.6580],
[0.9365,0.4201,0.3690,0.3365,0.7558,0.4668,0.4289,0.3370,0.3394,2.4175,1.3142,0.4565,1.2689,0.7451,0.4431,0.5652,0.9809,0.3745,0.6580,3.6922]]
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class LMDBDataset(Dataset):
    """Creates a dataset from an lmdb file.

    Args:
        data_file (Union[str, Path]): Path to lmdb file.
        in_memory (bool, optional): Whether to cache records in memory after
            first access. Default: False.
    """

    def __init__(self,
                 data_file: Union[str, Path],
                 in_memory: bool = False):
        lmdb_path = Path(data_file)
        if not lmdb_path.exists():
            raise FileNotFoundError(lmdb_path)

        env = lmdb.open(str(lmdb_path), max_readers=1, readonly=True,
                        lock=False, readahead=False, meminit=False)
        # The record count is stored under a dedicated key by the writer.
        with env.begin(write=False) as txn:
            num_examples = pkl.loads(txn.get(b'num_examples'))

        if in_memory:
            # One slot per record; populated lazily in __getitem__.
            self._cache = [None] * num_examples
        self._env = env
        self._in_memory = in_memory
        self._num_examples = num_examples

    def __len__(self) -> int:
        return self._num_examples

    def __getitem__(self, index: int):
        if not 0 <= index < self._num_examples:
            raise IndexError(index)

        if self._in_memory and self._cache[index] is not None:
            return self._cache[index]

        with self._env.begin(write=False) as txn:
            item = pkl.loads(txn.get(str(index).encode()))
        # Records written without an explicit id get their index as one.
        if 'id' not in item:
            item['id'] = str(index)
        if self._in_memory:
            self._cache[index] = item
        return item
def one_hot_encode_aa(aa_str, pad=None):
    """One-hot encode an amino-acid string over the 21-token alphabet.

    Args:
        aa_str: Sequence string (case-insensitive).
        pad: Currently unused; kept for interface compatibility.

    Returns:
        (len(aa_str), 21) integer array with exactly one 1 per row.

    Raises:
        KeyError: If a character is not in the alphabet.
    """
    sequence = aa_str.upper()
    encoded = np.zeros((len(sequence), 21), dtype=int)
    for position, residue in enumerate(sequence):
        encoded[position, AA_IDX[residue]] = 1
    return encoded
def get_X(seqs):
    """One-hot encode a batch of sequences into a single array.

    Sequences containing characters outside the alphabet are silently
    skipped (best-effort batching, as before).

    Fix: removed the unused ``M``/``N`` locals and replaced the
    index-based loop with direct iteration.

    Args:
        seqs: Iterable of equal-length sequence strings.

    Returns:
        np.ndarray of shape (n_kept, L, 21).
    """
    encoded = []
    for seq in seqs:
        try:
            encoded.append(one_hot_encode_aa(seq))
        except KeyError:
            # Unknown residue: drop this sequence rather than abort the batch.
            pass
    return np.array(encoded)
class SequenceData(Dataset):
    """Thin Dataset wrapper over a tensor (or numpy array) of sequences."""

    def __init__(self, X):
        # Accept either a torch tensor or a numpy array.
        self.X = X if torch.is_tensor(X) else torch.from_numpy(X)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx]
class SeqfuncData(Dataset):
    """Dataset of (sequence, function-value) pairs.

    Note: ``scale_X`` is accepted for interface compatibility but is
    currently unused.
    """

    def __init__(self, X, y, scale_X=True):
        self.X = X if torch.is_tensor(X) else torch.from_numpy(X)
        self.y = y if torch.is_tensor(y) else torch.from_numpy(y)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]
def read_fasta(fname):
    """Read a FASTA file and return its sequences one-hot encoded.

    Args:
        fname: Path to the FASTA file.

    Returns:
        torch.Tensor of shape (n_sequences, L, 21).
    """
    seqs = []
    current = ""
    with open(fname) as f:
        for line in f:
            if line.startswith(">"):
                # Header line: flush the sequence accumulated so far.
                if current != "":
                    seqs.append(current)
                    current = ""
            elif line:
                current += line.strip()
    # Flush the final sequence (mirrors the original behaviour, which also
    # appends an empty string for an empty file).
    seqs.append(current)
    return torch.tensor(get_X(seqs))
def save_fasta(X_p, fname, sampling='max'):
    """Write probabilistic sequences to ``fname`` in FASTA format.

    Args:
        X_p: (batch, length, n_tokens) tensor/array of per-position token
            probabilities.
        fname: Output path.
        sampling: 'max' takes the most probable token per position;
            'multinomial' samples a token from the distribution.

    Fixes: output is assembled with a list + ``join`` instead of quadratic
    string concatenation, and the duplicate ``IDX_AA`` lookup is gone.
    """
    if torch.is_tensor(X_p):
        X_p = X_p.cpu().numpy()
    batch, length, _ = X_p.shape
    parts = []
    for i in range(batch):
        parts.append(">{}\n".format(i))
        for j in range(length):
            p = X_p[i, j]
            if sampling == 'max':  # only take the one with max probability
                k = np.argmax(p)
            elif sampling == 'multinomial':  # sample from multinomial
                k = np.random.choice(range(len(p)), p=p)
            aa = IDX_AA[k]
            if aa != '-':
                # Gap tokens are dropped from the written sequence.
                parts.append(aa)
        parts.append("\n")
    with open(fname, "w") as f:
        f.write("".join(parts))
class VAE(nn.Module):
    """Fully-connected variational autoencoder over one-hot sequences.

    Required kwargs: ``seqlen``, ``n_tokens``, ``latent_dim``, ``enc_units``.
    """

    def __init__(self, **kwargs):
        super(VAE, self).__init__()
        self.seqlen = kwargs["seqlen"]
        self.n_tokens = kwargs["n_tokens"]
        self.latent_dim = kwargs["latent_dim"]
        self.enc_units = kwargs["enc_units"]

        # Encoder trunk: flattened one-hot input -> hidden representation.
        self.encoder = nn.Sequential(
            nn.Linear(self.seqlen * self.n_tokens, self.enc_units),
            nn.ELU(),
        )
        # Separate heads for the posterior mean and log-variance.
        self.mean = nn.Linear(self.enc_units, self.latent_dim)
        self.var = nn.Linear(self.enc_units, self.latent_dim)
        # Decoder mirrors the encoder back to per-position token logits.
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_dim, self.enc_units),
            nn.ELU(),
            nn.Linear(self.enc_units, self.seqlen * self.n_tokens),
        )
        self.getprobs = nn.Softmax(dim=-1)

    def encode(self, x):
        """Map input to posterior parameters [mean, logvar]."""
        hidden = self.encoder(x)
        return [self.mean(hidden), self.var(hidden)]

    def decode(self, z):
        """Map latent codes to (batch, seqlen, n_tokens) probabilities."""
        logits = self.decoder(z).view(-1, self.seqlen, self.n_tokens)
        return self.getprobs(logits)

    def reparameterize(self, mean, logvar):
        """Sample z ~ N(mean, exp(logvar)) via the reparameterization trick."""
        std = torch.exp(0.5 * logvar)
        return mean + torch.randn_like(std) * std

    def forward(self, x, **kwargs):
        mean, logvar = self.encode(x)
        z = self.reparameterize(mean, logvar)
        return [self.decode(z), x, mean, logvar]

    def loss(self, *args, **kwargs):
        """Return MSE reconstruction plus KL losses; needs kwarg 'kl_weight'."""
        xhat, x, mean, logvar = args[0], args[1], args[2], args[3]
        kl_weight = kwargs['kl_weight']
        x = x.view(-1, self.seqlen, self.n_tokens)
        # MSE between probability tensors (a cross-entropy variant was left
        # commented out in the original implementation).
        recon_loss = F.mse_loss(x, xhat)
        kl_loss = torch.mean(
            -0.5 * torch.sum(1 + logvar - mean ** 2 - logvar.exp(), dim=1), dim=0
        )
        total = recon_loss + kl_weight * kl_loss
        return {'loss': total, 'recon_loss': recon_loss, 'kl_loss': -kl_loss}

    def sample(self, num_samples, device, **kwargs):
        """Decode ``num_samples`` random latent draws on ``device``."""
        z = torch.randn(num_samples, self.latent_dim).to(device)
        return self.decode(z)

    def reconstruct(self, x, **kwargs):
        """Encode, sample and decode ``x``; return only the reconstruction."""
        return self.forward(x)[0]
|
import csv
import io
from typing import IO, Optional, List, Tuple, Type
from .._functions import get_open_func
from ._CSVFile import CSVFile, DATA_TYPE, VALUE_TYPE, TYPES_TYPE
# Number of leading lines sampled when sniffing the CSV dialect and when
# estimating per-column value types (see sniff_dialect / estimate_types).
SAMPLE_SIZE = 4
def loadf(filename: str,
          encoding: str = 'utf-8',
          has_header: Optional[bool] = None,
          **fmtparams) -> CSVFile:
    """
    Reads a CSV file from the file given by name.

    :param filename: The name of the file to read.
    :param encoding: The encoding of the file.
    :param has_header: Whether there is a header row in the file,
                       or None to attempt to auto-determine.
    :param fmtparams: Any additional parameters to pass to csv.reader.
    :return: The loaded CSV file.
    """
    # The filename (e.g. its extension) decides how the file is opened.
    open_func, mode = get_open_func(filename)
    with open_func(filename, mode, encoding=encoding) as file:
        return load(file, has_header, **fmtparams)
def loads(string: str,
          has_header: Optional[bool] = None,
          **fmtparams) -> CSVFile:
    """
    Reads a CSV file from the given string in CSV file format.

    :param string: Text in CSV file format.
    :param has_header: Whether there is a header row in the file,
                       or None to attempt to auto-determine.
    :param fmtparams: Any additional parameters to pass to csv.reader.
    :return: The loaded CSV file.
    """
    # Wrap the text in an in-memory stream and defer to the stream loader.
    buffer = io.StringIO(string)
    return load(buffer, has_header, **fmtparams)
def load(file: IO[str],
         has_header: Optional[bool] = None,
         **fmtparams) -> CSVFile:
    """
    Reads a CSV file from the given file-like handle.

    :param file: Any stream of text in CSV file format.
    :param has_header: Whether there is a header row in the file,
                       or None to attempt to auto-determine.
    :param fmtparams: Any additional parameters to pass to csv.reader.
    :return: The loaded CSV file.

    Fixes: a file with no data rows (e.g. header only) no longer raises
    IndexError while popping the header or estimating column types.
    """
    # Sniff the possible dialect (and the header flag if undetermined)
    dialect, has_header = sniff_dialect(file, has_header)
    # Read in all the data
    reader = csv.reader(file, dialect, **fmtparams)
    data = [row for row in reader]
    # Take the header from the data if there is one (guard: file may be empty)
    header = data.pop(0) if has_header and data else None
    # Get the column types; an empty data set has no columns to type.
    types = estimate_types(data[:SAMPLE_SIZE]) if data else []
    # Convert the data to the given types (in place)
    convert_columns(data, types)
    return CSVFile(header, types, data, dialect)
def sniff_dialect(file: IO[str], has_header: Optional[bool]) -> Tuple[csv.Dialect, bool]:
    """
    Auto-detects the dialect of the CSV file with csv.Sniffer, and — when
    not already known — whether the file has a header row.

    :param file: The CSV file to sniff; rewound to the start afterwards.
    :param has_header: Known header flag, or None to auto-detect.
    :return: The detected dialect and the (possibly detected) header flag.
    """
    # Peek a small prefix of the file, then rewind so the caller re-reads it.
    sample = ''.join(file.readline() for _ in range(SAMPLE_SIZE))
    file.seek(0)
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(sample)
    if has_header is None:
        has_header = sniffer.has_header(sample)
    return dialect, has_header
def estimate_types(sample: DATA_TYPE) -> List[Type[VALUE_TYPE]]:
    """
    Attempts to guess the value type (int, float or str) of each column
    from the provided sample of data rows.

    :param sample: A sample of the data from the CSV file.
    :return: One estimated type per column.

    Fixes two defects in the original implementation:
    - an empty sample raised IndexError on ``sample[0]``;
    - a column already recognised as ``str`` could be demoted back to
      ``float`` by a later numeric-looking value.
    """
    if not sample:
        return []
    types = []
    for column_index in range(len(sample[0])):
        # Start with the most specific type and widen as values disprove it.
        column_type = int
        for row in sample:
            value = row[column_index]
            try:
                int(value)
                continue  # value fits the current assumption
            except Exception:
                pass
            try:
                float(value)
            except Exception:
                # Non-numeric value: the whole column must be text.
                column_type = str
                break
            if column_type is int:
                column_type = float  # widen int -> float, never str -> float
        types.append(column_type)
    return types
def convert_columns(data: DATA_TYPE, types: TYPES_TYPE):
    """
    Casts every value in ``data`` to its column's type, in place.

    :param data: The rows of the CSV file.
    :param types: One target type per column.
    :return: Nothing; the rows are mutated in place.
    """
    for row in data:
        for column_index, value in enumerate(row):
            row[column_index] = types[column_index](value)
|
""" Test the automech CLI
"""
import os
import tempfile
import subprocess
from automechanic import fs
AUTOMECH_CMD = 'automech'
# Directory of this test file; the heptane example inputs live beside it.
PATH = os.path.dirname(os.path.realpath(__file__))
HEPTANE_PATH = os.path.join(PATH, '../examples/heptane')
def test__help():
    """ test `automech -h`
    """
    cmd = [AUTOMECH_CMD, '-h']
    subprocess.check_call(cmd)
def test__chemkin__help():
    """ test `automech chemkin -h`
    """
    cmd = [AUTOMECH_CMD, 'chemkin', '-h']
    subprocess.check_call(cmd)
def test__chemkin__to_csv():
    """ test `automech chemkin to_csv`
    """
    subprocess.check_call([AUTOMECH_CMD, 'chemkin', 'to_csv', '-h'])
    work_dir = tempfile.mkdtemp()
    print(work_dir)
    # Run inside a scratch directory so outputs do not pollute the repo.
    with fs.enter(work_dir):
        mech_txt = os.path.join(HEPTANE_PATH, 'mechanism.txt')
        ther_txt = os.path.join(HEPTANE_PATH, 'thermo_data.txt')
        cmd = [AUTOMECH_CMD, 'chemkin', 'to_csv', mech_txt, ther_txt, '-p']
        subprocess.check_call(cmd)
def test__species__help():
    """ test `automech species -h`
    """
    cmd = [AUTOMECH_CMD, 'species', '-h']
    subprocess.check_call(cmd)
def test__species__to_inchi():
    """ test `automech species to_inchi`
    """
    subprocess.check_call([AUTOMECH_CMD, 'species', 'to_inchi', '-h'])
    work_dir = tempfile.mkdtemp()
    print(work_dir)
    # Convert the example SMILES table inside a scratch directory.
    with fs.enter(work_dir):
        spc_csv = os.path.join(HEPTANE_PATH, 'smiles.csv')
        cmd = [AUTOMECH_CMD, 'species', 'to_inchi',
               'smiles', spc_csv, '-S', 'inchi.csv', '-p']
        subprocess.check_call(cmd)
def test__species__filesystem():
    """ test `automech species filesystem`
    """
    subprocess.check_call([AUTOMECH_CMD, 'species', 'filesystem', '-h'])
    work_dir = tempfile.mkdtemp()
    print(work_dir)
    with fs.enter(work_dir):
        spc_csv = os.path.join(HEPTANE_PATH, 'inchi.csv')
        # Exercise both stereo-handling modes ('pick' and 'expand' —
        # presumably select-one vs. enumerate; verify against the CLI docs).
        subprocess.check_call([AUTOMECH_CMD, 'species', 'filesystem',
                               spc_csv, '-F', 'automech_fs',
                               '--stereo_handling', 'pick', '-p'])
        subprocess.check_call([AUTOMECH_CMD, 'species', 'filesystem',
                               spc_csv, '-F', 'automech_fs_expanded',
                               '--stereo_handling', 'expand',
                               '-S', 'species_expanded.csv', '-p'])
if __name__ == '__main__':
    # Run one test directly; the commented calls are kept for convenience
    # when debugging the other subcommands by hand.
    # test__help()
    # test__chemkin__help()
    # test__chemkin__to_csv()
    # test__species__help()
    # test__species__to_inchi()
    test__species__filesystem()
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestPT(unittest.TestCase):
    """National Portuguese holidays for 2017."""

    def setUp(self):
        self.holidays = holidays.PT()

    def test_2017(self):
        # http://www.officeholidays.com/countries/portugal/2017.php
        expected = [
            (date(2017, 1, 1), "New Year"),
            (date(2017, 4, 14), "Good Friday"),
            (date(2017, 4, 16), "Easter"),
            (date(2017, 4, 25), "Liberation Day"),
            (date(2017, 5, 1), "Labour Day"),
            (date(2017, 6, 10), "Portugal Day"),
            (date(2017, 6, 15), "Corpus Christi"),
            (date(2017, 8, 15), "Assumption Day"),
            (date(2017, 10, 5), "Republic Day"),
            (date(2017, 11, 1), "All Saints Day"),
            (date(2017, 12, 1), "Independence"),
            (date(2017, 12, 8), "Immaculate"),
            (date(2017, 12, 25), "Christmas"),
        ]
        for day, _name in expected:
            self.assertIn(day, self.holidays)
class TestPortugalExt(unittest.TestCase):
    """Extended Portuguese holidays (adds widely observed, non-official days)."""

    def setUp(self):
        self.holidays = holidays.PortugalExt()

    def test_2017(self):
        self.assertIn(date(2017, 12, 24), self.holidays)  # Christmas' Eve
        self.assertIn(date(2017, 12, 26), self.holidays)  # S.Stephan
        # Fix: this assertion duplicated the 12/26 check above while being
        # labelled "New Year's Eve"; it now checks 12/31 as intended.
        self.assertIn(date(2017, 12, 31), self.holidays)  # New Year's Eve
|
import os
import shutil
import subprocess
import argparse
import signal
import time
import datetime
from customized_timeout import TimeoutException,TimeoutError
# Map each verification method to the external solver binary it spawns, so a
# timed-out run can `pkill` the right process.
proc_to_kill = {"PdrChc":"z3", "Cvc4Sy": "cvc4", "PdrAbc":"abc", "RelChc":"z3"}
# Quick suite: only the GB design with two methods.
TestSucc = \
[("GB",[("GBpdrchc","PdrChc"), ("GBcvc4sy","Cvc4Sy")])]
# Full suite: every design/method combination.
TestFull = \
[
("AES",[("AESrelchc","RelChc"),("AESpdrabc","PdrAbc"), ("AESpdrchc","PdrChc"), ("AEScvc4sy","Cvc4Sy")]),
("Pico",[("PICOrelchc","RelChc"),("PICOpdrabc","PdrAbc"), ("PICOpdrchc","PdrChc"), ("PICOcvc4sy","Cvc4Sy")]),
("GB",[("GBpdrchc","PdrChc"), ("GBcvc4sy","Cvc4Sy")])]
def CountRuns(tests):
    """Return the total number of (program, method) runs across all designs.

    Fix: replaced the manual accumulator loop (with its unused design
    binding) with the idiomatic ``sum``.
    """
    return sum(len(runs) for _design, runs in tests)
def getNumbers(fin):
    """Parse a result-stat file: status line, then total time, CEGAR
    iteration count, synthesis time and equivalence-check time, one per line.

    Returns (status, iterations, syn_time, eq_time, total_time); the status
    string keeps its trailing newline exactly as read.
    """
    status = fin.readline()
    total_time = float(fin.readline())
    iterations = int(fin.readline())
    syn_time = float(fin.readline())
    eq_time = float(fin.readline())
    return status, iterations, syn_time, eq_time, total_time
def ClearVerifOutput(tests):
    """Wipe and recreate each design's existing verification output folders.

    The GB design is skipped entirely, and only directories that already
    exist are recreated (new method directories are not created here).
    """
    cwd = os.getcwd()
    for design, runs in tests:
        if design == 'GB':
            continue
        base_dir = os.path.join(cwd, design)
        for _prg, out_dir in runs:
            result_dir = os.path.join(base_dir, "verification/" + out_dir)
            if os.path.exists(result_dir):
                shutil.rmtree(result_dir)
                os.mkdir(result_dir)
def RunTests(tests, timeout, total):
    """Run every (program, method) pair, each under `timeout` seconds,
    killing stray solver processes and printing a per-job result banner.

    Note: this file is Python 2 (print statements throughout).
    """
    cwd = os.getcwd()
    idx = 1
    for directory, test_list in tests:
        base_dir = os.path.join(cwd, directory)
        test_prg_dir = os.path.join(base_dir,"build")
        #print test_list
        for prg,outDir in test_list:
            # clear result from any previous run of this job
            test_result_file = os.path.join(base_dir,"verification/"+outDir+'/result-stat.txt')
            if os.path.exists(test_result_file):
                os.remove(test_result_file)
            full_prg = os.path.join(test_prg_dir, prg)
            print '--------------------------'
            print '| Job: (%3d/%3d) |' % (idx, total)
            print '--------------------------'
            print 'Start time:', datetime.datetime.now()
            print 'Run:', full_prg
            print 'Design:',directory
            idx += 1
            if not os.path.exists( full_prg ):
                print full_prg,'not available'
            os.chdir(test_prg_dir)
            #result_log = open('running_result.log','w')
            process = subprocess.Popen(['./' + prg,str(timeout)], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
            print 'PID:', process.pid
            print 'Method:',outDir
            # Solver binary to pkill if the harness itself does not exit.
            proc_name = proc_to_kill[outDir] if outDir in proc_to_kill else ''
            #print (proc_name)
            #os.setpgid(process.pid, process.pid)
            #process.getpgid(process.pid)
            pythonkilled = False
            # Give the child 5 extra seconds beyond its own timeout before
            # forcing termination; Ctrl-C is handled the same way.
            try:
                with TimeoutException(int(timeout)+5):
                    process.communicate()
            except KeyboardInterrupt:
                print 'Try killing subprocess...',
                pythonkilled = True
                try:
                    process.terminate()
                    print 'Done'
                except OSError:
                    print 'Unable to kill'
                process.wait()
            except TimeoutError:
                pythonkilled = True
                print 'Try killing subprocess...',
                try:
                    process.terminate()
                    print 'Done'
                except OSError:
                    print 'Unable to kill'
                process.wait()
            # Best-effort cleanup of the solver; 256 means pkill found no
            # match yet, so retry once after a short pause.
            if len(proc_name)>0:
                pkill_result = os.system('pkill -n '+proc_name)
                if pkill_result == 256:
                    time.sleep(1)
                    pkill_result = os.system('pkill -n '+proc_name)
            if pythonkilled:
                print '--------------------------'
                print '| Result |'
                print '--------------------------'
                print 'Status : ','KILLED'
                print '--------------------------'
            elif os.path.exists(test_result_file):
                with open(test_result_file) as fin:
                    res, cegar_iter, syn_time, eq_time,total_time = getNumbers(fin)
                # RelChc reports only a total time; the other methods also
                # report iteration counts and the syn/eq time split.
                if outDir == 'RelChc':
                    print
                    print '--------------------------'
                    print '| Result |'
                    print '--------------------------'
                    print 'Status : ',res
                    if 'KILLED' not in res:
                        print 't(total) =',total_time
                    print '--------------------------'
                    print
                else:
                    print
                    print '--------------------------'
                    print '| Result |'
                    print '--------------------------'
                    print 'Status : ',res
                    if 'KILLED' not in res:
                        print '#(iter) =',cegar_iter
                        print 't(syn) =',syn_time
                        print 't(eq) =',eq_time
                        print 't(syn+eq) =',syn_time+eq_time
                    print '--------------------------'
                    print
            else:
                print '--------------------------'
                print '| Result |'
                print '--------------------------'
                print 'skipped'
                print '--------------------------'
                print
# if exists
# execute with timeout
# Command-line entry point: choose the quick or the full test suite and the
# per-job time limit, then clear old outputs and run everything.
parser = argparse.ArgumentParser(description='Run Experiments on Redundant Counters (RC)')
parser.add_argument('-t','--timeout',
                    default=2*60*60,
                    help='The time limit in seconds')
parser.add_argument('-a','--all', action='store_true',
                    default=False,
                    help='Run all the tests (default: only GBpdrchc and GBcvc4sy)')
args = parser.parse_args()
testset = TestFull if args.all else TestSucc
print '--------------------------'
print '| Jobs |'
print '--------------------------'
print 'Will launch (%d) jobs ' % CountRuns(testset)
print 'Time-out limit (sec): ',args.timeout
print '--------------------------'
print
ClearVerifOutput(testset)
RunTests(testset, args.timeout, CountRuns(testset))
#ClearVerifOutput(TestsAll)
|
# Author: Michał Bednarek PUT Poznan
# Comment: Helper script for validating data created from the simulation
import pickle
import matplotlib.pyplot as plt
import numpy as np
# path = "./data/dataset/final_ds/real/real_train.pickle"
paths = ["./data/dataset/final_ds/real/real_train.pickle",
"./data/dataset/final_ds/real/real_val.pickle",
"./data/dataset/final_ds/real/real_test.pickle"]
# paths = ["./data/dataset/final_ds/sim/sim_train.pickle",
# "./data/dataset/final_ds/sim/sim_val.pickle"]
# paths = ["./data/dataset/40_10_60/",
# "./data/dataset/final_ds/real/real_val.pickle",
# "./data/dataset/final_ds/real/real_test.pickle"]
# path = "data/dataset/40_10_60/real_dataset_train.pickle"
path = "data/dataset/final_ds/sim/sim_val.pickle"
def noised_modality(data, noise_mag: float = 0.2):
    """Return a copy of ``data`` with uniform noise added element-wise.

    Bug fixed: the original used ``data += noise``, mutating the caller's
    array in place as a hidden side effect.  A fresh array is returned
    instead; the return contract (same shape, noised values) is unchanged.

    Args:
        data: numpy array of any shape.
        noise_mag: half-width of the uniform noise interval
            [-noise_mag, noise_mag].

    Returns:
        A new numpy array with the same shape as ``data``.
    """
    noise = np.random.uniform(-noise_mag, noise_mag, size=data.shape)
    return data + noise
def compute_magnitude(samples):
    """Per-time-step Euclidean norm of the first three channels.

    ``samples`` is indexed (batch, time, channel); channels 0..2 are treated
    as an x/y/z triple and reduced to a single magnitude.
    """
    return np.linalg.norm(samples[:, :, :3], axis=-1)
def playground():
    """Merge the real train/val/test pickles, re-split them per stiffness
    class into a fixed 30/10/60 split, flip two signal axes, and write the
    result to data/dataset/40_10_60/*.pickle."""
    labels, samples = list(), list()
    # NOTE(review): the loop variable shadows the module-level ``path``.
    for path in paths:
        with open(path, "rb") as fp:
            ds = pickle.load(fp)
            labels.append(ds["stiffness"])
            samples.append(ds["data"])
    labels = np.concatenate([*labels], axis=0)
    samples = np.concatenate([*samples], axis=0)
    values = np.unique(labels)
    train_dataset_x, train_dataset_y = list(), list()
    val_dataset_x, val_dataset_y = list(), list()
    test_dataset_x, test_dataset_y = list(), list()
    for i, val in enumerate(values):
        # indices of all samples belonging to this stiffness class
        arr = np.where(labels == val, 1, 0)
        idx = np.argwhere(arr == 1).flatten()
        # fixed per-class split: first 30 train, next 10 val, next 60 test
        idx_train, idx_val, idx_test = idx[:30], idx[30:40], idx[40:100]
        # samples split
        x_train, y_train = samples[idx_train, :, :], labels[idx_train]
        # sign-flip channels 0 and 2 -- presumably mirrors the sensor axes to
        # match the simulation convention; TODO confirm
        x_train[..., 0] *= -1.0
        x_train[..., 2] *= -1.0
        train_dataset_x.append(x_train)
        train_dataset_y.append(y_train)
        x_val, y_val = samples[idx_val, :, :], labels[idx_val]
        x_val[..., 0] *= -1.0
        x_val[..., 2] *= -1.0
        val_dataset_x.append(x_val)
        val_dataset_y.append(y_val)
        x_test, y_test = samples[idx_test, :, :], labels[idx_test]
        x_test[..., 0] *= -1.0
        x_test[..., 2] *= -1.0
        test_dataset_x.append(x_test)
        test_dataset_y.append(y_test)
        print("Val: {}, num_samples: {}".format(val, arr.sum()))
    # Stack the per-class chunks back into single arrays.
    train_dataset_x = np.vstack(train_dataset_x)
    train_dataset_y = np.vstack(train_dataset_y).flatten()
    val_dataset_x = np.vstack(val_dataset_x)
    val_dataset_y = np.vstack(val_dataset_y).flatten()
    test_dataset_x = np.vstack(test_dataset_x)
    test_dataset_y = np.vstack(test_dataset_y).flatten()
    # Persist the three splits.
    file = open('data/dataset/40_10_60/real_dataset_train.pickle', 'wb')
    pickle.dump({
        "data": train_dataset_x,
        "stiffness": train_dataset_y
    }, file)
    file.close()
    file = open('data/dataset/40_10_60/real_dataset_val.pickle', 'wb')
    pickle.dump({
        "data": val_dataset_x,
        "stiffness": val_dataset_y
    }, file)
    file.close()
    file = open('data/dataset/40_10_60/real_dataset_test.pickle', 'wb')
    pickle.dump({
        "data": test_dataset_x,
        "stiffness": test_dataset_y
    }, file)
    file.close()

    # with open(path, "rb") as fp:
    #     data = pickle.load(fp)
    #
    # acc1, acc2 = list(), list()
    # w1, w2 = list(), list()
    #
    # for sampl, stif in zip(data["data"], data["stiffness"]):
    #     acc1.append(np.sqrt(sampl[:, 0] ** 2 + sampl[:, 1] ** 2))
    #     acc2.append(np.sqrt(sampl[:, 3] ** 2 + sampl[:, 4] ** 2))
    #     # w1.append(np.sqrt(sampl[:, 6] ** 2 + sampl[:, 7] ** 2))
    #     # w2.append(np.sqrt(sampl[:, 9] ** 2 + sampl[:, 10] ** 2))
    #     # mag = [acc1, acc2, gyr1, gyr2]
    #
    #     # # acc
    #     # plt.subplot(4, 1, 1)
    #     # plt.plot(mag[0], 'r')
    #     # plt.subplot(4, 1, 2)
    #     # plt.plot(mag[1], 'g')
    #     # plt.subplot(4, 1, 3)
    #     # plt.plot(mag[2], 'b')
    #     # plt.subplot(4, 1, 4)
    #     # plt.plot(mag[3], 'y')
    #     #
    #     # plt.show()
    #     # input(stif)
    #
    #     signal = np.stack([acc1, acc2], -1)
    #     file = open('data/dataset/val_acc_only_sim.pickle', 'wb')
    #     pickle.dump({
    #         "data": signal,
    #         "stiffness": data["stiffness"]
    #     }, file)
    #     file.close()


if __name__ == '__main__':
    playground()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/perfect-squares/description/
Given a positive integer n, find the least number of perfect square numbers
(for example, 1, 4, 9, 16, ...) which sum to n.
For example, given n = 12, return 3 because 12 = 4 + 4 + 4; given n = 13,
return 2 because 13 = 4 + 9.
"""
import pytest
import math
class Solution(object):
    """Bottom-up DP: f(n) = 1 + min over perfect squares s <= n of f(n - s)."""

    def numSquares(self, num):
        """Return the least count of perfect squares that sum to ``num``.

        ``table[k]`` holds the answer for ``k``; ``table[0] = 0`` seeds the
        recurrence, and ``k`` ones is always a valid upper bound.
        """
        table = [0] * (num + 1)
        for k in range(1, num + 1):
            best = k  # worst case: k copies of 1
            root = 1
            while root * root <= k:
                candidate = table[k - root * root] + 1
                if candidate < best:
                    best = candidate
                root += 1
            table[k] = best
        return table[num]
# Each entry is [n, expected minimal number of perfect squares summing to n].
TESTCASES = [
    [1, 1],
    [2, 2],
    [3, 3],
    [4, 1],
    [5, 2],
    [6, 3],
    [7, 4],
    [8, 2],
    [9, 1],
    [10, 2],
    [11, 3],
    [12, 3],
    [13, 2],
    [14, 3],
    [15, 4],
    [6665, 3],
]


@pytest.mark.parametrize('args,expected', TESTCASES)
def test_perfect_squares(args, expected):
    """Check Solution.numSquares against the table above."""
    actual = Solution().numSquares(args)
    assert actual == expected
|
import os
from ImageSimilarity import ImageSimilarity
from ReportFoto import ReportFoto
class ImageWorker:
    """Builds a ReportFoto for downloaded photos by comparing them, via
    image similarity, against labelled reference-logo folders."""

    # (reference folder, human-readable label), kept in the same order as the
    # old result[0..10] slots so the argmax classification is unchanged.
    LOGO_CATEGORIES = [
        ("dataset_image_ssim\\competitors\\", " competitors"),
        ("dataset_image_ssim\\creo ERRATI\\", " creo errati"),
        ("dataset_image_ssim\\creo loghi ok ma proporzioni o abbinamenti NON CORRETTI\\", " creo ni"),
        ("dataset_image_ssim\\creo TUTTO OK\\", " creo ok"),
        ("dataset_image_ssim\\lube&creo ERRATI\\", " lube e creo errati"),
        ("dataset_image_ssim\\lube&creo loghi ok ma proporzioni o abbinamenti NON CORRETTI\\", " lube e creo ni"),
        ("dataset_image_ssim\\lube&creo TUTTO OK\\", " lube e creo ok"),
        ("dataset_image_ssim\\lube loghi ok ma proporzioni o abbinamenti NON CORRETTI\\", " lube ni"),
        ("dataset_image_ssim\\lubeTUTTO OK\\", " lube ok"),
        ("dataset_image_ssim\\lubeERRATI\\", " lube errati"),
        ("dataset_image_ssim\\NOT LOGO\\", " not logo"),
    ]

    def generate_reportfoto(self):
        """Run every check over the downloaded photos and collect the
        outcomes into a ReportFoto."""
        lista_foto_scaricate = self.get_path_files("photo_downloaded\\")
        correttezza_logo = self.find_and_classify_logo(lista_foto_scaricate)
        presenza_competitors = self.find_competitors(lista_foto_scaricate)
        presenza_scritte_foto = self.find_keywords_in_images(lista_foto_scaricate)
        report_foto = ReportFoto(correttezza_logo, presenza_competitors, presenza_scritte_foto)
        return report_foto

    def find_and_classify_logo(self, lista_foto_scaricate):
        """Classify each photo into the best-matching logo category.

        Bug fixed: the original returned None although generate_reportfoto()
        feeds the result into ReportFoto; it now returns
        ``{photo_path: (label, best_score)}``.  The eleven copy-pasted
        comparison loops are collapsed into one loop over LOGO_CATEGORIES
        (same order, same max-score-per-category semantics), and the leftover
        per-comparison debug prints were removed.
        """
        img_ssim = ImageSimilarity()
        reference_sets = [(label, self.get_path_files(folder))
                          for folder, label in self.LOGO_CATEGORIES]
        classification = {}
        for foto in lista_foto_scaricate:
            # Best similarity reached against each category, in order.
            scores = [max((img_ssim.compare(foto, ref) for ref in refs), default=0)
                      for _label, refs in reference_sets]
            best = scores.index(max(scores))
            label = reference_sets[best][0]
            classification[foto] = (label, scores[best])
            print(foto + label + " percentuale : " + str(scores[best]))
        return classification

    def find_competitors(self, lista_foto_scaricate):
        # Not implemented yet.
        pass

    def find_keywords_in_images(self, lista_foto_scaricate):
        # Not implemented yet.
        pass

    def get_path_files(self, path):
        """Return absolute paths of the files directly inside ``path``.

        NOTE(review): the project root is hard-coded to one developer's
        machine; consider deriving it from __file__ or configuration.
        """
        root = "C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\"
        relative = [os.path.join(path, fn) for fn in next(os.walk(path))[2]]
        return [root + p for p in relative]
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRodbc(RPackage):
    """An ODBC database interface."""

    # Spack package metadata: where to find and fetch the R package sources.
    homepage = "https://cloud.r-project.org/package=RODBC"
    url      = "https://cloud.r-project.org/src/contrib/RODBC_1.3-13.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/RODBC/"

    # Known versions with their source-tarball checksums.
    version('1.3-15', sha256='c43e5a2f0aa2f46607e664bfc0bb3caa230bbb779f4ff084e01727642da136e1')
    version('1.3-13', sha256='e8ea7eb77a07be36fc2d824c28bb426334da7484957ffbc719140373adf1667c')

    # Needs the unixODBC driver manager and R >= 3.0.0 at build and run time.
    depends_on('unixodbc')
    depends_on('r@3.0.0:', type=('build', 'run'))
|
""" genjs constant database module
"""
import py
from pypy.rpython.ootypesystem import ootype
from pypy.translator.js.opcodes import opcodes
from pypy.translator.js.function import Function
from pypy.translator.js.log import log
from pypy.translator.js._class import Class
from pypy.translator.js.support import JavascriptNameManager
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Void, Bool, Float
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong, typeOf
from pypy.rpython.lltypesystem.lltype import Char, UniChar
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem import bltregistry
from pypy.objspace.flow.model import Variable, Constant
from pypy.translator.js.modules import dom
from pypy.translator.js.commproxy import XmlHttp
try:
set
except NameError:
from sets import Set as set
class LowLevelDatabase(object):
    """Bookkeeping for the genjs backend (Python 2 code).

    Tracks which classes, functions and constants still need to be rendered
    to JavaScript, allocates unique names for them, and emits constants in
    dependency order."""

    def __init__(self, genoo):
        self._pending_nodes = set()   # nodes scheduled but not yet rendered
        self.genoo = genoo            # the owning code generator
        self._rendered_nodes = set()
        self.classes = {} # classdef --> class_name
        self.functions = {} # graph --> function_name
        self.function_names = {} # graph --> real_name
        self.methods = {} # graph --> method_name
        self.consts = {}  # value --> const_name
        self.reverse_consts = {}      # const_name --> value (inverse of consts)
        self.const_names = set()
        self.rendered = set()         # constants already emitted to ilasm
        self.const_var = Variable("__consts")  # JS object holding all constants
        self.name_manager = JavascriptNameManager(self)
        self.pending_consts = []      # (const, name) pairs awaiting field recording
        self.cts = self.genoo.TypeSystem(self)
        self.proxies = []             # XmlHttp comm proxies registered so far

    def is_primitive(self, type_):
        # Primitives are rendered inline and never recorded as constants.
        if type_ in [Void, Bool, Float, Signed, Unsigned, SignedLongLong, UnsignedLongLong, Char, UniChar, ootype.StringBuilder] or \
            isinstance(type_,ootype.StaticMethod):
            return True
        return False

    def pending_function(self, graph):
        # Schedule a flow graph for rendering as a JS function.
        self.pending_node(self.genoo.Function(self, graph))

    def pending_abstract_function(self, name):
        pass
        # XXX we want to implement it at some point (maybe...)

    def pending_class(self, classdef):
        # Schedule a class definition for rendering; returns the Class node.
        c = Class(self, classdef)
        self.pending_node(c)
        return c

    def pending_record(self, record):
        # NOTE(review): ``Record`` is not imported in this module's visible
        # header -- presumably provided elsewhere; confirm before relying on it.
        r = Record(self, record)
        self.pending_node(r)
        return r.get_name()

    def pending_node(self, node):
        # Idempotent: a node already pending or rendered is not re-queued.
        if node in self._pending_nodes or node in self._rendered_nodes:
            return
        self._pending_nodes.add(node)

    def record_function(self, graph, name):
        self.functions[graph] = name

    def get_uniquename(self, graph, name):
        """Return (allocating on first use) the unique JS name for ``graph``."""
        try:
            return self.function_names[graph]
        except KeyError:
            # lenmax effectively disables name truncation here.
            real_name = self.name_manager.uniquename(name, lenmax=1111111)
            self.function_names[graph] = real_name
            return real_name

    def record_class(self, classdef, name):
        self.classes[classdef] = name

    def register_comm_proxy(self, proxy_const, *args):
        """ Register external object which should be rendered as
        method call
        """
        self.proxies.append(XmlHttp(proxy_const, *args))

    def graph_name(self, graph):
        return self.functions.get(graph, None)

    def class_name(self, classdef):
        return self.classes.get(classdef, None)

    def record_const(self, value, type_ = None, retval='name'):
        """Record ``value`` as a constant (once) and return its name, or the
        AbstractConst wrapper itself when ``retval`` != 'name'."""
        if type_ is None:
            type_ = typeOf(value)
        if self.is_primitive(type_):
            return None  # primitives are inlined, not recorded
        const = AbstractConst.make(self, value)
        if not const:
            return None
        try:
            if retval == 'name':
                return self.consts[const]
            else:
                self.consts[const]
                return self.reverse_consts[self.consts[const]]
        except KeyError:
            # First time we see this constant: allocate a unique name and
            # queue it for field recording / rendering.
            if self.genoo.config.translation.verbose:
                log("New const:%r"%value)
                if isinstance(value, ootype._string):
                    log(value._str)
            else:
                log.dot()
            name = const.get_name()
            if name in self.const_names:
                name += '__%d' % len(self.consts)
            self.consts[const] = name
            self.reverse_consts[name] = const
            self.const_names.add(name)
            self.pending_consts.append((const,name))
        if retval == 'name':
            return name
        else:
            return const

    def gen_constants(self, ilasm, pending):
        """Emit all recorded constants in dependency order (skipped while
        ``pending`` is true, after draining field recording)."""
        # Drain the pending queue so every constant knows its fields.
        try:
            while True:
                const,name = self.pending_consts.pop()
                const.record_fields()
        except IndexError:
            pass
        if pending:
            return
        if not self.rendered:
            ilasm.begin_consts(self.const_var.name)

        def generate_constants(consts):
            # Yields (const, name) so that anything a constant depends on is
            # rendered before the constant itself.
            all_c = [const for const,name in consts.iteritems()]
            dep_ok = set()
            while len(all_c) > 0:
                const = all_c.pop()
                if const not in self.rendered:
                    to_render = True
                    if hasattr(const, 'depends_on') and const.depends_on:
                        for i in const.depends_on:
                            if i not in self.rendered and i not in dep_ok:
                                assert i.depends is None or const in i.depends
                                to_render = False
                                # NOTE(review): ``continue`` here only skips to
                                # the next dependency; a ``break`` may have been
                                # intended -- confirm against genjs history.
                                continue
                    # NOTE(review): by Python precedence this condition parses
                    # as (to_render and X) or Y or Z -- confirm that is the
                    # intended grouping.
                    if to_render and (not hasattr(const, 'depends')) or (not const.depends) or const in dep_ok:
                        yield const,consts[const]
                        self.rendered.add(const)
                    else:
                        # Re-queue this constant after its dependencies.
                        all_c.append(const)
                        for i in const.depends:
                            all_c.append(i)
                        dep_ok.add(const)
        # We need to keep track of fields to make sure
        # our items appear earlier than us
        to_init = []
        for const, name in generate_constants(self.consts):
            if self.genoo.config.translation.verbose:
                log("Recording %r %r"%(const,name))
            else:
                log.dot()
            ilasm.load_local(self.const_var)
            const.init(ilasm)
            ilasm.set_field(None, name)
            ilasm.store_void()
            to_init.append((const, name))
            #ilasm.field(name, const.get_type(), static=True)
        # Initialize fields only after every constant object exists.
        for const, name in to_init:
            const.init_fields(ilasm, self.const_var, name)

    def load_const(self, type_, value, ilasm):
        """Emit code that loads ``value``: inline for primitives, otherwise a
        field access on the shared constants object."""
        if self.is_primitive(type_):
            ilasm.load_const(self.cts.primitive_repr(type_, value))
        else:
            try:
                return self.consts[BuiltinConst(value)]
            except KeyError:
                name = self.record_const(value)
                ilasm.load_local(self.const_var)
                ilasm.get_field(name)
            #assert False, 'Unknown constant %s' % const
class AbstractConst(object):
    """Base class for renderable (non-primitive) constants.

    Equality and hashing delegate to ``get_key()``, so identical constants
    are deduplicated in the database's ``consts`` mapping."""

    def __init__(self, db, const):
        self.db = db
        self.const = const
        self.cts = db.genoo.TypeSystem(db)
        self.depends = set()      # constants that must be rendered before this one
        self.depends_on = set()   # constants that require this one first

    def __hash__(self):
        return hash(self.get_key())

    def __eq__(self, other):
        return (other.__class__ is self.__class__ and
                other.get_key() == self.get_key())

    def __ne__(self, other):
        return not (self == other)

    def make(db, const):
        """Factory: wrap ``const`` in the matching *Const subclass, or return
        None for a class object without an INSTANCE."""
        if isinstance(const, ootype._view):
            # Unwrap the view but remember its declared static type.
            static_type = const._TYPE
            const = const._inst
        else:
            static_type = None
        if isinstance(const, ootype._instance):
            return InstanceConst(db, const, static_type)
        elif isinstance(const, ootype._list):
            return ListConst(db, const)
        elif isinstance(const, ootype._record):
            return RecordConst(db, const)
        elif isinstance(const, ootype._string):
            return StringConst(db, const)
        elif isinstance(const, ootype._dict):
            return DictConst(db, const)
        elif isinstance(const, bltregistry._external_inst):
            return ExtObject(db, const)
        elif isinstance(const, ootype._class):
            if const._INSTANCE:
                return ClassConst(db, const)
            else:
                return None
        else:
            assert False, 'Unknown constant: %s %r' % (const, typeOf(const))
    make = staticmethod(make)

    # The methods below are hooks; subclasses override the ones they need.
    def get_name(self):
        pass

    def get_type(self):
        pass

    def init(self, ilasm):
        pass

    def init_fields(self, ilasm, const_var, name):
        pass

    def record_fields(self):
        pass
class InstanceConst(AbstractConst):
    """A prebuilt ootype instance constant."""

    def __init__(self, db, obj, static_type):
        # Does not call AbstractConst.__init__ (sets the same attributes itself).
        self.depends = set()
        self.depends_on = set()
        self.db = db
        self.cts = db.genoo.TypeSystem(db)
        self.obj = obj
        # Static type defaults to the object's dynamic type when the constant
        # was not accessed through a view.
        if static_type is None:
            self.static_type = obj._TYPE
        else:
            self.static_type = static_type
        self.cts.lltype_to_cts(obj._TYPE) # force scheduling of obj's class

    def get_key(self):
        # Identity of the wrapped instance deduplicates the constant.
        return self.obj

    def get_name(self):
        return self.obj._TYPE._name.replace('.', '_')

    def get_type(self):
        return self.cts.lltype_to_cts(self.static_type)

    def init(self, ilasm):
        """Emit the constructor call (or void for a null instance)."""
        if not self.obj:
            ilasm.load_void()
            return
        classdef = self.obj._TYPE
        try:
            # Externally suggested classes keep their short (unmangled) name.
            classdef._hints['_suggested_external']
            ilasm.new(classdef._name.split(".")[-1])
        except KeyError:
            ilasm.new(classdef._name.replace(".", "_"))

    def record_fields(self):
        """Record every non-Void field value as a dependency constant."""
        if not self.obj:
            return
        INSTANCE = self.obj._TYPE
        #while INSTANCE:
        for i, (_type, val) in INSTANCE._allfields().items():
            if _type is not ootype.Void:
                name = self.db.record_const(getattr(self.obj, i), _type, 'const')
                if name is not None:
                    self.depends.add(name)
                    name.depends_on.add(self)

    def init_fields(self, ilasm, const_var, name):
        """Emit assignments for every non-Void field of the instance."""
        if not self.obj:
            return
        INSTANCE = self.obj._TYPE
        #while INSTANCE:
        for i, (_type, el) in INSTANCE._allfields().items():
            if _type is not ootype.Void:
                ilasm.load_local(const_var)
                self.db.load_const(_type, getattr(self.obj, i), ilasm)
                ilasm.set_field(None, "%s.%s"%(name, i))
                ilasm.store_void()
class RecordConst(AbstractConst):
    """A prebuilt ootype record (tuple-like) constant."""

    def get_name(self):
        return "const_tuple"

    def init(self, ilasm):
        # A null record renders as void, otherwise as a fresh JS object.
        if not self.const:
            ilasm.load_void()
        else:
            ilasm.new_obj()

    def record_fields(self):
        """Record each item as a dependency constant."""
        if not self.const:
            return
        for i in self.const._items:
            name = self.db.record_const(self.const._items[i], None, 'const')
            if name is not None:
                self.depends.add(name)
                name.depends_on.add(self)

    def get_key(self):
        return self.const

    def init_fields(self, ilasm, const_var, name):
        """Emit one field assignment per record item."""
        if not self.const:
            return
        #for i in self.const.__dict__["_items"]:
        for i in self.const._items:
            ilasm.load_local(const_var)
            el = self.const._items[i]
            self.db.load_const(typeOf(el), el, ilasm)
            ilasm.set_field(None, "%s.%s"%(name, i))
            ilasm.store_void()
class ListConst(AbstractConst):
    """A prebuilt ootype list constant."""

    def get_name(self):
        return "const_list"

    def init(self, ilasm):
        # A null list renders as void, otherwise as a fresh JS list.
        if not self.const:
            ilasm.load_void()
        else:
            ilasm.new_list()

    def record_fields(self):
        """Record each element as a dependency constant."""
        if not self.const:
            return
        for i in self.const._list:
            name = self.db.record_const(i, None, 'const')
            if name is not None:
                self.depends.add(name)
                name.depends_on.add(self)

    def get_key(self):
        return self.const

    def init_fields(self, ilasm, const_var, name):
        """Emit one indexed setitem per element (Python 2 ``xrange``)."""
        if not self.const:
            return
        for i in xrange(len(self.const._list)):
            ilasm.load_str("%s.%s"%(const_var.name, name))
            el = self.const._list[i]
            self.db.load_const(typeOf(el), el, ilasm)
            self.db.load_const(typeOf(i), i, ilasm)
            ilasm.list_setitem()
            ilasm.store_void()
class StringConst(AbstractConst):
    """A prebuilt string constant; keyed by its character data."""

    def get_name(self):
        return "const_str"

    def get_key(self):
        return self.const._str

    def init(self, ilasm):
        # repr() handles the escaping; a null string renders as undefined.
        if self.const:
            s = self.const._str
            # do some escaping
            #s = s.replace("\n", "\\n").replace('"', '\"')
            #s = repr(s).replace("\"", "\\\"")
            ilasm.load_str("%s" % repr(s))
        else:
            ilasm.load_str("undefined")

    def init_fields(self, ilasm, const_var, name):
        # Strings have no fields to initialize.
        pass
class ClassConst(AbstractConst):
    """A class object constant; renders as the mangled class name."""

    def __init__(self, db, const):
        super(ClassConst, self).__init__(db, const)
        self.cts.lltype_to_cts(const._INSTANCE) # force scheduling of class

    def get_key(self):
        return self.get_name()

    def get_name(self):
        return self.const._INSTANCE._name.replace(".", "_")

    def init(self, ilasm):
        ilasm.load_const("%s" % self.get_name())

    #def init_fields(self, ilasm, const_var, name):
    #    pass
class BuiltinConst(AbstractConst):
    """A constant identified purely by a name already known to the runtime.

    Does not call AbstractConst.__init__ -- it carries no db/const state."""

    def __init__(self, name):
        self.name = name

    def get_key(self):
        return self.name

    def get_name(self):
        return self.name

    def init_fields(self, *args):
        pass

    def init(self, ilasm):
        ilasm.load_str(self.name)
class DictConst(RecordConst):
    """A prebuilt ootype dict constant (inherits init/get_key from RecordConst)."""

    def record_const(self, co):
        # Record one key or value as a dependency constant.
        name = self.db.record_const(co, None, 'const')
        if name is not None:
            self.depends.add(name)
            name.depends_on.add(self)

    def record_fields(self):
        """Record every key and value as dependency constants."""
        if not self.const:
            return
        for i in self.const._dict:
            self.record_const(i)
            self.record_const(self.const._dict[i])

    def init_fields(self, ilasm, const_var, name):
        """Emit one setitem per (key, value) pair."""
        if not self.const:
            return
        for i in self.const._dict:
            ilasm.load_str("%s.%s"%(const_var.name, name))
            el = self.const._dict[i]
            self.db.load_const(typeOf(el), el, ilasm)
            self.db.load_const(typeOf(i), i, ilasm)
            ilasm.list_setitem()
            ilasm.store_void()
class ExtObject(AbstractConst):
    """A constant for an externally-registered (bltregistry) object."""

    def __init__(self, db, const):
        # Does not call AbstractConst.__init__ (sets the same attributes itself).
        self.db = db
        self.const = const
        self.name = self.get_name()
        self.depends = set()
        self.depends_on = set()

    def get_key(self):
        return self.name

    def get_name(self):
        return self.const._TYPE._name

    def init(self, ilasm):
        """Render either an XmlHttp comm proxy or a prebuilt named object."""
        _class = self.const._TYPE._class_
        if getattr(_class, '_render_xmlhttp', False):
            use_xml = getattr(_class, '_use_xml', False)
            base_url = getattr(_class, '_base_url', "") # XXX: should be
            method = getattr(_class, '_use_method', 'GET')
            # on per-method basis
            self.db.register_comm_proxy(self.const, self.name, use_xml, base_url, method)
            ilasm.new(self.get_name())
        else:
            # Otherwise they just exist, or it's not implemented
            if not hasattr(self.const.value, '_render_name'):
                raise ValueError("Prebuilt constant %s has no attribute _render_name,"
                                 "don't know how to render" % self.const.value)
            ilasm.load_str(self.const.value._render_name)
|
'''
Linearly scale the expression range of one gene to be between 0 and 1.
If a reference dataset is provided, then the scaling of one gene in the
target dataset in done using the minimun and range of that gene in the
reference dataset.
'''
import sys
import argparse
sys.path.insert(0, 'Data_collection_processing/')
from pcl import PCLfile
parser = argparse.ArgumentParser(description="Linearly scale the expression range\
of one gene to be between 0 and 1. If a reference dataset is provided, then \
the scaling of one gene in the target dataset in done using the minimum and \
range of that gene in the reference dataset.")
parser.add_argument('tar', help='the target file for zero one normalization')
parser.add_argument('out', help='the output file after zero one normalization')
parser.add_argument('ref', help='the reference file. If reference file\
is \'None\', then zero one normalization will be done based on\
target file itself.')
args = parser.parse_args()
def zero_one_normal(tar=None, out=None, ref=None):
    '''
    Scale each gene (row) of the target PCL file linearly into [0, 1].

    tar: the target file for zero one normalization
    out: the output file after zero one normalization
    ref: the reference file.  If ref is the string 'None', normalization is
         done from the target file's own per-row statistics; otherwise each
         target row is scaled with the minimum and range of the same row in
         the reference file and then clipped to [0, 1].

    Bug fixed: a constant reference row (max == min) used to divide by zero
    and fill the target row with inf/nan; such rows are now left untouched.
    '''
    if ref == 'None':
        tar_data = PCLfile(tar, skip_col=0)
        tar_data.zero_one_normalization()
        tar_data.write_pcl(out)
    else:
        ref_data = PCLfile(ref, skip_col=0)
        tar_data = PCLfile(tar, skip_col=0)
        for i in xrange(ref_data.data_matrix.shape[0]):
            row_minimum = ref_data.data_matrix[i, :].min()
            row_range = ref_data.data_matrix[i, :].max() - row_minimum
            if row_range == 0:
                # Constant reference row: scaling is undefined, skip it.
                continue
            tar_data.data_matrix[i, :] = \
                (tar_data.data_matrix[i, :] - row_minimum) / row_range
            # bound the values to be between 0 and 1
            tar_data.data_matrix[i, :] = \
                [min(1, max(0, x)) for x in tar_data.data_matrix[i, :]]
        tar_data.write_pcl(out)

zero_one_normal(tar=args.tar, out=args.out, ref=args.ref)
|
from flask import Flask
from flask_bootstrap import Bootstrap
# Application factory-less Flask setup: one module-level app with Bootstrap.
app = Flask(__name__)
# Development-only settings: TESTING/DEBUG must not be enabled in production,
# and NOTE(review): a hard-coded SECRET_KEY should come from the environment.
app.config.update(
    TESTING=True,
    DEBUG=True,
    SECRET_KEY=b'_5#y2xwL"F4Q8z\n\xec]/'
)
bootstrap = Bootstrap(app)

# Imported last (circular-import pattern): routes needs ``app`` defined above.
from . import routes
from __future__ import annotations
import tempfile
from pathlib import Path
from typing import Any
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import typer
from kaggle_imaterialist2020_model.cmd.segment import main as segment
from PIL import Image
from segmentation.transforms import coco_rle_to_mask
resource_dir = Path(__file__).parents[2] / "tests/resources"
image_dir = resource_dir / "images"
mask_dir = resource_dir / "masks"
def rle_to_mask(rle: dict[str, Any]) -> np.ndarray:
    """Decode a COCO run-length encoding whose ``counts`` was stringified.

    ``counts`` arrives as the ``str()`` of a bytes object (e.g. ``"b'...'"``).
    Bug fixed: ``lstrip("b")`` stripped *every* leading ``b`` character and
    could corrupt payloads that legitimately start with ``b``;
    ``removeprefix`` removes at most the single literal prefix.

    Returns:
        A {0, 1} mask of shape (height, width).
    """
    counts = rle["counts"]
    rle["counts"] = counts.removeprefix("b").strip("'").replace("\\\\", "\\").encode()
    mask = coco_rle_to_mask(rle)
    # {0, 1}^(height, width)
    return mask
def iou(a: np.ndarray, e: np.ndarray) -> np.float64:
    """Intersection-over-union of two binary/boolean masks."""
    intersection = np.logical_and(a, e).sum()
    union = np.logical_or(a, e).sum()
    return intersection / union
IMAGE = np.array(Image.open(image_dir / "00a8764cff12b2e849c850f4be5608bc.jpg"))
def save_mask_image(mask, out):
    """Overlay ``mask`` on the module-level IMAGE and save the figure to ``out``.

    mask: {0, 1} array matching IMAGE's height/width -- TODO confirm shape.
    out: path accepted by matplotlib's savefig.
    """
    t = 0.7  # blend weight of the (white) mask over the photo
    plt.imshow(((1 - t) * IMAGE + t * 255 * mask[:, :, None]).astype(np.uint8))
    plt.axis("off")
    plt.savefig(out, bbox_inches="tight")
def join_cateogry(df):
    """Attach a human-readable ``category`` column to ``df`` by joining on
    ``category_id`` (row position in the class list below)."""
    # TODO: Include the following categories into training artifacts
    # and dynamically load them from the config at this script.
    # pasted from https://github.com/hrsma2i/dataset-iMaterialist/blob/main/raw/classes.txt
    category_names = [
        "background",
        "shirt|blouse",
        "top|t-shirt|sweatshirt",
        "sweater",
        "cardigan",
        "jacket",
        "vest",
        "pants",
        "shorts",
        "skirt",
        "coat",
        "dress",
        "jumpsuit",
        "cape",
        "glasses",
        "hat",
        "headband|head_covering|hair_accessory",
        "tie",
        "glove",
        "watch",
        "belt",
        "leg_warmer",
        "tights|stockings",
        "sock",
        "shoe",
        "bag|wallet",
        "scarf",
        "umbrella",
    ]
    df_c = pd.DataFrame(
        {
            "category_id": range(len(category_names)),
            "category": category_names,
        }
    )
    return df.merge(df_c, on="category_id")
def crop_and_resize(mask):
    """Crop the model-canvas region out of ``mask`` and resize it back to the
    photo's own size (workaround for a known resizing bug)."""
    # TODO: Remove this function after fixing the mask resizing bug
    # https://github.com/hrsma2i/kaggle-imaterialist2020-model/pull/11
    h, w, _ = IMAGE.shape
    h_ = 640                # presumably the model's fixed canvas height -- TODO confirm
    w_ = int(640 / h * w)   # width scaled to preserve the photo's aspect ratio
    resized_mask = cv2.resize(mask[:h_, :w_], (w, h))
    return resized_mask
def main(
    config_file: str = typer.Option(
        ...,
        help="A config YAML file to load a trained model. "
        "Choose from GCS URI (gs://bucket/models/foo/config.yaml) or local path (path/to/config.yaml).",
    ),
    checkpoint_path: str = typer.Option(
        ...,
        help="A Tensorflow checkpoint file to load a trained model. "
        "Choose from GCS URI (gs://bucket/models/foo/model.ckpt-1234) or local path (path/to/model.ckpt-1234).",
    ),
    out_qual: Path = typer.Option(
        None,
        help="The path to save images for qualitative evaluation.",
    ),
) -> None:
    """Check that editing the training code (tf_tpu_models/official/detection/main.py)
    doesn't make the accuracy worse.
    """
    # Run segmentation over the test images into a temporary JSONL file.
    with tempfile.NamedTemporaryFile(suffix=".jsonl") as f:
        segment(
            config_file=config_file,
            checkpoint_path=checkpoint_path,
            image_dir=str(image_dir),
            cache_dir=None,
            out=f.name,
        )
        print("load segmentation")
        df = pd.read_json(f.name, lines=True)
    df = join_cateogry(df)
    # Decode each prediction's RLE into a binary mask.
    actual_masks = df["segmentation"].apply(rle_to_mask)
    # TODO: Remove mask cropping & resizing after fixing the mask resizing bug
    # https://github.com/hrsma2i/kaggle-imaterialist2020-model/pull/11
    actual_masks = actual_masks.apply(crop_and_resize)
    if out_qual:
        # Optionally dump overlay images for eyeballing the predictions.
        print(f"save actual mask images at: {out_qual}")
        out_qual.mkdir(parents=True, exist_ok=True)
        df["actual_mask"] = actual_masks
        df.reset_index().apply(
            lambda row: save_mask_image(
                row["actual_mask"],
                out_qual / f"actual_{row['index']}_{row['category']}.png",
            ),
            axis=1,
        )
    print(f"check each expected mask exists in the actual masks")
    # Every golden mask must be matched (IoU > 0.90) by some prediction.
    for mask_file in mask_dir.glob("*.npy"):
        expected = np.load(mask_file)
        # TODO: Remove mask cropping & resizing after fixing the mask resizing bug
        # https://github.com/hrsma2i/kaggle-imaterialist2020-model/pull/11
        expected = crop_and_resize(expected)
        if out_qual:
            save_mask_image(expected, out_qual / f"expected_{mask_file.stem}.png")
        assert actual_masks.apply(
            lambda actual: iou(actual, expected) > 0.90
        ).any(), f"{mask_file.name} mask doesn't exist in the prediction."
        print(f"{mask_file}: OK")


if __name__ == "__main__":
    typer.run(main)
|
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a brute force optimizer for a short only, martingale style, volatility trading strategy
#that takes incrementally larger positions
#Import modules
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
#Ticker assignment
Ticker1 = 'UVXY'
#Request data
Asset1 = DatabaseGrabber(Ticker1)
Asset1 = Asset1[:] #In
#Iterable -- number of random-search trials in the optimization below
Iterations = range(0,50)
Counter = 1
#Range index -- 1-based positional index used for trade bookkeeping
Asset1['SubIndex'] = range(1,len(Asset1)+1)
#Empty data structures for collecting per-trial results
Empty = []
Dataset = pd.DataFrame()
#For number of iterations in optimization
for n in Iterations:
#Generate variable windows
ROCWindow = rand.randint(5,200)
HoldPeriod = rand.randint(25,200)
ATRWindow = 20
PositionSize = 1 + (rand.random() * 5) # 8 = 8% of account per leg
UniformMove = rand.random() * .4 # .5 = 1.5 highoverrollingmin for first unit to be active
PositionScale = rand.random() * .04 # .08 = add 8% to each new leg over previous leg
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
#ROC calculations
Asset1['RateOfChange'] = (Asset1['Adj Close'] - Asset1['Adj Close'].shift(ROCWindow)
) / Asset1['Adj Close'].shift(ROCWindow)
Bottom = Asset1['RateOfChange'].min()
#Unit 1
Asset1['UnitOne'] = 0
Asset1['UnitOne'] = np.where(Asset1['RateOfChange'] > Bottom + (1 * UniformMove), PositionSize, 0)
for i in range(0,HoldPeriod):
Asset1['UnitOne'] = np.where(Asset1['UnitOne'].shift(1) == PositionSize, PositionSize, Asset1['UnitOne'])
#Unit 2
Asset1['UnitTwo'] = 0
Asset1['UnitTwo'] = np.where(Asset1['RateOfChange'] > Bottom + (2 * UniformMove), (PositionSize + (1 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitTwo'] = np.where(Asset1['UnitTwo'].shift(1) == (PositionSize + (1 * PositionScale)), (PositionSize + (1 * PositionScale)), Asset1['UnitTwo'])
#Unit 3
Asset1['UnitThree'] = 0
Asset1['UnitThree'] = np.where(Asset1['RateOfChange'] > Bottom + (3 * UniformMove), (PositionSize + (2 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitThree'] = np.where(Asset1['UnitThree'].shift(1) == (PositionSize + (2 * PositionScale)), (PositionSize + (2 * PositionScale)), Asset1['UnitThree'])
#Unit 4
Asset1['UnitFour'] = 0
Asset1['UnitFour'] = np.where(Asset1['RateOfChange'] > Bottom + (4 * UniformMove), (PositionSize + (3 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitFour'] = np.where(Asset1['UnitFour'].shift(1) == (PositionSize + (3 * PositionScale)), (PositionSize + (3 * PositionScale)), Asset1['UnitFour'])
#Unit 5
Asset1['UnitFive'] = 0
Asset1['UnitFive'] = np.where(Asset1['RateOfChange'] > Bottom + (5 * UniformMove), (PositionSize + (4 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitFive'] = np.where(Asset1['UnitFive'].shift(1) == (PositionSize + (4 * PositionScale)), (PositionSize + (4 * PositionScale)), Asset1['UnitFive'])
#Unit 6
Asset1['UnitSix'] = 0
Asset1['UnitSix'] = np.where(Asset1['RateOfChange'] > Bottom + (6 * UniformMove), (PositionSize + (5 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitSix'] = np.where(Asset1['UnitSix'].shift(1) == (PositionSize + (5 * PositionScale)), (PositionSize + (5 * PositionScale)), Asset1['UnitSix'])
#Unit 7
Asset1['UnitSeven'] = 0
Asset1['UnitSeven'] = np.where(Asset1['RateOfChange'] > Bottom + (7 * UniformMove), (PositionSize + (6 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitSeven'] = np.where(Asset1['UnitSeven'].shift(1) == (PositionSize + (6 * PositionScale)), (PositionSize + (6 * PositionScale)), Asset1['UnitSeven'])
#Unit 8
Asset1['UnitEight'] = 0
Asset1['UnitEight'] = np.where(Asset1['RateOfChange'] > Bottom + (8 * UniformMove), (PositionSize + (7 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitEight'] = np.where(Asset1['UnitEight'].shift(1) == (PositionSize + (7 * PositionScale)), (PositionSize + (7 * PositionScale)), Asset1['UnitEight'])
#Unit 9
Asset1['UnitNine'] = 0
Asset1['UnitNine'] = np.where(Asset1['RateOfChange'] > Bottom + (9 * UniformMove), (PositionSize + (8 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitNine'] = np.where(Asset1['UnitNine'].shift(1) == (PositionSize + (8 * PositionScale)), (PositionSize + (8 * PositionScale)), Asset1['UnitNine'])
#Unit 10
Asset1['UnitTen'] = 0
Asset1['UnitTen'] = np.where(Asset1['RateOfChange'] > Bottom + (10 * UniformMove), (PositionSize + (9 * PositionScale)), 0)
for i in range(0,HoldPeriod):
Asset1['UnitTen'] = np.where(Asset1['UnitTen'].shift(1) == (PositionSize + (9 * PositionScale)), (PositionSize + (9 * PositionScale)), Asset1['UnitTen'])
#Unit 11
Asset1['UnitEleven'] = 0
Asset1['UnitEleven'] = np.where(Asset1['RateOfChange'] > Bottom + (11 * UniformMove), PositionSize, 0)
for i in range(0,HoldPeriod):
Asset1['UnitEleven'] = np.where(Asset1['UnitEleven'].shift(1) == PositionSize, PositionSize, Asset1['UnitEleven'])
#Adding position sizes
Asset1['SumUnits'] = Asset1[['UnitOne','UnitTwo','UnitThree','UnitFour',#]].sum(axis = 1)
'UnitFive','UnitSix','UnitSeven','UnitEight','UnitNine','UnitTen','UnitEleven']].sum(axis = 1)
#Exposure methodology
Asset1['Regime'] = np.where(Asset1['SumUnits'] >= 1, -1,0)
#Apply weights to returns
Asset1['Strategy'] = Asset1['Regime'].shift(1) * Asset1['LogRet'] * (Asset1['SumUnits']/100)
#Asset1['Strategy'].cumsum().apply(np.exp).plot(grid=True,
# figsize=(8,5))
#Returns on $1
Asset1['Multiplier'] = Asset1['Strategy'].cumsum().apply(np.exp)
#Incorrectly calculated drawdown statistic
drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax())
drawdown = drawdown.fillna(0)
#s['drawdown'] = 1 - s['Multiplier'].div(s['Multiplier'].cummax())
MaxDD = max(drawdown)
#Iteration tracking
Counter = Counter + 1
#Constraints
if MaxDD > .5:
continue
dailyreturn = Asset1['Strategy'].mean()
if dailyreturn < .0015:
continue
dailyvol = Asset1['Strategy'].std()
if dailyvol == 0:
continue
#Performance metrics
Sharpe = dailyreturn/dailyvol
SharpeOverMaxDD = Sharpe/MaxDD
#Save params and metrics to list
Empty.append(ROCWindow)
Empty.append(HoldPeriod)
Empty.append(PositionSize)
Empty.append(UniformMove)
Empty.append(PositionScale)
Empty.append(dailyreturn)
Empty.append(dailyvol)
Empty.append(Sharpe)
Empty.append(SharpeOverMaxDD)
Empty.append(MaxDD)
#List to Series
Emptyseries = pd.Series(Empty)
#Series to dataframe column
Dataset[n] = Emptyseries.values
#Clear list
Empty[:] = []
#Iteration tracking
print(Counter)
#Rename columns
#Trades = Trades.rename(index={0: "ExitTaken", 1: "LengthOfTrade", 2: "EntryPriceUnitOne",
# 3: "StopPriceUnitOne", 4: "SubIndexOfEntry", 5: "SubIndexOfExit",
# 6: "TradeDirection", 7: "OpenPriceOnGap", 8: "TradeReturn"})
#Desired metric to sort
# Row 7 of Dataset is the Sharpe ratio (see the append order in the loop
# above: index 7 == Sharpe), one column per tested parameter set.
z1 = Dataset.iloc[7]
#Percentile threshold
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top params
DS1W = pd.DataFrame() #this variable stores your params for specific dataset
#For all metrics
for h in z1:
    #If metric greater than threshold
    if h > w1:
        #Add to list
        v1.append(h)
#For top metrics
for j in v1:
    #Find column ID (columns whose Sharpe row equals this top metric)
    r = Dataset.columns[(Dataset == j).iloc[7]]
    #Add to dataframe
    DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
#Optimal param
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[7]] #this is the column number
#Param set
kfloat = float(k[0])
#End timer
End = t.time()
#Timer stats
#print(End-Start, 'seconds later')
#Display params
print(Dataset[k])
|
#!/usr/local/bin/python3
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ds_codes import ds_codes
from modified_utf8 import utf8m_to_utf8s
def read_number_from_hex_string(string, offset, size):
    """Decode a signed big-endian integer from `size` hex chars of `string`.

    Returns (value, size) -- the decoded two's-complement value and the
    number of hex chars consumed.
    """
    raw = int(string[offset : offset + size], 16)
    bit_width = 4 * size
    sign_bit = 1 << (bit_width - 1)
    # Two's-complement sign extension: fold the sign bit back in.
    if raw & sign_bit:
        raw -= 1 << bit_width
    return raw, size
def read_unsigned_number_from_hex_string(string, offset, size):
    """Decode an unsigned big-endian integer from `size` hex chars of `string`.

    Returns (value, size) -- the value and the number of hex chars consumed.
    (The unused `bits` local was removed; no sign extension is needed here.)
    """
    value = int(string[offset : offset + size], 16)
    return value, size
def read_byte_value(string, offset):
    # Signed 8-bit value: 2 hex chars.
    return read_number_from_hex_string(string, offset, 2)

def read_unsigned_byte_value(string, offset):
    # Unsigned 8-bit value: 2 hex chars.
    return read_unsigned_number_from_hex_string(string, offset, 2)

def read_short_value(string, offset):
    # Signed 16-bit value: 4 hex chars.
    return read_number_from_hex_string(string, offset, 4)

def read_int_value(string, offset):
    # Signed 32-bit value: 8 hex chars.
    return read_number_from_hex_string(string, offset, 8)

def read_long_value(string, offset):
    # Signed 64-bit value: 16 hex chars.
    return read_number_from_hex_string(string, offset, 16)
def read_array_length(message_bytes, offset):
    """Read Geode's variable-width array-length prefix.

    A single marker byte: 254 means the real length follows as a short,
    253 as an int, 255 (length -1) is unsupported, and any other value IS
    the length.  Returns (array_len, offset).

    BUG FIX: the marker was read with read_byte_value, which sign-extends,
    so it could never equal 253/254/255 -- the escape markers were dead
    code and 0xFF decoded as length -1.  It is now read unsigned, matching
    the comparisons below.
    """
    (byte_value, offset) = call_reader_function(
        message_bytes, offset, read_unsigned_byte_value
    )
    if byte_value == 255:
        raise Exception("Don't know how to handle len == -1 in serialized array!")
    if byte_value == 254:
        return call_reader_function(message_bytes, offset, read_short_value)
    if byte_value == 253:
        return call_reader_function(message_bytes, offset, read_int_value)
    return byte_value, offset
def read_byte_array(string, offset):
    """Read a length-prefixed byte array; return it as space-separated hex
    byte pairs, e.g. "de ad be ef", plus the new offset.

    The formatting loop previously duplicated read_byte_array_with_length
    verbatim; it now delegates to it (DRY), preserving behavior exactly,
    including the ("", offset) result for a zero-length array.
    """
    (array_length, offset) = read_array_length(string, offset)
    return read_byte_array_with_length(string, offset, array_length)
def read_byte_array_with_length(string, offset, array_length):
    """Format `array_length` bytes (2 hex chars each) starting at `offset`
    as space-separated hex byte pairs.

    Returns (byte_string, new_offset).  A zero-length array yields "".
    Uses str.join instead of the previous quadratic `+=`-then-strip loop;
    output is byte-identical.
    """
    end = offset + (array_length * 2)
    byte_string = " ".join(string[i : i + 2] for i in range(offset, end, 2))
    return byte_string, end
def read_boolean_value(message_bytes, offset):
    """Read one serialized byte and render it as the string 'True' (for 1)
    or 'False' (anything else).  Returns (bool_string, offset)."""
    (flag, offset) = call_reader_function(message_bytes, offset, read_byte_value)
    return ("True" if flag == 1 else "False"), offset
def read_unsigned_vl(string, offset):
    """Read an unsigned variable-length integer (7 payload bits per byte,
    high bit set means "another byte follows"), up to 64 bits.

    Returns (value, consumed) where consumed is the number of hex chars
    read (cursor - offset), matching the reader-function convention
    expected by call_reader_function.

    Raises ValueError if more than 64 bits of payload are encoded.
    """
    shift = 0
    result = 0
    cursor = offset
    while shift < 64:
        # Each byte contributes its low 7 bits at the current shift.
        b, cursor = call_reader_function(string, cursor, read_byte_value)
        result |= (b & 0x7F) << shift
        if not (b & 0x80):
            break
        shift += 7
    if shift >= 64:
        raise ValueError("Malformed variable length integer")
    return result, cursor - offset
def read_string_value(string, length, offset):
    """Decode `length` UTF-8 bytes (2 hex chars each) starting at `offset`.

    Returns (decoded_string, new_offset)."""
    end = offset + (length * 2)
    raw = bytearray.fromhex(string[offset:end])
    return (raw.decode("utf-8"), end)
def read_fixed_id_byte_value(string, offset):
    """Expect a FixedIDByte DSCode marker, then read its one-byte payload.

    Returns (byte_value, offset); raises TypeError on any other DSCode."""
    (ds_code, offset) = call_reader_function(string, offset, read_byte_value)
    if ds_codes[ds_code] != "FixedIDByte":
        raise TypeError("Expected DSCode 'FixedIDByte'")
    return call_reader_function(string, offset, read_byte_value)
def read_cacheable_string_value(string, offset):
    """Read a DSCode-prefixed string: CacheableString (short length + JM
    utf-8 payload) or CacheableStringHuge (int length, payload skipped).

    Raises TypeError for any other DSCode.
    """
    (dscode, offset) = call_reader_function(string, offset, read_byte_value)
    string_type = ds_codes[dscode]
    if string_type == "CacheableString":
        (string_length, offset) = call_reader_function(string, offset, read_short_value)
        return read_geode_jmutf8_string_value(string, offset, string_length)
    elif string_type == "CacheableStringHuge":
        (string_length, offset) = call_reader_function(string, offset, read_int_value)
        # NOTE(review): this branch skips the payload and then falls off the
        # end of the function, implicitly returning None instead of a
        # (value, offset) tuple like the branch above.  Also, every other
        # reader advances 2 hex chars per byte, but this adds string_length
        # directly -- both look like bugs; confirm intent before relying on
        # Huge strings.
        offset += string_length
    else:
        raise TypeError("Expected CacheableString or CacheableStringHuge")
def read_cacheable_ascii_string_value(string, offset):
    """Read a CacheableASCIIString: DSCode marker, short length, then that
    many ASCII bytes.  Returns (decoded_string, offset).

    Raises TypeError when the DSCode is not CacheableASCIIString."""
    (ds_code, offset) = call_reader_function(string, offset, read_byte_value)
    if ds_codes[ds_code] != "CacheableASCIIString":
        raise TypeError("Attempt to decode another type as CacheableASCIIString")
    (size, offset) = call_reader_function(string, offset, read_short_value)
    chars = []
    for _ in range(size):
        (code, offset) = call_reader_function(string, offset, read_byte_value)
        chars.append(code)
    return (bytes(chars).decode("ascii"), offset)
# Decodes a hex string of JM (modified) utf-8 bytes, returns plain utf-8 string
def read_geode_jmutf8_string_value(buffer, offset, string_length):
    """Decode `string_length` bytes of Java-modified UTF-8 (2 hex chars per
    byte) from `buffer` starting at `offset`.

    Returns (decoded_string, cursor), cursor being the absolute offset just
    past the consumed hex chars.

    BUG FIX: bytes come back from read_byte_value sign-extended, so every
    byte >= 0x80 arrived negative and always satisfied the old
    `code_point < 0x7F` one-byte test -- the multi-byte branches were
    unreachable.  Bytes are now masked back to 0..255 first, and the
    one-byte range includes 0x7F itself (JM utf-8 encodes U+0001..U+007F in
    one byte; U+0000 is always two bytes).
    """
    cursor = offset
    string = []
    bad_length = IndexError("Insufficient length for JM utf-8 string")
    while cursor < offset + (string_length * 2):
        code_point, cursor = call_reader_function(buffer, cursor, read_byte_value)
        code_point &= 0xFF  # undo sign extension; work on the raw byte
        if code_point == 0:
            raise TypeError("Should not encounter a 0 byte in JM utf-8")
        elif code_point <= 0x7F:  # one-byte encoding
            string.append(code_point)
        elif (code_point & 0xE0) == 0xC0:  # two-byte encoding
            # NOTE(review): comparing the absolute cursor against the
            # relative string_length looks wrong; preserved as-is pending
            # confirmation.
            if cursor < string_length - 1:
                (byte2, cursor) = call_reader_function(buffer, cursor, read_byte_value)
                string.append(code_point)
                string.append(byte2 & 0xFF)
            else:
                raise bad_length
        # 3-byte or 6-byte encoding. We don't care which here, because we'll
        # just pick up the next 3-byte encoding in the loop, and the conversion
        # at the end will raise an exception if there's a problem.
        elif (code_point & 0xF0) == 0xE0:
            if cursor < string_length - 3:
                (byte2, cursor) = call_reader_function(buffer, cursor, read_byte_value)
                (byte3, cursor) = call_reader_function(buffer, cursor, read_byte_value)
                string.append(code_point)
                string.append(byte2 & 0xFF)
                string.append(byte3 & 0xFF)
            else:
                raise bad_length
    return (utf8m_to_utf8s(string), cursor)
def call_reader_function(string, offset, fn):
    """Invoke reader `fn(string, offset)` -- which returns (value,
    chars_consumed) -- and convert its relative count into an absolute
    offset.  Returns (value, new_offset)."""
    value, consumed = fn(string, offset)
    return (value, offset + consumed)
def read_cacheable(message_bytes, offset):
    """Decode one DSCode-prefixed ("cacheable") value from the hex string.

    Returns (value, offset) where value is a dict with "DSCode" and,
    for supported codes, "Value" (plus "StringLength" for strings).
    Only the DSCodes handled below are supported; anything else raises.
    """
    value = {}
    dscode = ""
    (dscode, offset) = call_reader_function(message_bytes, offset, read_byte_value)
    value["DSCode"] = ds_codes[dscode]
    if (
        value["DSCode"] == "CacheableASCIIString"
        or value["DSCode"] == "CacheableASCIIStringHuge"
    ):
        # 2-byte length prefix, then that many ASCII bytes.
        # NOTE(review): the "Huge" variant also reads a short length here,
        # unlike read_cacheable_string_value's int-length Huge branch --
        # confirm whether that is intended.
        (value["StringLength"], offset) = call_reader_function(
            message_bytes, offset, read_short_value
        )
        (value["Value"], offset) = read_string_value(
            message_bytes, value["StringLength"], offset
        )
    elif value["DSCode"] == "CacheableBoolean":
        (bool_val, offset) = call_reader_function(
            message_bytes, offset, read_byte_value
        )
        value["Value"] = "False" if bool_val == 0 else "True"
    elif value["DSCode"] == "CacheableInt16":
        (int_val, offset) = call_reader_function(message_bytes, offset, read_short_value)
        value["Value"] = int_val
    elif value["DSCode"] == "CacheableInt32":
        (int_val, offset) = call_reader_function(message_bytes, offset, read_int_value)
        value["Value"] = int_val
    elif value["DSCode"] == "CacheableInt64":
        (int_val, offset) = call_reader_function(message_bytes, offset, read_long_value)
        value["Value"] = int_val
    elif value["DSCode"] == "NullObj":
        # Gah! Nasty little bug in the protocol here. NC writes '1' in the
        # size field for a NullObj, but the payload is actually ZERO bytes,
        # and if you read 1 byte of payload like it says to you'll blow the
        # message parse.
        value["Value"] = "<<null>>"
    elif value["DSCode"] == "PDX":
        value["Value"] = "<<Unreadable - no type info available in gnmsg>>"
        # This is here for completion, but not actually necessary.
        offset = len(message_bytes)
    else:
        raise Exception("Unknown DSCode")
    return (value, offset)
def parse_key_or_value(message_bytes, offset):
    """Parse one key-or-value record: a 4-byte Size, a 1-byte IsObject
    flag, then the serialized payload.

    Returns (value_dict, offset).  When the payload cannot be decoded, the
    record's "Data" entry is omitted and Size bytes (2 hex chars each) are
    skipped so the caller can keep walking the message.
    """
    value = {}
    (value["Size"], offset) = call_reader_function(
        message_bytes, offset, read_int_value
    )
    (value["IsObject"], offset) = call_reader_function(
        message_bytes, offset, read_byte_value
    )
    try:
        (value["Data"], offset) = read_cacheable(message_bytes, offset)
    except Exception:
        # BUG FIX: was a bare `except:`.  Keep the deliberate best-effort
        # skip-over, but stop swallowing SystemExit/KeyboardInterrupt.
        offset += value["Size"] * 2
    return (value, offset)
|
import unittest
def es_mayor_de_dad(edad):
    """Return True when `edad` (age) is 18 or older, else False."""
    return edad >= 18
class PruebaDeCristal(unittest.TestCase):
    """Unit tests for es_mayor_de_dad."""

    def test_es_mayor_de_edad(self):
        # An adult (>= 18).
        edad = 20
        resultado = es_mayor_de_dad(edad)
        self.assertEqual(resultado, True)

    def test_es_menor_de_edad(self):
        # BUG FIX: this method previously reused the name
        # test_es_mayor_de_edad, so it silently replaced the method above
        # and only one of the two tests was ever collected and run.
        edad = 15
        resultado = es_mayor_de_dad(edad)
        self.assertEqual(resultado, False)
# Run the test suite when executed directly as a script.
if __name__ == "__main__":
    unittest.main()
from django.db import models
class Parent(models.Model):
    # Base concrete model for the inheritance-related test models below.
    parent_field = models.CharField(max_length=100)

class Child(Parent):
    # Multi-table (concrete) inheritance from Parent.
    child_field = models.CharField(max_length=100)

class Proxy(Parent):
    # Proxy model: same DB table as Parent, different Python class.
    class Meta:
        proxy = True

class Related(models.Model):
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 --
    # confirm before upgrading.
    parent = models.ForeignKey(Parent)
    related_field = models.CharField(max_length=100)

class OtherRelated(models.Model):
    # Second-level relation: OtherRelated -> Related -> Parent.
    related = models.ForeignKey(Related)
# forward/reverse relationship testing models
class Author(models.Model):
    name = models.CharField(max_length=100)

class Cover(models.Model):
    text = models.CharField(max_length=100)

class Tag(models.Model):
    text = models.CharField(max_length=100)

class Book(models.Model):
    # One model exercising all three relation kinds: FK, one-to-one, M2M.
    # (No on_delete on the FK/O2O fields => written for Django < 2.0.)
    author = models.ForeignKey(Author)
    cover = models.OneToOneField(Cover)
    tags = models.ManyToManyField(Tag)
    title = models.CharField(max_length=100)
# models for 'deeply nested' relationships
class Person(models.Model):
    name = models.CharField(max_length=100)
    email = models.EmailField(blank=True)

class Article(models.Model):
    author = models.ForeignKey(Person)
    title = models.CharField(max_length=100)

class Comment(models.Model):
    # Comment -> Article -> Person gives the two-level nesting under test.
    author = models.ForeignKey(Person)
    article = models.ForeignKey(Article)
    body = models.CharField(max_length=100)
|
import os
import unittest
from hamerkop.lang import *
def get_filename(filename):
    """Resolve `filename` relative to this test module's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, filename)
def read_file(filename):
    """Return the entire text contents of `filename`."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents
def tokenize(string):
    """Split `string` into tokens on runs of whitespace."""
    tokens = string.split()
    return tokens
class LangTest(unittest.TestCase):
    """Tests for Lang.from_code lookup."""

    def test_from_code(self):
        # Lookup is case-insensitive.
        self.assertEqual(Lang.AMH, Lang.from_code('AMH'))
        self.assertEqual(Lang.AMH, Lang.from_code('amh'))

    def test_from_code_invalid(self):
        # Unknown codes yield None rather than raising.
        self.assertIsNone(Lang.from_code('ZZZ'))
class NgramLangDetectorTest(unittest.TestCase):
    # Class-level fixture: whitespace-tokenized sample text per language.
    # NOTE: these files are read at import time, so a missing data file
    # breaks collection of the whole module, not just these tests.
    DATA = {
        Lang.ENG: tokenize(read_file(get_filename('data/lang_id/en.txt'))),
        Lang.ZHO: tokenize(read_file(get_filename('data/lang_id/zh.txt'))),
    }

    def test(self):
        # Each sample should be identified as its own language.
        detector = NgramLangDetector()
        self.assertEqual(Lang.ENG, detector.detect('file1', self.DATA[Lang.ENG]))
        self.assertEqual(Lang.ZHO, detector.detect('file2', self.DATA[Lang.ZHO]))

    def test_without_data(self):
        # With no tokens the detector cannot decide and returns None.
        detector = NgramLangDetector()
        self.assertIsNone(detector.detect('file3', []))
|
try:
from celery.task import task
except ImportError:
from celery import shared_task as task
@task(bind=True)
def subscribe(self, email, newsletter_list_id, user_id=None, **kwargs):
    """Celery task: subscribe `email` to a newsletter list.

    If a matching Django user is found (by pk, or else the most recent
    user with this email), only the `subscribed` signal is emitted;
    otherwise the backend is called directly.  NOTE(review): presumably a
    signal receiver performs the backend subscription for known users --
    confirm, otherwise known users are never actually subscribed.
    """
    # Imports are deferred so this module can be imported before the
    # Django app registry is ready.
    from courriers.backends import get_backend
    from courriers.models import NewsletterList
    from courriers import signals
    from django.contrib.auth import get_user_model

    User = get_user_model()
    backend = get_backend()()
    newsletter_list = None
    if newsletter_list_id:
        newsletter_list = NewsletterList.objects.get(pk=newsletter_list_id)
    user = None
    if user_id is not None:
        user = User.objects.get(pk=user_id)
    else:
        # Fall back to the most recently created user with this email.
        user = User.objects.filter(email=email).last()
    if user:
        signals.subscribed.send(sender=User, user=user, newsletter_list=newsletter_list)
    else:
        try:
            # NOTE(review): newsletter_list is None when newsletter_list_id
            # is falsy, which would raise AttributeError here -- confirm
            # callers always pass a list id for anonymous subscriptions.
            backend.subscribe(newsletter_list.list_id, email)
        except Exception as e:
            # Retry the whole task in 60s on any backend failure.
            raise self.retry(exc=e, countdown=60)
@task(bind=True)
def unsubscribe(self, email, newsletter_list_id=None, user_id=None, **kwargs):
    """Celery task: unsubscribe `email` from one list, or from all lists
    when no `newsletter_list_id` is given.

    With a known user only the `unsubscribed` signal is emitted per list;
    for unknown addresses the backend is called directly.
    NOTE(review): unlike subscribe(), backend failures here are not
    retried -- confirm that asymmetry is intentional.
    """
    # Deferred imports: keep Django/app loading out of module import time.
    from courriers.backends import get_backend
    from courriers.models import NewsletterList
    from courriers import signals
    from django.contrib.auth import get_user_model

    User = get_user_model()
    # Default to every list; narrow to a single list when an id is given.
    newsletter_lists = NewsletterList.objects.all()
    if newsletter_list_id:
        newsletter_lists = NewsletterList.objects.filter(pk=newsletter_list_id)
    user = None
    if user_id is not None:
        user = User.objects.get(pk=user_id)
    else:
        # Most recently created user with this email, if any.
        user = User.objects.filter(email=email).last()
    if user:
        for newsletter_list in newsletter_lists:
            signals.unsubscribed.send(
                sender=User, user=user, newsletter_list=newsletter_list
            )
    else:
        backend = get_backend()()
        for newsletter in newsletter_lists:
            backend.unsubscribe(newsletter.list_id, email)
|
#!/usr/bin/python
# Filename: ex_global.py
x = 50  # module-level global, read and rebound by func()

def func():
    """Print the global x, rebind it to 2 via `global`, and print again."""
    global x
    print('x is ', x)
    x = 2
    # BUG FIX: the message said "local", but the `global` statement above
    # means it is the module-level x that just changed.
    print('Changed global x to ', x)

func()
print('Value of x is ', x)  # reflects the reassignment done inside func()
|
import sys
from util.IO import IO
from util.Calendar import Calendar
def main():
    """Entry point: load the query context, then render calendar views."""
    # IO('ssmall') presumably loads user/query state for user 'ssmall'
    # -- TODO confirm against util.IO.
    queryObj = IO('ssmall')
    print(queryObj.userName)
    cal = Calendar(queryObj.userName)
    cal.plotEvents()
    cal.plotNext()
    cal.plotPrevious()

# Exit with main()'s return value (None -> exit status 0).
if __name__ == '__main__':
    sys.exit(main())
|
from django.urls import path
from . import views
app_name = "payment"

urlpatterns = [
    path('orders/<str:order_id>/payment/', views.AlipayView.as_view()),  # get the Alipay payment URL for an order
    path('payment/status/', views.SavePaymentView.as_view()),  # save the Alipay payment result
]
'''
This file takes in an Excel file and creates a CSV file for searching.
TODO: Create a dataframe from the Excel file for excel_match.py.
'''
import pandas as pd
def excel_to_csv(excel_path, csv_path='../test_data/test_queries.csv'):
    """Extract the bibliographic columns from the Excel file at
    `excel_path` and write them as a tab-separated CSV.

    The output path was hard-coded; it is now the `csv_path` parameter
    with the old value as its default, so existing callers are unchanged.
    (`index=False` replaces `index=None` -- both suppress the index
    column, False is the documented spelling.)
    """
    excel = pd.read_excel(excel_path)
    cols = ['Author', 'Title', 'Edition', 'Pub Place', 'Publisher', 'Date']
    excel[cols].to_csv(csv_path, sep='\t', index=False)

path = '../test_data/test_queries.xlsx'
excel_to_csv(path)
import joswig_dijkstra as jd
import matplotlib.pyplot as plt
# import numpy as np
# Recursion depth
__depth__ = 10 # make sure the depth isn't too big, high enough accuracy will mess up the decimals
'''
******************
Function totalweight
******************
Given a weighted graph,
returns the sum of all constant
weights.
'''
def totalweight(tree):
    """Sum every constant edge weight in `tree`.

    Entries equal to -1 mark variable (parameterized) edges and are
    excluded from the total.
    """
    total = 0
    for vertex in tree.vertices:
        total += sum(w for w in vertex.weight if w != -1)
    return total
"""
***************
Function equaltree
***************
Given two arrays, x and y,
representing the shortest-path
trees one receives as a result
of running Dijkstra's algorithm,
determines whether they are equal.
"""
def equaltree(x, y):
    """Return True when the shortest-path-tree arrays x and y agree on
    every column except the last one (the weight column is ignored)."""
    for i in range(len(x)):
        for j in range(len(x[0]) - 1):  # skip the final (weight) column
            if x[i][j] != y[i][j]:
                return False
    return True
def mergeList(L1, L2):
    """Append L2 onto L1 and return a new order-preserving, deduplicated
    copy of the combined list.

    NOTE: L1 is deliberately extended in place (callers such as
    mergeForest rely on that side effect).
    """
    L1 += L2
    deduped = []
    for entry in L1:
        if entry not in deduped:
            deduped.append(entry)
    return deduped
'''
'''
def d2treebinary(left, right, slope, yint, interval):
    """Run one 1-D binary search along the line y = slope*x + yint over
    the global graph T0, and merge its discovered trees into `interval`.

    Samples both endpoints first so the recursion in treebinary() has two
    seed trees.  Returns the merged forest (list of [tree, samples]).
    NOTE(review): uses a local depth of 10, shadowing the module-level
    __depth__ -- confirm whether that is intentional.
    """
    depth = 10
    buff1 = []
    buff2 = []
    # Sample the left endpoint of the line segment.
    T0.editlink(4,3,left)
    T0.editlink(4,1,slope*left+yint)
    T0.vert_false()
    y = T0.shortestpath(4)
    buff1.append([y,[[left,slope*left+yint]]])
    # Sample the right endpoint.
    T0.editlink(4,3,right)
    T0.editlink(4,1,slope*right+yint)
    T0.vert_false()
    y = T0.shortestpath(4)
    buff2.append([y,[[right,slope*right+yint]]])
    buff1 = mergeForest(buff1,buff2)
    treebinary(T0,left,right,depth,buff1,slope,yint)
    result = mergeForest(interval,buff1)
    return result
'''
Assumes that there are only at most two matching trees between F1 and F2;
This can be guaranteed if after each run, it is merged properly.
'''
def mergeForest(F1,F2):
    """Merge two forests (lists of [tree, samples]); entries whose trees
    compare equal via equaltree() have their sample lists combined.

    Quadratic in len(F1)+len(F2); NOTE: also extends F1 in place via `+=`.
    """
    F1 += F2
    ignore = []
    result = []
    for i in range(0,len(F1)):
        dup = False
        for j in range(0,len(F1)):
            # `&` works here because both operands are plain bools.
            if((i != j) & (not(i in ignore))):
                if(equaltree(F1[i][0],F1[j][0])):
                    dup = True
                    ignore.append(j)
                    # print(ignore)
                    result.append([F1[i][0],mergeList(F1[i][1],F1[j][1])])
        # Entries with no duplicate anywhere are carried over unchanged.
        if((not(i in ignore)) & (not(dup))):
            result.append(F1[i])
    return result
"""
************
Function treebinary
************
Performs a recursive binary search on
the parameter space to approximate
the regions of validity for different
shortest path trees.
tree = graph we wish to search on
left = left bound of approximation
right = right bound of approximation
count = recursion depth
interval = array of trees and sample values
they are valid on
"""
def treebinary(tree,left,right,count,interval,slope,yint):
    # Probe the midpoint of [left, right] along y = slope*x + yint.
    guess = 0.5*(left+right)
    tree.editlink(4,3,guess)
    tree.editlink(4,1,slope*guess+yint)
    tree.vert_false()
    fee = tree.shortestpath(4)
    equal = False
    for foo in interval:
        equal = equaltree(foo[0],fee)
        if(equal):
            # Known tree: record the sample point if it is new.
            # if(foo[1].count(guess) == 0):
            yval = slope*guess+yint
            if([guess,yval] not in foo[1]):
                foo[1].append([guess,yval])
                foo[1].sort()
                interval.sort()
            samples = foo[1]
            end = len(samples) - 1
            ind = interval.index(foo)
            # Shrink the bracket toward the unexplored side.
            if(guess == foo[1][end][0]): #guess was the biggest in the list
                left = guess
            elif(guess == foo[1][0][0]): #guess was the smallest in the list
                right = guess
            if(count > 0):
                treebinary(tree,left,right,count-1,interval,slope,yint)
            break
    if(not(equal)):
        # New tree discovered: register it and recurse into both halves
        # (note: count is NOT decremented on this branch).
        interval.append([fee,[[guess,slope*guess+yint]]])
        interval.sort()
        treebinary(tree,left,guess,count,interval,slope,yint)
        treebinary(tree,guess,right,count,interval,slope,yint)
# --- Build the example graph: vertex 4 connects to 1, 2, 3; edges 4-3 and
# 4-1 carry the two parameters being explored.
T0 = jd.Tree(4)
T0.link(4,1,5)
T0.link(4,3,-1) # -1 denotes a variable edge weight
T0.link(4,2,4)
T0.link(3,1,2)
T0.link(3,2,3)
T0.editlink(4,3,0)
T0.editlink(4,1,14)
T0.vert_false()
x = T0.shortestpath(4)
# print(x)
totw = totalweight(T0)
# --- First sweep: the diagonal y = x over [0, totw].
interval = []
left = 0
right = totalweight(T0)
# print(right)
# right = 10
slope = 1
yint = 0
T0.editlink(4,3,left)
T0.editlink(4,1,slope*left+yint)
T0.vert_false()
x = T0.shortestpath(4)
interval.append([x,[[left,slope*left+yint]]])
intalt = []
T0.editlink(4,3,right)
T0.editlink(4,1,slope*right+yint)
T0.vert_false()
x = T0.shortestpath(4)
intalt.append([x,[[right,slope*right+yint]]])
# print("interval:",interval)
# print("intalt:",intalt)
interval = mergeForest(interval,intalt)
# print("Merged:",interval)
treebinary(T0,left,right,__depth__,interval,slope,yint)
# print("Int:",interval)
# print(len(interval))
# --- Second sweep: the anti-diagonal y = totw - x.
left = 0
right = totw
slope = -1
yint = totw
__depth__ = 10
altinterval = []
T0.editlink(4,3,left)
T0.editlink(4,1,slope*left+yint)
T0.vert_false()
x = T0.shortestpath(4)
altinterval.append([x,[[left,slope*left+yint]]])
T0.editlink(4,3,right)
T0.editlink(4,1,slope*right+yint)
T0.vert_false()
x = T0.shortestpath(4)
altinterval.append([x,[[right,slope*right+yint]]])
treebinary(T0,left,right,__depth__,altinterval,slope,yint)
interval = mergeForest(interval,altinterval)
# --- Fans of lines across the parameter square; each line gets its own
# 1-D binary search (d2treebinary) to refine the 2-D region boundaries.
samples = 10 #determines the number of points on each edge that a line is drawn to
for i in range(1,samples): #binary search along lines emanating from each corner
    # Lines through the origin, steeper and shallower than the diagonal.
    left = 0
    right = i/float(samples)*totw
    slope = float(samples)/i
    yint = 0
    interval = d2treebinary(left,right,slope,yint,interval)
    # NOTE(review): hard-coded 23 instead of totw here -- confirm.
    right = 23
    slope = i/float(samples)
    yint = 0
    interval = d2treebinary(left,right,slope,yint,interval)
for i in range(1,samples):
    # Downward-shifted / steep lines from the left and bottom edges.
    left = 0
    right = totw
    slope = (float(samples)-i)/samples
    yint = i*totw/float(samples)
    interval = d2treebinary(left,right,slope,yint,interval)
    left = i*totw/float(samples)
    right = totw
    slope = float(samples)/(samples-i)
    yint = -(totw*i)/(float(samples)-i)
    interval = d2treebinary(left,right,slope,yint,interval)
for i in range(1,samples):
    # Negative-slope lines from the top edge.
    left = 0
    right = totw*i/float(samples)
    slope = -float(samples)/i
    yint = totw
    interval = d2treebinary(left,right,slope,yint,interval)
    left = 0
    right = totw
    slope = (i-float(samples))/samples
    yint = totw
    interval = d2treebinary(left,right,slope,yint,interval)
for i in range(1,5):
    # NOTE(review): range(1,5) here vs range(1,samples) above -- possibly
    # intentional thinning of this fan; confirm.
    left = 0
    right = totw
    slope = -i/float(samples)
    yint = totw*i/float(samples)
    interval = d2treebinary(left,right,slope,yint,interval)
    left = totw*i/float(samples)
    right = totw
    slope = float(samples)/(i-samples)
    yint = -totw*float(samples)/(i-samples)
    interval = d2treebinary(left,right,slope,yint,interval)
print(interval)
# --- Scatter-plot each discovered tree's sample points in parameter space.
fig, ax = plt.subplots()
i = 0
for foo in interval:
    t = []
    s = []
    for vals in foo[1]:
        s.append(vals[0])
        t.append(vals[1])
    ax.scatter(s, t,label = "T"+str(i))
    i+=1
ax.set(
    xlabel='4-3 Edge weight parameter',
    ylabel = '4-1 Edge weight parameter',
    title='Tree regions of validity'
)
ax.set_xlim(-1,25)
ax.legend()
ax.grid()
plt.tick_params(
    axis="y",
    which="both",
    left=False,
    right=False)
plt.figure(figsize=(20,20),dpi=250)
fig.savefig("test.png")
plt.show()
import queue

# PriorityQueue always serves the SMALLEST item first, regardless of
# insertion order.
q = queue.PriorityQueue()
q.put(20)
q.put(10)
q.put(30)  # queue now holds {10, 20, 30}
q.get()  # 10  (BUG FIX: the old comments claimed 20 then 10)
q.get()  # 20
# "indexing" with a plain list of (priority, value) tuples
qu = []
qu.append((1, "Ashwin"))
qu.append((2, "Arjun"))
qu.append((3, "Hari"))
# BUG FIX: this printed `q` (an opaque PriorityQueue object) while the
# comment below describes the list -- print the list instead.
print(qu)
# [(1, "Ashwin"), (2, "Arjun"), (3, "Hari")]
|
"""
面试题 55(一):二叉树的深度
题目:输入一棵二叉树的根结点,求该树的深度。从根结点到叶结点依次经过的
结点(含根、叶结点)形成树的一条路径,最长路径的长度为树的深度。
"""
class BSTNode:
    """A binary-tree node: a payload value plus two child links."""

    def __init__(self, val):
        self.val = val      # payload value
        self.left = None    # left child (BSTNode or None)
        self.right = None   # right child (BSTNode or None)
def connect_bst_nodes(head: "BSTNode", left: "BSTNode", right: "BSTNode") -> None:
    """Attach `left` and `right` as the children of `head`.

    A falsy `head` makes this a no-op.  BUG FIX: the return annotation
    claimed BSTNode, but the function has always returned None -- the
    annotation now matches the behavior (annotations are also string
    forward references now, so they need no evaluation order).
    """
    if head:
        head.left = left
        head.right = right
def tree_depth(bst: BSTNode) -> int:
    """Return the depth of the (sub)tree rooted at `bst`.

    Depth is the number of nodes on the longest root-to-leaf path; an
    empty tree has depth 0.
    """
    if not bst:
        return 0
    return 1 + max(tree_depth(bst.left), tree_depth(bst.right))
if __name__ == '__main__':
    # Build the sample tree from the book:
    #         10
    #       /    \
    #      6      14
    #     / \    /  \
    #    4   8  12  16
    tree = BSTNode(10)
    connect_bst_nodes(tree, BSTNode(6), BSTNode(14))
    connect_bst_nodes(tree.left, BSTNode(4), BSTNode(8))
    connect_bst_nodes(tree.right, BSTNode(12), BSTNode(16))
    res = tree_depth(tree)
    print(res)  # expected: 3
|
import csv
import os
from functools import lru_cache
from typing import Iterable, List, Optional
def get_first_common_element(first: Iterable[str], second: Iterable[str]) -> Optional[str]:
    """Return the first element of `first` that also occurs in `second`.

    Returns None when there is no common element.  BUG FIX: the return
    annotation said plain `str` although the docstring (and behavior)
    allow None -- it is now Optional[str].

    NOTE(review): `second` is probed once per element of `first`; pass a
    list/set rather than a one-shot generator, and prefer a set for large
    inputs.
    """
    return next((item for item in first if item in second), None)
def get_common_keys(first: List[str], second: List[str]) -> List[str]:
    """Return the elements of `first` found in `second`, matching either
    exactly or on the lowercased form of the `first` element.

    Args:
        first (List[str]): Candidate strings; their original casing is
            preserved in the result.
        second (List[str]): Strings to match against.

    Returns:
        List[str]: Matching elements of `first`, in their original order.
    """
    matches = []
    for key in first:
        if key in second or key.lower() in second:
            matches.append(key)
    return matches
def filter_none(iterable: Iterable) -> Iterable:
    """Lazily drop every None entry from `iterable`.

    Args:
        iterable (Iterable): Source iterable.

    Returns:
        Iterable: A filter object yielding the non-None elements.
    """
    def _is_not_none(item):
        return item is not None
    return filter(_is_not_none, iterable)
@lru_cache(maxsize=4)
def load_known_key_conversions(key_conversions_file: str = None) -> dict:
    """Load the known_synonym -> matchms_default mapping from a CSV file.

    Results are cached per file path (lru_cache), so repeated calls do not
    re-read the file.  With no argument, the bundled
    data/known_key_conversions.csv next to this module is used.

    Raises:
        FileNotFoundError: if the CSV file does not exist.  (BUG FIX: this
        was an `assert`, which is silently stripped under `python -O`.)
    """
    if key_conversions_file is None:
        key_conversions_file = os.path.join(
            os.path.dirname(__file__), "data", "known_key_conversions.csv"
        )
    if not os.path.isfile(key_conversions_file):
        raise FileNotFoundError(f"Could not find {key_conversions_file}")
    # utf-8-sig transparently strips a BOM if Excel wrote one.
    with open(key_conversions_file, newline='', encoding='utf-8-sig') as csvfile:
        reader = csv.DictReader(csvfile)
        return {row['known_synonym']: row['matchms_default'] for row in reader}
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker.sol_refactored.api import api_version
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.tests import base
class TestAPIVersion(base.BaseTestCase):
    """Unit tests for api_version.APIVersion parsing and comparison."""

    def test_init_null(self):
        # No-argument constructor produces the "null" version sentinel.
        vers = api_version.APIVersion()
        self.assertTrue(vers.is_null())

    @mock.patch.object(api_version, 'supported_versions',
                       new=["3.1.4159", "2.0.0"])
    def test_init(self):
        # Valid strings round-trip through str(); an "-impl:..." suffix is
        # dropped from the canonical form.
        for vers, vers_str in [("2.0.0", "2.0.0"),
                               ("3.1.4159", "3.1.4159"),
                               ("2.0.0-impl:foobar", "2.0.0")]:
            v = api_version.APIVersion(vers)
            self.assertEqual(str(v), vers_str)

    def test_init_exceptions(self):
        # Malformed strings and unsupported versions raise distinct errors.
        self.assertRaises(sol_ex.InvalidAPIVersionString,
                          api_version.APIVersion, "0.1.2")
        self.assertRaises(sol_ex.APIVersionNotSupported,
                          api_version.APIVersion, "9.9.9")

    @mock.patch.object(api_version, 'supported_versions',
                       new=["1.3.0", "1.3.1", "2.0.0"])
    def test_compare(self):
        # Versions order like semantic versions.
        self.assertTrue(api_version.APIVersion("1.3.0") <
                        api_version.APIVersion("1.3.1"))
        self.assertTrue(api_version.APIVersion("2.0.0") >
                        api_version.APIVersion("1.3.1"))

    @mock.patch.object(api_version, 'supported_versions',
                       new=["1.3.0", "1.3.1", "2.0.0"])
    def test_matches(self):
        # matches(min, max); a null APIVersion appears to act as an open
        # bound on that side -- confirm in api_version.
        vers = api_version.APIVersion("2.0.0")
        self.assertTrue(vers.matches(api_version.APIVersion("1.3.0"),
                                     api_version.APIVersion()))
        self.assertFalse(vers.matches(api_version.APIVersion(),
                                      api_version.APIVersion("1.3.1")))
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
SQS Message
A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS
Message are here:
http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
So, at its simplest level a Message just needs to allow a developer to store bytes in it and get the bytes
back out. However, to allow messages to have richer semantics, the Message class must support the
following interfaces:
The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
boto Queue object and represents the queue that the message will be stored in. The default value for
this parameter is None.
The constructor for the Message class must accept a keyword parameter "body" which represents the
content or body of the message. The format of this parameter will depend on the behavior of the
particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the
user the body passed to the constructor should be a dict-like object that can be used to populate
the initial state of the message.
The Message class must provide an encode method that accepts a value of the same type as the body
parameter of the constructor and returns a string of characters that are able to be stored in an
SQS message body (see rules above).
The Message class must provide a decode method that accepts a string of characters that can be
stored (and probably were stored!) in an SQS message and return an object of a type that is consistent
with the "body" parameter accepted on the class constructor.
The Message class must provide a __len__ method that will return the size of the encoded message
that would be stored in SQS based on the current state of the Message object.
The Message class must provide a get_body method that will return the body of the message in the
same format accepted in the constructor of the class.
The Message class must provide a set_body method that accepts a message body in the same format
accepted by the constructor of the class. This method should alter to the internal state of the
Message object to reflect the state represented in the message body parameter.
The Message class must provide a get_body_encoded method that returns the current body of the message
in the format in which it would be stored in SQS.
"""
import base64
import boto
from boto.compat import StringIO
from boto.compat import six
from boto.sqs.attributes import Attributes
from boto.sqs.messageattributes import MessageAttributes
from boto.exception import SQSDecodeError
class RawMessage(object):
    """
    Base class for SQS messages. RawMessage does not encode the message
    in any way. Whatever you store in the body of the message is what
    will be written to SQS and whatever is returned from SQS is stored
    directly into the body of the message.
    """
    def __init__(self, queue=None, body=''):
        self.queue = queue                 # boto Queue this message belongs to (or None)
        self.set_body(body)                # stored via set_body so subclasses can hook it
        self.id = None                     # SQS MessageId, set when received
        self.receipt_handle = None         # needed to delete / change visibility
        self.md5 = None                    # MD5OfBody reported by SQS
        self.attributes = Attributes(self)
        self.message_attributes = MessageAttributes(self)
        self.md5_message_attributes = None

    def __len__(self):
        # Size of the message as it would be stored in SQS (encoded form).
        return len(self.encode(self._body))

    def startElement(self, name, attrs, connection):
        # SAX-style XML parsing hook: delegate nested Attribute /
        # MessageAttribute elements to their container objects.
        if name == 'Attribute':
            return self.attributes
        if name == 'MessageAttribute':
            return self.message_attributes
        return None

    def endElement(self, name, value, connection):
        # SAX-style hook: map known response elements onto fields; any
        # unrecognized element becomes an attribute of the same name.
        if name == 'Body':
            self.set_body(value)
        elif name == 'MessageId':
            self.id = value
        elif name == 'ReceiptHandle':
            self.receipt_handle = value
        elif name == 'MD5OfBody':
            self.md5 = value
        elif name == 'MD5OfMessageAttributes':
            self.md5_message_attributes = value
        else:
            setattr(self, name, value)

    def endNode(self, connection):
        # After the XML node is fully parsed, decode the wire-format body.
        self.set_body(self.decode(self.get_body()))

    def encode(self, value):
        """Transform body object into serialized byte array format."""
        return value

    def decode(self, value):
        """Transform serialized byte array into any object."""
        return value

    def set_body(self, body):
        """Override the current body for this object, using decoded format."""
        self._body = body

    def get_body(self):
        """Return the body in decoded (constructor) format."""
        return self._body

    def get_body_encoded(self):
        """
        This method is really a semi-private method used by the Queue.write
        method when writing the contents of the message to SQS.
        You probably shouldn't need to call this method in the normal course of events.
        """
        return self.encode(self.get_body())

    def delete(self):
        # Delete via the owning queue; a no-op when the message is detached.
        if self.queue:
            return self.queue.delete_message(self)

    def change_visibility(self, visibility_timeout):
        # Extend/shorten this message's visibility timeout on its queue.
        if self.queue:
            self.queue.connection.change_message_visibility(self.queue,
                                                            self.receipt_handle,
                                                            visibility_timeout)
class Message(RawMessage):
    """
    The default Message class used for SQS queues. This class automatically
    encodes/decodes the message body using Base64 encoding to avoid any
    illegal characters in the message body. See:
    https://forums.aws.amazon.com/thread.jspa?threadID=13067
    for details on why this is a good idea. The encode/decode is meant to
    be transparent to the end-user.
    """

    def encode(self, value):
        """Return *value* Base64-encoded as text; str input is utf-8 encoded first."""
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        return base64.b64encode(value).decode('utf-8')

    def decode(self, value):
        """Base64-decode *value*; on failure, log a warning and return it unchanged."""
        try:
            value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. Decode failures still degrade
            # gracefully by returning the raw value.
            boto.log.warning('Unable to decode message')
            return value
        return value
class MHMessage(Message):
    """
    The MHMessage class provides a message that provides RFC821-like
    headers like this:
    HeaderName: HeaderValue
    The encoding/decoding of this is handled automatically and after
    the message body has been read, the message instance can be treated
    like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
    """

    def __init__(self, queue=None, body=None, xml_attrs=None):
        if body is None or body == '':
            # Fresh dict per instance; a dict default argument would be
            # shared across all messages.
            body = {}
        super(MHMessage, self).__init__(queue, body)

    def decode(self, value):
        """Parse 'Key: Value' lines into a dict; raise SQSDecodeError on failure."""
        try:
            msg = {}
            fp = StringIO(value)
            line = fp.readline()
            while line:
                delim = line.find(':')
                key = line[0:delim]
                value = line[delim + 1:].strip()
                msg[key.strip()] = value.strip()
                line = fp.readline()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to SQSDecodeError.
            raise SQSDecodeError('Unable to decode message', self)
        return msg

    def encode(self, value):
        """Serialize a mapping into 'Key: Value' lines."""
        # str.join instead of repeated '+' concatenation (avoids quadratic
        # behavior for messages with many headers); iteration order and the
        # produced text are identical.
        return ''.join('%s: %s\n' % (k, v) for k, v in value.items())

    def __contains__(self, key):
        return key in self._body

    def __getitem__(self, key):
        if key in self._body:
            return self._body[key]
        else:
            raise KeyError(key)

    def __setitem__(self, key, value):
        self._body[key] = value
        self.set_body(self._body)

    def keys(self):
        return self._body.keys()

    def values(self):
        return self._body.values()

    def items(self):
        return self._body.items()

    def has_key(self, key):
        # Kept for backwards compatibility; prefer ``key in message``.
        return key in self._body

    def update(self, d):
        self._body.update(d)
        self.set_body(self._body)

    def get(self, key, default=None):
        return self._body.get(key, default)
class EncodedMHMessage(MHMessage):
    """
    The EncodedMHMessage class provides a message that provides RFC821-like
    headers like this:
    HeaderName: HeaderValue
    This variation encodes/decodes the body of the message in base64 automatically.
    The message instance can be treated like a mapping object,
    i.e. m['HeaderName'] would return 'HeaderValue'.
    """

    def decode(self, value):
        """Base64-decode then parse the header lines; SQSDecodeError on bad input."""
        try:
            value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not masked as decode errors.
            raise SQSDecodeError('Unable to decode message', self)
        return super(EncodedMHMessage, self).decode(value)

    def encode(self, value):
        """Serialize the headers, then Base64-encode the result."""
        value = super(EncodedMHMessage, self).encode(value)
        return base64.b64encode(value.encode('utf-8')).decode('utf-8')
|
import os
import sys
import torch
from TorchProteinLibrary import FullAtomModel
import numpy as np
import unittest
import random
import math
from TorchProteinLibrary.Volume import VolumeConvolution
import _Volume
class TestVolumeConvolution(unittest.TestCase):
    """Shared fixture and voxel-grid helpers for VolumeConvolution tests."""
    device = 'cuda'  # tests require a CUDA device
    dtype = torch.float
    places = 5
    batch_size = 4
    max_num_atoms = 16
    eps = 1e-03   # finite-difference step (overridden by the backward test)
    atol = 1e-05  # absolute tolerance
    rtol = 0.001  # relative tolerance
    msg = "Testing VolumeConvolution"

    def setUp(self):
        print(self.msg, self.device, self.dtype)
        self.vc = VolumeConvolution()
        self.box_size = 30    # cubic volume edge length, in voxels
        self.resolution = 1.0

    def fill_V1(self, r0, r1, R0, R1, volume):
        """Fill volume[0,0] with a sphere of radius R0 at r0, excluding a sphere of radius R1 at r1."""
        volume_size = volume.size(2)  # assumes a cubic volume
        for x in range(volume_size):
            for y in range(volume_size):
                for z in range(volume_size):
                    r_0 = np.array([x, y, z]) - r0
                    r_1 = np.array([x, y, z]) - r1
                    dist0 = np.linalg.norm(r_0)
                    dist1 = np.linalg.norm(r_1)
                    if dist0 < R0 and dist1 > R1:
                        volume.data[0, 0, x, y, z] = 1.0

    def fill_V2(self, r0, R0, volume):
        """Fill volume[0,0] with a solid sphere of radius R0 centered at r0."""
        volume_size = volume.size(2)  # assumes a cubic volume
        for x in range(volume_size):
            for y in range(volume_size):
                for z in range(volume_size):
                    r_0 = np.array([x, y, z]) - r0
                    dist0 = np.linalg.norm(r_0)
                    if dist0 < R0:
                        volume.data[0, 0, x, y, z] = 1.0

    def get_boundary(self, volume_in, volume_out):
        """Mark voxels that are empty but have an occupied 3x3x3 neighborhood (surface shell)."""
        volume_size = volume_in.size(2)
        for x in range(1, volume_size - 1):
            for y in range(1, volume_size - 1):
                for z in range(1, volume_size - 1):
                    cs = torch.sum(volume_in.data[0, 0, x - 1:x + 2, y - 1:y + 2, z - 1:z + 2]).item()
                    cc = volume_in.data[0, 0, x, y, z].item()
                    if (cc < 0.01) and (cs > 0.9):
                        volume_out.data[0, 0, x, y, z] = 1.0

    def get_argmax(self, volume):
        """Return (max value, index tuple) over a rank-3 tensor.

        NOTE(review): uses size(0) for all three loops, so this assumes a
        cubic tensor; the initial max_val is a 0-d tensor while later values
        are floats - works via tensor/float comparison, but verify.
        """
        volume_size = volume.size(0)
        arg = (0, 0, 0)
        max_val = volume.data[0, 0, 0]
        for x in range(volume_size):
            for y in range(volume_size):
                for z in range(volume_size):
                    val = float(volume.data[x, y, z])
                    if val > max_val:
                        arg = (x, y, z)
                        max_val = val
        return max_val, arg
class TestVolumeConvolutionForward(TestVolumeConvolution):
    msg = "Testing VolumeConvolutionFwd"

    def runTest(self):
        """Correlate a sphere with a hollowed sphere and check the best displacement."""
        R0 = 8
        x0 = [15, 15, 15]  # center of the large sphere
        x1 = [23, 15, 15]  # center of the removed pocket
        y0 = [15, 20, 15]  # center of the probe sphere
        R1 = 5
        inp1 = torch.zeros(1, 1, self.box_size, self.box_size, self.box_size, dtype=self.dtype, device=self.device)
        inp1_border = torch.zeros_like(inp1)
        inp2 = torch.zeros_like(inp1)
        inp2_border = torch.zeros_like(inp1)
        # "Receptor": sphere R0 at x0 with a pocket (sphere R1 at x1) removed.
        self.fill_V1(np.array(x0), np.array(x1), R0, R1, inp1)
        self.get_boundary(inp1, inp1_border)
        # "Ligand": solid sphere R1 at y0.
        self.fill_V2(np.array(y0), R1, inp2)
        # self.get_boundary(inp2, inp2_border)
        border_overlap = self.vc(inp1_border, inp2)
        bulk_overlap = self.vc(inp1, inp2)
        # Score rewards surface contact and penalizes bulk overlap.
        out = border_overlap - 0.5 * bulk_overlap
        score, r = self.get_argmax(out[0, 0, :, :, :])
        # Indices >= box_size correspond to negative displacements
        # (the output grid is 2*box_size per axis - see the backward test).
        r = list(r)
        if r[0] >= self.box_size:
            r[0] = -(2 * self.box_size - r[0])
        if r[1] >= self.box_size:
            r[1] = -(2 * self.box_size - r[1])
        if r[2] >= self.box_size:
            r[2] = -(2 * self.box_size - r[2])
        # The optimal displacement should move the probe into the pocket at x1.
        self.assertEqual(r[0] + y0[0], x1[0])
        self.assertEqual(r[1] + y0[1], x1[1])
        self.assertEqual(r[2] + y0[2], x1[2])
        # out2 = torch.zeros_like(inp1)
        # self.fill_V2(np.array([y0[0]+r[0],y0[1]+r[1],y0[2]+r[2]]), R1, out2)
        # v_out = inp1 + out2
        # _Volume.Volume2Xplor(v_out.squeeze().cpu(), "vout.xplor", 1.0)
class TestVolumeConvolutionBackward(TestVolumeConvolution):
    msg = "Testing VolumeConvolutionBwd"
    # Looser step/tolerances for numerical differentiation.
    eps = 4.0
    atol = 1.0
    rtol = 0.01

    def runTest(self):
        """Compare analytic gradients w.r.t. inp2 against central finite differences."""
        inp1 = torch.zeros(1, 1, self.box_size, self.box_size, self.box_size, dtype=self.dtype, device=self.device)
        inp2tmp_p = torch.zeros_like(inp1)
        inp2tmp_m = torch.zeros_like(inp1)
        inp2 = torch.zeros_like(inp1)
        inp1.requires_grad_()
        inp2.requires_grad_()
        # The convolution output lives on a (2*box_size)^3 grid.
        target = torch.zeros(1, 1, 2 * self.box_size, 2 * self.box_size, 2 * self.box_size, dtype=self.dtype, device=self.device)
        r0 = [20, 15, 15]
        r1 = [23, 15, 7]
        l0 = [7, 24, 7]
        self.fill_V2(np.array([12, 30, 50]), 5, target)
        self.fill_V1(np.array(r0), np.array(r1), 8, 5, inp1)
        self.fill_V2(np.array(l0), 5, inp2)
        out = self.vc(inp1, inp2)
        # Squared-error loss against the target volume.
        E0 = torch.sum((out - target) * (out - target))
        E0.backward()
        # num_grads = []
        # an_grads = []
        for i in range(0, inp1.size(0)):
            for j in range(0, inp1.size(2)):
                # Probe the gradient along the x-axis at fixed (y, z) = (7, 7).
                x = j
                y = 7
                z = 7
                inp2tmp_p.copy_(inp2)
                inp2tmp_m.copy_(inp2)
                inp2tmp_p[0, 0, x, y, z] += self.eps
                inp2tmp_m[0, 0, x, y, z] -= self.eps
                out1_p = self.vc(inp1, inp2tmp_p)
                out1_m = self.vc(inp1, inp2tmp_m)
                E1_p = torch.sum((out1_p - target) * (out1_p - target))
                E1_m = torch.sum((out1_m - target) * (out1_m - target))
                # Central finite difference of the loss.
                dE_dx = (E1_p.item() - E1_m.item()) / (2.0 * self.eps)
                # print(inp2.grad[0, 0, x, y, z].item(), dE_dx)
                self.assertLess(math.fabs(dE_dx - inp2.grad[0, 0, x, y, z].item()), math.fabs(dE_dx) * self.rtol + self.atol)
                # num_grads.append(dE_dx)
                # an_grads.append(inp2.grad[0,0,x,y,z].item())
        # import matplotlib.pylab as plt
        # fig = plt.figure()
        # plt.plot(num_grads, 'r.-', label = 'num grad')
        # plt.plot(an_grads,'bo', label = 'an grad')
        # plt.ylim(0, 100)
        # plt.legend()
        # plt.savefig('TestFig/test_backward_new.png')
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
import argparse
import borg_verifier
import logging
import sys
logger = logging.getLogger(__name__)
def main():
    """Parse CLI arguments, configure logging, and run the borg verifier.

    Returns:
        int: process exit status (0 on success, 1 on bad credentials).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('pushgateway')
    parser.add_argument('repos', nargs='+')
    parser.add_argument('--auth_username', default=None)
    parser.add_argument('--auth_password', default=None)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--quiet', action='store_true')
    args = parser.parse_args()

    # --debug wins over --quiet when both are given.
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARNING
    logging.basicConfig(level=log_level)

    credentials = None
    if args.auth_username or args.auth_password:
        credentials = (args.auth_username, args.auth_password)
        # Supplying only one half of the pair is an error.
        if any(x is None for x in credentials):
            print("Both of auth username and password must be specified",
                  file=sys.stderr)
            return 1

    borg_verifier.run(args.pushgateway, args.repos,
                      auth_credentials=credentials)
    # Explicit success status instead of the implicit None
    # (sys.exit(None) also exits 0, so callers see no change).
    return 0
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
|
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
# @auth.route('/signup',methods=['GET','POST'])
# def signup():
# form = SignUpForm()
# if form.validate_on_submit():
# user = User(email = form.email.data, username = form.username.data,password = form.password.data)
# user.save_user()
# try:
# msg = Message('Hello...Welcome to pitches.We are glad you joined us',sender=('Dancansteric@gmail.com'))
# msg.add_recipient(user.email)
# mail.send(msg)
# except Exception as e:
# print('failed')
# return redirect(url_for('auth.login'))
# title = "New Account"
# return render_template('auth/register.html',registration_form = form)
@auth.route('/login',methods=['GET','POST'])
def login():
    """Render the login form and authenticate the user on POST."""
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email = login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user,login_form.remember.data)
            # SECURITY: only honour a *relative* "next" target. The previous
            # code redirected to any URL in the query string, which is an
            # open-redirect vulnerability (attacker-controlled absolute URLs).
            next_url = request.args.get('next')
            if not next_url or not next_url.startswith('/'):
                next_url = url_for('main.index')
            return redirect(next_url)
        flash('Invalid username or Password')

    title = "One Minute Pitch login"
    return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
    """Show the registration form; create the account and send a welcome mail on valid POST."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        # GET request or invalid submission: just render the form again.
        title = "New Account"
        return render_template('auth/register.html',registration_form = form)

    new_user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
    db.session.add(new_user)
    db.session.commit()
    mail_message("Welcome to One Minute Pitch","email/welcome_user",new_user.email,user=new_user)
    return redirect(url_for('auth.login'))
@auth.route('/logout')
@login_required
def logout():
    """Log out the current user and return to the landing page."""
    logout_user()
    return redirect(url_for("main.index"))
from flask import Flask,jsonify
from flask_restplus import Api, fields, Resource
from pathlib import Path
from Gender_Classifier import retrainModel, normalize, name_encoding
import tensorflow as tf
import numpy as np
import csv
# Flask application wrapped in a Swagger-documented flask-restplus API.
app = Flask(__name__)
api = Api(
    app,
    version='1.X',
    title='Gender Classifer API',
    description='This Application Programming Interface is used to predict the Gender of a Person given the name of the person')
ns = api.namespace('api')

# Parser for /classifyGender: one or more names via a repeated form field.
parser = api.parser()
parser.add_argument(
    'Name',
    required=True,
    type= str,
    help='Give the Name of a person like Ajay',
    location='form',
    action='append')

# Parser for /retrainModel: a single labelled (Name, Gender) pair.
modelParser = api.parser()
modelParser.add_argument(
    'Name',
    required=True,
    type= str,
    help='Give the Name of a person like Ajay',
    location='form')
modelParser.add_argument(
    'Gender',
    required=True,
    type= str,
    help='M/F',
    location='form')
@ns.route('/classifyGender')
class ClassifiyGender(Resource):
    """Predict gender probabilities for one or more names."""

    # Lazily-loaded Keras model shared across requests. The previous code
    # reloaded the model from disk for every single name, which is very slow.
    _model = None

    @classmethod
    def _get_model(cls):
        """Load the gender model once and reuse it on subsequent calls."""
        if cls._model is None:
            model_dir = Path("Gender_Classifier/gender_model.h5")
            cls._model = tf.keras.models.load_model(model_dir)
        return cls._model

    @api.doc(parser=parser)
    def post(self):
        """Classify every submitted name; 404 on any non-alphabetic input."""
        args = parser.parse_args()
        nameList = args['Name']
        resultList = []
        for name in nameList:
            if(name.isalpha()):
                resultList.append(self.get_result(name))
            else:
                return app.response_class(response="Error",status=404)
        response = jsonify(resultList)
        response.status_code=200
        return response

    def get_result(self, name):
        """Return {'Name', 'Male', 'Female'} prediction dict for one name."""
        model = self._get_model()
        prediction = model.predict(
            np.asarray([np.asarray(name_encoding(normalize(name)))]))
        return {
            'Name':name,
            'Male':(prediction.tolist())[0][0],
            'Female':(prediction.tolist())[0][1]
        }
@ns.route('/retrainModel')
class RetrainModel(Resource):
    """Append a labelled (Name, Gender) pair to the dataset and retrain."""

    @api.doc(parser=modelParser)
    def post(self):
        args = modelParser.parse_args()
        dataset_dir = Path("Gender_Classifier/name_gender.csv")
        # newline='' is required by the csv module; without it every row is
        # followed by a blank line on Windows.
        with open(dataset_dir, 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([args['Name'],args['Gender'],1])
        retrainModel()
        return app.response_class(response="Success",status=200)
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
# 256-entry lookup table for a byte-wise, reflected CRC-16
# (initial value 0xFFFF, see calcString below).
crc_table = (
    0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
    0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
    0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
    0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
    0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
    0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
    0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
    0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
    0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
    0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
    0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
    0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
    0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
    0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
    0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
    0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
    0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
    0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
    0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
    0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
    0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
    0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
    0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
    0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
    0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
    0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
    0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
    0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
    0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
    0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
    0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
    0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040)


def calcString(s):
    """Return the CRC-16 of *s* as a two-character little-endian string.

    The CRC starts at 0xFFFF and is updated one byte at a time via the
    lookup table. (The old comment was misplaced after the first statement
    - so it was never a real docstring - and wrongly claimed the function
    took a starting CRC argument.)
    """
    crc = 0xffff
    for ch in s:
        crc = (crc >> 8) ^ crc_table[(crc ^ ord(ch)) & 0xFF]
    return "{}{}".format(chr(crc & 0xff), chr(crc >> 8 & 0xff))


def validate(s, crc):
    """Return True if *crc* matches the CRC-16 of *s*."""
    return calcString(s) == crc
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import Index, MetaData, String, Table
def upgrade(migrate_engine):
    """Create the task_log table; roll it back and re-raise if creation fails."""
    meta = MetaData()
    meta.bind = migrate_engine

    # create new table
    task_log = Table('task_log', meta,
                     Column('created_at', DateTime(timezone=False)),
                     Column('updated_at', DateTime(timezone=False)),
                     Column('deleted_at', DateTime(timezone=False)),
                     Column('deleted',
                            Boolean(create_constraint=True, name=None)),
                     Column('id', Integer(),
                            primary_key=True,
                            nullable=False,
                            autoincrement=True),
                     Column('task_name', String(255), nullable=False),
                     Column('state', String(255), nullable=False),
                     Column('host', String(255), index=True, nullable=False),
                     Column('period_beginning', String(255),
                            index=True, nullable=False),
                     Column('period_ending', String(255), index=True, nullable=False),
                     Column('message', String(255), nullable=False),
                     Column('task_items', Integer()),
                     Column('errors', Integer()),
                     )
    try:
        task_log.create()
    except Exception:
        # Clean up any partially-created objects before propagating.
        meta.drop_all(tables=[task_log])
        raise
    if migrate_engine.name == "mysql":
        # Ensure transactional storage engine on MySQL.
        migrate_engine.execute("ALTER TABLE task_log "
                               "Engine=InnoDB")
def downgrade(migrate_engine):
    """Drop the task_log table created by upgrade()."""
    meta = MetaData()
    meta.bind = migrate_engine
    # autoload reflects the existing table definition from the database.
    task_log = Table('task_log', meta, autoload=True)
    task_log.drop()
|
#!/usr/bin/env python
"""
Copyright Gerald Kaszuba 2008
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Change path so this module runs in any case
import os
os.chdir(os.path.abspath(os.path.dirname(__file__)))

from glob import glob

# Run every example script in this directory, skipping known non-examples.
examples = glob('*.py')
notexamples = ['settings.py', 'helper.py', 'all.py']
for example in examples:
    if example in notexamples:
        continue
    module = example[:-3]  # strip the '.py' suffix to get the module name
    print(module)
    # Each example module is expected to expose a main() entry point.
    __import__(module).main()
|
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Find the element that appears once when every other element
        appears three times, using a per-bit saturating counter mod 3
        encoded across two ints as (two, one): 00 -> 01 -> 11 -> 00.
        """
        one, two = 0, 0
        for value in nums:
            # Bits moving from count 0 to count 1 (00 -> 01).
            next_one = one | ~one & ~two & value
            # Bits moving from count 1 to count 2 (01 -> 11).
            next_two = two | one & ~two & value
            # Bits completing a triple wrap back to zero (11 -> 00).
            wrap = one & two & value
            one = next_one ^ wrap
            two = next_two ^ wrap
        # Bits of the unique element are left with count 1.
        return one
from collections import defaultdict
import operator
from .constants import LOG_LEVELS, LOG_TYPES, LOG_ROLES
def is_code_valid(code):
    """Validate a single code-definition dict; raise ValueError on any problem.

    NOTE(review): this function relies on Python 2-only constructs
    (``basestring`` and ``dict.keys() + list`` concatenation) and would
    raise NameError/TypeError on Python 3 - confirm the target runtime
    before porting.
    """
    # Keys every code definition must carry, with their expected types.
    required_keys = {
        "type": basestring,
        "level": int,
        "selectable_by": list,
        "description": basestring,
        "stops_timer": bool,
    }
    # Optional keys are allowed on top of the required ones.
    all_keys = required_keys.keys() + ["set_requires_action_by", "order"]
    for key, type_ in required_keys.items():
        if key not in code:
            raise ValueError("%s is missing from code definition" % key)
        if not isinstance(code[key], type_):
            raise ValueError("%s is not of expected type: %s" % (key, type_))
    # Enumerated fields must use the declared constant choices.
    if code["type"] not in LOG_TYPES.CHOICES_DICT:
        raise ValueError("Unknown type %s (must be one of %s)" % (code["type"], ", ".join(LOG_TYPES.CHOICES_DICT)))
    if code["level"] not in LOG_LEVELS.CHOICES_DICT:
        raise ValueError(
            "Unknown level %s (must be one of %s)"
            % (code["level"], ", ".join([str(level) for level in LOG_LEVELS.CHOICES_DICT]))
        )
    for selectable_by in code["selectable_by"]:
        if selectable_by not in LOG_ROLES.CHOICES_DICT:
            raise ValueError(
                "Unknown role %s (must be one of %s)" % (selectable_by, ", ".join(map(str, LOG_ROLES.CHOICES_DICT)))
            )
    # Reject any keys outside the known set.
    diff = set(code.keys()) - set(all_keys)
    if len(diff) > 0:
        raise ValueError("Unknown key(s) %s (must be one of %s)" % (diff, all_keys))
    return True
class EventRegistry(object):
    """Registry mapping action keys to event classes, with code validation."""

    def __init__(self):
        self._registry = {}

    def register(self, event_cls):
        """Validate *event_cls* and store it under its ``key`` attribute."""
        # checking that codes is not empty
        if not event_cls.codes:
            raise ValueError("%s does not define any codes. Please add codes={} to the class" % event_cls.__name__)
        if not event_cls.key:
            raise ValueError(
                "%s does not define any key. Please add key='<action-key>' to the class" % event_cls.__name__
            )
        # checking that each code in codes has the right format
        for code in event_cls.codes.values():
            is_code_valid(code)
        self._registry[event_cls.key] = event_cls

    def get_event(self, key):
        """Return the event class registered under *key*; ValueError if absent."""
        if key not in self._registry:
            raise ValueError(u"%s not registered" % key)
        return self._registry[key]

    def get_selectable_events(self, role):
        """Return {action_key: selectable codes} for events selectable by *role*."""
        events = defaultdict(list)
        for action_key, event_cls in self._registry.items():
            selectable_codes = event_cls.get_selectable_codes(role)
            if selectable_codes:
                events[action_key] = selectable_codes
        return events

    def all(self):
        """
        :return: all codes in the registry as a unified dictionary
        """
        # Explicit merge instead of dict(reduce(operator.add, ...)): the
        # result is identical (later entries win on duplicate keys), but this
        # also works on an empty registry (reduce() with no initial value
        # raised TypeError) and does not depend on the `reduce` builtin,
        # which no longer exists on Python 3.
        merged = {}
        for event_cls in self._registry.values():
            merged.update(event_cls.codes)
        return merged

    def filter(self, **kwargs):
        """
        Similar to Django's models.objects.filter(...) this will filter
        all of the codes in the system by the kwargs passed into this
        function.
        For example,
        >> registry.filter(stops_timer=True)
        would return all codes in the system that stop the timer. You can
        supply multiple kwargs, so if you wanted to see all codes which
        are of type 'OUTCOME' and don't stop the timer you can do this:
        >> registry.filter(stops_timer=False, type=LOG_TYPES.OUTCOME)
        :param kwargs: set of keyword args you want to filter the outcome
            codes by
        :return: returns a unified dictionary of filtered outcome codes
            registered in this registry.
        """
        return {k: v for k, v in self.all().items() if all([v[kk] == vv for kk, vv in kwargs.items()])}
# Module-level singleton registry shared by all importers of this module.
event_registry = EventRegistry()
|
# Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tff.aggregator for implementing 3LC."""
import collections
import tensorflow as tf
import tensorflow_federated as tff
from compressed_communication.aggregators.utils import quantize_utils
class ThreeLCFactory(tff.aggregators.UnweightedAggregationFactory):
  """Aggregator that implements 3LC.
  Expects `value_type` to be a `TensorType`.
  Paper: https://arxiv.org/abs/1802.07389
  """

  def __init__(self, sparsity_factor=1.):
    """Initializer for ThreeLCFactory.
    Args:
      sparsity_factor: Multiplier applied to the max-magnitude scale factor
        during quantization. By default 1.
    """
    self._sparsity_factor = sparsity_factor

  def create(self, value_type):
    """Build the stateless AggregationProcess for a float TensorType."""
    if not tff.types.is_structure_of_floats(
        value_type) or not value_type.is_tensor():
      raise ValueError("Expect value_type to be a float tensor, "
                       f"found {value_type}.")

    @tff.tf_computation((value_type, tf.float32))
    def decode(encoded_value):
      # Inverse of `encode`: rescale the quantized tensor by its scale factor.
      quantized_value, scale_factor = encoded_value
      decoded_value = scale_factor * quantized_value
      return decoded_value

    @tff.tf_computation
    def get_zero_run_lengths(value):
      # Append nonzero value at start and end to capture length of any leading
      # or trailing zeros.
      value = tf.cast(value, tf.int32)
      padded_value = tf.concat(
          [tf.constant([1]), value, tf.constant([1])], axis=0)
      nonzero_indices = tf.where(tf.not_equal(padded_value, 0))
      # Gaps between consecutive nonzero positions give run lengths.
      zero_run_lengths = nonzero_indices[1:] - nonzero_indices[:-1]
      # Account for case where there are no trailing zeros.
      zero_run_lengths = tf.cond(
          tf.equal(zero_run_lengths[-1], 1), lambda: zero_run_lengths[:-1],
          lambda: zero_run_lengths)
      zero_run_lengths = tf.subtract(zero_run_lengths, 1)
      zero_run_lengths = tf.reshape(zero_run_lengths,
                                    [tf.size(zero_run_lengths)])
      # Keep only strictly positive run lengths.
      zero_run_lengths = tf.gather(zero_run_lengths,
                                   tf.where(zero_run_lengths > 0))
      return tf.cast(zero_run_lengths, tf.float32)

    @tff.tf_computation(value_type)
    def encode(value):
      # Scale by the max magnitude (optionally sparsified) and stochastically
      # quantize; also estimate the resulting bitrate and distortion.
      max_magnitude = tf.reduce_max(tf.abs(value))
      scale_factor = max_magnitude * self._sparsity_factor
      # NOTE(review): timestamp-based seed makes quantization
      # non-deterministic across calls - presumably intentional here.
      seed = tf.cast(tf.stack([tf.timestamp() * 1e6, tf.timestamp() * 1e6]),
                     dtype=tf.int64)
      quantized_value = tf.cast(quantize_utils.stochastic_quantize(
          value, scale_factor, seed), tf.float32)
      encoded_value = (quantized_value, scale_factor)
      decoded_value = decode(encoded_value)
      value_size = tf.size(value, out_type=tf.float32)
      # Mean squared quantization error.
      distortion = tf.reduce_sum(
          tf.square(value - decoded_value)) / value_size

      @tf.function
      def get_pad(size):
        # Pad so the flat tensor splits evenly into groups of 5 values.
        pad = 0
        if tf.math.floormod(size, 5) > 0:
          pad = 5 - tf.cast(tf.math.floormod(size, 5), tf.int32)
        return pad

      padded_value = tf.pad(quantized_value, [[0, get_pad(value_size)]])
      quintuples = tf.reshape(padded_value, (-1, 5))
      # One byte is charged per 5-value group containing any nonzero value.
      binarized_value = tf.cast(tf.logical_not(
          tf.reduce_all(tf.equal(quintuples, 0), axis=-1)), tf.float32)
      nonzero_bits = tf.reduce_sum(binarized_value) * 8.
      runlengths = get_zero_run_lengths(binarized_value)
      # base-3^5 encoding represents 2 <= runlengths <= 14 with a single byte
      zero_bits = tf.reduce_sum(tf.math.ceil(runlengths / 14.)) * 8.
      # +32 bits for the float32 scale factor.
      bitrate = (nonzero_bits + zero_bits + 32.) / value_size
      return encoded_value, bitrate, distortion

    @tff.federated_computation()
    def init_fn():
      # Stateless aggregator: empty server state.
      return tff.federated_value((), tff.SERVER)

    def sum_encoded_value(value):
      # Decode during accumulation so summation happens in the decoded domain.

      @tff.tf_computation
      def get_accumulator():
        return tf.zeros(shape=value_type.shape, dtype=tf.float32)

      @tff.tf_computation
      def decode_accumulate_values(accumulator, encoded_value):
        decoded_value = decode(encoded_value)
        return accumulator + decoded_value

      @tff.tf_computation
      def merge_decoded_values(decoded_value_1, decoded_value_2):
        return decoded_value_1 + decoded_value_2

      @tff.tf_computation
      def report_decoded_summation(summed_decoded_values):
        return summed_decoded_values

      return tff.federated_aggregate(
          value,
          zero=get_accumulator(),
          accumulate=decode_accumulate_values,
          merge=merge_decoded_values,
          report=report_decoded_summation)

    @tff.federated_computation(init_fn.type_signature.result,
                               tff.type_at_clients(value_type))
    def next_fn(state, value):
      # Encode on clients, report mean bitrate/distortion as measurements,
      # and sum the decoded values on the server.
      encoded_value, bitrate, distortion = tff.federated_map(encode, value)
      avg_bitrate = tff.federated_mean(bitrate)
      avg_distortion = tff.federated_mean(distortion)
      result = sum_encoded_value(encoded_value)
      return tff.templates.MeasuredProcessOutput(
          state=state,
          result=result,
          measurements=tff.federated_zip(
              collections.OrderedDict(avg_bitrate=avg_bitrate,
                                      avg_distortion=avg_distortion)))

    return tff.templates.AggregationProcess(init_fn, next_fn)
|
import pytest
from co2_diag.formatters import numstr, my_round, \
tex_escape
def test_number_string_formatting_2decimals():
    # numstr formats to the requested number of decimal points.
    assert numstr(3.14159, decimalpoints=2) == '3.14'
def test_number_string_formatting_largenumber():
    # Large numbers get thousands separators plus the decimal points.
    assert numstr(3141592.6534, decimalpoints=3) == '3,141,592.653'
def test_rounding_to_nearest_up10():
    # 'up' rounds toward the next multiple of `nearest`.
    assert my_round(23, nearest=10, direction='up') == 30
def test_rounding_to_nearest_down100():
    # 'down' rounds toward the previous multiple of `nearest`.
    assert my_round(761523, nearest=100, direction='down') == 761500
def test_rounding_to_nearest_bad_direction():
    # Directions other than 'up'/'down' are rejected.
    with pytest.raises(ValueError):
        my_round(761523, nearest=100, direction='side')
def test_escaping_tex():
    # Raw string literals: '\$' and '\_' are invalid escape sequences in
    # normal literals (DeprecationWarning, and a SyntaxWarning on newer
    # Pythons). The raw forms have identical byte content, so the tested
    # behavior is unchanged.
    assert tex_escape(r'\$ \_ >') == \
        r"\textbackslash{}\$ \textbackslash{}\_ \textgreater{}"
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import unittest
import logging
import sys
import xml.etree.ElementTree as ET
import fedora.rest.api as fra
from fedora.utils import sha1_for_file
from fedora.rest.ds import DatastreamProfile
test_file = "easy-file:219890"
test_dataset = "easy-dataset:5958"
@unittest.skip("on-line test")
class TestFedora(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Configure DEBUG logging to stdout and build the Fedora client from ~/src/teasy.cfg."""
        # set up logging
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
        ch.setFormatter(formatter)
        root.addHandler(ch)

        cfg_file = os.path.join(os.path.expanduser("~"), "src", "teasy.cfg")
        #cls.fedora = fra.instance(cfg_file)
        cls.fedora = fra.Fedora.from_file(cfg_file)
# @unittest.skip
# def test_reset(self):
# old = str(self.fedora)
# #Fedora.reset()
# cfg_file = os.path.join(os.path.expanduser("~"), "src", "teasy.cfg")
# self.fedora = Fedora.from_file(cfg_file=cfg_file)
# new = str(self.fedora)
# self.assertNotEqual(old, new)
# self.assertEqual(self.fedora, Fedora())
# auth required
    def test_object_xml(self):
        """Full object XML should be returned with an XML declaration (auth required)."""
        objectxml = self.fedora.object_xml(test_file)
        print(objectxml)
        self.assertTrue(objectxml.startswith("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"))
    def test_datastream_rels_ext(self):
        """RELS-EXT datastream should contain an RDF root element."""
        datastream = self.fedora.datastream(test_file, "RELS-EXT")
        #print(datastream)
        self.assertTrue("<rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">" in datastream)
# auth required
    def test_datastream_easy_file_metadata(self):
        """EASY_FILE_METADATA in xml format starts with an XML declaration (auth required)."""
        datastream = self.fedora.datastream(test_file, "EASY_FILE_METADATA", content_format="xml")
        #print(datastream)
        self.assertTrue(datastream.startswith("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"))
# no auth required -> the file it self can be downloaded without auth
    def test_datastream_easy_file_metadata_no_content(self):
        """Default content format should yield file-item metadata (no auth needed)."""
        datastream = self.fedora.datastream(test_file, "EASY_FILE_METADATA")
        #print(datastream)
        self.assertTrue("<fimd:file-item-md" in datastream)
    def test_datastream_easy_administrative_metadata_no_content(self):
        """AMD datastream of a dataset should carry administrative metadata."""
        datastream = self.fedora.datastream(test_dataset, "AMD")
        print(datastream)
        self.assertTrue("<damd:administrative-md" in datastream)
# auth required
    def test_datastream_easy_file(self):
        """EASY_FILE in xml format starts with an XML declaration (auth required)."""
        datastream = self.fedora.datastream(test_file, "EASY_FILE", content_format="xml")
        #print(datastream)
        self.assertTrue(datastream.startswith("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"))
    def test_datastream_easy_file_no_content(self):
        """Fetching EASY_FILE without a content format downloads the file itself; no assertion made."""
        datastream = self.fedora.datastream(test_file, "EASY_FILE")
        #print(datastream)
        # downloads the file
# auth required
    def test_add_relationship(self):
        """Add PROV relationships to the test file's RELS-EXT (auth required)."""
        # the datatype dateTime should not be set, because of exception in
        # nl.knaw.dans.common.fedora.rdf.FedoraRelationsConverter.rdfToRelations
        self.fedora.add_relationship(test_file,
                                     "https://www.w3.org/TR/2012/CR-prov-o-20121211/#wasDerivedFrom",
                                     "info:fedora/easy-file:1")
        self.fedora.add_relationship(test_file,
                                     "https://www.w3.org/TR/2012/CR-prov-o-20121211/#wasGeneratedBy",
                                     "http://dans.knaw.nl/aips/55e73f76-5b10-11e6-9822-685b357e70b6.5")
        self.fedora.add_relationship(test_file,
                                     "https://www.w3.org/TR/2012/CR-prov-o-20121211/#generatedAtTime",
                                     "2016-11-23T00:00:00Z", is_literal=True) #,
                                     #data_type="http://www.w3.org/2001/XMLSchema#dateTime")
# auth required
def test_purge_relationship(self):
self.fedora.purge_relationship(test_file,
"https://www.w3.org/TR/2012/CR-prov-o-20121211/#wasDerivedFrom",
"info:fedora/easy-file:1")
self.fedora.purge_relationship(test_file,
"https://www.w3.org/TR/2012/CR-prov-o-20121211/#wasGeneratedBy",
"http://dans.knaw.nl/aips/55e73f76-5b10-11e6-9822-685b357e70b6.5")
self.fedora.purge_relationship(test_file,
"https://www.w3.org/TR/2012/CR-prov-o-20121211/#generatedAtTime",
"2016-11-23T00:00:00Z", is_literal=True) #,
#data_type="http://www.w3.org/2001/XMLSchema#dateTime")
def test_add_mail_address_to_rdf(self):
self.fedora.add_relationship(test_file,
"http://www.w3.org/ns/prov#wasAssociatedWith",
"mailto:firstname.lastname@dans.knaw.nl")
def test_purge_mail_address_to_rdf(self):
self.fedora.purge_relationship(test_file,
"http://www.w3.org/ns/prov#wasAssociatedWith",
"mailto:firstname.lastname@dans.knaw.nl")
# # Adding blind nodes to RELS-EXT is not possible.
# def test_add_blind_node(self):
# self.fedora.add_relationship(test_file,
# "http://testing.com/hasMultiFacetProp",
# "_:bnode42multifacetprop")
# self.fedora.add_relationship2(test_file,
# "_:bnode42multifacetprop",
# "http://dans.knaw.nl/ontologies/conversions#isConversionOf",
# "info:fedora/easy-file:2")
# self.fedora.add_relationship2(test_file,
# "_:bnode42multifacetprop",
# "http://dans.knaw.nl/ontologies/conversions#conversionDate",
# "2016-11-23T00:00:00.000Z", is_literal=True,
# data_type="http://www.w3.org/2001/XMLSchema#dateTime")
def test_download(self):
folder = os.path.join(os.path.expanduser("~"), "tmp", "fedora_download")
meta = self.fedora.download(test_file, "EASY_FILE", folder=folder)
#print(meta)
self.fedora.download(test_file, "RELS-EXT", folder=folder)
self.fedora.download(test_file, "DC", folder=folder)
self.fedora.download(test_file, "EASY_FILE_METADATA", folder=folder)
def test_find_objects(self):
query = "cDate>=2015-01-01 pid~easy-dataset:* state=A"
result = self.fedora.find_objects(query)
#print(result)
# auth required because of post. can query unauthorized with get.
def test_risearch(self):
datasetId = "easy-dataset:450"
query = \
"PREFIX dans: <http://dans.knaw.nl/ontologies/relations#> " \
+ "PREFIX fmodel: <info:fedora/fedora-system:def/model#> " \
\
+ "SELECT ?s " \
+ "WHERE " \
+ "{ " \
+ " ?s dans:isSubordinateTo <info:fedora/" + datasetId + "> . " \
+ " ?s fmodel:hasModel <info:fedora/easy-model:EDM1FILE> " \
+ "}"
print(query)
result = self.fedora.risearch(query)
print(result)
@unittest.skip("Ignore Post methods")
def test_get_next_pid(self):
response = self.fedora.get_next_pid(namespace='easy-file')
print(response)
# easy-file:350704
@unittest.skip("Ignore post methods")
def test_ingest(self):
response = self.fedora.ingest(label='A label', namespace='tester')
print(response)
@unittest.skip("Ignore post methods")
def test_add_managed_datastream(self):
filepath = 'resources/license.pdf'
sha1 = sha1_for_file(filepath)
pid = 'easy-dataset:890'
ds_id = 'DATASET_LICENSE'
ds_label = 'license.pdf'
mediatype = 'application/pdf'
response = self.fedora.add_managed_datastream(pid, ds_id, ds_label, filepath, mediatype, sha1)
print(response)
def test_list_datastreams(self):
pid = 'easy-dataset:450'
text = self.fedora.list_datastreams(pid)
#print(text)
    @unittest.skip('Ignore put methods')
    def test_modify_datastream(self):
        """Round-trip test: reverse the depositorId inside the AMD datastream,
        upload the modified XML, and verify the server persisted the change.
        """
        # get the existing datastream 'AMD'
        dataset_id = test_dataset
        # dataset_id = 'easy-dataset:387754'
        # Register namespaces so ElementTree re-serializes with the original prefixes.
        ET.register_namespace('damd', 'http://easy.dans.knaw.nl/easy/dataset-administrative-metadata/')
        ET.register_namespace('wfs', 'http://easy.dans.knaw.nl/easy/workflow/')
        xml = self.fedora.datastream(dataset_id, "AMD")
        root = ET.fromstring(xml)
        eldp = root.find('depositorId')
        existing_depositor_id = eldp.text
        # Reversing the string yields a distinct value that is easy to verify below.
        new_depositor_id = existing_depositor_id[::-1]
        print('changing', existing_depositor_id, 'to', new_depositor_id)
        eldp.text = new_depositor_id
        doc = ET.ElementTree(element=root)
        # Write the modified document to a scratch file for the upload call.
        folder = os.path.join(os.path.expanduser("~"), "tmp", "fedora_modify")
        os.makedirs(folder, exist_ok=True)
        local_path = os.path.join(folder, 'damd.xml')
        doc.write(local_path, encoding='UTF-8', xml_declaration=True)
        #time.sleep(15)
        response = self.fedora.modify_datastream(dataset_id, 'AMD',
                                                 ds_label='Administrative metadata for this dataset',
                                                 filepath=local_path,
                                                 mediatype='application/xml',
                                                 formatURI='http://easy.dans.knaw.nl/easy/dataset-administrative-metadata/',
                                                 logMessage='testing modify datastream')
        print(response)
        self.assertEqual(200, response.status_code)
        # Fetch again and confirm the server stored the reversed id.
        xml = self.fedora.datastream(dataset_id, "AMD")
        root = ET.fromstring(xml)
        eldp = root.find('depositorId')
        self.assertEqual(new_depositor_id, eldp.text)
        # The modify response body is a datastream profile; parse and show it.
        dsp = DatastreamProfile(dataset_id, 'AMD', self.fedora)
        dsp.from_xml(response.text)
        print(dsp.props)
|
#!/usr/bin/env python3
from z3 import *
from graph_partitioning.Gralog import *
def graph2z3(g):
    """Encode proper 3-colouring of graph g as a z3 SAT instance.

    Fills the module globals R, G, B (vertex-id -> Bool literal) used by the
    main section below, and returns the populated Solver.
    """
    solver = Solver()
    global R
    R = {}
    global G
    G = {}
    global B
    B = {}
    # One Boolean per vertex and colour.
    for vertex in g.getVertices():
        vid = vertex.getId()
        R[vid] = Bool('R_%i' % vid)
        G[vid] = Bool('G_%i' % vid)
        B[vid] = Bool('B_%i' % vid)
    # Each vertex: at least one colour, and no two colours at once.
    for vertex in g.getVertices():
        vid = vertex.getId()
        solver.add(Or(R[vid], G[vid], B[vid]))
        solver.add(Or(Not(R[vid]), Not(G[vid])))
        solver.add(Or(Not(R[vid]), Not(B[vid])))
        solver.add(Or(Not(B[vid]), Not(G[vid])))
    # Adjacent vertices may not share a colour; each undirected edge once.
    for edge in g.getEdges():
        u = edge.getSource().getId()
        w = edge.getTarget().getId()
        if u > w:
            continue
        solver.add(Or(Not(R[u]), Not(R[w])))
        solver.add(Or(Not(G[u]), Not(G[w])))
        solver.add(Or(Not(B[u]), Not(B[w])))
    return solver
###### MAIN #####
# Gralog supplies the current graph; Graph(None) attaches to it.
g = Graph(None)
vertices = g.getVertices()
# NOTE(review): id_to_vertex is built but never read below -- presumably a
# leftover from an earlier version; confirm before removing.
id_to_vertex = dict()
for v in vertices:
    id_to_vertex[v.getId()] = v
s = graph2z3(g)
result = s.check()
# Reconstruct z3's three possible answers for comparison with `result`.
sat = CheckSatResult(Z3_L_TRUE)
unsat = CheckSatResult(Z3_L_FALSE)
unknown = CheckSatResult(Z3_L_UNDEF)
if result == sat:
    m = s.model()
elif result == unsat:
    gPrint("Not 3-colourable")
    exit()
else:
    gPrint("The graph is too complicated, I could not solve the instance.")
    exit()
# Paint each vertex with the colour whose literal is true in the model.
for v in g.getVertices():
    if is_true(m[R[v.getId()]]):
        v.setColor("Red")
        continue
    if is_true(m[G[v.getId()]]):
        v.setColor("Green")
        continue
    if is_true(m[B[v.getId()]]):
        v.setColor("Blue")
        continue
    # Unreachable for a valid model: the at-least-one-colour clause above
    # guarantees some literal is true for every vertex.
    gPrint("Error: vertex " + str(v) + " has no colour, terminating.")
    exit(1)
|
"""
Rules for defining the branching logic in :class:`Choice` states.
`See Step Functions docs for more details.
<https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-choice-state.html#amazon-states-language-choice-state-rules>`_
"""
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Any, Type
import attr
from attr.validators import instance_of, optional
from rhodes._util import RHODES_ATTRIB, docstring_with_param
from rhodes.exceptions import InvalidDefinitionError
from rhodes.structures import JsonPath
# Public API of this module: the variable-path helper plus every rule class.
__all__ = (
    "VariablePath",
    "ChoiceRule",
    "StringEquals",
    "StringGreaterThan",
    "StringGreaterThanEquals",
    "StringLessThan",
    "StringLessThanEquals",
    "NumericEquals",
    "NumericGreaterThan",
    "NumericGreaterThanEquals",
    "NumericLessThan",
    "NumericLessThanEquals",
    "BooleanEquals",
    "TimestampEquals",
    "TimestampGreaterThan",
    "TimestampGreaterThanEquals",
    "TimestampLessThan",
    "TimestampLessThanEquals",
    "And",
    "Or",
    "Not",
    "all_",
    "any_",
)
class VariablePath(JsonPath):
    """:class:`JsonPath` variant with overloading helper methods to generate choice rules."""

    # TODO: Add __and__ and __or__ behaviors?
    # NOTE(review): overriding __eq__ usually disables hashing unless a
    # __hash__ is defined/inherited -- confirm JsonPath's behavior.

    def __lt__(self, other: Any) -> Type["ChoiceRule"]:
        """Return a less-than rule comparing this path's value to ``other``."""
        return _derive_rule(variable=self, operator="<", value=other)

    def __le__(self, other: Any) -> Type["ChoiceRule"]:
        """Return a less-than-or-equal rule for ``other``."""
        return _derive_rule(variable=self, operator="<=", value=other)

    def __eq__(self, other: Any) -> Type["ChoiceRule"]:
        """Return an equals rule for ``other`` (NOT a boolean comparison)."""
        return _derive_rule(variable=self, operator="==", value=other)

    def __ne__(self, other: Any) -> "Not":
        """Return a :class:`Not`-wrapped equals rule for ``other``."""
        inner_rule = _derive_rule(variable=self, operator="==", value=other)
        return Not(Rule=inner_rule)

    def __gt__(self, other: Any) -> Type["ChoiceRule"]:
        """Return a greater-than rule for ``other``."""
        return _derive_rule(variable=self, operator=">", value=other)

    def __ge__(self, other: Any) -> Type["ChoiceRule"]:
        """Return a greater-than-or-equal rule for ``other``."""
        return _derive_rule(variable=self, operator=">=", value=other)
def _required_next(instance):
if instance.Next is None:
raise InvalidDefinitionError("ChoiceRule missing state transition")
def _require_choice_rule_instance(*, class_name: str, attribute_name: str, value):
    """Raise TypeError unless ``value`` is a :class:`ChoiceRule` instance."""
    if isinstance(value, ChoiceRule):
        return
    raise TypeError(f'"{class_name}.{attribute_name}" must be a "ChoiceRule". Received "{type(value)}"')
def _require_no_next(*, class_name: str, attribute_name: str, value):
if value.Next is not None:
raise ValueError(f'"{class_name}.{attribute_name}" must not have a "Next" value defined.')
def _single_to_dict(instance, suppress_next=False):
if not suppress_next:
_required_next(instance)
instance_dict = {instance.__class__.__name__: instance._serialized_value(), "Variable": str(instance.Variable)}
if instance.Next is not None:
instance_dict["Next"] = instance.Next
return instance_dict
def _convert_to_variable_path(value) -> VariablePath:
    """Coerce ``value`` to a :class:`VariablePath`, passing instances through."""
    return value if isinstance(value, VariablePath) else VariablePath(value)
def _single(cls):
    """Class decorator equipping a single-value rule with common attributes.

    Adds a converted/validated ``Variable``, an optional string ``Next``,
    documents both, and installs the shared single-value serializer.
    """
    cls.Variable = RHODES_ATTRIB(converter=_convert_to_variable_path, validator=instance_of(VariablePath))
    cls.__doc__ = docstring_with_param(
        cls, "Variable", VariablePath, description="Path to value in state input that will be evaluated"
    )
    cls.Next = RHODES_ATTRIB(validator=optional(instance_of(str)))
    cls.__doc__ = docstring_with_param(
        cls, "Next", description="The state to which to continue if this rule evaluates as true"
    )
    cls.to_dict = _single_to_dict
    return cls
def _multi_to_dict(instance, suppress_next=False):
if not suppress_next:
_required_next(instance)
# TODO: Validate that no children have a Next value
return {
instance.__class__.__name__: [rule.to_dict(suppress_next=True) for rule in instance.Rules],
"Next": instance.Next,
}
def _validate_multi_subrules(instance, attribute, value):
    """Validator: every member of a compound rule is a ChoiceRule without Next."""
    owner = instance.__class__.__name__
    for pos, rule in enumerate(value):
        position_name = f"{attribute.name}[{pos}]"
        _require_choice_rule_instance(class_name=owner, attribute_name=position_name, value=rule)
        _require_no_next(class_name=owner, attribute_name=position_name, value=rule)
def _multi(cls):
    """Class decorator for compound rules (And/Or): adds Rules and Next."""
    cls.Rules = RHODES_ATTRIB(validator=_validate_multi_subrules)
    cls.__doc__ = docstring_with_param(
        cls, "Rules", description="One or more :class:`ChoiceRule` to evaluate for this rule"
    )
    cls.Next = RHODES_ATTRIB(validator=optional(instance_of(str)))
    cls.__doc__ = docstring_with_param(
        cls, "Next", description="The state to which to continue if this rule evaluates as true"
    )
    cls.to_dict = _multi_to_dict
    return cls
def _string(cls):
    """Class decorator for string rules: adds a str-typed Value attribute."""
    cls = _single(cls)
    cls.Value = RHODES_ATTRIB(validator=instance_of(str))
    cls.__doc__ = docstring_with_param(cls, "Value", str, description="The value to which to compare ``Variable``")
    return cls
def _number(cls):
    """Class decorator for numeric rules: Decimal-backed Value, serialized as float."""
    cls = _single(cls)

    def _to_decimal(value) -> Decimal:
        # Round-trip through str() so floats convert without binary artifacts.
        if isinstance(value, Decimal):
            return value
        return Decimal(str(value))

    def _as_float(instance) -> float:
        return float(instance.Value)

    # NOTE: for interoperability, numeric comparisons should not be assumed to
    # work with values outside the magnitude or precision representable using
    # the IEEE 754-2008 "binary64" data type; in particular, integers outside
    # [-(2**53)+1, (2**53)-1] might fail to compare in the expected way.
    cls.Value = RHODES_ATTRIB(validator=instance_of(Decimal), converter=_to_decimal)
    cls.__doc__ = docstring_with_param(cls, "Value", description="The value to which to compare ``Variable``")
    cls._serialized_value = _as_float
    return cls
def _bool(cls):
    """Class decorator for boolean rules: adds a bool-typed Value attribute."""
    cls = _single(cls)
    cls.Value = RHODES_ATTRIB(validator=instance_of(bool))
    cls.__doc__ = docstring_with_param(cls, "Value", bool, description="The value to which to compare ``Variable``")
    return cls
def _timestamp(cls):
    """Class decorator for timestamp rules: tz-aware datetime Value, ISO-serialized."""
    cls = _single(cls)

    def _require_tzinfo(instance, attribute, value):
        # Naive datetimes are rejected; ASL timestamps need an offset.
        if value.tzinfo is None:
            raise ValueError(f"'{attribute.name}' must have a 'tzinfo' value set.")

    def _as_isoformat(instance):
        return instance.Value.isoformat()

    cls.Value = RHODES_ATTRIB(validator=[instance_of(datetime), _require_tzinfo])
    cls.__doc__ = docstring_with_param(cls, "Value", datetime, description="The value to which to compare ``Variable``")
    cls._serialized_value = _as_isoformat
    return cls
@attr.s(eq=False)
class ChoiceRule:
    """Base class for all choice rules."""

    # Back-reference to the owning Choice state; assigned externally.
    member_of = None

    def to_dict(self):
        """Serialize state as a dictionary."""
        raise NotImplementedError()

    def __eq__(self, other: "ChoiceRule") -> bool:
        # Rules compare equal when they serialize identically and share a
        # parent; attr's generated __eq__ is disabled via eq=False above.
        if not isinstance(other, self.__class__):
            return False
        if self.to_dict() != other.to_dict():
            return False
        if self.member_of != other.member_of:
            return False
        return True

    def __ne__(self, other: "ChoiceRule") -> bool:
        return not self.__eq__(other)

    def _serialized_value(self):
        # Default serialization: the raw Value attribute added by decorators;
        # numeric/timestamp decorators override this.
        return self.Value

    def then(self, state):
        """Set ``state`` as this rule's transition target and return it.

        Registers the state via member_of.member_of -- presumably the
        enclosing Choice's state machine; confirm against callers.
        """
        if self.Next is not None:
            raise InvalidDefinitionError(f"Choice rule already has a defined target")
        self.member_of.member_of.add_state(state)
        self.Next = state.title
        return state
@attr.s(eq=False)
@_string
class StringEquals(ChoiceRule):
    """Matches if the string value at ``Variable`` is equal to ``Value``."""
@attr.s(eq=False)
@_string
class StringLessThan(ChoiceRule):
    """Matches if the string value at ``Variable`` sorts before ``Value``."""
@attr.s(eq=False)
@_string
class StringGreaterThan(ChoiceRule):
    """Matches if the string value at ``Variable`` sorts after ``Value``."""
@attr.s(eq=False)
@_string
class StringLessThanEquals(ChoiceRule):
    """Matches if the string value at ``Variable`` sorts before or equals ``Value``."""
@attr.s(eq=False)
@_string
class StringGreaterThanEquals(ChoiceRule):
    """Matches if the string value at ``Variable`` sorts after or equals ``Value``."""
@attr.s(eq=False)
@_number
class NumericEquals(ChoiceRule):
    """Matches if the numeric value at ``Variable`` is equal to ``Value``."""
@attr.s(eq=False)
@_number
class NumericLessThan(ChoiceRule):
    """Matches if the numeric value at ``Variable`` is less than ``Value``."""
@attr.s(eq=False)
@_number
class NumericGreaterThan(ChoiceRule):
    """Matches if the numeric value at ``Variable`` is greater than ``Value``."""
@attr.s(eq=False)
@_number
class NumericLessThanEquals(ChoiceRule):
    """Matches if the numeric value at ``Variable`` is at most ``Value``."""
@attr.s(eq=False)
@_number
class NumericGreaterThanEquals(ChoiceRule):
    """Matches if the numeric value at ``Variable`` is at least ``Value``."""
@attr.s(eq=False)
@_bool
class BooleanEquals(ChoiceRule):
    """Matches if the boolean value at ``Variable`` is equal to ``Value``."""
@attr.s(eq=False)
@_timestamp
class TimestampEquals(ChoiceRule):
    """Matches if the timestamp at ``Variable`` is equal to ``Value``."""
@attr.s(eq=False)
@_timestamp
class TimestampLessThan(ChoiceRule):
    """Matches if the timestamp at ``Variable`` is earlier than ``Value``."""
@attr.s(eq=False)
@_timestamp
class TimestampGreaterThan(ChoiceRule):
    """Matches if the timestamp at ``Variable`` is later than ``Value``."""
@attr.s(eq=False)
@_timestamp
class TimestampLessThanEquals(ChoiceRule):
    """Matches if the timestamp at ``Variable`` is at or before ``Value``."""
@attr.s(eq=False)
@_timestamp
class TimestampGreaterThanEquals(ChoiceRule):
    """Matches if the timestamp at ``Variable`` is at or after ``Value``."""
# Rules and Next attributes are injected by the _multi decorator.
@attr.s(eq=False)
@_multi
class And(ChoiceRule):
    """Matches only if all of the provided rules are true."""
# Rules and Next attributes are injected by the _multi decorator.
@attr.s(eq=False)
@_multi
class Or(ChoiceRule):
    """Matches if any of the provided rules are true."""
@attr.s(eq=False)
class Not(ChoiceRule):
    """Matches only if the provided rule is false.

    :param ChoiceRule Rule: Rule that must evaluate as false
    :param Next: The state to which to continue if this rule evaluates as true
    """

    # The wrapped rule; validated below to be a ChoiceRule with no Next of its own.
    Rule = RHODES_ATTRIB(validator=instance_of(ChoiceRule))
    Next = RHODES_ATTRIB(validator=optional(instance_of(str)))

    @Rule.validator
    def _validate_rule(self, attribute, value):
        # Inner rules carry no transition; only the Not itself may have Next.
        _require_choice_rule_instance(class_name=self.__class__.__name__, attribute_name=attribute.name, value=value)
        _require_no_next(class_name=self.__class__.__name__, attribute_name=attribute.name, value=value)

    def to_dict(self, suppress_next=False):
        """Serialize state as a dictionary."""
        if not suppress_next:
            _required_next(self)
        inner_rule = self.Rule.to_dict(suppress_next=True)
        instance_dict = dict(Not=inner_rule)
        if self.Next is not None:
            instance_dict["Next"] = self.Next
        return instance_dict
# Maps value category -> operator symbol -> concrete ChoiceRule class;
# used by _derive_rule to pick the right rule for a comparison.
_OPERATORS = {
    "string": {
        "==": StringEquals,
        "<": StringLessThan,
        "<=": StringLessThanEquals,
        ">": StringGreaterThan,
        ">=": StringGreaterThanEquals,
    },
    "number": {
        "==": NumericEquals,
        "<": NumericLessThan,
        "<=": NumericLessThanEquals,
        ">": NumericGreaterThan,
        ">=": NumericGreaterThanEquals,
    },
    "time": {
        "==": TimestampEquals,
        "<": TimestampLessThan,
        "<=": TimestampLessThanEquals,
        ">": TimestampGreaterThan,
        ">=": TimestampGreaterThanEquals,
    },
    # Booleans only support equality in the Amazon States Language.
    "boolean": {"==": BooleanEquals},
}
# Maps Python value types to the category keys of _OPERATORS above.
_TYPE_MAP = {bool: "boolean", int: "number", float: "number", Decimal: "number", str: "string", datetime: "time"}
def _derive_rule(*, variable: VariablePath, operator: str, value) -> Type[ChoiceRule]:
    """Derive the correct :class:`ChoiceRule` based on the specified operator and value.

    :param variable: Path to variable in state data
    :param operator: Desired equality operator string
    :param value: Value to compare against
    :raises TypeError: if the value's type has no rule category
    :raises ValueError: if the operator is invalid for the value's category
    """
    if isinstance(value, Enum):
        # Compare against the enum's underlying value, not the member itself.
        value = value.value
    try:
        value_type = _TYPE_MAP[type(value)]
    except KeyError as error:
        # Chain the lookup failure so the original traceback is preserved (PEP 3134).
        raise TypeError(f'Unhandled value type "{type(value)}"') from error
    try:
        operator_class = _OPERATORS[value_type][operator]
    except KeyError as error:
        raise ValueError(f'Unhandled operator "{operator}"') from error
    return operator_class(Variable=variable, Value=value)
def all_(*rules: ChoiceRule) -> And:
    """Combine ``rules`` into a single :class:`And` rule."""
    return And(Rules=[*rules])
def any_(*rules: ChoiceRule) -> Or:
    """Combine ``rules`` into a single :class:`Or` rule."""
    return Or(Rules=[*rules])
|
import re
from time import strftime
import RPi.GPIO as GPIO
import Adafruit_DHT
import time_converter
from dht import DHT
from lcd import HD44780
import urllib
import json
# Length
# Field widths (in characters) for each zone of the LCD layout.
LEN_CLOCK = 5
LEN_TEMP = 6
LEN_HUMIDITY = 5
LEN_SEPARATOR = 1
LEN_TIME = 3
LEN_DESTINATION_NUMBER = 3
LEN_DESTINATION = 8
LEN_SPACE = 1
LEN_FULLLINE = 20
# Row indices on the 4-line display.
METADATA_LINE = 0
INTERMEDIATE_LINE = 1
BUS_LINE_1 = 2
BUS_LINE_2 = 3
# RATP schedule API endpoints, keyed by a short line/direction code.
BUS_TIMES = {
    "22O": "https://api-ratp.pierre-grimaud.fr/v3/schedules/bus/22/ranelagh/A?_format=json",
    "52O": "https://api-ratp.pierre-grimaud.fr/v3/schedules/bus/52/ranelagh/A?_format=json",
    "M9M": "https://api-ratp.pierre-grimaud.fr/v3/schedules/metros/9/ranelagh/A?_format=json",
    "M9P": "https://api-ratp.pierre-grimaud.fr/v3/schedules/metros/9/ranelagh/R?_format=json",
    "32A": "https://api-ratp.pierre-grimaud.fr/v3/schedules/bus/32/porte_de_passy/A?_format=json",
    "32E": "https://api-ratp.pierre-grimaud.fr/v3/schedules/bus/32/la_muette_boulainvilliers/R?_format=json"
}
# Pairs of codes shown together; the push button cycles through these pairs.
TRANSPORTS_TO_SHOW = [["22O", "52O"], ["M9M", "M9P"], ["32A", "32E"]]
def get_page(url):
    """Fetch a RATP schedules API page and extract the next two departures.

    :param url: one of the BUS_TIMES endpoints
    :return: [dest1, msg1, dest2, msg2] as UTF-8 byte strings, or the
        connect_issue() placeholder list on any failure.
    NOTE(review): urllib.urlopen is the Python 2 API -- presumably this runs
    under Python 2; confirm before porting.
    """
    try:
        f = urllib.urlopen(url)
        output = f.read()
        json_output = json.loads(output)
        schedules = json_output['result']['schedules']
        return [
            schedules[0]['destination'].encode('utf-8'), schedules[0]['message'].encode('utf-8'),
            schedules[1]['destination'].encode('utf-8'), schedules[1]['message'].encode('utf-8')
        ]
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; network/parse errors still fall through here.
        return connect_issue()
def unavailable():
    """Placeholder shown when a schedule is not available (NAV)."""
    return ["NAV"] * 4
def connect_issue():
    """Placeholder shown when the API could not be reached (UNC)."""
    return ["UNC"] * 4
class RATPWithExtras:
    """Drives an HD44780 LCD showing the time, DHT22 temperature/humidity and
    upcoming RATP departures; a push button cycles through transport pairs.
    """

    def __init__(self, lcd_rs_pin, lcd_e_pin, lcd_db_pins, button_pin, dht_pin):
        """Store pin assignments and create the DHT and LCD drivers.

        :param lcd_rs_pin: LCD register-select GPIO pin
        :param lcd_e_pin: LCD enable GPIO pin
        :param lcd_db_pins: LCD data-bus GPIO pins
        :param button_pin: GPIO pin of the cycle push button
        :param dht_pin: GPIO pin of the DHT22 sensor
        """
        # LCD
        self.lcd_rs_pin = lcd_rs_pin
        self.lcd_e_pin = lcd_e_pin
        self.lcd_db_pins = lcd_db_pins
        # Button
        self.button_pin = button_pin
        # DHT
        self.dht_type = Adafruit_DHT.DHT22
        self.dht_pin = dht_pin
        self.dht = DHT(self.dht_pin, self.dht_type)
        self.lcd = HD44780(self.lcd_e_pin, self.lcd_rs_pin, self.lcd_db_pins)
        # Index into TRANSPORTS_TO_SHOW; cycle() advances it modulo its length.
        self.cur_transport = 0
        self.transports = TRANSPORTS_TO_SHOW[self.cur_transport]

    def set_temp_if_there(self, temp, col, sign, length):
        """Write a rounded sensor reading (or "None") plus its unit sign
        on the metadata line at column ``col``.
        """
        temp_string = str(temp)
        if temp_string != "None":
            temp_string = str(round(temp, 1))
        self.lcd.message_at(METADATA_LINE, col, temp_string + sign, length)

    def set_humidity(self, temp, col, sign, length):
        """Write a rounded humidity reading on the metadata line.
        NOTE(review): body is identical to set_temp_if_there -- could delegate.
        """
        temp_string = str(temp)
        if temp_string != "None":
            temp_string = str(round(temp, 1))
        self.lcd.message_at(METADATA_LINE, col, temp_string + sign, length)

    def set_trans(self, time, row):
        """Render one transport line (destination + next two timings) on ``row``."""
        timings = time_converter.get_timings(time)
        if isinstance(timings, time_converter.RegularTimings):
            # destination | space | first timing | "/" | second timing
            self.lcd.message_at(row, 4, timings.first_destination, LEN_DESTINATION)
            self.lcd.message_at(row, 12, " ", LEN_SPACE)
            self.lcd.message_at(row, 13, timings.first_timing, LEN_TIME)
            self.lcd.message_at(row, 16, "/", LEN_SEPARATOR)
            self.lcd.message_at(row, 17, timings.second_timing, LEN_TIME)
        elif isinstance(timings, time_converter.TimingIssue):
            # Error message spans the whole destination+timings area.
            self.lcd.message_at(row, 4, timings.message,
                                LEN_DESTINATION + LEN_SPACE + LEN_TIME + LEN_SEPARATOR + LEN_TIME)

    def update_destinations_temperature(self):
        """Refresh both transport rows, the clock, and the DHT readings."""
        next_first = get_page(BUS_TIMES[self.transports[0]])
        self.set_trans(next_first, BUS_LINE_1)
        next_second = get_page(BUS_TIMES[self.transports[1]])
        self.set_trans(next_second, BUS_LINE_2)
        self.lcd.message_at(METADATA_LINE, 0, strftime("%H:%M"), LEN_CLOCK)
        humidity, temp = self.dht.get_temp_humidity()
        temp_string = str(temp)
        if temp_string != "None":
            temp_string = str(round(temp, 1))
        # chr(223) is the degree symbol in the HD44780 character set.
        self.lcd.message_at(METADATA_LINE, 7, temp_string + chr(223) + "C", LEN_TEMP)
        humidity_string = str(humidity)
        if humidity_string != "None":
            humidity_string = str(round(humidity, 1))
        self.lcd.message_at(METADATA_LINE, 15, humidity_string + "%", LEN_HUMIDITY)

    def init_gpio(self):
        """Configure GPIO: button with falling-edge callback, then DHT and LCD."""
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.button_pin, GPIO.IN)
        GPIO.add_event_detect(self.button_pin, GPIO.FALLING, callback=self.cycle)
        self.dht.init_gpio()
        self.lcd.init_gpio()

    # noinspection PyUnusedLocal
    def cycle(self, channel):
        """Button callback: advance to the next transport pair and redraw."""
        self.cur_transport = (self.cur_transport + 1) % len(TRANSPORTS_TO_SHOW)
        self.transports = TRANSPORTS_TO_SHOW[self.cur_transport]
        self.set_destinations()
        self.update_destinations_temperature()

    def set_destinations(self):
        """Write the line numbers (code minus direction suffix) and the separator row."""
        self.lcd.message_at(BUS_LINE_1, 0, self.transports[0][:-1], LEN_DESTINATION_NUMBER)
        self.lcd.message_at(BUS_LINE_2, 0, self.transports[1][:-1], LEN_DESTINATION_NUMBER)
        self.lcd.message_at(INTERMEDIATE_LINE, 0, "--------------------", LEN_FULLLINE)

    def clean(self):
        """Clear the display and release all GPIO resources."""
        self.lcd.clear()
        GPIO.remove_event_detect(self.button_pin)
        GPIO.cleanup()
|
import fastapi
import ulid
import socket
from fastapi import Request
from pkg_resources import require
def get_ulid() -> str:
    """Return a freshly generated ULID in its canonical string form."""
    new_id = ulid.new()
    return new_id.str
def get_request_info(request: Request) -> str:
    """Return the client IP address of the incoming request."""
    client = request.client
    return client.host
def get_host_by_ip_address(ip_address: str) -> str:
    """Resolve ``ip_address`` to its primary host name via reverse DNS."""
    primary_name, _aliases, _addresses = socket.gethostbyaddr(ip_address)
    return primary_name
# BEGIN_COPYRIGHT
#
# Copyright (C) 2015-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Quick hack to compare .npy img plane dumps.
Expected file name structure: <PREFIX>-z<Z_INDEX>-c<C_INDEX>-t<T_INDEX>.npy
where indices are 0-padded to 4 digits, e.g., foo-z0022-c0001-t0000.npy
"""
import sys
import os
import glob
import re
import numpy as np
# File-name structure: <PREFIX>-z<Z>-c<C>-t<T>.npy with 0-padded indices.
FN_PATTERN = re.compile(r'^[^-]+-z(\d+)-c(\d+)-t(\d+).npy$')


def get_zct_max(prefix):
    """Scan all dump files starting with ``prefix`` and return the maximum
    (z, c, t) index triple found.

    Files whose names do not match FN_PATTERN are skipped; previously a
    non-matching name crashed with AttributeError on ``None.groups()``.
    """
    triples = []
    for fn in glob.glob('%s*' % prefix):
        bn = os.path.basename(fn)
        m = FN_PATTERN.match(bn)
        if m is None:
            continue  # ignore unrelated files sharing the prefix
        # Materialize as a list so this also stays correct under Python 3,
        # where map() is lazy.
        triples.append([int(g) for g in m.groups()])
    zs, cs, ts = zip(*triples)
    return max(zs), max(cs), max(ts)
# Parse the two required prefixes from the command line; show usage on error.
try:
    prefix_a = sys.argv[1]
    prefix_b = sys.argv[2]
except IndexError:
    sys.exit("Usage: %s PREFIX_A PREFIX_B\n%s" % (sys.argv[0], __doc__))
z_max, c_max, t_max = get_zct_max(prefix_a)
# Both dumps must cover exactly the same (z, c, t) index grid.
if get_zct_max(prefix_b) != (z_max, c_max, t_max):
    sys.exit("ERROR: different file structure for the two prefixes")
# NOTE: Python 2 syntax (xrange, print statement) throughout this script.
for z in xrange(z_max + 1):
    for c in xrange(c_max + 1):
        for t in xrange(t_max + 1):
            # Corresponding plane file from each prefix.
            fnames = ['%s-z%04d-c%04d-t%04d.npy' % (_, z, c, t)
                      for _ in prefix_a, prefix_b]
            print z, c, t,
            sys.stdout.flush()
            try:
                a, b = [np.load(_) for _ in fnames]
            except IOError as e:
                print 'ERROR: %s' % e
            else:
                # Element-wise comparison of the two planes.
                print 'OK' if np.array_equal(a, b) else 'ERROR: arrays differ'
|
from dictionarydb.language import get_language
def test_get_language():
    """A known ISO 639-3 code resolves to a language entry."""
    language = get_language("deu")
    assert language is not None
def test_get_invalid_language():
    """An unknown code yields None rather than raising."""
    assert get_language("invalid") is None
|
#!/usr/bin/env python
"""
Utilities for running code: logging, timing/benchmarking.
Author: Johann Petrak
"""
import sys
import logging
import datetime
import time
logger = None  # module-level singleton, created by set_logger()
start = 0  # wall-clock start time, set by run_start()


def set_logger(args=None, name=None, file=None):
    """
    Set up logger for the module "name". If file is given, log to that file as well.
    If file is not given but args is given and has "outpref" parameter, log to
    file "outpref.DATETIME.log" as well.

    :param name: name to use in the log, if None, uses sys.argv[0]
    :param file: if given, log to this destination in addition to stderr
    :param args: if given, an argparser namespace, checks for: "d" and "outpref"
    :return: the logger instance
    """
    global logger
    if name is None:
        name = sys.argv[0]
    if logger:
        raise Exception("Odd, we should not have a logger yet?")
    logger = logging.getLogger(name)
    # Debug level only when the argparse namespace carries a truthy "d" flag.
    if args and hasattr(args, "d") and args.d:
        lvl = logging.DEBUG
    else:
        lvl = logging.INFO
    logger.setLevel(lvl)
    fmt = logging.Formatter('%(asctime)s|%(levelname)s|%(name)s|%(message)s')
    hndlr = logging.StreamHandler(sys.stderr)
    hndlr.setFormatter(fmt)
    logger.addHandler(hndlr)
    if not file and args and hasattr(args, "outpref"):
        dt = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        file = args.outpref + f".{dt}.log"
    if file:
        file_handler = logging.FileHandler(file)
        # BUG FIX: the original did hndlr.setFormatter(fmt) here, re-setting
        # the formatter on the stream handler and leaving the file handler
        # (misspelled "hdnlr") without any formatter.
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    # BUG FIX: the time format was "%H:%M%S" (missing colon before seconds).
    logger.info("Started: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    if args:
        logger.info("Arguments: {}".format(args))
    return logger
def ensurelogger():
    """
    Return the global logger, installing a fallback if set_logger was never
    called. Guards functions that log before the logger is properly set up.

    :return: global logger
    """
    global logger
    if logger is None:
        logger = logging.getLogger("UNINITIALIZEDLOGGER")
    return logger
def run_start():
    """
    Record and return the wall-clock time at which the run starts.

    :return: system time in seconds
    """
    global start
    start = time.time()
    return start
def run_stop():
    """
    Log and return the formatted elapsed time since run_start().

    :return: tuple of formatted run time, run time in seconds
    """
    log = ensurelogger()
    if not start:
        log.warning("Run timing not set up properly, no time!")
        return "", 0
    elapsed = time.time() - start
    formatted = str(datetime.timedelta(seconds=elapsed))
    log.info(f"Runtime: {formatted}")
    return formatted, elapsed
|
import json
import numpy as np
import time
from keras.models import Sequential, Model, model_from_json
from keras.layers import Input, Masking, Dense, Activation, Dropout,CuDNNLSTM, LSTM, Conv1D, Flatten, GlobalAveragePooling1D, BatchNormalization, MaxPool1D, AveragePooling1D, PReLU
from keras.optimizers import Adam, SGD
from sklearn.preprocessing import MinMaxScaler
class Keras_Model:
    """
    A Keras Sequential Model Wrapper
    """

    def __init__(self):
        """
        Keras Model Constructor
        """
        self.model = Sequential()

    def Compile(self, loss, optimizer, metrics='accuracy'):
        """
        Compile the Keras Sequential Model

        Args:
            loss: Loss function to use
            optimizer: Optimizer to use
            metrics: Metric name (str) or list of metrics to validate performance
        """
        start = time.time()
        # BUG FIX: the metrics argument used to be ignored -- ['accuracy'] was
        # always hard-coded. A bare string is wrapped in a list, so the default
        # still compiles with ['accuracy'] as before.
        metric_list = list(metrics) if isinstance(metrics, (list, tuple)) else [metrics]
        self.model.compile(loss=loss, optimizer=optimizer, metrics=metric_list)
        print("Compilation Time: ", time.time() - start)
        print(self.model.summary())

    def FitData(self, X_train, y_train, batch_size, nb_epochs, verbose=1, validation_split=None):
        """
        Fits training data to the model

        Args:
            X_train: Features to fit during training
            y_train: Target column to fit during training
            batch_size: Number of batches at a time
            nb_epochs: Number of epochs to train the model
            verbose: Log verbosity
            validation_split: Take a split from the training data for validation?

        Returns: Model History containing training performance
        """
        if validation_split is not None:
            hist = self.model.fit(X_train, y_train, batch_size, nb_epochs, validation_split=validation_split, verbose=verbose)
        else:
            hist = self.model.fit(X_train, y_train, batch_size, nb_epochs, verbose=verbose)
        return hist

    def FitDataWithValidation(self, X_train, y_train, X_val, y_val, batch_size, nb_epochs, verbose=1):
        """
        Fits training data to the model but uses separate validation data

        Args:
            X_train: Features to fit during training
            y_train: Target column to fit during training
            X_val: Features to fit during validation
            y_val: Target column to fit during validation
            batch_size: Number of batches at a time
            nb_epochs: Number of epochs to train the model
            verbose: Log verbosity

        Returns: Model History containing training performance
        """
        return self.model.fit(X_train, y_train, batch_size, nb_epochs, verbose=verbose, validation_data=(X_val, y_val))

    def FitDataWithValidationCallbacks(self, X_train, y_train, X_val, y_val, batch_size, nb_epochs, cb1, cb2, verbose):
        """
        Fits training data to the model but uses separate validation data and callbacks

        Args:
            X_train: Features to fit during training
            y_train: Target column to fit during training
            X_val: Features to fit during validation
            y_val: Target column to fit during validation
            batch_size: Number of batches at a time
            nb_epochs: Number of epochs to train the model
            cb1: Callback 1
            cb2: Callback 2
            verbose: Log verbosity

        Returns: Model History containing training performance
        """
        return self.model.fit(X_train, y_train, batch_size, nb_epochs, verbose=verbose,
                              validation_data=(X_val, y_val), callbacks=[cb1, cb2])

    def Evaluate(self, X_test, y_test, batch_size, verbose=1):
        """
        Evaluates the fitted model on new data

        Args:
            X_test: Features to evaluate on
            y_test: Target column to evaluate against
            batch_size: Number of batches at a time
            verbose: Log verbosity

        Returns: (score, accuracy) tuple from model.evaluate
        """
        score, acc = self.model.evaluate(X_test, y_test, batch_size, verbose=verbose)
        return score, acc

    def Predict(self, X_test, y_test):
        """
        Args:
            X_test: Features to predict
            y_test: Target column to validate predictions

        Returns:
            Y_score = Probabilities for each sample in the test data
            Y_predict = Predicted classes for each sample in the test data
            Y_true = True labels
        """
        # NOTE(review): predict_classes was removed in recent Keras/TF versions;
        # kept here because the surrounding code targets the old API.
        Y_score = self.model.predict(X_test)
        Y_predict = self.model.predict_classes(X_test)
        Y_true = np.argmax(y_test, axis=1)
        return (Y_score, Y_predict, Y_true)

    def GetModel(self):
        """
        Returns: Keras sequential model
        """
        return self.model

    def SetModel(self, model):
        """
        Overwrites the current Keras Sequential model

        Args:
            model: Keras sequential model
        """
        self.model = model
class LSTM_Model(Keras_Model):
"""
LSTM child of KerasModel
"""
    def __init__(self, nb_lstm_layers=0, nb_lstm_units=10, nb_fc_layers=0, nb_fc_units=32, output_dim=1, sequence_length=0, dropout=0.1, activation='relu', batch_normalisation=True):
        """
        LSTM Constructor

        Args:
            nb_lstm_layers: Number of LSTM layers
            nb_lstm_units: Number of LSTM hidden units
            nb_fc_layers: Number of Fully Connected Layers
            nb_fc_units: Number of Hidden Units in the Fully Connected Layers
            output_dim: Output dimensionality
            sequence_length: Length of the input sequence
            dropout: Dropout fraction
            activation: Activation function to use ('prelu' selects a PReLU layer)
            batch_normalisation: Boolean flag for batch normalisation layers
        """
        Keras_Model.__init__(self)
        print('Initialising LSTM Model... \n')
        # Hyperparameters are only stored here; Build() assembles the network.
        self.nb_lstm_layers = nb_lstm_layers
        self.nb_lstm_units = nb_lstm_units
        self.nb_fc_layers = nb_fc_layers
        self.nb_fc_units = nb_fc_units
        self.output_dim = output_dim
        self.sequence_length = sequence_length
        self.dropout = dropout
        self.activation = activation
        self.batch_norm = batch_normalisation
def Build(self):
"""
Builds the topology of the network
"""
print('Building LSTM... \n' )
print('Number of LSTM layers: ', self.nb_lstm_layers)
print('Number of LSTM units: ', self.nb_lstm_units)
print('Number of FC layers: ', self.nb_fc_layers)
print('Number of FC units: ', self.nb_fc_units)
print('Dropout: ', self.dropout)
print('Activation:', self.activation)
print('Batch Normalisation:', self.batch_norm)
print('\n')
self.AddInputLayer()
for i in range(self.nb_lstm_layers):
self.AddLSTMLayer()
self.model.add(Flatten())
self.AddOutputLayer()
def AddMaskingLayer(self, value):
"""
Args:
value:
"""
# Masking layer
self.model.add(Masking(mask_value=value, input_shape=(self.sequence_length, self.output_dim)))
def AddInputLayer(self, return_seqs=True):
"""
Args:
return_seqs:
"""
# Add masking layer is masking value exists
self.model.add(CuDNNLSTM(self.nb_lstm_units, input_shape=(self.sequence_length, self.output_dim), return_sequences=return_seqs))
# Batch Norm Layer
self.model.add(BatchNormalization())
self.AddDropoutLayer()
# Activation Layer
if (self.activation == 'prelu'):
self.model.add(PReLU())
else:
self.model.add(Activation(self.activation))
def AddDropoutLayer(self):
"""
"""
self.model.add(Dropout(self.dropout))
def AddLSTMLayer(self):
"""
"""
self.model.add(CuDNNLSTM(self.nb_lstm_units, return_sequences=True))
# Batch Norm Layer
if (self.batch_norm):
self.model.add(BatchNormalization())
self.AddDropoutLayer()
# Activation Layer
if (self.activation == 'prelu'):
self.model.add(PReLU())
else:
self.model.add(Activation(self.activation))
def AddOutputLayer(self):
"""
"""
for i in range(self.nb_fc_layers):
self.AddFCLayer()
self.model.add(Dense(self.output_dim))
self.model.add(Activation('sigmoid'))
def AddFCLayer(self):
"""
"""
self.model.add(Dense(self.nb_fc_units))
# Dropout layer
self.model.add(Dropout(self.dropout))
# Activation Layer
if (self.activation == 'prelu'):
self.model.add(PReLU())
else:
self.model.add(Activation(self.activation))
def LoadLSTMConfiguration(self, config):
"""
Args:
config:
"""
self.nb_lstm_layers = config['lstm_layers']
self.nb_lstm_units = config['lstm_units']
self.nb_fc_layers = config['fc_layers']
self.nb_fc_units = config['fc_units']
self.dropout = config['dropout']
self.activation = config['activation']
self.batch_norm = config['batch_norm']
self.Build()
self.Compile(loss='binary_crossentropy',
optimizer=SGD(lr=0.001 * config['lr_rate_mult'], momentum=config['momentum'], decay=0.0001,
nesterov=True), metrics=['accuracy'])
print('Loaded and compiled Keras model succesfully. \n')
def SaveLSTMModel(self, name, config=False, weights=False):
"""
Args:
name:
config:
weights:
"""
root = 'models/'
# Save model with weights and training configuration
if (weights):
model_json = self.model.to_json()
with open(root + name + '.json', "w") as json_file:
json_file.write(model_json)
# Serialise weights
self.model.save_weights(root + name + '.h5')
if(config):
with open(root + 'configs/' + name + '.json', 'w') as fp:
json.dump(config, fp)
# Save configuration for rebuilding model
else:
dict = {}
dict['lstm_layers']= self.nb_lstm_layers
dict['lstm_units']= self.nb_lstm_units
dict['fc_layers']= self.nb_fc_layers
dict['fc_units']= self.nb_fc_units
dict['dropout']= self.dropout
dict['activation']= self.activation
dict['batch_norm']= self.batch_norm
with open(root + 'configs/' + name + '.json', 'w') as fp:
json.dump(dict, fp)
print('Saved model to disk.')
class CNN_Model(Keras_Model):
    """1-D CNN child of Keras_Model: stacked conv/pool blocks feeding a
    fully-connected head with a sigmoid output."""

    def __init__(self, output_dim=1, sequence_length=10,
                 nb_blocks=1, filters=8, kernel_size=5, activation='relu',
                 pooling='max', pool_strides=2, pool_size=5,
                 conv_dropout=0.2, fc_dropout=0.5, dense_units=64, batch_norm=True):
        """CNN constructor.

        Args:
            output_dim: Output dimensionality (also the input feature dim)
            sequence_length: Length of the input sequences
            nb_blocks: Number of convolutional blocks after the input block
            filters: Number of filters in the convolutional layers
            kernel_size: Size of the kernel to use in convolutional layers
            activation: Activation function name, or 'prelu'
            pooling: Type of pooling ('max' or 'average')
            pool_strides: Pooling strides
            pool_size: Pooling window size
            conv_dropout: Dropout after conv layers (None disables)
            fc_dropout: Dropout after dense layers (None disables)
            dense_units: Number of hidden units in fully connected layers
            batch_norm: Whether to insert BatchNormalization layers
        """
        print('Initialising CNN Model... \n')
        Keras_Model.__init__(self)
        self.output_dim = output_dim
        self.sequence_length = sequence_length
        self.nb_blocks = nb_blocks
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.pooling = pooling
        self.conv_dropout = conv_dropout
        self.fc_dropout = fc_dropout
        self.dense_units = dense_units
        self.batch_norm = batch_norm
        self.pooling_strides = pool_strides
        self.pooling_size = pool_size

    def Build(self):
        """Builds the topology of the network."""
        print('Building CNN... \n')
        print('Filters: ', self.filters)
        print('Kernel Size: ', self.kernel_size)
        print('Number of blocks: ', self.nb_blocks)
        print('Pooling type:', self.pooling)
        print('Pooling Strides:', self.pooling_strides)
        print('Pooling Length:', self.pooling_size)
        print('Conv Dropout: ', self.conv_dropout)
        print('FC Dropout: ', self.fc_dropout)
        print('Dense Units: ', self.dense_units)
        print('Activation:', self.activation)
        print('Batch Normalisation:', self.batch_norm)
        print('\n')
        self.AddInputBlock()
        for i in range(self.nb_blocks):
            self.AddCNNBlock()
        self.AddOutputBlock()

    def _add_activation(self):
        """Append the configured activation ('prelu' needs a layer object)."""
        if (self.activation == 'prelu'):
            self.model.add(PReLU())
        else:
            self.model.add(Activation(self.activation))

    def _add_pooling(self):
        """Append the configured pooling layer ('max' or 'average'); any
        other value adds no pooling, matching the original behaviour."""
        if (self.pooling == 'max'):
            self.model.add(MaxPool1D(pool_size=self.pooling_size, strides=self.pooling_strides))
        elif (self.pooling == 'average'):
            self.model.add(AveragePooling1D(pool_size=self.pooling_size, strides=self.pooling_strides))

    def AddInputBlock(self):
        """Input block: Conv1D -> [batch norm] -> pooling -> [dropout] -> activation."""
        self.model.add(Conv1D(filters=self.filters, kernel_size=self.kernel_size,
                              input_shape=(self.sequence_length, self.output_dim)))
        if (self.batch_norm):
            self.model.add(BatchNormalization())
        self._add_pooling()
        if (self.conv_dropout is not None):
            self.model.add(Dropout(self.conv_dropout))
        self._add_activation()

    def AddCNNBlock(self):
        """Hidden block: Conv1D -> [batch norm] -> pooling -> [dropout] -> activation."""
        self.model.add(Conv1D(filters=self.filters, kernel_size=self.kernel_size))
        if (self.batch_norm):
            self.model.add(BatchNormalization())
        self._add_pooling()
        if (self.conv_dropout is not None):
            self.model.add(Dropout(self.conv_dropout))
        self._add_activation()

    def AddOutputBlock(self):
        """FC head: four Dense stacks, then Dense(1, sigmoid).

        NOTE(review): the four copy-pasted Dense stacks in the original
        differ subtly (the first is Dense->Dropout->Activation, the middle
        two are Dense->Activation->Dropout, the last has no dropout). That
        exact ordering is preserved here; confirm before normalising it.
        """
        self.model.add(Flatten())
        # Stack 1: Dense -> dropout -> activation
        self.model.add(Dense(self.dense_units))
        if self.fc_dropout is not None:
            self.model.add(Dropout(self.fc_dropout))
        self._add_activation()
        # Stack 2: Dense -> activation -> dropout
        self.model.add(Dense(int(self.dense_units)))
        self._add_activation()
        if self.fc_dropout is not None:
            self.model.add(Dropout(self.fc_dropout))
        # Stack 3: Dense -> activation -> dropout
        self.model.add(Dense(int(self.dense_units)))
        self._add_activation()
        if self.fc_dropout is not None:
            self.model.add(Dropout(self.fc_dropout))
        # Stack 4: Dense -> activation (no dropout)
        self.model.add(Dense(int(self.dense_units)))
        self._add_activation()
        # Binary classification output
        self.model.add(Dense(1, activation='sigmoid'))

    def SetSequenceLength(self, seq_length):
        """Set the input sequence length used by Build().

        Args:
            seq_length: New sequence length.
        """
        self.sequence_length = seq_length

    def SetOutputDimension(self, dim):
        """Set the output dimensionality used by Build().

        Args:
            dim: New output dimensionality.
        """
        self.output_dim = dim

    def SaveCNNModel(self, name, config=False, weights=False):
        """Save the model under models/<name>.

        Args:
            name: Base filename (no extension)
            config: Optional training configuration dict to persist
            weights: If True, also save architecture JSON and weights HDF5
        """
        root = 'models/'
        # Save model with weights and training configuration
        if (weights):
            model_json = self.model.to_json()
            with open(root + name + '.json', "w") as json_file:
                json_file.write(model_json)
            # Serialise weights
            self.model.save_weights(root + name + '.h5')
        if (config):
            with open(root + 'configs/' + name + '.json', 'w') as fp:
                json.dump(config, fp)
        # Save configuration for rebuilding model
        else:
            # Bug fix: previously named `dict`, shadowing the builtin.
            config_dict = {}
            config_dict['nb_blocks'] = self.nb_blocks
            config_dict['filters'] = self.filters
            config_dict['kernel_size'] = self.kernel_size
            config_dict['activation'] = self.activation
            config_dict['pooling'] = self.pooling
            config_dict['pooling_strides'] = self.pooling_strides
            config_dict['pooling_size'] = self.pooling_size
            config_dict['conv_dropout'] = self.conv_dropout
            config_dict['fc_dropout'] = self.fc_dropout
            config_dict['fc_units'] = self.dense_units
            config_dict['batch_norm'] = self.batch_norm
            with open(root + 'configs/' + name + '.json', 'w') as fp:
                json.dump(config_dict, fp)
        print('Saved model to disk.')

    def LoadCNNConfiguration(self, config):
        """Rebuild and compile the model from a configuration dict.

        Args:
            config: dict with the keys written by SaveCNNModel plus
                lr_rate_mult and momentum for the optimiser.
        """
        self.nb_blocks = config['nb_blocks']
        self.filters = config['filters']
        self.kernel_size = config['kernel_size']
        self.activation = config['activation']
        self.pooling = config['pooling']
        self.pooling_strides = config['pooling_strides']
        self.pooling_size = config['pooling_size']
        self.conv_dropout = config['conv_dropout']
        self.fc_dropout = config['fc_dropout']
        self.dense_units = config['fc_units']
        self.batch_norm = config['batch_norm']
        self.Build()
        self.Compile(loss='binary_crossentropy',
                     optimizer=SGD(lr=0.001 * config['lr_rate_mult'], momentum=config['momentum'], decay=0.0001,
                                   nesterov=True), metrics=['accuracy'])
        print('Loaded and compiled Keras model succesfully. \n')

    def LoadConfigurationFromFile(self, config_name):
        """Load hyper-parameters from models/configs/<config_name>.json.

        Does not build or compile; call Build()/Compile() afterwards.

        Args:
            config_name: Base filename of the JSON configuration.
        """
        root = 'models/configs/'
        # Bug fix: context manager so the file is closed even on error.
        with open(root + config_name + '.json', 'r') as json_file:
            config = json.loads(json_file.read())
        self.nb_blocks = config['nb_blocks']
        self.filters = config['filters']
        self.kernel_size = config['kernel_size']
        self.activation = config['activation']
        self.pooling = config['pooling']
        self.pooling_strides = config['pooling_strides']
        self.pooling_size = config['pooling_size']
        self.conv_dropout = config['conv_dropout']
        self.fc_dropout = config['fc_dropout']
        self.dense_units = config['fc_units']
        self.batch_norm = config['batch_norm']
        #self.Build()
        #self.Compile(loss='binary_crossentropy',
        #             optimizer=SGD(lr=0.001 * config['lr_rate_mult'], momentum=config['momentum'], decay=0.0001,
        #                           nesterov=True), metrics=['accuracy'])
        #print('Loaded and compiled Keras model succesfully. \n')
|
import logging
from datetime import datetime
import json
import core
from core import sqldb
from core.helpers import Url
from fuzzywuzzy import fuzz
logging = logging.getLogger(__name__)
class ScoreResults():
    """Filters search results and assigns each a numeric 'score'.

    NOTE(review): written for Python 2 (dict.iteritems, u'' literals,
    integer division) -- confirm interpreter version before porting.
    """

    def __init__(self):
        self.sql = sqldb.SQL()
        return

    # returns list of dictionary results after filtering and scoring
    def score(self, results, imdbid=None, quality_profile=None):
        ''' Scores and filters search results.
        results: list of dicts of search results
        imdbid: str imdb identification number (tt123456) <optional*>
        quality_profile: str quality profile name <optional*>
        Either imdbid or quality_profile MUST be passed.
        If imdbid passed, finds quality in database row.
        If profile_quality passed, uses that quality and ignores db.
        quality_profile can be set to 'import', which uses 'Default' settings,
        but doesn't allow the result to be filtered out.
        Iterates over the list and filters movies based on Words.
        Scores movie based on resolution priority, title match, and
        preferred words.
        Word groups are split in to a list of lists:
        [['word'], ['word2', 'word3'], 'word4']
        Returns list of dicts.
        '''
        if imdbid is None and quality_profile is None:
            logging.warning('Neither imdbid or quality_profile passed.')
            return results
        self.results = results
        # No explicit profile: look the movie's profile and title up in the db.
        if quality_profile is None:
            movie_details = self.sql.get_movie_details('imdbid', imdbid)
            quality_profile = movie_details['quality']
            title = movie_details['title']
        else:
            title = None
        # Resolve the profile dict; unknown names fall back to 'Default'.
        if quality_profile == 'import':
            quality = self.import_quality()
        elif quality_profile in core.CONFIG['Quality']['Profiles']:
            quality = core.CONFIG['Quality']['Profiles'][quality_profile]
        else:
            quality = core.CONFIG['Quality']['Profiles']['Default']
        # Only the resolution entries of the profile (Python 2 iteritems).
        resolution = {k: v for k, v in quality.iteritems() if k in ['4K', '1080P', '720P', 'SD']}
        retention = core.CONFIG['Search']['retention']
        seeds = core.CONFIG['Search']['mintorrentseeds']
        # 'a&b,c' -> [['a', 'b'], ['c']]: groups are AND, list entries are OR.
        required = [i.split('&') for i in quality['requiredwords'].lower().replace(' ', '').split(',') if i != '']
        preferred = [i.split('&') for i in quality['preferredwords'].lower().replace(' ', '').split(',') if i != '']
        ignored = [i.split('&') for i in quality['ignoredwords'].lower().replace(' ', '').split(',') if i != '']
        today = datetime.today()
        # These all just modify self.results
        self.reset()
        # NOTE(review): remove_inactive is not defined in this class as
        # shown -- confirm it exists elsewhere (mixin or later edit).
        self.remove_inactive()
        self.remove_ignored(ignored)
        self.keep_required(required)
        self.retention_check(retention, today)
        self.seed_check(seeds)
        self.score_resolution(resolution)
        if quality['scoretitle']:
            self.fuzzy_title(title)
        self.score_preferred(preferred)
        return self.results

    def reset(self):
        # Zero every result's score before re-scoring.
        for i, d in enumerate(self.results):
            self.results[i]['score'] = 0

    def remove_ignored(self, group_list):
        ''' Remove results with ignored groups of 'words'
        :param group_list: list of forbidden groups of words
        group_list is a list of lists.
        Iterates through self.results and removes every entry that contains
        any group of 'words'
        A group of 'words' is multiple 'words' concatenated with an ampersand '&'
        Does not return
        '''
        keep = []
        if not group_list or group_list == [u'']:
            return
        for r in self.results:
            cond = False
            for word_group in group_list:
                # A group matches only if ALL of its words appear in the title.
                if all(word in r['title'].lower() for word in word_group):
                    cond = True
                    break
            if cond is False and r not in keep:
                keep.append(r)
        self.results = keep

    def keep_required(self, group_list):
        ''' Remove results without required groups of 'words'
        :param group_list: list of required groups of words
        Iterates through self.results and removes every entry that does not
        contain any group of 'words'
        A group of 'words' is multiple 'words' concatenated with an ampersand '&'
        Does not return
        '''
        keep = []
        if not group_list or group_list == [u'']:
            return
        for r in self.results:
            for word_group in group_list:
                if all(word in r['title'].lower() for word in word_group) and r not in keep:
                    keep.append(r)
                    break
            else:
                continue
        self.results = keep

    def retention_check(self, retention, today):
        ''' Remove results older than 'retention' days
        :param retention: int days of retention limit
        :param today: datetime obj today's date
        Iterates through self.results and removes any entry that was published
        more than 'retention' days ago
        Does not return
        '''
        if retention == 0:
            return
        lst = []
        for result in self.results:
            # Retention only applies to usenet (nzb) results.
            if result['type'] != u'nzb':
                lst.append(result)
            else:
                pubdate = datetime.strptime(result['pubdate'], '%d %b %Y')
                age = (today - pubdate).days
                if age < retention:
                    lst.append(result)
        self.results = lst

    def seed_check(self, seeds):
        ''' Remove any torrents with fewer than 'seeds' seeders
        seeds: int # of seeds required
        Does not return
        '''
        if seeds == 0:
            return
        lst = []
        for result in self.results:
            # Seed counts only apply to torrent/magnet results.
            if result['type'] not in ['torrent', 'magnet']:
                lst.append(result)
            else:
                if int(result['seeders']) >= seeds:
                    lst.append(result)
        self.results = lst

    def score_preferred(self, group_list):
        ''' Increase score for each group of 'words' match
        :param group_list: list of preferred groups of words
        Iterates through self.results and increases ['score'] each time a
        preferred group of 'words' is found
        A group of 'words' is multiple 'words' concatenated with an ampersand '&'
        Does not return
        '''
        if not group_list or group_list == [u'']:
            return
        for r in self.results:
            for word_group in group_list:
                if all(word in r['title'].lower() for word in word_group):
                    # Only the first matching group scores (break after +10).
                    r['score'] += 10
                    break
            else:
                continue

    def fuzzy_title(self, title):
        ''' Score and remove results based on title match
        title: str title of movie <optional*>
        Iterates through self.results and removes any entry that does not
        fuzzy match 'title' > 60.
        Adds fuzzy_score / 20 points to ['score']
        *If title is passed as None, assumes perfect match and scores +20
        Does not return
        '''
        lst = []
        if title is None:
            for result in self.results:
                result['score'] += 20
                lst.append(result)
        else:
            for result in self.results:
                # Normalise both titles to dot-separated lowercase for fuzz.
                title = title.replace(u' ', u'.').replace(u':', u'.').lower()
                test = result['title'].replace(u' ', u'.').lower()
                match = fuzz.token_set_ratio(title, test)
                if match > 60:
                    # NOTE(review): Python 2 integer division (match / 5).
                    result['score'] += (match / 5)
                    lst.append(result)
        self.results = lst

    def score_resolution(self, resolutions):
        ''' Score releases based on quality preferences
        :param resolutions: dict of resolution preferences from the profile
        Iterates through self.results and removes any entry that does not
        fit into quality criteria (resolution, filesize)
        Adds to ['score'] based on resolution priority
        Does not return
        '''
        lst = []
        for result in self.results:
            result_res = result['resolution']
            # Bytes -> MB; NOTE(review): Python 2 integer division.
            size = result['size'] / 1000000
            for k, v in resolutions.iteritems():
                # v = [enabled, priority, min_size, max_size]
                if v[0] is False:
                    continue
                priority = v[1]
                min_size = v[2]
                max_size = v[3]
                if result_res == k:
                    if min_size < size < max_size:
                        # Lower priority number = more preferred resolution.
                        result['score'] += (8 - priority) * 100
                        lst.append(result)
        self.results = lst

    def import_quality(self):
        # Deep-copy the Default profile via a JSON round-trip, then relax it
        # so nothing imported can be filtered out.
        profile = json.loads(json.dumps(core.CONFIG['Quality']['Profiles']['Default']))
        profile['ignoredwords'] = u''
        profile['requiredwords'] = u''
        resolutions = ['4K', '1080P', '720P', 'SD']
        for i in resolutions:
            if profile[i][0] is False:
                profile[i][1] = 4
                profile[i][0] = True
                profile[i][2] = 0
                # NOTE(review): Ellipsis as an 'unbounded max size' sentinel;
                # `int < Ellipsis` only works in Python 2 -- confirm.
                profile[i][3] = Ellipsis
        return profile
"""
SCORING COLUMNS. I swear some day this will make sense.
4321
<4>
0-4
Resolution Match. Starts at 8.
Remove 1 point for the priority of the matched resolution.
So if we want 1080P then 720P in that order, 1080 movies will get 0 points
removed, where 720P will get 1 point removed.
We do this because the jquery sortable gives higher priority items a lower
number, so 0 is the most important item. This allows a large amount of
preferred word matches to overtake a resolution match.
<3-1>
0-100
Add 10 points for every preferred word match.
"""
|
import math
import torch
def compute_reward(monitor_interval):
    """
    Get an Estimate of the throughput of the network
    using on the last few packets (the lag)

    NOTE(review): still a stub -- it computes intermediate values but falls
    through to ``pass`` and returns None, which callers store directly
    (see MonitorHistory.losses).
    """
    # Fraction of sent packets that were acked (a delivery ratio; despite
    # the name this is not the loss rate -- TODO confirm intent).
    loss = len(monitor_interval.ack)/len(monitor_interval.sent)
    # Mean round-trip time over the interval; raises ZeroDivisionError if
    # no RTT samples were recorded.
    latency = sum(monitor_interval.rtts)/len(monitor_interval.rtts)
    ##################################
    ## Need to calculate throughput ##
    ##################################
    pass
class MonitorInterval(object):
    """Per-interval record of a congestion window and the traffic seen
    while it was active (sent packets, acks, RTT samples)."""

    def __init__(self, cwnd):
        self.cwnd = cwnd
        # Traffic observed during this interval; filled in externally.
        self.sent, self.ack, self.rtts = [], [], []
class MonitorHistory(object):
    """Fixed-length sliding window of congestion windows and their rewards."""

    def __init__(self, history_length=10):
        # Bug fix: previously hard-coded to 10, silently ignoring the
        # history_length argument.
        self.history_length = history_length
        self.cwnds = []
        self.losses = []

    def add_monitor_interval(self, monitor_interval):
        """Record *monitor_interval*'s cwnd and reward, evicting the oldest
        entry once the window already holds history_length items."""
        if len(self.cwnds) >= self.history_length:
            # clear out the oldest interval to keep the window bounded
            self.cwnds = self.cwnds[1:]
            self.losses = self.losses[1:]
        self.cwnds.append(monitor_interval.cwnd)
        self.losses.append(compute_reward(monitor_interval))
|
from abc import ABC, abstractmethod
from collections import defaultdict, deque
from curses import qiflush
import json
import pickle
from queue import Queue
import random
import time
from typing import Tuple
from astar_py.constants import WallPlacement
from astar_py.node import Node
class Maze:
    """A square grid of Node cells with optional start/end markers."""

    def __init__(self, rows, width):
        self.rows = rows
        self.width = width
        self.grid = self.__make_grid(self.rows, self.width)
        self.start = None
        self.end = None

    def __make_grid(self, rows, width):
        # Each cell is `gap` pixels wide; build a rows x rows matrix of Nodes.
        gap = width // rows
        return [[Node(r, c, gap, rows) for c in range(rows)]
                for r in range(rows)]

    def generate(self, draw) -> tuple[Node, Node]:
        """Carve a maze into the grid; returns (start_node, end_node)."""
        generator: MazeGenerator = KruskalsGenerator(self)
        return generator.generate_maze(draw)

    def remove_neighbours(self):
        """Strip neighbour links from every cell in the grid."""
        for row in self.grid:
            for cell in row:
                cell.remove_all_neighbors()
class MazeGenerator(ABC):
    """Abstract base class for maze-generation strategies."""

    def __init__(self, maze: "Maze"):
        # String annotation: Maze is defined in this module; the forward
        # reference avoids an import-order dependency.
        self.maze = maze

    @abstractmethod
    def generate_maze(self, draw) -> None:
        """Generate paths through a maze.

        Bug fix: previously raised ``NotImplemented`` (a constant, which
        itself raises TypeError when raised); abstract stubs should raise
        NotImplementedError. The *draw* parameter is added to match the
        concrete subclasses' signature.
        """
        raise NotImplementedError
class Wall:
    """A wall attached to *from_node* on the side given by *position*."""

    def __init__(self, from_node: "Node", position: "WallPlacement"):
        # Forward-reference annotations: both types live in astar_py.
        self.from_node = from_node
        self.position = position
class KruskalsGenerator(MazeGenerator):
    """MazeGenerator subclass that generates mazes using a modified version of Kruskal's algorithm."""

    def generate_maze(self, draw) -> "tuple[Node, Node]":
        """Generate paths through a maze using a modified version of Kruskal's algorithm.

        Returns (start_node, end_node) of the carved maze.
        NOTE(review): *draw* is accepted but never used here -- presumably a
        rendering callback; confirm against callers.
        """
        grid = self.maze.grid
        sets = DisjointSet()
        # Flatten the grid; every node starts in its own singleton set.
        all_nodes = [item for sublist in grid for item in sublist]
        sets.make_set(all_nodes)
        walls = deque()
        for node in all_nodes:
            node.reset()
            node.add_walls()
            node.update_neighbors(grid)
            # One Wall record per (node, side) pair.
            walls.extend([Wall(node, w) for w in node.walls])
        # Randomised wall order is what makes the maze layout random.
        random.shuffle(walls)
        start_node = all_nodes[0]
        end_node = all_nodes[-1]
        start_node.make_start()
        end_node.make_end()
        print("Generating maze...")
        while len(walls) > 0:
            wall: Wall = walls.pop()
            other_node = wall.from_node.neighbors[wall.position]
            # Knock a wall down only if it separates two distinct sets;
            # unioning as we go carves a spanning tree (a perfect maze).
            if other_node and sets.find(wall.from_node) != sets.find(other_node):
                wall.from_node.open_walls(wall.position)
                other_node.open_walls(wall.position.opposite())
                sets.union(wall.from_node, other_node)
        return (start_node, end_node)
class DisjointSet:
    """Union-find (disjoint set) over hashable items.

    Bug fix: ``parent`` was a class attribute, so every DisjointSet
    instance shared a single dict (state leaked between mazes); it is
    now per-instance state initialised in __init__.
    """

    def __init__(self):
        self.parent = {}

    def make_set(self, universe):
        """Place each item of *universe* in its own singleton set."""
        for item in universe:
            self.parent[item] = item

    def find(self, k):
        """Return the representative (root) of the set containing *k*."""
        return k if self.parent[k] == k else self.find(self.parent[k])

    def union(self, a, b):
        """Merge the sets containing *a* and *b*."""
        x = self.find(a)
        y = self.find(b)
        self.parent[x] = y
if __name__ == '__main__':
    # Demo: build a 20x20 maze and carve it without a rendering callback.
    demo_maze = Maze(20, 20)
    demo_maze.generate(None)
"""
http://cs-people.bu.edu/sbargal/Fall%202016/lecture_notes/Nov_3_3d_geometry_representation
https://en.wikipedia.org/wiki/Quadratic_form
https://en.wikipedia.org/wiki/Conic_section
https://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
"""
import numpy as np
from sympy import *
def print_implicit(title, Q, p, eig=True):
    """Pretty-print the conic/quadric described by symmetric matrix Q.

    Args:
        title: Heading printed before the output.
        Q: numpy array of sympy expressions, the symmetric form matrix.
        p: homogeneous point, e.g. [x, y, 1] (2D) or [x, y, z, 1] (3D).
        eig: If True, also print eigenvalues/eigenvectors of Q.
    """
    M = Matrix(Q)
    # p^T Q p expanded is the implicit polynomial (== 0 on the surface).
    v = expand(np.dot(p.T, np.dot(Q, p)))
    # gradient of p^T Q p is 2 Q p: the surface normal at p.
    n = 2 * np.dot(Q, p)
    print(title)
    print("Matrix")
    pprint(M)
    print("Implicit")
    print(v)
    print("Normal")
    # Drop the homogeneous component of the gradient.
    print(n[:len(n)-1])
    print("Determinant")
    print(M.det())
    if eig:
        print("Eigenvalues and Eigenvectors")
        pprint(M.eigenvals())
        pprint(M.eigenvects())
    print()
"""
Conic sections can be viewed as a 2D quadric surface and can be represented by the 3x3 symmetric matrix
M = [A B/2 D/2
B/2 C E/2
D/2 E/2 F]
where A-F are constants
To test if a point lies on the conic section we can do
p = [x y 1] (homogeneous point)
transpose(p) * M * p = 0
If we symbolically multiply the equation above out, we get
A*x**2 + B*x*y + C*y**2 + D*x + E*y + F = 0
The determinant of the matrix M gives us what "shape" of the solution set is
det(M) < 0 gives ellipse (If A=C, B=0), gives us a circle
det(M) = 0 gives parabola
det(M) > 0 gives hyperbola (if A+C = 0), gives us a rectangular hyperbola
Circle
M = [1 0 0
0 1 0
0 0 -r^2]
x^2 + y^2 - r^2
Ellipse
M = [1/a^2 0 0
0 1/b^2 0
0 0 -1]
x^2/a^2 + y^2/b^2 - 1
Parabola
M = [4a/x 0 0
0 -1 0
0 0 0]
y^2 = 4ax
Hyperbola
M = [1/a^2 0 0
0 -1/b^2 0
0 0 -1]
x^2/a^2 - y^2/b^2 = 1
"""
def test_implicit_matrix_form_2d():
    """Print matrix/implicit/normal forms for the standard conic sections."""
    # Batch symbol creation instead of one Symbol(...) call per name.
    A, B, C, D, E, F = symbols('A B C D E F')
    x, y = symbols('x y')
    r, a, b = symbols('r a b')
    p = np.array([x, y, 1])
    print("Conic Sections Implicit Matrix Form")
    # General conic: A x^2 + B x y + C y^2 + D x + E y + F = 0
    Q = np.array([[A, B/2, D/2], [B/2, C, E/2], [D/2, E/2, F]])
    print_implicit("General", Q, p, eig=False)
    # x^2 + y^2 - r^2
    Q = np.array([[1, 0, 0], [0, 1, 0], [0, 0, -(r*r)]])
    print_implicit("Circle", Q, p)
    # x^2/a^2 + y^2/b^2 - 1
    Q = np.array([[1/(a*a), 0, 0], [0, 1/(b*b), 0], [0, 0, -1]])
    print_implicit("Ellipse", Q, p)
    # x^2/a^2 - y^2/b^2 - 1
    Q = np.array([[1/(a*a), 0, 0], [0, -1/(b*b), 0], [0, 0, -1]])
    print_implicit("Hyperbola", Q, p)
    # y^2 = 4 a x
    Q = np.array([[4*a/x, 0, 0], [0, -1, 0], [0, 0, 0]])
    print_implicit("Parabola", Q, p)
"""
Implicit equations that describe quadric surfaces can be represented as a 4x4 symmetric matrix
M = [A B C D
B E F G
C F H I
D G I J]
where A-J are constants
To test whether a point (represented in homogenous form)
p = [x y z 1] lies on the quadric surface we have
transpose(p) * M * p = 0
If we symbolically do the matrix multiplication above, we get this equation:
A*x**2 + 2*B*x*y + 2*C*x*z + 2*D*x + E*y**2 + 2*F*y*z + 2*G*y + H*z**2 + J + 2*I*z = 0
In a sense, M is a matrix of a quadratic form
transpose(x)*A*x + transpose(b)*x = c
and transpose(b)*x and c is 0 in this case
The eigenvalues of A determine the "shape" solution set, such as a ellipsoid or hyperboloid
If all eigenvalues of A are positive, then it is a ellipsoid
If all eigenvalues of A are negative, then it is an imaginary ellipsoid
If some eigenvalues are positive and some negative, it is a hyperboloid
If one or more eigenvalues is zero, then the shape depends on the b vector, then it can be elliptic/hyperbolic paraboloid
The normal can be calculated by taking the gradient of equation
gradient(transpose(p) * M * p) = 2*Q*p
This gives us the normal vector for every point that is determined to be on the surface by the implicit form
Depending what we set the constants A-J as, we can get a sphere, hyperbolas, ellipsoid, cones
Sphere
M = [1 0 0 0
0 1 0 0
0 0 1 0
0 0 0 -r^2]
N = [2x 2y 2z]
x^2 + y^2 + z^2 - r^2
Ellipsoid
M = [1/rx^2 0 0 0
0 1/ry^2 0 0
0 0 1/rz^2 0
0 0 0 -1]
N = [2*x/rx^2 2*y/ry^2 2*z/rz^2]
x^2/rx^2 + y^2/ry^2 + z^2/rz^2 - 1
Cylinder
M = [1/rx^2 0 0 0
0 1/ry^2 0 0
0 0 0 0
0 0 0 -1]
N = [2*x/rx^2 2*y/ry^2 0]
x^2/rx^2 + y^2/ry^2 - 1
Cone
M = [1/rx^2 0 0 0
0 1/ry^2 0 0
0 0 -1/s^2 0
0 0 0 0]
N = [2*x/rx^2 2*y/ry^2 -2*z/s^2]
x^2/rx^2 + y^2/ry^2 - z^2/s^2
Plane (Not a quadric surface but it can be represented in this form)
M = [0 0 0 a/2
0 0 0 b/2
0 0 0 c/2
a/2 b/2 c/2 -d]
N = [a b c]
ax + by + cz - d
"""
def test_implicit_matrix_form_3d():
    """Print matrix/implicit/normal forms for the standard quadric surfaces."""
    # Bug fix: the general matrix previously used the name I without
    # declaring it as a Symbol, so `from sympy import *`'s built-in I
    # (the imaginary unit) leaked into the matrix. Declaring it here
    # shadows the imaginary unit with a plain constant symbol.
    A, B, C, D, E, F, G, H, I, J = symbols('A B C D E F G H I J')
    a, b, c, d = symbols('a b c d')
    # Bug fix: s was previously bound twice via two Symbol('s') calls.
    r, s = symbols('r s')
    rx, ry, rz = symbols('rx ry rz')
    x, y, z = symbols('x y z')
    p = np.array([x, y, z, 1])
    print("Quadric Surfaces Implicit Matrix Form")
    print()
    # General quadric: symmetric 4x4 form matrix.
    Q = np.array([[A, B, C, D], [B, E, F, G], [C, F, H, I], [D, G, I, J]])
    print_implicit("General", Q, p, eig=False)
    # x^2 + y^2 + z^2 - r^2
    Q = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -(r*r)]])
    print_implicit("Sphere", Q, p)
    # x^2/rx^2 + y^2/ry^2 + z^2/rz^2 - 1
    Q = np.array([[1/(rx*rx), 0, 0, 0], [0, 1/(ry*ry), 0, 0], [0, 0, 1/(rz*rz), 0], [0, 0, 0, -1]])
    print_implicit("Ellipsoid", Q, p)
    # x^2/rx^2 + y^2/ry^2 - 1 (no z term)
    Q = np.array([[1/(rx*rx), 0, 0, 0], [0, 1/(ry*ry), 0, 0], [0, 0, 0, 0], [0, 0, 0, -1]])
    print_implicit("Cylinder", Q, p)
    # x^2/rx^2 + y^2/ry^2 - z^2/s^2
    Q = np.array([[1/(rx*rx), 0, 0, 0], [0, 1/(ry*ry), 0, 0], [0, 0, -1/(s*s), 0], [0, 0, 0, 0]])
    print_implicit("Cone", Q, p)
    # a x + b y + c z - d (a plane, expressible in the same form)
    Q = np.array([[0, 0, 0, a/2], [0, 0, 0, b/2], [0, 0, 0, c/2], [a/2, b/2, c/2, -d]])
    print_implicit("Plane", Q, p)
# Enable sympy's pretty printing, then run both demos back to back.
init_printing()
test_implicit_matrix_form_2d()
print("")
test_implicit_matrix_form_3d()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import argparse
import shutil
import sys
from subprocess import call
def run_cmd(command):
    """Run *command* through the shell, exiting the program on Ctrl-C.

    NOTE(review): shell=True executes the string via the shell, so any
    interpolated paths must not contain unquoted spaces/metacharacters,
    and untrusted input must never reach this function. The command's
    own exit status is ignored (best-effort execution).
    """
    try:
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)
if __name__ == "__main__":
    # Four-stage old-photo restoration pipeline: global restore ->
    # face detection -> face enhancement -> blend faces back in.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_folder", type=str, default="", help="Test images")
    parser.add_argument(
        "--output_folder",
        type=str,
        default="/home/jingliao/ziyuwan/workspace/codes/PAMI/outputs",
        help="Restored images, please use the absolute path",
    )
    parser.add_argument("--GPU", type=str, default="6,7", help="0,1,2")
    parser.add_argument(
        "--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint"
    )
    parser.add_argument("--with_scratch", action="store_true")
    opts = parser.parse_args()

    gpu1 = opts.GPU

    # resolve relative paths before changing directory
    opts.input_folder = os.path.abspath(opts.input_folder)
    opts.output_folder = os.path.abspath(opts.output_folder)
    if not os.path.exists(opts.output_folder):
        os.makedirs(opts.output_folder)

    # NOTE(review): saved but never used/restored below -- each stage
    # chdir's relative to the previous stage's directory instead.
    main_environment = os.getcwd()

    ## Stage 1: Overall Quality Improve
    print("Running Stage 1: Overall restoration")
    # Stage scripts expect to run from their own subdirectory.
    os.chdir("./Global")
    stage_1_input_dir = opts.input_folder
    stage_1_output_dir = os.path.join(opts.output_folder, "stage_1_restore_output")
    if not os.path.exists(stage_1_output_dir):
        os.makedirs(stage_1_output_dir)
    if not opts.with_scratch:
        # Quality-only restoration in a single pass.
        stage_1_command = (
            "python test.py --test_mode Full --Quality_restore --test_input "
            + stage_1_input_dir
            + " --outputs_dir "
            + stage_1_output_dir
            + " --gpu_ids "
            + gpu1
        )
        run_cmd(stage_1_command)
    else:
        # Scratch mode: first detect scratch masks, then restore with them.
        mask_dir = os.path.join(stage_1_output_dir, "masks")
        new_input = os.path.join(mask_dir, "input")
        new_mask = os.path.join(mask_dir, "mask")
        stage_1_command_1 = (
            "python detection.py --test_path "
            + stage_1_input_dir
            + " --output_dir "
            + mask_dir
            + " --input_size full_size"
        )
        stage_1_command_2 = (
            "python test.py --Scratch_and_Quality_restore --test_input "
            + new_input
            + " --test_mask "
            + new_mask
            + " --outputs_dir "
            + stage_1_output_dir
        )
        run_cmd(stage_1_command_1)
        run_cmd(stage_1_command_2)

    ## Solve the case when there is no face in the old photo
    # Copy stage-1 output straight to the final folder; images with faces
    # get overwritten by the blended result in stage 4.
    stage_1_results = os.path.join(stage_1_output_dir, "restored_image")
    stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
    if not os.path.exists(stage_4_output_dir):
        os.makedirs(stage_4_output_dir)
    for x in os.listdir(stage_1_results):
        img_dir = os.path.join(stage_1_results, x)
        shutil.copy(img_dir, stage_4_output_dir)

    print("Finish Stage 1 ...")
    print("\n")

    ## Stage 2: Face Detection
    print("Running Stage 2: Face Detection")
    os.chdir(".././Face_Detection")
    stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image")
    stage_2_output_dir = os.path.join(opts.output_folder, "stage_2_detection_output")
    if not os.path.exists(stage_2_output_dir):
        os.makedirs(stage_2_output_dir)
    # NOTE(review): detect_all_dlib is called as an in-process function but
    # no import for it is visible in this file -- confirm it is imported
    # elsewhere (it replaced the subprocess call kept below for reference).
    detect_all_dlib(url=stage_2_input_dir, save_url=stage_2_output_dir)
    # stage_2_command = (
    #     "python detect_all_dlib.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir
    # )
    # run_cmd(stage_2_command)
    print("Finish Stage 2 ...")
    print("\n")

    ## Stage 3: Face Restore
    print("Running Stage 3: Face Enhancement")
    os.chdir(".././Face_Enhancement")
    stage_3_input_mask = "./"
    stage_3_input_face = stage_2_output_dir
    stage_3_output_dir = os.path.join(opts.output_folder, "stage_3_face_output")
    if not os.path.exists(stage_3_output_dir):
        os.makedirs(stage_3_output_dir)
    stage_3_command = (
        "python test_face.py --old_face_folder "
        + stage_3_input_face
        + " --old_face_label_folder "
        + stage_3_input_mask
        + " --tensorboard_log --name "
        + opts.checkpoint_name
        + " --gpu_ids "
        + gpu1
        + " --load_size 256 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 4 --results_dir "
        + stage_3_output_dir
        + " --no_parsing_map"
    )
    run_cmd(stage_3_command)
    print("Finish Stage 3 ...")
    print("\n")

    ## Stage 4: Warp back
    print("Running Stage 4: Blending")
    os.chdir(".././Face_Detection")
    stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image")
    stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img")
    stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
    if not os.path.exists(stage_4_output_dir):
        os.makedirs(stage_4_output_dir)
    # NOTE(review): same as stage 2 -- align_wrap_back_multiple_dlib has no
    # visible import in this file; confirm where it comes from.
    align_wrap_back_multiple_dlib(origin_url=stage_4_input_image_dir, replace_url=stage_4_input_face_dir, save_url=stage_4_output_dir)
    # stage_4_command = (
    #     "python align_warp_back_multiple_dlib.py --origin_url "
    #     + stage_4_input_image_dir
    #     + " --replace_url "
    #     + stage_4_input_face_dir
    #     + " --save_url "
    #     + stage_4_output_dir
    # )
    # run_cmd(stage_4_command)
    print("Finish Stage 4 ...")
    print("\n")

    print("All the processing is done. Please check the results.")
|
# -*- coding: utf-8 -*-
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.base import ContextMixin
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from dictionaries.models import Category
from django.core.urlresolvers import reverse_lazy
from dictionaries.views import EDIT_TEMPLATE, get_template
template = 'dictionaries/category/'
class CategoriesBaseView(LoginRequiredMixin, ContextMixin):
    # Shared configuration for all Category CRUD views: login required,
    # Category model, and slug-based lookup via the 'category' URL kwarg.
    model = Category
    context_object_name = 'category'
    slug_url_kwarg = 'category'
class CategoriesListView(CategoriesBaseView, ListView):
    """List of every Category."""
    context_object_name = 'categories'
    template_name = get_template(template, 'list')

    def get_queryset(self):
        """Return all categories, unfiltered."""
        return Category.objects.all()

    def get_context_data(self, **kwargs):
        """Expose the 'add category' URL to the template."""
        ctx = super(CategoriesListView, self).get_context_data(**kwargs)
        ctx['add_category'] = reverse_lazy('dictionaries:categories-add')
        return ctx
class CategoriesCreateView(CategoriesBaseView, CreateView):
    """Creation form for a new Category."""
    template_name = EDIT_TEMPLATE
    fields = '__all__'

    def get_context_data(self, **kwargs):
        """Add 'back_url': the HTTP referrer, or the list view as fallback."""
        ctx = super(CategoriesCreateView, self).get_context_data(**kwargs)
        fallback = reverse_lazy('dictionaries:categories-list')
        ctx['back_url'] = self.request.META.get('HTTP_REFERER', fallback)
        return ctx
class CategoriesDetailView(CategoriesBaseView, DetailView):
    # Read-only view of a single Category, resolved by slug (see base class).
    template_name = get_template(template, 'detail')
class CategoriesUpdateView(CategoriesBaseView, UpdateView):
    """Edit form for an existing Category."""
    template_name = EDIT_TEMPLATE
    fields = '__all__'

    def get_context_data(self, **kwargs):
        """Add 'back_url': the referrer, or this category's detail page."""
        ctx = super(CategoriesUpdateView, self).get_context_data(**kwargs)
        fallback = reverse_lazy('dictionaries:categories-detail',
                                kwargs={'category': self.object.slug})
        ctx['back_url'] = self.request.META.get('HTTP_REFERER', fallback)
        return ctx
class CategoriesDeleteView(CategoriesBaseView, DeleteView):
    """Confirmation and deletion of a Category.

    Redirects to the category list after a successful delete.
    """
    # Removed the stray `pass` that preceded the method — it was redundant
    # once the class body had real content.

    def get_success_url(self):
        """Return the list view URL to redirect to after deletion."""
        return reverse_lazy('dictionaries:categories-list')
|
"""Simple AES interface via command line."""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
|
def dig(num):
    """Return ``num`` unchanged.

    The original body, ``int(str(num))``, is a no-op round-trip for the
    integer arguments this script passes in, so the conversion is dropped.
    The function is kept as a named hook used by the factor loop below.
    """
    return num
# Read bounds a <= b; print the last digit of b! / a!, i.e. of the product
# (a+1) * (a+2) * ... * b.
a, b = map(int, input().split())
mult = 1
if b - a > 4:
    # Any 5+ consecutive integers contain a multiple of 5 and an even number,
    # so the product is divisible by 10 and its last digit is 0.
    print(0)
else:
    # At most 4 factors: multiply them out and take the last decimal digit.
    for i in range(b-a):
        mult *= dig(b-i)
    print(str(mult)[-1])
|
#!/usr/bin/env python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#L2TRACERT
#
# Version 1.2 - 7/21/2015
# Written by:
# Jeremy Georges - Arista Networks
# jgeorges@arista.com
#
# Revision history:
# 1.0 - initial release - 4/29/2015
# 1.1 - Added MAC and VLAN validation. Fixed minor bug. - 5/5/2015
# 1.2 - Added additional logic for Port-channels - 7/21/2015
""" l2tracert
The purpose of this script is to provide a traceroute function but at layer 2. The user must specify a destination MAC address
and a VLAN. Additionally, LLDP must be enabled between the switches and they must leverage EAPI.
If LLDP and or EAPI is not enabled, the script will stop at that switch hop.
Additionally, the ma1 interface must have an IP and all neighbor Arista switches need to be reachable via the ma1 interface,
since this is the IP that shows up in the LLDP management IP field.
The script uses the output of the 'show mac address-table' command and LLDP information to build a hop by hop representation
of the l2 route from beginning to final egress switch port.
In large Layer 2 environments, this can be helpful to understand each switch and physical interface that is used to reach
a specific mac address, especially when troubleshooting.
A user account must be created that has enough privilege to run the 'show mac address-table' and 'show lldp' commands.
Either the variables DEFAULTUSER and DEFAULTPW will need to be set or command line arguments can be specified.
To make it easier to execute from EOS, an alias can be set up to provide the required authentication parameters.
For example:
7050S-64(config)#alias l2trace bash /mnt/flash/l2tracert.py -u admin -p mypassword
Now the alias l2trace can be used instead of having to type the username and password each time.
7050S-64#l2trace -m 000e.c687.8c93 -v 1
L2 Trace Route to 000e.c687.8c93 on VLAN 1
Hop Host Egress Remote Host Ingress
********************************************************************************
1 7050S-64 Ethernet49/1 7050QX Ethernet49/1
2 7050QX Ethernet33 7048-LAB-R1 Ethernet49
3 7048-LAB-R1 Ethernet48 NA
INSTALLATION:
Copy to the /mnt/flash directory of each Arista switch that you want to use l2tracert.
"""
VERSION='1.2'
# Default EAPI credentials used when -u/-p are not supplied on the command line.
# NOTE(review): a hard-coded password in source is a security risk — prefer
# mandatory command-line arguments or a protected configuration file.
DEFAULTUSER='admin'
DEFAULTPW='4me2know'
#=====================================================
# Variables
#=====================================================
#***********************************************************************************
# Modules
#***********************************************************************************
import os
import re
import sys
import optparse
import syslog
from jsonrpclib import Server
#==========================================================
# Function Definitions
#==========================================================
def matchme(strg, pattern):
    """Return True when `pattern` matches anywhere in `strg` (re.search semantics)."""
    return re.search(pattern, strg) is not None
def macchk(mac):
    """Validate that `mac` is in EOS dotted format, e.g. '0011.aabb.ccdd'.

    Case-insensitive. Returns a bool; truthy/falsy exactly like the previous
    1/0 return values, so existing `if not macchk(...)` callers are unaffected.
    """
    # `re` is already imported at module level; the redundant function-local
    # import is removed. The raw string replaces the escaped backreference.
    return bool(re.match(r"[0-9a-f]{4}([.])[0-9a-f]{4}(\1[0-9a-f]{4}){1}$",
                         mac.lower()))
def switchparse(switch,mac,vlan):
    '''
    Parse 'show mac address-table' and 'show lldp neighbors ... detail' output
    on `switch` (a jsonrpclib Server proxy) for `mac` on `vlan`.

    Returns:
        0    - could not reach the switch / EAPI not enabled
        1    - connected, but the MAC is not in the address table
        list - [egress interface, neighbor port ID, neighbor system name,
                neighbor management address, 'Arista' or 'NA',
                hostname of the queried switch]
    '''
    try:
        showhostname = switch.runCmds( 1,[ "enable","show hostname" ],"json")
    except:
        #Return 0 and we'll use this to determine that we can't connect.
        #Probably because EAPI is not enabled!
        return 0
    try:
        showmactable = switch.runCmds( 1,[ "enable","show mac address-table address %s vlan %s" % (mac, vlan) ],"json")
    except:
        return 0
    try:
        #If this throws an exception, it means the mac address is not there...
        egressinterface=showmactable[1]['unicastTable']['tableEntries'][0]['interface']
    except:
        #Return 1 and we'll use this to determine that MAC is not found
        # Return 1 because if we returned 0 above, that means we couldn't connect
        return 1
    #Lets create an empty list that will hold the following items:
    # -egress interface
    # -Neighbor Port ID
    # -System Name
    # -Management Address
    # -System Description
    # -Hostname of device queried.
    #
    # If the System Description is "Arista Networks EOS", lets change it to "Arista"
    # If the Management Address is blank, just set it to "NA"
    # If the System Description is something other than Arista, we'll leave it alone.
    # The reason for this, we can't parse the next neighbor since this script is written around EOS constructs.
    # Therefore, if the System Description is not set to Arista, we'll assume its the end host we're actually trying to
    # do the l2trace route on. If its another switch, then user will have to manually look at that other switch to see if there
    # are any more next hop switches to analyze. But they should be able to ascertain this by the 'system name'.
    lldplist=[]
    # Since the show lldp neighbors command does not support port-channels (since this is a link level protocol)
    # we need to add additional logic to check the lldp neighbor of one of the member interfaces of the port-channel.
    # That should be sufficient for our needs.
    if re.findall("Ethernet.*", egressinterface):
        lldplist.append(("".join(egressinterface)))
        showlldpneighbor = switch.runCmds( 1,[ "enable","show lldp neighbors %s detail" % (egressinterface) ],"text")
        switchneighbor=showlldpneighbor[1] ["output"]
    elif re.findall("Port-Channel.*", egressinterface):
        #We need to look at the LLDP neighbor on just one member interface. Lets just look at the first one, that
        #should be sufficient.
        try:
            showportchannel = switch.runCmds( 1,[ "enable","show interfaces %s " % (egressinterface) ],"json")
        except:
            print "Issue with parsing Port Channel Members"
            return 0
        #First member interface listed should be listed as first one
        phyegressinterfaces=showportchannel[1]['interfaces'][egressinterface]['memberInterfaces'].keys()[0]
        #append the egressint to our list which will be displayed as the port-channel here.
        lldplist.append(("".join(egressinterface)))
        #Here we need to override that and use the first member interface of our port-channel for the lldp
        #neighbor command.
        showlldpneighbor = switch.runCmds( 1,[ "enable","show lldp neighbors %s detail" % (phyegressinterfaces) ],"text")
        switchneighbor=showlldpneighbor[1] ["output"]
    # NOTE(review): if the egress interface is neither Ethernet* nor
    # Port-Channel* (e.g. Vxlan or Cpu), `switchneighbor` is never assigned and
    # the re.findall below raises NameError — confirm whether that can occur.
    if re.findall("Port ID :.*", switchneighbor):
        currentneighborport = re.findall("Port ID :.*", switchneighbor)
    else:
        #This means the next device doesn't have LLDP enabled...so we'll just have to stuff an NA flag here.
        currentneighborport = "NA"
        #We'll just fall through the logic below if we have NA flagged.
    #Ok we need to strip out the field label and whitespace
    currentneighborport = map(lambda currentneighborport:currentneighborport.replace("Port ID   : ", ""),currentneighborport)
    #Strip out the quotes now...
    currentneighborport = map(lambda currentneighborport:currentneighborport.replace("\"", ""),currentneighborport)
    # Append our list with a string form of our final modified output :-)
    lldplist.append(("".join(currentneighborport)))
    # Now parse for System Name
    if re.findall("System Name:.*", switchneighbor):
        currentneighborsystemname = re.findall("System Name:.*", switchneighbor)
    else:
        currentneighborsystemname = "NA"
    #Ok we need to strip out the field label and whitespace
    currentneighborsystemname = map(lambda currentneighborsystemname:currentneighborsystemname.replace("System Name: ", ""),currentneighborsystemname)
    #Strip out the quotes now
    currentneighborsystemname = map(lambda currentneighborsystemname:currentneighborsystemname.replace("\"", ""),currentneighborsystemname)
    # Append our list with a string form of our final modified output :-)
    lldplist.append(("".join(currentneighborsystemname)))
    #Now parse for Management Address
    if re.findall("Management Address        :.*", switchneighbor):
        currentneighbormgmtaddress = re.findall("Management Address        :.*", switchneighbor)
    else:
        currentneighbormgmtaddress = "NA"
    #Ok we need to strip out the field label and whitespace
    currentneighbormgmtaddress = map(lambda currentneighbormgmtaddress:currentneighbormgmtaddress.replace("Management Address        : ", ""),currentneighbormgmtaddress)
    #Strip out the quotes now
    currentneighbormgmtaddress = map(lambda currentneighbormgmtaddress:currentneighbormgmtaddress.replace("\"", ""),currentneighbormgmtaddress)
    # Append our list with a string form of our final modified output :-)
    lldplist.append(("".join(currentneighbormgmtaddress)))
    #Finally, parse for System Description
    #
    #We're going to make things simple here. If we regex and find 'Arista Networks EOS', we'll just set this list item to 'Arista'
    #That way, we'll have some logic to know if we can actually query the next host. If its not Arista...then we'll just show the egress
    #interface.
    # The logic for this will be checked in the main section of script.
    if re.findall("System Description:.*", switchneighbor) and re.findall("Arista Networks EOS.*", switchneighbor):
        currentneighbordescription = 'Arista'
    else:
        currentneighbordescription = "NA"
    # Append our list with a string form of our final modified output :-)
    lldplist.append(("".join(currentneighbordescription)))
    # Add the current hostname as the final element.
    lldplist.append(("".join(showhostname[1]['hostname'])))
    return (lldplist)
#==========================================================
# MAIN
#==========================================================
def main():
    """Parse CLI options, then walk switch-to-switch printing the L2 path."""
    usage = "usage: %prog [options] arg1 arg2"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-V", "--version", action="store_true",dest="version", help="The version")
    parser.add_option("-v", "--vlan", type="string", dest="vlan", help="Vlan that MAC resides on",metavar="VLAN")
    parser.add_option("-m", "--mac", type="string", dest="mac", help="MAC Address to Traceroute on",metavar="MAC")
    parser.add_option("-d", action="store_true", dest="verbose", help="Verbose logging")
    parser.add_option("-u", "--user", type="string", dest="USERNAME", help="Username for EAPI",metavar="username",default=DEFAULTUSER)
    parser.add_option("-p", "--password", type="string", dest="PASSWORD", help="Password for EAPI",metavar="password",default=DEFAULTPW)
    (options, args) = parser.parse_args()
    if options.version:
        print os.path.basename(sys.argv[0]), " Version: ", VERSION
        sys.exit(0)
    # Do some simple validation of mac and vlan id
    if options.vlan and options.mac:
        if not macchk(options.mac):
            print "MAC format not valid. You must enter MAC in the following format: aaaa.bbbb.cccc"
            sys.exit(0)
        if not (0 < int(options.vlan) < 4095):
            print "VLAN ID not correct"
            sys.exit(0)
    else:
        print "VLAN & MAC address required as arguments to execute l2tracert"
        sys.exit(0)
    # General login setup: first hop is always the local switch via loopback EAPI.
    localswitch = Server( "https://%s:%s@127.0.0.1/command-api" % (options.USERNAME,options.PASSWORD))
    #remoteswitch = Server( "https://%s:%s@%s/command-api" % (options.USERNAME,options.PASSWORD,remote_IP))
    # switchparse returns 0 (EAPI unreachable), 1 (MAC not found), or a list:
    # - egress interface
    # -Neighbor Port ID
    # -System Name
    # -Management Address
    # -System Description ('Arista' or 'NA')
    # - hostname being queried
    local=switchparse(localswitch,options.mac,options.vlan)
    if local == 1:
        # switchparse returned 1: the MAC is not in the local switch's table.
        print "MAC Address not found!"
        sys.exit(0)
    if local == 0:
        # This means EAPI failed!
        print "EAPI Request Failed."
        sys.exit(0)
    # Set iteration to 1, increment on each run.
    iteration=1
    #Need to setup print function here!
    print "L2 Trace Route to %s on VLAN %s" % (options.mac, options.vlan)
    print " "
    print "{0:12} {1:16} {2:16} {3:16} {4:16}".format("Hop","Host", "Egress", "Remote Host" , "Ingress")
    print "*"*80
    iteration=1  # NOTE(review): redundant re-initialization (already set above)
    print "{0:12} {1:16} {2:16} {3:16} {4:16}".format(str(iteration),local[5], local[0], local[2] , local[1])
    # Go into a loop and we'll break out of the loop if we get a System Description that is not "Arista"
    remote_IP=local[3]
    while True:
        iteration += 1
        remoteswitch = Server( "https://%s:%s@%s/command-api" % (options.USERNAME,options.PASSWORD,remote_IP))
        remote=switchparse(remoteswitch,options.mac,options.vlan)
        #import pdb; pdb.set_trace()
        if remote == 0:
            print "Appears that the next switch does not have EAPI enabled."
            sys.exit(0)
        elif remote == 1:
            print "MAC not found on remote switch %s. Try pinging the destination address first." % remote_IP
            break
        elif remote[4] == 'Arista':
            # Next hop is another Arista switch: print the row and keep walking.
            print "{0:12} {1:16} {2:16} {3:16} {4:16}".format(str(iteration),remote[5], remote[0], remote[2] , remote[1])
            remote_IP=remote[3]
        else:
            # Non-Arista (or unknown) neighbor: print the final egress and stop.
            print "{0:12} {1:16} {2:16} {3:16} {4:16}".format(str(iteration),remote[5], remote[0], remote[2] , " ")
            break
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright 2019 Johannes von Oswald
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :split_cifar.py
# @author :jvo
# @contact :oswald@ini.ethz.ch
# @created :05/13/2019
# @version :1.0
# @python_version :3.7.3
"""
Split CIFAR-10/100 Dataset
^^^^^^^^^^^^^^^^^^^^^^^^^^
The module :mod:`data.special.split_cifar` contains a wrapper for data handlers
for the Split-CIFAR10/CIFAR100 task.
"""
# FIXME The code in this module is mostly a copy of the code in the
# corresponding `split_mnist` module.
import numpy as np
from data.cifar100_data import CIFAR100Data
from data.cifar10_data import CIFAR10Data
# DELETEME
def get_split_CIFAR_handlers(data_path, use_one_hot=True, validation_size=0,
                             use_data_augmentation=False):
    """Deprecated. Use :func:`get_split_cifar_handlers` instead."""
    message = ('Function has been removed. Use function '
               '"get_split_cifar_handlers" instead.')
    raise NotImplementedError(message)
def get_split_cifar_handlers(data_path, use_one_hot=True, validation_size=0,
                             use_data_augmentation=False, num_tasks=6):
    """Build the data handlers for the SplitCIFAR benchmark.

    The first handler wraps the full CIFAR-10 dataset; every following
    handler is a :class:`SplitCIFAR100Data` task covering 10 consecutive
    CIFAR-100 labels ([0-9], [10-19], ...). With the default ``num_tasks=6``
    this yields the usual CIFAR continual-learning benchmark.

    Args:
        data_path: Directory the CIFAR-10/100 data is read from (downloaded
            there if missing).
        use_one_hot (bool): Represent class labels as one-hot vectors.
        validation_size: Validation-set size of each individual handler.
        use_data_augmentation (optional): Only affects inputs transformed via
            :meth:`data.dataset.Dataset.input_to_torch_tensor` (PyTorch only).
        num_tasks (int): Number of handlers to return, between 1 and 11.

    Returns:
        (list) A :class:`data.cifar10_data.CIFAR10Data` instance followed by
        ``num_tasks - 1`` :class:`SplitCIFAR100Data` instances.
    """
    assert 1 <= num_tasks <= 11
    print('Creating data handlers for SplitCIFAR tasks ...')
    handlers = [CIFAR10Data(data_path, use_one_hot=use_one_hot,
                            validation_size=validation_size,
                            use_data_augmentation=use_data_augmentation)]
    for task in range(num_tasks - 1):
        first_label = task * 10
        handlers.append(SplitCIFAR100Data(
            data_path, use_one_hot=use_one_hot,
            validation_size=validation_size,
            use_data_augmentation=use_data_augmentation,
            labels=range(first_label, first_label + 10)))
    print('Creating data handlers for SplitCIFAR tasks ... Done')
    return handlers
class SplitCIFAR100Data(CIFAR100Data):
    """An instance of the class shall represent a single SplitCIFAR-100 task.

    Args:
        data_path: Where should the dataset be read from? If not existing,
            the dataset will be downloaded into this folder.
        use_one_hot (bool): Whether the class labels should be
            represented in a one-hot encoding.
        validation_size: The number of validation samples. Validation
            samples will be taking from the training set (the first :math:`n`
            samples).
        use_data_augmentation (optional): Note, this option currently only
            applies to input batches that are transformed using the class
            member :meth:`data.dataset.Dataset.input_to_torch_tensor`
            (hence, **only available for PyTorch**).
            Note, we are using the same data augmentation pipeline as for
            CIFAR-10.
        labels: The labels that should be part of this task.
        full_out_dim: Choose the original CIFAR instead of the the new
            task output dimension. This option will affect the attributes
            :attr:`data.dataset.Dataset.num_classes` and
            :attr:`data.dataset.Dataset.out_shape`.
    """
    def __init__(self, data_path, use_one_hot=False, validation_size=1000,
                 use_data_augmentation=False, labels=range(0, 10),
                 full_out_dim=False):
        # The parent is built with validation_size=0 on purpose: the task's
        # own validation split is carved out below from the *filtered* data.
        super().__init__(data_path, use_one_hot=use_one_hot, validation_size=0,
                         use_data_augmentation=use_data_augmentation)
        K = len(labels)
        self._labels = labels
        train_ins = self.get_train_inputs()
        test_ins = self.get_test_inputs()
        train_outs = self.get_train_outputs()
        test_outs = self.get_test_outputs()
        # Get labels (as plain class indices, even if stored one-hot).
        if self.is_one_hot:
            train_labels = self._to_one_hot(train_outs, reverse=True)
            test_labels = self._to_one_hot(test_outs, reverse=True)
        else:
            train_labels = train_outs
            test_labels = test_outs
        train_labels = train_labels.squeeze()
        test_labels = test_labels.squeeze()
        # Boolean masks selecting only samples whose label belongs to `labels`.
        train_mask = train_labels == labels[0]
        test_mask = test_labels == labels[0]
        for k in range(1, K):
            train_mask = np.logical_or(train_mask, train_labels == labels[k])
            test_mask = np.logical_or(test_mask, test_labels == labels[k])
        train_ins = train_ins[train_mask, :]
        test_ins = test_ins[test_mask, :]
        train_outs = train_outs[train_mask, :]
        test_outs = test_outs[test_mask, :]
        # The first `validation_size` (filtered) train samples become the
        # validation set.
        if validation_size > 0:
            assert (validation_size < train_outs.shape[0])
            val_inds = np.arange(validation_size)
            train_inds = np.arange(validation_size, train_outs.shape[0])
        else:
            train_inds = np.arange(train_outs.shape[0])
        test_inds = np.arange(train_outs.shape[0],
                              train_outs.shape[0] + test_outs.shape[0])
        outputs = np.concatenate([train_outs, test_outs], axis=0)
        if not full_out_dim:
            outputs = self.transform_outputs(outputs)
            # Note, we may also have to adapt the output shape appropriately.
            if self.is_one_hot:
                self._data['out_shape'] = [len(labels)]
        images = np.concatenate([train_ins, test_ins], axis=0)
        ### Overwrite internal data structure. Only keep desired labels.
        # Note, we continue to pretend to be a 100 class problem, such that
        # the user has easy access to the correct labels and has the original
        # 1-hot encodings.
        if not full_out_dim:
            self._data['num_classes'] = 10
        else:
            self._data['num_classes'] = 100
        self._data['in_data'] = images
        self._data['out_data'] = outputs
        self._data['train_inds'] = train_inds
        self._data['test_inds'] = test_inds
        if validation_size > 0:
            self._data['val_inds'] = val_inds
        n_val = 0
        if validation_size > 0:
            n_val = val_inds.size
        print('Created SplitCIFAR task with labels %s and %d train, %d test '
              % (str(labels), train_inds.size, test_inds.size) +
              'and %d val samples.' % (n_val))

    def transform_outputs(self, outputs):
        """Transform the outputs from the 100D CIFAR100 dataset
        into proper 10D labels.

        Args:
            outputs: 2D numpy array of outputs.

        Returns:
            2D numpy array of transformed outputs.
        """
        labels = self._labels
        if self.is_one_hot:
            assert (outputs.shape[1] == self._data['num_classes'])
            # Bug fix: `np.bool` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `bool` is the supported, equivalent dtype.
            mask = np.zeros(self._data['num_classes'], dtype=bool)
            mask[labels] = True
            return outputs[:, mask]
        else:
            assert (outputs.shape[1] == 1)
            ret = outputs.copy()
            # Remap each original label to its position within this task.
            for i, l in enumerate(labels):
                ret[ret == l] = i
            return ret

    def get_identifier(self):
        """Returns the name of the dataset."""
        return 'SplitCIFAR100'
# No CLI behavior; importing this module is the only supported use.
if __name__ == '__main__':
    pass
|
import io
import os
import tempfile
import pytest
from dagger import DeserializationError, Serializer
from dagger_contrib.serializer.path.as_tar import AsTar
# Compression modes accepted by AsTar; None means an uncompressed tar archive.
SUPPORTED_COMPRESSION_MODES = [
    None,
    "gzip",
    "xz",
    "bz2",
]
def test__conforms_to_protocol():
    """AsTar must satisfy the Serializer protocol."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        serializer = AsTar(output_dir=tmp_dir)
        assert isinstance(serializer, Serializer)
def test_serialization_and_deserialization_are_symmetric_for_a_single_file():
    """Round-trip a single file through every supported compression mode."""
    expected = "original content"
    for mode in SUPPORTED_COMPRESSION_MODES:
        with tempfile.TemporaryDirectory() as workdir:
            # The source file the serializer will pack.
            source_path = os.path.join(workdir, "original")
            with open(source_path, "w") as f:
                f.write(expected)

            out_dir = os.path.join(workdir, "output_dir")
            os.mkdir(out_dir)
            serializer = AsTar(output_dir=out_dir, compression=mode)

            # Pack the file into a tar archive on disk ...
            tar_path = os.path.join(workdir, f"serialized_tar.{serializer.extension}")
            with open(tar_path, "wb") as writer:
                serializer.serialize(source_path, writer)

            # ... then unpack it again.
            with open(tar_path, "rb") as reader:
                restored_path = serializer.deserialize(reader)

            # The deserialized value is a path under out_dir that holds the
            # original content.
            assert restored_path.startswith(out_dir)
            with open(restored_path, "r") as f:
                assert f.read() == expected
def test_serialization_and_deserialization_are_symmetric_for_a_directory():
    """Round-trip a nested directory through every supported compression mode."""
    for compression in SUPPORTED_COMPRESSION_MODES:
        with tempfile.TemporaryDirectory() as tmp:
            # The original content, backed by the file system: a directory
            # with one top-level file and two files inside a subdirectory.
            original_dir = os.path.join(tmp, "original_dir")
            original_subdir = os.path.join(original_dir, "subdir")
            os.makedirs(original_subdir)
            original_filenames = [
                "a",
                os.path.join("subdir", "a"),
                os.path.join("subdir", "b"),
            ]
            # Each file's content is its own relative name, so contents can
            # be verified after the round-trip.
            for filename in original_filenames:
                with open(os.path.join(original_dir, filename), "w") as f:
                    f.write(filename)

            output_dir = os.path.join(tmp, "output_dir")
            os.mkdir(output_dir)
            serializer = AsTar(output_dir=output_dir, compression=compression)

            # The serializer produces a tar file
            serialized_tar = os.path.join(tmp, f"serialized_tar.{serializer.extension}")
            with open(serialized_tar, "wb") as writer:
                serializer.serialize(original_dir, writer)

            # And it can read it back
            with open(serialized_tar, "rb") as reader:
                deserialized_dir = serializer.deserialize(reader)

            # Retrieving a value equivalent to the original one (a directory
            # containing files with the original structure and contents)
            assert deserialized_dir.startswith(output_dir)
            # Snapshot the extracted tree as {root: (dirs, files)} so the
            # whole structure can be compared in one assertion.
            structure = {
                root: (set(dirs), set(files))
                for root, dirs, files in os.walk(deserialized_dir)
            }
            assert structure == {
                os.path.join(output_dir, "original_dir"): ({"subdir"}, {"a"}),
                os.path.join(output_dir, "original_dir", "subdir"): (set(), {"a", "b"}),
            }
            for filename in original_filenames:
                with open(os.path.join(deserialized_dir, filename), "r") as f:
                    assert f.read() == filename
def test_deserialize_invalid_tar_file():
    """Garbage byte streams must raise DeserializationError, never succeed."""
    garbage_values = [
        b"",
        b"123",
    ]
    for garbage in garbage_values:
        for mode in SUPPORTED_COMPRESSION_MODES:
            with tempfile.TemporaryDirectory() as workdir:
                serializer = AsTar(output_dir=workdir, compression=mode)
                with pytest.raises(DeserializationError):
                    serializer.deserialize(io.BytesIO(garbage))
def test_extension_depends_on_compression():
    """Each compression mode maps to its canonical tar file extension."""
    expected_by_mode = {
        None: "tar",
        "gzip": "tar.gz",
        "xz": "tar.xz",
        "bz2": "tar.bz2",
    }
    for mode, expected in expected_by_mode.items():
        with tempfile.TemporaryDirectory() as workdir:
            serializer = AsTar(output_dir=workdir, compression=mode)
            assert serializer.extension == expected
def test_extension_fails_when_compression_is_not_supported():
    """Constructing AsTar with an unknown compression mode must fail fast."""
    with tempfile.TemporaryDirectory() as workdir:
        with pytest.raises(AssertionError):
            AsTar(output_dir=workdir, compression="unsupported")
|
import pytest
import os, sys
import pickle
from tensorflow.keras import losses
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(1, os.getcwd())
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.keras.losses import Huber
from deepts.models import TCN
from deepts.metrics import MASE, ND, NRMSE
from unittests.config import (SAMPLE_SIZE, MODEL_DIR, LAG, BATCH_SIZE,
WINDOW_SIZE, N_BACK, N_FORE)
from IPython import embed
def test_TCN():
    """Smoke-test the TCN model: fit on prepared electricity data, then predict.

    NOTE(review): relies on hard-coded absolute paths to pre-pickled
    features and scaler — this test only runs on the machine that produced
    those files.
    """
    model_name = 'TCN'
    metrics = [ND(), NRMSE()]
    with open('/home/haohy/TSF/deepts/examples/data/electricity/NewTCNQuantile/feature_prepare.pkl', 'rb') as f:
        trainX_dt, _, trainY_dt, trainY2_dt, testX_dt, _, testY_dt, testY2_dt = pickle.load(f)
    with open('/home/haohy/TSF/deepts/examples/data/electricity/NewTCNQuantile/electricity_standard_scaler.pkl', 'rb') as f:
        scaler = pickle.load(f)['standard_scaler']
    x = [trainX_dt, trainY2_dt]
    y = trainY_dt
    model = TCN()
    print(model.summary())
    model.compile(
        optimizer='adam',
        loss=[Huber()],
        metrics=metrics
    )
    model.fit(x, y, batch_size=BATCH_SIZE, epochs=5)
    print("{} test train pass!".format(model_name))
    x_test = [testX_dt, testY2_dt]
    # Bug fix: the targets were mistakenly assigned from the inputs
    # (`y_test = testX_dt`); use the actual test targets.
    y_test = testY_dt
    # Removed `embed(header="predict")`: it dropped into an interactive
    # IPython shell and blocked any automated test run.
    y_pred = model.predict(x_test)
if __name__ == '__main__':
    # GPU selection intentionally disabled; uncomment to hide GPU 0 from TF.
    # physical_devices = tf.config.list_physical_devices('GPU')
    # tf.config.set_visible_devices(physical_devices[1:], 'GPU')
    test_TCN()
import pymysql
import models
import random
import queue
from util import СircleCollection, RandomData
from faker import Faker
from db_logic import MySQLConnector
class Generator:
"""Генерация данных"""
CURRENT_CLIENT_ID = 1
CURRENT_ORDER_ID = 1
CURRENT_PRODUCT_ID = 1
CURRENT_BOOKING_ID = 1
CURRENT_HOUSE_ID = 1
CURRENT_STAFF_ID = 1
def __init__(self, data_gen: RandomData) -> None:
self.data_gen = data_gen
self.fake_ru = data_gen.fake_ru
self.fake_en = data_gen.fake_en
def client_generator(self) -> models.Client:
"""Генерация клиента"""
# Текущий ID
cli_id = Generator.CURRENT_CLIENT_ID
Generator.CURRENT_CLIENT_ID += 1
# Работаем с именами
first_name, last_name = self.data_gen.get_random_name()
email = self.fake_ru.email()
phone = self.fake_ru.phone_number()
document_title = "Паспорт"
# Рандомные байты
document_file = (
"0x" + bytearray(random.getrandbits(8) for _ in range(100)).hex()
)
document_text = f"ID {self.data_gen.get_number_range(4)} {self.data_gen.get_number_range(8)}"
document_comments = random.choice((None, "срок действия заканчивется", "ok"))
return models.Client(
cli_id,
first_name,
last_name,
email,
phone,
document_title,
document_file,
document_text,
document_comments,
)
def order_generator(self, client_id: int) -> models.Order:
"""Генерация заказа клиента"""
order_id = Generator.CURRENT_ORDER_ID
Generator.CURRENT_ORDER_ID += 1
order_date = self.fake_ru.date_time_between(start_date="-2y", end_date="now")
cost = None # <- рассчитывается позже в программе
return models.Order(order_id, order_date, client_id, cost)
def product_count_generator(
self, product_id: int, order_id: int
) -> models.ProductCount:
"""Генерация кол-ва заказанных продуктов"""
count = random.randint(1, 15)
return models.ProductCount(count, product_id, order_id)
def product_generator(self) -> models.Product:
"""Генерация заказанных клиентом продукта"""
product_id = Generator.CURRENT_PRODUCT_ID
Generator.CURRENT_PRODUCT_ID += 1
title = self.fake_ru.words(1)[0]
price = round(random.uniform(2.0, 1000.9), 2)
return models.Product(product_id, title, price)
def booking_generator(self, client_id: int, staff_id: int, house_id: int) -> models.Booking:
"""Генерация бронированя клиента"""
booking_id = Generator.CURRENT_BOOKING_ID
Generator.CURRENT_BOOKING_ID += 1
date_in, date_out = self.data_gen.data_range_generator()
cost = None
return models.Booking(booking_id, date_in, date_out, client_id, staff_id, house_id, cost)
def house_generator(self) -> models.House:
"""Генерация дома"""
house_types = ("Вилла", "Бунгало", "Таунхаус", "Пентхаус", "Коттедж")
# Текущий ID
house_id = Generator.CURRENT_HOUSE_ID
Generator.CURRENT_HOUSE_ID += 1
# Название и тип дома
buf_name = " ".join(self.fake_en.words(2)).upper()
house_type = random.choice(house_types)
house_name = f"{house_type} {buf_name}"
# Цена
house_price = round(random.uniform(1500.0, 8000.9), 2)
# Доп опции
house_ac = random.choice((0, 1))
house_tv = random.choice((0, 1))
house_safe = random.choice((0, 1))
house_description = f"Описание для дома с id {house_id}"
return models.House(
house_id,
house_name,
house_price,
house_ac,
house_tv,
house_safe,
house_description,
house_type,
)
def staff_generator(self) -> models.Staff:
    """Generate a hotel staff member (booking clerk or house-service staff)."""
    new_id = Generator.CURRENT_STAFF_ID
    Generator.CURRENT_STAFF_ID += 1
    # Random first/last name pair from the data helper.
    first_name, last_name = self.data_gen.get_random_name()
    # Staff are split into booking personnel and house-service personnel.
    kind = random.choice(("staff_booking", "staff_house"))
    if kind == "staff_house":
        role = random.choice(
            (
                "старший обслуживающий персонал",
                "обслуживающий персонал",
                "младший обслуживающий персонал",
            )
        )
    else:
        role = random.choice(("менеджер", "администратор"))
    contact_phone = self.fake_ru.phone_number()
    return models.Staff(new_id, first_name, last_name, role, kind, contact_phone)
def staffs_houses_generator(self, house_id: int, staff_id: int) -> models.StaffsHouses:
    """Generate a row for the staff-to-house many-to-many link table.

    Args:
        house_id: id of the serviced house.
        staff_id: id of the staff member assigned to it.

    Returns:
        A ``models.StaffsHouses`` link record.
    """
    return models.StaffsHouses(house_id, staff_id)
def main():
    """Populate the CONTROL_FA database with randomly generated demo data.

    Generates staff, houses, clients, orders (with products) and bookings,
    then links houses to their servicing staff via the link table.
    """
    HOST = "127.0.0.1"
    USER = "root"
    PASSWORD = "tiger"
    DB = "CONTROL_FA"
    # Open the MySQL connection.
    locale_dict = {
        "host": HOST,
        "user": USER,
        "password": PASSWORD,
        "db": DB,
        "cursorclass": pymysql.cursors.DictCursor,
    }
    connection = MySQLConnector(locale_dict)
    # Initialise the data generator.
    gen = Generator(RandomData())
    # Round-robin pools of staff, split by role.
    staffs_dict = {
        "staff_booking": СircleCollection(),
        "staff_house": СircleCollection(),
    }
    houses_list = []
    # Generate service staff.
    for _ in range(30):
        staff = gen.staff_generator()
        connection.write(staff.insert())
        staffs_dict[staff.type].put(staff)
    # Generate houses.
    for _ in range(40):
        house = gen.house_generator()
        connection.write(house.insert())
        # Keep the house for the staff-assignment pass below.
        houses_list.append(house)
        print(f"Записали дом {house.id} -> {house.name}")
    # Round-robin collection of houses used when creating bookings.
    houses_collection = СircleCollection(houses_list)
    # Generate clients.
    for _ in range(100):
        client = gen.client_generator()
        connection.write(client.insert())
        print(f"Записали клиента {client.id} -> {client.first_name} {client.last_name}")
        # Roughly half of the clients get product orders.
        if random.choice((True, False)):
            for _ in range(random.randint(1, 20)):
                current_order = gen.order_generator(client.id)
                connection.write(current_order.insert())
                # Total order cost, accumulated over its products.
                order_cost = 0
                # Add products to the order.
                for _ in range(random.randint(1, 20)):
                    product = gen.product_generator()
                    connection.write(product.insert())
                    # Ordered quantity of this product.
                    products_count = gen.product_count_generator(
                        product.id, current_order.id
                    )
                    connection.write(products_count.insert())
                    order_cost += product.price * products_count.count
                current_order.cost = order_cost
                connection.write(current_order.update())
        # Roughly half of the clients get house bookings.
        if random.choice((True, False)):
            for _ in range(random.randint(1, 5)):
                # Bookings are handled by the booking personnel.
                current_staff = staffs_dict["staff_booking"].get()
                # Pick the house being booked.
                current_house = houses_collection.get()
                # Create the booking itself.
                current_booking = gen.booking_generator(client.id, current_staff.id, current_house.id)
                connection.write(current_booking.insert())
                # Cost = house price * number of booked days.
                days = current_booking.date_out - current_booking.date_in
                current_booking.cost = current_house.price * days.days
                connection.write(current_booking.update())
    # One person may service several houses and one house may be serviced
    # by several people (many-to-many link table).
    for house in houses_list:
        # Each house is serviced by up to 4 staff members.
        for _ in range(random.randint(1, 4)):
            # BUG FIX: houses are serviced by the "staff_house" pool, not by
            # the booking clerks ("staff_booking"); the house pool was
            # otherwise never consumed.
            current_staff = staffs_dict["staff_house"].get()
            current_staffs_houses = gen.staffs_houses_generator(house.id, current_staff.id)
            # Write the link-table row.
            connection.write(current_staffs_houses.insert())
    print("Сгенерировали все данные")
# Script entry point; stray extraction residue ("|") after main() removed —
# it was a syntax error.
if __name__ == "__main__":
    main()
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup

# Read the package metadata files. An explicit encoding makes the build
# reproducible regardless of the host locale (open() otherwise uses the
# platform default and may fail on non-UTF-8 systems).
with open('README.md', encoding='utf-8') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst', encoding='utf-8') as history_file:
    history = history_file.read()

# Newline-separated requirement specifiers; setuptools accepts this form
# directly for install_requires.
with open('requirements.txt', encoding='utf-8') as requirements_file:
    requirements = requirements_file.read()

setup_requirements = ['setuptools_scm', ]
test_requirements = ['pytest>=3', 'pytest-runner']

setup(
    author="USDA ARS Northwest Watershed Research Center",
    author_email='snow@ars.usda.gov',
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Take 2 for pysnobal in pure python",
    entry_points={
        'console_scripts': [
            'pysnobal=pysnobal.cli:main',
        ],
    },
    install_requires=requirements,
    license="CC0 1.0",
    long_description=readme,
    long_description_content_type="text/markdown",
    include_package_data=True,
    keywords='pysnobal',
    name='pysnobal',
    packages=find_packages(include=['pysnobal', 'pysnobal.*']),
    package_data={
        'pysnobal': [
            './pysnobal_core_config.ini'
        ]
    },
    # Version is derived from the git tag/commit via setuptools_scm.
    use_scm_version={
        'local_scheme': 'node-and-date',
    },
    setup_requires=setup_requirements,
    test_suite='pysnobal.tests',
    tests_require=test_requirements,
    url='https://github.com/scotthavens/pysnobal',
    zip_safe=False,
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.