content stringlengths 5 1.05M |
|---|
import sys
def isInteresting(line):
    """Return True if `line` is one of the log lines the parser cares about.

    A line is interesting when it contains any of the known marker strings,
    or when it starts with the per-class result prefix "9 ".
    """
    markers = ["models", "all sites", "training site 3 done", "sensitivity", "exp4UNF"]
    # `any` short-circuits exactly like the original loop; the prefix test
    # replaces the redundant `return False or ...` form.
    return any(m in line for m in markers) or line[:2] == "9 "
def main(argv):
    """Parse a training log and report per-model criteria.

    argv layout: [prog, mode, logfile, targetClass, critID]
      mode        1 = per-site training logs, 2 = single-site log
      logfile     path of the log file to scan
      targetClass class whose "sensitivity" line is extracted (mode 1 only)
      critID      which of the three parsed criteria to keep (0..2)
    """
    classes=["deciduous","healthyfir","sickfir"]  # known class names (not referenced below)
    mode=int(argv[1])
    resDict={}      # model name -> list of extracted criterion values
    numClasses=3
    numSites=3
    targetClass=argv[3]
    critID=int(argv[4])
    if mode==1:# training individual sites, without all sites
        currentArch="noArch"   # NOTE(review): never updated/used afterwards
        modelName="noName"
        with open(argv[2]) as f1:
            for x in f1:
                if isInteresting(x):
                    #print(x.strip())
                    if "models" in x:
                        print(x)
                        # Build a "<arch>LR<rate>" key from fixed positions in the
                        # model filename token (3rd whitespace-separated field).
                        modelName=x.strip().split(" ")[2].split("L")[0].split("l")[1]+"LR"+x.strip().split(" ")[2].split("R")[1].split("e")[0]
                        resDict[modelName]=[]
                        #print(modelName)
                    if "training site 3 done" in x:
                        # NOTE(review): `change` is set but never read — confirm intent.
                        change=True
                    elif targetClass+" sensitivity" in x:
                        print(x)
                        start=4
                        # Three criteria parsed from fixed token positions; the
                        # trailing character (e.g. punctuation) is stripped.
                        allCriteria=(float(x.strip().split(" ")[start][:-1]),float(x.strip().split(" ")[start+3][:-1]),float(x.strip().split(" ")[start+6][:-1]))
                        resDict[modelName].append(allCriteria[critID])
        print(resDict)
        #print("\n")
        # HERE WE SHOULD OUTPUT AVERAGE PER CLASS
        print("weighted average between all sites ")
        bestValue=-1
        bestModel=(0,0,0)
        weights=[45,44,80]   # per-site weights for the weighted average
        total=sum(weights)
        for k,v in resDict.items():
            #print(v)
            try:
                # NOTE(review): this inner loop recomputes and prints the same
                # weighted average numSites times — confirm whether intended.
                for site in range(numSites):
                    weightAv=(weights[0]*v[0]+weights[1]*v[1]+weights[2]*v[2])/total
                    print(str(k)+" "+str(weightAv))
                    if bestValue==-1 or ( weightAv >bestValue):
                        bestValue=weightAv
                        bestModel=(k,weightAv,v)
            except Exception as E:
                # Models with fewer than 3 site results land here (IndexError).
                print("problem when checking all sites "+str(E))
        print(bestValue)
        print(bestModel)
    if mode==2:# take file with just one site and extract error rate for every learning rate.
        currentModel="NOMODEL"
        augm="NOIDEAAUGM"
        with open(argv[2]) as f1:
            for x in f1:
                if isInteresting(x):
                    #print(x.strip())
                    if "models" in x:
                        print(x)
                        # Model key is prefixed with the augmentation tag parsed
                        # from the most recent "exp4UNF" line.
                        modelName=augm+x.strip().split(" ")[2].split("L")[0].split("l")[1]+"LR"+x.strip().split(" ")[2].split("R")[1].split("e")[0]
                        if not modelName in resDict: resDict[modelName]=[]
                        currentModel=modelName
                    #elif x[:2]=="9 ":
                    #print(x.strip().split(" "))
                    # resDict[currentModel].append(float(x.strip().split(" ")[-5][:-1]))
                    if "exp4UNF" in x:
                        print(x)
                        augm="augm"+str(x.strip().split("F")[1].split("a")[1][6:])
                        print(augm)
                    elif "sickfir sensitivity" in x:
                        start=0
                        print(x)
                        # sickfir accuracy
                        #resDict[modelName].append(float(x.strip().split(" ")[4][:-1]))
                        resDict[modelName].append((float(x.strip().split(" ")[4][:-1]),float(x.strip().split(" ")[7][:-1]),float(x.strip().split(" ")[10][:-1])))
        # print(resDict)
        # make average over the 4 sites
        for k,v in resDict.items():
            avs=[0,0,0]
            outString=str(k)
            for x in v:#loop over tuples
                for i in range(3):
                    avs[i]+=x[i]
            for i in range(3):
                outString+=" "+str(avs[i]/len(v))
            print(outString)
        # for k,v in resDict.items():
        #     try:arch=str(k).split("h")[1].split("L")[0]
        #     except Exception as E: arch="NOARCH"
        #     try:lRate=str(k).split("R")[1]
        #     except Exception as E: lRate="NOLR"
        #     try:er=str(v[0])
        #     except Exception as E: er="NOER"
        #     print(arch+" "+lRate+" "+er)
        # bestSickfirSens=-1
        # bestModel=(0,0)
# Script entry point: expects argv = [prog, mode, logfile, targetClass, critID].
if __name__ == '__main__':
    main(sys.argv)
|
class Node(object):
    """A singly linked list cell: one value plus a pointer to its successor."""

    def __init__(self, value):
        # A fresh node is not linked to anything yet.
        self.value, self.next = value, None
class LinkedList:
    # Minimal singly linked list supporting push-at-head and Floyd
    # ("tortoise and hare") cycle detection/removal.
    def __init__(self):
        self.head = None
    def push(self, value):
        # Insert a new node at the head in O(1).
        _node = Node(value)
        _node.next = self.head
        self.head = _node
    def detect_and_remove_loop(self):
        # Detect a cycle with Floyd's algorithm and, if found, unlink it.
        # Lists of length 0 or 1 cannot contain a cycle here.
        if self.head is None:
            return
        if self.head.next is None:
            return
        slow = self.head
        fast = self.head
        # move slow by one pointer and fast by two pointer
        slow = slow.next
        fast = fast.next.next
        while fast is not None:
            # fast reaching the end means the list is acyclic.
            if fast.next is None:
                break
            # pointers meeting inside the loop proves a cycle exists.
            if slow == fast:
                break
            slow = slow.next
            fast = fast.next.next
        if slow == fast:
            # Cycle found; restart `slow` from the head. When slow.next equals
            # fast.next, both point at the node just before the loop start, so
            # `fast` is then the loop's last node.
            slow = self.head
            while slow.next != fast.next:
                slow = slow.next
                fast = fast.next
            # Break the cycle by terminating the last node.
            fast.next = None
|
class node(object):
    """Stack cell: wraps a single value and a link to the cell beneath it."""

    def __init__(self, value = None):
        # Unlinked on creation; Stack.push wires up `next`.
        self.value, self.next = value, None
class Stack(object):
    """Linked-list stack.

    Defect fixed: the original constructor always wrapped its argument in a
    node, so `Stack()` silently held a node whose value was None — `pop()`
    then returned None instead of signalling underflow, and the empty-stack
    branch in `push()` was unreachable. The stack is now genuinely empty
    when no initial value is supplied. Also replaced `== None` with the
    idiomatic `is None`.
    """
    def __init__(self, value = None):
        # Only create a node when an initial value was actually given.
        self.top = node(value) if value is not None else None
    def push(self, value):
        """Place `value` on top of the stack."""
        n1 = node(value)
        if self.top is None:
            self.top = n1
            return
        n1.next = self.top
        self.top = n1
    def pop(self):
        """Remove and return the top value; raise ValueError when empty."""
        if self.top is None:
            raise ValueError('Underflow')
        temp = self.top.value
        self.top = self.top.next
        return temp
    def displayAll(self):
        """Print the stack from top to bottom on one line; error when empty."""
        if self.top is None:
            raise ValueError('Underflow')
        ptr = self.top
        while ptr is not None:
            print(ptr.value, end=' ')
            ptr = ptr.next
        print()
# testing code
# Exercises the stack: push 1..5 (displaying along the way), pop twice,
# then display the remainder (expected output line: "3 2 1").
S = Stack(1)
S.push(2)
S.push(3)
S.displayAll()
S.push(4)
S.push(5)
S.displayAll()
S.pop()
S.pop()
S.displayAll()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: The Soloist
# @Time: 2021-06-02 08:20:59
# @File: /main.py
# @Description:
import sys
import configparser
from pathlib import Path
from notion2md.export_manager import export_cli
# Locate config.ini next to this script and load the Notion token from it.
work_path = Path(__file__).parent
ini_path = work_path / "config.ini"
try:
    conf = configparser.ConfigParser()
    conf.read(ini_path, encoding="utf-8")
    token = conf.get('notion', 'token_v2')
except Exception as e:
    # Config file missing or token not set: prompt once and persist it.
    print("%s is not exist or token_v2 is null." % ini_path, e)
    conf.clear()
    conf.add_section('notion')
    token = input("Please input your token: ").strip()
    conf.set('notion', 'token_v2', token)
    # Fixed: the original passed open(ini_path, 'w') straight to conf.write(),
    # leaking the file handle; `with` guarantees it is flushed and closed.
    with open(ini_path, 'w') as fp:
        conf.write(fp)
if __name__ == "__main__":
    # print(sys.argv)
    # Optional single CLI argument: the Notion page URL to export.
    url = sys.argv[1] if len(sys.argv) == 2 else None
    export_cli(token_v2=token, url=url, bmode=False)
|
# Copyright (c) 2012-2015. The Regents of the University of California (Regents)
# and Richard Plevin. See the file COPYRIGHT.txt for details.
import os
import numpy as np
from pygcam.matplotlibFix import plt
import pandas as pd
import seaborn as sns
from six import iteritems
from six.moves import xrange
from pygcam.config import getParam, getParamAsBoolean
from pygcam.log import getLogger
from pygcam.utils import mkdirs
from .error import PygcamMcsSystemError, PygcamMcsUserError
from .Database import getDatabase, Input
_logger = getLogger(__name__)
DEFAULT_BIN_COUNT = 3
DEFAULT_MAX_TORNADO_VARS = 15
def makePlotPath(value, simId):
    """Build <MCS.PlotDir>/s<simId>/<value>.<MCS.PlotType>, creating the
    per-simulation subdirectory if it does not exist yet."""
    simDir = os.path.join(getParam('MCS.PlotDir'), "s%d" % simId)
    mkdirs(simDir)
    extension = getParam('MCS.PlotType')
    return os.path.join(simDir, "%s.%s" % (value, extension))
def printExtraText(fig, text, loc='top', color='lightgrey', weight='ultralight', fontsize='xx-small'):
"""
Print 'extra' text at the top, bottom, right, or left edge of the figure.
"""
if not text:
return
rot = 0
ha = 'center'
va = 'center'
x = 0.5
y = 0.5
if loc == 'top':
y = 0.98
va = 'top'
elif loc == 'bottom':
y = 0.02
va = 'bottom'
elif loc == 'right':
x = 0.98
ha = 'right'
rot = 270
else: # left
x = 0.02
ha = 'left'
rot = 90
fig.text(x, y, text, color=color, weight=weight, fontsize=fontsize, va=va, ha=ha, rotation=rot)
def plotHistogram(values, xlabel=None, ylabel=None, title=None, xmin=None, xmax=None,
                  extra=None, extraColor='grey', extraLoc='right',
                  hist=True, showCI=False, showMean=False, showMedian=False,
                  color=None, shade=False, kde=True, show=True, filename=None):
    """
    Plot a histogram and/or KDE of `values` (a pandas Series), optionally
    annotating the 95% CI bounds, mean, and median as labeled vertical lines.
    The figure can be shown on screen and/or saved to `filename`; it is
    always closed before returning.
    """
    fig = plt.figure()
    style = "white"
    colorSet = "Set1"
    sns.set_style(style)
    sns.set_palette(colorSet, desat=0.6)
    red, blue, green, purple = sns.color_palette(colorSet, n_colors=4)
    color = blue if color is None else color
    count = values.count()
    # Heuristic bin count scaled to the sample size; None lets distplot choose.
    bins = count // 10 if count > 150 else (count // 5 if count > 50 else (count // 2 if count > 20 else None))
    sns.distplot(values, hist=hist, bins=bins, kde=kde, color=color, kde_kws={'shade': shade})
    #sns.axlabel(xlabel=xlabel, ylabel=ylabel)
    if xlabel:
        plt.xlabel(xlabel) # , size='large')
    if ylabel:
        plt.ylabel(ylabel) # , size='large')
    sns.despine()
    if title:
        t = plt.title(title)
        t.set_y(1.02)  # nudge the title up slightly
    printExtraText(fig, extra, color=extraColor, loc=extraLoc)
    if xmin is not None or xmax is not None:
        ax = plt.gca()
        ax.set_autoscale_on(False)
        ax.set_xlim(xmin, xmax)
    # NOTE(review): the median annotation below only takes effect when showCI
    # or showMean is also set, since it relies on labely/deltax computed here.
    if showCI or showMean:
        ymin, ymax = plt.ylim()
        xmin, xmax = plt.xlim()
        textSize = 9
        labely = ymax * 0.95
        deltax = (xmax-xmin) * 0.01  # offset labels slightly right of their line
        if showCI:
            color = red
            ciLow = np.percentile(values, 2.5)
            ciHigh = np.percentile(values, 97.5)
            plt.axvline(ciLow, color=color, linestyle='solid', linewidth=2)
            plt.axvline(ciHigh, color=color, linestyle='solid', linewidth=2)
            plt.text(ciLow + deltax, labely, '2.5%%=%.2f' % ciLow, size=textSize, rotation=90, color=color)
            plt.text(ciHigh + deltax, labely, '97.5%%=%.2f' % ciHigh, size=textSize, rotation=90, color=color)
        if showMean:
            color = green
            mean = np.mean(values)
            plt.axvline(mean, color=color, linestyle='solid', linewidth=2)
            plt.text(mean + deltax, labely, 'mean=%.2f' % mean, color=color, size=textSize, rotation=90)
        if showMedian:
            color = purple
            median = np.percentile(values, 50)
            labely = ymax * 0.50  # median label sits lower to avoid collisions
            plt.axvline(median, color=color, linestyle='solid', linewidth=2)
            plt.text(median + deltax, labely, 'median=%.2f' % median, color=color, size=textSize, rotation=90)
    if show:
        plt.show()
    if filename:
        _logger.info("plotHistogram writing to: %s", filename)
        fig.savefig(filename)
    plt.close(fig)
def plotTornado(data, colname='value', labelsize=9, title=None, color=None, height=0.8,
                maxVars=DEFAULT_MAX_TORNADO_VARS, rlabels=None, xlabel='Contribution to variance', figsize=None,
                show=True, filename=None, extra=None, extraColor='grey', extraLoc='right', importanceCSV=None):
    '''
    Draw a horizontal-bar "tornado" plot of the given values.
    :param data: A sorted DataFrame or Series indexed by variable name, with
           column named 'value' and if rlabels is set, a column of that
           name holding descriptive labels to display.
    :param colname: (str) the column of `data` holding the plotted values
           (ignored when `data` is a Series)
    :param labelsize: font size for labels
    :param title: If not None, the title to show
    :param color: The color of the horizontal bars
    :param height: Bar height (NOTE(review): currently unused in the body)
    :param maxVars: The maximum number of variables to display
    :param rlabels: If not None, the name of a column holding values to show on the right
    :param xlabel: Label for X-axis
    :param figsize: tuple for desired figure size. Defaults to (12,6) if rlabels else (8,6).
    :param show: If True, the figure is displayed on screen
    :param filename: If not None, the figure is saved to this file
    :param extra: Extra text to display in a lower corner of the plot (see extraLoc)
    :param extraColor: (str) color for extra text
    :param extraLoc: (str) location of extra text, i.e., 'right', or 'left'.
    :param importanceCSV: (str) None, or the name of a file into which to save CSV data used to plot the tornado.
    :return: nothing
    '''
    count, cols = data.shape
    if 0 < maxVars < count:
        data = data[:maxVars] # Truncate the DF to the top "maxVars" rows
        count = maxVars
    # Reverse the order so the larger (abs) values are at the top
    revIndex = list(reversed(data.index))
    data = data.loc[revIndex]
    itemNums = list(range(count))
    # ypos = np.array(itemNums) - 0.08 # goose the values to better center labels
    if not figsize:
        figsize = (12, 6) if rlabels else (8, 6)
    #fig = plt.figure(figsize=figsize)
    #fig = plt.figure(facecolor='white', figsize=figsize)
    #plt.plot()
    # if it's a dataframe, we expect to find the data in the value column
    values = data if isinstance(data, pd.Series) else data[colname]
    if importanceCSV:
        values.to_csv(importanceCSV)
    if color is None:
        color = sns.color_palette("deep", 1)
    # TBD: This looks like it has been resolved; try this again using seaborn
    # tried pandas; most of the following manipulations can be handled in one call, but
    # it introduced an ugly dashed line at x=0 which I didn't see how to remove. Maybe
    # address this again if seaborn adds a horizontal bar chart.
    values.plot(kind="barh", color=sns.color_palette("deep", 1), figsize=figsize,
                xlim=(-1, 1), ylim=(-1, count), xticks=np.arange(-0.8, 1, 0.2))
    plt.xlabel(xlabel)
    # leave extra horizontal room when descriptive labels are drawn on the right
    right = 0.6 if rlabels else 0.9
    plt.subplots_adjust(left=0.3, bottom=0.1, right=right, top=0.9) # more room for rlabels
    fig = plt.gcf()
    ax = plt.gca()
    ax.xaxis.tick_top()
    ax.tick_params(axis='x', labelsize=labelsize)
    ax.tick_params(axis='y', labelsize=labelsize)
    ax.set_yticklabels(data.index)
    ax.set_yticks(itemNums)
    if rlabels:
        # Secondary y-axis on the right shows the descriptive labels.
        ax2 = plt.twinx()
        plt.ylim(-1, count)
        ax2.tick_params(axis='y', labelsize=labelsize)
        ax2.set_yticklabels(data[rlabels])
        ax2.set_yticks(itemNums)
        for t in ax2.xaxis.get_major_ticks() + ax2.yaxis.get_major_ticks():
            t.tick1On = False
            t.tick2On = False
    # show vertical grid lines only
    ax.yaxis.grid(False)
    ax.xaxis.grid(True)
    # Remove tickmarks from both axes
    for t in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    if title:
        plt.title(title, y=1.05) # move title up to avoid tick labels
    printExtraText(fig, extra, loc=extraLoc, color=extraColor)
    if show:
        plt.show()
    if filename:
        _logger.debug("Saving tornado plot to %s" % filename)
        fig.savefig(filename)
    plt.close(fig)
def plotConvergence(simId, expName, paramName, values, show=True, save=False):
    '''
    Examine the first 3 moments (mean, std, skewness) in the data set
    for increasing number (N) of values, growing by the given increment.
    Optionally plot the relationship between each of the moments and N,
    so we can when (if) convergence occurs.
    '''
    _logger.debug("Generating convergence plots...")
    count = values.count()
    results = {'Mean': [], 'Stdev': [], 'Skewness': [], '95% CI': []}
    # Sample the statistics at roughly 20 points, at most every 100 trials.
    increment = min(100, count // 20)
    nValues = list(range(increment, count + increment - 1, increment))
    for N in nValues:
        sublist = values[:N]
        results['Mean'].append(sublist.mean())
        results['Stdev'].append(sublist.std())
        results['Skewness'].append(sublist.skew())
        ciLow = np.percentile(sublist, 2.5)
        ciHigh = np.percentile(sublist, 97.5)
        # Track the width of the 95% interval rather than its endpoints.
        results['95% CI'].append(ciHigh - ciLow)
    # Insert zero value at position 0 for all lists to ensure proper scaling
    nValues.insert(0,0)
    for dataList in results.values():
        dataList.insert(0,0)
    labelsize=12
    # NOTE(review): the loop variable `values` shadows the function parameter;
    # harmless here because the parameter is not used past this point.
    for key, values in iteritems(results):
        plt.clf() # clear previous figure
        ax = plt.gca()
        ax.tick_params(axis='x', labelsize=labelsize)
        ax.tick_params(axis='y', labelsize=labelsize)
        plt.plot(nValues, results[key])
        plt.title("%s" % paramName, size='large')
        ax.yaxis.grid(False)
        ax.xaxis.grid(True)
        plt.xlabel('Trials', size='large')
        plt.ylabel(key, size='large')
        plt.figtext(0.12, 0.02, "SimId=%d, Exp=%s" % (simId, expName),
                    color='black', weight='roman', size='x-small')
        if save:
            filename = makePlotPath("%s-s%02d-%s-%s" % (expName, simId, paramName, key), simId)
            _logger.debug("Saving convergence plot to %s" % filename)
            plt.savefig(filename)
        if show:
            plt.show()
    fig = plt.gcf()
    plt.close(fig)
# Could use series.describe() but I like this format better
def printStats(series):
    """Print count, central moments, extrema, and quantile intervals of a Series."""
    name = series.name
    count = series.count()
    mean = series.mean()
    median = series.median()
    stdev = series.std()
    skew = series.skew()
    minv = series.min()
    maxv = series.max()
    # 95% interval bounds
    ciLow = series.quantile(0.025)
    ciHigh = series.quantile(0.975)
    # NOTE(review): the 0.01/0.99 quantiles span a 98% interval although the
    # output labels it "99% CI" — confirm whether 0.005/0.995 was intended.
    ciLower = series.quantile(0.01)
    ciHigher = series.quantile(0.99)
    print('''
%s:
count: %d
mean: %.2f
median: %.2f
stdev: %.2f
skew: %.2f
min: %.2f
max: %.2f
95%% CI: [%.2f, %.2f]
99%% CI: [%.2f, %.2f]''' % (name, count, mean, median, stdev, skew, minv, maxv,
                            ciLow, ciHigh, ciLower, ciHigher))
def normalizeSeries(series):
    """Scale `series` so its elements sum to 1 (each divided by the total)."""
    return series / series.sum()
def normalizeDF(df):
    '''
    Perform max-min normalization on all columns.
    :param df: (pandas.DataFrame) data to operate on
    :return: (pandas.DataFrame) normalized values, each column mapped to [0, 1].
    '''
    lo = df.min()
    span = df.max() - lo
    return (df - lo) / span
def spearmanCorrelation(inputs, results):
    '''
    Compute the Spearman rank correlation between each input column and the
    result vector.
    :param inputs: (pandas.DataFrame) input values for each parameter and trial
    :param results: (pandas.Series) values for one model result, per trial
    :return: (pandas.Series) rank correlations of each input to the output vector.
    '''
    cols = inputs.columns
    correlations = [results.corr(inputs[name], method='spearman') for name in cols]
    return pd.Series(data=correlations, index=cols, name='spearman')
def plotSensitivityResults(varName, data, filename=None, extra=None, maxVars=None, printIt=True):
    '''
    Prints results and generates a tornado plot with normalized squares of Spearman
    rank correlations between an output variable and all input variables.
    '''
    # Largest contributions first (squaring made 'normalized' all positive).
    data.sort_values(by=['normalized'], ascending=False, inplace=True)
    if printIt:
        print("UNCERTAINTY IMPORTANCE (%s)" % varName)
        print("----------------------")
        print(data.to_string(columns=['spearman', 'value'], float_format="{:4.2f}".format))
    plotTornado(data, title='Sensitivity of %s' % varName, show=False,
                filename=filename, extra=extra, maxVars=maxVars) #, importanceCSV="importance.csv")
# Deprecated?
def plotGroupSensitivityResults(varName, data, filename=None, extra=None, maxVars=None, printIt=True):
    '''
    Sum the normalized contribution to variance for subscripted parameters,
    along with contribution from unsubscripted ones. For example, we sum
    the contributions for "ETA[1,5]" and "ETA[1,6]" into "ETA".
    Fixes: DataFrame.ix was removed from pandas (use .loc); the fillna()
    result was previously discarded (it is not in-place by default).
    '''
    totals = pd.Series(name='totals', dtype=float)
    for idx, row in data.iterrows():
        # Lop off bracketed indices at first '['.
        # e.g., from "ETA[1,5]", we'll extract "ETA"
        pos = idx.find('[')
        paramName = idx if pos < 0 else idx[0:pos]
        # Sum the values; initialize on demand
        try:
            totals[paramName] += row.value
        except KeyError:
            totals[paramName] = row.value
    df = pd.DataFrame(totals)
    negatives = (totals < 0)
    df['sign'] = 1
    df.loc[negatives, 'sign'] = -1          # .loc replaces the removed .ix indexer
    df['absval'] = totals * df['sign']
    df['normalize'] = normalizeSeries(df['absval'])
    df['value'] = df['normalize'] * df['sign']
    # Sort by absolute value
    df.sort_values('absval', ascending=False, inplace=True)
    db = getDatabase()
    with db.sessionScope() as session:
        result = session.query(Input.paramName, Input.description).distinct().all()
    resultDF = pd.DataFrame(result)
    # Fixed: fillna returns a new frame; keep the result instead of dropping it.
    resultDF = resultDF.fillna('')
    df['description'] = resultDF['description']
    if printIt:
        print("\nPARAMETER GROUPS (%s)" % varName)
        print("----------------")
        print(df.to_string(columns=['value', 'description'], float_format="{:4.2f}".format))
    title = 'Sensitivity of %s' % varName
    plotTornado(df, title=title, figsize=None, show=False, filename=filename, maxVars=maxVars, extra=extra)
def plotInputDistributions(simId, inputDF):
    '''Plot the input values individually to test that the distributions are as expected'''
    shade = getParamAsBoolean('MCS.PlotShowShading')
    # One histogram (no KDE, no CI) per input parameter, saved to the sim's plot dir.
    for heading, series in iteritems(inputDF):
        plotHistogram(series, showCI=False,
                      xlabel='Parameter value', ylabel='Probability density',
                      title='Distribution for values of %s' % heading,
                      color=None, hist=True, kde=False, shade=shade,
                      show=False, filename=makePlotPath(heading, simId))
def plotOutputDistribution(simId, expName, resultSeries, resultName, xlabel, trials):
    # Plot the frequency distribution of one model result (with CI, mean, and
    # median annotations) and save it under the simulation's plot directory.
    # NOTE(review): the `xlabel` argument is overwritten with the output units
    # from the database below, so the parameter is effectively unused — confirm
    # whether that is intentional.
    filename = makePlotPath('%s-s%02d-%s-%d-trials' % (expName, simId, resultName, resultSeries.count()), simId)
    showHist = getParamAsBoolean('MCS.PlotShowHistogram')
    showKDE = getParamAsBoolean('MCS.PlotShowKDE')
    showShade = getParamAsBoolean('MCS.PlotShowShading')
    numValues = resultSeries.count()
    db = getDatabase()
    xlabel = db.getOutputUnits(resultName)
    plotHistogram(resultSeries, xlabel=xlabel, ylabel='Probability density',
                  title='Frequency distribution for %s' % resultName,
                  extra='SimId=%d, Exp=%s, Trials=%d/%d' % (simId, expName, numValues, trials),
                  color=None, hist=showHist, kde=showKDE, shade=showShade,
                  showCI=True, showMean=True, showMedian=True, show=False, filename=filename)
# TBD: If row/col are obsolete, this info can now be read from trialData.csv or data.sa
def readParameterValues(simId, trials):
    """Read all parameter values for `simId` into a (trial x parameter) DataFrame."""
    def makeKey(paramName, row, col):
        # row/col subscripts are ignored; the key is just the parameter name.
        return paramName
    # row & col were used in GTAP MCS only...
    #return "%s[%d][%d]" % (paramName, row, col) if row or col else paramName
    db = getDatabase()
    paramTuples = db.getParameters() # Returns paramName, row, col
    paramNames = [makeKey(*tup) for tup in paramTuples] # names like "foo[1][14]"
    inputDF = pd.DataFrame(index=xrange(trials), columns=paramNames, dtype=float)
    _logger.debug("Found %d distinct parameter names" % len(paramNames))
    paramValues = db.getParameterValues(simId, asDataFrame=False)
    numParams = len(paramValues)
    _logger.info('%d parameter values read' % numParams)
    # Scatter each (trial, parameter) value into the frame; cells with no
    # stored value remain NaN.
    for row, col, value, trialNum, pname in paramValues:
        key = makeKey(pname, row, col)
        inputDF[key][trialNum] = value
    return inputDF
def _fixColname(name):
pos = name.find('[')
return name[:pos] if pos >= 0 else name
def exportInputs(exportFile, inputs):
    """Write a copy of `inputs` to CSV with subscripts stripped from column names."""
    df = inputs.copy()
    # drop [x][y] subscripts so columns carry the bare parameter names
    df.columns = [_fixColname(name) for name in df.columns]
    _logger.debug("Exporting data to '%s'", exportFile)
    df.to_csv(exportFile)
# export all available results and their matching inputs for a single scenario,
# in wide format, with 'trialNum' as index, each input/result in a column.
def exportAllInputsOutputs(simId, expName, inputDF, exportFile, sep=','):
    """Write inputs plus every available output for one scenario to CSV.

    :raises PygcamMcsUserError: if any expected result has no values.
    :return: the combined DataFrame that was written.
    """
    df = None
    db = getDatabase()
    resultList = db.getOutputsWithValues(simId, expName)
    inputDF.index.rename('trialNum', inplace=True)
    for resultName in resultList:
        resultDf = db.getOutValues(simId, expName, resultName)
        if resultDf is None:
            raise PygcamMcsUserError('No results were found for sim %d, experiment %s, result %s' % (simId, expName, resultName))
        # Copy inputs for which there are outputs
        if df is None:
            df = inputDF.iloc[resultDf.index].copy()
        # Add each output
        df[resultName] = resultDf[resultName]
    _logger.debug("Exporting inputs and results to '%s'", exportFile)
    df.to_csv(exportFile, sep=sep)
    return df
def exportResults(simId, resultList, expList, exportFile, sep=','):
    """Collect the named results for each experiment into long format
    (columns: value, expName, resultName) and write them to `exportFile`.

    :raises PygcamMcsUserError: if any requested result has no values.
    """
    db = getDatabase()
    df = None
    for expName in expList:
        for resultName in resultList:
            # resultDf has 'trialNum' as index, 'value' holds float value
            resultDf = db.getOutValues(simId, expName, resultName)
            if resultDf is None:
                raise PygcamMcsUserError('No results were found for sim %d, experiment %s, result %s' % (simId, expName, resultName))
            # Add columns needed for boxplots
            resultDf['expName'] = expName
            resultDf['resultName'] = resultName
            resultDf.rename(columns = {resultName:'value'}, inplace=True)
            # Accumulate all (experiment, result) frames row-wise.
            if df is None:
                df = resultDf
            else:
                df = pd.concat([df, resultDf])
    _logger.debug("Exporting results to '%s'", exportFile)
    df.to_csv(exportFile, sep=sep)
#
# Based on ema_workbench/core/utils.py:save_results()
#
def saveForEMA(simId, expNames, resultNames, inputDF, filename):
    """
    Save simulation results to the specified tar.gz file. The results are
    stored as csv files. There is an x.csv, and a csv for each outcome. In
    addition, there is a metadata csv which contains the datatype information
    for each of the columns in the x array. Unlike the version of this function
    in the EMA Workbench, this version collects data from the SQL database to
    generate a file in the required format.
    Fixes: the experiment-metadata tuples were computed twice (the first,
    list-comprehension pass was dead code); tarinfo.size was set to the
    character count of the text rather than the byte length of its UTF-8
    encoding, which corrupts the archive for non-ASCII content.
    :param simId: (int) the id of the simulation
    :param expNames: (list of str) the names of the experiments to save results for
    :param resultNames: (list of str) the names of the model results (outcomes) to save
    :param inputDF: (pandas.DataFrame) model input values, each row holding values for 1 trial
    :param filename: (str) the path of the file
    :raises: IOError if file not found
    :return: none
    """
    from io import BytesIO
    import tarfile
    import time
    def add_file(tgzfile, string_to_add, filename):
        # Encode once so tarinfo.size is the true byte length of the payload.
        payload = string_to_add.encode('UTF-8')
        tarinfo = tarfile.TarInfo(filename)
        tarinfo.size = len(payload)
        tarinfo.mode = 0o644
        tarinfo.mtime = time.time()
        tgzfile.addfile(tarinfo, BytesIO(payload))
    db = getDatabase()
    # InValue.row, InValue.col, InValue.value, Trial.trialNum, Input.paramName
    rows = inputDF.shape[0]
    with tarfile.open(filename, 'w:gz') as z:
        # Write the input values to the zipfile
        expData = inputDF.to_csv(None, sep=',', index=False) # index_label='trialNum'
        add_file(z, expData, 'experiments.csv')
        # Write experiment metadata: one "name,dtype-descr" line per column,
        # e.g. ('A', dtype('int64')) -> "A,<i8".
        dtypes = inputDF.dtypes
        tuples = [(name, dtype.descr[0][1]) for name, dtype in iteritems(dtypes)]
        strings = ["{},{}".format(name, dtype) for name, dtype in tuples]
        fileText = "\n".join(strings) + '\n'
        add_file(z, fileText, 'experiments metadata.csv')
        # Write outcome metadata # TBD: deal with timeseries
        # outcome_meta = ["{},{}".format(outcome, ','.join(outcomes[outcome].shape))
        #                 for outcome in resultNames]
        strings = ["{},{}".format(resultName, rows) for resultName in resultNames]
        fileText = "\n".join(strings) + '\n'
        add_file(z, fileText, "outcomes metadata.csv")
        # Write outcomes: one CSV per (result, experiment) pair
        for expName in expNames:
            for resultName in resultNames:
                outValueDF = db.getOutValues(simId, expName, resultName) # cols are trialNum and value
                allTrialsDF = pd.DataFrame(index=xrange(rows)) # ensure that all trials are represented (with NA if need be)
                allTrialsDF[resultName] = outValueDF[resultName]
                fileText = allTrialsDF.to_csv(None, header=False, index=False)
                fname = "{}-{}.csv".format(resultName, expName)
                add_file(z, fileText, fname)
    print("Results saved successfully to {}".format(filename))
def getCorrDF(inputs, output):
    '''
    Generate a DataFrame with rank correlations between each input vector
    and the given output vector, and sort by abs(correlation), descending.
    :param inputs: (pandas.DataFrame) input values for each parameter and trial
    :param output: (pandas.Series) output values for one result, per trial
    :return: (pandas.DataFrame) two columns, "spearman" and "abs", the prior
       holding the Spearman correlations between each input and the output
       vector, and the latter with the absolute values of these correlations.
       The DataFrame is indexed by variable name and sorted by "abs", descending.
    '''
    corrDF = pd.DataFrame(spearmanCorrelation(inputs, output))
    corrDF['abs'] = corrDF['spearman'].abs()
    return corrDF.sort_values('abs', ascending=False)
def binColumns(inputDF, bins=DEFAULT_BIN_COUNT):
    """Quantize every column of `inputDF` into integer bin labels using pd.cut."""
    columns = inputDF.columns
    binned = pd.DataFrame(columns=columns)
    for name in columns:
        binned[name] = pd.cut(inputDF[name], bins, labels=False)
    return binned
# TBD: Finish refactoring this
class Analysis(object):
    def __init__(self, simId, scenarioNames, resultNames, limit=0):
        # Cache identifying info and open the shared database handle.
        self.simId = simId
        self.scenarioNames = scenarioNames
        self.resultNames = resultNames
        self.limit = limit  # max trials to read; <= 0 means "all trials"
        self.db = getDatabase()
        self.trials = self.db.getTrialCount(simId) if limit <= 0 else limit
        if not self.trials:
            raise PygcamMcsUserError('No trials were found for simId %d' % simId)
        self.inputDF = None  # lazily populated by getInputs()
        self.resultDict = {} # DFs of results for a scenario, keyed by scenario name
    def getInputs(self):
        '''
        Read inputs for the given simId. If already read, return the
        cached DataFrame.
        :return: (pandas.DataFrame) input values for all parameters
        '''
        if self.inputDF is not None:
            return self.inputDF
        # Cache and also bind locally so column cleanup below mutates the cache.
        self.inputDF = df = readParameterValues(self.simId, self.trials)
        # TBD: [x][y] is probably obsolete
        # remove [x][y] subscripts from colnames
        df.columns = [_fixColname(c) for c in df.columns]
        return df
def exportInputs(self, exportFile, columns=None, sep=','):
'''
Export the inputs for the current simulation / scenario. If
provided, limit the set of inputs to the named columns.
:param columns: (iterable of str) names of columns to export
:return: none
'''
df = self.getInputs()
if columns:
df = df[columns]
_logger.debug("Exporting data to '%s'", exportFile)
df.to_csv(exportFile, sep=sep)
def getResults(self, scenarioList=None, resultList=None):
'''
Get the results for the given result names, or, if none are
specified, for the results identified when at instantiation.
Results are cached and thus read only once from the database.
:param scenarioList: (iterable of str) the scenarios for which
to get results
:param resultList: (iterable of str) the results to export
:param sep: (str) column separator to use in output file
:return: none
'''
db = self.db
simId = self.simId
resultDict = self.resultDict
resultList = resultList or self.resultNames
scenarioList = scenarioList or self.scenarioNames
for scenario in scenarioList:
resultDF = resultDict.get(scenario)
for resultName in resultList:
# returns DF with 'trialNum' as index, 'value' holds float value
if resultDF is None or resultName not in resultDF.columns:
values = db.getOutValues(simId, scenario, resultName, limit=self.limit)
if values is None:
raise PygcamMcsUserError(
'No results were found for sim %d, experiment %s, result %s' % (simId, scenario, resultName))
resultDF = values if not resultDF else pd.concat([resultDF, values])
resultDict[scenario] = resultDF
return resultDict
def exportResults(self, exportFile, scenarioList=None, resultList=None, sep=','):
'''
Export the results for the given scenario and result names, or, if not
specified, the ones identified when at instantiation.
:param exportFile: (str) filename to create
:param scenList: (iterable of str) the scenarios for which to export results
:param resultList: (iterable of str) the results to export
:param sep: (str) column separator to use in output file
:return: none
'''
resultDict = self.getResults(scenarioList=scenarioList, resultList=resultList)
exportDF = None
for scenario in scenarioList:
resultDF = resultDict[scenario]
for resultName in resultList:
# The resultDF has 'trialNum' as index, 'value' holds float value.
# Denormalize these to store all scenarios' results in one DF.
df = resultDF[resultName].copy()
df.rename(columns={resultName: 'value'}, inplace=True)
# Add columns needed for boxplots
df['expName'] = scenario
df['resultName'] = resultName
# concatenate each result set below the accumulated data
exportDF = pd.concat([exportDF, df]) if exportDF else df
_logger.debug("Exporting results to '%s'", exportFile)
df.to_csv(exportFile, sep=sep)
    def plotInputDistributions(self):
        '''Plot the input values individually to test that the distributions are as expected'''
        showHist = True
        showKDE = False
        showShade = getParamAsBoolean('MCS.PlotShowShading')
        inputDF = self.getInputs()
        simId = self.simId
        # One histogram per input parameter, saved under the sim's plot dir.
        for heading, series in iteritems(inputDF):
            plotHistogram(series, showCI=False,
                          xlabel='Parameter value', ylabel='Probability density',
                          title='Distribution for values of %s' % heading,
                          color=None, hist=showHist, kde=showKDE, shade=showShade,
                          show=False, filename=makePlotPath(heading, simId))
def plotUncertaintyImportance(self, inputDF, resultSeries, filename=None,
extra=None, printIt=True):
'''
Prints results and generates a tornado plot with normalized squares of Spearman
rank correlations between an output variable and all input variables.
'''
spearman = spearmanCorrelation(inputDF, resultSeries)
data = pd.DataFrame(spearman)
squared = spearman ** 2
data['normalized'] = squared / squared.sum()
data['sign'] = 1
data.ix[(data.spearman < 0), 'sign'] = -1
data['value'] = data.normalized * data.sign # normalized squares with signs restored
# Sort descending by normalized values (all are positive from squaring)
data.sort(columns=['normalized'], ascending=False, inplace=True)
varName = resultSeries.name
if printIt:
print("UNCERTAINTY IMPORTANCE (%s)" % varName)
print("----------------------")
print(data.to_string(columns=['spearman', 'value'], float_format="{:4.2f}".format))
title = 'Sensitivity of %s' % varName
plotTornado(data, title=title, show=False, filename=filename, extra=extra)
def plotParallelCoordinates(self, inputDF, resultSeries, numInputs=None,
                            filename=None, extra=None, inputBins=None,
                            outputLabels=['Low', 'Medium', 'High'],
                            quantiles=False, normalize=True, invert=False,
                            show=False, title=None, rotation=None):
    '''
    Plot a parallel coordinates figure.

    :param inputDF: (pandas.DataFrame) trial inputs
    :param resultSeries: (pandas.Series) results to categorize lines
    :param numInputs: (int) the number of inputs to plot, choosing these
       from the most-highly correlated (or anti-correlated) to the lowest.
       If not provided, all variables in `inputDF` are plotted.
    :param filename: (str) name of graphic file to create
    :param extra: (str) text to draw down the right side, labeling the figure
    :param inputBins: (int) the number of bins to use to quantize inputs
    :param quantiles: (bool) create bins with equal numbers of values rather than
       bins of equal boundary widths. (In pandas terms, use qcut rather than cut.)
    :param normalize: (bool) normalize values to percentages of the range for each var.
    :param invert: (bool) Plot negatively correlated values as (1 - x) rather than (x).
    :param outputLabels: (list of str) labels to assign to outputs (and thus the number
       of bins to group the outputs into.)
    :param title: (str) Figure title
    :param show: (bool) If True, show the figure.
    :param rotation: (number) rotation angle for x-axis tick labels
    :return: none
    '''
    from pandas.plotting import parallel_coordinates

    corrDF = getCorrDF(inputDF, resultSeries)
    numInputs = numInputs or len(corrDF)
    cols = list(corrDF.index[:numInputs])

    # isolate the top-correlated columns
    inputDF = inputDF[cols]

    # trim down to trials with result (in case of failures)
    # .loc replaces the DataFrame.ix indexer (removed in pandas 1.0)
    inputDF = inputDF.loc[resultSeries.index]

    if normalize or invert:
        inputDF = normalizeDF(inputDF)

    if invert:
        for name in cols:
            # flip neg. correlated values to reduce line crossings
            if corrDF.spearman[name] < 0:
                inputDF[name] = 1 - inputDF[name]
                inputDF.rename(columns={name: "(1 - %s)" % name}, inplace=True)
        cols = inputDF.columns

    # optionally quantize inputs into the given number of bins
    plotDF = binColumns(inputDF, bins=inputBins) if inputBins else inputDF.copy()

    # split results into equal-size or equal-quantile bins
    cutFunc = pd.qcut if quantiles else pd.cut
    plotDF['category'] = cutFunc(resultSeries, len(outputLabels), labels=outputLabels)

    colormap = 'rainbow'
    alpha = 0.4
    parallel_coordinates(plotDF, 'category', cols=cols, alpha=alpha,
                         colormap=colormap,
                         )

    fig = plt.gcf()
    fig.canvas.draw()  # so that ticks / labels are generated

    if rotation is not None:
        plt.xticks(rotation=rotation)

    # Labels can come out as follows for, say, 4 bins:
    # [u'', u'0.0', u'0.5', u'1.0', u'1.5', u'2.0', u'2.5', u'3.0', u'']
    # We eliminate the "x.5" labels by substituting '' and convert the remaining
    # numerical values to integers (i.e., eliminating ".0")
    def _fixTick(text):
        if inputBins:
            return '' if (not text or text.endswith('.5')) else str(int(float(text)))
        # If not binning, just show values on Y-axis
        return text

    locs, ylabels = plt.yticks()
    # get_text() is the public Text API; t._text relied on a private attribute
    ylabels = [_fixTick(t.get_text()) for t in ylabels]
    plt.yticks(locs, ylabels)

    if extra:
        printExtraText(fig, extra, loc='top', color='lightgrey', weight='ultralight', fontsize='xx-small')

    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    if title:
        plt.title(title)

    if show:
        plt.show()

    if filename:
        _logger.debug("Saving parallel coordinates plot to %s" % filename)
        plt.savefig(filename, bbox_inches='tight')
        plt.close(fig)
# TBD:
def analyzeSimulationNew(args):
    '''
    Analyze a simulation by reading parameters and results from the database.

    :param args: parsed command-line arguments carrying simId, scenario and
        result names, plotting/export flags, and min/max result filters.
    :return: none
    '''
    simId = args.simId
    expNames = args.expName
    plotHist = args.plot
    stats = args.stats
    importance = args.importance
    groups = args.groups
    plotInputs = args.plotInputs
    convergence = args.convergence
    resultName = args.resultName
    xlabel = args.xlabel
    inputsFile = args.exportInputs
    resultFile = args.resultFile
    exportEMA = args.exportEMA
    minimum = args.min
    maximum = args.max
    parallel = args.parallel

    anaObj = Analysis(args.simId, args.scenarioNames, args.resultNames, args.limit)
    trials = anaObj.trials

    if inputsFile:
        anaObj.exportInputs(inputsFile)

    if not (expNames and resultName):
        raise PygcamMcsUserError("expName and resultName must be specified")

    expList = expNames.split(',')
    resultList = resultName.split(',')

    if resultFile:
        anaObj.exportResults(resultFile, scenarioList=expList, resultList=resultList)
        return

    inputDF = anaObj.getInputs()
    inputRows, inputCols = inputDF.shape

    if exportEMA:
        saveForEMA(simId, expList, resultList, inputDF, exportEMA)
        return

    if plotInputs:
        # Bug fix: plotInputDistributions is a method of Analysis; the bare
        # call had no receiver and would fail (or hit an unrelated function).
        anaObj.plotInputDistributions()

    if not (importance or groups or plotHist or convergence or stats):
        return

    resultsDict = anaObj.getResults(scenarioList=expList, resultList=[resultName])

    for expName in expList:
        resultDF = resultsDict[expName]
        if resultDF is None:
            raise PygcamMcsSystemError('analyze: No results for simId=%d, expName=%s' % (simId, expName))

        # Optionally drop rows whose result lies outside [minimum, maximum]
        if maximum is not None:
            before = resultDF.shape[0]
            resultDF = resultDF[resultDF[resultName] <= maximum]
            after = resultDF.shape[0]
            _logger.debug('Applying maximum value (%f) eliminated %d rows.', maximum, before - after)

        if minimum is not None:
            before = resultDF.shape[0]
            resultDF = resultDF[resultDF[resultName] >= minimum]
            after = resultDF.shape[0]
            _logger.debug('Applying minimum value (%f) eliminated %d rows.', minimum, before - after)

        resultSeries = resultDF[resultName]
        numResults = resultSeries.count()

        if plotHist:
            plotOutputDistribution(simId, expName, resultSeries, resultName, xlabel, trials)

        if stats:
            printStats(resultSeries)  # TBD: use resultSeries.describe() instead?

        if convergence:
            plotConvergence(simId, expName, resultName, resultSeries, show=False, save=True)

        if (importance or groups or inputsFile) and (numResults != trials or numResults != inputRows):
            _logger.info("SimID %d has %d trials, %d input rows, and %d results",
                         simId, trials, inputRows, numResults)

        if importance or groups or parallel:
            # .loc replaces the DataFrame.ix indexer (removed in pandas 1.0)
            inputsWithResults = inputDF.loc[resultDF.index]

            # Drop any inputs with names ending in '-linked' since these are an artifact
            linked = [s for s in inputsWithResults.columns if s.endswith('-linked')]
            if linked:
                inputsWithResults.drop(linked, axis=1, inplace=True)

            extraText = 'SimId=%d, Exp=%s, Trials=%d/%d' % (simId, expName, numResults, trials)

            # TBD: eliminate 'groups' and call it importance instead.
            if importance or groups:
                basename = '%s-s%02d-%s-sensitivity' % (expName, simId, resultName)
                filename = makePlotPath(basename, simId)
                anaObj.plotUncertaintyImportance(inputsWithResults, resultSeries,
                                                 filename=filename, extra=extraText)

            # TBD: drop this from 'analyze' command?
            if parallel:
                basename = '%s-s%02d-%s-parallel' % (expName, simId, resultName)
                filename = makePlotPath(basename, simId)
                # TBD: plot parallel coordinates figure
                anaObj.plotParallelCoordinates(inputsWithResults, resultSeries,
                                               filename=filename, extra=extraText)
def analyzeSimulation(args):
    '''
    Analyze a simulation by reading parameters and results from the database.

    :param args: parsed command-line arguments carrying simId, scenario and
        result names, plotting/export flags, and min/max result filters.
    :return: none
    '''
    simId = args.simId
    expNames = args.expName
    plotHist = args.plot
    stats = args.stats
    importance = args.importance
    groups = args.groups
    plotInputs = args.plotInputs
    convergence = args.convergence
    resultName = args.resultName
    limit = args.limit
    maxVars = args.maxVars
    xlabel = args.xlabel
    inputsFile = args.exportInputs
    resultFile = args.resultFile
    exportEMA = args.exportEMA
    exportAll = args.exportAll
    minimum = args.min
    maximum = args.max

    # Determine which inputs are required for each option
    requireInputs = (exportAll or exportEMA or groups or importance or plotInputs or inputsFile)
    requireScenario = (exportAll or exportEMA or groups or importance or resultFile or plotHist or convergence or stats)
    requireResult = (groups or importance or resultFile or plotHist or convergence or stats)

    if requireResult and not resultName:
        raise PygcamMcsUserError("result name must be specified")

    if requireScenario:
        expList = expNames and expNames.split(',')
        if (not (expList and expList[0])):
            raise PygcamMcsUserError("scenario name must be specified")
    else:
        expList = None

    db = getDatabase()
    trials = db.getTrialCount(simId) if limit <= 0 else limit
    if not trials:
        raise PygcamMcsUserError('No trials were found for simId %d' % simId)

    # inputs are shared across experiments, so gather these before looping over experiments
    if requireInputs:
        inputDF = readParameterValues(simId, trials)
        inputRows, inputCols = inputDF.shape
        _logger.info("Each trial has %d parameters", inputCols)
    else:
        inputDF = None

    if inputsFile:
        exportInputs(inputsFile, inputDF)

    if plotInputs:
        plotInputDistributions(simId, inputDF)

    if exportAll:
        exportAllInputsOutputs(simId, expList[0], inputDF, exportAll)

    if resultFile:
        resultList = resultName.split(',')
        exportResults(simId, resultList, expList, resultFile)

    if exportEMA:
        resultList = resultName.split(',')
        saveForEMA(simId, expList, resultList, inputDF, exportEMA)

    if not (requireScenario and requireResult):
        return

    for expName in expList:
        resultDF = db.getOutValues(simId, expName, resultName, limit=limit)
        if resultDF is None:
            raise PygcamMcsSystemError('analyzeSimulation: No results for simId=%d, expName=%s, resultName=%s' % (simId, expName, resultName))

        # Optionally drop rows whose result lies outside [minimum, maximum]
        if maximum is not None:
            before = resultDF.shape[0]
            resultDF = resultDF[resultDF[resultName] <= maximum]
            after = resultDF.shape[0]
            _logger.debug('Applying maximum value (%f) eliminated %d rows.', maximum, before - after)

        if minimum is not None:
            before = resultDF.shape[0]
            resultDF = resultDF[resultDF[resultName] >= minimum]
            after = resultDF.shape[0]
            _logger.debug('Applying minimum value (%f) eliminated %d rows.', minimum, before - after)

        resultSeries = resultDF[resultName]
        numResults = resultSeries.count()

        if plotHist:
            plotOutputDistribution(simId, expName, resultSeries, resultName, xlabel, trials)

        if stats:
            printStats(resultSeries)

        if convergence:
            plotConvergence(simId, expName, resultName, resultSeries, show=False, save=True)

        if (importance or groups or inputsFile) and (numResults != trials or numResults != inputRows):
            _logger.info("SimID %d has %d trials, %d input rows, and %d results", simId, trials, inputRows, numResults)

        if importance or groups:
            # .loc replaces the DataFrame.ix indexer (removed in pandas 1.0)
            inputsWithResults = inputDF.loc[resultDF.index]

            # Drop any inputs with names ending in '-linked' since these are an artifact
            # Column names can look like 'foobar[0][34]', so we strip off indexing part.
            def _isLinked(colname):
                pos = colname.find('[')
                colname = colname if pos < 0 else colname[0:pos]
                return colname.endswith('-linked')

            linked = list(filter(_isLinked, inputsWithResults.columns))
            if linked:
                inputsWithResults.drop(linked, axis=1, inplace=True)

            # Normalized squared Spearman correlations, with signs restored
            spearman = spearmanCorrelation(inputsWithResults, resultSeries)
            data = pd.DataFrame(spearman)
            data['normalized'] = normalizeSeries(spearman ** 2)
            data['sign'] = 1
            negatives = (data.spearman < 0)
            # .loc replaces the DataFrame.ix indexer (removed in pandas 1.0)
            data.loc[negatives, 'sign'] = -1
            data['value'] = data.normalized * data.sign  # normalized squares with signs restored

            if importance:
                plotSensitivityResults(resultName, data, maxVars=maxVars,
                                       filename=makePlotPath('%s-s%02d-%s-ind' % (expName, simId, resultName), simId),
                                       extra='SimId=%d, Exp=%s, Trials=%d/%d' % (simId, expName, numResults, trials))

            if groups:
                plotGroupSensitivityResults(resultName, data, maxVars=maxVars,
                                            filename=makePlotPath('%s-s%02d-%s-grp' % (expName, simId, resultName), simId),
                                            extra='SimId=%d, Exp=%s, Trials=%d/%d' % (simId, expName, numResults, trials))
|
import pandas as pd
import yfinance as yf
#from tqdm import tqdm
from datetime import datetime
# Load the master ticker-symbol list once at import time.
# NOTE(review): assumes 'Stock_List.csv' exists in the working directory and
# contains a [Symbol] column — confirm against deployment layout.
stockList = pd.read_csv('Stock_List.csv')
stockListLen = len(stockList)
def getStockHistory():
    '''
    Runs through a csv file with stock abreviations
    to download the daily history of every stock in the file.

    By default, this function is looking for a file called
    'Stock_List.csv', located at the root of the directory
    you specified with 'fileDirectory', with a column called [Symbol].
    Without modification, this function will not work on your csv file,
    unless you have it formatted this way.
    '''
    print('Getting the full history of the stock market.')
    arr = stockList['Symbol'].to_numpy()
    start = datetime.now()
    arr_len = len(arr)
    arr_count = 0
    for i in arr.T:
        try:
            stock = yf.Ticker(i)
            hist = stock.history(period="max")
            hist.to_csv('Data/StockHistory/' + i + '.csv')
        except Exception as exc:
            # Best-effort download: skip failing symbols but report them.
            # (A bare `except: pass` also swallowed KeyboardInterrupt/SystemExit
            # and hid every failure silently.)
            print(f'getStockHistory: skipping {i}: {exc}')
        now = datetime.now()
        durration = now - start
        p_d = str(durration)
        arr_count = arr_count + 1
        print(f'{p_d} getStockHistory {arr_count}/{arr_len}: {i}')
def main():
    """Entry point: download the full price history for every listed symbol."""
    getStockHistory()


if __name__ == "__main__":
    main()
|
import sublime
import sublime_plugin
import re
import sys
from collections import defaultdict
class SortTodoCommand(sublime_plugin.TextCommand):
    """Sort a todo.txt-style buffer: unassigned tasks first, then tasks grouped
    by their +project tag (projects alphabetically), then completed tasks
    (lines starting with 'x'), preserving the completed tasks' order."""

    # Matches a '+project' tag. A raw string avoids the invalid escape
    # sequences ('\+', '\w') that a plain string literal produces, and
    # compiling once hoists the pattern out of the per-line loop.
    _PROJECT_RE = re.compile(r'\+(\w+)')

    def run(self, edit):
        for region in [sublime.Region(0, self.view.size())]:
            # Determine the current line ending setting, so we can rejoin the
            # sorted lines using the correct line ending character.
            lend = '\n'  # Default.
            line_endings = self.view.line_endings()
            if line_endings == 'CR':
                lend = '\r'
            elif line_endings == 'Windows':
                lend = '\r\n'

            projects = defaultdict(list)
            unassigned = []
            done = []

            lines = [self.view.substr(r) for r in self.view.lines(region)]
            for line in lines:
                project = self._PROJECT_RE.findall(line)
                if line.startswith('x'):
                    done.append(line)
                elif len(project) == 1:
                    projects[project[0]].append(line)
                elif len(line) > 1:
                    unassigned.append(line)

            output = ''
            # unassigned
            for item in sorted(unassigned):
                output += '%s%s' % (item, lend)
            output += lend * 2
            # projects
            for p_name in sorted(projects):
                for item in sorted(projects[p_name]):
                    output += '%s%s' % (item, lend)
                output += lend * 2
            # done
            for item in done:
                output += '%s%s' % (item, lend)
            output += lend

            self.view.replace(edit, region, output)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class Delta(object):
    '''Brief Diff'''

    # Word forms for the single-letter patch status codes. 'C' (copy) is
    # reported as 'added', matching the original behavior; any unknown code
    # falls back to 'modified'.
    _STATUS_WORDS = {
        'A': 'added',
        'D': 'deleted',
        'M': 'modified',
        'C': 'added',
        'R': 'renamed',
    }

    def __init__(self, repo, diff, patch):
        self.repo = repo
        self.diff = diff
        self._patch = patch
        self.old_sha = patch['old_sha']
        self.new_sha = patch['new_sha']
        self.old_file_sha = patch['old_oid']
        self.new_file_sha = patch['new_oid']
        self.status = patch['status']
        self.old_file_path = patch['old_file_path']
        self.new_file_path = patch['new_file_path']
        self.binary = patch['binary']

    @property
    def status_text(self):
        """Human-readable form of the one-letter status code."""
        return self._STATUS_WORDS.get(self.status, 'modified')
|
def schnet_network(tensors):
    """ Network function for
    SchNet: https://doi.org/10.1063/1.5019779

    TODO: Implement this

    Args:
        tensors: input data (nested tensor from dataset).
        gamma (float): "width" of the radial basis.
        miu_max (float): maximal distance of the radial basis.
        miu_min (float): minimal distance of the radial basis.
        n_basis (int): number of radial basis.
        n_atomic (int): number of nodes to be used in atomic layers.
        n_cfconv (int): number of nodes to be used in cfconv layers.
        T (int): number of interaction blocks.
        pre_level (int): flag for preprocessing:
            0 for no preprocessing;
            1 for preprocess till the cell list nl;
            2 for preprocess all filters (cannot do force training).

    Returns:
        - preprocessed nested tensors if n<0 (NOTE(review): 'n' is not a
          parameter — presumably this means `pre_level`; confirm)
        - prediction tensor if n>=0
    """
    # Unimplemented stub: currently returns None.
    pass
|
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from constents import (
SHORT_CHAR_LENGTH,
USERNAME_MIN_LENGTH,
MEDIUM_CHAR_LENGTH,
VERIFY_CODE_LENGTH,
)
# Resolve the configured user model once at import time.
USER_MODEL = get_user_model()
class LoginFormSerializer(serializers.Serializer):
    """Login form (登录表单): validates the username and password fields."""

    # Username: bounded length; all error messages are user-facing Chinese text.
    username = serializers.CharField(
        min_length=USERNAME_MIN_LENGTH,
        max_length=SHORT_CHAR_LENGTH,
        error_messages={
            "invalid": _("用户名格式有误"),
            "blank": _("用户名不能为空"),
            "max_length": _("用户名过长"),
            "min_length": _("用户名过短"),
            "required": _("用户名不能为空"),
        },
    )
    # Password: only an upper length bound is enforced here.
    password = serializers.CharField(
        max_length=MEDIUM_CHAR_LENGTH,
        error_messages={
            "invalid": _("密码格式有误"),
            "blank": _("密码不能为空"),
            "max_length": _("密码过长"),
            "required": _("密码不能为空"),
        },
    )
class UserInfoSerializer(serializers.ModelSerializer):
    """Serialize basic profile fields of the active user model (用户信息)."""

    class Meta:
        model = USER_MODEL
        fields = ["username", "uid", "date_joined", "avatar", "active_index"]
class RegisterSerializer(serializers.ModelSerializer):
    """User registration (用户注册): validates username, phone, password, code."""

    username = serializers.CharField(max_length=SHORT_CHAR_LENGTH)
    code = serializers.CharField(
        min_length=VERIFY_CODE_LENGTH, max_length=VERIFY_CODE_LENGTH
    )

    class Meta:
        model = USER_MODEL
        fields = ["username", "phone", "password", "code"]

    def validate_username(self, value: str):
        """Reject all-digit names; allow only a-z, A-Z, 0-9, '-' and '_'."""
        if value.isdigit():
            raise serializers.ValidationError(_("用户名不能为纯数字"))
        # 只允许a-z,A-Z,0-9,-,_
        # Bug fix: str.isalpha()/isdigit() also accept non-ASCII letters and
        # digits (e.g. 'é', '٣'), which violates the stated policy, so the
        # check must be restricted to ASCII explicitly.
        for letter in value:
            if letter.isascii() and (letter.isalnum() or letter in "-_"):
                continue
            raise serializers.ValidationError(_("用户名只能包含a-z,A-Z,0-9,-,_"))
        return value

    def validate_code(self, value: str):
        """The verification code must be exactly VERIFY_CODE_LENGTH ASCII digits."""
        # Same ASCII restriction as above: reject non-ASCII Unicode digits.
        if value.isascii() and value.isdigit() and len(value) == VERIFY_CODE_LENGTH:
            return value
        raise serializers.ValidationError(_("验证码格式有误"))
class RePasswordSerializer(RegisterSerializer):
    """Password reset (重置密码): reuses RegisterSerializer's fields and validation."""

    pass
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
class MaskGenerator(nn.Module):
    """Build a target-side padding mask from a source mask and per-sample
    length deltas: each target length is the source length plus its delta,
    clamped to [1, max_len]."""

    def __init__(self):
        super().__init__()
        # Dummy buffer whose .device tracks this module's device placement.
        self.register_buffer("device_info", torch.ones(1))

    def forward(self, src_mask, delta, max_len=None):
        # Source lengths are the number of non-zero mask entries per row.
        lengths = src_mask.sum(1).long() + delta
        if max_len is None:
            max_len = lengths.max().item()
        lengths = torch.clamp(lengths, min=1, max=max_len)
        # positions[j] < lengths[i] marks valid target slots for sample i.
        positions = torch.arange(max_len).to(self.device_info.device)
        tgt_mask = (positions[None, :].repeat(src_mask.size(0), 1)
                    < lengths[:, None]).float().detach()
        return lengths, tgt_mask
|
import random
from collections import deque
import numpy as np
import tensorflow as tf
from agent.forward import Forward
from params import *
_EPSILON = 1e-6 # avoid nan
class Framework(object):
    """Double-DQN training graph (TensorFlow 1.x): an online 'value' network,
    a periodically synchronized 'target' network, and a deque-based
    experience-replay cache."""

    def __init__(self):
        # placeholders for one training batch
        self.inputs = tf.placeholder(tf.float32, INPUT_SHAPE, 'input')
        self.actions = tf.placeholder(tf.int32, [None], 'action')
        self.rewards = tf.placeholder(tf.float32, [None], 'reward')
        self.targets = tf.placeholder(tf.float32, [None], 'targets')
        # Q value eval: online ('value') network output
        self.value_eval = Forward('value')(self.inputs)
        # Q_ target eval: the action is argmax'd from the online net
        # (gradient-stopped), its value read from the 'target' net — the
        # Double-DQN action-selection/evaluation decoupling.
        value_next = tf.stop_gradient(self.value_eval)
        action_next = tf.one_hot(tf.argmax(value_next, axis=1), ACTION_SIZE)
        target_eval = Forward('target')(self.inputs)
        self.target_eval = tf.reduce_sum(target_eval * action_next, axis=1)
        # loss function: squared TD error, restricted to the actions taken
        action_choice = tf.one_hot(self.actions, ACTION_SIZE, 1., 0.)
        action_eval = tf.reduce_sum(
            self.value_eval * action_choice, axis=1)
        loss = tf.squared_difference(self.targets, action_eval)
        self._loss = tf.reduce_sum(loss)
        # train op: Nadam with global-norm gradient clipping, updating only
        # the online ('value') network's trainable variables
        trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'value')
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(self._loss, trainable_variables), MAX_GRAD_NORM)
        optimizer = tf.contrib.opt.NadamOptimizer()
        self._train_op = optimizer.apply_gradients(zip(grads, trainable_variables))
        # update target net params: hard copy value -> target
        eval_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "value")
        target_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "target")
        self._target_params_swap = \
            [tf.assign(n, q) for n, q in zip(target_net_params, eval_net_params)]
        # cache for experience replay
        self.cache = deque(maxlen=MEMORY_SIZE)

    # get random sample from experience pool
    def _get_samples(self):
        """Draw a random minibatch of (state, action, reward, next_state, done)."""
        samples = random.sample(self.cache, BATCH_SIZE)
        state = np.vstack([i[0] for i in samples])
        action = np.squeeze(np.vstack([i[1] for i in samples]))
        reward = np.squeeze(np.vstack([i[2] for i in samples]))
        next_state = np.vstack([i[3] for i in samples])
        done = [i[4] for i in samples]
        return state, action, reward, next_state, done

    def get_deterministic_policy(self, sess, inputs):
        """Greedy action w.r.t. the online network's Q values."""
        value_eval = sess.run(self.value_eval, {self.inputs: inputs})
        return np.argmax(value_eval, axis=1)[0]

    def get_stochastic_policy(self, sess, inputs, epsilon=0.9):
        """Epsilon-greedy: greedy with probability `epsilon`, else uniform random."""
        if np.random.uniform() < epsilon:
            return self.get_deterministic_policy(sess, inputs)
        else:
            return np.random.randint(ACTION_SIZE)

    # update target network params
    def update_target_net(self, sess):
        sess.run(self._target_params_swap)

    # update experience replay pool
    def update_cache(self, state, action, reward, next_state, done):
        self.cache.append((state, action, reward, next_state, done))

    # train, update value network params
    def update_value_net(self, sess):
        state, action, reward, next_state, done = self._get_samples()
        mask = np.array(done).astype('float')
        target_eval = sess.run(self.target_eval, {self.inputs: next_state})
        # terminal transitions (done == 1) contribute reward only; otherwise
        # bootstrap with the discounted target-net estimate
        target = mask * reward + (1 - mask) * (reward + GAMMA * target_eval)
        sess.run(self._train_op, {self.inputs: state, self.actions: action, self.targets: target})
|
import unittest
from unittest import mock
from unittest.mock import patch
import json
from requests import Session
from pvaw.wmi import decode_wmi, get_wmis, WMIInfo
class TestWMI(unittest.TestCase):
    """Tests for pvaw.wmi decode_wmi / get_wmis using canned JSON responses
    (requests.get is mocked, so no network access occurs)."""

    TEST_3_DIGIT_WMI_DECODE_URL = (
        "https://vpic.nhtsa.dot.gov/api/vehicles/DecodeWMI/1FD?format=json"
    )
    TEST_6_DIGIT_WMI_DECODE_URL = (
        "https://vpic.nhtsa.dot.gov/api/vehicles/DecodeWMI/1G9340?format=json"
    )
    TEST_GET_WMIS_URL = "https://vpic.nhtsa.dot.gov/api/vehicles/GetWMIsForManufacturer/honda?format=json"

    def test_wmi_exceptions(self):
        """Non-string and wrong-length inputs raise TypeError / ValueError."""
        with self.assertRaises(TypeError):
            decode_wmi(1)
        with self.assertRaises(ValueError):
            decode_wmi("abcd")
        with self.assertRaises(TypeError):
            get_wmis(1)

    @mock.patch("requests.get")
    def test_decode_wmi_3_digit(self, mock_get):
        """Decode a 3-character WMI and exercise the accessor/formatting APIs."""
        with open("tests/responses/decode_3_digit_wmi_response.json") as f:
            expected_response = json.load(f)
        mock_get.return_value.json.return_value = expected_response
        wmi_info = decode_wmi("1FD")
        # Verify the correct URL was requested
        self.assertTrue(
            mock.call(self.TEST_3_DIGIT_WMI_DECODE_URL) in mock_get.mock_calls
        )
        self.assertEqual(wmi_info.wmi, "1FD")
        self.assertEqual(wmi_info.manufacturer, "FORD MOTOR COMPANY, USA")
        self.assertEqual(wmi_info.vehicle_type, "Incomplete Vehicle")
        expected_results = expected_response["Results"]
        # testing get_results()
        self.assertEqual(expected_results[0], wmi_info.get_results())
        # Making sure that get_df() doesn't error out
        wmi_info.get_df()
        wmi_info.get_df(raw=True)
        wmi_info.get_df(raw=True, drop_na=False)
        # Making sure that string and html reps don't error out
        str(wmi_info)
        wmi_info._repr_html_()

    @mock.patch("requests.get")
    def test_decode_wmi_6_digit(self, mock_get):
        """Decode a 6-character WMI."""
        with open("tests/responses/decode_6_digit_wmi_response.json") as f:
            expected_response = json.load(f)
        mock_get.return_value.json.return_value = expected_response
        wmi_info = decode_wmi("1G9340")
        self.assertTrue(
            mock.call(self.TEST_6_DIGIT_WMI_DECODE_URL) in mock_get.mock_calls
        )
        self.assertEqual(wmi_info.wmi, "1G9340")
        self.assertEqual(wmi_info.manufacturer, "GRYPHON BIKES & CHOPPERS")
        self.assertEqual(wmi_info.vehicle_type, "Motorcycle")

    @mock.patch("requests.get")
    def test_get_wmis(self, mock_get):
        """List WMIs for a manufacturer; each entry is a WMIInfo."""
        with open("tests/responses/get_wmis_response.json") as f:
            expected_response = json.load(f)
        mock_get.return_value.json.return_value = expected_response
        wmi_infos = get_wmis("honda")
        self.assertTrue(mock.call(self.TEST_GET_WMIS_URL) in mock_get.mock_calls)
        for w in wmi_infos:
            self.assertTrue(isinstance(w, WMIInfo))
        first = wmi_infos[0]
        self.assertEqual(first.get_results(), expected_response["Results"][0])
        self.assertEqual(first.wmi, "JHM")
        self.assertEqual(first.manufacturer, "HONDA MOTOR CO., LTD")
        self.assertEqual(first.vehicle_type, "Passenger Car")
        expected_results = expected_response["Results"]
        # testing get_results()
        self.assertEqual(expected_results, wmi_infos.get_results())
        self.assertEqual(expected_results[0], first.get_results())
        # Making sure that get_df() doesn't error out
        wmi_infos.get_df()
        wmi_infos.get_df(raw=True)
        wmi_infos.get_df(raw=True, drop_na=False)
        # Making sure that string and html reps don't error out
        str(wmi_infos)
        wmi_infos._repr_html_()
        str(first)
        first._repr_html_()
|
"""
Rewrite engine to get disjuntive normal form of the rules
"""
from collections import OrderedDict
from functools import singledispatch
from itertools import chain, product
from experta.conditionalelement import AND, OR, NOT
from experta.fieldconstraint import ORFC, NOTFC, ANDFC
from experta.rule import Rule
from experta.fact import Fact
def unpack_exp(exp, op):
    """Yield the elements of `exp`, flattening one level of nested `op`
    instances (elements that are themselves `op` are expanded in place)."""
    for element in exp:
        yield from (element if isinstance(element, op) else (element,))
@singledispatch
def dnf(exp):
    """Fallback case: expressions with no registered handler are returned
    unchanged (they are already in disjunctive normal form)."""
    return exp
@dnf.register(Rule)
def _(exp):
    """Normalize a Rule: repeatedly apply dnf to its conditions until a fixed
    point is reached, then flatten the top-level AND into the condition list."""
    last, current = None, exp.new_conditions(AND(*[dnf(e) for e in exp]))
    # Iterate to a fixed point: a single dnf pass can expose further rewrite
    # opportunities (e.g. after a De Morgan or distributive step).
    while last != current:
        last, current = (current,
                         current.new_conditions(
                             *[dnf(e) for e in current]))
    return current.new_conditions(*unpack_exp(current, AND))
@dnf.register(NOT)
def _(exp):
    """Normalize a negation: double-negation elimination and De Morgan's laws."""
    inner = exp[0]
    if isinstance(inner, NOT):
        # Double negation: NOT(NOT(x)) -> x
        return dnf(inner[0])
    if isinstance(inner, OR):
        # De Morgan: NOT(OR(...)) -> AND(NOT(...))
        return AND(*(NOT(dnf(x)) for x in inner))
    if isinstance(inner, AND):
        # De Morgan: NOT(AND(...)) -> OR(NOT(...))
        return OR(*(NOT(dnf(x)) for x in inner))
    # `exp` is already dnf. We have nothing to do.
    return exp
@dnf.register(OR)
def _(exp):
    """Normalize an OR: collapse single-element ORs and flatten nested ORs."""
    if len(exp) == 1:
        return dnf(exp[0])
    return OR(*(dnf(x) for x in unpack_exp(exp, OR)))
@dnf.register(AND)
def _(exp):
    """Normalize an AND: collapse singletons, distribute over nested ORs
    (cartesian product), otherwise flatten nested ANDs."""
    if len(exp) == 1:
        return dnf(exp[0])
    if any(isinstance(e, OR) for e in exp):
        # Distributive property: AND over OR expands into an OR of ANDs,
        # one conjunct per combination of the OR alternatives.
        parts = [[dnf(x) for x in e] if isinstance(e, OR) else [dnf(e)]
                 for e in exp]
        return OR(*(dnf(AND(*combo)) for combo in product(*parts)))
    return AND(*(dnf(x) for x in unpack_exp(exp, AND)))
@dnf.register(Fact)
def _(exp):
    """Distribute field-level ORFC constraints over a Fact: a Fact containing
    ORFC values expands into an OR of Facts, one per combination of the
    OR'd alternatives."""
    fact_class = exp.__class__
    if any(isinstance(v, ORFC) for v in exp.values()):
        # Split fields into OR-constrained and plain ones. and_part keeps
        # every key (None placeholder for ORFC fields) so the generated
        # Facts preserve the original field order.
        and_part = OrderedDict()
        or_part = OrderedDict()
        for k, v in exp.items():
            if isinstance(v, ORFC):
                or_part[k] = v
                and_part[k] = None
            else:
                and_part[k] = v
        facts = []
        # Cartesian product over the OR alternatives: one Fact per combination.
        for p in product(*or_part.values()):
            current_or = OrderedDict(zip(or_part.keys(), p))
            ces = [(k, dnf(v)) if v is not None else (k, dnf(current_or[k]))
                   for k, v in and_part.items()]
            facts.append(fact_class.from_iter(ces))
        return OR(*facts)
    else:
        # No ORFC values: just normalize each field constraint in place.
        return fact_class.from_iter(((k, dnf(v)) for k, v in exp.items()))
@dnf.register(NOTFC)
def _(exp):
    """Normalize a negated field constraint: double-negation elimination and
    De Morgan's laws at the field-constraint level."""
    inner = exp[0]
    if isinstance(inner, NOTFC):
        # Double negation: NOTFC(NOTFC(x)) -> x
        return dnf(inner[0])
    if isinstance(inner, ORFC):
        # De Morgan: NOTFC(ORFC(...)) -> ANDFC(NOTFC(...))
        return ANDFC(*(NOTFC(dnf(x)) for x in inner))
    if isinstance(inner, ANDFC):
        # De Morgan: NOTFC(ANDFC(...)) -> ORFC(NOTFC(...))
        return ORFC(*(NOTFC(dnf(x)) for x in inner))
    # `exp` is already dnf. We have nothing to do.
    return exp
@dnf.register(ANDFC)
def _(exp):
    """Normalize a field-level AND constraint: collapse singletons, distribute
    over nested ORFCs, otherwise flatten nested ANDFCs."""
    if len(exp) == 1:
        return dnf(exp[0])
    elif any(isinstance(e, ORFC) for e in exp):  # Distributive property
        and_part = []
        or_part = []
        for e in exp:
            if isinstance(e, ORFC):
                # NOTE(review): extend() merges the alternatives of *all* ORFC
                # operands into a single flat list, unlike the AND handler
                # above which takes a full cartesian product — confirm this is
                # intended when more than one ORFC occurs in the same ANDFC.
                or_part.extend(e)
            else:
                and_part.append(e)
        return ORFC(*[dnf(ANDFC(*(and_part + [dnf(e)]))) for e in or_part])
    else:
        return ANDFC(*[dnf(x) for x in unpack_exp(exp, ANDFC)])
|
# Vowel set for the Minion Game (HackerRank): Kevin scores substrings that
# start with a vowel, Stuart those that start with a consonant.
VOWELS = set('AEIOU')


def minion_scores(word):
    """Return (stuart_score, kevin_score) for `word`.

    A substring starting at index i occurs in len(word) - i substrings, so
    each position contributes that count to its player's score.
    """
    stuart = kevin = 0
    n = len(word)
    for i, c in enumerate(word):
        if c in VOWELS:
            kevin += n - i
        else:
            stuart += n - i
    return stuart, kevin


def main():
    """Read one word from stdin and print the winner and score (or 'Draw')."""
    word = input()
    stuart, kevin = minion_scores(word)
    if stuart > kevin:
        print('Stuart', stuart)
    elif kevin > stuart:
        print('Kevin', kevin)
    else:
        print('Draw')


# Guarding the entry point keeps the module importable (e.g. for testing)
# without blocking on stdin; running it as a script behaves as before.
if __name__ == "__main__":
    main()
from dagster import build_assets_job
from hacker_news_assets.assets.comment_stories import comment_stories
from hacker_news_assets.assets.items import comments, stories
from hacker_news_assets.assets.recommender_model import component_top_stories, recommender_model
from hacker_news_assets.assets.user_story_matrix import user_story_matrix
from hacker_news_assets.assets.user_top_recommended_stories import user_top_recommended_stories
from hacker_news_assets.resources import RESOURCES_PROD, RESOURCES_STAGING
# Assets materialized by the story_recommender jobs.
assets = [
    comment_stories,
    user_story_matrix,
    recommender_model,
    component_top_stories,
    user_top_recommended_stories,
]

# Upstream assets consumed here but materialized elsewhere.
source_assets = [comments, stories]

# Same asset graph wired to the production resource set.
story_recommender_prod_job = build_assets_job(
    "story_recommender",
    assets=assets,
    source_assets=source_assets,
    resource_defs=RESOURCES_PROD,
)

# Same asset graph wired to the staging resource set.
story_recommender_staging_job = build_assets_job(
    "story_recommender",
    assets=assets,
    source_assets=source_assets,
    resource_defs=RESOURCES_STAGING,
)
|
# -*- coding: utf-8 -*-
"""
:mod:`papy.core`
================
This module provides classes and functions to construct and run a **PaPy**
pipeline.
"""
#
from types import FunctionType
from inspect import isbuiltin, getsource
from itertools import izip, imap, chain, repeat, tee
from threading import Thread, Event, Lock
from multiprocessing import TimeoutError
from collections import defaultdict
from logging import getLogger
from time import time
import itertools, os, sys
from numap.NuMap import _inject_func, imports, _Weave
# self-imports
from graph import DictGraph
from util.codefile import I_SIG, L_SIG, P_LAY, P_SIG, W_SIG
from util.config import get_defaults
from util.runtime import get_runtime
class WorkerError(Exception):
    """
    Exceptions raised or related to ``Worker`` instances.
    """
class PiperError(Exception):
    """
    Exceptions raised or related to ``Piper`` instances.
    """
class DaggerError(Exception):
    """
    Exceptions raised or related to ``Dagger`` instances.
    """
class PlumberError(Exception):
    """
    Exceptions raised or related to ``Plumber`` instances.
    """
class Dagger(DictGraph):
"""
The ``Dagger`` is a directed acyclic graph. It defines the topology of a
``PaPy`` pipeline / workflow. It is a subclass of ``DictGraph``.
``DictGraph`` edges are called within the ``Dagger`` pipes and have an
inverted direction which reflects dataflow not dependency. Edges can be
thought of as dependencies, while pipes as dataflow between ``Pipers`` or
nodes of the graph.
Arguments:
- pipers(sequence) [default: ``()``] A sequence of valid ``add_piper``
inputs (see the documentation for the ``add_piper`` method).
- pipes(sequence) [default: ``()``] A sequence of valid ``add_pipe``
inputs (see the documentation for the ``add_piper`` method).
"""
def __init__(self, pipers=(), pipes=(), xtras=None):
    """
    Initialize the graph: log its creation, then add the given ``Pipers``
    and pipes.

    Arguments:

    - pipers(sequence) [default: ``()``] valid ``add_piper`` inputs.
    - pipes(sequence) [default: ``()``] valid ``add_pipe`` inputs.
    - xtras(sequence) [default: ``None``] forwarded to ``add_pipers``.
    """
    self.log = getLogger("papy")
    self.log.debug('Creating %s from %s and %s' % \
                   (repr(self), pipers, pipes))
    self.add_pipers(pipers, xtras)
    self.add_pipes(pipes)
def __repr__(self):
    """
    A short but unique representation (uses id() to disambiguate instances).
    """
    return 'Dagger(%s)' % id(self)
def __str__(self):
    """
    A long descriptive representation listing all ``Pipers`` (postorder)
    and all pipes (edges) of the graph.
    """
    pipers = "\n".join('\t\t' + repr(p) + ' ' for p in self.postorder())
    pipes = "\n".join('\t\t' + repr(p[1]) + '>>>' + repr(p[0])
                      for p in self.edges())
    # The original relied on implicit adjacent-literal concatenation here;
    # explicit '+' makes the assembly unambiguous.
    return (repr(self) + "\n" +
            "\tPipers:\n" + pipers + '\n' +
            "\tPipes:\n" + pipes)
def children_after_parents(self, piper1, piper2):
    """
    Custom compare function. Returns ``1`` if the first ``Piper`` instance
    is upstream of the second ``Piper`` instance, ``-1`` if the first
    ``Piper`` is downstream of the second ``Piper`` and ``0`` if the two
    ``Pipers`` are independent.

    Arguments:

    - piper1(``Piper``) ``Piper`` instance.
    - piper2(``Piper``) ``Piper`` instance.
    """
    # Guard-clause form: check reachability in each direction in turn.
    if piper1 in self[piper2].deep_nodes():
        return 1
    if piper2 in self[piper1].deep_nodes():
        return -1
    return 0
def resolve(self, piper, forgive=False):
    """
    Given a ``Piper`` instance or the ``id`` of the ``Piper``. Returns the
    ``Piper`` instance if it can be resolved else raises a ``DaggerError``
    or returns ``False`` depending on the "forgive" argument.

    Arguments:

    - piper(``Piper`` or id(``Piper``)) a ``Piper`` instance or its id to be
      found in the ``Dagger``.
    - forgive(``bool``) [default: ``False``] If "forgive" is ``False`` a
      ``DaggerError`` is raised whenever a ``Piper`` cannot be resolved in
      the ``Dagger``. If "forgive" is ``True`` then ``False`` is returned.
    """
    try:
        if piper in self:
            resolved = piper
        else:
            # Fall back to lookup by object id; IndexError if no node matches.
            resolved = [p for p in self if id(p) == piper][0]
    except (TypeError, IndexError):
        # TypeError: `piper` was not hashable/comparable for the `in` test.
        resolved = False
    if resolved:
        self.log.debug('%s resolved a piper from %s' % (repr(self), piper))
    else:
        self.log.debug('%s could not resolve a piper from %s' % \
                       (repr(self), repr(piper)))
        if not forgive:
            raise DaggerError('%s could not resolve a Piper from %s' % \
                              (repr(self), repr(piper)))
        resolved = False
    return resolved
def connect(self, datas=None):
    """
    Connects ``Pipers`` in the order input -> output. See ``Piper.connect``.
    According to the pipes (topology). If "datas" is given will connect the
    input ``Pipers`` to the input data see: ``Dagger.connect_inputs``.

    Arguments:

    - datas(sequence) [default: ``None``] valid sequence of input data.
      see: ``Dagger.connect_inputs``.
    """
    # optionally hook up external data first
    if datas:
        self.connect_inputs(datas)
    # connect the remaining pipers
    postorder = self.postorder()
    self.log.debug('%s trying to connect in the order %s' % \
                   (repr(self), repr(postorder)))
    for piper in postorder:
        # skip pipers that are already connected or have no inputs
        if piper.connected or not self[piper].nodes():
            continue
        # 1. sort inputs by index in postorder
        inputs = [p for p in postorder if p in self[piper].nodes()]
        # 2. sort so all parents come before children; mind that the top
        #    of a pipeline is a child!
        inputs.sort(cmp=self.children_after_parents)
        piper.connect(inputs)
    self.log.debug('%s succesfuly connected' % repr(self))
def connect_inputs(self, datas):
    """
    Connects input ``Pipers`` to "datas" input data in the correct order
    determined, by the ``Piper.ornament`` attribute and the ``Dagger._cmp``
    function.

    It is assumed that the input data is in the form of an iterator and
    that all inputs have the same number of input items. A pipeline will
    **deadlock** otherwise.

    Arguments:

    - datas (sequence of sequences) An ordered sequence of inputs for
      all input ``Pipers``.
    """
    input_pipers = self.get_inputs()
    self.log.debug('%s trying to connect inputs in the order %s' % \
                   (repr(self), repr(input_pipers)))
    # izip stops at the shorter sequence, so surplus data items (or surplus
    # pipers) are silently ignored here.
    for input_piper, data in izip(input_pipers, datas):
        input_piper.connect([data])
    self.log.debug('%s succesfuly connected inputs' % repr(self))
def disconnect(self, forced=False):
    """
    Given the pipeline topology disconnects ``Pipers`` in the order output
    -> input. This also disconnects inputs. See ``Dagger.connect``,
    ``Piper.connect`` and ``Piper.disconnect``. If "forced" is ``True``
    ``NuMap`` instances will be emptied.

    Arguments:

    - forced(``bool``) [default: ``False``] If set ``True`` all tasks from
      all ``NuMaps`` instances used in the ``Dagger`` will be removed even
      if they did not belong to this ``Dagger``.
    """
    # materialize the reversed order: previously this held a reversed()
    # iterator, so the debug log below printed an opaque iterator object
    # instead of the actual disconnection order.
    reversed_postorder = list(reversed(self.postorder()))
    self.log.debug('%s trying to disconnect in the order %s' % \
                   (repr(self), repr(reversed_postorder)))
    for piper in reversed_postorder:
        if piper.connected:
            # we don't want to trigger an exception
            piper.disconnect(forced)
    self.log.debug('%s succesfuly disconnected' % repr(self))
def start(self):
    """
    Given the pipeline topology starts ``Pipers`` in the order input ->
    output. See ``Piper.start``. ``Pipers`` instances are started in two
    stages, which allows them to share ``NuMaps``.
    """
    # top -> bottom of pipeline; run stage group (0, 1) over every piper
    # before any piper enters stage (2,).
    pipers = self.postorder()
    for stage_group in ((0, 1), (2,)):
        for piper in pipers:
            piper.start(stages=stage_group)
def stop(self):
    """
    Stops the ``Pipers`` according to pipeline topology.

    Order of operations: (1) force-stop the input pipers, (2) drain the
    output pipers until each raises ``StopIteration``, (3) stop all
    intermediate pipers, (4) finally stop the input pipers' NuMaps.
    """
    self.log.debug('%s begins stopping routine' % repr(self))
    self.log.debug('%s triggers stopping in input pipers' % repr(self))
    inputs = self.get_inputs()
    for piper in inputs:
        # forced=True: inputs may not have consumed all their data yet
        piper.stop(forced=True)
    self.log.debug('%s pulls output pipers until stop' % repr(self))
    outputs = self.get_outputs()
    while outputs:
        for piper in outputs:
            try:
                # for i in xrange(stride)?
                piper.next()
            except StopIteration:
                # NOTE(review): this removes from "outputs" while the for
                # loop iterates it, which skips the following element; the
                # enclosing while loop retries until the list is empty, so
                # draining still terminates.
                outputs.remove(piper)
                self.log.debug("%s stopped output piper: %s" % \
                               (repr(self), repr(piper)))
                continue
            except Exception, excp:
                # best-effort drain: log and keep pulling other outputs
                self.log.debug("%s %s raised an exception: %s" % \
                               (repr(self), piper, excp))
    self.log.debug("%s stops the remaining pipers" % repr(self))
    postorder = self.postorder()
    for piper in postorder:
        if piper not in inputs:
            piper.stop(ends=[0])
    self.log.debug("%s finishes stopping of input pipers" % repr(self))
    for piper in inputs:
        # only parallel pipers have an imap with a stop method
        if hasattr(piper.imap, 'stop'):
            piper.imap.stop(ends=[0])
    self.log.debug('%s finishes stopping routine' % repr(self))
def get_inputs(self):
    """
    Returns ``Piper`` instances, which are inputs to the pipeline i.e. have
    no incoming pipes (outgoing dependency edges).
    """
    input_pipers = [piper for piper in self.postorder()
                    if not self.outgoing_edges(piper)]
    self.log.debug('%s got input pipers %s' % (repr(self), input_pipers))
    return input_pipers
def get_outputs(self):
    """
    Returns ``Piper`` instances, which are outputs to the pipeline i.e. have
    no outgoing pipes (incoming dependency edges).
    """
    output_pipers = [piper for piper in self.postorder()
                     if not self.incoming_edges(piper)]
    self.log.debug('%s got output pipers %s' % (repr(self), output_pipers))
    return output_pipers
def add_piper(self, piper, xtra=None, create=True, branch=None):
    """
    Adds a ``Piper`` instance to this ``Dagger``, but only if the ``Piper``
    is not already there. Optionally creates a new ``Piper`` if the "piper"
    argument is valid for the ``Piper`` constructor. Returns a ``tuple``
    (new_piper_created, piper_instance) indicating whether a new ``Piper``
    has been created and the instance of the added ``Piper``. Optionally
    takes "branch" and "xtra" arguments for the topological node in the
    graph.

    Arguments:

    - piper(``Piper``, ``Worker`` or id(``Piper``)) ``Piper`` instance or
      object which will be converted to a ``Piper`` instance.
    - create(``bool``) [default: ``True``] Should a new ``Piper`` be
      created if "piper" cannot be resolved in this ``Dagger``?
    - xtra(``dict``) [default: ``None``] Dictionary of ``graph.Node``
      properties.
    """
    self.log.debug('%s trying to add piper %s' % (repr(self), piper))
    # reuse an already-known piper when possible
    piper = (self.resolve(piper, forgive=True) or piper)
    if not isinstance(piper, Piper):
        if not create:
            self.log.error('%s cannot resolve a piper from %s' % \
                           (repr(self), repr(piper)))
            raise DaggerError('%s cannot resolve a piper from %s' % \
                              (repr(self), repr(piper)))
        # attempt to build a new Piper from the given object
        try:
            piper = Piper(piper)
        except PiperError:
            self.log.error('%s cannot resolve or create a piper from %s' % \
                           (repr(self), repr(piper)))
            raise DaggerError('%s cannot resolve or create a piper from %s' % \
                              (repr(self), repr(piper)))
    new_piper_created = self.add_node(piper, xtra, branch)
    if new_piper_created:
        self.log.debug('%s added piper %s' % (repr(self), piper))
    return (new_piper_created, piper)
def del_piper(self, piper, forced=False):
    """
    Removes a ``Piper`` from the ``Dagger`` instance.

    Arguments:

    - piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
      instance id.
    - forced(bool) [default: ``False``] If "forced" is ``True``, will not
      raise a ``DaggerError`` if the ``Piper`` hase outgoing pipes and
      will also remove it.
    """
    self.log.debug('%s trying to delete piper %s' % \
                   (repr(self), repr(piper)))
    try:
        piper = self.resolve(piper, forgive=False)
    except DaggerError:
        self.log.error('%s cannot resolve piper from %s' % \
                       (repr(self), repr(piper)))
        raise DaggerError('%s cannot resolve piper from %s' % \
                          (repr(self), repr(piper)))
    # refuse to delete a piper that still has down-stream consumers,
    # unless the caller explicitly forces the deletion
    has_downstream = bool(self.incoming_edges(piper))
    if has_downstream and not forced:
        self.log.error('%s piper %s has down-stream pipers (use forced =True to override)' % \
                       (repr(self), piper))
        raise DaggerError('%s piper %s has down-stream pipers (use forced =True to override)' % \
                          (repr(self), piper))
    self.del_node(piper)
    self.log.debug('%s deleted piper %s' % (repr(self), piper))
def add_pipe(self, pipe, branch=None):
    """
    Adds a pipe (A, ..., N) which is an N-``tuple`` tuple of ``Pipers``
    instances. Adding a pipe means to add all the ``Pipers`` and connect
    them in the specified left to right order.

    The direction of the edges in the ``DictGraph`` is reversed compared to
    the left to right data-flow in a pipe.

    Arguments:

    - pipe(sequence) N-``tuple`` of ``Piper`` instances or objects which
      are valid ``add_piper`` arguments. See: ``Dagger.add_piper`` and
      ``Dagger.resolve``.
    """
    #TODO: Check if consume/spawn/produce is right!
    self.log.debug('%s adding pipe: %s' % (repr(self), repr(pipe)))
    for i in xrange(len(pipe) - 1):
        # edge points downstream -> upstream (reverse of the data flow)
        edge = (pipe[i + 1], pipe[i])
        # resolve or create both ends; add_piper returns (created, piper)
        edge = (self.add_piper(edge[0], create=True, branch=branch)[1], \
                self.add_piper(edge[1], create=True, branch=branch)[1])
        # reject the edge if the upstream end is already reachable from
        # the downstream end - that would close a cycle in the DAG
        if edge[0] in self.dfs(edge[1], []):
            self.log.error('%s cannot add the %s>>>%s edge (introduces a cycle)' % \
                           (repr(self), edge[0], edge[1]))
            raise DaggerError('%s cannot add the %s>>>%s edge (introduces a cycle)' % \
                              (repr(self), edge[0], edge[1]))
        self.add_edge(edge)
        # clear traversal marks left by the dfs above
        self.clear_nodes() #dfs
        self.log.debug('%s added the %s>>>%s edge' % \
                       (repr(self), edge[0], edge[1]))
def del_pipe(self, pipe, forced=False):
    """
    Deletes a pipe (A, ..., N) which is an N-``tuple`` of ``Piper``
    instances. Deleting a pipe means to delete all the connections between
    ``Pipers`` and to delete all the ``Pipers``. If "forced" is ``False``
    only ``Pipers`` which are not used anymore (i.e. have not downstream
    ``Pipers``) are deleted.

    The direction of the edges in the ``DictGraph`` is reversed compared to
    the left to right data-flow in a pipe.

    Arguments:

    - pipe(sequence) N-``tuple`` of ``Piper`` instances or objects which
      can be resolved in the ``Dagger`` (see: ``Dagger.resolve``). The
      ``Pipers`` are removed in the order from right to left.
    - forced(``bool``) [default: ``False``] The forced argument will be
      given to the ``Dagger.del_piper`` method. If "forced" is ``False``
      only ``Pipers`` with no outgoing pipes will be deleted.
    """
    self.log.debug('%s removes pipe%s forced: %s' % \
                   (repr(self), repr(pipe), forced))
    # walk the pipe right-to-left, edge by edge
    pipe = list(reversed(pipe))
    for left, right in zip(pipe[:-1], pipe[1:]):
        edge = (self.resolve(left), self.resolve(right))
        self.del_edge(edge)
        self.log.debug('%s removed the %s>>>%s edge' % \
                       (repr(self), edge[0], edge[1]))
        try:
            self.del_piper(edge[0], forced)
            self.del_piper(edge[1], forced)
        except DaggerError:
            # piper still in use (or already gone) - leave it in place
            pass
def add_pipers(self, pipers, *args, **kwargs):
    """
    Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the
    specified order. Takes optional arguments for ``Dagger.add_piper``.

    Arguments:

    - pipers(sequence of valid ``add_piper`` arguments) Sequence of
      ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to
      the ``Dagger`` in the left to right order.
    """
    # simple left-to-right delegation to add_piper
    for piper_ in pipers:
        self.add_piper(piper_, *args, **kwargs)
def del_pipers(self, pipers, *args, **kwargs):
    """
    Deletes a sequence of ``Pipers`` instances from the ``Dagger`` in the
    reverse of the specified order. Takes optional arguments for
    ``Dagger.del_piper``.

    Arguments:

    - pipers (sequence of valid ``del_piper`` arguments) Sequence of
      ``Pipers`` or valid ``Dagger.del_piper`` arguments to be removed
      from the ``Dagger`` in the right to left order.
    """
    # iterate a reversed view instead of calling pipers.reverse(), which
    # mutated the caller's list in place as an unwanted side effect.
    for piper in reversed(pipers):
        self.del_piper(piper, *args, **kwargs)
def add_pipes(self, pipes, *args, **kwargs):
    """
    Adds a sequence of pipes to the ``Dagger`` in the specified order.
    Takes optional arguments for ``Dagger.add_pipe``.

    Arguments:

    - pipes(sequence of valid ``add_pipe`` arguments) Sequence of pipes
      or other valid ``Dagger.add_pipe`` arguments to be added to the
      ``Dagger`` in the left to right order.
    """
    # simple left-to-right delegation to add_pipe
    for pipe_ in pipes:
        self.add_pipe(pipe_, *args, **kwargs)
def del_pipes(self, pipes, *args, **kwargs):
    """
    Deletes a sequence of pipes from the ``Dagger`` in the specified order.
    Takes optional arguments for ``Dagger.del_pipe``.

    Arguments:

    - pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
      other valid ``Dagger.del_pipe`` arguments to be removed from the
      ``Dagger`` in the left to right order.
    """
    for pipe in pipes:
        # BUG FIX: was "self.del_pipe(pipe * args, **kwargs)" - the missing
        # comma multiplied the pipe by the args tuple instead of forwarding
        # the positional arguments to del_pipe.
        self.del_pipe(pipe, *args, **kwargs)
class Plumber(Dagger):
    """
    The ``Plumber`` is a subclass of ``Dagger`` and ``Graph`` with added
    run-time methods and a high-level interface for working with ``PaPy``
    pipelines.

    Arguments:

    - dagger(``Dagger`` instance) [default: ``None``] An optional ``Dagger``
      instance. if ``None`` a new one is created.
    """
    def _finish(self, ispausing):
        """
        (internal) executes when last output piper raises ``StopIteration``.
        """
        if ispausing:
            self.log.debug('%s paused' % repr(self))
        else:
            # not pausing, so the input data is genuinely exhausted
            self._finished.set()
            self.log.debug('%s finished' % repr(self))
    @staticmethod
    def _plunge(tasks, pausing, finish):
        """
        (internal) calls the next method of weaved tasks until they are finished
        or The ``Plumber`` instance is stopped see: ``Dagger.chinkup``.
        """
        # If no result received either not started or start & stop
        # could have been called before the plunger thread
        while True:
            if pausing():
                tasks.stop()
            try:
                tasks.next()
            except StopIteration:
                finish(pausing())
                break
    def __init__(self, logger_options=None, **kwargs):
        """
        Initializes run-time state events and delegates topology setup to
        ``Dagger.__init__``.

        Arguments:

        - logger_options(``dict``) [default: ``None``] Optional options for
          the logger (currently unused). A ``None`` default replaces the
          previous mutable ``{}`` default argument.
        """
        self._started = Event() # after start till stop
        self._running = Event() # after run till pause
        self._pausing = Event() # during pause
        self._finished = Event() # after finishing the input
        #start_logger(**(logger_options or {}))
        self.log = getLogger("papy")
        # init
        self.filename = None
        #TODO: check if this works with and the stats attributes are correctly
        # set for a predefined dagger.
        Dagger.__init__(self, **kwargs)
    def _code(self):
        """
        (internal) generates imports, code and runtime calls to save a pipeline.
        """
        icode, tcode = '', '' # imports, task code
        icall, pcall = '', '' # imap calls, piper calls
        tdone, idone = [], [] # task done, imap done
        for piper in self:
            p = piper
            w = piper.worker
            i = piper.imap
            # only parallel pipers (NuMaps) carry a name attribute
            in_ = i.name if hasattr(i, 'name') else False
            if in_ and in_ not in idone:
                icall += I_SIG % (in_, i.worker_type, i.worker_num, i.stride, \
                                  i.buffer, i.ordered, i.skip, in_)
                idone.append(in_)
            ws = W_SIG % (",".join([t.__name__ for t in w.task]), w.args, w.kwargs)
            pcall += P_SIG % (p.name, ws, in_, p.consume, p.produce, p.spawn, \
                              p.timeout, p.branch, p.debug, p.name, p.track)
            for t in w.task:
                if (t in tdone) or not t:
                    continue
                tm, tn = t.__module__, t.__name__
                if (tm == '__builtin__') or hasattr(p, tn):
                    continue
                if tm == '__main__' or tm == self.filename:
                    # locally defined function: embed its source
                    tcode += getsource(t)
                else:
                    # importable function: emit an import statement
                    icode += 'from %s import %s\n' % (tm, tn)
                tdone.append(t)
        pipers = [p.name for p in self]
        pipers = '[%s]' % ", ".join(pipers)
        pipes = [L_SIG % (d.name, s.name) for s, d in self.edges()]
        pipes = '[%s]' % ", ".join(pipes) # pipes
        xtras = [str(self[p].xtra) for p in self]
        xtras = '[%s]' % ",".join(xtras) # node xtra
        return (icode, tcode, icall, pcall, pipers, xtras, pipes)
    def __repr__(self):
        """
        A short but unique representation.
        """
        return "Plumber(%s)" % super(Plumber, self).__repr__()
    def __str__(self):
        """
        A long descriptive representation.
        """
        return super(Plumber, self).__str__()
    def save(self, filename):
        """
        Saves pipeline as a Python source code file.

        Arguments:

        - filename(``path``) Path to save the pipeline source code.
        """
        # use a context manager so the handle is closed even if
        # _code() or the write raises
        with open(filename, 'wb') as handle:
            handle.write(P_LAY % self._code())
    def load(self, filename):
        """
        Instanciates (loads) pipeline from a source code file.

        Arguments:

        - filename(``path``) location of the pipeline source code.
        """
        dir_name = os.path.dirname(filename)
        mod_name = os.path.basename(filename).split('.')[0]
        self.filename = mod_name
        sys.path.insert(0, dir_name)
        mod = __import__(mod_name)
        sys.path.remove(dir_name) # do not pollute the path.
        pipers, xtras, pipes = mod.pipeline()
        self.add_pipers(pipers, xtras)
        self.add_pipes(pipes)
    def start(self, datas):
        """
        Starts the pipeline by connecting the input ``Pipers`` of the pipeline
        to the input data, connecting the pipeline and starting the ``NuMap``
        instances.

        The order of items in the "datas" argument sequence should correspond
        to the order of the input ``Pipers`` defined by ``Dagger._cmp`` and
        ``Piper.ornament``.

        Arguments:

        - datas(sequence) A sequence of external input data in the form of
          sequences or iterators.
        """
        if not self._started.isSet() and \
           not self._running.isSet() and \
           not self._pausing.isSet():
            # Plumber statistics
            self.stats = {}
            self.stats['start_time'] = None
            self.stats['run_time'] = None
            # connects input pipers to external data
            self.connect_inputs(datas)
            # connects pipers within the pipeline
            self.connect()
            # make pointers to results collected for pipers by imaps
            self.stats['pipers_tracked'] = {}
            for piper in self.postorder():
                if hasattr(piper.imap, '_tasks_tracked') and piper.track:
                    self.stats['pipers_tracked'][piper] = \
                        [piper.imap._tasks_tracked[t.task] for t in piper.imap_tasks]
            self.stats['start_time'] = time()
            # starts the Dagger
            # this starts Pipers and NuMaps
            super(Plumber, self).start()
            # transitioning to started state
            self._started.set()
            self._finished.clear()
        else:
            raise PlumberError
    def run(self):
        """
        Executes a started pipeline by pulling results from it's output
        ``Pipers``. Processing nodes i.e. ``Pipers`` with the ``track``
        attribute set ``True`` will have their returned results stored within
        the ``Dagger.stats['pipers_tracked']`` dictionary. A running pipeline
        can be paused.
        """
        # remove non-block results for end tasks
        if self._started.isSet() and \
           not self._running.isSet() and \
           not self._pausing.isSet() and \
           not self._finished.isSet():
            stride = 1 # FIXME
            tasks = self.get_outputs()
            wtasks = _Weave(tasks, repeats=stride)
            self._plunger = Thread(target=self._plunge, args=(wtasks, \
                            self._pausing.isSet, self._finish))
            # BUG FIX: was "self._plunger.deamon = True" - the misspelled
            # attribute meant the plunger thread was never marked daemonic.
            self._plunger.daemon = True
            self._plunger.start()
            self._running.set()
        else:
            raise PlumberError
    def wait(self, timeout=None):
        """
        Waits (blocks) until a running pipeline finishes.

        Arguments:

        - timeout(``int``) [default: ``None``] Specifies the timeout,
          ``RuntimeError`` will be raised. The default is to wait indefinitely
          for the pipeline to finish.
        """
        if self._started.isSet() and \
           self._running.isSet() and \
           not self._pausing.isSet():
            self._finished.wait(timeout)
        else:
            raise PlumberError
    def pause(self):
        """
        Pauses a running pipeline. This will stop retrieving results from the
        pipeline. Parallel parts of the pipeline will stop after the ``NuMap``
        buffer is has been filled. A paused pipeline can be run or stopped.
        """
        # 1. stop the plumbing thread by raising a StopIteration on a stride
        # boundary
        if self._started.isSet() and \
           self._running.isSet() and \
           not self._pausing.isSet():
            self._pausing.set()
            self._plunger.join()
            del self._plunger
            self._pausing.clear()
            self._running.clear()
        else:
            raise PlumberError
    def stop(self):
        """
        Stops a paused pipeline. This will a trigger a ``StopIteration`` in the
        inputs of the pipeline. And retrieve the buffered results. This will
        stop all ``Pipers`` and ``NuMaps``. Python will not terminate cleanly
        if a pipeline is running or paused.
        """
        if self._started.isSet() and \
           not self._running.isSet() and \
           not self._pausing.isSet():
            # stops the dagger
            super(Plumber, self).stop()
            # disconnects all pipers
            self.disconnect()
            self.stats['run_time'] = time() - self.stats['start_time']
            self._started.clear()
        else:
            raise PlumberError
#
#
class Piper(object):
    """
    Creates a new ``Piper`` instance. The ``Piper`` is an ``object`` that acts
    as a processing node in a PaPy pipeline.

    A ``Piper`` can be created from a ``Worker`` instance another ``Piper``
    instance or a sequence of functions or ``Worker`` instances in every case a
    new ``Piper`` instance is created.

    ``Piper`` instances evaluate functions in parallel if they are created
    with a ``NuMap`` instance provided otherwise they use the ``itertools.imap``
    function.

    The "produce" and "consume" arguments allow for different than 1:1 mappings
    between the number of input and output items, while "spawn" allows
    accommodate a ``Piper`` to handle additional outputs. Additional outputs are
    created from the elements of the sequence returned by the wrapped ``Worker``
    instance.

    The product of "produce" and "spawn" of the upstream ``Piper`` has to equal
    the product of "consume" and "spawn" of the downstream ``Piper``, for
    **each** pair of pipers connected.

    The "branch" argument sets the "branch" attribute of a ``Piper`` instance.
    If two ``Pipers`` have no upstream->downstream relation they will be sorted
    according to their "branch" attributes. If neither of them has a "branch"
    attribute or both are identical their sort order will be semi-random.
    ``Pipers`` will implicitly inherit the "branch" of an up-stream ``Piper``,
    thus it is only necessary to specify the branch of a ``Piper`` if it is the
    first one after a branch point.

    It is possible to construct pipelines without specifying branches if
    ``Pipers`` which are connected to multiple up-stream ``Pipers``
    (the order of which is by default semi-random) use ``Workers`` that act
    correctly regardless of the order of results in their inbox.

    If "debug" is ``True`` exceptions are raised on all errors. This will most
    likely hang the Python interpreter after the error occurs. Use during
    development only!

    Arguments:

    - worker(``Worker``, ``Piper`` or sequence of functions or``Workers``)
    - parallel(``False`` or ``NuMap``) [default: ``False``] If parallel is
      ``False`` the ``Piper`` instance will not process data-items in parallel
    - consume(``int``) [default: ``1``] The number of input items consumed
      from **all** directly connected upstream ``Pipers`` per one evaluation.
    - produce(``int``) [default: 1] The number of results to generate for each
      evaluation result.
    - spawn(``int``) [default: 1] The number of times this `Piper`` is
      implicitly added to the pipeline to consume the specified number of
      results.
    - timeout(``int``) [default: ``None``] Time to wait till a result is
      available. Otherwise a ``PiperError`` is **returned** not raised.
    - branch(``object``) [default: ``None``] This affects the order of
      ``Pipers`` in the ``Dagger``. ``Piper`` instances are sorted according
      to the data-flow upstream->downstream and their "branch" attributes.
      The argument can be any object which can be used by the ``cmp``
      built-in function. If necessary they can override the ``__cmp__``
      method.
    - debug(``bool``) [default: ``False``] Verbose debugging mode. Raises a
      ``PiperError`` on ``WorkerErrors``.
    - name(``str``) [default: ``None``] A string to identify the ``Piper``.
    - track(``bool``) [default: ``False``] If ``True`` results of this
      ``Piper`` will be tracked by the ``NuMap`` (ignored if ``Piper`` is
      linear).
    - repeat(``bool``) [default: ``False``] If ``True``and "produce" is
      larger than ``1`` the evaluated results will be repeated. If ``False``
      it assumes that the evaluated results are sequences and produce will
      iterate over that ``list`` or ``tuple``.
    """
    @staticmethod
    def _cmp(x, y):
        """
        (internal) compares pipers by ornament.
        """
        return cmp(x.ornament, y.ornament)
    def __init__(self, worker, parallel=False, consume=1, produce=1, \
                 spawn=1, timeout=None, branch=None, debug=False, \
                 name=None, track=False, repeat=False):
        # run-time state flags and data endpoints
        self.inbox = None
        self.outbox = None
        self.connected = False
        self.started = False
        self.finished = False
        self.imap_tasks = []
        # fan-in / fan-out configuration
        self.consume = consume
        self.spawn = spawn
        self.produce = produce
        self.timeout = timeout
        self.debug = debug
        self.track = track
        self.repeat = repeat
        # tee bookkeeping: one lock per downstream consumer (see connect)
        self.tee_locks = [Lock()]
        self.tee_num = 0
        self.tees = []
        self.log = getLogger("papy")
        self.log.debug('Creating a new Piper from %s' % repr(worker))
        self.imap = parallel if parallel else imap # this is itertools.imap
        #self.cmp = cmp if cmp else None
        self.branch = (None or branch)
        # classify the worker argument: piper/worker/function or a
        # sequence thereof (see _inspect elsewhere in this module)
        is_p, is_w, is_f, is_ip, is_iw, is_if = _inspect(worker)
        if is_p:
            # reuse the worker of an existing Piper
            self.worker = worker.worker
        elif is_w:
            self.worker = worker
        elif is_f or is_if or is_iw:
            # wrap function(s)/worker(s) in a new Worker instance
            self.log.debug('Creating new worker from %s' % worker)
            try:
                self.worker = Worker(worker)
                self.log.debug('Created a new worker from %s' % worker)
            except Exception, excp:
                self.log.error('Could not create a new Worker from %s' % \
                               worker)
                raise PiperError('Could not create a new Worker from %s' % \
                                 worker, excp)
        else:
            self.log.error('Do not know how to create a Piper from %s' % \
                           repr(worker))
            raise PiperError('Do not know how to create a Piper from %s' % \
                             repr(worker))
        # initially return self by __iter__
        self._iter = self
        self.name = name or "piper_%s" % id(self)
        self.log.debug('Created Piper %s' % self)
    def __iter__(self):
        """
        (internal) returns a ``Piper._iter``, which should be overwritten
        after each ``itertools.tee``.
        """
        return self._iter
    def __repr__(self):
        # name plus the wrapped worker, e.g. "piper_1234(Worker(...))"
        return "%s(%s)" % (self.name, repr(self.worker))
    def start(self, stages=None):
        """
        Makes the ``Piper`` ready to return results. This involves starting the
        the provided ``NuMap`` instance. If multiple ``Pipers`` share a
        ``NuMap`` instance the order in which these ``Pipers`` are started is
        important. The valid order is upstream before downstream. The ``NuMap``
        instance can only be started once, but the process can be done in 2
        stages. This methods "stages" argument is a ``tuple`` which can contain
        any the numbers ``0`` and/or ``1`` and/or ``2`` specifying which stage
        of the start routine should be carried out:

        - stage 0 - creates the needed ``itertools.tee`` objects.
        - stage 1 - activates ``NuMap`` pool. A call to ``next`` will block..
        - stage 2 - activates ``NuMap`` pool managers.

        If this ``Piper`` shares a ``NuMap`` with other ``Pipers`` the proper
        way to start them is to start them in a valid postorder with stages
        ``(0, 1)`` and ``(2,)`` separately.

        Arguments:

        - stages(tuple) [default: ``(0,)`` if linear; ``(0,1,2)`` if parallel]
          Performs the specified stages of the start of a ``Piper`` instance.
          Stage ``0`` is necessary and sufficient to start a linear ``Piper``
          which uses an ``itertools.imap``. Stages ``1`` and ``2`` are
          required to start any parallel ``Piper`` instance.
        """
        # defaults differ linear vs. parallel
        stages = stages or ((0,) if self.imap is imap else (0, 1, 2))
        if not self.connected:
            self.log.error('Piper %s is not connected.' % self)
            raise PiperError('Piper %s is not connected.' % self)
        if not self.started:
            if 0 in stages:
                # create one tee per downstream consumer
                self.tees.extend(tee(self, self.tee_num))
            if hasattr(self.imap, 'start'):
                # parallel piper
                self.imap.start(stages)
                if 2 in stages:
                    # only fully started after the final stage
                    self.log.debug('Piper %s has been started using %s' % \
                                   (self, self.imap))
                    self.started = True
            else:
                # linear piper
                self.log.debug('Piper %s has been started using %s' % \
                               (self, self.imap))
                self.started = True
    def connect(self, inbox):
        """
        Connects the ``Piper`` instance to its upstream ``Pipers`` that should
        be given as a sequence. This connects this ``Piper.inbox`` with the
        upstream ``Piper.outbox`` respecting any "consume", "spawn" and
        "produce" arguments.

        Arguments:

        - inbox(sequence) sequence of ``Piper`` instances.
        """
        if self.started:
            self.log.error('Piper %s is started and cannot connect to %s.' % \
                           (self, inbox))
            raise PiperError('Piper %s is started and cannot connect to %s.' % \
                             (self, inbox))
        elif self.connected:
            self.log.error('Piper %s is connected and cannot connect to %s.' % \
                           (self, inbox))
            raise PiperError('Piper %s is connected and cannot connect to %s.' % \
                             (self, inbox))
        elif hasattr(self.imap, '_started') and self.imap._started.isSet():
            self.log.error('Piper %s cannot connect (NuMap is started).' % \
                           self)
            raise PiperError('Piper %s cannot connect (NuMap is started).' % \
                             self)
        else:
            # not started and not connected and NuMap not started
            self.log.debug('Piper %s connects to %s' % (self, inbox))
            # determine the stride with which result will be consumed from the
            # input.
            stride = self.imap.stride if hasattr(self.imap, 'stride') else 1
            # Tee input iterators. The idea is to create a promise object for a
            # tee. The actual teed iterator will be created on start. Each tee
            # is protected with a seperate lock the reasons for this are:
            # - tee objects are as a collection not thread safe
            # - tee objects might be next'ed from different threads, a single
            #   lock will not guarantee that a thread might be allowed to finish
            #   it's stride. (How it works that a thread releases the next
            #   thread only if it finished a stride
            teed = []
            for piper in inbox:
                if hasattr(piper, '_iter'): # isinstance Piper?
                    # register this consumer with the upstream piper: a new
                    # pre-acquired lock guards the new tee branch
                    piper.tee_num += 1
                    tee_lock = Lock()
                    tee_lock.acquire()
                    piper.tee_locks.append(tee_lock)
                    piper = _TeePiper(piper, piper.tee_num - 1, stride)
                teed.append(_InputIterator(piper, self))
            # set how much to consume from input iterators.
            self.inbox = _Zip(*teed) if self.consume == 1 else\
                         _Consume(_Zip(*teed), n=self.consume, stride=stride)
            # set how much to
            for i in xrange(self.spawn):
                self.imap_tasks.append(\
                    self.imap(self.worker, self.inbox) \
                    if self.imap is imap else \
                    self.imap(self.worker, self.inbox, timeout=self.timeout, \
                              track=self.track))
            # chain the results together.
            outbox = _Chain(self.imap_tasks, stride=stride)
            # Make output
            #prd = ProduceFromSequence if self.produce_from_sequence else Produce
            if self.produce == 1:
                self.outbox = outbox
            elif self.repeat:
                # repeat each evaluated result "produce" times
                self.outbox = _Repeat(outbox, n=self.produce, stride=stride)
            else:
                # iterate over each evaluated result sequence
                self.outbox = _Produce(outbox, n=self.produce, stride=stride)
            self.connected = True
        return self # this is for __call__
    def stop(self, forced=False, **kwargs):
        """
        Attempts to cleanly stop the ``Piper`` instance. A ``Piper`` is
        "started" if its ``NuMap`` instance is "started". Non-parallel
        ``Pipers`` do not have to be started or stopped. An ``NuMap`` instance
        can be stopped by triggering its stopping procedure and retrieving
        results from the ``NuMaps`` end tasks. Because neither the ``Piper`` nor
        the ``NuMap`` "knows" which tasks i.e. ``Pipers`` are the end tasks
        they have to be specified::

            end_task_ids = [0, 1]    # A list of NuMap task ids
            piper_instance.stop(ends =end_task_ids)

        results in::

            NuMap_instance.stop(ends =[0,1])

        If the ``Piper`` did not finish processing the data before the
        stop method is called the "forced" argument has to be ``True``::

            piper_instance.stop(forced =True, ends =end_task_ids)

        If the ``Piper`` (and consequently ``NuMap``) is part of a ``Dagger``
        graph the ``Dagger.stop`` method should be called instead. See:
        ``NuMap.stop`` and ``Dagger.stop``.

        # verify this:
        # If "forced" is set ``True`` but the ends ``NuMap`` argument is not
        # given. The ``NuMap`` instance will not try to retrieve any results and
        # will not call the ``NuMap._stop`` method.

        Arguments:

        - forced(``bool``) [default: ``False``] The ``Piper`` will be forced
          to stop the ``NuMap`` instance.

        Additional keyworded arguments are passed to the ``Piper.imap``
        instance.
        """
        # ends valid only if forced specified.
        if not self.started:
            self.log.error('Piper %s has not yet been started.' % self)
            raise PiperError('Piper %s has not yet been started.' % self)
        elif not self.finished and not forced:
            msg = 'Piper %s has not finished. Use forced =True' % self
            self.log.error(msg)
            raise PiperError(msg)
        else:
            # started and (finished or forced)
            if hasattr(self.imap, 'stop'):
                self.imap.stop(forced=forced, **kwargs)
            self.started = False
            self.log.debug('Piper %s stops (finished: %s)' % \
                           (self, self.finished))
    def disconnect(self, forced=False):
        """
        Disconnects the ``Piper`` instance from its upstream ``Pipers`` or
        input data if the ``Piper`` is the input node of a pipeline.

        Arguments:

        - forced(``bool``) [default: ``False``] If ``True`` the ``Piper`` will
          try to forcefully remove all tasks (including the spawned ones) from
          the ``NuMap`` instance.
        """
        if not self.connected:
            self.log.error('Piper %s is not connected and cannot be disconnected' % self)
            raise PiperError('Piper %s is not connected and cannot be disconnected' % self)
        elif self.started:
            self.log.error('Piper %s is started and cannot be disconnected (stop first)' % self)
            raise PiperError('Piper %s is started and cannot be disconnected (stop first)' % self)
        elif hasattr(self.imap, '_started') and self.imap._started.isSet():
            self.log.error('Piper %s cannot disconnect as its NuMap is started' % self)
            raise PiperError('Piper %s cannot disconnect as its NuMap is started' % self)
        else:
            # connected and not started
            if hasattr(self.imap, '_started'):
                if self.imap._tasks == []:
                    # fully stopped
                    pass
                elif self.imap_tasks[-1].task == len(self.imap._tasks) - 1:
                    # the last task of this piper is the last task in the NuMap
                    self.imap.pop_task(number=self.spawn)
                elif forced:
                    # removes all tasks from the NuMap can be called multiple
                    # times.
                    self.imap.pop_task(number=True)
                else:
                    msg = 'Piper %s is not the last Piper added to the NuMap' % \
                          self
                    self.log.error(msg)
                    raise PiperError(msg)
            self.log.debug('Piper %s disconnected from %s' % (self, self.inbox))
            # reset endpoints so the piper can be reconnected later
            self.imap_tasks = []
            self.inbox = None
            self.outbox = None
            self.connected = False
    def __call__(self, *args, **kwargs):
        """
        This is just a convenience mapping to the ``Piper.connect`` method.
        """
        return self.connect(*args, **kwargs)
    def next(self):
        """
        Returns the next result. If no result is availble within the specified
        (during construction) "timeout" then a ``PiperError`` which wraps a
        ``TimeoutError`` is **returned**.

        If the result is a ``WorkerError`` it is also wrapped in a
        ``PiperError`` and is returned or raised if "debug" mode was specified
        at initialization. If the result is a ``PiperError`` it is propagated.
        """
        try:
            next = self.outbox.next()
        except StopIteration, excp:
            self.log.debug('Piper %s has processed all jobs (finished)' % self)
            self.finished = True
            # We re-raise StopIteration as part of the iterator protocol.
            # And the outbox should do the same.
            raise excp
        except (AttributeError, RuntimeError), excp:
            # probably self.outbox.next() is self.None.next()
            self.log.error('Piper %s has not yet been started.' % self)
            raise PiperError('Piper %s has not yet been started.' % self, excp)
        except IndexError, excp:
            # probably started before connected
            self.log.error('Piper %s has been started before connect.' % self)
            raise PiperError('Piper %s has been started before connect.' % self, excp)
        except TimeoutError, excp:
            self.log.error('Piper %s timed out waited %ss.' % \
                           (self, self.timeout))
            next = PiperError(excp)
        # we do not raise TimeoutErrors so they can be skipped.
        if isinstance(next, WorkerError):
            # return the WorkerError instance returned (not raised) by the
            # worker Process.
            self.log.error('Piper %s generated %s"%s" in func. %s on argument %s' % \
                           (self, type(next[0]), next[0], next[1], next[2]))
            if self.debug:
                # This makes only sense if you are debugging a piper as it will
                # most probably crash papy and python NuMap worker processes
                # threads will hang.
                raise PiperError('Piper %s generated %s"%s" in func %s on argument %s' % \
                                 (self, type(next[0]), next[0], next[1], next[2]))
            next = PiperError(next)
        elif isinstance(next, PiperError):
            # Worker/PiperErrors are wrapped by workers
            if self.debug:
                raise next
            self.log.debug('Piper %s propagates %s' % (self, next[0]))
        return next
class Worker(object):
"""
The ``Worker`` is an ``object`` that composes sequences of functions. When
called these functions are evaluated from left to right. The function on the
right will receive the return value from the function on the left.
The constructor takes optionally sequences of positional and keyworded
arguments for none or all of the composed functions. Positional arguments
should be given in a ``tuple``. Each element of this ``tuple`` should be a
``tuple`` of positional arguments for the corresponding function. If a
function does not take positional arguments its corresponding element in the
arguments ``tuple`` should be an empty ``tuple`` i.e. ``()``. Keyworded
arguments should also be given in a ``tuple``. Each element of this
``tuple`` should be a dictionary of arguments for the corresponding
function. If a function does not take any keyworded arguments
its corresponding element in the keyworded arguments ``tuple`` should be an
empty ``dict`` i.e. ``{}``. If none of the functions takes arguments of a
given type the positional and/or keyworded arguments ``tuple`` can be
omitted.
All exceptions raised by the functions are caught, wrapped and returned
**not** raised. If the ``Worker`` is called with the first argument being a
sequence which contains an ``Exception`` no function is evaluated and the
``Exception`` is re-wrapped and returned.
A ``Worker`` instance can be constructed in a variety of ways:
- with a sequence of functions and a optional sequences of positional and
keyworded arguments e.g.::
Worker((func1, func2, func3),
((arg11, arg21), (arg21,), ()),
({}, {}, {'arg31':arg31}))
- with another ``Worker`` instance, which results in their functional
equivalence e.g.::
Worker(worker_instance)
- with multiple ``Worker`` instances, where the functions and arguments of
the ``Workers`` are combined e.g.::
Worker((worker1, worker2))
this is equivalent to::
Worker(worker1.task + worker2.task, \
worker1.args + worker2.args, \
worker1.kwargs + worker2.kwargs)
- with a single function and its arguments in a tuple e.g.::
Worker(function, (arg1, arg2, arg3))
this is equivalent to::
Worker((function,),((arg1, arg2, arg3),))
"""
def __init__(self, functions, arguments=None, kwargs=None, name=None):
is_p, is_w, is_f, is_ip, is_iw, is_if = _inspect(functions)
if is_f:
self.task = (functions,)
if arguments is not None:
self.args = (arguments,)
else:
self.args = ((),)
if kwargs is not None:
self.kwargs = (kwargs,)
else:
self.kwargs = ({},)
elif is_w: # copy from other
self.task = functions.task
self.args = functions.args
self.kwargs = functions.kwargs
elif is_if:
self.task = tuple(functions)
if arguments is not None:
self.args = arguments
else:
self.args = tuple([() for i in self.task])
if kwargs is not None:
self.kwargs = kwargs
else:
self.kwargs = tuple([{} for i in self.task])
elif is_iw:
self.task = tuple(chain(*[w.task for w in functions]))
self.args = tuple(chain(*[w.args for w in functions]))
self.kwargs = tuple(chain(*[w.kwargs for w in functions]))
else:
# e.g. is piper
raise TypeError("The Worker expects an iterable of functions or" + \
" workers got: %s" % functions)
if len(self.task) != len(self.args) or len(self.task) != len(self.args):
raise TypeError("The Worker expects the arguents as ((args1) " + \
"... argsN)) and keyword arguments as " + \
"({kwargs}, ... ,{kwargs.}) got: %s" % \
repr(arguments))
# for representation
self.__name__ = ">".join([f.__name__ for f in self.task]) or name
# for identification
self.name = "%s_%s" % (self.__name__, id(self))
def __repr__(self):
"""
Functions within a worker e.g. (f, g, h) are evaluated from left to
right i.e.: h(g(f(x))) thus their representation f>g>h.
"""
return "%s(%s)" % (self.__name__, id(self))
def __hash__(self):
"""
``Worker`` instances are not hashable.
"""
raise TypeError('Worker instances are not hashable')
def __eq__(self, other):
"""
Custom ``Worker`` equality comparison. ``Worker`` instances are
functionally equivalent if they evaluate the same functions, in the same
order and have the same positional and keyworded arguments. Two
different ``Worker`` instances (objects with different ids) can be
equivalent if their functions have been initialized with the same
arguments.
"""
return (self.task == getattr(other, 'task', None) and
self.args == getattr(other, 'args', None) and
self.kwargs == getattr(other, 'kwargs', None))
def _inject(self, conn):
"""
(internal) inject/replace all functions into a rpyc connection object.
"""
# provide PAPY_DEFAULTS remotely
# provide PAPY_RUNTIME remotely
if not 'PAPY_INJECTED' in conn.namespace:
_inject_func(get_defaults, conn)
_inject_func(get_runtime, conn)
conn.execute('PAPY_DEFAULTS = get_defaults()')
conn.execute('PAPY_RUNTIME = get_runtime()')
conn.execute('PAPY_INJECTED = True')
# inject all functions
for func in self.task:
_inject_func(func, conn)
# create list of functions called TASK
# and inject a function comp_task which
_inject_func(_comp_task, conn)
conn.execute('TASK = %s' % \
str(tuple([i.__name__ for i in self.task])).replace("'", ""))
# ['func1', 'func2'] -> "(func1, func2)"
# inject compose function, will ...
self.task = [conn.namespace['_comp_task']]
self.args = [[self.args]]
self.kwargs = [{'kwargs':self.kwargs}]
# instead of multiple remote back and the combined functions is
# evaluated remotely.
return self
def __call__(self, inbox):
"""
Evaluates the function(s) and argument(s) with which the ``Worker``
instance has been initialized given the input data i.e. "inbox".
If an ``Exception`` is raised by the function the ``Worker`` returns a
``WorkerError`` instance. Typically a raised ``WorkerError`` should be
wrapped into a ``PiperError`` by the ``Piper`` instance which wraps this
``Worker`` instance. If any of the data in the "inbox" is a
``PiperError`` instance then no function is called and the ``Worker``
instance propagates the ``Exception`` (i.e. ``PiperError`` instance) to
its ``Piper`` instance. The originial ``Exception`` travels along as the
first argument of the innermost ``Exception``.
Arguments:
- inbox(sequence) A sequence of items to be evaluated by the
function ``f(sequence)`` is ``f((data1, data2, ..., data2))``.
"""
outbox = inbox # we save the input to raise a better exception.
exceptions = [e for e in inbox if isinstance(e, PiperError)]
if not exceptions:
# upstream did not raise exception, running functions
try:
for func, args, kwargs in \
zip(self.task, self.args, self.kwargs):
outbox = (func(outbox, *args, **kwargs),)
outbox = outbox[0]
except Exception, excp:
# an exception occured in one of the f's do not raise it
# instead return it.
outbox = WorkerError(excp, func.__name__, inbox)
else:
# if any of the inputs is a PiperError just propagate it.
outbox = PiperError(*exceptions)
return outbox
def _inspect(instance):
    """
    (internal) Determines the type of a given ``object``. Discriminates between
    ``Piper``, ``Worker``, ``FunctionType`` and ``Iterable`` instances. It
    returns a ``tuple`` of boolean variables i.e: (is_piper, is_worker,
    is_function, is_iterable_of_pipers, is_iterable_of_workers,
    is_iterable_of_functions).
    """
    is_piper = isinstance(instance, Piper)
    is_function = isinstance(instance, FunctionType) or isbuiltin(instance)
    is_worker = isinstance(instance, Worker)
    # "iterable" here means: has __iter__ and is not itself a Piper, Worker
    # or plain function.
    is_iterable = getattr(instance, '__iter__', False) and not \
                  (is_piper or is_function or is_worker)
    # classify the iterable by the type of its first element.  BUG FIX: the
    # original tested isinstance(instance, Piper) which is always False here
    # (is_iterable already excludes Pipers); check the first element like
    # the function/worker cases below.
    is_iterable_p = is_iterable and isinstance(instance[0], Piper)
    is_iterable_f = is_iterable and (isinstance(instance[0], FunctionType) or \
                                     isbuiltin(instance[0]))
    is_iterable_w = is_iterable and isinstance(instance[0], Worker)
    return (is_piper, is_worker, is_function, is_iterable_p, is_iterable_w, \
            is_iterable_f)
@imports(['itertools'])
def _comp_task(inbox, args, kwargs):
    """
    (internal) Composes a sequence of functions in the global variable TASK. The
    resulting composition is given the input "inbox" and arguments "args",
    "kwargs".
    """
    # Note. this function uses a global variable which must be defined on the
    # remote host.
    # Each function receives the previous result (re-wrapped in a 1-tuple)
    # plus its own positional and keyword arguments; the loop variables
    # deliberately shadow the "args"/"kwargs" parameters.
    for func, args, kwargs in itertools.izip(TASK, args, kwargs):
        inbox = (func(inbox, *args, **kwargs),)
    return inbox[0]
class _Consume(object):
    """
    (internal) iterator-wrapper consumes "n" results from the input iterator and
    weaves the results together in "strides". If the result is an exception it
    is **not** raised, but is returned.

    Arguments:

    - iterable(iterable) a sequence or other iterable
    - n(``int``) number of results to consume.
    - stride(``int``) see documentation
    """
    def __init__(self, iterable, n=1, stride=1):
        self.iterable = iterable
        self.stride = stride
        # buffer of per-stride batches; None until the first _rebuffer call
        self._stride_buffer = None
        self.n = n
    def __iter__(self):
        return self
    def _rebuffer(self):
        # collect n * stride results, grouped by their stride position
        batch_buffer = defaultdict(list)
        self._stride_buffer = []
        for i in xrange(self.n): # number of consumed
            for stride in xrange(self.stride): # results
                try:
                    res = self.iterable.next()
                except StopIteration:
                    # an exhausted iterable contributes nothing to this slot
                    continue
                except Exception, res:
                    # any other exception is kept as a result, not raised
                    pass
                batch_buffer[stride].append(res)
        # one batch (list of up to n results) per stride position
        for stride in xrange(self.stride):
            batch = batch_buffer[stride]
            self._stride_buffer.append(batch)
        # reversed so that .pop() below yields batches in stride order
        self._stride_buffer.reverse()
    def next(self):
        """
        Returns the next sequence of results, given stride and n.
        """
        try:
            results = self._stride_buffer.pop()
        except (IndexError, AttributeError):
            # buffer empty (IndexError) or not yet created (AttributeError)
            self._rebuffer()
            results = self._stride_buffer.pop()
        if not results:
            # an empty batch means the wrapped iterable is exhausted
            raise StopIteration
        return results
class _Zip(object):
"""
(internal) replacement for ``itertools.zip``, which pulls from all iterators
regardless of a raised ``StopIteration``.
"""
def __init__(self, *iterables):
self.iterables = [iter(itr) for itr in iterables]
def __iter__(self):
return self
def next(self):
results = []
stop = False
for iter in self.iterables:
try:
results.append(iter.next())
except StopIteration:
stop = True
if stop:
raise StopIteration
else:
return results
class _Chain(object):
"""
(internal) This is a generalization of the ``itertools.izip`` and
``itertools.chain`` functions. If "stride" is ``1`` it behaves like
``itertools.izip``, if "stride" is ``len(iterable)`` it behaves like
``itertools.chain`` in any other case it zips iterables in strides e.g::
a = Chain([iter([1,2,3]), iter([4,5,6], stride =2)
list(a)
>>> [1,2,4,5,3,6]
It is further resistant to exceptions i.e. if one of the iterables
raises an exception the ``Chain`` does not end in a ``StopIteration``, but
continues with other iterables.
"""
def __init__(self, iterables, stride=1):
self.iterables = iterables
self.stride = stride
self.l = len(self.iterables)
self.s = self.stride
self.i = 0
def __iter__(self):
return self
def next(self):
"""
Returns the next result from the chained iterables given ``"stride"``.
"""
if self.s:
self.s -= 1
else:
self.s = self.stride - 1
self.i = (self.i + 1) % self.l # new iterable
return self.iterables[self.i].next()
class _Repeat(object):
    """
    (very internal) This iterator-wrapper returns n-times each result from the
    wrapped iterator. i.e. if n =2 and the input iterators results are (1,
    Exception, 2) then the ``__Produce`` instance will return 6 (i.e. ``2 * 3``)
    results in the order [1, 1, Exception, Exception, 2, 2] if the stride =1.
    If stride =2 the output will look like this: [1, Exception, 1, Exception,
    2, 2]. Note that ``StopIteration`` is also an exception, and the Produce
    iterator might return values after a ``StopIteration`` is raised.
    """
    def __init__(self, iterable, n=1, stride=1):
        self.iterable = iterable
        self.stride = stride
        # iterator over the current stride's (result, is_exception) pairs
        self._stride_buffer = None
        # iterator repeating the collected stride n times
        self._repeat_buffer = None
        self.n = n # times the results in the buffer are repeated
    def __iter__(self):
        return self
    def _rebuffer(self):
        """
        (very internal) refill the repeat buffer
        """
        # pull one stride of results; exceptions (including StopIteration)
        # are captured and flagged instead of being raised here
        results = []
        exceptions = []
        for i in xrange(self.stride):
            try:
                results.append(self.iterable.next())
                exceptions.append(False)
            except Exception, excp:
                results.append(excp)
                exceptions.append(True)
        # repeat the same (results, exceptions) stride n times
        self._repeat_buffer = repeat((results, exceptions), self.n)
    def next(self):
        """
        (very internal) returns the next result, given ``"stride"`` and ``"n"``.
        """
        try:
            res, excp = self._stride_buffer.next()
        except (StopIteration, AttributeError):
            # stride buffer exhausted (or not yet created): advance the
            # repeat buffer, refilling it from the iterable if needed
            try:
                self._stride_buffer = izip(*self._repeat_buffer.next())
            except (StopIteration, AttributeError):
                self._rebuffer()
                self._stride_buffer = izip(*self._repeat_buffer.next())
            res, excp = self._stride_buffer.next()
        if excp:
            # captured exceptions (incl. StopIteration) are re-raised here
            raise res
        else:
            return res
class _Produce(_Repeat):
    """
    (internal) This iterator wrapper is an iterator, but it returns elements
    from the sequence returned by the wrapped iterator. The number of returned
    elements is defined by n and should not be smaller then the sequence
    returned by the wrapped iterator.

    For example if the wrapped iterator results are ((11, 12), (21, 22),
    (31, 32)) then n **should** equal ``2``. For "stride" ``1`` the result will
    be: [11, 12, 21, 22, 31, 32]. For "stride" ``2``, [11, 21, 12, 22, 31, 32].
    Note that ``StopIteration`` is also an exception!
    """
    def _rebuffer(self):
        """
        (internal) refill the repeat buffer
        """
        # collect a stride worth of results(result lists) or exceptions
        results = []
        exceptions = []
        for i in xrange(self.stride):
            try:
                results.append(self.iterable.next())
                exceptions.append(False)
            except Exception, excp:
                results.append(excp)
                exceptions.append(True)
        # un-roll the result lists
        res_exc = []
        for rep in xrange(self.n):
            flat_results = []
            for i in xrange(self.stride):
                result_list, exception = results[i], exceptions[i]
                if not exception:
                    # rep-th element of the i-th result list
                    flat_results.append(result_list[rep])
                else:
                    # the captured exception stands in for every element
                    flat_results.append(result_list)
            res_exc.append((flat_results, exceptions))
        # make an iterator (like repeat)
        self._repeat_buffer = iter(res_exc)
class _InputIterator(object):
    """
    (internal) wraps a piper and iterator together.
    """
    def __init__(self, iterator, piper):
        self.iterator = iter(iterator)
        self.piper = piper
    def __iter__(self):
        return self
    def next(self):
        """
        (internal) returns the next result from the iterator if the piper is
        started.
        """
        # a not-yet-started Piper backed by the serial builtin imap must not
        # be pulled from; any other configuration just forwards the result
        if self.piper.started is False and self.piper.imap is imap:
            raise StopIteration
        return self.iterator.next()
class _TeePiper(object):
    """
    (internal) This is wrapper around a ``Piper`` instance if another ``Piper``
    instance connects to it. The actual call to ``itertools.tee`` is delayed and
    happens on a call to ``Piper.start``. A ``TeePiper`` instance protects the
    ``itertools.tee`` object with a ``threading.Lock``. This lock is held for a
    stride. If a ``StopIteration`` exception occurs the next ``TeePiper`` is
    released and subsequent calls to the ``next`` method of this ``TeePiper``
    will not involve acquiring a lock and calling the ``next`` method of the
    wrapped tee object. This guarantees that the ``next`` method of a ``Piper``
    will yield a ``StopIteration`` only once. This is required because the
    ``NuMap`` will finish a task after the first ``StopIteration`` and will not
    call ``Piper.next`` any more and will automatically raise ``StopIterations``
    for subsequent calls to ``NuMap.next``.

    Arguments:

    - piper(``Piper``) ``Piper`` instance to be tee'd
    - i(``int``) index of the ``itertools.tee`` object within ``Piper.tees``.
    - stride(``int``) The stride of the ``Piper`` downstream of the wrapped
      ``Piper``. In a pipeline they should be the same or compatible
      (see manual).
    """
    def __init__(self, piper, i, stride):
        # becomes True after the wrapped tee raises its first StopIteration
        self.finished = False
        self.piper = piper
        self.stride = stride
        self.i = i
        # position within the current stride (1-based counter)
        self.s = 1
    def __iter__(self):
        return self
    def next(self):
        """
        (internal) returns the next result from the ``itertools.tee`` object for
        the wrapped ``Piper`` instance or re-raises an ``Exception``.
        """
        # do not acquire lock if NuMap is not finished.
        if self.finished:
            raise StopIteration
        # get per-tee lock
        self.piper.tee_locks[self.i].acquire()
        # get result or exception
        exception = True
        try:
            result = self.piper.tees[self.i].next()
            exception = False
        except StopIteration, result:
            self.finished = True
        except Exception, result:
            pass
        # release per-tee lock either self or next
        if self.s == self.stride or self.finished:
            # stride complete (or stream ended): hand the lock to the next tee
            self.s = 1
            self.piper.tee_locks[(self.i + 1) % len(self.piper.tees)].release()
        else:
            self.s += 1
            self.piper.tee_locks[self.i].release()
        if exception:
            raise result
        else:
            return result
|
# -*- coding: utf-8 -*-
import numpy as np
from keras.models import load_model
from Config import *
from DataProcessor import DataProcessor
class CNNPredictor:
    """
    Thin wrapper around a trained Keras CNN: loads the model from disk and
    turns raw image JSON into a prediction vector via ``DataProcessor``.
    """
    def __init__(self, model_path=None):
        self.config = DataConfig()
        self.dp = DataProcessor(self.config)
        # cache the input geometry from the configuration
        self.num_channels = self.config.num_channels
        self.row = self.config.img_height
        self.col = self.config.img_width
        self.ch = self.config.num_channels
        self.model = self.load_model(model_path)
    def load_model(self, model_path=None):
        """Load and return the Keras model stored at ``model_path``."""
        return load_model(model_path)
    def predict(self, img_json):
        """Return the model's prediction vector for one image given as JSON."""
        # batch of a single sample; the image height is cropped by config.clip
        clipped_height = int(self.row * self.config.clip)
        batch = np.zeros((1, clipped_height, self.col, self.ch), dtype=np.float32)
        batch[0] = self.dp.get_X(img_json)
        return self.model.predict(batch)[0]
|
#!/usr/bin/env python
import app_config
import calendar
import unittest
from app import utils
from fabfile import data
from models import models
from peewee import *
from time import time
from datetime import datetime
class ResultsLoadingTestCase(unittest.TestCase):
    """
    Test bootstrapping postgres database
    """
    def setUp(self):
        # reload a fresh fixture set before every test
        data.load_results()
        data.create_calls()
        data.create_race_meta()
    def test_results_loading(self):
        # NOTE(review): the expected counts in this class match a specific
        # fixture snapshot -- confirm whenever the fixtures change
        results_length = models.Result.select().count()
        self.assertEqual(results_length, 29882)
    def test_calls_creation(self):
        calls_length = models.Call.select().count()
        self.assertEqual(calls_length, 185)
    def test_race_meta_creation(self):
        race_meta_length = models.RaceMeta.select().count()
        self.assertEqual(race_meta_length, 191)
    def test_multiple_calls_creation(self):
        # creating calls a second time must not duplicate rows
        data.create_calls()
        calls_length = models.Call.select().count()
        self.assertEqual(calls_length, 185)
    def test_results_deletion(self):
        data.delete_results()
        results_length = models.Result.select().count()
        self.assertEqual(results_length, 0)
    def test_results_collation_dem(self):
        results = models.Result.select().where(
            (models.Result.party == 'Dem'),
            (models.Result.level == 'state')
        )
        filtered, other_votecount, other_votepct = utils.collate_other_candidates(list(results), 'Dem')
        filtered_length = len(filtered)
        whitelist_length = len(utils.DEM_CANDIDATES)
        # whitelist_length times number of races
        self.assertEqual(filtered_length, whitelist_length * 11)
    def test_results_collation_gop(self):
        results = models.Result.select().where(
            (models.Result.party == 'GOP'),
            (models.Result.level == 'state')
        )
        filtered, other_votecount, other_votepct = utils.collate_other_candidates(list(results), 'GOP')
        filtered_length = len(filtered)
        whitelist_length = len(utils.GOP_CANDIDATES)
        # whitelist_length times number of races
        self.assertEqual(filtered_length, whitelist_length * 11)
    def test_vote_tally(self):
        tally = utils.tally_results('12044', 'Georgia')
        self.assertEqual(tally, 167554)
    def test_last_updated_precinctsreporting(self):
        races = utils.get_results('gop', app_config.NEXT_ELECTION_DATE)
        last_updated = utils.get_last_updated(races)
        self.assertEqual(last_updated, datetime(2016, 3, 2, 2, 4, 50))
    def test_last_updated_before(self):
        # with no precincts reporting the "last updated" falls back to now
        models.Result.update(precinctsreporting=0).execute()
        races = utils.get_results('gop', app_config.NEXT_ELECTION_DATE)
        last_updated = utils.get_last_updated(races)
        last_updated_ts = calendar.timegm(last_updated.timetuple())
        now = time()
        self.assertAlmostEqual(now, last_updated_ts, delta=10)
    def test_last_updated_called_noprecincts(self):
        # a manually-called race contributes its timestamp even with zero
        # precincts reporting
        models.Result.update(precinctsreporting=0).execute()
        models.Call.update(override_winner=True).where(models.Call.call_id == '12044-polid-1746-state-1').execute()
        races = utils.get_results('dem', app_config.NEXT_ELECTION_DATE)
        last_updated = utils.get_last_updated(races)
        self.assertEqual(last_updated, datetime(2016, 3, 2, 2, 4, 1))
    # There is a known bug in the underlying systems the next two tests call.
    # See https://app.asana.com/0/52192842111966/110546796824271
    """
    def test_poll_closing_grouping(self):
        races = utils.get_results('gop', app_config.NEXT_ELECTION_DATE)
        poll_closings = utils.group_poll_closings(races)
        poll_closings_length = len(poll_closings)
        self.assertEqual(poll_closings_length, 5)
    def test_poll_closing_listing(self):
        races = utils.get_results('dem', app_config.NEXT_ELECTION_DATE)
        poll_closings = utils.group_poll_closings(races)
        nine_pm_closings = poll_closings[4]['races']
        self.assertEqual(len(nine_pm_closings), 2)
    """
class DelegatesLoadingTestCase(unittest.TestCase):
    """
    Test bootstrapping postgres database
    """
    def setUp(self):
        # load the delegates fixture and remember when it was loaded
        data.load_delegates()
        self.now = time()
    def test_delegates_timestamp(self):
        filesystem_datetime = utils.get_delegates_updated_time()
        filesystem_ts = calendar.timegm(filesystem_datetime.timetuple())
        # Compare timestamps; can be +/- 10 seconds
        self.assertAlmostEqual(self.now, filesystem_ts, delta=10)
    def test_delegates_length(self):
        # NOTE(review): the expected values in this class match a specific
        # fixture snapshot -- confirm whenever the fixtures change
        results_length = models.CandidateDelegates.select().where(
            models.CandidateDelegates.level == 'nation'
        ).count()
        self.assertEqual(results_length, 30)
    def test_delegate_count(self):
        record = models.CandidateDelegates.select().where(
            models.CandidateDelegates.level == 'nation',
            models.CandidateDelegates.last == 'Fiorina'
        ).get()
        self.assertEqual(record.delegates_count, 1)
    def test_superdelegate_count(self):
        record = models.CandidateDelegates.select().where(
            models.CandidateDelegates.level == 'nation',
            models.CandidateDelegates.last == 'Clinton'
        ).get()
        self.assertEqual(record.superdelegates_count, 359)
    def test_pledged_delegates_percent(self):
        record = models.CandidateDelegates.select().where(
            models.CandidateDelegates.level == 'nation',
            models.CandidateDelegates.last == 'Clinton'
        ).get()
        pledged_delegate_percent = record.pledged_delegates_pct()
        self.assertEqual(pledged_delegate_percent, 8.942065491183879)
    def test_super_delegates_percent(self):
        record = models.CandidateDelegates.select().where(
            models.CandidateDelegates.level == 'nation',
            models.CandidateDelegates.last == 'Clinton'
        ).get()
        super_delegate_percent = record.superdelegates_pct()
        self.assertEqual(super_delegate_percent, 15.071368597816962)
    def test_delegates_deletion(self):
        data.delete_delegates()
        all_records = models.CandidateDelegates.select().count()
        self.assertEqual(all_records, 0)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# NOTE(review): notebook-style fragment -- assumes plt, np, x_train,
# x_test_encoded and x_test_encoded_log_var are defined in earlier cells;
# confirm against the full notebook.
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(12, 4))
# Sample from the latent variable prior
normal_data = np.random.normal(size=(x_train.shape[0], 2))
ax0.scatter(normal_data[:, 0], normal_data[:, 1], alpha=0.1)
ax0.set_title("Samples from the latent prior $p(z)$")
ax0.set_xlim(-4, 4)
ax0.set_ylim(-4, 4)
# Sample a z_i from the conditional posterior for each x_i in the test set:
# mean comes from the encoder output, covariance is diagonal with variance
# exp(log_var) -- hence the exp(log_var / 2) standard deviations below
z = np.vstack([
    np.random.multivariate_normal(
        x_test_encoded[i], np.diag(np.exp(x_test_encoded_log_var[i] / 2)))
    for i in range(x_test_encoded.shape[0])])
ax1.scatter(z[:, 0], z[:, 1], alpha=0.1)
ax1.set_title("Samples from the latent posterior $q(z|x^i)$")
ax1.set_xlim(-4, 4)
ax1.set_ylim(-4, 4)
# Posterior mean value for each sample x_i from the test set:
ax2.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], alpha=0.1)
ax2.set_title("Test samples encoded in latent space")
ax2.set_xlim(-4, 4)
ax2.set_ylim(-4, 4);
# Analysis:
#
# The VAE KL divergence term of the likelihood lower bound objective function
# is trying to force the encoder to match the posterior distribution with the
# prior of the latent variable. In our case we used:
#   Normal(mean=[0, 0], std=diag([1, 1])
# as the prior distribution which means that 99.7% of the points are expected
# to lie within a radius of 3 around the origin of the 2D latent plan.
#
# Selecting different location and scale parameters for the prior (or even
# a different distribution such as the uniform distribution) would impact the
# shape of the encoded data.
import os
import numpy as np
import torch
import torchvision
import resnet as res
import time
from PIL import Image
from load import dataLoader, dataAugmentation
class modelEval():
    """
    Retrieval evaluation for a feature-extraction model: builds a feature
    library from a search image set, extracts features for a test set, and
    measures top-1 retrieval precision with an L2 nearest-neighbour search.
    """
    def __init__(self, model, state_dict):
        # model: an (un-loaded) network; state_dict: path to the checkpoint
        self.model = model
        self.weight = state_dict
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def loadWeight(self):
        """Load the stored weights, switch to eval mode, move to device."""
        self.model.load_state_dict(torch.load(self.weight))
        self.model.eval()
        model = self.model.to(self.device)
        return model
    def loadData(self, dataPath, dataList, dataset = ''):
        """Build a dataset over dataList with the standard transforms."""
        augmentation = dataAugmentation()
        data = dataLoader(dataPath, dataList, dataset = dataset, data_transforms = augmentation.data_transforms)
        return data
    def singleSearchL2(self):
        """
        Top-1 L2 nearest-neighbour search of every test feature against the
        library; prints average query time and precision.
        """
        rightCnt = 0
        t0 = time.time()
        for i in range(len(self.test)):
            label = self.testLabel[i]
            feature = self.test[i]
            # squared L2 distance to every library feature (vectorized)
            dist = np.sum(np.square(self.lib - feature), axis = 1)
            index = dist.argsort(axis = 0)[0]
            top1 = self.libLabel[int(index)]
            if top1 == label:
                rightCnt += 1
        t1 = time.time()
        print("Average time is %f" % ((t1 - t0) / len(self.test)))
        print("Precision is %f" % (rightCnt / len(self.test)))
    def getMatrix(self, path, listName, fileName):
        """
        Extract the feature matrix and labels for every image in listName and
        save the matrix to fileName; returns (matrix, labels).
        """
        data = self.loadData(path, listName, 'testImages')
        dataloaders = torch.utils.data.DataLoader(data, batch_size = 1)
        t = 0.0
        matrix = []
        labels = []
        cnt = 0
        for image, label in dataloaders:
            cnt += 1
            with torch.no_grad():
                image = image.to(self.device)
                t0 = time.time()
                # BUG FIX: use self.model -- the original referenced the
                # module-level global "model", which only exists when this
                # file is run as a script
                feature1, feature2, feature3, feature4, output1, output2, output3, output4 = self.model(image)
                t1 = time.time()
                t += t1 - t0
                # feature4 (the deepest feature map) is what we index on
                features = feature4.to("cpu")
                f = features.numpy()
                matrix.append(f[0])
                labels.append(label)
            if cnt % 10000 == 0:
                print("%d images" % cnt)
        print("All %d images" % cnt)
        matrix = np.stack(matrix, axis = 0)
        labels = np.array(labels)
        print(labels)
        print(labels.shape)
        np.savetxt(fileName, matrix, fmt = "%f", delimiter = " ")
        print("There are %d images. Average time of each iteration is %f" % (cnt, t / cnt))
        return matrix, labels
    def evaluate(self, searchPath, searchList, testPath, testList):
        """Run the full evaluation: build library, extract test features,
        then compute top-1 precision."""
        self.model = self.loadWeight()
        print("Start building search pool")
        self.lib, self.libLabel = self.getMatrix(searchPath, searchList, "dataset.txt")
        print("Build search pool successfully.")
        print("Start test images")
        self.test, self.testLabel = self.getMatrix(testPath, testList, "testset.txt")
        print("All features of test images are saved.")
        self.singleSearchL2()
if __name__ == "__main__":
model = res.resnet50(pretrained = False, num_classes = 461)
instance = modelEval(model, "model/resnet_Asoftmax_iter_100000.pt")
instance.evaluate("/export/home/dyh/workspace/circle_k/for_douyuhao/data/searchImages/", "searchListPart.txt", "/export/home/dyh/workspace/circle_k/for_douyuhao/data/testImages/", "testList.txt")
|
# vim: set fileencoding=utf-8 filetype=python :
"""
Loads all views in Python's default sort order.
"""
from django.core.management.base import BaseCommand
# noinspection PyProtectedMember
from pygipo.utils import runviews
class Command(BaseCommand):
    """Django management command that loads all SQL view definitions."""
    help = 'Load views defined in <app_dir>/migrations/views/*.sql'
    def handle(self, *args, **options):
        """Entry point: delegate everything to pygipo.utils.runviews."""
        runviews()
|
import pytest
from pawapp import helpers
@pytest.mark.parametrize('data,from_fields,to_fields,expected', [
    ({'id': 1}, ['id'], ['to_id'], {'to_id': 1}),
    ({'id': 2}, ['ids'], ['to_ids'], {'id': 2}),
    (
        {'id': 1, 'field_1': 1, 'field_2': '2', 'field_3': [1, 2, 3]},
        ['id', 'field_1', 'field_3'],
        ['new_id', 'new_field_1', 'new_field_3'],
        {
            'new_id': 1,
            'new_field_1': 1,
            'field_2': '2',
            'new_field_3': [1, 2, 3]
        },
    )
])
def test_map_dict_fields(data, from_fields, to_fields, expected):
    """map_dict_fields renames keys in place; untouched keys survive."""
    helpers.map_dict_fields(data, from_fields, to_fields)
    for expected_key in expected:
        assert expected_key in data
        assert data[expected_key] == expected[expected_key]
def test_add_list_value():
    """add_list_value appends to a per-key list, creating it on first use."""
    store = {}
    helpers.add_list_value(store, 'test', 'new item')
    helpers.add_list_value(store, 'test', 'second value')
    helpers.add_list_value(store, 'new', 'another')
    assert len(store) == 2
    assert store['test'] == ['new item', 'second value']
    assert store['new'] == ['another']
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class InputTransformNet(nn.Module):
    """
    T-Net that regresses a 3x3 alignment matrix for raw point clouds.

    Input:  [B, 3, N] point coordinates.
    Output: [B, 3, 3] spatial transform.  The final layer is initialized to
    the identity (zero weights, identity bias), so an untrained net returns
    the identity matrix for any input.
    """
    def __init__(self):
        super(InputTransformNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.bn2 = nn.BatchNorm1d(128)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.bn3 = nn.BatchNorm1d(1024)
        self.fc1 = nn.Linear(1024, 512)
        self.bn4 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, 256)
        self.bn5 = nn.BatchNorm1d(256)
        self.transform = nn.Linear(256, 9)
        # identity initialization: zero weights, flattened-identity bias
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(3, 3))
    def forward(self, x):
        """
        x: [B, 3, N] -> returns the [B, 3, 3] transform matrix.
        """
        batch, n_points = x.shape[0], x.shape[2]
        # shared per-point MLP: 3 -> 64 -> 128 -> 1024
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = self.relu(bn(conv(x)))
        # symmetric max-pooling over the point dimension -> [B, 1024]
        x = nn.MaxPool1d(n_points)(x).view(batch, 1024)
        # regression head: 1024 -> 512 -> 256 -> 9
        x = self.relu(self.bn4(self.fc1(x)))
        x = self.relu(self.bn5(self.fc2(x)))
        return self.transform(x).view(batch, 3, 3)
class FeatureTransformNet(nn.Module):
    """
    T-Net that regresses a 64x64 alignment matrix for per-point features.

    Input:  [B, 64, N] point features.
    Output: [B, 64, 64] feature transform.  The final layer is initialized
    to the identity (zero weights, identity bias), so an untrained net
    returns the identity matrix for any input.
    """
    def __init__(self):
        super(FeatureTransformNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv1d(64, 64, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.bn2 = nn.BatchNorm1d(128)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.bn3 = nn.BatchNorm1d(1024)
        self.fc1 = nn.Linear(1024, 512)
        self.bn4 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, 256)
        self.bn5 = nn.BatchNorm1d(256)
        self.transform = nn.Linear(256, 64 * 64)
        # identity initialization: zero weights, flattened-identity bias
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(64, 64))
    def forward(self, x):
        """
        x: [B, 64, N] -> returns the [B, 64, 64] transform matrix.
        """
        batch, n_points = x.shape[0], x.shape[2]
        # shared per-point MLP: 64 -> 64 -> 128 -> 1024
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = self.relu(bn(conv(x)))
        # symmetric max-pooling over the point dimension -> [B, 1024]
        x = nn.MaxPool1d(n_points)(x).view(batch, 1024)
        # regression head: 1024 -> 512 -> 256 -> 4096
        x = self.relu(self.bn4(self.fc1(x)))
        x = self.relu(self.bn5(self.fc2(x)))
        return self.transform(x).view(batch, 64, 64)
if __name__ == '__main__':
    # smoke test: one random batch of 8 clouds with 1000 points each
    points = torch.rand(8, 3, 1000)
    net = InputTransformNet()
    out = net(points)
    print(out.shape)
def split_string(string, delimiter=' '):
    """
    Split *string* on *delimiter* and return the resulting list.

    The delimiter defaults to a single space, preserving the original
    behaviour; pass any other separator to generalize.
    """
    return string.split(delimiter)
def join_string(list_string):
    """Glue the given pieces back together with '-' between them."""
    return '-'.join(list_string)
# Driver Function
if __name__ == '__main__':
    sample_text = 'Geeks for Geeks'
    # Splitting a string
    pieces = split_string(sample_text)
    print(pieces)
    # Join list of strings into one
    joined = join_string(pieces)
    print(joined)
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import unittest
from ui_perf_test_utils import UIPerfTestUtils
class TestUIPerfUtils(unittest.TestCase):
"""Test UIPerfUtils class."""
def testConvertDataListToString(self):
times = [1.023344324, 2.3233333, 2.442324444]
output_string = UIPerfTestUtils.ConvertDataListToString(times)
self.assertEqual(output_string, '[1.02334, 2.32333, 2.44232]',
'result output is wrong')
def testGetResultStringForPerfBot(self):
"""Test PrintResultList method."""
times = [1.023, 2.323, 2.44232]
output_string = UIPerfTestUtils.GetResultStringForPerfBot(
'playback', '', 'bear', times, 'ms')
self.assertEqual(output_string,
'RESULT playback: bear= [1.02300, 2.32300, 2.44232] ms',
'result output is wrong')
def testGetResultStringForPerfBotEmptyData(self):
"""Test PrintResultList method with empty data."""
times = []
output_string = UIPerfTestUtils.GetResultStringForPerfBot(
'playback', '', 'bear', times, 'ms')
self.assertFalse(output_string, msg='Result output is not empty.')
def testFindProcessesAndGetResourceInfo(self):
"""Test FindProcesses and GetResourceInfo methods.
Python process should be found when we run this script. Assert all
elements in processInfo are not None.
"""
list = UIPerfTestUtils.FindProcesses('python')
self.assertTrue(len(list) > 0, 'python process cannot be found')
info = UIPerfTestUtils.GetResourceInfo(list[0], time.time())
self._AssertProcessInfo(info)
def GetChromeRendererProcessInfo(self):
"""Test GetChromeRendererProcessInfo method.
You must start Chrome before you run your test. Otherwise, it fails.
So, this test is not included in the unit test (i.e., the method name
does not start with "test").
TODO(imasaki@chromium.org): find a way to start Chrome automatically.
"""
start_time = time.time()
info = UIPerfTestUtils.GetChromeRendererProcessInfo(start_time)
self._AssertProcessInfo(info)
def _AssertProcessInfo(self, info):
"""Assert process info has correct length and each element is not null."""
# See UIPerfTestUtils.chrome_process_info_names.
self.assertEqual(len(info), 7, 'the length of info should be 7')
for i in range(len(info)):
self.assertTrue(info[i] is not None, 'process info has None data')
def _CreateFakeProcessInfo(self, time, process_info_length):
"""Create fake process info for testing.
Args:
time: time used for measured_time.
Returns:
a process info with some data for testing.
"""
chrome_renderer_process_info = []
for i in range(process_info_length):
chrome_renderer_process_info.append(i + time)
return chrome_renderer_process_info
def testPrintMeasuredData(self):
# Build process info for testing.
chrome_renderer_process_infos = []
run_info1 = []
run_info1.append(self._CreateFakeProcessInfo(10, 7))
run_info1.append(self._CreateFakeProcessInfo(20, 7))
chrome_renderer_process_infos.append(run_info1)
run_info2 = []
run_info2.append(self._CreateFakeProcessInfo(10, 7))
chrome_renderer_process_infos.append(run_info2)
chrome_process_info_names = ['measure-time', 'pct-cpu', 'cpu-user',
'cpu-system', 'memory-rss', 'memory-vms',
'pct-process-memory']
chrome_process_info_units = ['sec', 'percent', 'load',
'load', 'MB', 'MB', 'percent']
output_string = UIPerfTestUtils.PrintMeasuredData(
chrome_renderer_process_infos,
chrome_process_info_names,
chrome_process_info_units,
False, 'p', 'title')
expected_output_string = (
'RESULT p-measure-time-0: title= [10.00000, 10.00000] sec\n'
'RESULT p-measure-time-1: title= [20.00000] sec\n'
'RESULT p-pct-cpu-0: title= [11.00000, 11.00000] percent\n'
'RESULT p-pct-cpu-1: title= [21.00000] percent\n'
'RESULT p-cpu-user-0: title= [12.00000, 12.00000] load\n'
'RESULT p-cpu-user-1: title= [22.00000] load\n'
'RESULT p-cpu-system-0: title= [13.00000, 13.00000] load\n'
'RESULT p-cpu-system-1: title= [23.00000] load\n'
'RESULT p-memory-rss-0: title= [14.00000, 14.00000] MB\n'
'RESULT p-memory-rss-1: title= [24.00000] MB\n'
'RESULT p-memory-vms-0: title= [15.00000, 15.00000] MB\n'
'RESULT p-memory-vms-1: title= [25.00000] MB\n'
'RESULT p-pct-process-memory-0: title= [16.00000, 16.00000] percent\n'
'RESULT p-pct-process-memory-1: title= [26.00000] percent\n')
self.assertEqual(output_string, expected_output_string,
'output string is wrong')
|
"""Const for Tests."""
TEST_USER = "testuser"
TEST_PASS = "testpass"
TEST_HOST = "example.com"
TEST_PORT = 7728 |
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from django.conf import settings
from django.core.files.storage import default_storage
from rest_framework.authtoken.models import Token
from rest_framework.test import force_authenticate
from rest_framework.test import APIRequestFactory
from rest_framework import status
from jarvis.resume.views import (Resume as Res,
GetUploadLimitView,
TrialUserView,
ValidateTrialUser,
TopCompaniesList,
SkillsSuggestion,
ResumeDetailView,
TrialResumeDetailView,
ResumeFilterDetailView,
SampleResumeView,
ResumeParseInternal,
ResumeSyncView,
AcademicDegreeList,
SampleResumeDetailView,
ResumeFilter)
from jarvis.resume.models import Skill, TrialUser, Company, Resume
from jarvis.resume.utils.parser_helper import get_sim_hash_for_resume_content
from jarvis.resume.utils.extractor import get_text
from jarvis.resume.tasks import parse_resume
import shutil
import json
from uuid import UUID, uuid4
class ResumeUploadTest(TestCase):
    """Tests for the authenticated resume-upload endpoint (``Res`` view)."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_resume_upload_without_token(self):
        """An unauthenticated upload is rejected with 401."""
        request = self.factory.post('/api/resumes/')
        request.user = self.user
        response = Res.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_resume_upload_with_token_without_filling_out_fields(self):
        """An authenticated upload with no fields reports the missing fields."""
        request = self.factory.post('/api/resumes')
        force_authenticate(request, user=self.user, token=self.user.auth_token)
        response = Res.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.content, '{"skills": ["This field is required."], "file": ["This field is required."]}')

    def test_resume_upload_with_token_and_fields(self):
        """A valid authenticated upload is accepted and queued for parsing."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        data = 'python, django, flask'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes', {'skills':data, 'file':files})
            force_authenticate(request, user=self.user, token=self.user.auth_token)
            response = Res.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        json_status = json.loads(response.content)
        self.assertEqual(json_status['status'], 'processing')
    # def test_generation_of_client_key_and_client_secret(self):
class GetUploadLimitViewTest(TestCase):
    """Tests for the remaining-upload-quota endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_get_resume_upload_limit(self):
        """The quota starts at 20 and drops by one after an upload."""
        request = self.factory.get('/api/resumes/limit')
        force_authenticate(request, user=self.user, token=self.user.auth_token)
        response = GetUploadLimitView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['upload_remaining'], 20)
        # Uploading a resume and then making request to 'limits' url.
        data = 'python, django, flask'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes', {'skills':data, 'file':files})
            force_authenticate(request, user=self.user, token=self.user.auth_token)
            response = Res.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        request = self.factory.get('/api/resumes/limit')
        force_authenticate(request, user=self.user, token=self.user.auth_token)
        response = GetUploadLimitView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['upload_remaining'], 19)
class TrialUserViewTest(TestCase):
    """Tests for the unauthenticated trial resume-upload endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_trial_user_resume_upload_without_providing_email(self):
        """A trial upload without an email address is rejected with 400."""
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files})
            response = TrialUserView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'email_address': ['This field is required.']})

    def test_trial_user_resume_upload_with_email(self):
        """A trial upload with an email creates a TrialUser and starts parsing."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        trialuser_count = TrialUser.objects.count()
        self.assertEqual(trialuser_count, 0)
        email_address = 'anubhavs286@gmail.com'
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        trialuser_count = TrialUser.objects.count()
        trialuser = TrialUser.objects.first()
        self.assertEqual(trialuser_count, 1)
        self.assertEqual(trialuser.email_address, 'anubhavs286@gmail.com')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], 'processing')
class ValidateTrialUserTest(TestCase):
    """Tests for trial-email validation (quota and disposable-domain checks)."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_with_trial_user_email_which_has_not_reached_threshold(self):
        """An unused email validates successfully."""
        email = 'anubhavs286@gmail.com'
        request = self.factory.get('/api/resumes/trial-user/validate/?email={}'.format(email))
        response = ValidateTrialUser.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], 'success')
        self.assertEqual(response.data['message'], 'anubhavs286@gmail.com is valid.')

    def test_with_user_trial_email_which_has_reached_threshold(self):
        """An email that already uploaded a trial resume is rejected."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        email_address = 'anubhavs286@gmail.com'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], 'processing')
        email = 'anubhavs286@gmail.com'
        request = self.factory.get('/api/resumes/trial-user/validate/?email={}'.format(email))
        response = ValidateTrialUser.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.data['status'], 'failure')
        self.assertEqual(response.data['message'], 'This email has already been used for the trial version.')

    def test_with_trial_user_email_present_in_DESPOSABLE_EMAIL_DOMAINS(self):
        """An email on a disposable-domain blocklist is rejected."""
        email = 'anubhavs286@027168.com'
        request = self.factory.get('/api/resumes/trial-user/validate/?email={}'.format(email))
        response = ValidateTrialUser.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.data['status'], 'failure')
        self.assertEqual(response.data['message'], 'Please use a valid email address.')
class TopCompaniesListView(TestCase):
    """Checks that the top-companies endpoint orders results by rank."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')
        for company_name, company_rank in (('Google', 1), ('Facebook', 3), ('Quora', 2)):
            Company.objects.create(name=company_name, rank=company_rank)

    def test_order_list_of_companies_according_to_rank(self):
        """Companies come back sorted by rank, not by creation order."""
        request = self.factory.get('api/resumes/companies/top')
        response = TopCompaniesList.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for position, expected_name in enumerate(['Google', 'Quora', 'Facebook']):
            self.assertEqual(response.data[position]['name'], expected_name)
class SkillsSuggestionTest(TestCase):
    """Tests for the skill-autocomplete endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def _get_suggestions(self, url):
        """Issue a GET against the suggestion view and return the response."""
        return SkillsSuggestion.as_view()(self.factory.get(url))

    def test_when_no_skill_is_provided_in_query_param(self):
        """Without a query, the result is an empty string."""
        response = self._get_suggestions('api/resumes/skill-suggestion/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['result'], '')

    def test_when_skill_is_provided_in_query_param(self):
        """With a query, related skills are suggested."""
        response = self._get_suggestions('api/resumes/skill-suggestion/?q=python')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['result'], ['flask', 'python', 'pyramid', 'tornado', 'django'])
        response = self._get_suggestions('api/resumes/skill-suggestion/?q=javascript')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn('react-js', response.data['result'])
class ResumeDetailViewTest(TestCase):
    """Tests for the resume detail endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')
        Resume.objects.create(parse_status=0)

    def test_resume_details_endpoint(self):
        """An ordinary resume's details are retrievable by id."""
        resume = Resume.objects.first()
        request = self.factory.get('/api/resumes/')
        response = ResumeDetailView.as_view()(request, id=resume.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_trial_user_resume_details_are_filtered(self):
        """Resumes uploaded by trial users are hidden from this endpoint."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        email_address = 'anubhavs286@gmail.com'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        resume = Resume.objects.first()
        request = self.factory.get('/api/resumes/')
        response = ResumeDetailView.as_view()(request, id=resume.id)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class TrialResumeDetailViewTest(TestCase):
    """Tests for the trial resume detail endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')
        email_address = 'anubhavs286@gmail.com'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)

    def test_trial_users_resume_details(self):
        """A trial user's resume is retrievable through the trial endpoint."""
        request = self.factory.get('/api/resumes/trial/')
        resume = Resume.objects.first()
        response = TrialResumeDetailView.as_view()(request, id=resume.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Remove media files written by setUp's upload.
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
class ResumeFilterViewTest(TestCase):
    """Tests for filtering resumes by id (with optional skill query params)."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def _post_filter(self, url, payload=None):
        """POST to the filter endpoint as the authenticated test user."""
        auth_header = 'Token {}'.format(self.user.auth_token)
        if payload is None:
            request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
        else:
            request = self.factory.post(url, payload, HTTP_AUTHORIZATION=auth_header)
        force_authenticate(request, user=self.user)
        return ResumeFilter.as_view()(request)

    def test_resume_filter_view_without_providing_ids(self):
        """With no ids, the filter returns an empty page."""
        response = self._post_filter('/api/resumes/filter/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        self.assertEqual(response.data.get('next'), None)

    def test_resume_filter_view_with_ids(self):
        """Both requested resumes are returned."""
        first = Resume.objects.create(parse_status=0)
        second = Resume.objects.create(parse_status=0)
        response = self._post_filter('/api/resumes/filter/', {'ids':[first.id, second.id],})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)

    def test_resume_filter_view_with_ids_and_skills_as_params(self):
        """Skill query params combine with the id filter."""
        resume = Resume.objects.create(parse_status=0)
        response = self._post_filter('/api/resumes/filter/?skills=python', {'ids':resume.id,})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
# class ResumeFilterDetailViewTest(TestCase):
# def setUp(self):
# self.factory = APIRequestFactory()
# self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')
# def test_resume_filter_detail_view(self):
# resume = Resume.objects.create(parse_status=0)
# request = self.factory.get('/api/resumes/filter/{}'.format(resume.id))#, kwargs={'id':resume.id})
# kwargs = {'id':resume.id}
# response = ResumeFilterDetailView.as_view()(request, kwargs={'id': resume.id})
# self.assertEqual(response.status_code, status.HTTP_200_OK)
class SampleResumeViewTest(TestCase):
    """Tests for the sample-resume endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_sample_resume_response_for_existing_trial_user_email(self):
        """Reusing an email that already did a trial upload is reported."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        email_address = 'anubhavs286@gmail.com'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        skills = 'python, hadoop'
        email = email_address
        file = 'nda.pdf'
        request = self.factory.post('/api/resumes/sample', {'skills':skills, 'email':email, 'file':file})
        response = SampleResumeView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['message'], 'Email Address already used. Try with another one.')

    def test_sample_resume_response_when_data_is_not_present(self):
        """Requesting a sample that was never seeded yields 404."""
        skills = 'python, hadoop'
        email = 'anubhavs286@gmail.com'
        file = 'nda.pdf'
        request = self.factory.post('/api/resumes/sample', {'skills':skills, 'email':email, 'file':file})
        response = SampleResumeView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data['message'], 'Sample data not found.')

    def test_sample_resume_response_when_data_is_present(self):
        """With sample data seeded, the sample request starts processing."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        email_address = 'sample@aircto.com'
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        resume = Resume.objects.first()
        resume.file_name = 'nda.pdf'
        resume.save()
        skills = 'python, hadoop'
        email = 'anubhavs286@gmail.com'
        file = 'nda.pdf'
        request = self.factory.post('/api/resumes/sample', {'skills':skills, 'email':email, 'file':file})
        response = SampleResumeView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], 'processing')
class ResumeParseInternalTest(TestCase):
    """Tests for the synchronous internal resume-parsing endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_parsed_response_of_view(self):
        """Parsing the fixture PDF returns the extracted name and email."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/internal', {'file':files})
            force_authenticate(request, user=self.user)
            response = ResumeParseInternal.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['first_name'], 'anubhav')
        self.assertEqual(response.data['email'], 'anubhavs286@gmail.com')
class ResumeSyncViewTest(TestCase):
    """Tests for the resume re-sync endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_resume_sync_view_response_with_correct_uuid(self):
        """Syncing an existing resume id succeeds."""
        resume = Resume.objects.create(parse_status=0)
        request = self.factory.post('/api/resumes/sync', {'resume_id':resume.id})
        # Pass the real token object; the original passed the literal string
        # 'self.user.auth_token' (accidentally quoted attribute access).
        force_authenticate(request, user=self.user, token=self.user.auth_token)
        response = ResumeSyncView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['message'], 'success')

    def test_resume_sync_view_response_with_incorrect_uuid(self):
        """A malformed resume id is rejected with 400."""
        request = self.factory.post('/api/resumes/sync', {'resume_id': 'asdasd'})
        # The original quoted 'self.user.user_token' — an attribute that does
        # not even exist; use the real auth token here as well.
        force_authenticate(request, user=self.user, token=self.user.auth_token)
        response = ResumeSyncView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['message'], 'not a valid uuid')
class AcademicDegreeListTest(TestCase):
    """Tests for the academic-degree listing endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()

    def test_view_response_and_no_of_disciplines(self):
        """The endpoint responds OK and lists all fourteen disciplines."""
        response = AcademicDegreeList.as_view()(self.factory.get('/api/resumes/degrees'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 14)
class SampleResumeDetailViewTest(TestCase):
    """Tests for fetching a processed sample resume by id."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_user(username='anubhav', email='anubhav@gmail.com', password='password123')

    def test_sample_resume_detail_view(self):
        """A sample resume created from seeded data is retrievable by id."""
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        except OSError:
            pass
        email_address = 'sample@aircto.com'
        # Context manager closes the fixture file instead of leaking the handle.
        with open(settings.TESTDATA_DIRS + 'view_tests/nda.pdf', 'rb') as files:
            request = self.factory.post('/api/resumes/trial', {'skills':'python, django', 'file':files, 'email_address':email_address})
            response = TrialUserView.as_view()(request)
        resume = Resume.objects.first()
        resume.file_name = 'nda.pdf'
        resume.save()
        skills = 'python, hadoop'
        email = 'anubhavs286@gmail.com'
        file = 'nda.pdf'
        request = self.factory.post('/api/resumes/sample', {'skills':skills, 'email':email, 'file':file})
        response = SampleResumeView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], 'processing')
        request = self.factory.get('/api/resumes/sample/')
        response = SampleResumeDetailView.as_view()(request, id=response.data['resume_id'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['file_name'], 'nda.pdf')
|
import pytest
@pytest.fixture
def runner(app):
    """Provide a CLI runner bound to the ``app`` fixture for invoking commands."""
    return app.test_cli_runner()
|
# Created by yingwen at 2019-03-10
import numpy as np
from copy import deepcopy
from collections import defaultdict
from functools import partial
from malib.agents.tabular.q_learning.base_q import QAgent
from malib.agents.tabular.utils import softmax
class RRQAgent(QAgent):
    """Recursive-reasoning Q-learning agent ('RR2Q').

    Keeps a joint-action value table Q[s][a][o] over own action ``a`` and
    opponent action ``o``, plus an empirical model of the opponent's policy,
    and derives its own policy against that modelled opponent.
    """
    def __init__(self, id_, action_num, env, phi_type='count', a_policy='softmax', **kwargs):
        """phi_type: opponent-model estimator ('count' or 'norm-exp');
        a_policy: own policy rule ('softmax', otherwise greedy)."""
        super().__init__(id_, action_num, env, **kwargs)
        self.name = 'RR2Q'
        self.phi_type = phi_type
        self.a_policy = a_policy
        # count_AOS[s][a][o]: visits of (own action a, opponent action o) in state s.
        self.count_AOS = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
        # count_OS[s][o]: visits of opponent action o in state s.
        self.count_OS = defaultdict(partial(np.zeros, (self.action_num, )))
        # Estimated opponent policy per state, initialised from a flat Dirichlet.
        self.opponent_best_pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
        self.pi_history = [deepcopy(self.pi)]
        self.opponent_best_pi_history = [deepcopy(self.opponent_best_pi)]
        # Joint-action values Q[s][a][o] and marginal own-action values Q_A[s][a].
        self.Q = defaultdict(partial(np.random.rand, *(self.action_num, self.action_num)))
        self.Q_A = defaultdict(partial(np.random.rand, self.action_num))
        # Running mean reward R[s][a][o] and its sample count.
        self.R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
        self.count_R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
    def update(self, s, a, o, r, s2, env, done=False, tau=0.5):
        """One TD update for the transition (s, a, o, r, s2).

        ``tau`` is accepted but not read anywhere in this method.
        # NOTE(review): confirm tau is intentionally unused.
        """
        self.count_AOS[s][a][o] += 1.0
        self.count_OS[s][o] += 1.
        decay_alpha = self.step_decay()
        if self.phi_type == 'count':
            # Empirical opponent policy from visit counts (0.1 smoothing terms).
            count_sum = np.reshape(np.repeat(np.sum(self.count_AOS[s], 1), self.action_num), (self.action_num, self.action_num))
            self.opponent_best_pi[s] = self.count_AOS[s] / (count_sum + 0.1)
            self.opponent_best_pi[s] = self.opponent_best_pi[s] / (np.sum(self.opponent_best_pi[s]) + 0.1)
        elif self.phi_type == 'norm-exp':
            # Advantage-style model: log(exp(Q - Q_A)) row-normalised.
            self.Q_A_reshaped = np.reshape(np.repeat(self.Q_A[s], self.action_num), (self.action_num, self.action_num))
            self.opponent_best_pi[s] = np.log(np.exp((self.Q[s] - self.Q_A_reshaped)))
            self.opponent_best_pi[s] = self.opponent_best_pi[s] / np.reshape(
                np.repeat(np.sum(self.opponent_best_pi[s], 1), self.action_num), (self.action_num, self.action_num))
        # Incremental mean of observed reward for (s, a, o).
        self.count_R[s][a][o] += 1.0
        self.R[s][a][o] += (r - self.R[s][a][o]) / self.count_R[s][a][o]
        Q = self.Q[s]
        V = self.val(s2)
        if done:
            # Terminal transition: no bootstrapped next-state value.
            Q[a][o] = Q[a][o] + decay_alpha * (r - Q[a][o])
            self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r - self.Q_A[s][a])
        else:
            Q[a][o] = Q[a][o] + decay_alpha * (r + self.gamma * V - Q[a][o])
            self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r + self.gamma * V - self.Q_A[s][a])
        if self.verbose:
            print(self.epoch)
        self.update_policy(s, a, env)
        self.record_policy(s, env)
        self.epoch += 1
    def val(self, s):
        """Value of ``s``: best own action against the modelled opponent policy."""
        return np.max(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
    def update_policy(self, s, a, game):
        """Recompute the own policy in ``s`` from expected Q vs. the opponent model."""
        if self.a_policy == 'softmax':
            self.pi[s] = softmax(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
        else:
            # Greedy: indicator vector over maximising actions.
            Q = np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1)
            self.pi[s] = (Q == np.max(Q)).astype(np.double)
        self.pi_history.append(deepcopy(self.pi))
        self.opponent_best_pi_history.append(deepcopy(self.opponent_best_pi))
        if self.verbose:
            print('opponent pi of {}: {}'.format(self.id_, self.opponent_best_pi))
class GRRQAgent(QAgent):
    """Generalised recursive-reasoning Q-learning agent ('GRRQ', level ``k``).

    Like :class:`RRQAgent`, keeps joint-action values Q[s][a][o] and an
    empirical model of the opponent's policy, and acts against that model.
    """
    def __init__(self, id_, action_num, env, k=0, phi_type='count', a_policy='softmax', **kwargs):
        """phi_type: opponent-model estimator ('count' or 'norm-exp');
        a_policy: own policy rule ('softmax', otherwise greedy)."""
        super().__init__(id_, action_num, env, **kwargs)
        self.name = 'GRRQ'
        self.k = k  # reasoning level; stored but not read within this class
        self.phi_type = phi_type
        self.a_policy = a_policy
        # count_AOS[s][a][o]: visits of (own action a, opponent action o) in state s.
        self.count_AOS = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
        # Estimated opponent policy per state, initialised from a flat Dirichlet.
        self.opponent_best_pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
        self.pi_history = [deepcopy(self.pi)]
        self.opponent_best_pi_history = [deepcopy(self.opponent_best_pi)]
        # Joint-action values Q[s][a][o] and marginal own-action values Q_A[s][a].
        self.Q = defaultdict(partial(np.random.rand, *(self.action_num, self.action_num)))
        self.Q_A = defaultdict(partial(np.random.rand, self.action_num))
        # Running mean reward R[s][a][o] and its sample count.
        self.R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
        self.count_R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
    def update(self, s, a, o, r, s2, env, done=False):
        """One TD update for the transition (s, a, o, r, s2)."""
        self.count_AOS[s][a][o] += 1.0
        decay_alpha = self.step_decay()
        if self.phi_type == 'count':
            # Empirical opponent policy from visit counts (0.1 smoothing).
            count_sum = np.reshape(np.repeat(np.sum(self.count_AOS[s], 1), self.action_num), (self.action_num, self.action_num))
            self.opponent_best_pi[s] = self.count_AOS[s] / (count_sum + 0.1)
        elif self.phi_type == 'norm-exp':
            # Advantage-style model: log(exp(Q - Q_A)) row-normalised.
            self.Q_A_reshaped = np.reshape(np.repeat(self.Q_A[s], self.action_num), (self.action_num, self.action_num))
            self.opponent_best_pi[s] = np.log(np.exp(self.Q[s] - self.Q_A_reshaped))
            self.opponent_best_pi[s] = self.opponent_best_pi[s] / np.reshape(
                np.repeat(np.sum(self.opponent_best_pi[s], 1), self.action_num), (self.action_num, self.action_num))
        # Incremental mean of observed reward for (s, a, o).
        self.count_R[s][a][o] += 1.0
        self.R[s][a][o] += (r - self.R[s][a][o]) / self.count_R[s][a][o]
        Q = self.Q[s]
        V = self.val(s2)
        if done:
            # Terminal transition: no bootstrapped next-state value.
            Q[a][o] = Q[a][o] + decay_alpha * (r - Q[a][o])
            self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r - self.Q_A[s][a])
        else:
            Q[a][o] = Q[a][o] + decay_alpha * (r + self.gamma * V - Q[a][o])
            self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r + self.gamma * V - self.Q_A[s][a])
        # Guard debug output behind the verbose flag (consistent with RRQAgent,
        # which only prints when self.verbose is set).
        if self.verbose:
            print(self.epoch)
        self.update_policy(s, a, env)
        self.record_policy(s, env)
        self.epoch += 1
    def val(self, s):
        """Value of ``s``: best own action against the modelled opponent policy."""
        return np.max(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
    def update_policy(self, s, a, game):
        """Recompute the own policy in ``s`` from expected Q vs. the opponent model."""
        if self.a_policy == 'softmax':
            self.pi[s] = softmax(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
        else:
            # Greedy: indicator vector over maximising actions.
            Q = np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1)
            self.pi[s] = (Q == np.max(Q)).astype(np.double)
        self.pi_history.append(deepcopy(self.pi))
        self.opponent_best_pi_history.append(deepcopy(self.opponent_best_pi))
        if self.verbose:
            print('opponent pi of {}: {}'.format(self.id_, self.opponent_best_pi))
|
# -*- coding: utf-8 -*-
from datasetxy import DatasetXY as dd
from initialization import Initialization as ii
from activation import Activation as aa
from cost import Cost as cc
from regularization import Regularization as rr
from training import Training as tt
from neuralNetwork import NeuralNetwork as nn
if __name__ == '__main__':
    ''' dataset '''
    groups = ([0], [1])
    def f(p):
        # Label a point by whether it lies above a shifted parabola.
        p_x, p_y = p[0], p[1]
        # if (p_y > p_x + 2):
        if p_y > (p_x - 10)**2 + (p_x - 10) * 2 + 2:
            return groups[0]
        return groups[1]
    x_type = dd.XType.ALL
    x_lims = (1, 11)
    traing_data_ratio = 5/1
    # f can be passed directly; the lambda wrapper added nothing.
    d_set = dd(x_type, x_lims, groups, traing_data_ratio, f)
    ''' neural network '''
    # Input/output widths follow the dataset; two hidden layers of 4 and 3 units.
    layers = [['i'] * len(d_set.x_list[0]),
              ['h'] * 4,
              ['h'] * 3,
              ['o'] * len(d_set.y_list[0])]
    actvtn_types = [aa.Type.TANH] * 4
    initn_weight_type = ii.WeightType.RANDOM
    initn_bias_type = ii.BiasType.ZERO
    cost_type = cc.Type.QUADRATIC
    traing_type = tt.Type.GRADIENT_D
    reg_type = rr.Type.NONE
    reg_lambda = 0.01
    learng_eta = 0.03
    epochs = 50001
    batch_size = 0 # len(traing_x)
    # Logging switches.
    input_log = True
    traing_qtts_log = False
    traing_acts_log = False
    traing_cost_log = False
    evaltn_cost_log = False
    n_net = nn(layers, actvtn_types,
               initn_weight_type, initn_bias_type,
               cost_type,
               reg_type, reg_lambda,
               traing_type, epochs, batch_size, learng_eta,
               d_set.groups,
               d_set.traing_x_list, d_set.traing_y_list,
               d_set.evaltn_x_list, d_set.evaltn_y_list,
               input_log,
               traing_qtts_log, traing_acts_log, traing_cost_log,
               evaltn_cost_log)
    n_net.training()
    n_net.predictions()
|
# Copyright (c) 2016 Alex Parmentier, Andrew Hankinson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections import OrderedDict
from .base_validator import BaseValidator
class AnnotationValidator(BaseValidator):
    """Validates IIIF annotation (``oa:Annotation``) resources."""
    # Keys permitted on an annotation beyond the common IIIF fields.
    KNOWN_FIELDS = BaseValidator.COMMON_FIELDS | {"motivation", "resource", "on", "@context"}
    # Keys that belong to other IIIF resource types and must not appear here.
    FORBIDDEN_FIELDS = {"format", "height", "width", "viewingDirection", "navDate", "startCanvas", "first",
                        "last", "total", "next", "prev", "startIndex", "collections", "manifests", "members",
                        "sequences", "structures", "canvases", "resources", "otherContent", "images", "ranges"}
    # Keys every annotation must carry.
    REQUIRED_FIELDS = {"@type", "on", "motivation", "resource"}
    def __init__(self, iiif_validator):
        super().__init__(iiif_validator)
        # Maps each expected key to the method that validates its value.
        self.ImageSchema = OrderedDict((
            ("@id", self.id_field),
            ('@type', self.type_field),
            ('motivation', self.motivation_field),
            ("on", self.on_field),
            ('height', self.height_field),
            ('width', self.width_field),
            ('resource', self.resource_field)
        ))
        # URI of the canvas this annotation should target; supplied per run.
        self.canvas_uri = None
        self.setup()
    def _raise_additional_warnings(self, validation_results):
        """No extra warnings are produced for annotations."""
        pass
    def _run_validation(self, canvas_uri=None, **kwargs):
        """Check key constraints, then validate each field against ImageSchema."""
        self.canvas_uri = canvas_uri
        self._check_all_key_constraints("annotation", self._json)
        return self._compare_dicts(self.ImageSchema, self._json)
    def type_field(self, value):
        """Assert that ``@type == 'oa:Annotation'``."""
        if value != "oa:Annotation":
            self.log_error("@type", "@type must be 'oa:Annotation'.")
        return value
    def motivation_field(self, value):
        """Assert that ``motivation == 'sc:painting'``."""
        if value != "sc:painting":
            self.log_error("motivation", "motivation must be 'sc:painting'.")
        return value
    def on_field(self, value):
        """Validate the ``on`` field: it must match the target canvas URI, if known."""
        if self.canvas_uri and value != self.canvas_uri:
            self.log_error("on", "'on' must reference the canvas URI.")
        return value
    def resource_field(self, value):
        """Validate the ``resource`` field.
        Calls a sub-validation procedure handled by the :class:`ImageContentValidator`.
        """
        path = self._path + ("resource", )
        return self._sub_validate(self.ImageContentValidator, value, path)
|
from flask import Flask, redirect, url_for, render_template, \
request, make_response, Response, Markup, Blueprint, current_app, g
# Blueprint for all game-related routes; templates live in ./templates.
blueprint_game = Blueprint("game", __name__,
    template_folder="templates")
@blueprint_game.before_request
def before_request():
    # Expose the application object on flask.g for the view functions below.
    # NOTE(review): presumably current_app._main is set by the app factory — confirm.
    g.app = current_app._main
@blueprint_game.route("/create_game")
def create_game():
    """Create a fresh game via the engine and redirect to its session page."""
    new_game = g.app.engine.new_game()
    return redirect("/game/%s" % new_game.id)
@blueprint_game.route("/game/<path:game_id>")
def game_session(game_id):
    """Render an existing game session, or bounce back home for unknown ids."""
    session = g.app.engine.get_game(game_id)
    if session:
        return render_template("game.html", game=session)
    return redirect("/")
|
#Copyright (c) 2009, Walter Bender
#Copyright (c) 2009, Michele Pratusevich
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import os.path
from sprites import *
#
# class for defining individual cards
# tw - image related
# pattern - game logic related
# card index is generated in the following loop:
"""
for shape in range(0,3):
for color in range(0,4):
for num in range(0,3):
for fill in range(0,3):
"""
# if shape == -1 then generate special card-selected overlay
#
class Card:
    """A single game card sprite.

    ``tw`` supplies the rendering context (svg path, card geometry, scale);
    ``shape``/``color``/``num``/``fill`` define the game-logic identity.
    A ``shape`` of -1 builds the special card-selected overlay instead.
    """
    def __init__(self, tw, shape, color, num, fill):
        if shape == -1:
            # Special card-selected overlay; index 0 is reserved for it.
            self.spr = sprNew(tw, 0, 0, self.load_image(tw.path + "selected",
                                                        tw.card_w * tw.scale,
                                                        tw.card_h * tw.scale))
            self.index = 0
        else:
            self.shape = shape
            self.color = color
            self.num = num
            self.fill = fill
            # Unique index in [1, 108]: 3 shapes x 4 colors x 3 nums x 3 fills
            # (matches the generation loop documented at the top of the file).
            self.index = self.shape * 4 * 3 * 3 + self.color * 3 * 3 + self.num * 3 + self.fill + 1
            # Create the sprite from the svg file matching this index.
            self.spr = sprNew(tw, 0, 0, self.load_image(tw.path + str(self.index),
                                                        tw.card_w * tw.scale,
                                                        tw.card_h * tw.scale))
            self.spr.label = ""

    def show_card(self):
        """Raise the sprite to the foreground layer and draw it."""
        setlayer(self.spr, 2000)
        draw(self.spr)

    def hide_card(self):
        """Hide the sprite."""
        hide(self.spr)

    def load_image(self, file, w, h):
        """Load ``<file>.svg`` scaled to w x h pixels.

        ``file`` shadows the Python 2 builtin, but is kept for interface
        compatibility.  The original wrapped the path in a one-argument
        os.path.join(), which is a no-op and has been removed.
        """
        return gtk.gdk.pixbuf_new_from_file_at_size(file + ".svg",
                                                    int(w),
                                                    int(h))
|
# -*- coding: UTF-8 -*-
import json
import re
from TamTamBot.utils.utils import get_param_value, str_to_int, get_md5_hash_str
from openapi_client import Update, MessageCallbackUpdate, MessageLinkType, NewMessageLink, BotStartedUpdate, MessageCreatedUpdate, ChatType, User, Message, Recipient, Callback
class UpdateCmn(object):
    """Normalized view over a TamTam bot ``Update``.

    Extracts the command text, reply link, user/chat identifiers and command
    arguments from the concrete update types (callback, message-created,
    bot-started) into one uniform object for the command dispatcher.
    """
    def __init__(self, update):
        # type: (Update) -> None
        self.update_current = update
        self.update_type = update.update_type
        self.timestamp = update.timestamp
        self.message = None
        self.cmd_bot = None
        self.cmd = None
        self.cmd_args = None
        self.link = None
        self.user = None
        self.user_id = None
        self.user_name = None
        self.user_id_recipient = None
        self.chat_id = None
        self.chat_type = None
        self.this_cmd_response = False
        self.required_cmd_response = False
        self.update_previous = None
        self.recipient = None
        # user_locale is only present on some update types.
        self.user_locale = update.user_locale if hasattr(update, 'user_locale') else None
        if isinstance(update, MessageCallbackUpdate):
            # Preferred payload format is JSON: {'bot', 'cmd', 'cmd_args', 'mid'}.
            self.cmd = update.callback.payload
            self.link = None
            try:
                payload = json.loads(update.callback.payload)
            except json.decoder.JSONDecodeError:
                payload = None
            if isinstance(payload, dict):
                self.cmd_bot = payload.get('bot')
                self.cmd = payload.get('cmd')
                self.cmd_args = payload.get('cmd_args')
                mid = payload.get('mid')
                if mid:
                    # Reply to the message the callback button was attached to.
                    self.link = NewMessageLink(MessageLinkType.REPLY, mid)
            else:  # For compatibility with the old (non-JSON) payload format
                cmd = get_param_value(update.callback.payload, 'cmd')
                if cmd:
                    self.cmd = cmd
                mid = get_param_value(update.callback.payload, 'mid')
                if mid:
                    self.link = NewMessageLink(MessageLinkType.REPLY, mid)
                fk = get_param_value(update.callback.payload, 'cmd_args')
                if fk:
                    self.cmd_args = fk
                    # A purely numeric argument is interpreted as a chat id.
                    chat_id = str_to_int(fk)
                    if chat_id is not None:
                        self.cmd_args = {'chat_id': chat_id}
                    else:
                        self.cmd_args = {'id_str': fk}
            self.user = update.callback.user
        elif isinstance(update, MessageCreatedUpdate):
            self.cmd = update.message.body.text
            self.link = NewMessageLink(MessageLinkType.REPLY, update.message.body.mid)
            # Parse command arguments such as "/get_ids 1 2 7".
            # Two formats are filled in at the same time:
            # * update.cmd_args['l1']['c1'] - row 1, column 1
            # * update.cmd_args['c_parts'] - list of rows, each a list of columns
            # Rows come from real text lines; columns from space-separated parts.
            f = re.match(r'(/\w+) (.+)', self.cmd, re.DOTALL)
            if f:
                self.cmd = f.group(1)
                self.cmd_args = self.cmd_args or {}
                i = 1
                for ln in f.group(2).split('\n'):
                    if not isinstance(self.cmd_args.get('c_parts'), list):
                        self.cmd_args['c_parts'] = [[]]
                    else:
                        self.cmd_args['c_parts'].append([])
                    ind_l = 'l%s' % i
                    j = 1
                    for c in ln.split(' '):
                        if len(c.strip()) > 0:
                            self.cmd_args['c_parts'][-1].append(c)
                            ind_c = 'c%s' % j
                            if not self.cmd_args.get(ind_l):
                                self.cmd_args[ind_l] = {}
                            self.cmd_args[ind_l][ind_c] = c
                            j += 1
                    i += 1
        elif isinstance(update, BotStartedUpdate):
            self.cmd = '/start'
            self.chat_type = ChatType.DIALOG
        # Fallbacks: pull ids straight off the update when still unset.
        if self.user is None:
            if hasattr(update, 'user'):
                self.user = update.user
            elif hasattr(update, 'sender'):
                # NOTE(review): this checks update.sender but reads
                # update.message.sender - looks inconsistent; confirm intent.
                self.user = update.message.sender
        if self.chat_id is None:
            if hasattr(update, 'chat_id'):
                self.chat_id = update.chat_id
        if self.user_id is None:
            if hasattr(update, 'user_id'):
                self.user_id = update.user_id
        if hasattr(update, 'message') and isinstance(update.message, Message):
            self.message = update.message
        if isinstance(self.message, Message):
            if isinstance(self.message.recipient, Recipient):
                self.recipient = update.message.recipient
                self.chat_id = self.chat_id or self.recipient.chat_id
                self.chat_type = self.chat_type or self.recipient.chat_type
                self.user_id_recipient = self.user_id_recipient or self.recipient.user_id
            if isinstance(self.message.sender, User):
                self.user = self.user or self.message.sender
        if isinstance(self.user, User):
            self.user_id = self.user_id or self.user.user_id
            self.user_name = self.user_name or self.user.name
        if self.cmd:
            # Strip the leading '/' from the command.
            self.cmd = self.cmd[1:]
        self._index = None
    @property
    def index(self):
        # Lazily built "<chat_id>_<user_id>" key identifying this conversation.
        if not self._index:
            self._index = '%s_%s' % (self.chat_id, self.user_id)
        return self._index
    def is_double_click(self, callbacks_list):
        # type: (dict) -> bool
        # (original comment said "([]) -> str"; the method indexes by a string
        # key and returns a bool)
        res = False
        if isinstance(self.update_current, MessageCallbackUpdate):
            ind = self.get_callback_index(self.update_current.callback)
            if len(callbacks_list[ind]) == 2:
                # NOTE(review): threshold of 1000 - presumably timestamps in
                # milliseconds, i.e. two clicks within one second; confirm.
                res = (callbacks_list[ind][0] - callbacks_list[ind][1]) <= 1000
        return res
    @staticmethod
    def get_callback_index(callback):
        # type: (Callback) -> str
        # Key is "<user_id>#<md5 of payload>" - unique per user and button.
        ind = '%s#%s' % (callback.user.user_id, get_md5_hash_str(callback.payload))
        return ind
|
import smbus
import logging
import time
class Powerpi:
    """Driver for a BQ25895-based UPS connected over I2C (smbus).

    Refer to http://www.ti.com/lit/ds/symlink/bq25895.pdf for register maps.
    All register/byte constants below mirror that datasheet.
    """
    PORT = 1
    ADDRESS = 0x6a  # I2C address of the ups
    REG_WATCHDOG = 0x07
    BYTE_WATCHDOG_STOP = 0b10001101  # Stop Watchdog timer
    REG_SYSMIN = 0x03
    BYTE_SYSMIN = 0b00010000
    REG_ILIM = 0x00  # ILIM register
    REG_VREG = 0x06  # Charge voltage register
    #### Edit this section to suit your needs ######
    #BYTE_ILIM  = 0b01101000 #2A input current limit
    BYTE_ILIM = 0b01111111  # 3.25A input current limit
    #BYTE_ICHG = 0b00001000 #.5A charging current limit
    BYTE_ICHG = 0b00010000  # 1A charging current limit
    BAT_CAPACITY = 2900  # Battery capacity in mAh
    CURRENT_DRAW = 2000  # Current draw in mA, approximately
    VBAT_LOW = 3.2       # voltage treated as 0% charge
    VBAT_MAX = 4.208     # voltage treated as 100% charge
    # Charge Voltage options:
    #BYTE_VREG = 0b00000010 #3.84v
    #BYTE_VREG = 0b00010010 #3.9V
    #BYTE_VREG = 0b00101010 #4V
    #BYTE_VREG = 0b01000110 #4.112V
    BYTE_VREG = 0b01011110  # 4.208V
    #BYTE_VREG = 0b01110110 #4.304V
    #BYTE_VREG = 0b10001110 #4.4V
    #BYTE_VREG = 0b10101010 #4.512V
    #BYTE_VREG = 0b11000010 #4.608V
    ###############################################
    REG_ICHG = 0x04
    REG_ICHGR = 0x12
    REG_CONV_ADC = 0x02  # ADC conversion control (was defined twice; duplicate removed)
    REG_BATFET = 0x09
    BYTE_BATFET = 0b01001000  # delay before battery is disconnected
    BYTE_CONV_ADC_START = 0b10011101
    BYTE_CONV_ADC_STOP = 0b00011101
    REG_BATFET_DIS = 0x09
    BYTE_BATFET_DIS = 0b01101000
    REG_STATUS = 0x0B  # address of status register
    REG_VBAT = 0x0e
    REG_FAULT = 0x0c
    REG_IBAT = 0x12
    REG_VBUS = 0x11

    def __init__(self):
        # The bus is opened lazily in initialize() so construction never raises.
        pass

    def initialize(self):
        """Open the I2C bus and program the charger registers.

        Returns 0 on success, 1 on any failure (logged).
        """
        try:
            self.bus = smbus.SMBus(self.PORT)
            self.bus.write_byte_data(self.ADDRESS, self.REG_WATCHDOG, self.BYTE_WATCHDOG_STOP)
            self.bus.write_byte_data(self.ADDRESS, self.REG_ILIM, self.BYTE_ILIM)
            self.bus.write_byte_data(self.ADDRESS, self.REG_ICHG, self.BYTE_ICHG)
            self.bus.write_byte_data(self.ADDRESS, self.REG_BATFET, self.BYTE_BATFET)
            self.bus.write_byte_data(self.ADDRESS, self.REG_SYSMIN, self.BYTE_SYSMIN)
            self.bus.write_byte_data(self.ADDRESS, self.REG_VREG, self.BYTE_VREG)
            logging.info("UPS initialized")
            return 0
        except Exception as ex:
            logging.error("Initialization failed, check connection to the UPS:" + str(ex))
            return 1

    def _int_to_bool_list(self, num):
        """Unpack a byte into a list of 8 bools, LSB first (index 0 = bit 0)."""
        return [bool(num & (1 << n)) for n in range(8)]

    def _vbat_convert(self, vbat_byte):
        """Convert the raw VBAT register byte to volts (2.304V offset + 20mV steps)."""
        vbat_bool = self._int_to_bool_list(vbat_byte)
        vbat = 2.304
        vbat += vbat_bool[6] * 1.280
        vbat += vbat_bool[5] * 0.640
        vbat += vbat_bool[4] * 0.320
        vbat += vbat_bool[3] * 0.160
        vbat += vbat_bool[2] * 0.08
        vbat += vbat_bool[1] * 0.04
        vbat += vbat_bool[0] * 0.02
        return vbat

    def _ibat_convert(self, ibat_byte):
        """Convert the raw charge-current register byte to mA (50mA steps)."""
        ibat_bool = self._int_to_bool_list(ibat_byte)
        ibat = 0
        ibat += ibat_bool[6] * 3200
        ibat += ibat_bool[5] * 1600
        ibat += ibat_bool[4] * 800
        ibat += ibat_bool[3] * 400
        ibat += ibat_bool[2] * 200
        ibat += ibat_bool[1] * 100
        ibat += ibat_bool[0] * 50
        return ibat

    def _vbus_convert(self, vbus_byte):
        """Convert the raw VBUS register byte to volts (2.6V offset + 100mV steps)."""
        vbus_bool = self._int_to_bool_list(vbus_byte)
        vbus = 2.6
        vbus += vbus_bool[6] * 6.4
        vbus += vbus_bool[5] * 3.2
        vbus += vbus_bool[4] * 1.6
        vbus += vbus_bool[3] * 0.8
        vbus += vbus_bool[2] * 0.4
        vbus += vbus_bool[1] * 0.2
        vbus += vbus_bool[0] * 0.1
        return vbus

    def _calc_bat_charge_percent(self, vbat):
        """Linear charge estimate in [0, 1] between VBAT_LOW and VBAT_MAX."""
        bat_charge_percent = (vbat - self.VBAT_LOW) / (self.VBAT_MAX - self.VBAT_LOW)
        if bat_charge_percent < 0:
            bat_charge_percent = 0
        elif bat_charge_percent > 1:
            bat_charge_percent = 1
        return bat_charge_percent

    def _calc_time_left(self, vbat):
        """Estimated minutes of runtime left at CURRENT_DRAW, never negative."""
        time_left = int(self._calc_bat_charge_percent(vbat) * 60 * self.BAT_CAPACITY / self.CURRENT_DRAW)
        if time_left < 0:
            time_left = 0
        return time_left

    def read_status(self, clear_fault=False):
        """Run one ADC conversion and report power/charge status.

        Returns ``(0, data_dict)`` on success or ``(1, None)`` on I2C failure.
        ``clear_fault=True`` reads the fault register first to clear latched faults.
        Blocks ~2s while the ADC conversion completes.
        """
        try:
            if clear_fault:
                self.bus.read_byte_data(self.ADDRESS, self.REG_FAULT)
            self.bus.write_byte_data(self.ADDRESS, self.REG_CONV_ADC, self.BYTE_CONV_ADC_START)
            time.sleep(2)
            status = self.bus.read_byte_data(self.ADDRESS, self.REG_STATUS)
            status = self._int_to_bool_list(int(status))
            vbat = self._vbat_convert(self.bus.read_byte_data(self.ADDRESS, self.REG_VBAT))
            ibat = self._ibat_convert(self.bus.read_byte_data(self.ADDRESS, self.REG_ICHGR))
            vbus = self._vbus_convert(self.bus.read_byte_data(self.ADDRESS, self.REG_VBUS))
            self.bus.write_byte_data(self.ADDRESS, self.REG_CONV_ADC, self.BYTE_CONV_ADC_STOP)
        except Exception as ex:
            logging.error("An exception occurred while reading values from the UPS: " + str(ex))
            time.sleep(2)
            return 1, None
        # status bit 2: power-good; bits 3/4: charge phase (per datasheet REG0B).
        if status[2]:
            power_status = "Connected"
            time_left = -1  # -1 means "on external power, no countdown"
        else:
            power_status = "Not Connected"
            time_left = self._calc_time_left(vbat)
        if status[3] and status[4]:
            charge_status = "Charging done"
        elif status[4] and not status[3]:
            charge_status = "Charging"
        elif not status[4] and status[3]:
            charge_status = "Pre-Charge"
        else:
            charge_status = "Not Charging"
        data = {
            'PowerInputStatus': power_status,
            'InputVoltage': round(vbus, 3),
            'ChargeStatus': charge_status,
            'BatteryVoltage': round(vbat, 3),
            "BatteryPercentage": int(self._calc_bat_charge_percent(vbat) * 100),
            'ChargeCurrent': ibat,
            'TimeRemaining': int(time_left)
        }
        return 0, data

    def bat_disconnect(self):
        """Ask the charger to disconnect the battery FET.

        Retries up to 3 times with a 1s pause between attempts.  (The original
        ``for i in (0, 3)`` iterated over just the two values 0 and 3, i.e.
        only two attempts; it also used a bare ``except``.)
        Returns 0 on success, 1 if every attempt failed.
        """
        for _ in range(3):
            try:
                self.bus.write_byte_data(self.ADDRESS, self.REG_BATFET_DIS, self.BYTE_BATFET_DIS)
                return 0
            except Exception:
                time.sleep(1)
        return 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import atexit
import time
import RPi.GPIO as GPIO
import spi
# ensure GPIO.cleanup() is called when the script exits
atexit.register(GPIO.cleanup)
# use BCM (logical) pin numbering
GPIO.setmode(GPIO.BCM)
# LED pins; NOTE(review): 8 pins listed but only the first 6 are used below - confirm intent
DISPLAY = [17, 4, 9, 11, 7, 27, 22, 10]
# bit-banged SPI pins for the MCP3008 ADC
SPI_CLK = 18
SPI_MISO = 23
SPI_MOSI = 24
SPI_CS = 25
conversor_ad = spi.Mcp3008(SPI_CLK, SPI_MISO, SPI_MOSI, SPI_CS)
# ADC channel wired to the potentiometer
CANAL_POTENCIOMETRO = 1
# configure the LEDs as outputs, initially off
for led in DISPLAY[:6]:
    GPIO.setup(led, GPIO.OUT)
    GPIO.output(led, 0)
# chase the LEDs forever; the potentiometer reading divided by 1000 sets the
# per-LED delay in seconds (presumably a 10-bit reading, 0-1023 - confirm)
while True:
    for led in DISPLAY[:6]:
        GPIO.output(led, 1)
        atraso = conversor_ad.read(CANAL_POTENCIOMETRO)/1000.0
        time.sleep(atraso)
        GPIO.output(led, 0)
|
from .mixins import APIEndpointMixin
from django.urls import reverse
class BuildsEndpointTests(APIEndpointMixin):
    """Smoke tests for the project builds API endpoints."""

    def test_projects_builds_list(self):
        """Listing a project's builds returns 200."""
        self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token.key}')
        url = reverse(
            'projects-builds-list',
            kwargs={'parent_lookup_project__slug': self.project.slug},
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_projects_builds_detail(self):
        """A build detail response matches the stored fixture."""
        self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token.key}')
        url = reverse(
            'projects-builds-detail',
            kwargs={
                'parent_lookup_project__slug': self.project.slug,
                'build_pk': self.build.pk,
            },
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(
            response.json(),
            self._get_response_dict('projects-builds-detail'),
        )

    def test_projects_versions_builds_list_post(self):
        """Triggering a build returns 202 and creates a new build record."""
        self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token.key}')
        self.assertEqual(self.project.builds.count(), 1)
        url = reverse(
            'projects-versions-builds-list',
            kwargs={
                'parent_lookup_project__slug': self.project.slug,
                'parent_lookup_version__slug': self.version.slug,
            },
        )
        response = self.client.post(url)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(self.project.builds.count(), 2)
        response_json = response.json()
        # Pin the creation timestamp so the fixture comparison is deterministic.
        response_json['build']['created'] = '2019-04-29T14:00:00Z'
        self.assertDictEqual(
            response_json,
            self._get_response_dict('projects-versions-builds-list_POST'),
        )
|
from datetime import date, time, timedelta
from forty.views.status import StatusView
from forty.actions import WorkOptions
from forty.tools import ActionsBuilder as A
from forty.controllers import StatusController
from forty.managers.project_manager import Config
from ..controller_test_case import ControllerTestCase
class TestStatusControllerWhatsupCommand(ControllerTestCase):
    """Tests for the ``status whatsup`` command of the StatusController.

    (The original defined an ``__init__`` that only delegated to the base
    class with identical arguments via an explicit base-class call; the
    redundant override was removed - the inherited constructor is identical.)
    """

    @property
    def controller_class(self):
        """Controller under test, consumed by ControllerTestCase.handle()."""
        return StatusController

    def test_whatsup_started(self):
        """Mid-day status while work is in progress: passed/remained split at now."""
        self.now_to_return(hour=12, minute=34, second=56)
        actions = A().start().at(hour=8).done()
        self.actions_to_return(actions)
        view: StatusView = self.handle(["status", "whatsup"])
        self.assertEqual(view.status, WorkOptions.START)
        self.assertEqual(view.today_passed_time, timedelta(hours=4, minutes=34, seconds=56))
        self.assertEqual(view.today_remained_time, timedelta(hours=3, minutes=25, seconds=4))
        self.assertEqual(view.total_passed_time, timedelta(hours=4, minutes=34, seconds=56))
        self.assertEqual(view.total_remained_time, timedelta(hours=35, minutes=25, seconds=4))
        self.assertEqual(view.from_time, time(hour=8))
        self.assertEqual(view.to_time, time(hour=16))

    def test_whatsup_finished(self):
        """After finishing, times are frozen at the finish action and to_time is None."""
        self.now_to_return(hour=18, minute=0, second=0)
        actions = A().start().at(hour=8).finish().at(hour=12, minute=34, second=56).done()
        self.actions_to_return(actions)
        view: StatusView = self.handle(["status", "whatsup"])
        self.assertEqual(view.status, WorkOptions.FINISH)
        self.assertEqual(view.today_passed_time, timedelta(hours=4, minutes=34, seconds=56))
        self.assertEqual(view.today_remained_time, timedelta(hours=3, minutes=25, seconds=4))
        self.assertEqual(view.total_passed_time, timedelta(hours=4, minutes=34, seconds=56))
        self.assertEqual(view.total_remained_time, timedelta(hours=35, minutes=25, seconds=4))
        self.assertEqual(view.from_time, time(hour=8))
        self.assertEqual(view.to_time, None)

    def test_whatsup_started_today_overtime(self):
        """Past the daily limit: today's remaining time goes negative."""
        self.now_to_return(day=1, hour=9, minute=8, second=7)
        actions = A().start().at().done()
        self.actions_to_return(actions)
        view: StatusView = self.handle(["status", "whatsup"])
        self.assertEqual(view.status, WorkOptions.START)
        self.assertEqual(view.today_passed_time, timedelta(hours=9, minutes=8, seconds=7))
        self.assertEqual(view.today_remained_time, timedelta(hours=-1, minutes=-8, seconds=-7))
        self.assertEqual(view.total_passed_time, timedelta(hours=9, minutes=8, seconds=7))
        self.assertEqual(view.total_remained_time, timedelta(hours=30, minutes=51, seconds=53))
        self.assertEqual(view.from_time, time())
        self.assertEqual(view.to_time, time(hour=8))

    def test_whatsup_finished_total_overtime(self):
        """A full 41h week against a 40h limit: total remaining time goes negative."""
        test_config = Config(day_limit=8, total_limit=40)
        test_config.today = date(year=2021, month=1, day=5)
        self.config_to_return(config=test_config)
        self.now_to_return(day=5, hour=19)
        actions = (A()
                   .start().at(day=1, hour=9)
                   .finish().at(day=1, hour=17)
                   .start().at(day=2, hour=9)
                   .finish().at(day=2, hour=17)
                   .start().at(day=3, hour=9)
                   .finish().at(day=3, hour=17)
                   .start().at(day=4, hour=9)
                   .finish().at(day=4, hour=17)
                   .start().at(day=5, hour=9)
                   .finish().at(day=5, hour=18)
                   .done())
        self.actions_to_return(actions)
        view: StatusView = self.handle(["status", "whatsup"])
        self.assertEqual(view.status, WorkOptions.FINISH)
        self.assertEqual(view.today_passed_time, timedelta(hours=9))
        self.assertEqual(view.today_remained_time, timedelta(hours=-1))
        self.assertEqual(view.total_passed_time, timedelta(hours=41))
        self.assertEqual(view.total_remained_time, timedelta(hours=-1))
        self.assertEqual(view.from_time, time(hour=9))
        self.assertEqual(view.to_time, None)
|
import datetime
import pytz
from django.core.paginator import Paginator
from django.db.models import Q, F, QuerySet
from bsmodels.models import Contest, ContestProblem, Problem, Registration
from utils.auxiliary import ret_response
from utils.decorators import require_nothing
@require_nothing
def contest_list(request, data):
    """Return a paginated contest listing.

    ``data`` keys: pagesize, pagenum (required); optional filters: type
    (space-separated status names), author (username substring), keyword
    (contest-name substring).  The response contains one item per contest on
    the requested page plus the total match count.
    """
    pagesize = int(data['pagesize'])
    pagenum = int(data['pagenum'])
    lst = Contest.objects.all()
    if data.get('type'):
        # Renamed from ``type`` (shadowed the builtin).  Status is computed in
        # Python, so matching ids must be collected before filtering.
        wanted_statuses = data['type'].split(' ')
        satisfy = [contest.id for contest in lst if contest.get_status() in wanted_statuses]
        lst = lst.filter(id__in=satisfy)
    if data.get('author'):
        author = data['author']
        lst = lst.select_related('authorid').filter(authorid__username__icontains=author)
    if data.get('keyword'):
        keyword = data['keyword']
        lst = lst.filter(name__icontains=keyword)
    lst = lst.order_by('-start')
    total = lst.count()
    paginator = Paginator(lst, pagesize)
    page = paginator.page(pagenum)
    openid = request.session.get('openid')  # hoisted: same value for every row
    items = []
    # NOTE: one Contest/Registration lookup per row (N+1 queries); acceptable
    # for page-sized lists, but a select_related/prefetch would scale better.
    for row in page.object_list.values():
        contestid = row['id']
        contest = Contest.objects.get(id=contestid)
        item = {
            'contestid': contest.id,
            'name': contest.name,
            'start': contest.start.strftime('%Y-%m-%d %H:%M:%S'),
            'latest': contest.latest.strftime('%Y-%m-%d %H:%M:%S'),
            'public': contest.password is None,
            'rated': contest.rated,
            'time_limited': {
                "single": 0,
                "multiple": 0,
                "binary": 0,
                "completion": 0
            },
            'author': contest.authorid.username,
            'total_number': contest.count_problem()
        }
        if openid:
            item['started'] = False
            registrations = Registration.objects.filter(contestid=contestid, userid_id=openid)
            if registrations.exists():
                item['registered'] = True
                # A non-null starttime means the user already entered the contest.
                if registrations[0].starttime is not None:
                    item['started'] = True
            else:
                item['registered'] = False
        # Per-question-type time limits for this contest.
        contest_problem = ContestProblem.objects.filter(contestid=contest) \
            .exclude(problemid__isnull=True).order_by('number')
        problems = list(contest_problem.values('problemid', 'duration'))
        for it in problems:
            problemid = it['problemid']
            if Problem.objects.filter(id=problemid).exists():
                problem = Problem.objects.get(id=problemid)
                item['time_limited'][problem.type] = it['duration']
        item['register_num'] = Registration.objects.filter(contestid=contestid).count()
        # In-memory only: fill a missing end time so get_status() can use it.
        if contest.end is None:
            contest.end = contest.get_end_time()
        item['status'] = contest.get_status()
        items.append(item)
    return ret_response(0, {'items': items, 'total': total})
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from tripleo_common.tests import base
from tripleo_common import update
class UpdateManagerTest(base.TestCase):
    """Unit tests for update.PackageUpdateManager."""

    def setUp(self):
        super(UpdateManagerTest, self).setUp()

    @mock.patch('time.time')
    def test_update(self, mock_time):
        """A stack update sends the expected fields, hooks and identifiers."""
        heatclient = mock.MagicMock()
        novaclient = mock.MagicMock()
        mock_time.return_value = 123.5
        heatclient.stacks.get.return_value = mock.MagicMock(
            stack_name='stack', id='stack_id')
        stack_fields = {
            'stack_id': 'stack_id',
            'stack_name': 'mystack',
            'template': 'template body',
            'environment': {},
            'files': {},
        }

        manager = update.PackageUpdateManager(
            heatclient=heatclient,
            novaclient=novaclient,
            stack_id='stack_id',
            stack_fields=stack_fields,
        )
        manager.update()

        # The pre-update hook is registered for every resource, and both
        # identifiers are derived from the mocked int(time.time()) == 123.
        expected_environment = {
            'resource_registry': {
                'resources': {
                    '*': {
                        '*': {
                            'UpdateDeployment': {'hooks': 'pre-update'}
                        }
                    }
                }
            },
            'parameter_defaults': {
                'DeployIdentifier': 123,
                'UpdateIdentifier': 123,
                'StackAction': 'UPDATE'
            },
        }
        heatclient.stacks.update.assert_called_once_with(
            existing=True,
            stack_name='mystack',
            stack_id='stack_id',
            template='template body',
            files={},
            environment=expected_environment,
            timeout_mins=240,
        )
|
"""
validataclass
Copyright (c) 2021, binary butterfly GmbH and contributors
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
import pytest
from validataclass.exceptions import RequiredValueError, InvalidTypeError, StringInvalidLengthError, InvalidUrlError
from validataclass.validators import UrlValidator
class UrlValidatorTest:
    """
    Unit tests for UrlValidator.

    NOTE(review): the class name does not match pytest's default discovery
    pattern ('Test*'); presumably the project configures ``python_classes``
    accordingly - confirm before relying on these tests running.
    """
    # General tests
    @staticmethod
    def test_invalid_none():
        """ Check that UrlValidator raises exceptions for None as value. """
        validator = UrlValidator()
        with pytest.raises(RequiredValueError) as exception_info:
            validator.validate(None)
        assert exception_info.value.to_dict() == {'code': 'required_value'}
    @staticmethod
    def test_invalid_wrong_type():
        """ Check that UrlValidator raises exceptions for values that are not of type 'str'. """
        validator = UrlValidator()
        with pytest.raises(InvalidTypeError) as exception_info:
            validator.validate(123)
        assert exception_info.value.to_dict() == {
            'code': 'invalid_type',
            'expected_type': 'str',
        }
    @staticmethod
    def test_invalid_empty_string():
        """ Check that UrlValidator raises exceptions for empty strings. """
        validator = UrlValidator()
        with pytest.raises(StringInvalidLengthError) as exception_info:
            validator.validate('')
        assert exception_info.value.to_dict() == {
            'code': 'string_too_short',
            'min_length': 1,
            'max_length': 2000,
        }
    @staticmethod
    def test_invalid_string_too_long():
        """ Check that UrlValidator raises exceptions for strings that are too long. """
        # Construct a URL that is technically valid but too long
        input_string = 'https://example.com/'
        input_string += 'a' * (2001 - len(input_string))
        validator = UrlValidator()
        with pytest.raises(StringInvalidLengthError) as exception_info:
            validator.validate(input_string)
        assert exception_info.value.to_dict() == {
            'code': 'string_too_long',
            'min_length': 1,
            'max_length': 2000,
        }
    # Tests for regex validation of URL format
    @staticmethod
    @pytest.mark.parametrize(
        'input_string', [
            'https://example.com',
            'https://example.com/foo/bar.html#baz',
            'https://example.com/foo%20bar.html%3Ffoo%3Dbar',
            'https://foo.bar.baz.example.com/foo.html',
            'http://localhost/',
            'http://123.45.67.89:8080/?foo=bar',
            'http://user:password@[2001:abc::1234]:8080?foo=bar#baz@bloop',
            'ftp://user@examplehost/file/path',
            'git://github.com/binary-butterfly/validataclass.git',
            'git+https://github.com/binary-butterfly/validataclass@0.1.0#egg=validataclass',
        ]
    )
    def test_url_regex_valid(input_string):
        """ Test UrlValidator regex validation with valid strings. """
        # Choose options to be most permissive to focus on the regex validation (allow any scheme, etc.)
        validator = UrlValidator(allowed_schemes=[], require_tld=False, allow_ip=True, allow_userinfo=True)
        assert validator.validate(input_string) == input_string
    @staticmethod
    @pytest.mark.parametrize(
        'input_string', [
            # No or invalid scheme, missing delimiters
            '://example.com',
            '123://example.com',
            'http//example.com',
            'http:example.com',
            # Empty or invalid host
            'https://',
            'https:///path',
            'https://example.com@',
            'https://@example.com',
            'https://[2001:abc::1234/foo/bar',
            # Invalid port
            'https://example.com:/foo/bar',
            'https://example.com:0/foo/bar',
            # Invalid URL encoding
            'https://example.com/foo%xxbar',
            'https://example.com/foo%1',
        ]
    )
    def test_url_regex_invalid(input_string):
        """ Test UrlValidator with default options with invalid URL strings that fail the regex validation. """
        validator = UrlValidator(allowed_schemes=[], require_tld=False, allow_ip=True, allow_userinfo=True)
        with pytest.raises(InvalidUrlError) as exception_info:
            validator.validate(input_string)
        assert exception_info.value.to_dict() == {
            'code': 'invalid_url',
            'reason': 'Invalid URL format.',
        }
    # Tests with default options
    @staticmethod
    @pytest.mark.parametrize(
        'input_string', [
            'https://example.com',
            'https://sub.domain.example.com/foo/bar',
            'https://xn--hxajbheg2az3al.xn--qxam/?foo=bar',
            'http://123.45.67.89:8080?',
            'http://[2001:abc::1234]:8080?',
        ]
    )
    def test_url_with_default_options_valid(input_string):
        """ Test UrlValidator with default options with valid URL strings. """
        validator = UrlValidator()
        assert validator.validate(input_string) == input_string
    @staticmethod
    @pytest.mark.parametrize(
        'input_string, error_reason', [
            # Invalid scheme
            ('ftp://example.com', 'URL scheme is not allowed.'),
            # No TLD
            ('https://example', 'Invalid host in URL.'),
            ('https://example/foo/bar.com', 'Invalid host in URL.'),
            ('https://example?foo=bar.com', 'Invalid host in URL.'),
            # Invalid domain names and IP addresses
            ('https://$$$.com', 'Invalid host in URL.'),
            ('https://-example.com', 'Invalid host in URL.'),
            ('https://256.256.256.256/foo.bar', 'Invalid host in URL.'),
            ('https://[2001]/foo.bar', 'Invalid host in URL.'),
            ('https://[2001:abc::xxxx]/foo.bar', 'Invalid URL format.'),
            # Contains userinfo
            ('https://username@example.com', 'Userinfo component not allowed in URL.'),
            # Invalid port number
            ('https://example.com:123456/', 'Invalid port number in URL.'),
        ]
    )
    def test_url_with_default_options_invalid(input_string, error_reason):
        """ Test UrlValidator with default options with invalid URL strings. """
        validator = UrlValidator()
        with pytest.raises(InvalidUrlError) as exception_info:
            validator.validate(input_string)
        assert exception_info.value.to_dict() == {
            'code': 'invalid_url',
            'reason': error_reason,
        }
    # Tests for allowed_schemes option
    @staticmethod
    @pytest.mark.parametrize(
        'allowed_schemes, input_string', [
            # Default (http and https only)
            (None, 'http://example.com'),
            (None, 'https://example.com'),
            # Empty list means "allow any (valid) scheme"
            ([], 'https://example.com'),
            ([], 'git+https://github.com/foo'),
            ([], 'file://example.com/etc/passwd'),
            # Custom list
            (['ftp', 'gopher'], 'ftp://example.com'),
            (['ftp', 'gopher'], 'gopher://example.com:123/1foobar'),
        ]
    )
    def test_url_allowed_schemes_valid(allowed_schemes, input_string):
        """ Test UrlValidator with `allowed_schemes` option with valid URL strings. """
        validator = UrlValidator(allowed_schemes=allowed_schemes)
        assert validator.validate(input_string) == input_string
    @staticmethod
    @pytest.mark.parametrize(
        'allowed_schemes, input_string', [
            # Default (http and https only)
            (None, 'ftp://example.com'),
            # Custom lists
            (['https'], 'http://example.com'),
            (['ftp', 'gopher'], 'https://example.com'),
        ]
    )
    def test_url_allowed_schemes_invalid(allowed_schemes, input_string):
        """ Test UrlValidator with `allowed_schemes` option with invalid URL strings. """
        validator = UrlValidator(allowed_schemes=allowed_schemes)
        with pytest.raises(InvalidUrlError) as exception_info:
            validator.validate(input_string)
        assert exception_info.value.to_dict() == {
            'code': 'invalid_url',
            'reason': 'URL scheme is not allowed.',
        }
    # Tests for boolean validator options
    @staticmethod
    @pytest.mark.parametrize(
        'require_tld, allow_ip, allow_userinfo, input_string', [
            # Domain name with TLD
            (False, False, False, 'https://example.com'),
            (True, True, True, 'https://example.com'),
            # Domain name without TLD
            (False, False, False, 'https://localhost'),
            (False, True, True, 'https://example'),
            # IP addresses
            (False, True, False, 'https://123.45.67.89'),
            (True, True, True, 'https://123.45.67.89'),
            # URLs with userinfo
            (True, False, True, 'https://username@example.com'),
            (False, False, True, 'https://username@localhost'),
            (True, True, True, 'https://username:password@123.45.67.89'),
            # URLs without userinfo
            (False, False, False, 'https://example.com/username@foo.bar'),
        ]
    )
    def test_url_with_boolean_options_valid(require_tld, allow_ip, allow_userinfo, input_string):
        """ Test UrlValidator with various boolean options (require_tld, etc.) with valid URL strings. """
        validator = UrlValidator(require_tld=require_tld, allow_ip=allow_ip, allow_userinfo=allow_userinfo)
        assert validator.validate(input_string) == input_string
    @staticmethod
    @pytest.mark.parametrize(
        'require_tld, allow_ip, allow_userinfo, input_string, error_reason', [
            # Domain name without TLD
            (True, False, False, 'https://localhost#foo.bar', 'Invalid host in URL.'),
            (True, True, True, 'https://example/foo.bar', 'Invalid host in URL.'),
            (True, True, True, 'https://user.name@example', 'Invalid host in URL.'),
            # IP addresses
            (False, False, False, 'https://123.45.67.89', 'Invalid host in URL.'),
            (True, False, True, 'https://[2001:abc::1234]', 'Invalid host in URL.'),
            (True, False, True, 'https://user.name@123.45.67.89', 'Invalid host in URL.'),
            # URLs with userinfo
            (False, True, False, 'https://username@123.45.67.89', 'Userinfo component not allowed in URL.'),
            (True, False, False, 'https://username:123@example.com/foo', 'Userinfo component not allowed in URL.'),
        ]
    )
    def test_url_with_boolean_options_invalid(require_tld, allow_ip, allow_userinfo, input_string, error_reason):
        """ Test UrlValidator with various boolean options (require_tld, etc.) with invalid URL strings. """
        validator = UrlValidator(require_tld=require_tld, allow_ip=allow_ip, allow_userinfo=allow_userinfo)
        with pytest.raises(InvalidUrlError) as exception_info:
            validator.validate(input_string)
        assert exception_info.value.to_dict() == {
            'code': 'invalid_url',
            'reason': error_reason,
        }
|
import sys
from loguru import logger
# sys.stdout.reconfigure(encoding="utf-8")
class Format:
    # Building blocks for a loguru format string; each field is color-tagged.
    time = "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green>"
    level = "<level>{level: <8}</level>"
    module = "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>"
    message = "<level>{message}</level>"

# Minimum level emitted to stdout.
LEVEL = "INFO"
# Toggle to also persist logs to _filename.
SAVE_TO_FILE = False
_filename = "data/logs/user.log"

# Drop loguru's default sink and install our own.
logger.remove()
# NOTE(review): the Format class above is defined but never passed to
# logger.add(), so loguru's default format is used -- confirm intended.
logger.add(sys.stdout, level=LEVEL)
if SAVE_TO_FILE:
    logger.add(_filename, encoding="utf8")
# Commented-out experiment: per-synthesizer colored name tags.
# from collections import defaultdict
# from random import choice
#
# colors = ["blue", "green", "magenta", "red", "yellow"]
# color_per_module = defaultdict(lambda: choice(colors))
#
# logger.bind(synthesizer_name=name)
# _color_tag = choice(colors)
# _name_fmt = "<{}>".format(_color_tag) + "{extra[synthesizer_name]}" + "</{}>".format(_color_tag)
# _formatter = " | ".join((Format.time, Format.level, Format.module, _name_fmt, Format.message))
# logger.add(sys.stdout, format=_formatter)
'''
@copyright: 2022 - Symas Corporation
'''
from .config import Config
from .validator import Validator
from .date import Date
from .day import Day
from .lockdate import LockDate
from .time import Time
from .timeout import TimeOut
from .current_date_time import CurrentDateTime
from .global_ids import ACTV_FAILED_DAY, ACTV_FAILED_DATE, ACTV_FAILED_TIMEOUT, ACTV_FAILED_TIME, ACTV_FAILED_LOCK, SUCCESS
from .fortress_error import RbacError
from .logger import logger |
#!/usr/bin/env python
import os
import re
import sys
import time
import json
import cPickle as pickle
import pprint
import logging
import webapp2
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.ext.webapp import template
import datetime
# Determine the most recent exam terms from today's date.
# Winter results arrive only the following January and Summer results around
# June, so any date before June still refers to the previous academic year.
today = datetime.datetime.today()
if today.month < 6:
    latest_term = "AUTUMN"
    base_year = today.year - 1
    latest_normal_term = "{} {}".format(base_year, latest_term)
    latest_normal_term2 = "{} {}".format(base_year, "WINTER")
    acad_year = "{}-{}".format(base_year, today.year)
    latest_terms = [
        latest_normal_term,
        latest_normal_term + " RE-EXAM",
        latest_normal_term2,
    ]
else:
    latest_term = "SPRING"
    acad_year = "{}-{}".format(today.year, today.year + 1)
    latest_normal_term = "{} {}".format(today.year, latest_term)
    latest_terms = [
        latest_normal_term,
        latest_normal_term + " RE-EXAM",
        "{} TERM SUMMER".format(today.year),
    ]
# Manual over-ride
# latest_terms = ['2013 SPRING','2013 SPRING RE-EXAM', '2013 TERM SUMMER']

# Letter grades mapped to grade points.
grades = {'AA': 10, 'AB': 9, 'BB': 8, 'BC': 7, 'CC': 6,
          'SS': 10, 'CD': 5, 'DD': 4, 'FF': 0, 'W': 0}
terms = ['SPRING', 'AUTUMN', 'RE-EXAM', 'SUMMER']
# We still include the Analyser class (cp cv) because I can't handle globals
class Analyser:
    """Read-only query helpers over the JSON-loaded module globals
    (`database`, `course_data`); nothing here mutates state."""

    def All_Courses(self, serial=True, terms=True, alphabetically=True):
        """Return printable one-line summaries of every course.

        serial/terms toggle which details are appended; alphabetically sorts
        the resulting lines.
        """
        data = list()
        for each in course_data:
            to_print = str(each)
            if serial:
                to_print += ' || Serial - ' + str(course_data[each]['Serial'])
            if terms:
                course_terms = list(course_data[each]['Records'].keys())
                to_print += ' || Terms - ' + str(course_terms)
            data.append(to_print)
        if alphabetically:
            data.sort()
        return data

    def Individual_Record(self, roll, term=None):
        """Return a student's full record, or a single term's record.

        Implicitly returns None for an unknown roll number.
        """
        if roll in database:
            if not term:
                return database[roll]
            else:
                return database[roll]['Records'][term]

    def Make_Marklist(self, course=False, course_term=None, branch=None, batch=None, term=latest_terms[0], cg=False, sg=False, names=False):
        """Build a mark list from one of three sources.

        course (+optional course_term): marks from a course's records; without
        course_term, returns one list per term (term label first).
        cg: CGPA values across the database, filtered by branch/batch.
        sg: SGPA values for `term`, filtered by branch/batch.
        names=True pairs each mark with the student's name (or roll).
        Returns an empty list when no mode matches.
        """
        mark_list = list()
        if course and course in course_data:
            if course_term and course_term in course_data[course]['Records']:
                for rolls in course_data[course]['Records'][course_term]:
                    mark_list.append(course_data[course]['Records'][course_term][rolls])
                return mark_list
            else:
                big_list = list()
                for course_term in course_data[course]['Records']:
                    # Term label goes first so callers can identify each sub-list.
                    mark_list.append(course_term)
                    for rolls in course_data[course]['Records'][course_term]:
                        if names:
                            mark_list.append((course_data[course]['Records'][course_term][rolls], rolls))
                        else:
                            mark_list.append(course_data[course]['Records'][course_term][rolls])
                    big_list.append(mark_list)
                    mark_list = list()
                return big_list
        elif cg:
            for roll in database:
                cur_cg = database[roll]['CGPA']
                should_add = True
                if branch:
                    if not database[roll]['Branch'] == branch:
                        should_add = False
                if batch:
                    if not batch == database[roll]['Batch']:
                        should_add = False
                if should_add:
                    if names:
                        name = database[roll]['Name']
                        mark_list.append((cur_cg, name))
                    else:
                        mark_list.append(cur_cg)
            return mark_list
        elif sg:
            for roll in database:
                should_add = True
                if branch:
                    if not database[roll]['Branch'] == branch:
                        should_add = False
                if batch:
                    if not batch == database[roll]['Batch']:
                        should_add = False
                if should_add:
                    for cur_term in database[roll]['Records']:
                        if cur_term == term:
                            cur_sg = database[roll]['Records'][cur_term]['SGPA']
                            if names:
                                name = database[roll]['Name']
                                mark_list.append((cur_sg, name))
                            else:
                                mark_list.append(cur_sg)
                            break
            return mark_list
        # If the input was wrong, we dont want to return None.
        return list()

    def Mean_Deviation(self, marklist):  # Takes marks, outputs mean & std deviation
        """Return (mean, std deviation, fail count) for a mark list.

        Falsy (zero) marks count as failures and are excluded from N and the
        deviation. Accepts plain marks or (mark, name) tuples.
        NOTE(review): this app targets Python 2 (see `import cPickle` above),
        so total/N floors when all marks are ints -- confirm intended.
        """
        if len(marklist) > 0:
            if not isinstance(marklist[0], tuple):
                total = sum(marklist)
                fail = 0
                N = len(marklist)
                for each in marklist:
                    if not each:
                        fail += 1
                N = N - fail
                devn = 0
                if N:  # Division by zero error avoidance
                    mean = total/N
                else:
                    mean = total
                for each in marklist:
                    if each:
                        devn += (mean - each)**2
                if N:
                    devn = (devn/N)**0.5
                return mean, devn, fail
            else:
                total = 0
                fail = 0
                for each in marklist:
                    total += each[0]
                    if not each[0]:
                        fail += 1
                N = len(marklist)
                N = N - fail
                devn = 0
                if N:  # Division by zero error avoidance
                    mean = total/N
                else:
                    mean = total
                for each in marklist:
                    if each[0]:
                        devn += (mean - each[0])**2
                if N:
                    devn = (devn/N)**0.5
                return mean, devn, fail

    def Gradify(self, marklist, percent=True, cumulative=False):
        """Bucket marks into [grade, count] categories.

        percent=True converts counts to percentages; cumulative=True reverses
        the bucket order and counts every bucket a mark reaches.
        NOTE(review): the loop stops at len(categories)-1, so the last bucket
        is never incremented directly -- confirm the 'F' handling is intended.
        """
        categories = [[10, 0], [9, 0], [8, 0], [7, 0], [6, 0], [5, 0], [4, 0], ['F', 0]]
        if cumulative:
            categories = [['F', 0], [4, 0], [5, 0], [6, 0], [7, 0], [8, 0], [9, 0], [10, 0]]
        for mark in marklist:
            if isinstance(mark, tuple):
                mark = mark[0]
            #if mark == 0:
            #    categories[7][1] += 1
            for i in range(len(categories)-1):
                if mark >= categories[i][0]:
                    categories[i][1] += 1
                    if not cumulative:
                        break
        if percent:
            total = len(marklist)
            for i in range(len(categories)):
                categories[i][1] = categories[i][1]*100/total
        return categories

    def Ranking(self, marklist):
        """Rank marks descending.

        Plain marks: the sorted list. (mark, name) tuples: a list of
        (rank, [names]) with ties sharing one rank entry.
        """
        if marklist:
            if not isinstance(marklist[0], tuple):
                return sorted(marklist, reverse=True)
            else:
                data_dict = dict()
                marks_list = list()
                for each in marklist:
                    if each[0] in data_dict:
                        data_dict[each[0]].append(each[1])
                    else:
                        data_dict[each[0]] = [each[1]]
                        marks_list.append(each[0])
                marks_list = set(marks_list)
                to_return = list()
                for mark in sorted(marks_list, reverse=True):
                    cur_data = sorted(data_dict[mark])
                    to_return.append((len(to_return)+1, cur_data))
                return to_return

    def Course_Performance(self, course, exclude_re=True, percent=True, cumulative=False):
        """Build chart data for one course: (term labels, per-grade series).

        exclude_re drops terms whose label ends in 'M' (the RE-EXAM terms).
        """
        # Need terms & their graded data.
        poss_grades = [10, 9, 8, 7, 6, 5, 4, 0]
        big_list = self.Make_Marklist(course)
        big_list.sort(key=(lambda k: k[0]))
        graded_list = list()
        course_terms = list()
        for each in big_list:
            if exclude_re and each[0][-1] == 'M':
                continue
            # each[0] is the term label; the rest are the marks.
            course_terms.append(str(each[0]))
            graded_list.append(self.Gradify(each[1:], percent, cumulative))
        assert len(course_terms) == len(graded_list)
        to_return = [course_terms, list()]
        for i in range(len(poss_grades)):
            cur = dict()
            cur['name'] = str(poss_grades[i])
            cur['data'] = list()
            for k in range(len(graded_list)):
                cur['data'].append(graded_list[k][i][1])
            to_return[1].append(cur)
        return to_return

    def Student_Performance(self, roll, egp=True):
        """Build chart data for one student: [term labels, per-term data dicts]
        with either the term's EGP or SGPA attached."""
        # Need terms, egps, term-wise course grades
        to_return, terms, term_data = list(), list(), list()
        stud_data = database[roll]
        for term in sorted(stud_data['Records'].keys()):
            cur_data = dict()
            if egp:
                cur_data['egp'] = stud_data['Records'][term]['EGP']
            else:
                cur_data['sg'] = stud_data['Records'][term]['SGPA']
            cur_data['courses'], cur_data['data'] = list(), list()
            cur_data['name'] = str(term)
            for course in stud_data['Records'][term]['Courses']:
                cur_data['courses'].append(str(course_data[course]['Name']+' ('+course+')'))
                cur_data['data'].append(stud_data['Records'][term]['Courses'][course])
            term_data.append(cur_data)
            terms.append(str(cur_data['name']))
        # Assign a distinct color index per term for the chart.
        for i in range(len(term_data)):
            term_data[i]['color'] = i
        to_return.append(terms)
        to_return.append(term_data)
        return to_return
database = dict()  # For individual student data storing
course_data = dict()  # Records of every course for every sem
department_data = dict()  # Dept.-wise student records by cur_cg
# Pre-computed aggregates, populated from JSON files by run_once() below.
cg_avgs = dict()
cg_distribution = dict()
cg_stats = dict()
course_stats = dict()
rank_data = dict()
def run_once():
    """Load every pre-computed JSON blob from disk into the module globals."""
    # Each global name maps to a '<name>.txt' JSON file produced offline.
    for name in ('cg_avgs', 'cg_distribution', 'course_data',
                 'course_stats', 'database', 'rank_data'):
        with open(name + '.txt') as cur_file:
            globals()[name] = json.load(cur_file)


run_once()
#All the necessary funcs go here...
class MainHandler(webapp2.RequestHandler):
    """Static landing page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_main__')
        if not page:
            page = template.render("templates/index.html", {})
            memcache.set('_static_main__', page)
        self.response.out.write(page)
class CourseHandler(webapp2.RequestHandler):
    """Course index and per-course performance pages."""

    def get(self):
        """Render the all-courses index (cached) or one course's stats.

        Query params: serial (course id, or empty/'all' for the index),
        q ('data' for the raw record), exclude_re ('0' keeps re-exam terms),
        percent ('0' shows counts instead of percentages).
        """
        template_values = {}
        serial = self.request.get('serial')
        template_values['serial'] = serial
        if not serial or serial == "all":
            to_render = memcache.get('_static_courses_all__')
            if not to_render:
                path = "templates/course_all.html"
                # (serial, course name) pairs sorted by serial.
                # (Removed a dead `data = sorted(course_data.keys())` that was
                # immediately overwritten by the line below.)
                data = sorted(map(lambda x: (x, course_data[x]['Name']), course_data), key=lambda x: x[0])
                template_values['data'] = data
                to_render = template.render(path, template_values)
                memcache.set('_static_courses_all__', to_render)
        else:
            q = self.request.get('q')
            path = "templates/courses.html"
            if q == 'data':
                data = course_data[serial]
            else:
                course = course_data[serial]['Name']
                template_values['course'] = course
                # Renamed from `re` to avoid shadowing the imported re module.
                exclude_re_param = self.request.get('exclude_re')
                per = self.request.get('percent')
                exclude_re, percent = True, True
                if str(exclude_re_param) == '0':
                    exclude_re = False
                if str(per) == '0':
                    percent = False
                data = None
                do = Analyser()
                render_data = do.Course_Performance(serial, exclude_re, percent)
                template_values['terms'] = render_data[0]
                template_values['series'] = render_data[1]
                template_values['exclude_re'] = exclude_re
                template_values['percent'] = percent
            template_values['data'] = data
            to_render = template.render(path, template_values)
        self.response.out.write(to_render)
class StudentHandler(webapp2.RequestHandler):
    """Individual student record page with an optional performance graph."""

    def render(self, roll):
        """Render the student page for `roll` (case-insensitive).

        Query params: graph ('0' disables the chart), egp ('0' plots SGPA
        instead of EGP).
        """
        template_values = {}
        roll = roll.upper()
        path = "templates/student.html"
        template_values['present'] = True
        template_values['roll'] = roll
        template_values['data'] = None
        if roll:
            if roll in database:
                data = database[roll]
                template_values['data'] = pprint.pformat(data)
                graph = self.request.get('graph')
                if not str(graph) == '0':
                    egp_str = self.request.get('egp')
                    egp = True
                    if str(egp_str) == '0':
                        egp = False
                    cache_key = '_roll_' + str(roll) + str(graph) + str(egp)
                    mem_template_values = memcache.get(cache_key)
                    if not mem_template_values:
                        template_values['graph'] = True
                        template_values['egp'] = egp
                        do = Analyser()
                        render_data = do.Student_Performance(roll, egp)
                        template_values['terms'] = render_data[0]
                        template_values['term_data'] = render_data[1]
                        ranks = {}
                        cg_roll = database[roll]['CGPA']
                        dept = database[roll]['Branch']
                        batch = database[roll]['Batch']
                        ranks['insti'] = (rank_data['All'].index(cg_roll) + 1, len(rank_data['All']))
                        ranks['dept'] = (rank_data[dept]['All'].index(cg_roll) + 1, len(rank_data[dept]['All']))
                        ranks['batch_dept'] = (rank_data[dept][batch].index(cg_roll) + 1, len(rank_data[dept][batch]))
                        ranks['batch_insti'] = (rank_data[batch].index(cg_roll) + 1, len(rank_data[batch]))
                        template_values['ranks'] = ranks
                        # Bug fix: cache under the SAME key used for the lookup
                        # above. The old set() key omitted str(egp), so get()
                        # could never hit and the two egp variants would have
                        # clobbered each other.
                        memcache.set(cache_key, template_values)
                    else:
                        template_values = mem_template_values
                else:
                    template_values['graph'] = False
            else:
                template_values['present'] = False
        self.response.out.write(template.render(path, template_values))

    def get(self):
        roll = self.request.get('roll')
        self.render(roll)

    def post(self):
        roll = self.request.get('roll')
        self.render(roll)
class StatsHandler(webapp2.RequestHandler):
    """CG-distribution statistics pages."""

    def get(self):
        """Without branch/batch params, list the available statistics;
        otherwise render the matching cg_distribution series.

        Query params: branch, batch, percent ('0' for counts),
        cumulative ('0' for per-bucket).
        """
        template_values = {}
        path = "templates/stats.html"
        template_values['present'] = True
        batch = self.request.get('batch', '')
        branch = self.request.get('branch', '')
        if not branch and not batch:  # Display all statistics links
            data = memcache.get('_stats_data')
            if not data:
                data = {'Lists': [], 'Dicts': {}}
                # 'FalseFalse' is the percent=False/cumulative=False variant;
                # used here only to discover the available keys.
                for each in cg_distribution['FalseFalse']:
                    if isinstance(cg_distribution['FalseFalse'][each], list):
                        data['Lists'].append(each)
                    else:
                        data['Dicts'][each] = sorted(list(cg_distribution['FalseFalse'][each].keys()))
                data['Lists'] = sorted(data['Lists'])
                memcache.set('_stats_data', data)
            template_values['data'] = data
        else:  # Display the relevant statistics
            cumulative = self.request.get('cumulative', False)
            percent = self.request.get('percent', True)
            # Anything other than the literal '0' enables the flag.
            if str(percent) == '0':
                percent = False
            else:
                percent = True
            if str(cumulative) == '0':
                cumulative = False
            else:
                cumulative = True
            template_values['percent'] = percent
            template_values['cumulative'] = cumulative
            template_values['branch'] = branch
            template_values['batch'] = batch
            series = memcache.get('_stats_'+str(percent)+str(cumulative)+str(branch)+str(batch))
            if not series:
                # cg_distribution is keyed by str(percent)+str(cumulative),
                # then by branch/batch as available.
                if not branch:
                    series = cg_distribution[str(percent)+str(cumulative)][batch]
                else:
                    if not batch:
                        series = cg_distribution[str(percent)+str(cumulative)][branch]['All']
                    else:
                        series = cg_distribution[str(percent)+str(cumulative)][branch][batch]
                memcache.set('_stats_'+str(percent)+str(cumulative)+str(branch)+str(batch), series)
            template_values['series'] = series
        self.response.out.write(template.render(path, template_values))
class CommentsHandler(webapp2.RequestHandler):
    """Static comments page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_comments')
        if not page:
            page = template.render("templates/comments.html", {})
            memcache.set('_static_comments', page)
        self.response.out.write(page)
class PerformanceHandler(webapp2.RequestHandler):
    """CG-average performance pages (complete or batch-wise)."""

    def get(self):
        template_values = {}
        batchwise = self.request.get('q', False) == 'batchwise'
        cache_key = '_perf_batchwise' if batchwise else '_perf_complete'
        to_render = memcache.get(cache_key)
        if not to_render:
            if batchwise:
                template_values['perfdata'] = cg_avgs[1]
                to_render = template.render("templates/perfbatchwise.html", template_values)
            else:
                template_values['perfdata'] = cg_avgs[0]
                to_render = template.render("templates/performance.html", template_values)
            memcache.set(cache_key, to_render)
        self.response.out.write(to_render)
class CoursePerfHandler(webapp2.RequestHandler):
    """Tabulated per-course statistics page, memcached."""

    def get(self):
        page = memcache.get('_perf_course')
        if not page:
            stats = [[name, course_stats[name]] for name in sorted(course_stats)]
            page = template.render("templates/course_perf.html", {'course_stats': stats})
            memcache.set('_perf_course', page)
        self.response.out.write(page)
class PrivacyHandler(webapp2.RequestHandler):
    """Static privacy page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_privacy')
        if not page:
            page = template.render("templates/privacy.html", {})
            memcache.set('_static_privacy', page)
        self.response.out.write(page)
class TOSHandler(webapp2.RequestHandler):
    """Static terms-of-service page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_tos')
        if not page:
            page = template.render("templates/tos.html", {})
            memcache.set('_static_tos', page)
        self.response.out.write(page)
class AboutHandler(webapp2.RequestHandler):
    """Static about page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_about')
        if not page:
            page = template.render("templates/about.html", {})
            memcache.set('_static_about', page)
        self.response.out.write(page)
class ChangelogHandler(webapp2.RequestHandler):
    """Static changelog page, rendered once and memcached."""

    def get(self):
        page = memcache.get('_static_changelog')
        if not page:
            page = template.render("templates/changelog.html", {})
            memcache.set('_static_changelog', page)
        self.response.out.write(page)
def handle_404(request, response, exception):
    """Log the exception and emit a friendly 404 page.

    Fix: corrected the typo "Yoou" in the user-facing message.
    """
    logging.exception(exception)
    response.write('Oops! You seem to have wandered off! '
                   'The requested url/page does not exist. ')
    response.set_status(404)
def handle_500(request, response, exception):
    """Log the exception and emit a generic 500 error page."""
    logging.exception(exception)
    message = ('A server error occurred! Report has been logged. '
               'If you think this is SEVERE & NOT your fault, '
               'kindly report it to me by any convenient means :) '
               '<ashishnitinpatil@gmail.com>')
    response.write(message)
    response.set_status(500)
# URL routing table for the webapp2 application.
app = webapp2.WSGIApplication([('/', MainHandler),
                               ('/courses', CourseHandler),
                               ('/student', StudentHandler),
                               ('/tos', TOSHandler),
                               ('/about', AboutHandler),
                               ('/privacy', PrivacyHandler),
                               ('/comments', CommentsHandler),
                               ('/changelog', ChangelogHandler),
                               ('/stats', StatsHandler),
                               ('/performance', PerformanceHandler),
                               ('/courseperf', CoursePerfHandler), ],
                              debug=True)
# Custom error pages.
app.error_handlers[404] = handle_404
app.error_handlers[500] = handle_500
import enum
import itertools
import platform
import random
import datetime as dt
import statistics
import sys
from dataclasses import dataclass, asdict, field
from pathlib import Path
from typing import List, Optional
import pdfkit
from jinja2 import Template
# wkhtmltopdf lives at a fixed path on Windows; elsewhere rely on PATH lookup.
if platform.system() == "Windows":
    # ugh. Sorry. I need a better OS on this box, but this is a quick dirty hack
    path_wkhtmltopdf = r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
    PDF_CONFIG = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
else:
    PDF_CONFIG = pdfkit.configuration()
# Templates live next to this file.
TEMPLATES = Path(__file__).parent
with open(TEMPLATES / "template.jinja2") as fh:
    template = Template(source=fh.read())
# The probability any given test will fall within the "normal" range
P_PASS = 0.8
# Some numbers are formatted as a single decimal
ONE_DECIMAL = "{:0.1f}"
@dataclass
class Between:
    """
    The normal (inclusive) range of a test result, plus formatting rules.
    """
    low: float
    high: float
    fmt_precision: str = "{:0.0f}"  # how the numeric value is rendered
    fmt_result: str = "{}"  # extra stuff on the report?
    fmt_range: str = "{low} - {high}"

    # Classification constants returned by contains().
    BELOW = -1
    OK = 0
    ABOVE = 1

    def __str__(self):
        """
        String representation of the range itself
        """
        high, low = self.fmt_precision.format(self.high), self.fmt_precision.format(self.low)
        return self.fmt_range.format(high=high, low=low)

    def sample(self, rand_seed, p_pass) -> 'Sample':
        """
        Use a specific seed to deterministically generate a random-looking result within (or slightly out of) the
        expected high/low range
        """
        # Bad assumption: average Americans have metabolic panel values in the middle of the range. Haha what a joke.
        mean = (self.high + self.low) / 2
        # Math problem: What standard deviation would give us an out-of-range value `failure_pct` of the time?
        # Work backwards from z-score P values, and the fact that 1-sigma is 68%.
        # TODO: implement bias by messing with this distribution function.
        dist = statistics.NormalDist(0, 1)
        z_top = dist.inv_cdf((1 + p_pass) / 2)
        # He who controls the spice controls the universe.
        # By spice I mean psuedo-random number generator seed.
        z_sample = dist.samples(1, seed=rand_seed)[0]
        print(f"{z_sample}/{z_top}")
        allowed_deviation = abs(mean - self.high)
        actual_deviation = z_sample * allowed_deviation / z_top
        val = mean + actual_deviation
        return Sample(
            range=self,  # Contains formatting directives, and in/out of bounds info
            val=val,
        )

    def check_bounds(self, val: float) -> str:
        """Return 'L' or 'H' when val is out of range, else ''."""
        out_of_range = self.contains(val)
        if out_of_range == Between.BELOW:
            return "L"
        if out_of_range == Between.ABOVE:
            return "H"
        return ""

    def contains(self, value):
        """Classify value against the range.

        Bug fix: boundary values (value == low or value == high) previously
        fell through every branch and returned None; they are now classified
        as OK, matching the inclusive treatment in the LTHigh/GTLow
        subclasses.
        """
        if value < self.low:
            return Between.BELOW
        if value > self.high:
            return Between.ABOVE
        return self.OK
@dataclass
class LTHigh(Between):
    """
    Expect below. Must provide low anyway, so we can generate a number
    """
    fmt_range: str = "<{high}"

    def contains(self, value):
        # Only an excessive value counts as out of range here.
        return Between.ABOVE if self.high < value else Between.OK
@dataclass
class GTLow(Between):
    """
    Expect above. Must provide high anyway, so we can generate a number
    """
    fmt_range: str = ">{low}"

    def contains(self, value):
        # Only a deficient value counts as out of range here.
        return Between.BELOW if value < self.low else Between.OK
@dataclass
class Sample:
    """
    One sampled measurement plus the range that produced it; the range
    supplies all formatting rules and bounds checks.
    """
    range: Between
    val: float  # raw float; formatting applied lazily via `value`

    @property
    def value(self):
        # Apply numeric precision first, then any extra result decoration.
        rendered = self.range.fmt_precision.format(self.val)
        return self.range.fmt_result.format(rendered)

    @property
    def ok(self):
        return self.range.check_bounds(self.val)
@dataclass
class Test:
    """
    Quantitative description of a diagnostic test to run, including name, expected range, units, etc.
    """
    # Parameters to generate a test result.
    name: str
    range: Between
    units: str

    def sample(self, rand_seed, p_pass) -> 'Result':
        """
        Psuedo-random result generator: delegates to the range's seeded
        sampler and wraps the outcome with this test's metadata.
        """
        return Result(
            test=self,
            result=self.range.sample(rand_seed, p_pass)
        )
@dataclass
class Result:
    """
    The sampled result of a test: the test definition plus one Sample drawn
    from its range.
    """
    test: Test
    result: Sample
class GenList:
    """
    Data descriptor to get random data from bunch of test data generators
    Alsa a data attribute to generate a Result
    """

    def __init__(self, *generators):
        self.tests = generators

    def __get__(self, instance: 'LabReport', owner) -> List['Result']:
        """
        Render a result based on the lab report's patient number. Should be random-looking, but deterministic based
        on the patient number. Result includes test meta-data like expected reference range, test name, etc. It's
        ready to render on a template.
        """
        # Use the patient's ID number as a random seed. Same person, same results. Every time.
        # Good point of argument for skeptical patients.
        # But don't use the literal same seed value for every sample -- use a deterministic array of them, so
        # they're each in or out of range independently of one another
        not_rand = random.Random(instance.patient_number)
        results: List[Result] = [t.sample(not_rand.randint(0, sys.maxsize), instance.p_pass) for t in self.tests]
        for r in results:
            # Bug fix: an explicit None check so falsy override values
            # (e.g. 0) are applied instead of silently ignored (the old
            # `if val := ...` truthiness test skipped them).
            val = instance.override_samples.get(r.test.name)
            if val is not None:
                r.result.val = val
        return results
@dataclass
class LabReport:
    """
    Fake data for a fake lab report
    """
    # Configurable report parameters
    patient_number: int
    collected_at: dt.datetime
    has_disease: bool
    p_pass: float = P_PASS
    override_samples: dict = field(default_factory=dict)

    # Data descriptor for making a list of fake test results.
    # Use it like a property, e.g. `results = self.metabolic_panel`
    metabolic_panel = GenList(
        Test("Sodium", Between(132, 146), "mM"),
        Test("Potassium", Between(3.4, 5.4, fmt_precision=ONE_DECIMAL), "mM", ),
        Test("Chloride", Between(99, 109), "mM"),
        Test("Bicarbonate", Between(19, 33), "mM"),
        Test("Glucose", Between(73, 105, fmt_result="{}**"), "mg/dL"),
        Test("Bun", Between(6, 24), "mg/dL", ),
        Test("Creatine", Between(0.5, 1.2, fmt_precision=ONE_DECIMAL), "mg/dL", ),
        Test("Calcium", Between(8.3, 10.6, fmt_precision=ONE_DECIMAL), "g/dL", ),
        Test("Protein, Total", Between(6, 8, fmt_precision=ONE_DECIMAL), "g/dL", ),
        Test("Albumin", Between(3.5, 5.1, fmt_precision=ONE_DECIMAL), "g/dL", ),
        Test("Bilirubin, Total", Between(0.3, 1.4, fmt_precision=ONE_DECIMAL), "mg/dl", ),
        Test("ALP", Between(44, 135), "U/L", ),
        Test("ALT", Between(7.9, 40.9, fmt_precision=ONE_DECIMAL), "U/L"),
        Test("AST", Between(0, 35), "U/L"),
    )
    lipid_panel = GenList(
        Test("Cholesterol, Total", LTHigh(100, 240), "mg/dL"),
        Test("Triglycerides", LTHigh(100, 200), "mg/dL"),
        Test("HDL Cholesterol", GTLow(40, 90), "mg/dL"),
        Test("LDL Cholesterol", LTHigh(85, 130), "mg/dL"),
    )

    def force_result(self, name, value):
        """
        Force a specific test, by name, to have a specific value.

        Bug fix: the old code assigned to `Sample.value` -- a read-only
        property -- on a freshly generated panel that was then discarded, so
        it raised AttributeError and could never have stuck. Since the panels
        are descriptors that regenerate results on every access, the
        persistent mechanism is `override_samples`, which GenList.__get__
        consults on each generation.
        """
        for t in itertools.chain(self.metabolic_panel, self.lipid_panel):
            if t.test.name == name:
                self.override_samples[name] = value
                return
        raise KeyError(f"Test {name} not found")

    def as_html(self) -> str:
        """
        use the above template to get an HTML report
        """
        ctx = asdict(self)
        ctx['metabolic_panel'] = self.metabolic_panel
        ctx['lipid_panel'] = self.lipid_panel
        ctx['has_disease'] = self.has_disease
        # PDF requires inline style sheets, which we inject via templating
        with open(TEMPLATES / "style.css") as fh:
            ctx['style'] = fh.read()
        with open(TEMPLATES / "normalize.css") as fh:
            ctx['normalize'] = fh.read()
        print(ctx)
        return template.render(ctx)

    def save_html(self, filename):
        """Write the rendered HTML report to `filename`."""
        with open(filename, 'w') as fh:
            fh.write(self.as_html())

    def save_pdf(self, filename):
        """
        Generate psuedorandom results, and render them as a PDF
        """
        pdfkit.from_string(
            self.as_html(), filename, configuration=PDF_CONFIG,
            options={
                'encoding': "UTF-8",
                'print-media-type': '',
                'page-size': 'A4',
                'zoom': '1.1'
            }
        )
def generate(patient_number, output_folder, has_disease, p_pass):
    """Render one patient's lab report PDF into output_folder."""
    report = LabReport(
        patient_number=patient_number,
        collected_at=dt.datetime.now(),
        has_disease=has_disease,
        p_pass=p_pass,
    )
    report.save_pdf(Path(output_folder) / f"{patient_number}.pdf")
def gen_samples():
    """Generate the demo dataset: a batch of healthy reports (PDF output
    currently disabled -- see the commented save_pdf) plus one diseased
    report with hand-picked out-of-range values."""
    output_folder = Path(__file__).parent / "okay"
    output_folder.mkdir(exist_ok=True)
    START_AT = 15900
    NUM_REPORTS = 60
    # Arbitrary range of patient numbers; all healthy
    for i, patient_number in enumerate(range(START_AT, START_AT + NUM_REPORTS)):
        r = LabReport(patient_number=patient_number, collected_at=dt.datetime.now(), has_disease=False, p_pass=P_PASS)
        out = Path(output_folder) / f"report-{i}.pdf"
        # NOTE(review): healthy-report PDFs are not generated (call disabled);
        # only the LabReport objects are built. Confirm this is intentional.
        # r.save_pdf(out)
    # One bad patient, with ID 10
    BAD_ID = 10
    output_folder = Path(__file__).parent / "bad"
    output_folder.mkdir(exist_ok=True)
    # Halved p_pass makes unrelated values more likely to drift out of range too.
    r = LabReport(patient_number=START_AT + BAD_ID, collected_at=dt.datetime.now(), has_disease=True, p_pass=P_PASS/2)
    # Some specific hard-coded changes for this one
    r.override_samples = {
        'Sodium': 162,
        'Potassium': 6.8,
        "Bicarbonate": 40,
        "Chloride": 118,
        'Glucose': 152,
        'Bun': 41,
        'Creatine': 1.44,
        'Calcium': 15,
        'Protein, Total': 6.6,
        'Albumin': 33,
        'Bilirubin, Total': 2.4,
        "ALP": 188.8,
        'ALT': 31,
        'AST': 93,
        "Cholesterol, Total": 259,
        "Triglycerides": 213,
        "HDL Cholesterol": 22,
        "LDL Cholesterol": 158,
    }
    out = Path(output_folder) / f"report-{BAD_ID}.pdf"
    r.save_pdf(out)
if __name__ == "__main__":
    # NOTE(review): gen_samples() always runs first, and patient_number below
    # is a required positional -- invoking the script without arguments
    # therefore errors out AFTER generating the sample batch. Confirm this is
    # the intended CLI behaviour.
    gen_samples()
    import argparse, sys
    parser = argparse.ArgumentParser()
    # NOTE(review): no type=int, so patient_number arrives as a string; the
    # seed and filename still work, but it differs from the int patient
    # numbers used in gen_samples() -- confirm.
    parser.add_argument('patient_number', action='store')
    parser.add_argument('--output_folder', '-o', type=str, default='.')
    parser.add_argument('--has_disease', '-d', action='store_true')
    args = parser.parse_args(sys.argv[1:])
    # Diseased patients get a halved pass probability.
    generate(args.patient_number, args.output_folder, args.has_disease, P_PASS/2 if args.has_disease else P_PASS)
|
from .Integer import ZZ
from .QuotientRing import QuotientRing
from .polynomial_uni import UnivariatePolynomialRing
from .polynomial_multi import BivariatePolynomialRing
|
class Solution(object):
    """LeetCode 975 "Odd Even Jump"."""

    def oddEvenJumps(self, A):
        """Return the number of starting indices from which the end of A can
        be reached by alternating odd jumps (to the smallest value >= current,
        leftmost on ties) and even jumps (to the largest value <= current).
        """
        N = len(A)

        def make(B):
            # Monotonic-stack pass: visiting indices in order B, record each
            # index's jump target (the nearest later index that follows it in
            # B), or None when no target exists.
            ans = [None] * N
            stack = []  # invariant: stack is decreasing
            for i in B:
                while stack and i > stack[-1]:
                    ans[stack.pop()] = i
                stack.append(i)
            return ans

        # Values ascending -> odd-jump targets.
        B = sorted(range(N), key=lambda i: A[i])
        oddnext = make(B)
        # Values descending -> even-jump targets.
        B.sort(key=lambda i: -A[i])
        evennext = make(B)

        odd = [False] * N
        even = [False] * N
        odd[N-1] = even[N-1] = True
        # Fix: use range() instead of Python-2-only xrange() so the code also
        # runs on Python 3; behaviour is identical.
        for i in range(N-2, -1, -1):
            if oddnext[i] is not None:
                odd[i] = even[oddnext[i]]
            if evennext[i] is not None:
                even[i] = odd[evennext[i]]
        return sum(odd)
# Generated by Django 2.0.7 on 2018-08-14 22:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.7: sets admin-facing verbose names and
    # alters the client address field.

    dependencies = [
        ('api', '0008_company_owner'),
    ]

    operations = [
        # Human-readable model names for the admin.
        migrations.AlterModelOptions(
            name='client',
            options={'verbose_name': 'Client'},
        ),
        migrations.AlterModelOptions(
            name='employee',
            options={'verbose_name': 'Employee'},
        ),
        migrations.AlterModelOptions(
            name='owner',
            options={'verbose_name': 'Owner'},
        ),
        # NOTE(review): max_length=12 is unusually small for an address field
        # -- confirm against the current model definition.
        migrations.AlterField(
            model_name='client',
            name='address',
            field=models.CharField(blank=True, max_length=12, null=True, verbose_name='Address'),
        ),
    ]
|
from .users import UserViewSet |
import collections
import re
import numpy as np
TOKEN_RE = re.compile(r"[\w\d]+")


def tokenize_text_simple_regex(txt, min_token_size=4):
    """Lowercase txt and return its word tokens of length >= min_token_size."""
    tokens = TOKEN_RE.findall(txt.lower())
    return [tok for tok in tokens if len(tok) >= min_token_size]
def character_tokenize(txt):
    """Split txt into a list of its individual characters."""
    return [ch for ch in txt]
def tokenize_corpus(texts, tokenizer=tokenize_text_simple_regex, **tokenizer_kwargs):
    """Apply tokenizer (forwarding extra kwargs) to every text in the corpus."""
    return [tokenizer(doc, **tokenizer_kwargs) for doc in texts]
def add_fake_token(word2id, token="<PAD>"):
    """Shift every id in word2id up by one and map `token` to id 0.

    Fix: the original comprehension reused the name `token` for its loop
    variable, shadowing the parameter; it only worked because Python 3
    comprehension scopes don't leak. Distinct names make the intent explicit.
    """
    word2id_new = {word: idx + 1 for word, idx in word2id.items()}
    word2id_new[token] = 0
    return word2id_new
def texts_to_token_ids(tokenized_texts, word2id):
    """Map each tokenized text to a list of ids, dropping out-of-vocabulary tokens."""
    ids = []
    for text in tokenized_texts:
        ids.append([word2id[tok] for tok in text if tok in word2id])
    return ids
def build_vocabulary(
    tokenized_texts,
    max_size=1000000,
    max_doc_freq=0.8,
    min_count=5,
    pad_word=None,
    sublinear_df=False,
    smooth_df=False,
):
    """Build (word2id, word2freq) from tokenized documents.

    word2id maps word -> rank index (pad_word, if given, gets index 0);
    word2freq is a float32 array of document frequencies aligned with the ids.
    min_count / max_doc_freq filter out too-rare / too-frequent words;
    smooth_df applies add-one smoothing and sublinear_df takes log(df) + 1.
    """
    word_counts = collections.defaultdict(int)
    doc_n = 0
    # Count the number of documents each word occurs in,
    # and the total number of documents.
    for txt in tokenized_texts:
        doc_n += 1
        unique_text_tokens = set(txt)
        for token in unique_text_tokens:
            word_counts[token] += 1
    # Drop words that are too rare or too frequent.
    word_counts = {
        word: cnt
        for word, cnt in word_counts.items()
        if cnt >= min_count and cnt / doc_n <= max_doc_freq
    }
    # Sort words by decreasing document count.
    sorted_word_counts = sorted(
        word_counts.items(), reverse=True, key=lambda pair: pair[1]
    )
    # Prepend a dummy padding word with index 0 for batching convenience.
    if pad_word is not None:
        sorted_word_counts = [(pad_word, 0)] + sorted_word_counts
    # If there are still too many words, keep only the max_size most frequent.
    if len(word_counts) > max_size:
        sorted_word_counts = sorted_word_counts[:max_size]
    # Assign ids by rank.
    word2id = {word: i for i, (word, _) in enumerate(sorted_word_counts)}
    # Normalize counts into document frequencies (the DF vector).
    word2freq = []
    for _, cnt in sorted_word_counts:
        cnt_cont = cnt
        doc_n_cont = doc_n
        if smooth_df:
            cnt_cont += 1
            doc_n_cont += 1
        word2freq.append(cnt_cont / doc_n_cont)
    word2freq = np.array(word2freq, dtype="float32")
    if sublinear_df:
        word2freq = np.log(word2freq) + 1
    return word2id, word2freq
PAD_TOKEN = "__PAD__"
NUMERIC_TOKEN = "__NUMBER__"
NUMERIC_RE = re.compile(r"^([0-9.,e+\-]+|[mcxvi]+)$", re.I)
def replace_number_nokens(tokenized_texts):
    """Replace every token matching NUMERIC_RE with the NUMERIC_TOKEN placeholder.

    (Function name kept as-is — "nokens" — because callers depend on it.)
    """
    result = []
    for text in tokenized_texts:
        result.append(
            [NUMERIC_TOKEN if NUMERIC_RE.match(token) else token for token in text]
        )
    return result
def generate_tokens_n_grams(tokens, ngram_range):
    """Return space-joined n-grams of *tokens* for every n in the inclusive range."""
    assert len(ngram_range) == 2, "The ngram range must be a tuple of two elements"
    lo, hi = ngram_range
    grams = []
    for width in range(lo, hi + 1):
        windows = np.lib.stride_tricks.sliding_window_view(tokens, window_shape=width)
        grams.extend(" ".join(window.tolist()) for window in windows)
    return grams
|
import numpy as np
from rlkit.torch import pytorch_util as ptu
from rorlkit.torch.data import MotionGraphDataset
from rorlkit.torch.data_management.rosbag_data import batch_features_to_pose_arrays, get_identity_feature
class MotionGCNEvaluator(object):
    """Rolls a trained motion GCN forward autoregressively on a motion
    dataset and saves the generated pose sequence to ``save_path``.
    """
    def __init__(
        self,
        model,
        test_dataset_info,
        save_path,
        batch_size=1,
    ):
        # Move the model to the globally configured torch device before use.
        model.to(ptu.device)
        self.model = model
        # test_dataset_info: kwargs forwarded verbatim to MotionGraphDataset.
        self.database = MotionGraphDataset(**test_dataset_info)
        self.save_path = save_path
        # NOTE(review): batch_size is stored but never used in make_rollout —
        # confirm it is consumed elsewhere or drop it.
        self.batch_size = batch_size
    def make_rollout(
        self,
        unit_idx,
        n_rollout,
        turbulence=None
    ):
        # Generate an n_rollout-step rollout for motion unit `unit_idx`.
        # turbulence: optional additive perturbation applied to the initial hand
        # features and passed as `shift` to the initial graph (assumed
        # broadcast-compatible with the feature arrays — TODO confirm).
        # Side effect: writes the rollout list to self.save_path via np.save.
        self.model.eval()
        rollout = []
        # Add the starting point for better visualization
        base_feature = get_identity_feature()
        left_hand_feature, right_hand_feature = self.database.get_motion_features(0, unit_idx)
        if turbulence is not None:
            left_hand_feature += turbulence
            right_hand_feature += turbulence
        pose_arrays = batch_features_to_pose_arrays([base_feature, left_hand_feature, right_hand_feature])
        # print(pose_arrays)
        rollout.append(pose_arrays)
        graph_curr, _ = self.database.get_motion_graphs(0, unit_idx, shift=turbulence)
        for i in range(n_rollout):
            # Feed the model's own prediction back in (autoregressive rollout).
            graph_next = self.model(graph_curr)
            graph_next_np = graph_next.squeeze().detach().cpu().numpy()
            # print(graph_curr, '\n', graph_next_np)
            pose_arrays = batch_features_to_pose_arrays(graph_next_np)
            # print(pose_arrays)
            rollout.append(pose_arrays)
            graph_curr = self.database.feature_array_to_graph(graph_next_np)
        # NOTE(review): `rollout` is a Python list; if its entries have differing
        # shapes, np.save stores an object array needing allow_pickle=True to
        # load back — verify against the consumer of save_path.
        np.save(self.save_path, rollout)
|
#
# PySNMP MIB module CISCOSB-SpecialBpdu-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-SpecialBpdu-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:23:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated pysnmp code: pull ASN.1 / SMI symbol factories from the MIB builder
# (mibBuilder is injected by the pysnmp loader before this module executes).
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, MibIdentifier, TimeTicks, ModuleIdentity, Bits, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Counter32, ObjectIdentity, Unsigned32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibIdentifier", "TimeTicks", "ModuleIdentity", "Bits", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Counter32", "ObjectIdentity", "Unsigned32", "iso")
TruthValue, RowStatus, TextualConvention, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "TextualConvention", "MacAddress", "DisplayString")
# Module identity under the Cisco Small Business enterprise arc.
rlSpecialBpdu = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144))
rlSpecialBpdu.setRevisions(('2008-05-03 12:34',))
# Revision descriptions are only supported on newer pysmi/pysnmp builders.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: rlSpecialBpdu.setRevisionsDescriptions(('The private MIB module definition Traffic Segmentation MIB.',))
if mibBuilder.loadTexts: rlSpecialBpdu.setLastUpdated('200805031234Z')
if mibBuilder.loadTexts: rlSpecialBpdu.setOrganization('Cisco Small Business')
if mibBuilder.loadTexts: rlSpecialBpdu.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Home http://www.cisco.com/smb>;, Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlSpecialBpdu.setDescription('<description>')
class EncapType(TextualConvention, Integer32):
    # Fix: the original description said "otherwisw: EthernetV2 (2), LLC (2)
    # or LLC-Snap (3)", contradicting namedValues below (llc=3, llc-snap=4).
    description = 'The L2 encapsulation type. In case the entry contains MAC only, the encapsulation will be none(1), otherwise: EthernetV2 (2), LLC (3) or LLC-Snap (4)'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("none", 1), ("ethernet-v2", 2), ("llc", 3), ("llc-snap", 4))
class Action(TextualConvention, Integer32):
    # Enumerated software action applied to a matched special BPDU frame.
    description = 'Action to be taken. Bridge(1) or Discard (2)'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("bridge", 1), ("discard", 2))
class HwAction(TextualConvention, Integer32):
    # Enumerated hardware-level action actually programmed for a MAC address.
    description = 'Configured action in the HW. Forward(1), Drop (2) or Trap(3)'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("forward", 1), ("drop", 2), ("trap", 3))
# Configuration table: rows keyed by (MAC address, encapsulation, protocol id).
rlSpecialBpduTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1), )
if mibBuilder.loadTexts: rlSpecialBpduTable.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduTable.setDescription('A table contains entries of Special BPDU configuration')
rlSpecialBpduEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1), ).setIndexNames((0, "CISCOSB-SpecialBpdu-MIB", "rlSpecialBpduMacAddr"), (0, "CISCOSB-SpecialBpdu-MIB", "rlSpecialBpduEncap"), (0, "CISCOSB-SpecialBpdu-MIB", "rlSpecialBpduProtId"))
if mibBuilder.loadTexts: rlSpecialBpduEntry.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduEntry.setDescription('An entry of Special BPDU configuration table')
rlSpecialBpduMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: rlSpecialBpduMacAddr.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduMacAddr.setDescription('Reserved MAC Mc 01:80:C2:00:00:00 - 01:80:C2:00:00:2F.')
rlSpecialBpduEncap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1, 2), EncapType())
if mibBuilder.loadTexts: rlSpecialBpduEncap.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduEncap.setDescription('L2 Encapsulation Type: Ethernet-V2, LLC or LLC-Snap.')
rlSpecialBpduProtId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5))
if mibBuilder.loadTexts: rlSpecialBpduProtId.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduProtId.setDescription('Protocol ID. For Ethernet-V2: 0x600 - 0xFFFF; For LLC: 0 - 0xFFFF; For LLC-Snap: 0 - 0xFFFFFFFFFF.')
rlSpecialBpduAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1, 4), Action()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlSpecialBpduAction.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduAction.setDescription('Action to be taken on the incoming frame: Discard or Bridge.')
rlSpecialBpduRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlSpecialBpduRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduRowStatus.setDescription('This object indicates the status of this entry.')
# Read-only hardware-status table keyed by the same reserved MAC address.
rlSpecialBpduHwTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 2), )
if mibBuilder.loadTexts: rlSpecialBpduHwTable.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduHwTable.setDescription('A table contains entries of Special BPDU Hw status')
rlSpecialBpduHwEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 2, 1), ).setIndexNames((0, "CISCOSB-SpecialBpdu-MIB", "rlSpecialBpduMacAddr"))
if mibBuilder.loadTexts: rlSpecialBpduHwEntry.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduHwEntry.setDescription('An entry of Special BPDU Hw status table')
rlSpecialBpduHwAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 144, 2, 1, 2), HwAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSpecialBpduHwAction.setStatus('current')
if mibBuilder.loadTexts: rlSpecialBpduHwAction.setDescription('HW action per MAC address: Forward, Drop or Trap.')
# Register all objects and textual conventions with the MIB builder.
mibBuilder.exportSymbols("CISCOSB-SpecialBpdu-MIB", PYSNMP_MODULE_ID=rlSpecialBpdu, rlSpecialBpduTable=rlSpecialBpduTable, Action=Action, rlSpecialBpduEncap=rlSpecialBpduEncap, rlSpecialBpduHwEntry=rlSpecialBpduHwEntry, rlSpecialBpdu=rlSpecialBpdu, rlSpecialBpduHwAction=rlSpecialBpduHwAction, rlSpecialBpduMacAddr=rlSpecialBpduMacAddr, rlSpecialBpduHwTable=rlSpecialBpduHwTable, rlSpecialBpduEntry=rlSpecialBpduEntry, rlSpecialBpduAction=rlSpecialBpduAction, rlSpecialBpduProtId=rlSpecialBpduProtId, rlSpecialBpduRowStatus=rlSpecialBpduRowStatus, HwAction=HwAction, EncapType=EncapType)
|
import sys
import extraction.ethereum as eth2
import time
import argparse
def main():
    """Extract a range of Ethereum blocks into object storage and print timing stats."""
    # NOTE(review): this append runs after the module-level imports already
    # executed, so it cannot influence `extraction.ethereum`; kept in case
    # eth2 performs lazy imports — confirm and remove if not needed.
    sys.path.append('/home/larte/projects/data_engineering_experiments_v1')
    parser = argparse.ArgumentParser()
    parser.add_argument("--bucket_name", help="define bucket name", type=str)
    parser.add_argument("--object_folder", help="define object folder", type=str)
    parser.add_argument("--object_name_base", help="define object name", type=str)
    parser.add_argument("--first_block_number", help="define start block number", type=int)
    parser.add_argument("--last_block_number", help="define end block number", type=int)
    args = parser.parse_args()
    BUCKET_NAME = args.bucket_name
    OBJECT_FOLDER = args.object_folder
    OBJECT_NAME_BASE = args.object_name_base
    FIRST_BLOCK_NUMBER = args.first_block_number
    LAST_BLOCK_NUMBER = args.last_block_number
    print(f"Start: block number {FIRST_BLOCK_NUMBER}")
    start = time.time()
    eth2.extract_blocks(
        first_block_number=FIRST_BLOCK_NUMBER,
        last_block_number=LAST_BLOCK_NUMBER,
        bucket_name=BUCKET_NAME,
        object_folder=OBJECT_FOLDER,
        object_name=OBJECT_NAME_BASE
    )
    end = time.time()
    print(f"End: block number {LAST_BLOCK_NUMBER}")
    print(f"Total time in seconds: {end - start}")
    # Bug fix: guard against a zero-length range so first == last no longer
    # raises ZeroDivisionError. NOTE(review): if extract_blocks treats the
    # range as inclusive, the true count is last - first + 1 — confirm.
    block_count = LAST_BLOCK_NUMBER - FIRST_BLOCK_NUMBER
    print(f"Total blocks: {block_count}")
    if block_count:
        print(f"Seconds per block: {(end - start)/block_count}")
if __name__ == "__main__":
    main()
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2019 Mobvoi Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from wenet(https://github.com/wenet-e2e/wenet)
"""Positonal Encoding Module."""
import math
from typing import Tuple
import paddle
from paddle import nn
from paddlespeech.s2t.utils.log import Log
# Module-level logger shared by the positional-encoding classes below.
logger = Log(__name__).getlog()
# Public API of this module.
__all__ = [
    "PositionalEncodingInterface", "NoPositionalEncoding", "PositionalEncoding",
    "RelPositionalEncoding"
]
class PositionalEncodingInterface:
    """Abstract contract shared by all positional-encoding layers in this module."""
    def forward(self, x: paddle.Tensor,
                offset: int=0) -> Tuple[paddle.Tensor, paddle.Tensor]:
        """Compute positional encoding.
        Args:
            x (paddle.Tensor): Input tensor (batch, time, `*`).
            offset (int): position offset into the encoding table.
        Returns:
            paddle.Tensor: Encoded tensor (batch, time, `*`).
            paddle.Tensor: Positional embedding tensor (1, time, `*`).
        """
        raise NotImplementedError("forward method is not implemented")
    def position_encoding(self, offset: int, size: int) -> paddle.Tensor:
        """ For getting encoding in a streaming fashion
        Args:
            offset (int): start offset
            size (int): required size of position encoding
        Returns:
            paddle.Tensor: Corresponding position encoding
        """
        raise NotImplementedError("position_encoding method is not implemented")
class NoPositionalEncoding(nn.Layer, PositionalEncodingInterface):
    """No-op encoding: passes the input through unchanged, with None embeddings.

    Constructor arguments are accepted only for signature compatibility with
    the other encodings; they are ignored.
    """
    def __init__(self,
                 d_model: int,
                 dropout_rate: float,
                 max_len: int=5000,
                 reverse: bool=False):
        nn.Layer.__init__(self)
    def forward(self, x: paddle.Tensor,
                offset: int=0) -> Tuple[paddle.Tensor, paddle.Tensor]:
        # Return the input untouched; None signals "no positional embedding".
        return x, None
    def position_encoding(self, offset: int, size: int) -> paddle.Tensor:
        return None
class PositionalEncoding(nn.Layer, PositionalEncodingInterface):
    """Sinusoidal absolute positional encoding (Vaswani et al., 2017 style)."""
    def __init__(self,
                 d_model: int,
                 dropout_rate: float,
                 max_len: int=5000,
                 reverse: bool=False):
        """Positional encoding.
        PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
        PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
        Args:
            d_model (int): embedding dim.
            dropout_rate (float): dropout rate.
            max_len (int, optional): maximum input length. Defaults to 5000.
            reverse (bool, optional): Not used. Defaults to False.
        """
        nn.Layer.__init__(self)
        self.d_model = d_model
        self.max_len = max_len
        # Inputs are scaled by sqrt(d_model) before the encoding is added.
        self.xscale = paddle.to_tensor(math.sqrt(self.d_model))
        self.dropout = nn.Dropout(p=dropout_rate)
        # Precompute the full table once; sliced per forward call.
        self.pe = paddle.zeros([self.max_len, self.d_model]) #[T,D]
        position = paddle.arange(
            0, self.max_len, dtype=paddle.float32).unsqueeze(1) #[T, 1]
        # Inverse frequencies: 10000^(-2i/d_model) for the even dimensions.
        div_term = paddle.exp(
            paddle.arange(0, self.d_model, 2, dtype=paddle.float32) *
            -(math.log(10000.0) / self.d_model))
        # Interleave: sin on even dims, cos on odd dims.
        self.pe[:, 0::2] = paddle.sin(position * div_term)
        self.pe[:, 1::2] = paddle.cos(position * div_term)
        self.pe = self.pe.unsqueeze(0) #[1, T, D]
    def forward(self, x: paddle.Tensor,
                offset: int=0) -> Tuple[paddle.Tensor, paddle.Tensor]:
        """Add positional encoding.
        Args:
            x (paddle.Tensor): Input. Its shape is (batch, time, ...)
            offset (int): position offset
        Returns:
            paddle.Tensor: Encoded tensor. Its shape is (batch, time, ...)
            paddle.Tensor: for compatibility to RelPositionalEncoding, (batch=1, time, ...)
        """
        T = x.shape[1]
        # NOTE(review): strict `<` forbids using the very last table slot even
        # though offset + T == max_len would slice validly — upstream wenet
        # behaves the same; confirm before relaxing.
        assert offset + x.shape[1] < self.max_len
        #TODO(Hui Zhang): using T = x.size(1), __getitem__ not support Tensor
        pos_emb = self.pe[:, offset:offset + T]
        x = x * self.xscale + pos_emb
        return self.dropout(x), self.dropout(pos_emb)
    def position_encoding(self, offset: int, size: int) -> paddle.Tensor:
        """ For getting encoding in a streaming fashion
        Attention!!!!!
        we apply dropout only once at the whole utterance level in a none
        streaming way, but will call this function several times with
        increasing input size in a streaming scenario, so the dropout will
        be applied several times.
        Args:
            offset (int): start offset
            size (int): required size of position encoding
        Returns:
            paddle.Tensor: Corresponding position encoding
        """
        assert offset + size < self.max_len
        return self.dropout(self.pe[:, offset:offset + size])
class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.
    See : Appendix B in https://arxiv.org/abs/1901.02860
    """
    def __init__(self, d_model: int, dropout_rate: float, max_len: int=5000):
        """
        Args:
            d_model (int): Embedding dimension.
            dropout_rate (float): Dropout rate.
            max_len (int, optional): [Maximum input length.]. Defaults to 5000.
        """
        super().__init__(d_model, dropout_rate, max_len, reverse=True)
    def forward(self, x: paddle.Tensor,
                offset: int=0) -> Tuple[paddle.Tensor, paddle.Tensor]:
        """Compute positional encoding.
        Args:
            x (paddle.Tensor): Input tensor (batch, time, `*`).
        Returns:
            paddle.Tensor: Encoded tensor (batch, time, `*`).
            paddle.Tensor: Positional embedding tensor (1, time, `*`).
        """
        assert offset + x.shape[1] < self.max_len
        # Unlike the base class, the embedding is NOT added to x here; it is
        # returned separately for relative-attention layers to consume.
        x = x * self.xscale
        #TODO(Hui Zhang): using x.size(1), __getitem__ not support Tensor
        pos_emb = self.pe[:, offset:offset + x.shape[1]]
        return self.dropout(x), self.dropout(pos_emb)
|
from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
SIN_CURVE = 0.2 # Curvature of sine < 0.5 for fast init, > 0.5 for fast out never negative
POW_CURVE = 0.4 # Curvature of x axis power modifier
# RAND_SIZE evenly spaced strand amplitudes in [-1, 1], one per measure line.
rand = np.linspace(-1, 1, RAND_SIZE)
def f442(m: OsuMap):
    """Section effect for 174702-176382ms: for each strand amplitude r, three
    chained measure-line events (sine ease-in, cosine sweep, eased fade-out),
    rendered into SVs/BPMs and appended to map *m* in place.
    """
    # noinspection PyTypeChecker
    events = [
        # Phase 1: ease in — amplitude grows as an eased sine of x in [0, 1].
        [SvOsuMeasureLineEvent(
            firstOffset=174702, lastOffset=175362,
            startX=0, endX=1,
            startY=-1, endY=1,
            funcs=[
                lambda x, r=r: r * np.sin(x * pi / 2) ** SIN_CURVE
            ]),
        # Phase 2: full cosine sweep with a power-curved x axis.
        SvOsuMeasureLineEvent(
            firstOffset=175362, lastOffset=176142,
            startX=0, endX=1,
            startY=-1, endY=1,
            funcs=[
                lambda x, r=r: r * np.cos(x ** POW_CURVE * pi)
            ]),
        # Phase 3: ease out — mirror of phase 1, collapsing back to zero.
        SvOsuMeasureLineEvent(
            firstOffset=176142, lastOffset=176382,
            startX=0, endX=1,
            startY=-1, endY=1,
            funcs=[
                lambda x, r=r: -r * (1 - np.sin(x * pi / 2) ** SIN_CURVE)
            ])] for r in rand]
    # Flatten [strand][phase] into one event list and render to SVs + BPMs.
    svs, bpms = svOsuMeasureLineMD([i for j in events for i in j],
                                   scalingFactor=SCALE,
                                   firstOffset=174702,
                                   lastOffset=176382,
                                   paddingSize=PADDING,
                                   endBpm=250)
    m.svs.extend(svs)
    m.bpms.extend(bpms)
|
"""Support for Satel Integra devices."""
import logging
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
REQUIREMENTS = ['satel_integra==0.3.2']
DEFAULT_ALARM_NAME = 'satel_integra'
DEFAULT_PORT = 7094
DEFAULT_CONF_ARM_HOME_MODE = 1
DEFAULT_DEVICE_PARTITION = 1
DEFAULT_ZONE_TYPE = 'motion'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'satel_integra'
DATA_SATEL = 'satel_integra'
# configuration.yaml keys for this integration.
CONF_DEVICE_HOST = 'host'
CONF_DEVICE_PORT = 'port'
CONF_DEVICE_PARTITION = 'partition'
CONF_ARM_HOME_MODE = 'arm_home_mode'
CONF_ZONE_NAME = 'name'
CONF_ZONE_TYPE = 'type'
CONF_ZONES = 'zones'
CONF_OUTPUTS = 'outputs'
ZONES = 'zones'
# Dispatcher signal names used to push panel/zone/output state to entities.
SIGNAL_PANEL_MESSAGE = 'satel_integra.panel_message'
SIGNAL_PANEL_ARM_AWAY = 'satel_integra.panel_arm_away'
SIGNAL_PANEL_ARM_HOME = 'satel_integra.panel_arm_home'
SIGNAL_PANEL_DISARM = 'satel_integra.panel_disarm'
SIGNAL_ZONES_UPDATED = 'satel_integra.zones_updated'
SIGNAL_OUTPUTS_UPDATED = 'satel_integra.outputs_updated'
# Each zone/output entry: required name plus an optional device class.
ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_NAME): cv.string,
    vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string})
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_DEVICE_HOST): cv.string,
        vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_DEVICE_PARTITION,
                     default=DEFAULT_DEVICE_PARTITION): cv.positive_int,
        vol.Optional(CONF_ARM_HOME_MODE,
                     default=DEFAULT_CONF_ARM_HOME_MODE): vol.In([1, 2, 3]),
        vol.Optional(CONF_ZONES,
                     default={}): {vol.Coerce(int): ZONE_SCHEMA},
        vol.Optional(CONF_OUTPUTS,
                     default={}): {vol.Coerce(int): ZONE_SCHEMA},
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the Satel Integra component."""
    conf = config.get(DOMAIN)
    zones = conf.get(CONF_ZONES)
    outputs = conf.get(CONF_OUTPUTS)
    host = conf.get(CONF_DEVICE_HOST)
    port = conf.get(CONF_DEVICE_PORT)
    partition = conf.get(CONF_DEVICE_PARTITION)
    # Imported lazily so the requirement is only loaded during setup.
    from satel_integra.satel_integra import AsyncSatel
    controller = AsyncSatel(host, port, hass.loop, zones, outputs, partition)
    hass.data[DATA_SATEL] = controller
    result = await controller.connect()
    if not result:
        return False
    async def _close(event):
        """Close the alarm connection when Home Assistant stops."""
        controller.close()
    # Bug fix: register the coroutine function itself. The original passed
    # `_close()` — an already-created coroutine object — so no valid listener
    # was registered and the connection was never closed on shutdown.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
    _LOGGER.debug("Arm home config: %s, mode: %s ",
                  conf,
                  conf.get(CONF_ARM_HOME_MODE))
    hass.async_create_task(
        async_load_platform(hass, 'alarm_control_panel', DOMAIN, conf, config))
    hass.async_create_task(
        async_load_platform(hass, 'binary_sensor', DOMAIN,
                            {CONF_ZONES: zones, CONF_OUTPUTS: outputs}, config)
    )
    @callback
    def alarm_status_update_callback():
        """Send status update received from alarm to home assistant."""
        _LOGGER.debug("Sending request to update panel state")
        async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE)
    @callback
    def zones_update_callback(status):
        """Update zone objects as per notification from the alarm."""
        _LOGGER.debug("Zones callback, status: %s", status)
        async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES])
    @callback
    def outputs_update_callback(status):
        """Update output objects as per notification from the alarm."""
        _LOGGER.debug("Outputs updated callback , status: %s", status)
        async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"])
    # Create a task instead of adding a tracking job, since this task will
    # run until the connection to satel_integra is closed.
    hass.loop.create_task(controller.keep_alive())
    hass.loop.create_task(
        controller.monitor_status(
            alarm_status_update_callback,
            zones_update_callback,
            outputs_update_callback)
    )
    return True
|
from iroha import IrohaCrypto
import sys
import json
import os
from copy import deepcopy
# Per-node Iroha daemon configuration template; the database host and working
# database name are overwritten per node in the main loop below.
config_docker = {
    "block_store_path" : "/tmp/block_store/",
    "torii_port" : 50051,
    "internal_port" : 10001,
    "max_proposal_size" : 10,
    "proposal_delay" : 10,
    "vote_delay" : 1,
    "mst_enable" : False,
    "mst_expiration_time" : 1440,
    "max_rounds_delay": 10,
    "stale_stream_max_rounds": 2,
    "database": {
        "host" : "",
        "port" : 5432,
        "user" : "postgres",
        "password" : "QPtc2AssTNv2ugnD",
        "working database" : "iroha_data",
        "maintenance database" : "postgres"
    }
}
# Genesis block template: defines roles, the "coniks" domain/asset, and the
# admin + drone accounts; addPeer commands are appended per node in the loop
# below, so every node shares the same final genesis block.
genesis_block = {
    "block_v1":{
        "payload":{
            "transactions":[
                {
                    "payload":{
                        "reducedPayload":{
                            "commands":[
                                {
                                    "createRole":{
                                        "roleName":"admin",
                                        "permissions":[
                                            "can_create_account",
                                            "can_set_detail",
                                            "can_create_asset",
                                            "can_receive",
                                            "can_transfer",
                                            "can_add_asset_qty",
                                            "can_subtract_asset_qty",
                                            "can_add_domain_asset_qty",
                                            "can_subtract_domain_asset_qty",
                                            "can_create_domain",
                                            "can_add_peer",
                                            "can_remove_peer",
                                            "can_append_role",
                                            "can_create_role",
                                            "can_detach_role",
                                            "can_add_signatory",
                                            "can_remove_signatory",
                                            "can_set_quorum",
                                            "can_get_all_acc_detail",
                                            "can_get_all_accounts",
                                            "can_get_all_acc_ast",
                                            "can_get_all_acc_ast_txs",
                                            "can_get_all_acc_txs",
                                            "can_read_assets",
                                            "can_get_blocks",
                                            "can_get_roles",
                                            "can_get_all_signatories",
                                            "can_get_all_txs",
                                            "can_get_peers"
                                        ]
                                    }
                                },
                                {
                                    "createRole":{
                                        "roleName":"user",
                                        "permissions":[
                                            "can_add_signatory",
                                            "can_get_my_acc_ast",
                                            "can_get_my_acc_ast_txs",
                                            "can_get_my_acc_detail",
                                            "can_get_my_acc_txs",
                                            "can_get_my_account",
                                            "can_get_my_signatories",
                                            "can_receive",
                                            "can_remove_signatory",
                                            "can_set_quorum",
                                            "can_transfer"
                                        ]
                                    }
                                },
                                {
                                    "createDomain":{
                                        "domainId":"coniks",
                                        "defaultRole":"user"
                                    }
                                },
                                {
                                    "createAsset":{
                                        "assetName":"coin",
                                        "domainId":"coniks",
                                        "precision":2
                                    }
                                },
                                {
                                    "createAccount":{
                                        "accountName":"admin",
                                        "domainId":"coniks",
                                        "publicKey":"65628c6eaddc37c042c5a97ec69c6d16857bd5aa0465125dbb315b84019d9d6a"
                                    }
                                },
                                {
                                    "appendRole":{
                                        "accountId":"admin@coniks",
                                        "roleName":"admin"
                                    }
                                },
                                {
                                    "createAccount":{
                                        "accountName":"drone1",
                                        "domainId":"coniks",
                                        "publicKey":"94e15264063c5b2a3db480bc686cb8822a752be59e6f2f006bb057be91fdcd1f"
                                    }
                                },
                                {
                                    "createAccount":{
                                        "accountName":"drone2",
                                        "domainId":"coniks",
                                        "publicKey":"56b522f1d91bd7284ee05cd41d25ad26ac574d9ab091649380371be47632bfb6"
                                    }
                                }
                            ],
                            "quorum":1
                        }
                    }
                }
            ],
            "txNumber":1,
            "height":"1",
            "prevBlockHash":"0000000000000000000000000000000000000000000000000000000000000000"
        }
    }
}
# How many Iroha nodes share one PostgreSQL container.
NODES_PER_DB=10
if __name__ == '__main__':
    # NOTE(review): the error message says POSITIVE, but the check accepts 0
    # (which simply generates nothing) — confirm which is intended.
    if len(sys.argv) != 2 or not (sys.argv[1].isdigit() and int(sys.argv[1]) >= 0):
        sys.exit("ERROR: YOU NEED TO ENTER THE NUMBER OF KEYS TO GENERATE (POSITIVE INTEGER).")
    nodes = int(sys.argv[1])
    db_counter = 0
    for i in range(nodes):
        # One fresh keypair per node, written under nodes/node_<i>/.
        private_key = IrohaCrypto.private_key()
        public_key = IrohaCrypto.derive_public_key(private_key)
        if not os.path.isdir("nodes"):
            os.mkdir("nodes", 0o755)
        if not os.path.isdir("nodes/node_{}".format(i)):
            os.mkdir("nodes/node_{}".format(i), 0o755)
        with open('nodes/node_{}/node_{}.priv'.format(i, i), 'wb') as f:
            f.write(private_key)
        with open('nodes/node_{}/node_{}.pub'.format(i, i), 'wb') as f:
            f.write(public_key)
        # Register the node as a peer in the (shared) genesis block.
        genesis_block["block_v1"]["payload"]["transactions"][0]["payload"]["reducedPayload"]["commands"].append({
            "addPeer": {
                "peer": {
                    "address": "10.1.2.{}:10001".format(i),
                    "peerKey": public_key.decode("utf-8")
                }
            }
        })
        # Rotate to the next DB container every NODES_PER_DB nodes; the first
        # batch stays on nodedb_0.
        if i > 0 and i % NODES_PER_DB == 0:
            db_counter+=1
        config_docker["database"]["host"] = "nodedb_{}".format(db_counter)
        config_docker["database"]["working database"] = "node_data_{}".format(i % NODES_PER_DB)
        with open("nodes/node_{}/config.docker".format(i), 'w') as f:
            json.dump(config_docker, f, indent=2)
    # Every node receives the identical, fully-populated genesis block.
    for i in range(nodes):
        # node_genesis_block = deepcopy(genesis_block)
        # commands = node_genesis_block["block_v1"]["payload"]["transactions"][0]["payload"]["reducedPayload"]["commands"]
        # commands = list(filter(lambda command: "addPeer" in command, commands))
        # commands[i]["addPeer"]["peer"]["address"] = "127.0.0.1:10001"
        with open("nodes/node_{}/genesis.block".format(i), 'w') as f:
            json.dump(genesis_block, f, indent=2)
import cv2
import os
import json
from PIL import Image
def get_content_type(file):
    """Split an upload's MIME type into [type, subtype], e.g. ["image", "png"]."""
    mime = file.content_type
    return mime.split("/")
def create_directories(directory_list):
    """Create every directory in *directory_list* that does not yet exist."""
    for directory in directory_list:
        create_directory(directory)
def create_directory(path):
    """Make *path* (including parents) if absent, logging the creation."""
    if os.path.exists(path):
        return
    os.makedirs(path)
    print("Directory created: {!r}".format(path))
def resize_image(image, width):
    """Scale *image* down to *width* pixels wide, preserving aspect ratio.

    Bug fixes vs. the original:
    - the resized image was assigned to a local and never returned, so the
      call had no effect for the caller; the (possibly resized) image is now
      returned,
    - the guard compared the *height* (size[1]) against the target width
      while the scale factor used size[0]; it now checks the width.
    Images already narrower than *width* are returned unchanged.
    """
    if image.size[0] >= width:
        wpercent = width / float(image.size[0])
        hsize = int(float(image.size[1]) * wpercent)
        image = image.resize((width, hsize), Image.ANTIALIAS)
    return image
def get_rotate_code(rotate):
    """Map a rotation in degrees (90/180/270) to the matching cv2 rotate flag.

    Any value other than 180 or 270 falls back to 90 degrees clockwise,
    matching the original behavior.
    """
    if rotate == 180:
        return cv2.ROTATE_180
    if rotate == 270:
        return cv2.ROTATE_90_COUNTERCLOCKWISE
    return cv2.ROTATE_90_CLOCKWISE
def load_json_file(file_path):
    """Parse the JSON document at *file_path* and return the decoded object."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
def rotate_cv2_image(cv2_image, rotate):
    """Rotate *cv2_image* by 90/180/270 degrees, or pass it through unchanged.

    Bug fix: the original implicitly returned None when *rotate* was None,
    dropping the image; it now returns the image unchanged in that case.
    """
    if rotate is None:
        return cv2_image
    return cv2.rotate(cv2_image, rotateCode=get_rotate_code(int(rotate)))
def save_cv2_image(directory, image_name, cv2_image):
    """Write *cv2_image* under *directory* as *image_name* and log the path."""
    destination = os.path.join(directory, image_name)
    cv2.imwrite(destination, cv2_image)
    print("File created: {!r}".format(destination))
'''
Extract the questions as plain text. Now, how do I strip the two
wrapping patterns they come with?
<p><strong>
</strong> </p>
todos = soup.find_all('p')
print(type(todos))
# <class 'bs4.element.ResultSet'>
uno = soup.find('p')
print(type(uno))
# <class 'bs4.element.Tag'>
print(todos)
'''
import requests as rq
from bs4 import BeautifulSoup as bs
URL= "https://www.guru99.com/javascript-interview-questions-answers.html"
response = rq.get(URL).content
soup = bs(response, 'lxml')
# QUESTIONS: collect every <p> element; keep only those whose raw HTML starts
# with "<p><strong>" (the question markup) and strip the wrapping tags.
namelist = soup.find_all('p')
questions =[]
for name in namelist:
    name = str(name)
    if name.startswith("<p><strong>"):
        name = name.replace('<p><strong>',"")
        name = name.replace('</strong> </p>', "")
        questions.append(name)
for a in questions:
    print(a)
# ANSWERS: same sweep over the <p> values, keeping the non-question paragraphs.
#
# answers =[]
# for answer in namelist:
#     answer = str(answer)
#     if not answer.startswith("<p><strong>"):
#         answer = answer.replace('<p><strong>', "")
#         answer = answer.replace('</strong> </p>', "")
#         answers.append(answer)
# # print(answer)
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import reduce
import numpy as np
import tensorrt
import pycuda.driver as cuda
import pycuda.autoinit
from trt_lite import TrtLite
np.set_printoptions(threshold=np.inf)
def build_engine_static(builder, input_shape):
    """Build a static-shape TensorRT engine: a single 3x3 convolution whose
    kernel has only the center tap set to 1 (an identity convolution), so the
    output equals the input except at the padded borders.
    """
    network = builder.create_network()
    data = network.add_input("data", tensorrt.DataType.FLOAT, input_shape)
    # 3x3 identity kernel: only the center weight is non-zero.
    w = np.asarray(
        [0, 0, 0,
         0, 1, 0,
         0, 0, 0],
        dtype = np.float32)
    b = np.zeros((1,), np.float32)
    conv = network.add_convolution(data, 1, (3, 3), w, b)
    conv.stride = (1, 1)
    # Padding 1 keeps spatial dimensions unchanged.
    conv.padding = (1, 1)
    print('conv', conv.get_output(0).shape)
    network.mark_output(conv.get_output(0))
    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 30
    # NOTE(review): max_batch_size / add_convolution belong to the deprecated
    # implicit-batch API (removed in TensorRT 8+) — confirm target TRT version.
    builder.max_batch_size = 64
    return builder.build_engine(network, config)
def run_engine_static(save_and_load=False):
    """Run the static engine on a 1x1x5x5 ramp input and print the output.

    When save_and_load is True, the engine is additionally serialized to
    "out.trt" and reloaded before execution.
    """
    batch_size = 1
    input_shape = (batch_size, 1, 5, 5)
    n = reduce(lambda x, y: x * y, input_shape)
    # Input is simply 0..n-1 reshaped; the identity conv should echo it back.
    input_data = np.asarray(range(n), dtype=np.float32).reshape(input_shape)
    output_data = np.zeros(input_shape, dtype=np.float32)
    # Static builder receives the shape without the batch dimension.
    trt = TrtLite(build_engine_static, (input_shape[1:],))
    if save_and_load:
        trt.save_to_file("out.trt")
        trt = TrtLite(engine_file_path="out.trt")
    trt.print_info()
    d_buffers = trt.allocate_io_buffers(batch_size, True)
    # Host -> device, execute, device -> host.
    cuda.memcpy_htod(d_buffers[0], input_data)
    trt.execute(d_buffers, batch_size)
    cuda.memcpy_dtoh(output_data, d_buffers[1])
    print(output_data)
def build_engine_dynamic(builder):
    """Build a dynamic-shape engine for the same identity convolution, with an
    optimization profile covering (1,1,3,3) .. (16,1,128,128) and FP16 enabled.
    """
    # Flag 1 = EXPLICIT_BATCH network creation.
    network = builder.create_network(1)
    # -1 dims: batch, height and width are dynamic; channels fixed at 1.
    data = network.add_input("data", tensorrt.DataType.FLOAT, (-1, 1, -1, -1))
    # 3x3 identity kernel: only the center weight is non-zero.
    w = np.asarray(
        [0, 0, 0,
         0, 1, 0,
         0, 0, 0],
        dtype = np.float32)
    b = np.zeros((1,), np.float32)
    conv = network.add_convolution(data, 1, (3, 3), w, b)
    conv.stride = (1, 1)
    conv.padding = (1, 1)
    print('conv', conv.get_output(0).shape)
    network.mark_output(conv.get_output(0))
    # Profile: (min, opt, max) shapes the runtime may bind for "data".
    op = builder.create_optimization_profile()
    op.set_shape('data', (1, 1, 3, 3), (1, 1, 5, 5), (16, 1, 128, 128))
    config = builder.create_builder_config()
    config.add_optimization_profile(op)
    config.flags = 1 << int(tensorrt.BuilderFlag.FP16)
    config.max_workspace_size = 1 << 30
    return builder.build_engine(network, config)
def run_engine_dynamic(save_and_load=False):
    """Run the dynamic-shape engine on a 1x1x5x5 ramp input and print the output.

    When save_and_load is True, the engine is additionally serialized to
    "out.trt" and reloaded before execution.
    """
    input_shape = (1, 1, 5, 5)
    n = reduce(lambda x, y: x * y, input_shape)
    input_data = np.asarray(range(n), dtype=np.float32).reshape(input_shape)
    output_data = np.zeros(input_shape, dtype=np.float32)
    trt = TrtLite(build_engine_dynamic)
    if save_and_load:
        trt.save_to_file("out.trt")
        trt = TrtLite(engine_file_path="out.trt")
    trt.print_info()
    # Binding index 0 -> concrete runtime shape for the dynamic input.
    i2shape = {0: input_shape}
    d_buffers = trt.allocate_io_buffers(i2shape, True)
    cuda.memcpy_htod(d_buffers[0], input_data)
    trt.execute(d_buffers, i2shape)
    cuda.memcpy_dtoh(output_data, d_buffers[1])
    print(output_data)
if __name__ == '__main__':
    # Exercise both engine flavors, then again through the save-to-disk path.
    run_engine_static()
    run_engine_static(True)
    run_engine_dynamic()
    run_engine_dynamic(True)
|
import numpy as np
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _, ugettext
from base.models import Stock, RunBase
# Resolve the user model lazily so custom AUTH_USER_MODEL settings are honored.
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# (value, label) choice tuples for factor strategy classes; the labels are
# Chinese display names ("turtle buy/sell", "double moving-average buy/sell").
FACTOR_BUY_CLASSES = (
    ('FactorBuyBreakXd', u"海龟买"),
    ('AbuDoubleMaBuy', u"双均线买"),
)
FACTOR_SELL_CLASSES = (
    ('FactorSellBreakXd', u"海龟卖"),
    ('AbuDoubleMaSell', u"双均线卖"),
)
@python_2_unicode_compatible
class Range(models.Model):
    """Abstract base: a numeric parameter sweep (np.arange-style
    start/end/step) attached to a trading-strategy factor class."""
    name = models.CharField(max_length=64, verbose_name=u'名称')
    start = models.IntegerField(verbose_name=u"开始")
    end = models.IntegerField(verbose_name=u"结束")
    # Step is a float, so subclasses must format it without truncation.
    step = models.FloatField(verbose_name=u"增量")
    # NOTE(review): choices uses FACTOR_SELL_CLASSES even though buy-side
    # subclasses (RangeBuy) inherit this field — confirm whether buy ranges
    # should offer FACTOR_BUY_CLASSES instead.
    class_name = models.CharField(max_length=256, choices=FACTOR_SELL_CLASSES, verbose_name=u'策略', editable=False)
    class Meta:
        abstract = True
    def __str__(self):
        return '策略名称: %s' % self.name
@python_2_unicode_compatible
class RangeBuy(Range):
    # Concrete sweep for buy-side factors (referenced by RunGridSearch.factor_buys).
    pass
@python_2_unicode_compatible
class RangeSell(Range):
    # Concrete sweep for sell-side factors (referenced by RunGridSearch.factor_sells).
    pass
@python_2_unicode_compatible
class FactorBuyRangeBreakXd(RangeBuy):
    """Buy-side turtle (break-Xd) factor expressed as an np.arange sweep."""
    class Meta:
        verbose_name = u"海龟买入范围"
        verbose_name_plural = verbose_name
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Bug fix: `step` is a FloatField, so '%d' silently truncated
        # fractional steps (e.g. 0.5 -> 0, making the np.arange step invalid).
        # '%s' preserves the exact value; start/end remain integers.
        self.class_name = "{'xd': np.arange(%d, %d, %s), 'class': [AbuFactorBuyBreak]}" % (self.start, self.end, self.step)
        super().save(force_insert, force_update, using, update_fields)
    def __str__(self):
        return '策略名称: %s, 周期: np.arange(%d, %d, %s)' % (self.name, self.start, self.end, self.step)
@python_2_unicode_compatible
class FactorSellRangeBreakXd(RangeSell):
    """Turtle-trading sell factor expressed as an `xd` parameter sweep."""
    class Meta:
        verbose_name = u"海龟卖出范围"
        verbose_name_plural = verbose_name
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # BUG FIX: '%d' truncated the FloatField `step` (e.g. 0.5 -> 0);
        # '%s' keeps the exact value in the generated np.arange expression.
        self.class_name = "{'xd': np.arange(%s, %s, %s), 'class': [AbuFactorSellBreak]}" % (self.start, self.end, self.step)
        super().save(force_insert, force_update, using, update_fields)
    def __str__(self):
        return '策略名称: %s, 周期: np.arange(%s, %s, %s)' % (self.name, self.start, self.end, self.step)
@python_2_unicode_compatible
class RunGridSearch(RunBase):
    """A grid-search run pairing sets of buy ranges with sell ranges."""
    factor_buys = models.ManyToManyField(
        RangeBuy, verbose_name=u'买策略组合', blank=False, related_name='factor_buy_groups')
    factor_sells = models.ManyToManyField(
        RangeSell, verbose_name=u'卖策略组合', blank=False, related_name='factor_sell_groups')
    class Meta:
        verbose_name = u"GridSearch"
        verbose_name_plural = verbose_name
    def __str__(self):
        return '参数调优: %s' % (self.name,)
import sys
from collections import namedtuple
from random import randint
import os
from codecs import getdecoder
from codecs import getencoder
import asn1
from sympy import invert
from hashlib import sha256
# Lightweight value types used by the curve arithmetic below.
Point = namedtuple("Point", "x y")
EllipticCurve = namedtuple("EllipticCurve", "a b")
# The point at infinity (group identity) is represented as None.
Origin = None
# Hex codec helpers backing bytes2long.
_hexdecoder = getdecoder("hex")
_hexencoder = getencoder("hex")
def hexenc(data):
    """Return the lowercase hexadecimal string encoding of *data* (bytes)."""
    encoded, _consumed = _hexencoder(data)
    return encoded.decode("ascii")
# Function to convert bytes to long number
def bytes2long(raw):
    """Deserialize big-endian bytes into an integer.

    :param bytes raw: binary string
    :returns: deserialized integer
    :rtype: int
    """
    # int.from_bytes replaces the hex round-trip of the original and, unlike
    # int(hexenc(raw), 16), also handles empty input (returns 0). The
    # docstring's RTF residue (\b0 markers) has been cleaned up as well.
    return int.from_bytes(raw, byteorder="big")
import math
# Generate random number from 1 to r-1
# And return only mutually prime number with r
def rand(r):
    """Return a uniformly random k in [1, r-1] with gcd(k, r) == 1."""
    candidate = randint(1, r - 1)
    while math.gcd(candidate, r) != 1:
        candidate = randint(1, r - 1)
    return candidate
# Vars init
# Module-level cryptosystem state; populated by ProcessParameter() through
# the `global` statement in elgamal_ecc_sign().
p = q = d = 0
Q = Origin
point = Origin
curve = Origin
# A function to multiply point to number
def multiply(point, x, a, p):
    """Return x * point on the curve over GF(p).

    MSB-first double-and-add over the binary digits of x; x == 0 yields the
    point at infinity (None).
    """
    if x == 0:
        return None
    acc = Origin
    for bit in bin(x)[2:]:
        acc = add(acc, acc, a, p)
        if bit == '1':
            acc = add(acc, point, a, p)
    return acc
# Points addition
def add(point_a, point_b, a, p):
    """Add two curve points; Origin (None) acts as the group identity."""
    if point_a is Origin:
        return point_b
    if point_b is Origin:
        return point_a
    s = slope(point_a, point_b, a, p)
    if s is None:
        # Vertical line (inverse points): the sum is the point at infinity.
        return None
    s = int(s)
    x = (s * s - point_a.x - point_b.x) % p
    y = (s * (point_a.x - x) - point_a.y) % p
    return Point(x, y)
def slope(point_a, point_b, a, p):
    """Return the slope (mod p) of the chord/tangent through the two points.

    Returns None when the line is vertical -- inverse points, or doubling a
    point with y == 0 -- i.e. when the sum is the point at infinity.
    """
    if point_a.x != point_b.x:
        # Chord through distinct points. pow(v, -1, p) is the stdlib modular
        # inverse (Python 3.8+), replacing sympy.invert.
        s = (point_b.y - point_a.y) * pow(point_b.x - point_a.x, -1, p)
    elif point_a.y == point_b.y:
        if point_a.y % p == 0:
            # BUG FIX: doubling a point with y == 0 made the original call
            # invert(0, p), which raises; the tangent is vertical instead.
            return None
        s = (3 * point_a.x ** 2 + a) * pow(2 * point_a.y, -1, p)
    else:
        return None
    return s % p
# Get random point in [1, n)
def random_point(n):
    """Return a Point with both coordinates drawn uniformly from [1, n-1]."""
    return Point(x=randint(1, n - 1), y=randint(1, n - 1))
# Check equation
def is_curve_params_correct(a, b):
    """Return True when the curve y^2 = x^3 + a*x + b is non-singular.

    The curve is valid iff its discriminant term 4a^3 + 27b^2 is non-zero.
    """
    # Return the boolean expression directly instead of the redundant
    # `True if ... else False` of the original.
    return 4 * a ** 3 + 27 * b ** 2 != 0
# Generate elliptic curve
def random_elliptic_curve(n):
    """Pick a random point, then solve for b so the point lies on the curve.

    Retries until the resulting (a, b) pair is non-singular.
    """
    while True:
        candidate = random_point(n)
        a = randint(1, n - 1)
        b = (candidate.y ** 2 - candidate.x ** 3 - a * candidate.x) % n
        if is_curve_params_correct(a, b):
            return EllipticCurve(a, b), candidate
def prv_unmarshal(prv):
    """Unmarshal a little-endian serialized private key into an integer.

    :param bytes prv: serialized private key
    :rtype: int
    """
    # Reverse the byte order, then interpret as a big-endian integer.
    return bytes2long(bytes(reversed(prv)))
# Process parameters from given numbers
def ProcessParameter():
    """Build the hard-coded curve parameters and generate a fresh key pair.

    :returns: tuple ``(p, q, curve, point, d, Q)`` -- field prime, subgroup
        order, curve coefficients, base point, private key d, and public key
        ``Q = d * point``.
    """
    p = 57896044620753384133869303843568937902752767818974600847634902975134129543643
    q = 28948022310376692066934651921784468951377218528270520403696863131129758387393
    a = 1
    b = 52259530098387149819562511889780651425271270942919542722038553712464420235875
    x = 14539175448068301073584752148116082765715462525899666138074034449285211025933
    y = 8328801466633898282311029798556417767141491055036399348346324804478619400451
    curve = EllipticCurve(a, b)
    point = Point(x, y)
    # (Removed the original's redundant `q = q` self-assignment.)
    d = prv_unmarshal(os.urandom(64))  # Private key from 512 random bits
    Q = multiply(point, d, curve.a, p)  # Public key
    print('[+] a = ', hex(curve.a))
    print('[+] b = ', hex(curve.b))
    # print('[+] x = ', hex(point.x))
    # print('[+] y = ', hex(point.y))
    print('[+] p = ', hex(p))
    print('[+] r = ', hex(q))
    # print('[+] d = ', hex(d))
    print('[+] P.x = ', hex(Q.x))
    print('[+] P.y = ', hex(Q.y))
    return p, q, curve, point, d, Q
# ASN.1
def encode_signature(Q, prime, curve, P, group_order, signature_r, signature_s, ksi):
    """Serialize the public parameters and the (r, s) signature as ASN.1.

    NOTE(review): `ksi` is accepted but never written into the structure --
    confirm whether it was meant to be encoded.
    """
    encoder = asn1.Encoder()
    encoder.start()
    encoder.enter(asn1.Numbers.Sequence)
    encoder.enter(asn1.Numbers.Set)
    encoder.enter(asn1.Numbers.Sequence)
    # NOTE(review): the doubled backslashes make this the 16-character text
    # "\\x80\\x06\\x07\\x00", not the four bytes 80 06 07 00 -- confirm intent.
    encoder.write(b'\\x80\\x06\\x07\\x00', asn1.Numbers.OctetString)
    encoder.write(b'GOST 34.10-2018', asn1.Numbers.UTF8String)
    # Public key Q(x,y)
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(Q.x, asn1.Numbers.Integer)  # Qx
    encoder.write(Q.y, asn1.Numbers.Integer)  # Qy
    encoder.leave()
    # Cryptosystem parameters
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(prime, asn1.Numbers.Integer)
    encoder.leave()
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(curve.a, asn1.Numbers.Integer)  # A parameter
    encoder.write(curve.b, asn1.Numbers.Integer)  # B parameter
    encoder.leave()
    # P(x,y)
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(P.x, asn1.Numbers.Integer)  # Px
    encoder.write(P.y, asn1.Numbers.Integer)  # Py
    encoder.leave()
    # Group order (r)
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(group_order, asn1.Numbers.Integer)
    encoder.leave()
    encoder.leave()
    # Signature
    encoder.enter(asn1.Numbers.Sequence)
    encoder.write(signature_r, asn1.Numbers.Integer)
    # First part of signature (r)
    encoder.write(signature_s, asn1.Numbers.Integer)
    # Second part of signature (s)
    encoder.leave()
    encoder.leave()
    # Files parameters (left empty)
    encoder.enter(asn1.Numbers.Sequence)
    encoder.leave()
    encoder.leave()
    encoded_bytes = encoder.output()
    return encoded_bytes
# Accumulator for every INTEGER parse_ans1 pulls out of the signature file,
# in document order; params_dict maps each field name to its index there.
params = []
params_dict = { 'Qx': 0, 'Qy': 1, 'p': 2, 'a': 3, 'b': 4, 'Px': 5, 'Py': 6, 'q': 7, 'r': 8, 's': 9 }
# Decode asn1 file format
def parse_ans1(decoder):
    """Recursively walk the ASN.1 tree, appending every INTEGER value
    (in encounter order) to the module-level `params` list."""
    while not decoder.eof():
        tag = decoder.peek()
        if tag.nr == asn1.Numbers.Null:
            break
        if tag.typ == asn1.Types.Primitive:
            tag, value = decoder.read()
            if tag.nr == asn1.Numbers.Integer:
                params.append(value)
        else:
            # Constructed type: descend one level and keep collecting.
            decoder.enter()
            parse_ans1(decoder)
            decoder.leave()
# Sign file using El-Gamal
def elgamal_ecc_sign(src_file, sign_file):
    """Sign *src_file* and write the ASN.1-encoded signature to *sign_file*.

    Side effects: regenerates the module-level key material via
    ProcessParameter() and writes the message digest to a file named "hash".
    """
    global p, q, curve, point, d, Q
    p, q, curve, point, d, Q = ProcessParameter()
    with open(src_file, mode='rb') as file:
        data = file.read()
    # First step
    dgst = sha256(data).digest() # Used sha256, but standard use stribog.
    with open("hash", mode='wb') as file:
        # NOTE(review): rebinds `data` to the byte count returned by write();
        # harmless here since `data` is not used again, but misleading.
        data = file.write(dgst)
    # Second step
    alfa = int.from_bytes(dgst, byteorder='big')
    e = alfa % q
    print('[+] e = ', hex(e))
    if e == 0:
        e = 1
    k = 0
    r = 0
    s = 0
    C = Origin
    while True:
        # Third step
        k = rand(q)
        # Fourth step
        C = multiply(point, k, curve.a, p)
        r = C.x % q
        if r == 0:
            continue
        # Fifth step
        s = (r * d + k * e) % q
        if s == 0:
            continue
        break
    r_bin = [int(k) for k in bin(r)[2:]]
    s_bin = [int(k) for k in bin(s)[2:]]
    # Sixth step
    # NOTE(review): ksi is passed to encode_signature but never encoded there.
    ksi = str(r_bin) + str(s_bin)
    encoded_bytes = encode_signature(Q, p, curve, point, q, r, s, ksi)
    with open(sign_file, mode='wb') as file:
        file.write(encoded_bytes)
    print('[+] File successfully signed!')
# Check file sign using El-Gamal
def elgamal_ecc_verify(src_file, sign_file):
    """Verify the El-Gamal/ECC signature in *sign_file* against *src_file*.

    Prints the verdict; returns None in all cases.
    """
    with open(sign_file, mode='rb') as file:
        encoded_data = file.read()
    decoder = asn1.Decoder()
    decoder.start(encoded_data)
    parse_ans1(decoder)
    # Integers land in `params` in the order encode_signature wrote them.
    Qx = params[params_dict['Qx']]
    Qy = params[params_dict['Qy']]
    p = params[params_dict['p']]
    a = params[params_dict['a']]
    # b = params[params_dict['b']]
    Px = params[params_dict['Px']]
    Py = params[params_dict['Py']]
    q = params[params_dict['q']]
    r = params[params_dict['r']]
    s = params[params_dict['s']]
    # First step
    r = int(r)
    s = int(s)
    print('[+] a = ', hex(a))
    print('[+] x = ', hex(Px))
    print('[+] y = ', hex(Py))
    print('[+] p = ', hex(p))
    print('[+] r = ', hex(q))
    print('[+] P.x = ', hex(Qx))
    print('[+] P.y = ', hex(Qy))
    if r <= 0 or r >= q or s <= 0 or s >= q:
        print('[-] Invalid signature! r <= 0 || r >= q || s <= 0 || s >= q ! ')
        # BUG FIX: abort on an out-of-range signature instead of falling
        # through and "verifying" the garbage values anyway.
        return
    with open(src_file, mode='rb') as file:
        data = file.read()
    # Second step
    # Used sha256, but standard use stribog
    dgst = sha256(data).digest()
    # Third step
    alfa = int.from_bytes(dgst, byteorder='big')
    e = alfa % q
    print('[+] e = ', hex(e))
    if e == 0:
        e = 1
    # Fourth step
    v = invert(e, q)
    # Fifth step
    # BUG FIX: z1/z2 (and C/R below) were jammed onto single lines with no
    # statement separator, which is a SyntaxError; split into statements.
    z1 = s * v % q
    z2 = -r * v % q
    # Sixth step
    c1 = multiply(Point(Px, Py), z1, a, p)
    c2 = multiply(Point(Qx, Qy), z2, a, p)
    C = add(c1, c2, a, p)
    R = C.x % q
    # Seventh step
    if R == r:
        print('[+] Signature is valid!')
    else:
        print('[-] Invalid signature!')
def main():
    """Command-line entry point: `sign`/`verify` <message file> <sig file>."""
    if len(sys.argv) < 4:
        print( "[-] Error! Usage: python <program name> [sign] [verify] <message filename> <sign filename>")
        # BUG FIX: the original fell through after printing the usage text
        # and then indexed sys.argv[1], raising IndexError for bare runs.
        return
    if sys.argv[1] == 'sign':
        elgamal_ecc_sign(sys.argv[2], sys.argv[3])
    elif sys.argv[1] == 'verify':
        elgamal_ecc_verify(sys.argv[2], sys.argv[3])
    else:
        print( "[-] Error! Usage: python <program name> [sign] [verify]' <message filename> <sign filename>")
if __name__ == '__main__':
    # Run the CLI only when executed directly, not when imported.
    main()
|
#!/usr/bin/env pybricks-micropython
from pybricks.messaging import BluetoothMailboxServer, TextMailbox
from pybricks.tools import wait
from monitor import SpikeMonitor
# Create monitor object and initialize mechanisms
spike = SpikeMonitor()
# Start server
server = BluetoothMailboxServer()
# Keep accepting connections and commands from PC script.
while True:
    # Wait for incoming bluetooth connection
    mbox = TextMailbox('command', server)
    print('waiting for connection...')
    server.wait_for_connection()
    print('connected!')
    while True:
        # Wait for new instruction
        mbox.wait()
        command = mbox.read()
        print(command)
        # Execute command. NOTE(review): the 'activate_dfu' check is a
        # separate `if` rather than `elif`; behavior is unchanged because a
        # single command string matches at most one branch, but the chain
        # style is inconsistent.
        if command == 'power_on':
            spike.click_center()
        if command == 'activate_dfu':
            spike.activate_dfu()
        elif command == 'remove_usb':
            spike.insert_usb(False)
        elif command == 'shutdown':
            spike.shutdown()
        # Acknowledge completion by echoing the command back to the PC.
        mbox.send(command)
        if command == 'stop':
            # Drop back to waiting for a fresh connection.
            break
|
"""
T: O(N)
S: O(N)
Do as the description says.
"""
class Solution:
    def toHexspeak(self, num: str) -> str:
        """Convert the decimal string *num* to hexspeak, or "ERROR".

        The hex digits 0 and 1 become the letters O and I; if any other
        decimal digit remains, the number has no hexspeak form.
        """
        speak = hex(int(num))[2:].upper().replace("0", "O").replace("1", "I")
        if set(speak) <= {"A", "B", "C", "D", "E", "F", "I", "O"}:
            return speak
        return "ERROR"
|
def find_common_element(a, b, c):
    """Return the smallest value present in all three sorted lists, else None.

    Classic three-pointer sweep: check equality first, then advance every
    pointer whose value is below the current maximum.

    BUG FIX: the original advanced pointers only while `i < len - 1`, so when
    no common element existed the pointers could get stuck on the last
    elements and the outer loop spun forever (e.g. [1], [2], [3]). Bounds are
    now exclusive (`i < len(a)`), which also handles empty lists cleanly.
    """
    i = j = k = 0
    while i < len(a) and j < len(b) and k < len(c):
        if a[i] == b[j] == c[k]:
            return a[i]
        biggest = max(a[i], b[j], c[k])
        # Each iteration either returns or advances at least one pointer,
        # so termination is guaranteed.
        if a[i] < biggest:
            i += 1
        if b[j] < biggest:
            j += 1
        if c[k] < biggest:
            k += 1
    return None
# Demo runs: both triples share the value 8.
print(find_common_element([1, 2, 3, 4, 6,8], [6, 7,8, 9], [1,2,3,8]))
print(find_common_element([1, 2, 3, 4, 6,8], [6, 7,8, 9], [8]))
print(find_common_element([1, 2, 3, 4, 7 ,8], [6, 7,8, 9], [6,7])) |
from enum import Enum
from flask import Flask, redirect, request
import urllib.parse as urlparse
from common.course_config import get_course
from common.db import connect_db
from common.html import error, html, make_row
from common.oauth_client import create_oauth_client, is_enrolled, is_staff, login
from common.rpc.auth import read_spreadsheet
from common.url_for import url_for
class AccessRestriction(Enum):
    """Enumeration of access restrictions: all (0), staff (1), student (2)."""
    ALL = 0      # link is public
    STAFF = 1    # course staff only
    STUDENT = 2  # enrolled students (and staff)
with connect_db() as db:
    # Resolved shortlinks, one row per (shortlink, course) pair.
    db(
        """CREATE TABLE IF NOT EXISTS shortlinks (
    shortlink varchar(512),
    url varchar(512),
    creator varchar(512),
    secure int,
    course varchar(128)
)"""
    )
    # Spreadsheet sources that refresh() reads shortlinks from.
    db(
        """CREATE TABLE IF NOT EXISTS sources (
    url varchar(512),
    sheet varchar(256),
    secure int,
    course varchar(128)
)"""
    )
def add_url_params(url, params_string):
    """Takes in a URL and a string of parameters, and adds the parameters to the URL.

    :param url: URL to add parameters to
    :type url: str
    :param params_string: string of parameters to add
    :type params_string: str
    :return: URL with parameters string added
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    # Join existing query and the new parameters, skipping empty pieces.
    merged_query = "&".join(part for part in (query, params_string) if part)
    return urlparse.urlunsplit((scheme, netloc, path, merged_query, fragment))
app = Flask(__name__)
# Treat /path and /path/ as the same route.
app.url_map.strict_slashes = False
if __name__ == "__main__":
    # Enable debug mode only when run directly (not under a WSGI server).
    app.debug = True
create_oauth_client(app, "61a-shortlinks")
def lookup(path):
    """Looks up a path in the database.

    :param path: path to look up
    :return: ``[url, creator, AccessRestriction]`` for the current course,
        or ``(None, None, None)`` upon failure.
    """
    with connect_db() as db:
        target = db(
            "SELECT url, creator, secure FROM shortlinks WHERE shortlink=%s AND course=%s",
            [path, get_course()],
        ).fetchone()
    if target:
        target = list(target)
        # Convert the stored int back into the enum for is_authorized().
        target[2] = AccessRestriction(target[2])
    return target or (None, None, None)
def is_authorized(secure: AccessRestriction):
    """Returns authorization status based on the given access restriction.

    :param secure: access restriction
    :type secure: AccessRestriction
    :return: authorization status (``True`` or ``False``)
    :raises ValueError: if ``secure`` is not a valid AccessRestriction
    """
    if secure == AccessRestriction.ALL:
        return True
    elif secure == AccessRestriction.STAFF:
        return is_staff(get_course())
    elif secure == AccessRestriction.STUDENT:
        return is_enrolled(get_course())
    else:
        # ValueError is the idiomatic exception for a bad argument value;
        # the original raised a bare Exception (still caught by any caller
        # that catches Exception, so this is backward-compatible).
        raise ValueError(f"{secure} is not a valid AccessRestriction")
@app.route("/<path>/")
def handler(path):
url, creator, secure = lookup(path)
if not url:
return error("Target not found!")
if not is_authorized(secure):
return login()
return redirect(add_url_params(url, request.query_string.decode("utf-8")))
@app.route("/preview/<path>/")
def preview(path):
url, creator, secure = lookup(path)
if url is None:
return html("No such link exists.")
if not is_authorized(secure):
return login()
return html(
'Points to <a href="{0}">{0}</a> by {1}'.format(
add_url_params(url, request.query_string.decode("utf-8")), creator
)
)
@app.route("/")
def index():
if not is_staff(get_course()):
return login()
with connect_db() as db:
sources = db(
"SELECT url, sheet, secure FROM sources WHERE course=%s", [get_course()]
).fetchall()
insert_fields = f"""<input placeholder="Spreadsheet URL" name="url"></input>
<input placeholder="Sheet Name" name="sheet"></input>
<select name="secure">
<option value="{AccessRestriction.ALL.value}">Public</option>
<option value="{AccessRestriction.STAFF.value}">Staff Only</option>
<option value="{AccessRestriction.STUDENT.value}">Students and Staff</option>
</select>"""
sources = "<br/>".join(
make_row(
f'<a href="{url}">{url}</a> {sheet} (Secure: {AccessRestriction(secure).name})'
f'<input name="url" type="hidden" value="{url}"></input>'
f'<input name="sheet" type="hidden" value="{sheet}"></input>',
url_for("remove_source"),
)
for url, sheet, secure in sources
)
return html(
f"""
<h2>Course: <code>{get_course()}</code></h2>
Each spreadsheet should be shared with the 61A service account
<a href="mailto:secure-links@ok-server.iam.gserviceaccount.com">
secure-links@ok-server.iam.gserviceaccount.com</a>.
They should have three columns with the headers: "URL", "Shortlink", and "Creator".
<p>
Visit <a href="{url_for("refresh")}">{url_for("refresh")}</a> (no auth required)
after adding a link to synchronize with the spreadsheets.
<h3>Sources</h3>
{sources}
<h3>Add Sources</h3>
{make_row(insert_fields, url_for("add_source"), "Add")}
"""
)
@app.route("/add_source", methods=["POST"])
def add_source():
if not is_staff(get_course()):
return login()
url = request.form["url"]
sheet = request.form["sheet"]
secure = int(request.form.get("secure"))
with connect_db() as db:
db(
"INSERT INTO sources VALUES (%s, %s, %s, %s)",
[url, sheet, secure, get_course()],
)
return redirect(url_for("index"))
@app.route("/remove_source", methods=["POST"])
def remove_source():
if not is_staff(get_course()):
return login()
url = request.form["url"]
sheet = request.form["sheet"]
with connect_db() as db:
db(
"DELETE FROM sources WHERE url=%s AND sheet=%s AND course=%s",
[url, sheet, get_course()],
)
return redirect(url_for("index"))
@app.route("/_refresh/")
def refresh():
data = []
links = set()
with connect_db() as db:
sheets = db(
"SELECT url, sheet, secure FROM sources WHERE course=(%s)", [get_course()]
).fetchall()
for url, sheet, secure in sheets:
try:
csvr = read_spreadsheet(url=url, sheet_name=sheet)
except:
return error(f"Failed to read spreadsheet {url} (Sheet: {sheet})")
headers = [x.lower() for x in csvr[0]]
for row in csvr[1:]:
row = row + [""] * 5
shortlink = row[headers.index("shortlink")]
if not shortlink.strip():
continue
if shortlink in links:
return error(f"Duplicate shortlink `{shortlink}` found, aborting.")
links.add(shortlink)
url = row[headers.index("url")]
creator = row[headers.index("creator")]
data.append([shortlink, url, creator, secure, get_course()])
with connect_db() as db:
db("DELETE FROM shortlinks WHERE course=%s", [get_course()])
db(
"INSERT INTO shortlinks (shortlink, url, creator, secure, course) VALUES (%s, %s, %s, %s, %s)",
data,
)
return html("Links updated")
if __name__ == "__main__":
app.run(debug=True)
|
# The array type can hold homogeneous data types and operate
# on them more efficiently while using less memory
from array import array
# TODO: Create an array of integer numbers
# Type code 'i' stores C signed ints.
arr1 = array('i', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
# TODO: Add additional items to the array
# TODO: iterate over the array content like any other list
# TODO: Try to add a non-integer number to the array
# TODO: Create an array to hold bytes instead of ints
# Type code 'B' stores unsigned bytes (valid range 0-255).
arr2 = array('B', [18, 102, 182, 56, 89, 5, 254, 32, 64, 50])
# TODO: try to add an item that's out of range
# TODO: Convert an array to a list
|
# -*- coding:utf-8 -*-
import weakref
class AbstractDescriptor(object):
    """Base descriptor that can discover the attribute name it is bound to.

    Results are memoized per owner class in a WeakKeyDictionary so the cache
    does not keep owner classes alive.
    """
    def __init__(self):
        self._resolve_cache = weakref.WeakKeyDictionary()
    def _resolve_name(self, owner):
        # Fast path: name already resolved for this owner class.
        try:
            return self._resolve_cache[owner]
        except KeyError:
            pass
        # Scan the owner's attributes for the one that *is* this descriptor.
        for name in dir(owner):
            try:
                attr = getattr(owner, name)
            except AttributeError:
                # Some attributes may be unreadable; skip them.
                continue
            if attr is not self:
                continue
            break
        else:
            # Reached only if the descriptor is not found on the owner at all.
            raise RuntimeError(
                '{!r} Unable to resolve bounded name (UNREACHABLE)'.format(
                    self))
        self._resolve_cache[owner] = name
        return name
class Alias(AbstractDescriptor):
    """Descriptor that forwards attribute access along a path of adapters.

    Each element of *path* is an adapter (e.g. AttrAdapter / ItemAdapter)
    whose read()/write() steps one level deeper into the aliased object.
    """
    def __init__(self, path, *tail):
        super().__init__()
        self.path = [path]
        self.path.extend(tail)
    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        return self.resolve(instance)[-1]
    def __set__(self, instance, value):
        # Resolve everything up to (but excluding) the final step, then walk
        # backwards writing the value until an adapter mutates in place.
        trace = self.resolve(instance, exclude_last=True)
        trace.insert(0, instance)
        trace.reverse()
        steps = reversed(self.path)
        for adapter, current in zip(steps, trace):
            value = adapter.write(current, value)
            if value is current:
                break
        else:
            # need to replace root item
            raise AttributeError(
                'Invalid alias setup - trying to write outside of aliased '
                'scope.')
    def resolve(self, instance, exclude_last=False):
        """Follow the adapter path from *instance*, returning each intermediate."""
        trace = []
        target, idx = instance, 0
        path = self.path
        if exclude_last:
            path = path[:-1]
        try:
            for idx, adapter in enumerate(path):
                target = adapter.read(target)
                trace.append(target)
        except AliasAdapterError:
            # BUG FIX: adapters are objects, so ''.join() over them raised
            # TypeError and masked this error. Stringify each step, and
            # include the failing step (index idx) in the rendered path.
            raise AttributeError(
                'Unresolvable alias {!r}. Lookup failed at {!r}.{}'.format(
                    self._resolve_name(type(instance)), instance,
                    ''.join(map(str, self.path[:idx + 1]))))
        return trace
class AliasAdapter(object):
    """One step in an Alias path; subclasses define how to traverse it."""
    def __init__(self, name):
        self.name = name
    def read(self, target):
        # Return the value one level deeper, or raise AliasAdapterError.
        raise NotImplementedError
    def write(self, target, value):
        # Write the value; return `target` itself when mutated in place.
        raise NotImplementedError
    def __str__(self):
        raise NotImplementedError
class AttrAdapter(AliasAdapter):
    """Path step that traverses `target.<name>` attribute access."""
    def read(self, target):
        try:
            return getattr(target, self.name)
        except AttributeError:
            raise AliasAdapterError
    def write(self, target, value):
        setattr(target, self.name, value)
        return target
    def __str__(self):
        return '.{}'.format(self.name)
class ItemAdapter(AliasAdapter):
    """Path step that traverses `target[<name>]` subscription."""
    def read(self, target):
        try:
            return target[self.name]
        except (KeyError, IndexError):
            raise AliasAdapterError
    def write(self, target, value):
        target[self.name] = value
        return target
    def __str__(self):
        return '[{!r}]'.format(self.name)
class AliasAdapterError(Exception):
    """Raised by an adapter when a read step cannot be resolved."""
    pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-01 04:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration; keep field definitions verbatim.
    dependencies = [
        ('person', '0028_auto_20160229_2252'),
    ]
    operations = [
        # Choice values are SVG-style polygon point strings for each sector.
        migrations.AddField(
            model_name='sector',
            name='points',
            field=models.SmallIntegerField(blank=True, choices=[(1, '5,5 15,5 15,15 5,15'), (2, '0,0 20,0 15,5 5,5'), (3, '5,15 15,15 20,20 0,20'), (4, '15,5 20,0 20,20 15,15'), (5, '0,0 5,5 5,15 0,20')], null=True),
        ),
        # Pixel-offset choices for tooth placement on the chart.
        migrations.AddField(
            model_name='tooth',
            name='position_x',
            field=models.SmallIntegerField(blank=True, choices=[(0, 0), (1, 25), (2, 50), (3, 75), (4, 100), (5, 125), (6, 150), (7, 175), (8, 210), (9, 235), (10, 260), (11, 285), (12, 310), (13, 335), (14, 360), (15, 385)], null=True),
        ),
        migrations.AddField(
            model_name='tooth',
            name='position_y',
            field=models.SmallIntegerField(blank=True, choices=[(0, 0), (1, 40), (2, 80), (3, 120)], null=True),
        ),
    ]
|
import PyTCI.tci
import PyTCI.audio
|
import os
# Authenticated user
# NOTE(review): os.getenv returns None when TWITTER_USERNAME is unset, so
# .lower() would raise AttributeError at import time -- confirm the env var
# is guaranteed, or fail with a clearer error.
USERNAME = os.getenv('TWITTER_USERNAME').lower()
# Mention-polling behavior (seconds).
MENTION_EXPIRY_TIME_IN_SECONDS = 60
MENTION_CHECK_INTERVAL_IN_SECONDS = 15
LAST_MENTION_FILE = 'last_mention'
# Asset directories and files.
FONTS_DIR = 'fonts/'
QUOTES_DIR = 'quotes/'
IMAGES_DIR = 'images/'
IMAGE_STORE_FILE=f'{IMAGES_DIR}store.json'
IMAGE_EXT = '.jpg'
DEFAULT_IMAGE_KIND='Nature'
MIN_MATCH_SIMILARITY=0.5
# Text rendering parameters.
QUOTE_FONT_SIZE = 36
AUTHOR_FONT_SIZE = 24
WATERMARK_FONT_SIZE = 16
MAX_CHARS_PER_LINE = 40
GAP_SIZE = 8
TEXT_COLOR = (255, 255, 255)
WATERMARK_TEXT = f'@{USERNAME}'
## Twitter API Keys
CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY')
CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET')
ACCESS_TOKEN = os.getenv('TWITTER_ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
## Discord
DISCORD_WEBHOOK_URL = os.getenv('DISCORD_WEBHOOK_URL')
from flask import render_template,request,redirect,url_for
from ..requests import getSources, get_articles
from ..models import Source
from . import main
@main.route('/')
def index():
    """Render the homepage with top headlines."""
    return render_template('index.html', title='Highlights', news=getSources())
@main.route('/entertainment/')
def entertainment():
    """Render the entertainment news page."""
    return render_template('entertainment.html', title='Entertainment',
                           entertainment=get_articles("entertainment"))
@main.route('/business/')
def business():
    """Render the business news page."""
    return render_template('business.html', title='Business',
                           business=get_articles("business"))
@main.route('/sports/')
def sports():
    """Render the sports news page."""
    return render_template('sports.html', title='Sports',
                           sports=get_articles("sports"))
@main.route('/technology/')
def technology():
    """Render the technology news page."""
    return render_template('technology.html', title='Technology',
                           technology=get_articles("technology"))
|
import validatish
import formish
import schemaish
import unittest
class TestFormData(unittest.TestCase):
    """Build a Simple Form and test that it doesn't raise exceptions on build and that the methods/properties are as expected"""
    # Nested schema: fields one.a (required), one.b, one.c.x and one.c.y.
    schema_nested = schemaish.Structure([
        ("one", schemaish.Structure([
            ("a", schemaish.String(validator=validatish.Required(),
               description="This is a field with name a and title A and has a Required validator")),
            ("b", schemaish.String(title='bee')),
            ("c", schemaish.Structure([("x", schemaish.String(title='cee')),("y", schemaish.String())])),
            ])
        ),
        ])
    def test_titles(self):
        # Titles come from the schema and are writable per form field.
        form = formish.Form(self.schema_nested, 'nested')
        assert form['one.b'].title == 'bee'
        assert form['one.c.x'].title == 'cee'
        form['one.b'].title = 'bee bee cee'
        assert form['one.b'].title == 'bee bee cee'
        form['one.c.x'].title = 'bee bee cee'
        assert form['one.c.x'].title == 'bee bee cee'
    def test_widgets(self):
        # Default widget is Input; the property can be reassigned.
        form = formish.Form(self.schema_nested, 'nested')
        assert isinstance(form['one.a'].widget.widget,formish.Input)
        form['one.a'].widget = formish.TextArea()
        assert isinstance(form['one.a'].widget.widget,formish.TextArea)
    def test_description(self):
        # Descriptions come from the schema and are writable per form field.
        form = formish.Form(self.schema_nested, 'nested')
        assert str(form['one.a'].description) == "This is a field with name a and title A and has a Required validator"
        form['one.a'].description = "This is a new description"
        assert str(form['one.a'].description) == "This is a new description"
    def test_value(self):
        # Field values must not be assignable directly.
        form = formish.Form(self.schema_nested, 'nested')
        self.assertRaises(KeyError, setattr, form['one.a'], 'value', 7)
class TestSequenceFormData(unittest.TestCase):
    """Checks that a widget set on the wildcard 'a.*' propagates to items."""
    # Schema: 'a' is a sequence of strings.
    schema = schemaish.Structure()
    schema.add('a',schemaish.Sequence(schemaish.String(title='bee')))
    def test_widgets(self):
        # Wildcard widget set after defaults are bound updates item widgets.
        form = formish.Form(self.schema, 'sequences')
        form.defaults = {'a': ['1','2']}
        assert isinstance(form['a.0'].widget.widget,formish.Input)
        form['a.*'].widget = formish.TextArea()
        assert isinstance(form['a.0'].widget.widget,formish.TextArea)
    def test_widgets_before_data(self):
        # Wildcard widget set before defaults must still apply to new items.
        form = formish.Form(self.schema, 'sequences')
        form['a.*'].widget = formish.TextArea()
        form.defaults = {'a': ['1','2']}
        assert isinstance(form['a.0'].widget.widget,formish.TextArea)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
|
# flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from aenum import Enum, extend_enum
class ApplicationSignOnMode(
    str,
    Enum
):
    """
    An enumeration class for ApplicationSignOnMode.
    """
    BOOKMARK = "BOOKMARK"
    BASIC_AUTH = "BASIC_AUTH"
    BROWSER_PLUGIN = "BROWSER_PLUGIN"
    SECURE_PASSWORD_STORE = "SECURE_PASSWORD_STORE"
    AUTO_LOGIN = "AUTO_LOGIN"
    WS_FEDERATION = "WS_FEDERATION"
    SAML_2_0 = "SAML_2_0"
    OPENID_CONNECT = "OPENID_CONNECT"
    SAML_1_1 = "SAML_1_1"
    @classmethod
    def _missing_(cls, value):
        # Case-insensitive lookup; unknown values are added to the enum at
        # runtime (extend_enum) so unrecognized API modes don't break parsing.
        value_upper_case = value.upper()
        try:
            return getattr(cls, value_upper_case)
        except:
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; AttributeError would be the precise guard.
            # File is auto-generated, so flagging rather than changing.
            extend_enum(ApplicationSignOnMode, value_upper_case, value_upper_case)
            return getattr(cls, value_upper_case)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class IncidentComment(Resource):
    """Represents an incident comment.

    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id
    :vartype id: str
    :ivar name: Azure resource name
    :vartype name: str
    :ivar type: Azure resource type
    :vartype type: str
    :ivar created_time_utc: The time the comment was created
    :vartype created_time_utc: datetime
    :param message: Required. The comment message
    :type message: str
    :ivar author: Describes the client that created the comment
    :vartype author: ~securityinsights.models.ClientInfo
    """
    # Serialization constraints: read-only fields are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'created_time_utc': {'readonly': True},
        'message': {'required': True},
        'author': {'readonly': True},
    }
    # Maps attribute names to wire-format keys and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'created_time_utc': {'key': 'properties.createdTimeUtc', 'type': 'iso-8601'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'ClientInfo'},
    }
    def __init__(self, **kwargs):
        super(IncidentComment, self).__init__(**kwargs)
        # Read-only fields start as None; the server fills them in.
        self.created_time_utc = None
        self.message = kwargs.get('message', None)
        self.author = None
|
from rest_framework import serializers
from .models import Warehouse, Product, Location, LocationQuantity
class WarehouseSerializer(serializers.HyperlinkedModelSerializer):
    """Warehouse plus a hyperlinked list of the locations it contains."""
    warehouse_locations = serializers.HyperlinkedRelatedField(
        many=True, read_only=True, view_name="location-detail"
    )
    class Meta:
        model = Warehouse
        fields = ["url", "id", "name", "location_count", "warehouse_locations"]
class ProductSerializer(serializers.HyperlinkedModelSerializer):
    """Product with its stock-planning quantities."""
    class Meta:
        model = Product
        fields = [
            "url",
            "id",
            "name",
            "quantity",
            "allocated_for_jobs",
            "desired_stock_level",
            "planned",
            "purchased",
            "sold",
            "required",
        ]
class LocationSerializer(serializers.HyperlinkedModelSerializer):
    """A named storage location inside a warehouse."""
    class Meta:
        model = Location
        fields = ["url", "id", "warehouse", "name", "location_warehouse"]
class LocationQuantitySerializer(serializers.HyperlinkedModelSerializer):
    """How much of a product is stored at a particular location."""
    class Meta:
        model = LocationQuantity
        fields = [
            "url",
            "id",
            "product",
            "location",
            "quantity",
            "location_name",
            "location_warehouse",
            "product_name",
        ]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: common.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='common.proto',
package='pb',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0c\x63ommon.proto\x12\x02pb\"L\n\x12SlotNumberResponse\x12\x12\n\nSlotNumber\x18\x01 \x01(\x04\x12\x11\n\tBlockHash\x18\x02 \x01(\x0c\x12\x0f\n\x07TipSlot\x18\x03 \x01(\x04\"!\n\x0e\x43onnectMessage\x12\x0f\n\x07\x41\x64\x64ress\x18\x01 \x01(\t\"/\n\x1aListeningAddressesResponse\x12\x11\n\tAddresses\x18\x01 \x03(\t\"D\n\x12ProposalSignedData\x12\x0c\n\x04Slot\x18\x01 \x01(\x04\x12\r\n\x05Shard\x18\x02 \x01(\x04\x12\x11\n\tBlockHash\x18\x03 \x01(\x0c\"\xbf\x01\n\x10ProposerSlashing\x12\x15\n\rProposerIndex\x18\x01 \x01(\r\x12-\n\rProposalData1\x18\x02 \x01(\x0b\x32\x16.pb.ProposalSignedData\x12\x1a\n\x12ProposalSignature1\x18\x03 \x01(\x0c\x12-\n\rProposalData2\x18\x04 \x01(\x0b\x32\x16.pb.ProposalSignedData\x12\x1a\n\x12ProposalSignature2\x18\x05 \x01(\x0c\"\xa0\x01\n\x11SlashableVoteData\x12%\n\x1d\x41ggregateSignaturePoC0Indices\x18\x01 \x03(\r\x12%\n\x1d\x41ggregateSignaturePoC1Indices\x18\x02 \x03(\r\x12!\n\x04\x44\x61ta\x18\x03 \x01(\x0b\x32\x13.pb.AttestationData\x12\x1a\n\x12\x41ggregateSignature\x18\x04 \x01(\x0c\"\\\n\x0e\x43\x61sperSlashing\x12$\n\x05Vote0\x18\x01 \x01(\x0b\x32\x15.pb.SlashableVoteData\x12$\n\x05Vote1\x18\x02 \x01(\x0b\x32\x15.pb.SlashableVoteData\"\xce\x01\n\x0f\x41ttestationData\x12\x0c\n\x04Slot\x18\x01 \x01(\x04\x12\x17\n\x0f\x42\x65\x61\x63onBlockHash\x18\x02 \x01(\x0c\x12\x13\n\x0bTargetEpoch\x18\x03 \x01(\x04\x12\x12\n\nTargetHash\x18\x04 \x01(\x0c\x12\x13\n\x0bSourceEpoch\x18\x05 \x01(\x04\x12\x12\n\nSourceHash\x18\x06 \x01(\x0c\x12\x16\n\x0eShardBlockHash\x18\x07 \x01(\x0c\x12\r\n\x05Shard\x18\x08 \x01(\x04\x12\x1b\n\x13LatestCrosslinkHash\x18\t \x01(\x0c\"Q\n\x1c\x41ttestationDataAndCustodyBit\x12!\n\x04\x44\x61ta\x18\x01 \x01(\x0b\x32\x13.pb.AttestationData\x12\x0e\n\x06PoCBit\x18\x02 \x01(\x08\"~\n\x0b\x41ttestation\x12!\n\x04\x44\x61ta\x18\x01 \x01(\x0b\x32\x13.pb.AttestationData\x12\x1d\n\x15ParticipationBitfield\x18\x02 \x01(\x0c\x12\x17\n\x0f\x43ustodyBitfield\x18\x03 
\x01(\x0c\x12\x14\n\x0c\x41ggregateSig\x18\x04 \x01(\x0c\"`\n\x11\x44\x65positParameters\x12\x11\n\tPublicKey\x18\x01 \x01(\x0c\x12\x19\n\x11ProofOfPossession\x18\x02 \x01(\x0c\x12\x1d\n\x15WithdrawalCredentials\x18\x03 \x01(\x0c\"4\n\x07\x44\x65posit\x12)\n\nParameters\x18\x01 \x01(\x0b\x32\x15.pb.DepositParameters\"?\n\x04\x45xit\x12\x0c\n\x04Slot\x18\x01 \x01(\x04\x12\x16\n\x0eValidatorIndex\x18\x02 \x01(\x04\x12\x11\n\tSignature\x18\x03 \x01(\x0c\"E\n\x05\x42lock\x12\x1f\n\x06Header\x18\x01 \x01(\x0b\x32\x0f.pb.BlockHeader\x12\x1b\n\x04\x42ody\x18\x02 \x01(\x0b\x32\r.pb.BlockBody\"q\n\x0b\x42lockHeader\x12\x12\n\nSlotNumber\x18\x01 \x01(\x04\x12\x12\n\nParentRoot\x18\x02 \x01(\x0c\x12\x11\n\tStateRoot\x18\x03 \x01(\x0c\x12\x14\n\x0cRandaoReveal\x18\x04 \x01(\x0c\x12\x11\n\tSignature\x18\x05 \x01(\x0c\"\xeb\x01\n\tBlockBody\x12%\n\x0c\x41ttestations\x18\x01 \x03(\x0b\x32\x0f.pb.Attestation\x12/\n\x11ProposerSlashings\x18\x02 \x03(\x0b\x32\x14.pb.ProposerSlashing\x12+\n\x0f\x43\x61sperSlashings\x18\x03 \x03(\x0b\x32\x12.pb.CasperSlashing\x12\x1d\n\x08\x44\x65posits\x18\x04 \x03(\x0b\x32\x0b.pb.Deposit\x12\x17\n\x05\x45xits\x18\x05 \x03(\x0b\x32\x08.pb.Exit\x12!\n\x05Votes\x18\x06 \x03(\x0b\x32\x12.pb.AggregatedVote\"M\n\x08\x46orkData\x12\x16\n\x0ePreForkVersion\x18\x01 \x01(\x04\x12\x17\n\x0fPostForkVersion\x18\x02 \x01(\x04\x12\x10\n\x08\x46orkSlot\x18\x03 \x01(\x04\"\xb9\x01\n\tValidator\x12\x0e\n\x06Pubkey\x18\x01 \x01(\x0c\x12\x1d\n\x15WithdrawalCredentials\x18\x02 \x01(\x0c\x12\x0e\n\x06Status\x18\x04 \x01(\x04\x12\x1e\n\x16LatestStatusChangeSlot\x18\x05 \x01(\x04\x12\x11\n\tExitCount\x18\x06 \x01(\x04\x12\x19\n\x11LastPoCChangeSlot\x18\x07 \x01(\x04\x12\x1f\n\x17SecondLastPoCChangeSlot\x18\x08 \x01(\x04\"O\n\x0eShardCommittee\x12\r\n\x05Shard\x18\x01 \x01(\x04\x12\x11\n\tCommittee\x18\x02 \x03(\r\x12\x1b\n\x13TotalValidatorCount\x18\x03 \x01(\x04\"@\n\x16ShardCommitteesForSlot\x12&\n\nCommittees\x18\x01 
\x03(\x0b\x32\x12.pb.ShardCommittee\":\n\x1bPersistentCommitteesForSlot\x12\x1b\n\x13PersistentCommittee\x18\x01 \x03(\r\"I\n\tCrosslink\x12\x0c\n\x04Slot\x18\x01 \x01(\x04\x12\x16\n\x0eShardBlockHash\x18\x02 \x01(\x0c\x12\x16\n\x0eShardStateHash\x18\x03 \x01(\x0c\"\x9e\x01\n\x12PendingAttestation\x12!\n\x04\x44\x61ta\x18\x01 \x01(\x0b\x32\x13.pb.AttestationData\x12\x1d\n\x15ParticipationBitfield\x18\x02 \x01(\x0c\x12\x17\n\x0f\x43ustodyBitfield\x18\x03 \x01(\x0c\x12\x16\n\x0eInclusionDelay\x18\x04 \x01(\x04\x12\x15\n\rProposerIndex\x18\x05 \x01(\r\"\xd0\x06\n\x05State\x12\x0c\n\x04Slot\x18\x01 \x01(\x04\x12\x12\n\nEpochIndex\x18\x02 \x01(\x04\x12\x13\n\x0bGenesisTime\x18\x03 \x01(\x04\x12\x1e\n\x08\x46orkData\x18\x04 \x01(\x0b\x32\x0c.pb.ForkData\x12(\n\x11ValidatorRegistry\x18\x05 \x03(\x0b\x32\r.pb.Validator\x12\x19\n\x11ValidatorBalances\x18\x06 \x03(\x04\x12*\n\"ValidatorRegistryLatestChangeEpoch\x18\x07 \x01(\x04\x12\"\n\x1aValidatorRegistryExitCount\x18\x08 \x01(\x04\x12&\n\x1eValidatorRegistryDeltaChainTip\x18\t \x01(\x0c\x12\x11\n\tRandaoMix\x18\n \x01(\x0c\x12\x15\n\rNextRandaoMix\x18\x0b \x01(\x0c\x12\x33\n\x0fShardCommittees\x18\x0c \x03(\x0b\x32\x1a.pb.ShardCommitteesForSlot\x12\x1e\n\x16PreviousJustifiedEpoch\x18\r \x01(\x04\x12\x16\n\x0eJustifiedEpoch\x18\x0e \x01(\x04\x12\x1d\n\x15JustificationBitField\x18\x0f \x01(\x04\x12\x16\n\x0e\x46inalizedEpoch\x18\x10 \x01(\x04\x12\'\n\x10LatestCrosslinks\x18\x11 \x03(\x0b\x32\r.pb.Crosslink\x12)\n\x12PreviousCrosslinks\x18\x12 \x03(\x0b\x32\r.pb.Crosslink\x12\x15\n\rShardRegistry\x18\x13 \x03(\x0c\x12\x19\n\x11LatestBlockHashes\x18\x14 \x03(\x0c\x12\x38\n\x18\x43urrentEpochAttestations\x18\x15 \x03(\x0b\x32\x16.pb.PendingAttestation\x12\x39\n\x19PreviousEpochAttestations\x18\x16 \x03(\x0b\x32\x16.pb.PendingAttestation\x12\x19\n\x11\x42\x61tchedBlockRoots\x18\x17 \x03(\x0c\x12%\n\tProposals\x18\x18 \x03(\x0b\x32\x12.pb.ActiveProposal\x12(\n\x0cPendingVotes\x18\x19 
\x03(\x0b\x32\x12.pb.AggregatedVote\"t\n\x1bValidatorRegistryDeltaBlock\x12\x1f\n\x17LatestRegistryDeltaRoot\x18\x01 \x01(\x0c\x12\x16\n\x0eValidatorIndex\x18\x02 \x01(\r\x12\x0e\n\x06Pubkey\x18\x03 \x01(\x0c\x12\x0c\n\x04\x46lag\x18\x04 \x01(\x04\"3\n\x12\x41ttestationRequest\x12\x1d\n\x15ParticipationBitfield\x18\x01 \x01(\x0c\"N\n\x08VoteData\x12\x0c\n\x04Type\x18\x01 \x01(\r\x12\x0e\n\x06Shards\x18\x02 \x03(\r\x12\x12\n\nActionHash\x18\x03 \x01(\x0c\x12\x10\n\x08Proposer\x18\x04 \x01(\r\"V\n\x0e\x41ggregatedVote\x12\x1a\n\x04\x44\x61ta\x18\x01 \x01(\x0b\x32\x0c.pb.VoteData\x12\x11\n\tSignature\x18\x02 \x01(\x0c\x12\x15\n\rParticipation\x18\x03 \x01(\x0c\"g\n\x0e\x41\x63tiveProposal\x12\x1a\n\x04\x44\x61ta\x18\x01 \x01(\x0b\x32\x0c.pb.VoteData\x12\x15\n\rParticipation\x18\x02 \x01(\x0c\x12\x12\n\nStartEpoch\x18\x03 \x01(\x04\x12\x0e\n\x06Queued\x18\x04 \x01(\x08\"T\n\nShardBlock\x12$\n\x06Header\x18\x01 \x01(\x0b\x32\x14.pb.ShardBlockHeader\x12 \n\x04\x42ody\x18\x02 \x01(\x0b\x32\x12.pb.ShardBlockBody\"\x97\x01\n\x10ShardBlockHeader\x12\x19\n\x11PreviousBlockHash\x18\x01 \x01(\x0c\x12\x0c\n\x04Slot\x18\x02 \x01(\x04\x12\x11\n\tSignature\x18\x03 \x01(\x0c\x12\x11\n\tStateRoot\x18\x04 \x01(\x0c\x12\x17\n\x0fTransactionRoot\x18\x05 \x01(\x0c\x12\x1b\n\x13\x46inalizedBeaconHash\x18\x06 \x01(\x0c\"<\n\x0eShardBlockBody\x12*\n\x0cTransactions\x18\x01 \x03(\x0b\x32\x14.pb.ShardTransaction\"+\n\x10ShardTransaction\x12\x17\n\x0fTransactionData\x18\x01 \x01(\x0c\"t\n\x13VerificationWitness\x12\x0b\n\x03Key\x18\x01 \x01(\x0c\x12\r\n\x05Value\x18\x02 \x01(\x0c\x12\x17\n\x0fWitnessBitfield\x18\x03 \x01(\x0c\x12\x15\n\rWitnessHashes\x18\x04 \x03(\x0c\x12\x11\n\tLastLevel\x18\x05 \x01(\r\"\x83\x01\n\rUpdateWitness\x12\x0b\n\x03Key\x18\x01 \x01(\x0c\x12\x10\n\x08OldValue\x18\x02 \x01(\x0c\x12\x10\n\x08NewValue\x18\x03 \x01(\x0c\x12\x17\n\x0fWitnessBitfield\x18\x04 \x01(\x0c\x12\x15\n\rWitnessHashes\x18\x05 \x03(\x0c\x12\x11\n\tLastLevel\x18\x06 
\x01(\r\"\xc8\x01\n\x12TransactionPackage\x12\x36\n\x15VerificationWitnesses\x18\x01 \x03(\x0b\x32\x17.pb.VerificationWitness\x12*\n\x0fUpdateWitnesses\x18\x02 \x03(\x0b\x32\x11.pb.UpdateWitness\x12*\n\x0cTransactions\x18\x03 \x03(\x0b\x32\x14.pb.ShardTransaction\x12\x11\n\tStartRoot\x18\x04 \x01(\x0c\x12\x0f\n\x07\x45ndRoot\x18\x05 \x01(\x0c\x62\x06proto3')
)
_SLOTNUMBERRESPONSE = _descriptor.Descriptor(
name='SlotNumberResponse',
full_name='pb.SlotNumberResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='SlotNumber', full_name='pb.SlotNumberResponse.SlotNumber', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BlockHash', full_name='pb.SlotNumberResponse.BlockHash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TipSlot', full_name='pb.SlotNumberResponse.TipSlot', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=96,
)
_CONNECTMESSAGE = _descriptor.Descriptor(
name='ConnectMessage',
full_name='pb.ConnectMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Address', full_name='pb.ConnectMessage.Address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=131,
)
_LISTENINGADDRESSESRESPONSE = _descriptor.Descriptor(
name='ListeningAddressesResponse',
full_name='pb.ListeningAddressesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Addresses', full_name='pb.ListeningAddressesResponse.Addresses', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=133,
serialized_end=180,
)
_PROPOSALSIGNEDDATA = _descriptor.Descriptor(
name='ProposalSignedData',
full_name='pb.ProposalSignedData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Slot', full_name='pb.ProposalSignedData.Slot', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Shard', full_name='pb.ProposalSignedData.Shard', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BlockHash', full_name='pb.ProposalSignedData.BlockHash', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=250,
)
_PROPOSERSLASHING = _descriptor.Descriptor(
name='ProposerSlashing',
full_name='pb.ProposerSlashing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ProposerIndex', full_name='pb.ProposerSlashing.ProposerIndex', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProposalData1', full_name='pb.ProposerSlashing.ProposalData1', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProposalSignature1', full_name='pb.ProposerSlashing.ProposalSignature1', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProposalData2', full_name='pb.ProposerSlashing.ProposalData2', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProposalSignature2', full_name='pb.ProposerSlashing.ProposalSignature2', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=444,
)
_SLASHABLEVOTEDATA = _descriptor.Descriptor(
name='SlashableVoteData',
full_name='pb.SlashableVoteData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='AggregateSignaturePoC0Indices', full_name='pb.SlashableVoteData.AggregateSignaturePoC0Indices', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggregateSignaturePoC1Indices', full_name='pb.SlashableVoteData.AggregateSignaturePoC1Indices', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Data', full_name='pb.SlashableVoteData.Data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggregateSignature', full_name='pb.SlashableVoteData.AggregateSignature', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=447,
serialized_end=607,
)
_CASPERSLASHING = _descriptor.Descriptor(
name='CasperSlashing',
full_name='pb.CasperSlashing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Vote0', full_name='pb.CasperSlashing.Vote0', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Vote1', full_name='pb.CasperSlashing.Vote1', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=609,
serialized_end=701,
)
_ATTESTATIONDATA = _descriptor.Descriptor(
name='AttestationData',
full_name='pb.AttestationData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Slot', full_name='pb.AttestationData.Slot', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BeaconBlockHash', full_name='pb.AttestationData.BeaconBlockHash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TargetEpoch', full_name='pb.AttestationData.TargetEpoch', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TargetHash', full_name='pb.AttestationData.TargetHash', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SourceEpoch', full_name='pb.AttestationData.SourceEpoch', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SourceHash', full_name='pb.AttestationData.SourceHash', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ShardBlockHash', full_name='pb.AttestationData.ShardBlockHash', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Shard', full_name='pb.AttestationData.Shard', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LatestCrosslinkHash', full_name='pb.AttestationData.LatestCrosslinkHash', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=704,
serialized_end=910,
)
_ATTESTATIONDATAANDCUSTODYBIT = _descriptor.Descriptor(
name='AttestationDataAndCustodyBit',
full_name='pb.AttestationDataAndCustodyBit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Data', full_name='pb.AttestationDataAndCustodyBit.Data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PoCBit', full_name='pb.AttestationDataAndCustodyBit.PoCBit', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=912,
serialized_end=993,
)
_ATTESTATION = _descriptor.Descriptor(
name='Attestation',
full_name='pb.Attestation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Data', full_name='pb.Attestation.Data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ParticipationBitfield', full_name='pb.Attestation.ParticipationBitfield', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='CustodyBitfield', full_name='pb.Attestation.CustodyBitfield', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggregateSig', full_name='pb.Attestation.AggregateSig', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=995,
serialized_end=1121,
)
_DEPOSITPARAMETERS = _descriptor.Descriptor(
name='DepositParameters',
full_name='pb.DepositParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='PublicKey', full_name='pb.DepositParameters.PublicKey', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProofOfPossession', full_name='pb.DepositParameters.ProofOfPossession', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='WithdrawalCredentials', full_name='pb.DepositParameters.WithdrawalCredentials', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1123,
serialized_end=1219,
)
_DEPOSIT = _descriptor.Descriptor(
name='Deposit',
full_name='pb.Deposit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Parameters', full_name='pb.Deposit.Parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1221,
serialized_end=1273,
)
_EXIT = _descriptor.Descriptor(
name='Exit',
full_name='pb.Exit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Slot', full_name='pb.Exit.Slot', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ValidatorIndex', full_name='pb.Exit.ValidatorIndex', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Signature', full_name='pb.Exit.Signature', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1275,
serialized_end=1338,
)
_BLOCK = _descriptor.Descriptor(
name='Block',
full_name='pb.Block',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Header', full_name='pb.Block.Header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Body', full_name='pb.Block.Body', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1340,
serialized_end=1409,
)
_BLOCKHEADER = _descriptor.Descriptor(
name='BlockHeader',
full_name='pb.BlockHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='SlotNumber', full_name='pb.BlockHeader.SlotNumber', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ParentRoot', full_name='pb.BlockHeader.ParentRoot', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='StateRoot', full_name='pb.BlockHeader.StateRoot', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='RandaoReveal', full_name='pb.BlockHeader.RandaoReveal', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Signature', full_name='pb.BlockHeader.Signature', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1411,
serialized_end=1524,
)
_BLOCKBODY = _descriptor.Descriptor(
name='BlockBody',
full_name='pb.BlockBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Attestations', full_name='pb.BlockBody.Attestations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ProposerSlashings', full_name='pb.BlockBody.ProposerSlashings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='CasperSlashings', full_name='pb.BlockBody.CasperSlashings', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Deposits', full_name='pb.BlockBody.Deposits', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Exits', full_name='pb.BlockBody.Exits', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Votes', full_name='pb.BlockBody.Votes', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1527,
serialized_end=1762,
)
# Generated protobuf descriptor for the `pb.ForkData` message; presumably emitted
# by protoc — edit the .proto source, not this file (TODO confirm generator).
# Field types here: type=4 is uint64; label=1 is optional/singular (proto3).
_FORKDATA = _descriptor.Descriptor(
  name='ForkData',
  full_name='pb.ForkData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='PreForkVersion', full_name='pb.ForkData.PreForkVersion', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='PostForkVersion', full_name='pb.ForkData.PostForkVersion', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ForkSlot', full_name='pb.ForkData.ForkSlot', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR's serialized file data.
  serialized_start=1764,
  serialized_end=1841,
)
# Generated protobuf descriptor for the `pb.Validator` message.
# type=12 is bytes, type=4 is uint64. Note: field number 3 is absent
# (numbers jump 2 -> 4), i.e. it was reserved/removed in the .proto.
_VALIDATOR = _descriptor.Descriptor(
  name='Validator',
  full_name='pb.Validator',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Pubkey', full_name='pb.Validator.Pubkey', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='WithdrawalCredentials', full_name='pb.Validator.WithdrawalCredentials', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Status', full_name='pb.Validator.Status', index=2,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LatestStatusChangeSlot', full_name='pb.Validator.LatestStatusChangeSlot', index=3,
      number=5, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ExitCount', full_name='pb.Validator.ExitCount', index=4,
      number=6, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LastPoCChangeSlot', full_name='pb.Validator.LastPoCChangeSlot', index=5,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='SecondLastPoCChangeSlot', full_name='pb.Validator.SecondLastPoCChangeSlot', index=6,
      number=8, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1844,
  serialized_end=2029,
)
# Generated protobuf descriptor for the `pb.ShardCommittee` message.
# `Committee` is a repeated uint32 (type=13, label=3); the other fields are uint64.
_SHARDCOMMITTEE = _descriptor.Descriptor(
  name='ShardCommittee',
  full_name='pb.ShardCommittee',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Shard', full_name='pb.ShardCommittee.Shard', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Committee', full_name='pb.ShardCommittee.Committee', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='TotalValidatorCount', full_name='pb.ShardCommittee.TotalValidatorCount', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2031,
  serialized_end=2110,
)
# Generated protobuf descriptor for the `pb.ShardCommitteesForSlot` message:
# a single repeated message field (type=11, label=3) of committees.
_SHARDCOMMITTEESFORSLOT = _descriptor.Descriptor(
  name='ShardCommitteesForSlot',
  full_name='pb.ShardCommitteesForSlot',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Committees', full_name='pb.ShardCommitteesForSlot.Committees', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2112,
  serialized_end=2176,
)
# Generated protobuf descriptor for the `pb.PersistentCommitteesForSlot` message:
# one repeated uint32 field (type=13, label=3).
_PERSISTENTCOMMITTEESFORSLOT = _descriptor.Descriptor(
  name='PersistentCommitteesForSlot',
  full_name='pb.PersistentCommitteesForSlot',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='PersistentCommittee', full_name='pb.PersistentCommitteesForSlot.PersistentCommittee', index=0,
      number=1, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2178,
  serialized_end=2236,
)
# Generated protobuf descriptor for the `pb.Crosslink` message:
# a uint64 slot plus two bytes hashes (type=12).
_CROSSLINK = _descriptor.Descriptor(
  name='Crosslink',
  full_name='pb.Crosslink',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Slot', full_name='pb.Crosslink.Slot', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ShardBlockHash', full_name='pb.Crosslink.ShardBlockHash', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ShardStateHash', full_name='pb.Crosslink.ShardStateHash', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2238,
  serialized_end=2311,
)
# Generated protobuf descriptor for the `pb.PendingAttestation` message:
# an embedded message `Data` (type=11), two bitfields as bytes, and two counters.
_PENDINGATTESTATION = _descriptor.Descriptor(
  name='PendingAttestation',
  full_name='pb.PendingAttestation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Data', full_name='pb.PendingAttestation.Data', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ParticipationBitfield', full_name='pb.PendingAttestation.ParticipationBitfield', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='CustodyBitfield', full_name='pb.PendingAttestation.CustodyBitfield', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='InclusionDelay', full_name='pb.PendingAttestation.InclusionDelay', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ProposerIndex', full_name='pb.PendingAttestation.ProposerIndex', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2314,
  serialized_end=2472,
)
# Generated protobuf descriptor for the `pb.State` message (25 fields, numbers 1-25).
# Legend for the generated field metadata below:
#   type=4  -> uint64, type=12 -> bytes, type=13 -> uint32, type=11 -> embedded message
#   label=1 -> singular, label=3 -> repeated
_STATE = _descriptor.Descriptor(
  name='State',
  full_name='pb.State',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Slot', full_name='pb.State.Slot', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='EpochIndex', full_name='pb.State.EpochIndex', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='GenesisTime', full_name='pb.State.GenesisTime', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ForkData', full_name='pb.State.ForkData', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorRegistry', full_name='pb.State.ValidatorRegistry', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorBalances', full_name='pb.State.ValidatorBalances', index=5,
      number=6, type=4, cpp_type=4, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorRegistryLatestChangeEpoch', full_name='pb.State.ValidatorRegistryLatestChangeEpoch', index=6,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorRegistryExitCount', full_name='pb.State.ValidatorRegistryExitCount', index=7,
      number=8, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorRegistryDeltaChainTip', full_name='pb.State.ValidatorRegistryDeltaChainTip', index=8,
      number=9, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='RandaoMix', full_name='pb.State.RandaoMix', index=9,
      number=10, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='NextRandaoMix', full_name='pb.State.NextRandaoMix', index=10,
      number=11, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ShardCommittees', full_name='pb.State.ShardCommittees', index=11,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='PreviousJustifiedEpoch', full_name='pb.State.PreviousJustifiedEpoch', index=12,
      number=13, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='JustifiedEpoch', full_name='pb.State.JustifiedEpoch', index=13,
      number=14, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='JustificationBitField', full_name='pb.State.JustificationBitField', index=14,
      number=15, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='FinalizedEpoch', full_name='pb.State.FinalizedEpoch', index=15,
      number=16, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LatestCrosslinks', full_name='pb.State.LatestCrosslinks', index=16,
      number=17, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='PreviousCrosslinks', full_name='pb.State.PreviousCrosslinks', index=17,
      number=18, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ShardRegistry', full_name='pb.State.ShardRegistry', index=18,
      number=19, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LatestBlockHashes', full_name='pb.State.LatestBlockHashes', index=19,
      number=20, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='CurrentEpochAttestations', full_name='pb.State.CurrentEpochAttestations', index=20,
      number=21, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='PreviousEpochAttestations', full_name='pb.State.PreviousEpochAttestations', index=21,
      number=22, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='BatchedBlockRoots', full_name='pb.State.BatchedBlockRoots', index=22,
      number=23, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Proposals', full_name='pb.State.Proposals', index=23,
      number=24, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='PendingVotes', full_name='pb.State.PendingVotes', index=24,
      number=25, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2475,
  serialized_end=3323,
)
# Generated protobuf descriptor for the `pb.ValidatorRegistryDeltaBlock` message:
# a bytes root, a uint32 validator index, a bytes pubkey, and a uint64 flag.
_VALIDATORREGISTRYDELTABLOCK = _descriptor.Descriptor(
  name='ValidatorRegistryDeltaBlock',
  full_name='pb.ValidatorRegistryDeltaBlock',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='LatestRegistryDeltaRoot', full_name='pb.ValidatorRegistryDeltaBlock.LatestRegistryDeltaRoot', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ValidatorIndex', full_name='pb.ValidatorRegistryDeltaBlock.ValidatorIndex', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Pubkey', full_name='pb.ValidatorRegistryDeltaBlock.Pubkey', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Flag', full_name='pb.ValidatorRegistryDeltaBlock.Flag', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3325,
  serialized_end=3441,
)
# Generated protobuf descriptor for the `pb.AttestationRequest` message:
# a single bytes bitfield.
_ATTESTATIONREQUEST = _descriptor.Descriptor(
  name='AttestationRequest',
  full_name='pb.AttestationRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ParticipationBitfield', full_name='pb.AttestationRequest.ParticipationBitfield', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3443,
  serialized_end=3494,
)
# Generated protobuf descriptor for the `pb.VoteData` message:
# uint32 type, repeated uint32 shards, bytes action hash, uint32 proposer.
_VOTEDATA = _descriptor.Descriptor(
  name='VoteData',
  full_name='pb.VoteData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Type', full_name='pb.VoteData.Type', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Shards', full_name='pb.VoteData.Shards', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ActionHash', full_name='pb.VoteData.ActionHash', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Proposer', full_name='pb.VoteData.Proposer', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3496,
  serialized_end=3574,
)
# Generated protobuf descriptor for the `pb.AggregatedVote` message:
# an embedded vote-data message plus bytes signature and participation bitfield.
_AGGREGATEDVOTE = _descriptor.Descriptor(
  name='AggregatedVote',
  full_name='pb.AggregatedVote',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Data', full_name='pb.AggregatedVote.Data', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Signature', full_name='pb.AggregatedVote.Signature', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Participation', full_name='pb.AggregatedVote.Participation', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3576,
  serialized_end=3662,
)
# Generated protobuf descriptor for the `pb.ActiveProposal` message:
# embedded data message, bytes participation bitfield, uint64 start epoch,
# and a bool queued flag (type=8).
_ACTIVEPROPOSAL = _descriptor.Descriptor(
  name='ActiveProposal',
  full_name='pb.ActiveProposal',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Data', full_name='pb.ActiveProposal.Data', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Participation', full_name='pb.ActiveProposal.Participation', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='StartEpoch', full_name='pb.ActiveProposal.StartEpoch', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Queued', full_name='pb.ActiveProposal.Queued', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3664,
  serialized_end=3767,
)
# Generated protobuf descriptor for the `pb.ShardBlock` message:
# two embedded messages, a header and a body.
_SHARDBLOCK = _descriptor.Descriptor(
  name='ShardBlock',
  full_name='pb.ShardBlock',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Header', full_name='pb.ShardBlock.Header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Body', full_name='pb.ShardBlock.Body', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3769,
  serialized_end=3853,
)
# Generated protobuf descriptor for the `pb.ShardBlockHeader` message:
# five bytes fields (hashes/roots/signature) plus a uint64 slot.
_SHARDBLOCKHEADER = _descriptor.Descriptor(
  name='ShardBlockHeader',
  full_name='pb.ShardBlockHeader',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='PreviousBlockHash', full_name='pb.ShardBlockHeader.PreviousBlockHash', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Slot', full_name='pb.ShardBlockHeader.Slot', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Signature', full_name='pb.ShardBlockHeader.Signature', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='StateRoot', full_name='pb.ShardBlockHeader.StateRoot', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='TransactionRoot', full_name='pb.ShardBlockHeader.TransactionRoot', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='FinalizedBeaconHash', full_name='pb.ShardBlockHeader.FinalizedBeaconHash', index=5,
      number=6, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3856,
  serialized_end=4007,
)
# Generated protobuf descriptor for the `pb.ShardBlockBody` message:
# a single repeated message field of transactions.
_SHARDBLOCKBODY = _descriptor.Descriptor(
  name='ShardBlockBody',
  full_name='pb.ShardBlockBody',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Transactions', full_name='pb.ShardBlockBody.Transactions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4009,
  serialized_end=4069,
)
# Generated protobuf descriptor for the `pb.ShardTransaction` message:
# a single opaque bytes payload.
_SHARDTRANSACTION = _descriptor.Descriptor(
  name='ShardTransaction',
  full_name='pb.ShardTransaction',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='TransactionData', full_name='pb.ShardTransaction.TransactionData', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4071,
  serialized_end=4114,
)
# Generated protobuf descriptor for the `pb.VerificationWitness` message:
# key/value/bitfield bytes, repeated witness hashes, and a uint32 level.
_VERIFICATIONWITNESS = _descriptor.Descriptor(
  name='VerificationWitness',
  full_name='pb.VerificationWitness',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Key', full_name='pb.VerificationWitness.Key', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Value', full_name='pb.VerificationWitness.Value', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='WitnessBitfield', full_name='pb.VerificationWitness.WitnessBitfield', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='WitnessHashes', full_name='pb.VerificationWitness.WitnessHashes', index=3,
      number=4, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LastLevel', full_name='pb.VerificationWitness.LastLevel', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4116,
  serialized_end=4232,
)
# Machine-generated protobuf Descriptor for pb.UpdateWitness (protoc output).
# Do not hand-edit: regenerate from the .proto instead.
_UPDATEWITNESS = _descriptor.Descriptor(
  name='UpdateWitness',
  full_name='pb.UpdateWitness',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Key', full_name='pb.UpdateWitness.Key', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='OldValue', full_name='pb.UpdateWitness.OldValue', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='NewValue', full_name='pb.UpdateWitness.NewValue', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='WitnessBitfield', full_name='pb.UpdateWitness.WitnessBitfield', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='WitnessHashes', full_name='pb.UpdateWitness.WitnessHashes', index=4,
      number=5, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LastLevel', full_name='pb.UpdateWitness.LastLevel', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4235,
  serialized_end=4366,
)
# Machine-generated protobuf Descriptor for pb.TransactionPackage (protoc output).
# Do not hand-edit: regenerate from the .proto instead.
_TRANSACTIONPACKAGE = _descriptor.Descriptor(
  name='TransactionPackage',
  full_name='pb.TransactionPackage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='VerificationWitnesses', full_name='pb.TransactionPackage.VerificationWitnesses', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='UpdateWitnesses', full_name='pb.TransactionPackage.UpdateWitnesses', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Transactions', full_name='pb.TransactionPackage.Transactions', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='StartRoot', full_name='pb.TransactionPackage.StartRoot', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='EndRoot', full_name='pb.TransactionPackage.EndRoot', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4369,
  serialized_end=4569,
)
# Machine-generated descriptor wiring (protoc output) — do not hand-edit.
# Link message-typed fields to their target Descriptors.
_PROPOSERSLASHING.fields_by_name['ProposalData1'].message_type = _PROPOSALSIGNEDDATA
_PROPOSERSLASHING.fields_by_name['ProposalData2'].message_type = _PROPOSALSIGNEDDATA
_SLASHABLEVOTEDATA.fields_by_name['Data'].message_type = _ATTESTATIONDATA
_CASPERSLASHING.fields_by_name['Vote0'].message_type = _SLASHABLEVOTEDATA
_CASPERSLASHING.fields_by_name['Vote1'].message_type = _SLASHABLEVOTEDATA
_ATTESTATIONDATAANDCUSTODYBIT.fields_by_name['Data'].message_type = _ATTESTATIONDATA
_ATTESTATION.fields_by_name['Data'].message_type = _ATTESTATIONDATA
_DEPOSIT.fields_by_name['Parameters'].message_type = _DEPOSITPARAMETERS
_BLOCK.fields_by_name['Header'].message_type = _BLOCKHEADER
_BLOCK.fields_by_name['Body'].message_type = _BLOCKBODY
_BLOCKBODY.fields_by_name['Attestations'].message_type = _ATTESTATION
_BLOCKBODY.fields_by_name['ProposerSlashings'].message_type = _PROPOSERSLASHING
_BLOCKBODY.fields_by_name['CasperSlashings'].message_type = _CASPERSLASHING
_BLOCKBODY.fields_by_name['Deposits'].message_type = _DEPOSIT
_BLOCKBODY.fields_by_name['Exits'].message_type = _EXIT
_BLOCKBODY.fields_by_name['Votes'].message_type = _AGGREGATEDVOTE
_SHARDCOMMITTEESFORSLOT.fields_by_name['Committees'].message_type = _SHARDCOMMITTEE
_PENDINGATTESTATION.fields_by_name['Data'].message_type = _ATTESTATIONDATA
_STATE.fields_by_name['ForkData'].message_type = _FORKDATA
_STATE.fields_by_name['ValidatorRegistry'].message_type = _VALIDATOR
_STATE.fields_by_name['ShardCommittees'].message_type = _SHARDCOMMITTEESFORSLOT
_STATE.fields_by_name['LatestCrosslinks'].message_type = _CROSSLINK
_STATE.fields_by_name['PreviousCrosslinks'].message_type = _CROSSLINK
_STATE.fields_by_name['CurrentEpochAttestations'].message_type = _PENDINGATTESTATION
_STATE.fields_by_name['PreviousEpochAttestations'].message_type = _PENDINGATTESTATION
_STATE.fields_by_name['Proposals'].message_type = _ACTIVEPROPOSAL
_STATE.fields_by_name['PendingVotes'].message_type = _AGGREGATEDVOTE
_AGGREGATEDVOTE.fields_by_name['Data'].message_type = _VOTEDATA
_ACTIVEPROPOSAL.fields_by_name['Data'].message_type = _VOTEDATA
_SHARDBLOCK.fields_by_name['Header'].message_type = _SHARDBLOCKHEADER
_SHARDBLOCK.fields_by_name['Body'].message_type = _SHARDBLOCKBODY
_SHARDBLOCKBODY.fields_by_name['Transactions'].message_type = _SHARDTRANSACTION
_TRANSACTIONPACKAGE.fields_by_name['VerificationWitnesses'].message_type = _VERIFICATIONWITNESS
_TRANSACTIONPACKAGE.fields_by_name['UpdateWitnesses'].message_type = _UPDATEWITNESS
_TRANSACTIONPACKAGE.fields_by_name['Transactions'].message_type = _SHARDTRANSACTION
# Register every message Descriptor under its .proto type name.
DESCRIPTOR.message_types_by_name['SlotNumberResponse'] = _SLOTNUMBERRESPONSE
DESCRIPTOR.message_types_by_name['ConnectMessage'] = _CONNECTMESSAGE
DESCRIPTOR.message_types_by_name['ListeningAddressesResponse'] = _LISTENINGADDRESSESRESPONSE
DESCRIPTOR.message_types_by_name['ProposalSignedData'] = _PROPOSALSIGNEDDATA
DESCRIPTOR.message_types_by_name['ProposerSlashing'] = _PROPOSERSLASHING
DESCRIPTOR.message_types_by_name['SlashableVoteData'] = _SLASHABLEVOTEDATA
DESCRIPTOR.message_types_by_name['CasperSlashing'] = _CASPERSLASHING
DESCRIPTOR.message_types_by_name['AttestationData'] = _ATTESTATIONDATA
DESCRIPTOR.message_types_by_name['AttestationDataAndCustodyBit'] = _ATTESTATIONDATAANDCUSTODYBIT
DESCRIPTOR.message_types_by_name['Attestation'] = _ATTESTATION
DESCRIPTOR.message_types_by_name['DepositParameters'] = _DEPOSITPARAMETERS
DESCRIPTOR.message_types_by_name['Deposit'] = _DEPOSIT
DESCRIPTOR.message_types_by_name['Exit'] = _EXIT
DESCRIPTOR.message_types_by_name['Block'] = _BLOCK
DESCRIPTOR.message_types_by_name['BlockHeader'] = _BLOCKHEADER
DESCRIPTOR.message_types_by_name['BlockBody'] = _BLOCKBODY
DESCRIPTOR.message_types_by_name['ForkData'] = _FORKDATA
DESCRIPTOR.message_types_by_name['Validator'] = _VALIDATOR
DESCRIPTOR.message_types_by_name['ShardCommittee'] = _SHARDCOMMITTEE
DESCRIPTOR.message_types_by_name['ShardCommitteesForSlot'] = _SHARDCOMMITTEESFORSLOT
DESCRIPTOR.message_types_by_name['PersistentCommitteesForSlot'] = _PERSISTENTCOMMITTEESFORSLOT
DESCRIPTOR.message_types_by_name['Crosslink'] = _CROSSLINK
DESCRIPTOR.message_types_by_name['PendingAttestation'] = _PENDINGATTESTATION
DESCRIPTOR.message_types_by_name['State'] = _STATE
DESCRIPTOR.message_types_by_name['ValidatorRegistryDeltaBlock'] = _VALIDATORREGISTRYDELTABLOCK
DESCRIPTOR.message_types_by_name['AttestationRequest'] = _ATTESTATIONREQUEST
DESCRIPTOR.message_types_by_name['VoteData'] = _VOTEDATA
DESCRIPTOR.message_types_by_name['AggregatedVote'] = _AGGREGATEDVOTE
DESCRIPTOR.message_types_by_name['ActiveProposal'] = _ACTIVEPROPOSAL
DESCRIPTOR.message_types_by_name['ShardBlock'] = _SHARDBLOCK
DESCRIPTOR.message_types_by_name['ShardBlockHeader'] = _SHARDBLOCKHEADER
DESCRIPTOR.message_types_by_name['ShardBlockBody'] = _SHARDBLOCKBODY
DESCRIPTOR.message_types_by_name['ShardTransaction'] = _SHARDTRANSACTION
DESCRIPTOR.message_types_by_name['VerificationWitness'] = _VERIFICATIONWITNESS
DESCRIPTOR.message_types_by_name['UpdateWitness'] = _UPDATEWITNESS
DESCRIPTOR.message_types_by_name['TransactionPackage'] = _TRANSACTIONPACKAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Machine-generated concrete message classes (protoc output) — do not hand-edit.
# Each class is synthesized from its Descriptor and registered with the symbol database.
SlotNumberResponse = _reflection.GeneratedProtocolMessageType('SlotNumberResponse', (_message.Message,), {
  'DESCRIPTOR' : _SLOTNUMBERRESPONSE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.SlotNumberResponse)
  })
_sym_db.RegisterMessage(SlotNumberResponse)
ConnectMessage = _reflection.GeneratedProtocolMessageType('ConnectMessage', (_message.Message,), {
  'DESCRIPTOR' : _CONNECTMESSAGE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ConnectMessage)
  })
_sym_db.RegisterMessage(ConnectMessage)
ListeningAddressesResponse = _reflection.GeneratedProtocolMessageType('ListeningAddressesResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTENINGADDRESSESRESPONSE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ListeningAddressesResponse)
  })
_sym_db.RegisterMessage(ListeningAddressesResponse)
ProposalSignedData = _reflection.GeneratedProtocolMessageType('ProposalSignedData', (_message.Message,), {
  'DESCRIPTOR' : _PROPOSALSIGNEDDATA,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ProposalSignedData)
  })
_sym_db.RegisterMessage(ProposalSignedData)
ProposerSlashing = _reflection.GeneratedProtocolMessageType('ProposerSlashing', (_message.Message,), {
  'DESCRIPTOR' : _PROPOSERSLASHING,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ProposerSlashing)
  })
_sym_db.RegisterMessage(ProposerSlashing)
SlashableVoteData = _reflection.GeneratedProtocolMessageType('SlashableVoteData', (_message.Message,), {
  'DESCRIPTOR' : _SLASHABLEVOTEDATA,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.SlashableVoteData)
  })
_sym_db.RegisterMessage(SlashableVoteData)
CasperSlashing = _reflection.GeneratedProtocolMessageType('CasperSlashing', (_message.Message,), {
  'DESCRIPTOR' : _CASPERSLASHING,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.CasperSlashing)
  })
_sym_db.RegisterMessage(CasperSlashing)
AttestationData = _reflection.GeneratedProtocolMessageType('AttestationData', (_message.Message,), {
  'DESCRIPTOR' : _ATTESTATIONDATA,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.AttestationData)
  })
_sym_db.RegisterMessage(AttestationData)
AttestationDataAndCustodyBit = _reflection.GeneratedProtocolMessageType('AttestationDataAndCustodyBit', (_message.Message,), {
  'DESCRIPTOR' : _ATTESTATIONDATAANDCUSTODYBIT,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.AttestationDataAndCustodyBit)
  })
_sym_db.RegisterMessage(AttestationDataAndCustodyBit)
Attestation = _reflection.GeneratedProtocolMessageType('Attestation', (_message.Message,), {
  'DESCRIPTOR' : _ATTESTATION,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Attestation)
  })
_sym_db.RegisterMessage(Attestation)
DepositParameters = _reflection.GeneratedProtocolMessageType('DepositParameters', (_message.Message,), {
  'DESCRIPTOR' : _DEPOSITPARAMETERS,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.DepositParameters)
  })
_sym_db.RegisterMessage(DepositParameters)
Deposit = _reflection.GeneratedProtocolMessageType('Deposit', (_message.Message,), {
  'DESCRIPTOR' : _DEPOSIT,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Deposit)
  })
_sym_db.RegisterMessage(Deposit)
Exit = _reflection.GeneratedProtocolMessageType('Exit', (_message.Message,), {
  'DESCRIPTOR' : _EXIT,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Exit)
  })
_sym_db.RegisterMessage(Exit)
Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), {
  'DESCRIPTOR' : _BLOCK,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Block)
  })
_sym_db.RegisterMessage(Block)
BlockHeader = _reflection.GeneratedProtocolMessageType('BlockHeader', (_message.Message,), {
  'DESCRIPTOR' : _BLOCKHEADER,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.BlockHeader)
  })
_sym_db.RegisterMessage(BlockHeader)
BlockBody = _reflection.GeneratedProtocolMessageType('BlockBody', (_message.Message,), {
  'DESCRIPTOR' : _BLOCKBODY,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.BlockBody)
  })
_sym_db.RegisterMessage(BlockBody)
ForkData = _reflection.GeneratedProtocolMessageType('ForkData', (_message.Message,), {
  'DESCRIPTOR' : _FORKDATA,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ForkData)
  })
_sym_db.RegisterMessage(ForkData)
Validator = _reflection.GeneratedProtocolMessageType('Validator', (_message.Message,), {
  'DESCRIPTOR' : _VALIDATOR,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Validator)
  })
_sym_db.RegisterMessage(Validator)
ShardCommittee = _reflection.GeneratedProtocolMessageType('ShardCommittee', (_message.Message,), {
  'DESCRIPTOR' : _SHARDCOMMITTEE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardCommittee)
  })
_sym_db.RegisterMessage(ShardCommittee)
ShardCommitteesForSlot = _reflection.GeneratedProtocolMessageType('ShardCommitteesForSlot', (_message.Message,), {
  'DESCRIPTOR' : _SHARDCOMMITTEESFORSLOT,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardCommitteesForSlot)
  })
_sym_db.RegisterMessage(ShardCommitteesForSlot)
PersistentCommitteesForSlot = _reflection.GeneratedProtocolMessageType('PersistentCommitteesForSlot', (_message.Message,), {
  'DESCRIPTOR' : _PERSISTENTCOMMITTEESFORSLOT,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.PersistentCommitteesForSlot)
  })
_sym_db.RegisterMessage(PersistentCommitteesForSlot)
Crosslink = _reflection.GeneratedProtocolMessageType('Crosslink', (_message.Message,), {
  'DESCRIPTOR' : _CROSSLINK,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.Crosslink)
  })
_sym_db.RegisterMessage(Crosslink)
PendingAttestation = _reflection.GeneratedProtocolMessageType('PendingAttestation', (_message.Message,), {
  'DESCRIPTOR' : _PENDINGATTESTATION,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.PendingAttestation)
  })
_sym_db.RegisterMessage(PendingAttestation)
State = _reflection.GeneratedProtocolMessageType('State', (_message.Message,), {
  'DESCRIPTOR' : _STATE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.State)
  })
_sym_db.RegisterMessage(State)
ValidatorRegistryDeltaBlock = _reflection.GeneratedProtocolMessageType('ValidatorRegistryDeltaBlock', (_message.Message,), {
  'DESCRIPTOR' : _VALIDATORREGISTRYDELTABLOCK,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ValidatorRegistryDeltaBlock)
  })
_sym_db.RegisterMessage(ValidatorRegistryDeltaBlock)
AttestationRequest = _reflection.GeneratedProtocolMessageType('AttestationRequest', (_message.Message,), {
  'DESCRIPTOR' : _ATTESTATIONREQUEST,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.AttestationRequest)
  })
_sym_db.RegisterMessage(AttestationRequest)
VoteData = _reflection.GeneratedProtocolMessageType('VoteData', (_message.Message,), {
  'DESCRIPTOR' : _VOTEDATA,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.VoteData)
  })
_sym_db.RegisterMessage(VoteData)
AggregatedVote = _reflection.GeneratedProtocolMessageType('AggregatedVote', (_message.Message,), {
  'DESCRIPTOR' : _AGGREGATEDVOTE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.AggregatedVote)
  })
_sym_db.RegisterMessage(AggregatedVote)
ActiveProposal = _reflection.GeneratedProtocolMessageType('ActiveProposal', (_message.Message,), {
  'DESCRIPTOR' : _ACTIVEPROPOSAL,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ActiveProposal)
  })
_sym_db.RegisterMessage(ActiveProposal)
ShardBlock = _reflection.GeneratedProtocolMessageType('ShardBlock', (_message.Message,), {
  'DESCRIPTOR' : _SHARDBLOCK,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardBlock)
  })
_sym_db.RegisterMessage(ShardBlock)
ShardBlockHeader = _reflection.GeneratedProtocolMessageType('ShardBlockHeader', (_message.Message,), {
  'DESCRIPTOR' : _SHARDBLOCKHEADER,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardBlockHeader)
  })
_sym_db.RegisterMessage(ShardBlockHeader)
ShardBlockBody = _reflection.GeneratedProtocolMessageType('ShardBlockBody', (_message.Message,), {
  'DESCRIPTOR' : _SHARDBLOCKBODY,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardBlockBody)
  })
_sym_db.RegisterMessage(ShardBlockBody)
ShardTransaction = _reflection.GeneratedProtocolMessageType('ShardTransaction', (_message.Message,), {
  'DESCRIPTOR' : _SHARDTRANSACTION,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.ShardTransaction)
  })
_sym_db.RegisterMessage(ShardTransaction)
VerificationWitness = _reflection.GeneratedProtocolMessageType('VerificationWitness', (_message.Message,), {
  'DESCRIPTOR' : _VERIFICATIONWITNESS,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.VerificationWitness)
  })
_sym_db.RegisterMessage(VerificationWitness)
UpdateWitness = _reflection.GeneratedProtocolMessageType('UpdateWitness', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEWITNESS,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.UpdateWitness)
  })
_sym_db.RegisterMessage(UpdateWitness)
TransactionPackage = _reflection.GeneratedProtocolMessageType('TransactionPackage', (_message.Message,), {
  'DESCRIPTOR' : _TRANSACTIONPACKAGE,
  '__module__' : 'common_pb2'
  # @@protoc_insertion_point(class_scope:pb.TransactionPackage)
  })
_sym_db.RegisterMessage(TransactionPackage)
# @@protoc_insertion_point(module_scope)
|
# terrascript/resource/sacloud/sakuracloud.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:06 UTC)
import terrascript
# Machine-generated Terraform resource stubs for the sakuracloud provider
# (emitted by tools/makecode.py) — do not hand-edit; regenerate instead.
# Each class is an empty marker subclass: terrascript dispatches on the
# class NAME to produce the matching Terraform resource block.
class sakuracloud_archive(terrascript.Resource):
    pass
class sakuracloud_archive_share(terrascript.Resource):
    pass
class sakuracloud_auto_backup(terrascript.Resource):
    pass
class sakuracloud_bridge(terrascript.Resource):
    pass
class sakuracloud_cdrom(terrascript.Resource):
    pass
class sakuracloud_container_registry(terrascript.Resource):
    pass
class sakuracloud_database(terrascript.Resource):
    pass
class sakuracloud_database_read_replica(terrascript.Resource):
    pass
class sakuracloud_disk(terrascript.Resource):
    pass
class sakuracloud_dns(terrascript.Resource):
    pass
class sakuracloud_dns_record(terrascript.Resource):
    pass
class sakuracloud_enhanced_db(terrascript.Resource):
    pass
class sakuracloud_esme(terrascript.Resource):
    pass
class sakuracloud_gslb(terrascript.Resource):
    pass
class sakuracloud_icon(terrascript.Resource):
    pass
class sakuracloud_internet(terrascript.Resource):
    pass
class sakuracloud_ipv4_ptr(terrascript.Resource):
    pass
class sakuracloud_load_balancer(terrascript.Resource):
    pass
class sakuracloud_local_router(terrascript.Resource):
    pass
class sakuracloud_mobile_gateway(terrascript.Resource):
    pass
class sakuracloud_nfs(terrascript.Resource):
    pass
class sakuracloud_note(terrascript.Resource):
    pass
class sakuracloud_packet_filter(terrascript.Resource):
    pass
class sakuracloud_packet_filter_rules(terrascript.Resource):
    pass
class sakuracloud_private_host(terrascript.Resource):
    pass
class sakuracloud_proxylb(terrascript.Resource):
    pass
class sakuracloud_proxylb_acme(terrascript.Resource):
    pass
class sakuracloud_server(terrascript.Resource):
    pass
class sakuracloud_sim(terrascript.Resource):
    pass
class sakuracloud_simple_monitor(terrascript.Resource):
    pass
class sakuracloud_ssh_key(terrascript.Resource):
    pass
class sakuracloud_ssh_key_gen(terrascript.Resource):
    pass
class sakuracloud_subnet(terrascript.Resource):
    pass
class sakuracloud_switch(terrascript.Resource):
    pass
class sakuracloud_vpc_router(terrascript.Resource):
    pass
class sakuracloud_webaccel_certificate(terrascript.Resource):
    pass
# Public API of this generated module (one entry per resource class above).
__all__ = [
    "sakuracloud_archive",
    "sakuracloud_archive_share",
    "sakuracloud_auto_backup",
    "sakuracloud_bridge",
    "sakuracloud_cdrom",
    "sakuracloud_container_registry",
    "sakuracloud_database",
    "sakuracloud_database_read_replica",
    "sakuracloud_disk",
    "sakuracloud_dns",
    "sakuracloud_dns_record",
    "sakuracloud_enhanced_db",
    "sakuracloud_esme",
    "sakuracloud_gslb",
    "sakuracloud_icon",
    "sakuracloud_internet",
    "sakuracloud_ipv4_ptr",
    "sakuracloud_load_balancer",
    "sakuracloud_local_router",
    "sakuracloud_mobile_gateway",
    "sakuracloud_nfs",
    "sakuracloud_note",
    "sakuracloud_packet_filter",
    "sakuracloud_packet_filter_rules",
    "sakuracloud_private_host",
    "sakuracloud_proxylb",
    "sakuracloud_proxylb_acme",
    "sakuracloud_server",
    "sakuracloud_sim",
    "sakuracloud_simple_monitor",
    "sakuracloud_ssh_key",
    "sakuracloud_ssh_key_gen",
    "sakuracloud_subnet",
    "sakuracloud_switch",
    "sakuracloud_vpc_router",
    "sakuracloud_webaccel_certificate",
]
|
import argparse
import teuthology.worker
def main():
    """Entry point: parse the CLI options and hand them to the worker loop."""
    options = parse_args()
    teuthology.worker.main(options)
def parse_args():
    """Declare and parse the command-line options for the job worker.

    Returns the parsed ``argparse.Namespace``; ``--archive-dir``,
    ``--log-dir`` and ``--tube`` are mandatory.
    """
    parser = argparse.ArgumentParser(description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
""")
    parser.add_argument('-v', '--verbose',
                        action='store_true', default=None,
                        help='be more verbose')
    parser.add_argument('--archive-dir',
                        metavar='DIR', required=True,
                        help='path under which to archive results')
    parser.add_argument('-l', '--log-dir',
                        required=True,
                        help='path in which to store logs')
    parser.add_argument('-t', '--tube',
                        required=True,
                        help='which beanstalk tube to read jobs from')
    return parser.parse_args()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 09:02:56 2019
@author: helpthx
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy import ndimage
from pylab import imread,imshow,figure,show,subplot
from numpy import reshape,uint8,flipud
from sklearn.cluster import MiniBatchKMeans
# NOTE(review): the script uses hard-coded absolute paths — it only runs on
# the author's machine; parameterize the paths before reuse.
print('Versão da OpenCV: ', cv2.__version__, end='\n\n')
# Importing the source image (grayscale)
img = cv2.imread('/home/helpthx/Documents/PROJETO_2/kiss512x512.jpg', cv2.IMREAD_GRAYSCALE)
plt.gray()
plt.imshow(img)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/base512x512.png', img)
img.shape  # bare expression — a no-op outside a REPL/notebook
# Function to perform sub-sampling of the image
def simple_subsampling(img, n):
    """Halve *img* repeatedly by keeping every second row/column.

    Returns a list of n + 1 progressively subsampled arrays, from one
    halving down to n + 1 halvings of the original.
    """
    halved = []
    current = img
    for _ in range(n + 1):
        current = current[1::2, 1::2]
        halved.append(current)
    return halved
# Calling the function (n=4 yields 5 progressively halved images)
lista_imagens = simple_subsampling(img, 4)
lista_imagens[0].shape  # bare expressions below are no-ops outside a REPL
# (256, 256)
lista_imagens[1].shape
# (128, 128)
lista_imagens[2].shape
# (64, 64)
lista_imagens[3].shape
# (32, 32)
lista_imagens[4].shape
# (16, 16)
# Saving images
plt.imshow(lista_imagens[0])
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/sub_samplu256x.png',
           lista_imagens[0])
plt.imshow(lista_imagens[1])
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/sub_samplu128x.png',
           lista_imagens[1])
plt.imshow(lista_imagens[2])
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/sub_samplu64x.png',
           lista_imagens[2])
plt.imshow(lista_imagens[3])
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/sub_samplu32x.png',
           lista_imagens[3])
# Nearest-neighbor (nn) interpolation
def nn_interpolate(A, new_size):
    """Vectorized nearest-neighbor interpolation of a 2-D array.

    Parameters
    ----------
    A : 2-D ndarray to resample.
    new_size : int or (rows, cols) — target size; a scalar is broadcast
        to both axes (as the callers do with 512).

    Returns the resampled array of shape ``new_size``.
    """
    old_size = A.shape
    # np.float64 ratios also make `range(...) / ratio` broadcast below.
    row_ratio, col_ratio = np.array(new_size) / np.array(old_size)
    # Source row index for each destination row.
    row_idx = (np.ceil(range(1, 1 + int(old_size[0] * row_ratio)) / row_ratio) - 1).astype(int)
    # Source column index for each destination column.
    col_idx = (np.ceil(range(1, 1 + int(old_size[1] * col_ratio)) / col_ratio) - 1).astype(int)
    # BUG FIX: the original did A[:, row_idx][col_idx, :], applying the row
    # indices to the column axis and vice versa — only accidentally correct
    # for square inputs. Rows select axis 0, columns axis 1.
    final_matrix = A[row_idx, :][:, col_idx]
    return final_matrix
# Calling the function (upscale every subsample back to 512x512)
nn_iterpolation256 = nn_interpolate(lista_imagens[0], 512)
nn_iterpolation128 = nn_interpolate(lista_imagens[1], 512)
nn_iterpolation64 = nn_interpolate(lista_imagens[2], 512)
nn_iterpolation32 = nn_interpolate(lista_imagens[3], 512)
# Saving files
plt.imshow(nn_iterpolation256)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/nn_inteporlation256x.png',
           nn_iterpolation256)
plt.imshow(nn_iterpolation128)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/nn_inteporlation128x.png',
           nn_iterpolation128)
plt.imshow(nn_iterpolation64)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/nn_inteporlation64x.png',
           nn_iterpolation64)
plt.imshow(nn_iterpolation32)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/nn_inteporlation32x.png',
           nn_iterpolation32)
# Bilinear interpolation
# Bilinear interpolation
def bilinear_interpolate(image):
    """Bilinearly upscale a grayscale image to a fixed 512x512, pixel by pixel.

    The input is expanded to 3 channels so the same weights can be applied
    per channel, then collapsed back to grayscale on return.
    NOTE(review): the output size is hard-coded to 512; border rows/columns
    of `temp` are never written and stay zero (black).
    """
    image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
    (h, w, channels) = image.shape
    h2 = 512  # fixed target height
    w2 = 512  # fixed target width
    temp = np.zeros((h2, w2, 3), np.uint8)
    x_ratio = float((w - 1)) / w2;
    y_ratio = float((h - 1)) / h2;
    for i in range(1, h2 - 1):
        for j in range(1 ,w2 - 1):
            # Integer source coordinates and fractional offsets.
            # NOTE(review): `x` is derived from the width ratio but used as the
            # ROW index below, and the result is written transposed as
            # temp[j, i] — the two transpositions appear to cancel for square
            # images; confirm before using on non-square input.
            x = int(x_ratio * j)
            y = int(y_ratio * i)
            x_diff = (x_ratio * j) - x
            y_diff = (y_ratio * i) - y
            # Four neighboring pixels (masking keeps values in 0..255).
            a = image[x, y] & 0xFF
            b = image[x + 1, y] & 0xFF
            c = image[x, y + 1] & 0xFF
            d = image[x + 1, y + 1] & 0xFF
            # Standard bilinear weights per channel.
            blue = a[0] * (1 - x_diff) * (1 - y_diff) + b[0] * (x_diff) * (1-y_diff) + c[0] * y_diff * (1 - x_diff) + d[0] * (x_diff * y_diff)
            green = a[1] * (1 - x_diff) * (1 - y_diff) + b[1] * (x_diff) * (1-y_diff) + c[1] * y_diff * (1 - x_diff) + d[1] * (x_diff * y_diff)
            red = a[2] * (1 - x_diff) * (1 - y_diff) + b[2] * (x_diff) * (1-y_diff) + c[2] * y_diff * (1 - x_diff) + d[2] * (x_diff * y_diff)
            temp[j, i] = (blue, green, red)
    return cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
# Calling the functions
bl_iterpolation256 = bilinear_interpolate(lista_imagens[0])
bl_iterpolation128 = bilinear_interpolate(lista_imagens[1])
bl_iterpolation64 = bilinear_interpolate(lista_imagens[2])
bl_iterpolation32 = bilinear_interpolate(lista_imagens[3])
# Saving files
plt.imshow(bl_iterpolation256)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/bl_inteporlation256x.png',
           bl_iterpolation256)
plt.imshow(bl_iterpolation128)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/bl_inteporlation128x.png',
           bl_iterpolation128)
plt.imshow(bl_iterpolation64)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/bl_inteporlation64x.png',
           bl_iterpolation64)
plt.imshow(bl_iterpolation32)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/bl_inteporlation32x.png',
           bl_iterpolation32)
# Quantization of gray levels
image = cv2.imread('/home/helpthx/Documents/PROJETO_2/kiss512x512.jpg',
                   cv2.IMREAD_GRAYSCALE)
print(image.shape)
def quantizador_kmeans(image, n):
    """Quantize a grayscale image to *n* gray levels with MiniBatchKMeans.

    Returns a tuple ``(quantized, original)`` — the image with every pixel
    replaced by its cluster center, and the (unchanged) input image.
    """
    height, width = image.shape[:2]
    # One feature per pixel: its gray level.
    pixels = image.reshape((image.shape[0] * image.shape[1], 1))
    clusterer = MiniBatchKMeans(n_clusters=n)
    labels = clusterer.fit_predict(pixels)
    print(labels)  # debug trace kept from the original
    # Map every pixel to its cluster center, then restore the 2-D shape.
    quantized = clusterer.cluster_centers_.astype("uint8")[labels]
    return quantized.reshape((height, width)), pixels.reshape((height, width))
# Quantize the image to 8, 4, 2 and 1 gray levels.
quant_8, image_8 = quantizador_kmeans(image, 8)
quant_4, image_4 = quantizador_kmeans(image, 4)
quant_2, image_2 = quantizador_kmeans(image, 2)
quant_1, image_1 = quantizador_kmeans(image, 1)
# Saving files
plt.gray()
plt.imshow(quant_8)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/quant_8.png',
           quant_8)
plt.imshow(quant_4)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/quant_4.png',
           quant_4)
plt.imshow(quant_2)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/quant_2.png',
           quant_2)
plt.imshow(quant_1)
plt.show()
plt.imsave('/home/helpthx/Documents/PROJETO_2/quant_1.png',
           quant_1)
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six import iteritems, string_types
from numbers import Number
def environment(parameters, exclude=()):
    """Render a dict of parameters as newline-separated KEY=value lines.

    Keys are emitted in sorted order; keys listed in *exclude* are skipped.
    Raises AnsibleFilterError if *parameters* is not a dict.

    BUG FIXES vs. the original: the mutable default argument ``exclude=[]``
    is now an immutable tuple, and excluded keys are skipped instead of
    being ``pop()``-ed out of the caller's dict (the input is left intact).
    """
    if not isinstance(parameters, dict):
        raise AnsibleFilterError('manala_environment expects a dict but was given a %s' % type(parameters))
    excluded = set(exclude)
    lines = []
    for key in sorted(parameters):
        if key in excluded:
            continue
        parameter = environment_parameter(parameters, key)
        if parameter:
            lines.append(parameter)
    return '\n'.join(lines)
def environment_parameter(parameters, key, required=False, default=None, comment=False):
    """Render a single parameter as a KEY=value line.

    Strings are quoted (KEY="value"), numbers are bare (KEY=value). When the
    key is absent: with ``comment=True`` the rendered default is prefixed
    with ``#``; with a string *comment* that string replaces the output.
    Raises AnsibleFilterError on bad argument types, on a missing required
    key, and on a value of an unsupported type.
    """
    if not isinstance(parameters, dict):
        raise AnsibleFilterError('manala_environment_parameter parameters expects a dict but was given a %s' % type(parameters))
    if not isinstance(key, string_types):
        raise AnsibleFilterError('manala_environment_parameter key expects a string but was given a %s' % type(key))
    if required and key not in parameters:
        raise AnsibleFilterError('manala_environment_parameter requires a value for key %s' % key)
    result = ''
    value = parameters.get(key, default)
    if isinstance(value, string_types):
        result = '%s="%s"' % (key, value)
    elif isinstance(value, Number):
        result = '%s=%s' % (key, value)
    elif value is not None:
        # BUG FIX: the original constructed this error but never raised it,
        # silently emitting nothing for unsupported value types. None is
        # still tolerated so the absent-key/comment path below keeps working.
        raise AnsibleFilterError('manala_environment_parameter value of an unknown type %s' % type(value))
    if key not in parameters:
        if comment is True:
            result = '#' + result.replace('\n', '\n#')
        elif isinstance(comment, string_types):
            result = comment
    return result
class FilterModule(object):
    """Expose the manala environment helpers as Jinja2 filters."""

    def filters(self):
        """Return the mapping of filter names to their callables."""
        return {
            'manala_environment': environment,
            'manala_environment_parameter': environment_parameter,
        }
|
import io
import os
import sys
import copy
import json
import time
import logging
import datetime
import platform
from collections import OrderedDict
from bdbag import bdbag_api as bdb
from deriva.core import get_credential, format_credential, urlquote, format_exception, DEFAULT_SESSION_CONFIG, \
__version__ as VERSION
from deriva.core.utils.version_utils import get_installed_version
from deriva.core.ermrest_model import Model
from deriva.core.deriva_server import DerivaServer
from deriva.core.ermrest_catalog import ErmrestCatalog, _clone_state_url as CLONE_STATE_URL
from deriva.core.hatrac_store import HatracStore
from deriva.transfer import DerivaUpload, DerivaUploadError, DerivaUploadConfigurationError, GenericUploader
from deriva.transfer.restore import DerivaRestoreError, DerivaRestoreConfigurationError, \
DerivaRestoreAuthenticationError, DerivaRestoreAuthorizationError
class DerivaRestore:
    """
    Restore a DERIVA catalog from a bag archive or directory.
    Core restore logic re-purposed from ErmrestCatalog.clone_catalog().
    """
    # Annotation key used to checkpoint per-table restore progress on the target catalog.
    RESTORE_STATE_URL = "tag:isrd.isi.edu,2019:restore-status"
    # Input layout: table data at records/<schema>/<table>.json, assets under "assets".
    BASE_DATA_INPUT_PATH = os.path.join("records", "{}", "{}.json")
    BASE_ASSETS_INPUT_PATH = "assets"
    def __init__(self, *args, **kwargs):
        """
        Initialize the restore context.

        args[0] is a dict of target server parameters ('host', 'protocol',
        'catalog_id', 'session'); behavior toggles, credentials, and config
        come in via keyword arguments.
        Raises DerivaRestoreConfigurationError on missing or invalid configuration.
        """
        self.server_args = args[0]
        self.hostname = None
        self.dst_catalog = None
        self.cancelled = False
        self.input_path = kwargs.get("input_path")
        self.exclude_schemas = kwargs.get("exclude_schemas", list())
        # The "no_*" kwargs are negative flags; attributes store the positive sense.
        self.restore_data = not kwargs.get("no_data", False)
        self.data_chunk_size = kwargs.get("data_chunk_size", 10000)
        self.restore_annotations = not kwargs.get("no_annotations", False)
        self.restore_policy = not kwargs.get("no_policy", False)
        self.restore_assets = not kwargs.get("no_assets", False)
        # NOTE(review): weak_bag_validation defaults to True, so strict validation is OFF
        # unless the caller passes weak_bag_validation=False -- confirm this is intended.
        self.strict_bag_validation = not kwargs.get("weak_bag_validation", True)
        self.no_bag_materialize = kwargs.get("no_bag_materialize", False)
        self.upload_config = kwargs.get("asset_config")
        self.truncate_after = True
        self.envars = kwargs.get("envars", dict())
        self.config = kwargs.get("config")
        self.credentials = kwargs.get("credentials", dict())
        config_file = kwargs.get("config_file")
        credential_file = kwargs.get("credential_file")
        info = "%s v%s [Python %s, %s]" % (
            self.__class__.__name__, get_installed_version(VERSION),
            platform.python_version(), platform.platform(aliased=True))
        logging.info("Initializing: %s" % info)
        if not self.server_args:
            raise DerivaRestoreConfigurationError("Target server not specified!")
        # server variable initialization
        self.hostname = self.server_args.get('host', '')
        if not self.hostname:
            raise DerivaRestoreConfigurationError("Host not specified!")
        protocol = self.server_args.get('protocol', 'https')
        self.server_url = protocol + "://" + self.hostname
        self.catalog_id = self.server_args.get("catalog_id",)
        self.session_config = self.server_args.get('session', DEFAULT_SESSION_CONFIG.copy())
        self.session_config["allow_retry_on_all_methods"] = True
        # credential initialization
        token = kwargs.get("token")
        oauth2_token = kwargs.get("oauth2_token")
        username = kwargs.get("username")
        password = kwargs.get("password")
        # Explicitly supplied credentials win; otherwise fall back to the local credential store.
        if token or oauth2_token or (username and password):
            self.credentials = format_credential(token=token,
                                                 oauth2_token=oauth2_token,
                                                 username=username,
                                                 password=password)
        else:
            self.credentials = get_credential(self.hostname, credential_file)
        # destination catalog initialization
        self.server = DerivaServer(protocol,
                                   self.hostname,
                                   self.credentials,
                                   caching=True,
                                   session_config=self.session_config)
        self.server.dcctx["cid"] = kwargs.get("dcctx_cid", "api/" + self.__class__.__name__)
        # process config file
        if config_file:
            try:
                # NOTE(review): read_config is not among this module's visible imports --
                # verify it is imported (e.g. from deriva.core) or this path raises NameError.
                self.config = read_config(config_file)
            except Exception as e:
                raise DerivaRestoreConfigurationError(e)
    def set_config(self, config):
        """Replace the current configuration object."""
        self.config = config
    def set_credentials(self, credentials):
        """Propagate new credentials to the catalog/store handles and cache them."""
        # NOTE(review): self.catalog and self.store are not assigned in __init__
        # (only self.dst_catalog is) -- confirm these are set elsewhere before use.
        self.catalog.set_credentials(credentials, self.hostname)
        self.store.set_credentials(credentials, self.hostname)
        self.credentials = credentials
    def prune_parts(self, dest):
        """Strip annotations and/or policy (acls, acl_bindings) from a prejson model
        fragment according to the restore_annotations/restore_policy flags."""
        if not self.restore_annotations and 'annotations' in dest:
            del dest['annotations']
        if not self.restore_policy:
            if 'acls' in dest:
                del dest['acls']
            if 'acl_bindings' in dest:
                del dest['acl_bindings']
        return dest
    def copy_sdef(self, schema):
        """Copy schema definition structure with conditional parts for cloning."""
        dest = self.prune_parts(schema.prejson())
        if 'tables' in dest:
            del dest['tables']
        return dest
    def copy_tdef_core(self, table):
        """Copy table definition structure with conditional parts excluding fkeys."""
        dest = self.prune_parts(table.prejson())
        dest['column_definitions'] = [self.prune_parts(column) for column in dest['column_definitions']]
        dest['keys'] = [self.prune_parts(column) for column in dest.get('keys', [])]
        if 'foreign_keys' in dest:
            del dest['foreign_keys']
        if 'annotations' not in dest:
            dest['annotations'] = {}
        # Seed the restore-state marker: 1 == data restore pending, None == no data restore.
        dest['annotations'][self.RESTORE_STATE_URL] = 1 if self.restore_data else None
        return dest
    def copy_tdef_fkeys(self, table):
        """Copy table fkeys structure."""
        def check(fkdef):
            # RID references into the server-managed ERMrest client/group tables cannot
            # be satisfied on a fresh catalog, so fail fast with a clear error.
            for fkc in fkdef['referenced_columns']:
                if fkc['schema_name'] == 'public' \
                        and fkc['table_name'] in {'ERMrest_Client', 'ERMrest_Group'} \
                        and fkc['column_name'] == 'RID':
                    raise DerivaRestoreError(
                        "Cannot restore catalog with foreign key reference to "
                        "%(schema_name)s:%(table_name)s:%(column_name)s" % fkc)
            return fkdef
        return [self.prune_parts(check(dest)) for dest in table.prejson().get('foreign_keys', [])]
    def copy_cdef(self, column):
        """Copy column definition with conditional parts."""
        return column.table.schema.name, column.table.name, self.prune_parts(column.prejson())
    @staticmethod
    def check_column_compatibility(src, dst):
        """Check compatibility of source and destination column definitions."""
        def error(fieldname, sv, dv):
            # Build (not raise) the error so callers below can `raise error(...)`.
            return DerivaRestoreError("Source/dest column %s mismatch %s != %s for %s:%s:%s" % (
                fieldname,
                sv, dv,
                src.table.schema.name, src.table.name, src.name
            ))
        if src.type.typename != dst.type.typename:
            raise error("type", src.type.typename, dst.type.typename)
        if src.nullok != dst.nullok:
            raise error("nullok", src.nullok, dst.nullok)
        if src.default != dst.default:
            raise error("default", src.default, dst.default)
    def copy_kdef(self, key):
        """Copy key definition with conditional parts, tagged with schema and table name."""
        return key.table.schema.name, key.table.name, self.prune_parts(key.prejson())
    def get_table_path(self, sname, tname, is_bag):
        """Return the absolute path of the data file for schema sname / table tname.
        Bags nest payload files under an extra 'data' directory."""
        return os.path.abspath(
            os.path.join(self.input_path, "data" if is_bag else "", self.BASE_DATA_INPUT_PATH.format(sname, tname)))
    def load_json_file(self, file_path):
        """Load an entire JSON document, preserving object key order."""
        with io.open(file_path, 'r', encoding='UTF-8') as file_data:
            return json.load(file_data, object_pairs_hook=OrderedDict)
    def open_json_stream_file(self, table_path):
        """
        Open a JSON-Stream file for reading, caller is responsible for closing.
        """
        table_data = io.open(table_path, 'r', encoding='UTF-8')
        line = table_data.readline().strip()
        table_data.seek(0)
        # Cheap format sanity check: the first line must itself be a JSON object.
        if line.startswith('{') and line.endswith('}'):
            return table_data
        else:
            table_data.close()
            raise DerivaRestoreError(
                "Input file %s does not appear to be in the required json-stream format." % table_path)
    def get_json_recordset(self, data, chunk_size, after=None, after_column='RID'):
        """Yield lists of up to chunk_size rows from an iterable of JSON lines (or dicts).
        If `after` is given, skip rows up to and including the row whose
        `after_column` value equals it (supports resuming a partial restore)."""
        chunk = list()
        found = False
        for line in data:
            if isinstance(line, dict):
                row = line
            else:
                row = json.loads(line, object_pairs_hook=OrderedDict)
            if after and not found:
                if after == row[after_column]:
                    found = True
                continue
            chunk.append(row)
            if len(chunk) == chunk_size:
                yield chunk
                chunk = list()
        if chunk:
            yield chunk
    def restore(self, **kwargs):
        """
        Perform the catalog restore operation. The restore process is broken up into six phases:
        1. Pre-process the input path.
        - If the input path is a file, it is assumed that it is a compressed archive file that can be extracted
        into an input directory via a supported codec: `tar`,`tgz`,`bz2`, or `zip`.
        - If the input directory is a valid _bag_ directory structure, the bag will be materialized.
        2. The catalog schema will be restored first. The schema is restored from a ERMRest JSON schema document file.
        The schema document file must be named `catalog-schema.json` and must appear at the root of the input
        directory. The restore process can be configured to exclude the restoration of an enumerated set both
        schema and tables.
        3. The catalog table data will be restored, if present. The table date restoration process is resilient to
        interruption and may be restarted. However, if the catalog schema or data is mutated outside of the scope of
        the restore function in-between such restarts, the restored catalog's consistency cannot be guaranteed.
        The restore process can be configured to exclude the restoration of table data for a set of tables.
        4. The catalog foreign keys will be restored.
        5. The catalog assets will be restored, if present.
        6. On success, the restore state marker annotations will be deleted and the catalog history will be truncated.
        :param kwargs:
        :return:
        """
        success = True
        start = datetime.datetime.now()
        # pre-process input
        logging.info("Processing input path: %s" % self.input_path)
        is_file, is_dir, is_uri = bdb.inspect_path(self.input_path)
        if not (is_file or is_dir or is_uri):
            raise DerivaRestoreError("Invalid input path [%s]. If the specified input path refers to a locally mounted "
                                     "file or directory, it does not exist or cannot be accessed. If the specified "
                                     "path is a URI, the scheme component of the URI could not be determined." %
                                     self.input_path)
        if is_file or is_dir:
            self.input_path = os.path.abspath(self.input_path)
        if is_file:
            logging.info("The input path [%s] is a file. Assuming input file is a directory archive and extracting..." %
                         self.input_path)
            self.input_path = bdb.extract_bag(self.input_path)
        try:
            if not self.no_bag_materialize:
                self.input_path = bdb.materialize(self.input_path)
        except bdb.bdbagit.BagValidationError as e:
            # Bag validation failure is fatal only in strict mode; otherwise warn and proceed.
            if self.strict_bag_validation:
                raise DerivaRestoreError(format_exception(e))
            else:
                logging.warning("Input bag validation failed and strict validation mode is disabled. %s" %
                                format_exception(e))
        is_bag = bdb.is_bag(self.input_path)
        src_schema_file = os.path.abspath(
            os.path.join(self.input_path, "data" if is_bag else "", "catalog-schema.json"))
        # the src_catalog_stub created below will never be "connected" in any kind of network sense,
        # but we need an instance of ErmrestCatalog in order to get a working Model from the schema file.
        src_catalog_stub = ErmrestCatalog("file", src_schema_file, "1")
        src_model = Model.fromfile(src_catalog_stub, src_schema_file)
        # initialize/connect to destination catalog
        if not self.catalog_id:
            self.catalog_id = self.server.create_ermrest_catalog().catalog_id
            self.server_args["catalog_id"] = self.catalog_id
            logging.info("Created new target catalog with ID: %s" % self.catalog_id)
        self.dst_catalog = self.server.connect_ermrest(self.catalog_id)
        # init dcctx cid to a default
        self.dst_catalog.dcctx['cid'] = self.__class__.__name__
        # build up the model content we will copy to destination
        dst_model = self.dst_catalog.getCatalogModel()
        logging.info("Restoring %s to catalog: %s" % (self.input_path, self.dst_catalog.get_server_uri()))
        # set top-level config right away and find fatal usage errors...
        if self.restore_policy:
            logging.info("Restoring top-level catalog ACLs...")
            if not src_model.acls:
                logging.info("Source schema does not contain any ACLs.")
            else:
                # keep the current owners so we do not lock ourselves out of the target catalog
                src_model.acls.owner.extend(dst_model.acls.owner)
                self.dst_catalog.put('/acl', json=src_model.acls)
        if self.restore_annotations:
            logging.info("Restoring top-level catalog annotations...")
            self.dst_catalog.put('/annotation', json=src_model.annotations)
        # build up the model content we will copy to destination
        dst_model = self.dst_catalog.getCatalogModel()
        new_model = []
        new_columns = []  # ERMrest does not currently allow bulk column creation
        new_keys = []  # ERMrest does not currently allow bulk key creation
        restore_states = {}
        fkeys_deferred = {}
        exclude_schemas = [] if self.exclude_schemas is None else self.exclude_schemas
        try:
            for sname, schema in src_model.schemas.items():
                if sname in exclude_schemas:
                    continue
                if sname not in dst_model.schemas:
                    new_model.append(self.copy_sdef(schema))
                for tname, table in schema.tables.items():
                    if table.kind != 'table':
                        logging.warning('Skipping restore of %s %s:%s' % (table.kind, sname, tname))
                        continue
                    if 'RID' not in table.column_definitions.elements:
                        raise DerivaRestoreError(
                            "Source table %s.%s lacks system-columns and cannot be restored." % (sname, tname))
                    # make sure the source table is pruned of any existing restore state markers
                    if table.annotations.get(CLONE_STATE_URL) is not None:
                        del table.annotations[CLONE_STATE_URL]
                    if table.annotations.get(self.RESTORE_STATE_URL) is not None:
                        del table.annotations[self.RESTORE_STATE_URL]
                    if sname not in dst_model.schemas or tname not in dst_model.schemas[sname].tables:
                        # brand-new table on the destination: create core def, defer fkeys
                        new_model.append(self.copy_tdef_core(table))
                        restore_states[(sname, tname)] = 1 if self.restore_data else None
                        fkeys_deferred[(sname, tname)] = self.copy_tdef_fkeys(table)
                    else:
                        # table already exists: reconcile columns/keys and resume from marker state
                        src_columns = {c.name: c for c in table.column_definitions}
                        dst_columns = {c.name: c for c in dst_model.schemas[sname].tables[tname].column_definitions}
                        for cname in src_columns:
                            if cname not in dst_columns:
                                new_columns.append(self.copy_cdef(src_columns[cname]))
                            else:
                                self.check_column_compatibility(src_columns[cname], dst_columns[cname])
                        for cname in dst_columns:
                            if cname not in src_columns:
                                raise DerivaRestoreError(
                                    "Destination column %s.%s.%s does not exist in source catalog." %
                                    (sname, tname, cname))
                        src_keys = {tuple(sorted(c.name for c in key.unique_columns)): key for key in table.keys}
                        dst_keys = {tuple(sorted(c.name for c in key.unique_columns)): key for key in
                                    dst_model.schemas[sname].tables[tname].keys}
                        for utuple in src_keys:
                            if utuple not in dst_keys:
                                new_keys.append(self.copy_kdef(src_keys[utuple]))
                        for utuple in dst_keys:
                            if utuple not in src_keys:
                                raise DerivaRestoreError("Destination key %s.%s(%s) does not exist in source catalog."
                                                         % (sname, tname, ', '.join(utuple)))
                        restore_states[(sname, tname)] = \
                            dst_model.schemas[sname].tables[tname].annotations.get(self.RESTORE_STATE_URL)
                        if dst_model.schemas[sname].tables[tname].foreign_keys:
                            # assume that presence of any destination foreign keys means we already completed
                            if self.restore_assets:
                                self.upload_assets()
                            return
                        else:
                            fkeys_deferred[(sname, tname)] = self.copy_tdef_fkeys(table)
            # apply the stage 1 model to the destination in bulk
            logging.info("Restoring catalog schema...")
            if new_model:
                self.dst_catalog.post("/schema", json=new_model).raise_for_status()
            for sname, tname, cdef in new_columns:
                self.dst_catalog.post("/schema/%s/table/%s/column" % (urlquote(sname), urlquote(tname)),
                                      json=cdef).raise_for_status()
            for sname, tname, kdef in new_keys:
                self.dst_catalog.post("/schema/%s/table/%s/key" % (urlquote(sname), urlquote(tname)),
                                      json=kdef).raise_for_status()
            # copy data in stage 2
            if self.restore_data:
                logging.info("Restoring catalog data...")
                for sname, tname in restore_states.keys():
                    tname_uri = "%s:%s" % (urlquote(sname), urlquote(tname))
                    if restore_states[(sname, tname)] == 1:
                        # determine current position in (partial?) copy
                        row = self.dst_catalog.get("/entity/%s@sort(RID::desc::)?limit=1" % tname_uri).json()
                        if row:
                            last = row[0]['RID']
                            logging.info("Existing data detected in table [%s] -- will attempt partial restore of "
                                         "remaining records following last known RID: %s" % (tname_uri, last))
                        else:
                            last = None
                        table_path = self.get_table_path(sname, tname, is_bag)
                        if not os.path.isfile(table_path):
                            logging.warning("Restoration of table data [%s] incomplete. File not found: %s" %
                                            (("%s:%s" % (sname, tname)), table_path))
                            continue
                        table = self.get_json_recordset(self.open_json_stream_file(table_path),
                                                        self.data_chunk_size, after=last)
                        total = 0
                        table_success = True
                        try:
                            for chunk in table:
                                if chunk:
                                    self.dst_catalog.post("/entity/%s?nondefaults=RID,RCT,RCB" % tname_uri, json=chunk)
                                    total += len(chunk)
                                else:
                                    break
                        except:
                            # intentionally broad: mark this table failed, continue with the
                            # remaining tables; details surface in the warning logged below
                            table_success = False
                        finally:
                            table.close()
                        if table_success:
                            logging.info("Restoration of table data [%s] successful. %s rows restored." %
                                         (tname_uri, total))
                        else:
                            logging.warning("Restoration of table data [%s] failed. %s rows restored." %
                                            (tname_uri, total))
                        # record our progress on catalog in case we fail part way through
                        self.dst_catalog.put(
                            "/schema/%s/table/%s/annotation/%s" % (
                                urlquote(sname),
                                urlquote(tname),
                                urlquote(self.RESTORE_STATE_URL),
                            ),
                            json=2
                        )
                    elif restore_states[(sname, tname)] is None and (sname, tname) in {
                        ('public', 'ERMrest_Client'),
                        ('public', 'ERMrest_Group'),
                    }:
                        # special sync behavior for magic ermrest tables
                        # HACK: these are assumed small enough to join via local merge of arrays
                        want = sorted(self.load_json_file(self.get_table_path(sname, tname, is_bag)),
                                      key=lambda r: r['ID'])
                        have = sorted(self.dst_catalog.get("/entity/%s?limit=none" % tname_uri).json(),
                                      key=lambda r: r['ID'])
                        create = []
                        update = []
                        pos_want = 0
                        pos_have = 0
                        # two-pointer merge over the ID-sorted arrays
                        while pos_want < len(want):
                            while pos_have < len(have) and have[pos_have]['ID'] < want[pos_want]['ID']:
                                # dst-only rows will be retained as is
                                pos_have += 1
                            if pos_have >= len(have) or have[pos_have]['ID'] > want[pos_want]['ID']:
                                # src-only rows will be inserted
                                create.append(want[pos_want])
                                pos_want += 1
                            else:
                                # overlapping rows will be updated
                                update.append(want[pos_want])
                                pos_want += 1
                        self.dst_catalog.post("/entity/%s?nondefaults=RCT,RCB" % tname_uri, json=create)
                        self.dst_catalog.put(
                            "/attributegroup/%s/ID;%s" % (
                                tname_uri,
                                ",".join([
                                    urlquote(c.name)
                                    for c in src_model.schemas[sname].tables[tname].column_definitions
                                    if c.name not in {'RID', 'RMT', 'RMB', 'ID'}
                                ])
                            ),
                            json=update
                        )
                        # record our progress on catalog in case we fail part way through
                        self.dst_catalog.put(
                            "/schema/%s/table/%s/annotation/%s" % (
                                urlquote(sname),
                                urlquote(tname),
                                urlquote(self.RESTORE_STATE_URL),
                            ),
                            json=2
                        )
            # apply stage 2 model in bulk only... we won't get here unless preceding succeeded
            logging.info("Restoring foreign keys...")
            new_fkeys = []
            for fkeys in fkeys_deferred.values():
                new_fkeys.extend(fkeys)
            # restore fkeys
            if new_fkeys:
                self.dst_catalog.post("/schema", json=new_fkeys)
            # restore assets
            if self.restore_assets:
                self.upload_assets()
            # cleanup
            self.cleanup_restored_catalog()
        except:
            # record the failure for the final log message; the exception still propagates
            success = False
            raise
        finally:
            elapsed_time = datetime.datetime.now() - start
            total_secs = elapsed_time.total_seconds()
            elapsed = time.strftime('%H:%M:%S', time.gmtime(total_secs))
            logging.info("Restore of catalog %s %s. %s" % (self.dst_catalog.get_server_uri(),
                                                           "completed successfully" if success else "failed",
                                                           ("Elapsed time: %s" % elapsed) if (total_secs > 0) else ""))
    def cleanup_restored_catalog(self):
        """Delete the per-table restore-state marker annotations and, when
        truncate_after is set, truncate catalog history up to the current snapshot."""
        # cleanup restore state markers
        logging.info("Cleaning up restore state...")
        dst_model = self.dst_catalog.getCatalogModel()
        for sname, schema in dst_model.schemas.items():
            for tname, table in schema.tables.items():
                annotation_uri = "/schema/%s/table/%s/annotation/%s" % (
                    urlquote(sname),
                    urlquote(tname),
                    urlquote(self.RESTORE_STATE_URL)
                )
                try:
                    self.dst_catalog.delete(annotation_uri)
                except Exception as e:
                    # best-effort cleanup: warn and continue with the remaining tables
                    logging.warning("Unable to cleanup restore state marker annotation %s: %s" %
                                    (annotation_uri, format_exception(e)))
                    continue
        # truncate restore history
        if self.truncate_after:
            logging.info("Truncating restore history...")
            snaptime = self.dst_catalog.get("/").json()["snaptime"]
            self.dst_catalog.delete("/history/,%s" % urlquote(snaptime))
    def upload_assets(self):
        """Upload file assets found under the input path's "assets" directory via
        the generic DERIVA uploader; silently no-op when the directory is absent."""
        asset_dir = os.path.join(self.input_path, self.BASE_ASSETS_INPUT_PATH)
        if not os.path.isdir(asset_dir):
            logging.debug("No asset directory found. Will not attempt to upload file assets.")
            return
        logging.info("Restoring file assets...")
        uploader = GenericUploader(config_file=self.upload_config, server=self.server_args)
        uploader.setCredentials(self.credentials)
        uploader.setConfig(self.upload_config)
        uploader.scanDirectory(asset_dir, abort_on_invalid_input=False, purge_state=False)
        uploader.uploadFiles(file_callback=uploader.defaultFileCallback)
        uploader.cleanup()
|
# -*- coding: utf-8 -*-
import mock
from django import http
from django import test
from django_cradmin import cradmin_testhelpers
from model_mommy import mommy
from devilry.apps.core import models as core_models
from devilry.devilry_dbcache import customsql
from devilry.devilry_deadlinemanagement.views import multiselect_groups_view
from devilry.devilry_group import devilry_group_mommy_factories
from devilry.project.common import settings
from devilry.utils import datetimeutils
class TestCaseExaminerMixin(test.TestCase, cradmin_testhelpers.TestCaseMixin):
    """Shared fixtures for examiner deadline-management multiselect view tests."""
    viewclass = multiselect_groups_view.AssignmentGroupMultiSelectListFilterView
    handle_deadline = 'new-attempt'
    def setUp(self):
        # The AssignmentGroup db-cache triggers must exist before the view queries run.
        customsql.AssignmentGroupDbCacheCustomSql().initialize()
    def _get_mock_instance(self, assignment):
        # Fake cradmin instance acting in the 'examiner' devilryrole.
        instance = mock.MagicMock()
        instance.get_devilryrole_type.return_value = 'examiner'
        instance.assignment = assignment
        return instance
    def _get_mock_app(self, user=None):
        # Fake cradmin app restricted to groups the given examiner can access.
        app = mock.MagicMock()
        app.get_devilryrole.return_value = 'examiner'
        accessible = core_models.AssignmentGroup.objects.filter_examiner_has_access(user=user)
        app.get_accessible_group_queryset.return_value = accessible
        return app
    def test_title(self):
        assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       long_name='Assignment 0')
        group_a = mommy.make('core.AssignmentGroup', parentnode=assignment)
        group_b = mommy.make('core.AssignmentGroup', parentnode=assignment)
        devilry_group_mommy_factories.feedbackset_first_attempt_published(group=group_a)
        devilry_group_mommy_factories.feedbackset_first_attempt_published(group=group_b)
        examiner = mommy.make(settings.AUTH_USER_MODEL)
        for group in (group_a, group_b):
            mommy.make('core.Examiner', assignmentgroup=group,
                       relatedexaminer__user=examiner)
        response = self.mock_http200_getrequest_htmls(
            cradmin_role=assignment,
            cradmin_instance=self._get_mock_instance(assignment),
            requestuser=examiner,
            cradmin_app=self._get_mock_app(examiner),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(assignment.first_deadline),
                'handle_deadline': self.handle_deadline
            })
        self.assertIn(
            'Select groups',
            response.selector.one('title').alltext_normalized)
class TestExaminerNewAttemptMultiSelectView(TestCaseExaminerMixin):
viewclass = multiselect_groups_view.AssignmentGroupMultiSelectListFilterView
handle_deadline = 'new-attempt'
def test_info_box_not_showing_when_one_group_were_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
def test_info_box_showing_when_one_group_was_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
self.assertIn(
'1 group(s) excluded',
mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)
def test_info_box_showing_when_multiple_groups_were_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup3)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
self.assertIn(
'2 group(s) excluded',
mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)
def test_anonymizationmode_off_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertContains(mockresponse.response, 'unanonymizedfullname')
self.assertContains(mockresponse.response, 'A un-anonymized fullname')
self.assertNotContains(mockresponse.response, 'MyAnonymousID')
def test_anonymizationmode_semi_anonymous_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertNotContains(mockresponse.response, 'unanonymizedfullname')
self.assertNotContains(mockresponse.response, 'A un-anonymized fullname')
self.assertContains(mockresponse.response, 'MyAnonymousID')
def test_anonymizationmode_fully_anonymous_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertNotContains(mockresponse.response, 'unanonymizedfullname')
self.assertNotContains(mockresponse.response, 'A un-anonymized fullname')
self.assertContains(mockresponse.response, 'MyAnonymousID')
def test_search_anonymous_nomatch_fullname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-TestUser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_nomatch_shortname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='testuser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-testuser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_nomatch_candidate_id_from_candidate(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
candidate_id='MyCandidateID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyCandidateID'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_match_automatic_candidate_id_from_relatedstudent(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser',
relatedstudent__candidate_id='MyCandidateID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyCandidateID'
})
self.assertEqual(
1,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_match_automatic_anonymous_id(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyAnonymousID'
})
self.assertEqual(
1,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_fullname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
uses_custom_candidate_ids=True,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-TestUser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_shortname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
uses_custom_candidate_ids=True,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='testuser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-testuser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_automatic_candidate_id_from_relatedstudent(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
uses_custom_candidate_ids=True,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser',
relatedstudent__candidate_id='MyCandidateID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyCandidateID'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_automatic_anonymous_id(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
uses_custom_candidate_ids=True,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyAnonymousID'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_match_candidate_id_from_candidate(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
uses_custom_candidate_ids=True,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
candidate_id='MyCandidateID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyCandidateID'
})
self.assertEqual(
1,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_three_groups_on_assignment_published(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment 0')
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner', assignmentgroup=testgroup1,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup2,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup3,
relatedexaminer__user=examiner_user)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
})
self.assertEqual(
3,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_three_groups_on_assignment_unpublished(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment 0')
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner', assignmentgroup=testgroup1,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup2,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup3,
relatedexaminer__user=examiner_user)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_get_num_queries(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment 0')
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner', assignmentgroup=testgroup1,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup2,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup3,
relatedexaminer__user=examiner_user)
with self.assertNumQueries(6):
self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
})
def test_post_num_queries(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment 0')
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner', assignmentgroup=testgroup1,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup2,
relatedexaminer__user=examiner_user)
mommy.make('core.Examiner', assignmentgroup=testgroup3,
relatedexaminer__user=examiner_user)
with self.assertNumQueries(3):
self.mock_http302_postrequest(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
},
requestkwargs={
'data': {
'selected_items': [testgroup1.id, testgroup2.id, testgroup3.id]
}
})
class TestExaminerMoveDeadlineMultiSelectView(TestCaseExaminerMixin):
viewclass = multiselect_groups_view.AssignmentGroupMultiSelectListFilterView
handle_deadline = 'move-deadline'
def test_info_box_not_showing_when_one_group_were_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
def test_info_box_showing_when_one_group_was_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
self.assertIn(
'1 group(s) excluded',
mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)
def test_info_box_showing_when_multiple_groups_were_excluded(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup3)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
self.assertIn(
'2 group(s) excluded',
mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)
def test_anonymizationmode_off_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertContains(mockresponse.response, 'unanonymizedfullname')
self.assertContains(mockresponse.response, 'A un-anonymized fullname')
self.assertNotContains(mockresponse.response, 'MyAnonymousID')
def test_anonymizationmode_semi_anonymous_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertNotContains(mockresponse.response, 'unanonymizedfullname')
self.assertNotContains(mockresponse.response, 'A un-anonymized fullname')
self.assertContains(mockresponse.response, 'MyAnonymousID')
def test_anonymizationmode_fully_anonymous_candidates(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__shortname='unanonymizedfullname',
relatedstudent__user__fullname='A un-anonymized fullname',
relatedstudent__automatic_anonymous_id='MyAnonymousID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline
}
)
self.assertNotContains(mockresponse.response, 'unanonymizedfullname')
self.assertNotContains(mockresponse.response, 'A un-anonymized fullname')
self.assertContains(mockresponse.response, 'MyAnonymousID')
def test_search_anonymous_nomatch_fullname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='TestUser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-TestUser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_nomatch_shortname(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
relatedstudent__user__fullname='testuser')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-testuser'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_nomatch_candidate_id_from_candidate(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
examiner_user = mommy.make(settings.AUTH_USER_MODEL)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
mommy.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mommy.make('core.Candidate',
assignment_group=testgroup1,
candidate_id='MyCandidateID')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'filters_string': 'search-MyCandidateID'
})
self.assertEqual(
0,
mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_match_automatic_candidate_id_from_relatedstudent(self):
    """Semi-anonymous assignment: searching on RelatedStudent.candidate_id MUST match the group."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    # candidate_id is set on the RelatedStudent, which is the searchable field here.
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__fullname='TestUser',
               relatedstudent__candidate_id='MyCandidateID')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-MyCandidateID'
        })
    # Exactly the one group containing the matching candidate is listed.
    self.assertEqual(
        1,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_match_automatic_anonymous_id(self):
    """Semi-anonymous assignment: searching on RelatedStudent.automatic_anonymous_id MUST match."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    # The auto-generated anonymous id is the searchable identifier in this mode.
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__fullname='TestUser',
               relatedstudent__automatic_anonymous_id='MyAnonymousID')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-MyAnonymousID'
        })
    self.assertEqual(
        1,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_fullname(self):
    """Custom-candidate-id + semi-anonymous: searching on the user's fullname must NOT match."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       uses_custom_candidate_ids=True,
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__fullname='TestUser')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-TestUser'
        })
    # Fullname is hidden by anonymization, so no group may be listed.
    self.assertEqual(
        0,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_shortname(self):
    """Custom-candidate-id + semi-anonymous: searching on the user's shortname must NOT match."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       uses_custom_candidate_ids=True,
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__shortname='testuser')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-testuser'
        })
    # Shortname is hidden by anonymization, so no group may be listed.
    self.assertEqual(
        0,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_automatic_candidate_id_from_relatedstudent(self):
    """Custom-candidate-id mode: RelatedStudent.candidate_id is ignored, so searching it must NOT match."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       uses_custom_candidate_ids=True,
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__fullname='TestUser',
               relatedstudent__candidate_id='MyCandidateID')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-MyCandidateID'
        })
    self.assertEqual(
        0,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_nomatch_automatic_anonymous_id(self):
    """Custom-candidate-id mode: the automatic anonymous id is ignored, so searching it must NOT match."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       uses_custom_candidate_ids=True,
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               relatedstudent__user__fullname='TestUser',
               relatedstudent__automatic_anonymous_id='MyAnonymousID')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-MyAnonymousID'
        })
    self.assertEqual(
        0,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_search_anonymous_uses_custom_candidate_ids_match_candidate_id_from_candidate(self):
    """Custom-candidate-id mode: searching on Candidate.candidate_id MUST match the group."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       uses_custom_candidate_ids=True,
                                       anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup1)
    mommy.make('core.Examiner',
               relatedexaminer__user=examiner_user,
               assignmentgroup=testgroup2)
    # In this mode the Candidate.candidate_id field is the searchable identifier.
    mommy.make('core.Candidate',
               assignment_group=testgroup1,
               candidate_id='MyCandidateID')
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline,
            'filters_string': 'search-MyCandidateID'
        })
    self.assertEqual(
        1,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_three_groups_on_assignment_published(self):
    """Groups whose first feedbackset is already published are excluded from the deadline view."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       long_name='Assignment 0')
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner', assignmentgroup=testgroup1,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup2,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup3,
               relatedexaminer__user=examiner_user)
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline
        })
    # Published groups should not appear in the multiselect list.
    self.assertEqual(
        0,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_three_groups_on_assignment_unpublished(self):
    """All three unpublished groups examined by the request user are listed."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       long_name='Assignment 0')
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup2)
    devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(group=testgroup3)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner', assignmentgroup=testgroup1,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup2,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup3,
               relatedexaminer__user=examiner_user)
    mockresponse = self.mock_http200_getrequest_htmls(
        cradmin_role=testassignment,
        cradmin_instance=self._get_mock_instance(testassignment),
        requestuser=examiner_user,
        cradmin_app=self._get_mock_app(examiner_user),
        viewkwargs={
            'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
            'handle_deadline': self.handle_deadline
        })
    self.assertEqual(
        3,
        mockresponse.selector.count('.django-cradmin-multiselect2-itemvalue'))
def test_get_num_queries(self):
    """Regression guard: GET of the new-attempt deadline view must stay at 6 queries."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       long_name='Assignment 0')
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner', assignmentgroup=testgroup1,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup2,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup3,
               relatedexaminer__user=examiner_user)
    # Query count must be independent of the number of groups.
    with self.assertNumQueries(6):
        self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': 'new-attempt'
            })
def test_post_num_queries(self):
    """Regression guard: POST of three selected groups to the new-attempt view must stay at 3 queries."""
    testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                       long_name='Assignment 0')
    testgroup1 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup2 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    testgroup3 = mommy.make('core.AssignmentGroup', parentnode=testassignment)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup1)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup2)
    devilry_group_mommy_factories.feedbackset_first_attempt_published(group=testgroup3)
    examiner_user = mommy.make(settings.AUTH_USER_MODEL)
    mommy.make('core.Examiner', assignmentgroup=testgroup1,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup2,
               relatedexaminer__user=examiner_user)
    mommy.make('core.Examiner', assignmentgroup=testgroup3,
               relatedexaminer__user=examiner_user)
    # Query count must be independent of the number of selected groups.
    with self.assertNumQueries(3):
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': 'new-attempt'
            },
            requestkwargs={
                'data': {
                    'selected_items': [testgroup1.id, testgroup2.id, testgroup3.id]
                }
            })
|
#!/usr/bin/env python
"""
Prepares DB for loaddata command (must perform some cleanup first)
"""
import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openwisp2.settings")
django.setup()

# Fix: import ContentType from its canonical module. It was previously imported
# from django.contrib.auth.models, which only works because auth.models happens
# to re-export it as an implementation detail.
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from openwisp_users.models import Organization
from openwisp_controller.config.models import OrganizationConfigSettings

# flush automatically created content types and permissions so loaddata
# can re-insert them without primary-key collisions
ContentType.objects.all().delete()
Permission.objects.all().delete()

# create the default organization (ids/secrets are Ansible/Jinja2 template vars)
org = Organization.objects.create(id='{{ openwisp2_default_organization_id }}',
                                  name='default',
                                  slug='default')
OrganizationConfigSettings.objects.create(organization=org,
                                          registration_enabled=True,
                                          shared_secret='{{ openwisp2_shared_secret|default("<CHANGE-ME>") }}')
|
# Import required libraries
import sys
import os
import time
import shutil
import pandas as pd
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# modern environments need `import joblib` instead — confirm pinned version.
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier

# Training data file and the subset of columns used by the model
# (the last entry is the target/dependent variable).
training_data = 'data/train.csv'
include = ['Age', 'Sex', 'Embarked', 'Survived']
dependent_variable = include[-1]

# Pickle file locations for the trained model and its column layout
model_directory = 'model'
model_file_name = '%s/model.pkl' % model_directory
model_columns_file_name = '%s/model_columns.pkl' % model_directory

# Module-level holders for the trained model; populated by train()
model_columns = None
clf = None
# Función para realizar el entrenamiento con Randon Forest
def train():
    """Train a RandomForest on the Titanic CSV and persist it with joblib.

    Side effects: sets the module globals ``model_columns`` and ``clf`` and
    writes both pickle files. Returns the string 'Success'.
    """
    df = pd.read_csv(training_data)
    df_ = df[include]
    categoricals = []
    # .items() replaces .iteritems(), which was removed in pandas 2.0.
    for col, col_type in df_.dtypes.items():
        if col_type == 'O':
            categoricals.append(col)
        else:
            # Replace missing numeric values with 0. Assign back instead of
            # inplace-mutating a column of a sliced frame, which triggers
            # pandas chained-assignment warnings and may silently no-op.
            df_[col] = df_[col].fillna(0)
    # One-hot encode the categorical columns (NaN gets its own dummy column).
    df_ohe = pd.get_dummies(df_, columns=categoricals, dummy_na=True)
    x = df_ohe[df_ohe.columns.difference([dependent_variable])]
    y = df_ohe[dependent_variable]
    # Persist the exact training-column layout so inference can reindex inputs.
    global model_columns
    model_columns = list(x.columns)
    joblib.dump(model_columns, model_columns_file_name)
    global clf
    clf = RandomForestClassifier()
    start = time.time()
    clf.fit(x, y)
    print('Entrenamiento en %.1f segundos' % (time.time() - start))
    print('Score del modelo entrenado: %s' % clf.score(x, y))
    joblib.dump(clf, model_file_name)
    return 'Success'
# Función para limpiar el entrenamiento del modelo
def wipe():
    """Delete and recreate the model directory, discarding any trained model.

    Returns 'Success' on success, otherwise prints the error and returns a
    Spanish failure message (kept verbatim — callers may display it).
    """
    try:
        # Use the configured model_directory instead of the duplicated
        # 'model' literal so both operations always target the same path.
        shutil.rmtree(model_directory)
        os.makedirs(model_directory)
        return 'Success'
    except Exception as e:
        print(str(e))
        return 'No fue posible eliminar y re-crear el directorio del modelo'
|
# Reddit bot configuration. Fill in the account and OAuth app credentials
# before running.
username = ""
password = ""
client_id = ""
client_secret = ""
# User-agent string sent with every API request.
user_agent = "Autoremoval Bot"
# Subreddit the bot monitors (without the /r/ prefix).
subreddit = ""
# Sticky comment posted on each submission for the community vote.
reply = "Upvote this comment if you feel this submission is characteristic of our subreddit. Downvote this if you feel that it is not. If this comment's score falls below a certain number, this submission will be automatically removed."
# Comment-score threshold below which the submission is removed.
score = 1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
Christopher J.C. Burges, Robert Ragno, and Quoc Viet Le. 2006.
Learning to Rank with Nonsmooth Cost Functions. In Proceedings of NIPS conference. 193–200.
"""
import torch
import torch.nn.functional as F
from ptranking.base.ranker import NeuralRanker
from ptranking.data.data_utils import LABEL_TYPE
from ptranking.metric.metric_utils import get_delta_ndcg
from ptranking.ltr_adhoc.eval.parameter import ModelParameter
class LambdaRank(NeuralRanker):
    '''
    Christopher J.C. Burges, Robert Ragno, and Quoc Viet Le. 2006.
    Learning to Rank with Nonsmooth Cost Functions. In Proceedings of NIPS conference. 193–200.
    '''
    def __init__(self, sf_para_dict=None, model_para_dict=None, gpu=False, device=None):
        # sigma scales the score differences inside the pairwise sigmoid.
        super(LambdaRank, self).__init__(id='LambdaRank', sf_para_dict=sf_para_dict, gpu=gpu, device=device)
        self.sigma = model_para_dict['sigma']

    def inner_train(self, batch_preds, batch_stds, **kwargs):
        '''
        One optimization step on a batch of rankings.

        :param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents within a ltr_adhoc
        :param batch_stds: [batch, ranking_size] each row represents the standard relevance grades for documents within a ltr_adhoc
        :return: the (summed) pairwise loss tensor for this batch
        '''
        label_type = kwargs['label_type']
        assert LABEL_TYPE.MultiLabel == label_type
        assert 'presort' in kwargs and kwargs['presort'] is True  # aiming for direct usage of ideal ranking

        # Sort documents by predicted relevance; reorder the ground-truth
        # grades the same way so positions line up.
        batch_preds_sorted, batch_preds_sorted_inds = torch.sort(batch_preds, dim=1, descending=True)  # sort documents according to the predicted relevance
        batch_stds_sorted_via_preds = torch.gather(batch_stds, dim=1, index=batch_preds_sorted_inds)  # reorder batch_stds correspondingly so as to make it consistent. BTW, batch_stds[batch_preds_sorted_inds] only works with 1-D tensor

        # Ground-truth pairwise preference S_{ij} in {-1, 0, 1} and its
        # probability form P_{ij} = (1 + S_{ij}) / 2.
        batch_std_diffs = torch.unsqueeze(batch_stds_sorted_via_preds, dim=2) - torch.unsqueeze(batch_stds_sorted_via_preds, dim=1)  # standard pairwise differences, i.e., S_{ij}
        batch_std_Sij = torch.clamp(batch_std_diffs, min=-1.0, max=1.0)  # ensuring S_{ij} \in {-1, 0, 1}
        batch_std_p_ij = 0.5 * (1.0 + batch_std_Sij)

        # Model's pairwise preference probability via a sigmoid over score
        # differences, sharpened by sigma.
        batch_s_ij = torch.unsqueeze(batch_preds_sorted, dim=2) - torch.unsqueeze(batch_preds_sorted, dim=1)  # computing pairwise differences, i.e., s_i - s_j
        batch_p_ij = 1.0 / (torch.exp(-self.sigma * batch_s_ij) + 1.0)

        # |delta NDCG| weights each document pair by its impact on NDCG.
        batch_delta_ndcg = get_delta_ndcg(batch_ideally_sorted_stds=batch_stds, batch_stds_sorted_via_preds=batch_stds_sorted_via_preds, label_type=label_type, gpu=self.gpu)

        # about reduction, mean leads to poor performance, a probable reason is that the small values due to * lambda_weight * mean
        # triu(diagonal=1) keeps each unordered pair once and drops i == j.
        batch_loss = F.binary_cross_entropy(input=torch.triu(batch_p_ij, diagonal=1),
                                            target=torch.triu(batch_std_p_ij, diagonal=1),
                                            weight=torch.triu(batch_delta_ndcg, diagonal=1), reduction='sum')

        self.optimizer.zero_grad()
        batch_loss.backward()
        self.optimizer.step()

        return batch_loss
###### Parameter of LambdaRank ######
class LambdaRankParameter(ModelParameter):
    ''' Parameter class for LambdaRank '''
    def __init__(self, debug=False, para_json=None):
        super(LambdaRankParameter, self).__init__(model_id='LambdaRank', para_json=para_json)
        self.debug = debug

    def default_para_dict(self):
        """
        Default parameter setting for LambdaRank
        :return: dict with model_id and the default sigma
        """
        self.lambda_para_dict = dict(model_id=self.model_id, sigma=1.0)
        return self.lambda_para_dict

    def to_para_string(self, log=False, given_para_dict=None):
        """
        String identifier of parameters
        :param log: use ':' separators for log output, '_' for file names
        :param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
        :return: e.g. 'Sigma_1' (file-name form) or 'Sigma:1' (log form)
        """
        # using specified para-dict or inner para-dict
        lambda_para_dict = given_para_dict if given_para_dict is not None else self.lambda_para_dict
        # Only one separator is needed; the original unpacked an unused
        # second element (s2) that was never referenced.
        s1 = ':' if log else '_'
        lambdarank_para_str = s1.join(['Sigma', '{:,g}'.format(lambda_para_dict['sigma'])])
        return lambdarank_para_str

    def grid_search(self):
        """
        Iterator of parameter settings for LambdaRank
        """
        if self.use_json:
            choice_sigma = self.json_dict['sigma']
        else:
            choice_sigma = [5.0, 1.0] if self.debug else [1.0]  # 1.0, 10.0, 50.0, 100.0
        for sigma in choice_sigma:
            self.lambda_para_dict = dict(model_id=self.model_id, sigma=sigma)
            yield self.lambda_para_dict
|
from django.db import models
from .helper import bulk_update
class BulkUpdateQuerySet(models.QuerySet):
def bulk_update(self, objs, update_fields=None,
exclude_fields=None, batch_size=None):
self._for_write = True
using = self.db
return bulk_update(
objs, update_fields=update_fields,
exclude_fields=exclude_fields, using=using,
batch_size=batch_size)
|
class Solution:
    def maxKilledEnemies(self, grid: List[List[str]]) -> int:
        """Return the maximum number of enemies a single bomb can kill.

        grid cells: 'E' enemy, 'W' wall, '0' empty. A bomb placed on an
        empty cell kills every enemy in its row and column until blocked
        by a wall. Handles an empty grid (returns 0 instead of crashing).
        """
        # Fix: guard empty input; the original indexed grid[0] unconditionally.
        if not grid or not grid[0]:
            return 0
        n = len(grid)
        m = len(grid[0])
        # count_grid[i][j] accumulates enemies visible from (i, j) along its
        # row segment (first pass) plus its column segment (second pass).
        count_grid = [[0] * m for _ in range(n)]
        # Row pass: for each wall-delimited segment, write the segment's
        # enemy count into every cell of the segment.
        for i in range(n):
            start = 0
            count = 0
            for j in range(m):
                if grid[i][j] == "E":
                    count += 1
                elif grid[i][j] == "W":
                    for t in range(start, j):
                        count_grid[i][t] = count
                    start = j + 1
                    count = 0
            for t in range(start, m):
                count_grid[i][t] = count
        # Column pass: same idea, added on top of the row counts.
        for j in range(m):
            start = 0
            count = 0
            for i in range(n):
                if grid[i][j] == "E":
                    count += 1
                elif grid[i][j] == "W":
                    for t in range(start, i):
                        count_grid[t][j] += count
                    start = i + 1
                    count = 0
            for t in range(start, n):
                count_grid[t][j] += count
        # The bomb may only be placed on empty cells.
        ans = 0
        for i in range(n):
            for j in range(m):
                if grid[i][j] == "0":
                    ans = max(ans, count_grid[i][j])
        return ans
|
#!/usr/bin/env python3
# coding: utf8
class MyClass:
    """Empty placeholder class."""
    pass
class MyClass1:
    """Class demonstrating a class-level attribute."""
    # Shared by all instances unless shadowed on an instance.
    cls_attr = 1
|
# Add checker to close the program when MW2 closes after running.
import configparser
import os
import pymem
import pymem.process
import sys
import time
from win32gui import GetWindowText, GetForegroundWindow
# Memory addresses used in MW2 v1.2.211
FOV_ADDRESS = (0x0639322C)  # float: field-of-view value
FPS_ADDRESS = (0x638152C)  # int32: FPS cap

# Base configuration parser (shared by main/tweak_loop/create_config)
config = configparser.RawConfigParser()

# Attempts to load MW2 into Pymem, closes if game not open.
try:
    pm = pymem.Pymem("iw4mp.exe")
except pymem.exception.ProcessNotFound:
    print("MW2 needs to be running first. Exiting...")
    sys.exit(1)
def main():
    """Load (or create) mw2tweak.ini and start the tweak loop for enabled tweaks."""
    # Fix: build the path with os.path.join instead of concatenating the
    # string "\mw2tweak.ini", which relied on "\m" not being a recognized
    # escape sequence (a deprecation warning on modern Python).
    config_path = os.path.join(os.getcwd(), "mw2tweak.ini")
    if not os.path.exists(config_path):
        print("Config not found. Creating config with default values.")
        create_config()
        # By default both tweaks are disabled so the user must configure them.
        print("You must configure mw2tweak.ini before continuing. Exiting...")
        sys.exit(0)
    else:
        # Config exists so we can read values from it.
        config.read("mw2tweak.ini")
        # Grabbing the values to see if FPS or FOV tweaks are enabled.
        fps_enabled = config.get("mw2tweak", "fps_enabled")
        fov_enabled = config.get("mw2tweak", "fov_enabled")
        # If both tweaks are disabled, MW2Tweak has nothing to do.
        if "false" in fps_enabled and "false" in fov_enabled:
            print("You have both tweaks disabled. Exiting...")
            sys.exit(1)
        # Passes enabled tweaks to the main tweak_loop.
        tweaks = []
        if "true" in fps_enabled:
            tweaks.append("fps")
        if "true" in fov_enabled:
            tweaks.append("fov")
        tweak_loop(tweaks)
def tweak_loop(tweaks):
    """Continuously write the configured FPS/FOV values into the game's memory.

    ``tweaks`` is a list containing "fps" and/or "fov". Runs forever; values
    are only written while the MW2 window is in the foreground.
    """
    # Loads welcome text and lists off loaded tweaks.
    print("MW2Tweak v1.0 loaded. You have the following tweaks loaded:")
    for tweak in tweaks:
        print("\t-> {}".format(tweak))
    # Grabs values for tweaks if they are enabled (config was read by main()).
    if "fov" in tweaks:
        fov_value = config.get("mw2tweak", "fov_value")
    if "fps" in tweaks:
        fps_value = config.get("mw2tweak", "fps_value")
    while True:
        # We only want to write to the game when alt-tabbed in.
        if "Modern Warfare 2" in GetWindowText(GetForegroundWindow()):
            # If tweak is enabled, we will write values to addresses.
            if "fps" in tweaks:
                pm.write_int(FPS_ADDRESS, int(fps_value))
            if "fov" in tweaks:
                pm.write_float(FOV_ADDRESS, float(fov_value))
        # Sleep to save some extra CPU cycles.
        time.sleep(0.1)
def create_config():
    """Write mw2tweak.ini with both tweaks disabled and the default values."""
    # Populate the shared parser in one shot; insertion order matches the
    # original add_section/set sequence, so the written file is identical.
    config.read_dict({
        "mw2tweak": {
            "fps_enabled": "false",
            "fov_enabled": "false",
            "fps_value": "333",
            "fov_value": "90",
        }
    })
    with open("mw2tweak.ini", "w") as config_file:
        config.write(config_file)
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import codecs
from datetime import datetime
import gspread
import json
from oauth2client.client import GoogleCredentials
import os
from pymongo import MongoClient
import re
from dateutil import parser
# Google Sheets document key of the projects spreadsheet.
projects_SHEET_KEY = '1mabXCaQwt8pxq45xsqosEAUqZ-F6csD3r771DxE9fJE'
# Mongo connection string; defaults to a local Meteor dev database.
MONGODB_URL = os.getenv('MONGODB_URL', 'mongodb://127.0.0.1:3001/meteor')
def sync_sheet(worksheet, db):
    """Mirror every data row of the worksheet into db.projects, upserting by name.

    Row 0 is treated as the header row and skipped; the stored row_nr is the
    sheet row index so update_sheet can locate entries later.
    """
    list_of_lists = worksheet.get_all_values()
    # Consistency fix: parenthesized single-argument prints (identical
    # behavior on Python 2, and matching the other prints in this file).
    print(list_of_lists)
    row_nr = 0
    for cell_list in list_of_lists:
        print(cell_list)
        if row_nr > 0:  # skip the header row
            name, description, url, github, reddit, contact, twitter, license, platform, status, last_update, icon = cell_list
            db.projects.update({'name': name}, {'$set': {
                'row_nr': row_nr,
                'description': description,
                'url': url,
                'github': github,
                'reddit': reddit,
                'contact': contact,
                'twitter': twitter,
                'license': license,
                'platform': platform,
                'status': status,
                'last_update': last_update,
                'icon': icon
            }}, upsert=True)
        row_nr += 1
def import_json(filename):
    """Read a UTF-8 JSON-lines file and return the parsed records as a list."""
    data = []
    # Fix: open with mode 'r' instead of 'rU' — the 'U' (universal newlines)
    # flag is deprecated and rejected by modern Python (removed in 3.11).
    with codecs.open(filename, 'r', 'utf-8') as f:
        for line in f:
            data.append(json.loads(line))
    return data
def import_queue(db):
    """Return every document currently in the db.queue collection as a list."""
    queued_documents = db.queue.find()
    return list(queued_documents)
def update_sheet(worksheet, db, data):
    """Append rows to the worksheet for queue entries not already in db.projects.

    ``data`` rows must carry project_name/description/site/github/reddit/
    contact/twitter/license/status and an ISO-ish ``timestamp``. Matching
    against existing projects is case-insensitive on the exact name.
    (Python 2 script: multi-argument statement prints below.)
    """
    for row in data:
        project_name = row['project_name']
        # Normalize the queue timestamp to YYYY-MM-DD for the sheet.
        dt = parser.parse(row['timestamp'])
        timestamp = dt.strftime('%Y-%m-%d')
        print row['timestamp'], dt, timestamp
        # Case-insensitive exact-name lookup in the projects collection.
        db_entry = db.projects.find_one({'name': re.compile('^' + re.escape(project_name) + '$', re.IGNORECASE)})
        if db_entry:
            # Already known: just show the existing sheet row (row_nr is
            # 0-based while gspread rows are 1-based, hence the +1).
            print 'Existing', row['project_name'], db_entry['row_nr']
            print worksheet.row_values(db_entry['row_nr'] + 1)
        else:
            print 'New', row['project_name']
            # Column order must match the sheet header; platform is fixed.
            output = [
                row['project_name'],
                row['description'],
                row['site'],
                row['github'],
                row['reddit'],
                row['contact'],
                row['twitter'],
                row['license'],
                'Bitcoin',
                row['status'],
                timestamp
            ]
            worksheet.append_row(output)
def main():
    """Authorize against Google Sheets and sync the projects sheet into Mongo."""
    # Application-default credentials scoped for the Sheets feed API.
    credentials = GoogleCredentials.get_application_default()
    credentials = credentials.create_scoped(['https://spreadsheets.google.com/feeds'])
    gc = gspread.authorize(credentials)
    sh = gc.open_by_key(projects_SHEET_KEY)
    worksheet = sh.get_worksheet(0)
    client = MongoClient(MONGODB_URL)
    db = client.get_default_database()
    # Index the upsert key used by sync_sheet.
    db.projects.ensure_index('name')
    # data = import_queue(db)
    # update_sheet(worksheet, db, data)
    sync_sheet(worksheet, db)
# Script entry point.
if __name__ == '__main__':
    print("starting sync")
    main()
|
"""nornir_scrapli.tasks.netconf"""
|
import torch
from .library_functions import AffineFeatureSelectionFunction

# Index ranges (into the 47-dim basketball feature vector) for each named
# feature subset used by the selection classes below.
# NOTE(review): the classes BBallBallPaintSelection, BBallScreenPaintSelection,
# BBallScreenBhDistSelection and BBallBhOneHotSelection reference the keys
# "ball_inpaint", "screen_inpaint", "screen_bh_dist" and "ballhandler", which
# are NOT present in this dict — instantiating them raises KeyError. Confirm
# the intended index ranges and add the entries.
DEFAULT_BBALL_FEATURE_SUBSETS = {
    "ball" : torch.LongTensor([0, 1]),
    "offense" : torch.LongTensor(list(range(2,12))),
    "defense" : torch.LongTensor(list(range(12,22))),
    'offense_dist2ball': torch.LongTensor(list(range(22,27))),
    'defense_dist2bh': torch.LongTensor(list(range(27,32))),
    'offense_dist2bh': torch.LongTensor(list(range(32,37))),
    'offense_dist2basket': torch.LongTensor(list(range(37,42))),
    'offense_paint': torch.LongTensor(list(range(42,47))), #binary
}
# Total length of the full basketball feature vector.
BBALL_FULL_FEATURE_DIM = 47
class BBallBallSelection(AffineFeatureSelectionFunction):
    """Affine selection over the ball (x, y) features (indices 0-1)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["ball"]
        super().__init__(input_size, output_size, num_units, name="BallXYAffine")
class BBallOffenseSelection(AffineFeatureSelectionFunction):
    """Affine selection over the offense player features (indices 2-11)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["offense"]
        super().__init__(input_size, output_size, num_units, name="OffenseXYAffine")
class BBallDefenseSelection(AffineFeatureSelectionFunction):
    """Affine selection over the defense player features (indices 12-21)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["defense"]
        super().__init__(input_size, output_size, num_units, name="DefenseXYAffine")
class BBallOffenseBallDistSelection(AffineFeatureSelectionFunction):
    """Affine selection over offense-to-ball distances (indices 22-26)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["offense_dist2ball"]
        super().__init__(input_size, output_size, num_units, name="OffenseBallDist")
class BBallDefenseBhDistSelection(AffineFeatureSelectionFunction):
    """Affine selection over defense-to-ballhandler distances (indices 27-31)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["defense_dist2bh"]
        super().__init__(input_size, output_size, num_units, name="DefenseBhDist")
class BBallOffenseBhDistSelection(AffineFeatureSelectionFunction):
    """Affine selection over offense-to-ballhandler distances (indices 32-36)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["offense_dist2bh"]
        super().__init__(input_size, output_size, num_units, name="OffenseBhDist")
class BBallOffenseBasketDistSelection(AffineFeatureSelectionFunction):
    """Affine selection over offense-to-basket distances (indices 37-41)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["offense_dist2basket"]
        super().__init__(input_size, output_size, num_units, name="OffenseBasketDist")
class BBallOffensePaintSelection(AffineFeatureSelectionFunction):
    """Affine selection over the binary offense-in-paint flags (indices 42-46)."""
    def __init__(self, input_size, output_size, num_units):
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["offense_paint"]
        super().__init__(input_size, output_size, num_units, name="OffenseInPaint")
class BBallBallPaintSelection(AffineFeatureSelectionFunction):
    """Affine selector for the "ball_inpaint" feature subset."""
    def __init__(self, input_size, output_size, num_units):
        # Subset metadata is set up front, then construction is delegated.
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["ball_inpaint"]
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        super().__init__(input_size, output_size, num_units, name="BallInPaint")
class BBallScreenPaintSelection(AffineFeatureSelectionFunction):
    """Affine selector for the "screen_inpaint" feature subset."""
    def __init__(self, input_size, output_size, num_units):
        # Subset metadata is set up front, then construction is delegated.
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["screen_inpaint"]
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        super().__init__(input_size, output_size, num_units, name="ScreenInPaint")
class BBallScreenBhDistSelection(AffineFeatureSelectionFunction):
    """Affine selector for the "screen_bh_dist" feature subset."""
    def __init__(self, input_size, output_size, num_units):
        # Subset metadata is set up front, then construction is delegated.
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["screen_bh_dist"]
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        super().__init__(input_size, output_size, num_units, name="ScreenBhDist")
class BBallBhOneHotSelection(AffineFeatureSelectionFunction):
    """Affine selector for the "ballhandler" feature subset."""
    def __init__(self, input_size, output_size, num_units):
        # Subset metadata is set up front, then construction is delegated.
        self.feature_tensor = DEFAULT_BBALL_FEATURE_SUBSETS["ballhandler"]
        self.full_feature_dim = BBALL_FULL_FEATURE_DIM
        super().__init__(input_size, output_size, num_units, name="BallhandlerId")
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.utils.translation import ugettext_lazy as _
from .management import create_default_site
class SitesConfig(AppConfig):
    """App configuration for ``django.contrib.sites``."""
    name = 'django.contrib.sites'
    verbose_name = _("Sites")
    def ready(self):
        # Run create_default_site after this app's migrations complete
        # (presumably seeding the initial Site row — see .management).
        post_migrate.connect(create_default_site, sender=self)
|
import os
import signal
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from firexapp.plugins import load_plugin_modules, cdl2list
from firexapp.submit.console import setup_console_logging
from firexkit.permissions import DEFAULT_UMASK
# Module-level logger, obtained from firexapp's console-logging setup helper.
logger = setup_console_logging(__name__)
def main():
    """Console entry point: set the default umask, configure console logging,
    and run a FireX app backed by a throwaway submission temp file."""
    os.umask(DEFAULT_UMASK)
    # Need to call setup_console_logging like this as this module is always called from another.
    setup_console_logging("__main__")
    with tempfile.NamedTemporaryFile(delete=True) as tmp_submission:
        from firexapp.submit.submit import SubmitBaseApp
        firex_app = FireXBaseApp(submit_app=SubmitBaseApp(submission_tmp_file=tmp_submission.name))
        firex_app.run()
def import_microservices(plugins_files=None, imports: tuple = None) -> dict:
    """Import all configured task modules plus any plugin files.

    :param plugins_files: comma-delimited string (or list) of plugin file paths.
    :param imports: module names to import; falls back to the celery app's
        configured ``imports`` when falsy.
    :return: the celery app's task registry (dict-like mapping of name -> task).
    :raises FileNotFoundError: if a listed plugin file does not exist.
    """
    # Validate every plugin path up front so we fail fast, before importing
    # any task modules.
    for f in cdl2list(plugins_files):
        if not os.path.isfile(f):
            raise FileNotFoundError(f)
    from firexapp.engine.celery import app
    if not imports:
        imports = app.conf.imports
    for module_name in imports:
        __import__(module_name)
    load_plugin_modules(plugins_files)
    # Fixed return annotation: the original declared ``-> []`` (a mutable list
    # literal, not a type); the registry is dict-like.
    return app.tasks
def get_app_task(task_short_name: str, all_tasks=None):
    """Resolve a task by fully-qualified or short (unqualified) name.

    Resolution order: exact full-name key, exact short-name match, then
    case-insensitive short-name match.

    :param task_short_name: task name; surrounding whitespace is ignored.
    :param all_tasks: mapping of full task name -> task; defaults to the
        celery app's registry.
    :raises celery.exceptions.NotRegistered: if no match is found.
    """
    task_short_name = task_short_name.strip()
    if all_tasks is None:
        from firexapp.engine.celery import app
        all_tasks = app.tasks
    # maybe it isn't a short name, but a long one
    if task_short_name in all_tasks:
        return all_tasks[task_short_name]
    # Single pass: return the first exact short-name match immediately, and
    # remember the first case-insensitive match as a fallback (same result as
    # the original two-pass scan, but without iterating the registry twice).
    task_name_lower = task_short_name.lower()
    ci_match = None
    for full_name, task in all_tasks.items():
        short = full_name.rsplit('.', 1)[-1]
        if short == task_short_name:
            return task
        if ci_match is None and short.lower() == task_name_lower:
            ci_match = task
    if ci_match is not None:
        return ci_match
    # Can't find a match
    from celery.exceptions import NotRegistered
    raise NotRegistered(task_short_name)
def get_app_tasks(tasks, all_tasks=None):
    """Resolve a comma-separated string, or an iterable, of task names.

    Uses isinstance (not ``type(...) is str``) so str subclasses work too.
    """
    if isinstance(tasks, str):
        tasks = tasks.split(",")
    return [get_app_task(task, all_tasks) for task in tasks]
class FireXBaseApp:
    """Top-level FireX command-line application.

    Wires the info and submit sub-applications into a single argparse-based
    CLI and installs escalating signal handlers for clean shutdown.
    """
    def __init__(self, submit_app=None, info_app=None):
        # Default sub-apps are imported lazily so merely importing this module
        # stays cheap.
        if not info_app:
            from firexapp.info import InfoBaseApp
            info_app = InfoBaseApp()
        self.info_app = info_app
        if not submit_app:
            from firexapp.submit.submit import SubmitBaseApp
            submit_app = SubmitBaseApp()
        self.submit_app = submit_app
        self.arg_parser = None
        self.running_app = None
        self._signal_exit_handler = ExitSignalHandler(self)
        # NOTE(review): run() assigns submit_args_to_process on submit_app,
        # not on self — confirm this attribute is actually read anywhere.
        self.submit_args_to_process = None
    def run(self, sys_argv=None):
        """Parse argv and dispatch to the selected sub-command.

        Non-ascii characters in argv abort with a parser error; with no
        sub-command the help text is printed. Only the submit sub-command
        accepts unrecognized extra arguments (they are forwarded to it).
        """
        if not self.arg_parser:
            self.arg_parser = self.create_arg_parser()
        try:
            if sys_argv is not None:
                # Cheap ascii-only validation of the whole command line.
                "".join(sys_argv).encode('ascii')
        except UnicodeEncodeError as ue:
            self.arg_parser.error(
                'You entered a non-ascii character at the command line.\n' + str(ue))
        arguments, others = self.arg_parser.parse_known_args(sys_argv)
        # run default help
        if not hasattr(arguments, "func"):
            self.arg_parser.print_help()
            self.arg_parser.exit()
        if self.submit_app.run_submit.__name__ not in arguments.func.__name__:
            if len(others):
                # only submit supports 'other' arguments
                msg = 'Unrecognized arguments: %s' % ' '.join(others)
                self.arg_parser.error(message=msg)
            arguments.func(arguments)
        else:
            # Track the active sub-app so main_error_exit_handler can delegate
            # cleanup to it.
            self.running_app = self.submit_app
            self.submit_app.submit_args_to_process = sys_argv
            arguments.func(arguments, others)
    def main_error_exit_handler(self, reason=None):
        """Delegate cleanup to the running sub-app (if it defines a handler of
        the same name), then terminate the process with a nonzero status."""
        if self.running_app and hasattr(self.running_app, self.main_error_exit_handler.__name__):
            self.running_app.main_error_exit_handler(reason=reason)
        # NOTE(review): builtin exit() is used here; sys.exit is the
        # conventional choice in non-interactive code.
        exit(-1)
    def create_arg_parser(self, description=None)->ArgumentParser:
        """Build the main parser plus list/info/submit sub-parsers.

        Also caches the parser on self and lets the submit app stash parser
        attributes for later use.
        """
        if not description:
            description = """
FireX is a workflow automation and execution engine built using a micro-service oriented design and architecture.
FireX provides a framework to facilitate the automation of the various workflows that are part of every development
and testing processes."""
        main_parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
        sub_parser = main_parser.add_subparsers()
        self.info_app.create_list_sub_parser(sub_parser)
        self.info_app.create_info_sub_parser(sub_parser)
        submit_parser = self.submit_app.create_submit_parser(sub_parser)
        self.arg_parser = main_parser
        self.submit_app.store_parser_attributes(main_parser, submit_parser)
        return main_parser
class ExitSignalHandler:
    """Escalating SIGTERM/SIGINT/SIGHUP handling.

    First signal: log a warning and run the app's error-exit cleanup.
    Second signal: log an impatience warning. Third signal: give up and exit
    immediately (possibly leaking celery/broker instances).
    """
    first_warning = "\nExiting due to signal %s"
    second_warning = "\nWe know! Have a little patience for crying out loud!"
    last_warning = "\nFINE! We'll stop. But you might have leaked a celery instance or a broker instance."
    @staticmethod
    def _register_signal_handlers(handler):
        # Install the same handler for all three termination signals.
        signal.signal(signal.SIGTERM, handler)
        signal.signal(signal.SIGINT, handler)
        signal.signal(signal.SIGHUP, handler)
    def __init__(self, app):
        # Each handler re-registers the next escalation stage before doing its
        # own work, so repeated signals step through first -> second -> last.
        def first_exit_handler(signal_num, _):
            def last_exit_handler(_, __):
                logger.error(self.last_warning)
                exit(-1)
            def second_exit_handler(_, __):
                logger.error(self.second_warning)
                self._register_signal_handlers(last_exit_handler)
            self._register_signal_handlers(second_exit_handler)
            signal_name = signal.Signals(signal_num).name
            logger.error(self.first_warning % signal_name)
            app.main_error_exit_handler(reason=f"Received signal {signal_name}.")
        self._register_signal_handlers(first_exit_handler)
|
"""plotlib.py: Module is used to implement various plotting functions"""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2021, Chakraborty"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, num2date
from matplotlib import patches
import matplotlib.patches as mpatches
from matplotlib.dates import date2num
import datetime as dt
import pandas as pd
class FrequencyTimePlot(object):
    """
    Create plots for spectral datasets.

    Renders one or more time/frequency pcolormesh panels (log color scale,
    log frequency axis) stacked vertically in a single figure.
    """
    def __init__(self, dates, WFR, num_subplots, fig_title="Daily Summary: {date}"):
        # dates: sequence of datetimes (first/last set the x-limits).
        # WFR: mapping with a "frequencies" array used for the y-axis.
        self.dates = dates
        self.WFR = WFR
        self.num_subplots = num_subplots
        self._num_subplots_created = 0
        fig_title = fig_title.format(date=self.dates[0].strftime("%Y-%m-%d"))
        self.fig = plt.figure(figsize=(8, 3*self.num_subplots), dpi=100) # Size for website
        plt.suptitle(fig_title, x=0.9, y=0.95, ha="right", fontweight="bold", fontsize=12)
        mpl.rcParams.update({"font.size": 10})
        return
    def addParamPlot(self, Z, title, vmax=1e-5, vmin=1e-9, steps=3, cmap = plt.cm.Spectral, xlabel="Time UT",
                    ylabel="Frequency, Hz", label=r"[$nT^2Hz^{-1}$]", ax=None, fig=None, add_colbar=True):
        """Add one spectrogram panel.

        Z is transposed before pcolormesh against meshgrid(dates, frequencies),
        so Z must be shaped (len(dates), len(frequencies)).
        NOTE(review): ``steps`` is unused; ``cmap.set_bad`` mutates the shared
        default matplotlib colormap instance — confirm that's intended.
        """
        if fig is None: fig = self.fig
        if ax is None: ax = self._add_axis()
        if vmax is None: vmax = Z.max()
        if vmin is None: vmin = Z.min()
        norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
        cmap.set_bad("w", alpha=0.0)
        # Configure axes
        ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
        hours = mdates.HourLocator(byhour=range(0, 24, 4))
        ax.xaxis.set_major_locator(hours)
        ax.set_xlabel(xlabel, fontdict={"size":12})
        ax.set_ylabel(ylabel, fontdict={"size":12})
        ax.set_xlim([self.dates[0], self.dates[-1]])
        ax.set_ylim([self.WFR["frequencies"][0], self.WFR["frequencies"][-1]])
        X, Y = np.meshgrid(self.dates, self.WFR["frequencies"])
        ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
        ax.set_yscale("log")
        if add_colbar: self._add_colorbar(fig, ax, norm, cmap, label=title+" "+label)
        ax.set_title(title, loc="left")
        return
    def save(self, filepath):
        # Write the whole figure to disk.
        self.fig.savefig(filepath, bbox_inches="tight")
    def close(self):
        # Clear and close the figure to free matplotlib resources.
        self.fig.clf()
        plt.close()
    # Private helper functions
    def _add_axis(self):
        # Add the next panel in the vertical stack.
        self._num_subplots_created += 1
        ax = self.fig.add_subplot(self.num_subplots, 1, self._num_subplots_created)
        ax.tick_params(axis="both", labelsize=12)
        return ax
    def _add_colorbar(self, fig, ax, norm, colormap, label=""):
        """
        Add a colorbar to the right of an axis.
        :param fig:
        :param ax:
        :param bounds:
        :param colormap:
        :param label:
        :return:
        """
        import matplotlib as mpl
        pos = ax.get_position()
        cpos = [pos.x1 + 0.025, pos.y0 + 0.0125,
                0.015, pos.height * 0.8] # this list defines (left, bottom, width, height)
        cax = fig.add_axes(cpos)
        cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
                                        norm=norm,
                                        spacing="uniform",
                                        orientation="vertical")
        cb2.set_label(label)
        return
def get_gridded_parameters(q, xparam="x", yparam="y", zparam="z"):
    """Grid a long-form DataFrame into meshgrid arrays for pcolormesh.

    y values are rounded to 1 decimal, duplicate (x, y) cells are averaged,
    and missing cells come back masked (pcolormesh can't handle NaN well).

    :param q: DataFrame containing at least the three named columns.
    :return: (X, Y, Z) where X/Y are meshgrid(x, y) and Z is a masked
        (len(x), len(y)) array of cell means.
    """
    # .copy() avoids chained-assignment warnings when we rewrite columns below.
    plotParamDF = q[[xparam, yparam, zparam]].copy()
    plotParamDF[yparam] = np.round(plotParamDF[yparam], 1)
    plotParamDF = plotParamDF.groupby([xparam, yparam]).mean().reset_index()
    # pandas 2.0 removed positional pivot args; index/columns are keyword-only.
    plotParamDF = plotParamDF[[xparam, yparam, zparam]].pivot(index=xparam, columns=yparam)
    x = plotParamDF.index.values
    y = plotParamDF.columns.levels[1].values
    X, Y = np.meshgrid(x, y)
    # Mask the nan values! pcolormesh can't handle them well!
    Z = np.ma.masked_where(
        np.isnan(plotParamDF[zparam].values),
        plotParamDF[zparam].values)
    return X, Y, Z
class RangeTimePlot(object):
    """
    Create plots for wave datasets.

    Renders one or more time/L-shell pcolormesh panels (log color scale)
    stacked vertically in a single figure.
    """
    def __init__(self, dates, num_subplots, fig_title="Summary: {date}"):
        # dates: sequence of datetimes; first/last define the title date range
        # and the x-limits.
        self.dates = dates
        self.num_subplots = num_subplots
        self._num_subplots_created = 0
        fig_title = fig_title.format(date=self.dates[0].strftime("%Y.%m.%d") + "-" + self.dates[-1].strftime("%m.%d"))
        self.fig = plt.figure(figsize=(8, 3*self.num_subplots), dpi=150) # Size for website
        plt.suptitle(fig_title, x=0.9, y=0.95, ha="right", fontweight="bold", fontsize=12)
        mpl.rcParams.update({"font.size": 10})
        return
    def addParamPlot(self, x, y, z, title="", vmax=1e2, vmin=1e0, steps=3, cmap = plt.cm.Spectral_r, xlabel="Time UT",
                    ylabel="L", label=r"$B_{chorus}$[pT]", ax=None, fig=None, add_colbar=True,
                    interpolate_params=None):
        """Add one panel from long-form (time, L, value) samples.

        The samples are resampled onto a regular time grid (bin maximum),
        gridded via get_gridded_parameters, and drawn with a log color norm.
        NOTE(review): ``steps`` is unused; the "1T" minute alias is deprecated
        in newer pandas ("1min").
        """
        # Mutable-default fix: the dict default is now created per call
        # (behavior unchanged for existing callers).
        if interpolate_params is None: interpolate_params = {"dt": "1T"}
        if fig is None: fig = self.fig
        if ax is None: ax = self._add_axis()
        df = pd.DataFrame()
        df["x"], df["y"], df["z"] = x, y, z
        df = df.set_index("x").resample(interpolate_params["dt"]).max().reset_index()
        df["x"] = df.x.apply(lambda k: date2num(k))
        X, Y, Z = get_gridded_parameters(df)
        # Bug fix: vmax/vmin used to default from ``Z`` *before* Z was
        # computed, raising NameError whenever either was passed as None.
        if vmax is None: vmax = Z.max()
        if vmin is None: vmin = Z.min()
        norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
        cmap.set_bad("w", alpha=0.0)
        # Configure axes
        ax.xaxis.set_major_formatter(DateFormatter(r"$%d$"))
        ax.xaxis.set_minor_formatter(DateFormatter(r"$%H^{%M}$"))
        hours = mdates.HourLocator(byhour=[12])
        ax.xaxis.set_minor_locator(hours)
        ax.set_xlabel(xlabel, fontdict={"size":12})
        ax.set_ylabel(ylabel, fontdict={"size":12})
        ax.set_xlim([self.dates[0], self.dates[-1]+dt.timedelta(1)])
        ax.set_ylim(1.5, 6.5)
        ax.pcolormesh(X, Y, Z.T, lw=4., edgecolors="None", cmap=cmap, norm=norm)
        if add_colbar: self._add_colorbar(fig, ax, norm, cmap, label=title+" "+label)
        ax.set_title(title, loc="left")
        return
    def _add_axis(self):
        # Add the next panel in the vertical stack.
        self._num_subplots_created += 1
        ax = self.fig.add_subplot(self.num_subplots, 1, self._num_subplots_created)
        ax.tick_params(axis="both", labelsize=12)
        return ax
    def _add_colorbar(self, fig, ax, norm, colormap, label=""):
        """
        Add a colorbar to the right of an axis.
        :param fig:
        :param ax:
        :param bounds:
        :param colormap:
        :param label:
        :return:
        """
        import matplotlib as mpl
        pos = ax.get_position()
        cpos = [pos.x1 + 0.025, pos.y0 + 0.0125,
                0.015, pos.height * 0.8] # this list defines (left, bottom, width, height)
        cax = fig.add_axes(cpos)
        cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
                                        norm=norm,
                                        spacing="uniform",
                                        orientation="vertical")
        cb2.set_label(label)
        return
    def close(self):
        # Clear and close the figure to free matplotlib resources.
        self.fig.clf()
        plt.close()
        return
    def save(self, filepath):
        # Write the whole figure to disk.
        self.fig.savefig(filepath, bbox_inches="tight")
        return
from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.ConvectionDiffusionApplication as ConvectionDiffusionApplication
from KratosMultiphysics.ConvectionDiffusionApplication import convection_diffusion_analysis
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as KratosUtilities
class TestCaseConfiguration(object):
    """Auxiliary class to configure the test

    This auxiliary class customizes the material properties and
    boundary conditions for each test variant.

    Public member variables:
    xmin -- Minimum x-coordinate
    xmax -- Maximum x-coordinate
    ymin -- Minimum y-coordinate
    ymax -- Maximum y-coordinate
    T_xmin -- Temperature at minimum x-coordinate
    T_xmax -- Temperature at maximum x-coordinate
    rho -- Density
    c -- Specific heat
    k -- Thermal conductivity
    ux -- Convective velocity
    source -- Source term value
    """
    def __init__(self):
        self.xmin = 0.0
        self.xmax = 10.0
        self.ymin = 0.0
        self.ymax = 1.0
        # Bug fix: T_xmin / T_xmax are advertised in the docstring and read by
        # ApplyBoundaryConditions, but were never initialized here; default
        # them so a test that forgets to set them cannot hit AttributeError.
        self.T_xmin = 0.0
        self.T_xmax = 0.0
        self.rho = 1.0
        self.c = 1.0
        self.k = 1.0
        self.ux = 0.0
        self.source = 0.0
class SourceTermTestConvectionDiffusionAnalysis(convection_diffusion_analysis.ConvectionDiffusionAnalysis):
    """Derived convection-diffusion analysis stage to set the test material properties and boundary conditions."""
    def __init__(self, model, project_parameters, test_config):
        # test_config: a TestCaseConfiguration carrying material properties,
        # boundary temperatures, velocity and source value.
        super().__init__(model, project_parameters)
        self.config = test_config
    def ModifyInitialProperties(self):
        super().ModifyInitialProperties()
        ## Set the material properties according to the test case configuration
        for node in self.model.GetModelPart("ThermalModelPart").Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DENSITY, self.config.rho)
            node.SetSolutionStepValue(KratosMultiphysics.CONDUCTIVITY, self.config.k)
            node.SetSolutionStepValue(KratosMultiphysics.SPECIFIC_HEAT, self.config.c)
    def ApplyBoundaryConditions(self):
        super().ApplyBoundaryConditions()
        # Uniform convective velocity along x only.
        velocity = KratosMultiphysics.Vector(3)
        velocity[0] = self.config.ux
        velocity[1] = 0.0
        velocity[2] = 0.0
        ## Set initial and boundary conditions according to the test configuration
        for node in self.model.GetModelPart("ThermalModelPart").Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.HEAT_FLUX, self.config.source)
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, velocity)
            # Fix the temperature on the x-min and x-max boundaries only.
            if node.X == self.config.xmin:
                node.Fix(KratosMultiphysics.TEMPERATURE)
                node.SetSolutionStepValue(KratosMultiphysics.TEMPERATURE, self.config.T_xmin)
            elif node.X == self.config.xmax:
                node.Fix(KratosMultiphysics.TEMPERATURE)
                node.SetSolutionStepValue(KratosMultiphysics.TEMPERATURE, self.config.T_xmax)
class SourceTermTest(KratosUnittest.TestCase):
    """Regression tests for the convection-diffusion source term.

    Each test variant configures boundary temperatures, velocity and source,
    runs the analysis, and compares nodal results against a CSV reference file.
    """
    def setUp(self):
        # Common settings; individual tests override config / checked_variable.
        self.domain_size = 2
        self.input_file = "source_test"
        self.theta = 1.0 # Since it is steady state, use backward Euler
        # Note: Crank-Nicolson (theta=0.5) won't converge in a single iteration (or at all, for huge dt)
        self.checked_variable = KratosMultiphysics.TEMPERATURE
        self.check_tolerance = 1e-6
        self.print_output = False
        self.print_reference_values = False
        self.calculate_reactions = False
        self.config = TestCaseConfiguration()
    def tearDown(self):
        # Remove the .time file the solver leaves behind in the work folder.
        with KratosUnittest.WorkFolderScope("SourceTermTest", __file__):
            KratosUtilities.DeleteFileIfExisting(self.input_file+'.time')
    def testPureDiffusion(self):
        # Zero velocity, uniform unit source: pure diffusion case.
        self.reference_file = "pure_diffusion"
        self.config.T_xmin = 0.0
        self.config.T_xmax = 0.0
        self.config.source = 1.0
        self.config.ux = 0.0
        # What follows is the analytical solution
        #def F(x):
        #    L = self.config.xmax - self.config.xmin
        #    return self.config.source * x * (L-x) / 2.
        #print( [ F(float(i)) for i in range(0, 10) ] )
        self.testSourceTerm()
    def testDiffusionDominated(self):
        # Small velocity: diffusion-dominated transport.
        self.reference_file = "diffusion_dominated"
        self.config.T_xmin = 0.0
        self.config.T_xmax = 0.0
        self.config.ux = 0.1
        self.config.source = self.config.ux * self.config.rho / (self.config.xmax - self.config.xmin)
        # What follows is the analytical solution
        #def F(x):
        #    import math
        #    L = self.config.xmax - self.config.xmin
        #    a = self.config.rho*self.config.ux / self.config.k
        #    return x / L - (1.0 - math.exp( a*x ) ) / ( 1.0 - math.exp( a*L ) )
        #print( [ F(float(i)) for i in range(0, 10) ] )
        self.testSourceTerm()
    def testConvectionDominated(self):
        # Larger velocity: convection-dominated transport.
        self.reference_file = "convection_dominated"
        self.config.T_xmin = 0.0
        self.config.T_xmax = 0.0
        self.config.ux = 2.0
        self.config.source = self.config.ux * self.config.rho / (self.config.xmax - self.config.xmin)
        # What follows is the analytical solution
        #def F(x):
        #    import math
        #    L = self.config.xmax - self.config.xmin
        #    a = self.config.rho*self.config.ux / self.config.k
        #    return x / L - (1.0 - math.exp( a*x ) ) / ( 1.0 - math.exp( a*L ) )
        #print( [ F(float(i)) for i in range(0, 10) ] )
        self.testSourceTerm()
    def testReaction(self):
        # Same setup as pure diffusion, but checks reaction fluxes instead.
        self.reference_file = "reaction_test"
        self.config.T_xmin = 0.0
        self.config.T_xmax = 0.0
        self.config.source = 1.0
        self.config.ux = 0.0
        self.checked_variable = KratosMultiphysics.REACTION_FLUX
        self.calculate_reactions = True
        self.testSourceTerm()
    def testSourceTerm(self):
        # Shared driver: run the analysis with the current config and verify.
        with KratosUnittest.WorkFolderScope("SourceTermTest", __file__):
            ## Set up the custom source term input
            self.model = KratosMultiphysics.Model()
            with open("SourceTermTestProjectParameters.json",'r') as parameter_file:
                parameters = KratosMultiphysics.Parameters(parameter_file.read())
            parameters["solver_settings"]["compute_reactions"].SetBool(self.calculate_reactions)
            parameters["solver_settings"]["transient_parameters"]["theta"].SetDouble(self.theta)
            ## Run test
            self.source_term_analysis = SourceTermTestConvectionDiffusionAnalysis(self.model, parameters, self.config)
            self.source_term_analysis.Run()
            ## Check results
            self.checkResults()
            ## If required, print output
            if self.print_output:
                self.printOutput()
    def checkResults(self):
        # Either regenerate the reference CSV (print_reference_values) or
        # compare nodal values against it line by line.
        model_part = self.model.GetModelPart("ThermalModelPart")
        if self.print_reference_values:
            with open(self.reference_file+'.csv','w') as ref_file:
                ref_file.write("#ID, {0}\n".format(self.checked_variable.Name()))
                for node in model_part.Nodes:
                    value = node.GetSolutionStepValue(self.checked_variable,0)
                    ref_file.write("{0}, {1}\n".format(node.Id, value))
        else:
            with open(self.reference_file+'.csv','r') as reference_file:
                reference_file.readline() # skip header
                line = reference_file.readline()
                for node in model_part.Nodes:
                    values = [ float(i) for i in line.rstrip('\n ').split(',') ]
                    node_id = values[0]
                    reference_value = values[1]
                    value = node.GetSolutionStepValue(self.checked_variable)
                    self.assertAlmostEqual(reference_value, value, delta=self.check_tolerance)
                    line = reference_file.readline()
                if line != '': # If we did not reach the end of the reference file
                    self.fail("The number of nodes in the mdpa is smaller than the number of nodes in the output file")
    def printOutput(self):
        # Dump the mesh and nodal results to a GiD post file for inspection.
        gid_mode = KratosMultiphysics.GiDPostMode.GiD_PostBinary
        multifile = KratosMultiphysics.MultiFileFlag.SingleFile
        deformed_mesh_flag = KratosMultiphysics.WriteDeformedMeshFlag.WriteUndeformed
        write_conditions =KratosMultiphysics. WriteConditionsFlag.WriteElementsOnly
        gid_io = KratosMultiphysics.GidIO(self.input_file, gid_mode, multifile, deformed_mesh_flag, write_conditions)
        mesh_name = 0.0
        model_part = self.model.GetModelPart("ThermalModelPart")
        gid_io.InitializeMesh(mesh_name)
        gid_io.WriteMesh(model_part.GetMesh())
        gid_io.FinalizeMesh()
        gid_io.InitializeResults(mesh_name, model_part.GetMesh())
        label = model_part.ProcessInfo[KratosMultiphysics.TIME]
        gid_io.WriteNodalResults(KratosMultiphysics.VELOCITY, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.TEMPERATURE, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.DENSITY, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.CONDUCTIVITY, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.SPECIFIC_HEAT, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.HEAT_FLUX, model_part.Nodes,label,0)
        gid_io.WriteNodalResults(KratosMultiphysics.REACTION_FLUX, model_part.Nodes,label,0)
        gid_io.FinalizeResults()
if __name__ == '__main__':
    # Manual (non-runner) invocation: run a single test variant with GiD
    # output enabled. Swap the commented calls to run a different variant.
    a = SourceTermTest()
    a.setUp()
    a.print_reference_values = False
    a.print_output = True
    #a.testPureDiffusion()
    #a.testConvectionDominated()
    #a.testDiffusionDominated()
    a.testReaction()
    a.tearDown()
|
import networkx as nx
import eigenvector
from p9base import *
import math
# Ordering-mode constants consumed by Graph.SetNodeOrder / Graph.SetEdgeOrder.
EIGENVECTOR_CENTRALITY = 1
EIGENVECTOR_CENTRALITY_REV = 2
EDGE_CENTRALITY = 3
class Graph(object):
    """Base wrapper around a networkx graph (Python 2 / networkx 1.x API).

    Subclasses must assign ``self.graph``. Provides centrality-based node/edge
    ordering and a simulated-annealing grid layout.
    """
    def __init__(self):
        # Ordered node/edge lists; populated by SetNodeOrder / SetEdgeOrder.
        self.nodeorder = None
        self.edgeorder = None
    def Order(self):
        # Number of nodes.
        return self.graph.order()
    def Size(self):
        # Number of edges.
        return self.graph.size()
    def SetNodeOrder(self, order):
        # Sort nodes by eigenvector centrality (requires
        # CalcEigenvectorCentrality to have run first).
        self.nodeorder = self.graph.nodes()
        if order == EIGENVECTOR_CENTRALITY:
            self.nodeorder.sort(key = lambda node: self.graph.node[node]["eigcent"], reverse = False)
        elif order == EIGENVECTOR_CENTRALITY_REV:
            self.nodeorder.sort(key = lambda node: self.graph.node[node]["eigcent"], reverse = True)
#        print [math.log(self.graph.node[node]["eigcent"]) for node in self.nodeorder]
    def GetNodes(self):
        return self.nodeorder
    def GetEdges(self):
        return self.edgeorder
    def SetEdgeOrder(self, order):
        # Sort edges by the precomputed "edgeweight" attribute.
        self.edgeorder = self.graph.edges()
        if order == EDGE_CENTRALITY:
            self.edgeorder.sort(key = lambda edge: self.graph.edge[edge[0]][edge[1]]["edgeweight"], reverse = False)
    def NodePos(self):
        # Node (x, y) positions, in node order.
        assert(self.nodeorder is not None)
        return [vector2(self.graph.node[node]["x"], self.graph.node[node]["y"]) for node in self.nodeorder]
    def EdgePos0(self):
        # Positions of each edge's first endpoint, in edge order.
        assert(self.edgeorder is not None)
        return [vector2(self.graph.node[edge[0]]["x"], self.graph.node[edge[0]]["y"]) for edge in self.edgeorder]
    def EdgePos1(self):
        # Positions of each edge's second endpoint, in edge order.
        assert(self.edgeorder is not None)
        return [vector2(self.graph.node[edge[1]]["x"], self.graph.node[edge[1]]["y"]) for edge in self.edgeorder]
    def NodeCentrality(self):
        assert(self.nodeorder is not None)
        return [self.graph.node[node]["eigcent"] for node in self.nodeorder]
    def EdgeImportance(self):
        assert(self.edgeorder is not None)
        return [self.graph.edge[edge[0]][edge[1]]["edgeweight"] for edge in self.edgeorder]
    def NodeType(self):
        # Optional per-node "type" attribute (None when absent).
        assert(self.nodeorder is not None)
        return [self.graph.node[node].get("type", None) for node in self.nodeorder]
    def CalcEigenvectorCentrality(self):
        # Store each node's eigenvector centrality as the "eigcent" attribute.
        centrality = eigenvector.centrality(self.graph)
        total = 0
        for node in centrality.items():
            self.graph.node[node[0]]["eigcent"] = node[1]
            total += node[1]
#        print "eigenvector centrality total: ", total
    def CalcEdgeCentrality(self):
        # Edge weight = product of endpoint centralities ("eigcent" must exist).
        heaviest = 0
        for edge in self.graph.edges():
            weight = self.graph.node[edge[0]]["eigcent"]*self.graph.node[edge[1]]["eigcent"]
            self.graph.edge[edge[0]][edge[1]]["edgeweight"] = weight
#            if weight > heaviest:
#                print weight
#                heaviest = weight
    def GridAnnealedLayout(self, xdim, ydim, iterations, costfactor, start_threshold, data_filename, use_saved_data = False):
        """Lay nodes out on an xdim*ydim grid by simulated annealing.

        Edge cost grows with Manhattan length, scaled by edge weight when
        costfactor > 0. Either anneals and saves the result to data_filename,
        or loads a previously saved layout.
        """
        assert(self.graph.order() <= xdim*ydim)
        def cost_per_length(edge_weight):
            # if costfactor is 0, cost is 1 for all weights of edges
            # if costfactor is 1, cost is proportional to weight of edge
            return 1 + costfactor*(10*edge_weight - 1)
        def calc_node_costs():
            # Cache each node's current cost as the "nodecost" attribute.
            for node in self.graph.nodes():
                self.graph.node[node]["nodecost"] = node_cost(node)
        def randomlayout(grid):
            # Shuffle all grid cells and assign one per node (seeded RNG).
            poslist = [(x, y) for x in range(xdim) for y in range(ydim)]
            RGen(1).shuffle(poslist)
            for enumnode in enumerate(self.graph.nodes()):
                x = poslist[enumnode[0]][0]
                y = poslist[enumnode[0]][1]
                self.graph.node[enumnode[1]]["x"] = x
                self.graph.node[enumnode[1]]["y"] = y
                grid[gridkey(x, y)] = enumnode[1]
        def gridkey(x, y):
            # Grid cells are keyed by "x,y" strings.
            return str(x) + "," + str(y)
        def anneal(grid, iterations):
            # Repeatedly propose moving a random node a shrinking Manhattan
            # distance; accept when the energy change is below a threshold
            # that decays linearly to zero.
            rgen = RGen(2)
            nodes = self.graph.nodes()
            numnodes = self.graph.order()
            for i in range(iterations):
                threshold = (1 - (float(i)/float(iterations)))*start_threshold
                thisnode = nodes[rgen.randrange(numnodes)]
                thisx = self.graph.node[thisnode]["x"]
                thisy = self.graph.node[thisnode]["y"]
#                destx = rgen.randrange(xdim)
#                desty = rgen.randrange(ydim)
                dist = int((iterations - i)*(xdim + ydim)/(iterations*4))
                dist = max(dist, 2)
                x = rgen.randrange(-dist, dist)
                y = dist - abs(x)
                if rgen.randrange(2) == 0:
                    y = -y
                destx = thisx + x
                desty = thisy + y
                destx = max(0, min(destx, xdim - 1))
                desty = max(0, min(desty, ydim - 1))
#                print "x, y ", x, y, " destx, desty ", destx, desty
                ediff = energy_diff(thisnode, destx, desty)
                destkey = gridkey(destx, desty)
                swapping = False
                reallyswapping = False
                if destkey in grid:
                    # Destination occupied: evaluate a swap instead of a move.
                    swapping = True
                    destnode = grid[destkey]
                    ediff += energy_diff(destnode, thisx, thisy)
                if ediff < threshold:
                    # NOTE(review): for a non-swap move the old grid cell is
                    # not cleared, leaving a stale entry at (thisx, thisy) —
                    # confirm whether that is intentional.
                    reallyswapping = True
                    self.graph.node[thisnode]["x"] = destx
                    self.graph.node[thisnode]["y"] = desty
                    grid[destkey] = thisnode
                    if swapping:
                        self.graph.node[destnode]["x"] = thisx
                        self.graph.node[destnode]["y"] = thisy
                        grid[gridkey(thisx, thisy)] = destnode
                    self.graph.node[thisnode]["nodecost"] = node_cost(thisnode)
                    if swapping:
                        self.graph.node[destnode]["nodecost"] = node_cost(destnode)
#                print "iteration ", i, " energy_diff ", ediff, " threshold ", threshold, " swapping ", reallyswapping
        def edge_length(edge):
            # Manhattan length of an edge.
            n0 = self.graph.node[edge[0]]
            n1 = self.graph.node[edge[1]]
            dx = n1["x"] - n0["x"]
            dy = n1["y"] - n0["y"]
            return abs(dx) + abs(dy)
        def edge_weight(edge):
            return self.graph.edge[edge[0]][edge[1]]["edgeweight"]
        # bug: switch with neighbors won't be calculated properly
        def node_cost(node, pos = None):
            # Sum of weighted Manhattan distances to all neighbors, evaluated
            # either at the node's current position or at a hypothetical pos.
            if pos is not None:
                x0, y0 = pos[0], pos[1]
            else:
                x0 = self.graph.node[node]["x"]
                y0 = self.graph.node[node]["y"]
            cost = 0
            for neighbor in self.graph.neighbors(node):
                x1 = self.graph.node[neighbor]["x"]
                y1 = self.graph.node[neighbor]["y"]
                d = abs(x1 - x0) + abs(y1 - y0)
                cost += d*cost_per_length(self.graph.edge[node][neighbor]["edgeweight"])
            return cost
        def energy_diff(node, destx, desty):
            # Cost at the proposed position minus the cached current cost.
            return node_cost(node, (destx, desty)) - self.graph.node[node]["nodecost"]
        def totalcost():
#            for edge in self.graph.edges():
#                print "edgelength:", edge_length(edge), " cost_per_length:", cost_per_length(edge_weight(edge))
            return sum(edge_length(edge)*cost_per_length(edge_weight(edge)) for edge in self.graph.edges())
        def save_data(filename):
            # Persist one "x y" line per node, in nodes() order.
            f = open(filename, "w")
            for node in self.graph.nodes():
                f.write(str(self.graph.node[node]["x"]) + " " + str(self.graph.node[node]["y"]) + "\n")
            f.close()
        def load_data(filename):
            # Read positions back in the same nodes() order they were saved.
            f = open(filename, "r")
            for node in self.graph.nodes():
                line = f.readline()
                tokens = line.split()
                self.graph.node[node]["x"] = int(tokens[0])
                self.graph.node[node]["y"] = int(tokens[1])
            f.close()
        print "calculating centrality"
        self.CalcEigenvectorCentrality()
        self.CalcEdgeCentrality()
        grid = {}
        randomlayout(grid)
        calc_node_costs()
        print "totalcost: ", totalcost()
        if use_saved_data:
            print "loading ", data_filename
            load_data(data_filename)
        else:
            print "annealing with ", iterations, "iterations"
            anneal(grid, iterations)
            print "saving to ", data_filename
            save_data(data_filename)
        print "totalcost: ", totalcost()
class ASGraph(Graph):
    """Graph loaded from an AS-relationship data file.

    Edge lines start with "D" or "I"; endpoints containing "_" are skipped.
    Only the largest connected component is kept. An optional tab-separated
    attribute file adds per-node "name" and "type".
    """
    def __init__(self, filename, attrfilename = None):
        super(ASGraph, self).__init__()
        asdatafile = open(filename)
        self.graph = nx.Graph()
        for line in asdatafile:
            tokens = line.split()
            if tokens[0] == "D" or tokens[0] == "I":
                if "_" in tokens[1] or "_" in tokens[2]:
                    continue
                self.graph.add_edge(tokens[1], tokens[2])
        print "connected: ", nx.is_connected(self.graph)
        if not nx.is_connected(self.graph):
            # Drop every component except the largest.
            sorted_components = sorted(nx.connected_components(self.graph), key=len, reverse=True)
            for comp in sorted_components[1:]:
                for node in comp:
                    self.graph.remove_node(node)
#        print self.graph.node
        if attrfilename is not None:
            # Tab-separated attributes: column 1 is the node id, column 2 the
            # name, column 8 the type.
            attrdatafile = open(attrfilename)
            for line in attrdatafile:
                tokens = [token.strip() for token in line.split('\t')]
#                print tokens
                if tokens[0] in self.graph.node:
                    self.graph.node[tokens[0]]["name"] = tokens[1]
                    self.graph.node[tokens[0]]["type"] = tokens[7]
#                    print self.graph.node[token[0]]
        print "loaded AS graph: ", filename
        print "order: ", self.graph.order(), " size: ", self.graph.size()
class TestGraph(Graph):
    """Small synthetic graph (complete graph on 60 nodes) for testing."""
    def __init__(self):
        super(TestGraph, self).__init__()
        self.graph = nx.complete_graph(60)
        print "order: ", self.graph.order(), " size: ", self.graph.size()
class SubdivisionGrid(object):
    """Base class for occupancy grids; subclasses supply ``self.grid``, a
    ``nearby_points`` generator and ``point_distance``."""
    def find_nearest_open_point(self, pos):
        # Walk outward from pos until the first unoccupied cell, then examine
        # ``extrapoints`` further candidates and return the closest open one.
        npiter = self.nearby_points(pos)
        candidates = []
        key, extrapoints = npiter.next()
        while key in self.grid:
            key, extrapoints = npiter.next()
        candidates.append(key)
#        print "extrapoints: ", extrapoints
        for i in range(extrapoints):
            key, extrapoints = npiter.next()
            if key not in self.grid:
                candidates.append(key)
        candidate_distances = [self.point_distance(key, pos) for key in candidates]
        closest = candidate_distances.index(min(candidate_distances))
        return candidates[closest]
#    def mark_covered_spots(self, key, size):
#        npiter = self.nearby_points(pos)
#        candidates = []
#        key, extrapoints = npiter.next()
#        while key in self.grid:
#            key, extrapoints = npiter.next()
class SquareSubdivisionGrid(SubdivisionGrid):
    """Square occupancy grid that can be built as a finer subdivision of a
    parent grid (each parent cell maps to a numsubs-times-finer cell)."""
    def __init__(self, parentgrid = None, numsubs = 2):
        assert(numsubs >= 2)
        self.grid = {}
        if parentgrid is None:
            self.TotalSubs = 1
        else:
            # Carry parent occupants over at the scaled-up coordinates.
            self.TotalSubs = parentgrid.TotalSubs*numsubs
            for item in parentgrid.grid.iteritems():
                self.grid[(item[0][0]*numsubs, item[0][1]*numsubs)] = item[1]
    def nearby_points(self, pos):
        # Generator of integer grid points in rings of increasing Manhattan
        # distance d around pos; each yield also reports how many further
        # candidates remain worth examining after the first open cell.
        x, y = int(pos.x), int(pos.y)
        yield (x, y), 0
        d = 1
        while True:
            extrarounds = math.ceil(d*.29 + 2)
            extrapoints = int(extrarounds*4*(d + extrarounds/2))
            yield (x + d, y), extrapoints
            yield (x - d, y), extrapoints
            yield (x, y + d), extrapoints
            yield (x, y - d), extrapoints
            for i in range(1, d):
                yield (x + (d - i), y + i), extrapoints
                yield (x - (d - i), y + i), extrapoints
                yield (x + (d - i), y - i), extrapoints
                yield (x - (d - i), y - i), extrapoints
            d += 1
    def point_distance(self, key, pos):
        # Euclidean distance from a grid cell key to a vector2 position.
        keyvec = vector2(float(key[0]), float(key[1]))
        diff = keyvec - pos
        return diff.length()
    def AddNode(self, pos, node, size = None):
        # Place node at the nearest open cell to pos (scaled to this grid's
        # resolution); returns (TotalSubs, cell key) as the stored position.
        subdivided_pos = pos.scale(self.TotalSubs)
        key = self.find_nearest_open_point(subdivided_pos)
        self.grid[key] = node
#        if size is not None:
#            if size > 0:
#                self.mark_covered_spots(key, size)
        return (self.TotalSubs, key)
    def NodePos(self, pos):
        # Convert a stored (TotalSubs, key) position back to unit coordinates.
        return vector2(float(pos[1][0])/pos[0], float(pos[1][1])/pos[0])
    def LayerNum(self, pos):
        # Subdivision depth of a stored position (log2 of its TotalSubs).
        return math.log(pos[0], 2)
    def PosName(self):
        # Node-attribute name under which positions are stored.
        return "cgpos"
    def GetBBMax(self):
        # Largest coordinate magnitude in unit coordinates.
        # NOTE(review): abs() is used for the comparison but the signed value
        # is assigned — a negative extreme yields a negative result; confirm.
        maxval = 0
        for key in self.grid.keys():
            if abs(key[0]) > maxval:
                maxval = key[0]
            if abs(key[1]) > maxval:
                maxval = key[1]
        return float(maxval)/float(self.TotalSubs)
# max radius of node, expressed as a fraction of grid spacing
MAX_NODE_RADIUS = 0.1
# Drawing offset (currently effectively zero). NOTE(review): its consumer is
# not visible in this chunk.
OffsetVec = vector2(0, -.0)
class CentralityGraph(object):
def __init__(self, graph, maxbbdim = None, scale = None, logbase = 2.71828183):
self.graph = graph
graph.CalcEigenvectorCentrality()
graph.CalcEdgeCentrality()
graph.SetNodeOrder(EIGENVECTOR_CENTRALITY_REV)
nodes = graph.GetNodes()
cents = graph.NodeCentrality()
logs = [math.log(cent, logbase) for cent in cents]
startlog = logs[0]
loggrouping = [[] for i in range(30)]
# print list(enumerate(logs))
for logitem in enumerate(logs):
loggrouping[int(math.floor(startlog - logitem[1]))].append(nodes[logitem[0]])
self.grid = SquareSubdivisionGrid()
for loggroup in loggrouping:
for node in loggroup:
pos = vector2(0, 0)
totalcent = 0.0
for neighbor in self.graph.graph.neighbors(node):
# print "thisnode: ", node, " neighbor: ", neighbor
if nodes.index(neighbor) < nodes.index(node):
neighborcent = self.graph.graph.node[neighbor]["eigcent"]
# neighborcent = 1
totalcent += neighborcent
pos += self.node_pos(neighbor).scale(neighborcent)
if totalcent != 0.0:
pos = pos.scale(1.0/totalcent)
self.graph.graph.node[node][self.grid.PosName()] = self.grid.AddNode(pos, node, math.sqrt(self.graph.graph.node[neighbor]["eigcent"])*24)
# print "added node: ", node
self.grid = SquareSubdivisionGrid(self.grid, 2)
self.SetScale(maxbbdim, scale)
# self.WriteIndexFile("../indexfile.txt", nodes)
def writeData(self, filename):
f = open(filename, 'w')
f.write(str(self.graph.Order()) + " " + str(self.graph.Size()) + "\n")
for node in self.graph.GetNodes():
cent = self.graph.graph.node[node]["eigcent"]
pos = self.node_pos(node)
f.write(node + " " + str(cent) + " " + str(pos.x) + " " + str(pos.y) + "\n")
self.graph.SetEdgeOrder(EDGE_CENTRALITY)
for edge in self.graph.GetEdges():
f.write(edge[0] + " " + edge[1] + "\n")
f.close
def WriteIndexFile(self, filename, nodes):
f = open(filename, 'w')
for node in nodes:
# f.write(str(self.graph.graph.node[node]["cgpos"]).ljust(16))
f.write(self.IndexString(self.graph.graph.node[node]["cgpos"]))
f.write("\t")
# f.write(node.ljust(12)[:12])
# f.write("\t")
name = self.graph.graph.node[node].get("name", "")
f.write(name.ljust(100)[:100])
f.write('\n')
f.close()
def IndexString(self, cgpos):
x = float(cgpos[1][0])/cgpos[0]
y = float(cgpos[1][1])/cgpos[0]
x *= 8
y *= 8
x = x + 8
y = 13 - y
xlet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[int(math.floor(x))]
xfrac = x - math.floor(x)
# return xlet + str(xfrac)[1:].ljust(9)[:9] + str(y + 1).ljust(10)[:10]
return xlet + "\t" + str(int(math.floor(y)))
# return xlet + str(xfrac)[1:].ljust(9)[:9] + str(y).ljust(10)[:10]
def SetScale(self, maxbbdim, scale):
if maxbbdim is not None:
bbmax = self.grid.GetBBMax()
self.scale = maxbbdim/bbmax
print "BBMAX = ", bbmax
print "maxbbdim = ", maxbbdim
elif scale is not None:
self.scale = scale
print "scale = ", self.scale
def node_pos(self, node):
return self.grid.NodePos(self.graph.graph.node[node][self.grid.PosName()])
def node_pos_xformed(self, node):
pos = self.graph.graph.node[node][self.grid.PosName()]
return self.grid.NodePos(pos) + OffsetVec.scale(self.grid.LayerNum(pos))
def NodePos(self):
nodes = self.graph.GetNodes()
return [self.node_pos_xformed(node).scale(self.scale) for node in nodes]
# keys = [self.graph.graph.node[node]["cgpos"] for node in nodes]
# return [vector2(float(key[1])/key[0], float(key[2])/key[0]).scale(self.scale) for key in keys]
def EdgePos0(self):
edges = self.graph.GetEdges()
return [self.node_pos_xformed(edge[0]).scale(self.scale) for edge in edges]
# keys = [self.graph.graph.node[edge[0]]["cgpos"] for edge in edges]
# return [vector2(float(key[1])/key[0], float(key[2])/key[0]).scale(self.scale) for key in keys]
def EdgePos1(self):
edges = self.graph.GetEdges()
return [self.node_pos_xformed(edge[1]).scale(self.scale) for edge in edges]
# return [self.grid.NodePos(self.graph.graph.node[edge[1]][self.grid.PosName()]) for edge in edges]
# keys = [self.graph.graph.node[edge[1]]["cgpos"] for edge in edges]
# return [vector2(float(key[1])/key[0], float(key[2])/key[0]).scale(self.scale) for key in keys]
import os
dir = os.listdir("data")
print "=============="
for filename in dir:
print "Input filename: " + filename
if filename[0:5] == "cycle":
outfilename = filename[28:32]
elif filename[0:7] == "skitter":
outfilename = filename[17:21]
else:
print "Invalid file, skipping"
continue
outfilename += ".txt"
print "Printing to " + outfilename
asg = ASGraph("data/" + filename)
cg = CentralityGraph(asg, scale = 4*inches(1.714286), logbase = 2)
cg.writeData("out/" + outfilename);
# asg = ASGraph("../../data/cycle-aslinks.l7.t1.c002162.20120916.txt")
# cg = CentralityGraph(asg, scale = 4*inches(1.714286), logbase = 2)
# cg.writeData("../assets/realdata.txt");
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.