content stringlengths 5 1.05M |
|---|
# Masters Research Project 2017
# Kenneth Young
# Data Processing for the SUALBSP-2
# This file contains:
# -Functions to process the results
# Packages
from __future__ import division
import os, sys, pdb, csv
import numpy as np
#----------------------------------------------------------------------------------------#
# IMPORTING FILES
def import_results(ResultsDir, Model, SPsolver, classNum, alpha, creator, SearchAbrv):
    """Read one results file and return its rows as a list of dicts.

    The filename is assembled from the experiment parameters.  Returns an
    empty list when the file does not exist, so callers can treat "no
    results" and "empty results" uniformly (callers do list(results), which
    works on both the old DictReader return and this list).
    """
    ResultsFilename = '{}results_{}_SP{}_class{}_alpha{}_{}_{}.txt'.format(
        ResultsDir, Model, SPsolver, classNum, alpha, creator, SearchAbrv)
    try:
        # Read inside a context manager so the handle is closed
        # deterministically -- the original leaked the handle returned by
        # open() into the DictReader.
        with open(ResultsFilename) as results_file:
            results = list(csv.DictReader(results_file))
    except IOError:
        results = []
    return results
def store_all_instances_dict(ResultsDir, Model, SPsolver, classNum, alphaList, CreatorList, SearchAbvrList):
    """Collect the results for every (alpha, creator, search) combination.

    Returns a nested dict keyed alpha -> creator -> search abbreviation; each
    leaf is a one-element list wrapping the rows read from disk for that
    combination.
    """
    ResultsArray = {}
    for alpha in alphaList:
        ResultsArray[alpha] = {}
        for creator in CreatorList:
            per_search = {}
            for SearchAbrv in SearchAbvrList:
                rows = list(import_results(ResultsDir, Model, SPsolver,
                                           classNum, alpha, creator, SearchAbrv))
                # leaf is a one-element list, matching the original append
                # onto a fresh empty list
                per_search[SearchAbrv] = [rows]
            ResultsArray[alpha][creator] = per_search
    return ResultsArray
#----------------------------------------------------------------------------------------#
# SUPPORT FUNCTIONALITY
def define_lowercase_SP_solver(SPsolver, CPmodeltype):
    """Map a sub-problem solver name (and CP model variant) to its lowercase
    filename tag.

    'MIP' -> 'mip'; 'CP2'/'CP3' -> 'cp2' or 'cp3' depending on CPmodeltype;
    anything else -> 'na'.  The original raised UnboundLocalError when a CP
    solver was paired with an unrecognised CPmodeltype; that case now also
    falls back to 'na'.
    """
    if SPsolver == 'MIP':
        return 'mip'
    if SPsolver in ('CP2', 'CP3'):
        if CPmodeltype == 2:
            return 'cp2'
        if CPmodeltype == 3:
            return 'cp3'
    return 'na'
def define_creator_list(classNum):
    """Return the list of instance-creator names belonging to a size class.

    Raises ValueError for an unknown class number (the original died with an
    UnboundLocalError instead).
    """
    creators_by_class = {
        1: ['mertens', 'bowman8', 'jaeschke', 'jackson', 'mansoor', 'mitchell'],
        2: ['roszieg', 'heskia', 'buxey', 'sawyer30'],
        3: ['lutz1', 'gunther', 'kilbrid', 'hahn', 'warnecke'],
        4: ['tonge', 'wee-mag', 'arc83'],
        5: ['lutz2', 'lutz3', 'mukherje'],
        6: ['arc111', 'barthold', 'barthol2'],
        7: ['scholl'],
    }
    try:
        return creators_by_class[classNum]
    except KeyError:
        raise ValueError('unknown instance class: {}'.format(classNum))
def write_tex_file_preamble(TableFile):
    """Write the LaTeX document preamble (packages, page layout, headers).

    Emitted text is unchanged; the literal is now a raw string so sequences
    such as ``\\headsep`` and ``\\parskip`` no longer rely on Python's
    invalid-escape-sequence leniency (a SyntaxWarning in modern Python).
    """
    # NOTE(review): the lone "{}" line below appears in the original output
    # too -- presumably a leftover from a .format() call.  Kept so the
    # emitted file stays byte-identical; TODO confirm it can be removed.
    TableFile.write(r"""
\documentclass[12pt]{article}
{}
\usepackage{latexsym,amssymb,amsmath,epsfig,amsfonts,graphicx,url,fancyhdr,todonotes,verbatim}
\usepackage{float}
\usepackage{chngpage}
\usepackage{booktabs}
\setlength{\topmargin}{-10pt}
\setlength{\headsep}{20pt}
\setlength{\headheight}{14.5pt}
\setlength{\textheight}{600pt}
\setlength{\oddsidemargin}{0pt}
\setlength{\evensidemargin}{0pt}
\setlength{\textwidth}{460pt}
\setlength{\parskip}{.30cm}
\parskip=10pt
\interdisplaylinepenalty=1000
\pagestyle{fancy}
\fancyhf{}
\rhead{Kenneth Young}
\lhead{Master Research Project - SUALBSP-2}
\cfoot{\thepage}
\begin{document}
""")
def write_tex_file_post(TableFile):
    """Terminate the LaTeX document."""
    closing = "\n\\end{document}"
    TableFile.write(closing)
def write_tex_table_benders_preamble(TableFile, SPsolver, CPmodeltype, cutType):
    """Open a Benders results table: environment, caption, and column spec."""
    TableFile.write("""
\\begin{table}[tpb]
\\scriptsize
\\begin{adjustwidth}{-.9in}{-.9in}
""")
    if SPsolver == 'MIP':
        caption = "\\caption{spsolver=%s, cut=%s}" % (SPsolver, cutType)
    else:
        # CP sub-problem solvers also report which CP model variant was used
        caption = "\\caption{spsolver=%s-%s, cut=%s}" % (SPsolver, CPmodeltype, cutType)
    TableFile.write(caption)
    TableFile.write("""
\\centering
\\vspace{2mm}
\\begin{tabular}{ccl|rrrrrrrrrrr}
\\toprule
""")
def write_tex_table_benders_CP_preamble(TableFile, SPsolver, CPmodeltype, cutType):
    """Open a CP-search-comparison Benders table (two leading columns
    instead of three)."""
    TableFile.write("""
\\begin{table}[tpb]
\\scriptsize
\\begin{adjustwidth}{-.9in}{-.9in}
""")
    if SPsolver == 'MIP':
        caption = "\\caption{spsolver=%s, cut=%s}" % (SPsolver, cutType)
    else:
        # CP sub-problem solvers also report which CP model variant was used
        caption = "\\caption{spsolver=%s-%s, cut=%s}" % (SPsolver, CPmodeltype, cutType)
    TableFile.write(caption)
    TableFile.write("""
\\centering
\\vspace{2mm}
\\begin{tabular}{cc|rrrrrrrrrrr}
\\toprule
""")
def write_tex_table_benders_post(TableFile):
    """Close the tabular, adjustwidth, and table environments."""
    for line in ("\\bottomrule", "\\end{tabular}", "\\end{adjustwidth}", "\\end{table}"):
        TableFile.write("\n" + line)
    TableFile.write("\n")
def write_tex_table_benders_header_row_simple(TableFile):
    # Column header row for the Benders (MIP sub-problem) tables.  The
    # literal mixes LaTeX escapes (\# stays \#, \\% becomes \%) and ends in
    # \\\midrule, so it is left byte-for-byte untouched.
    TableFile.write("""
class & alpha & creator & \#nodes & iters & \#cuts & cuts & \\%gap & \#no sol & \#opt & \\%opt & RMP.rt & SP.rt & rt(s) \\\\\\midrule
""")
def write_tex_table_benders_CP_header_row_simple(TableFile):
    # Header row for the CP-search-comparison tables: leads with the search
    # strategy and class instead of class/alpha/creator.  Escape-heavy and
    # whitespace-sensitive, so the literal is left byte-for-byte untouched.
    TableFile.write("""
search & class & \#CPnodes & iters & \#cuts & cuts & \\%gap & \#no sol & \#opt & \\%opt & RMP.rt & SP.rt & rt(s) \\\\\\midrule\n""")
def write_tex_table_mip_preamble(TableFile, mipModel):
    """Open a results table for the pure MIP model *mipModel*."""
    TableFile.write(
        "\n\\begin{table}[tpb]"
        "\n\\scriptsize"
        "\n\\begin{adjustwidth}{-.9in}{-.9in}"
        "\n\\caption{MIP-model=%s}" % (mipModel,)
    )
    TableFile.write(
        "\n\\centering"
        "\n\\vspace{2mm}"
        "\n\\begin{tabular}{ccl|rrrrrr}"
        "\n\\toprule\n"
    )
def write_tex_table_mip_header_row_simple(TableFile):
    # Column header row for the pure MIP tables (no cut/iteration columns).
    # Escape-heavy literal -- left byte-for-byte untouched.
    TableFile.write("""
class & alpha & creator & \#nodes & \\%gap & \#no sol & \#opt & \\%opt & rt(s) \\\\\\midrule
""")
def write_tex_table_mip_post(TableFile):
    """Close the tabular, adjustwidth, and table environments of a MIP table."""
    footer = ["", "\\bottomrule", "\\end{tabular}", "\\end{adjustwidth}", "\\end{table}", ""]
    TableFile.write("\n".join(footer))
#----------------------------------------------------------------------------------------#
# SUMMARIZING RESULTS
# class storing values for the row output
class RowTotal():
    """Mutable accumulator for one table row, or for a class's running
    summary (the ``rSum`` role).

    All attributes are public and mutated freely by the row-writing
    functions; everything starts at zero.
    """

    # scalar accumulators, all initialised to zero
    _ZEROED_ATTRS = (
        'TotalNodes', 'TotalSPNodes', 'TotalIters', 'TotalCuts',
        'TotalPercGap', 'TotalFeas', 'TotalSubOptimal', 'TotalOpt',
        'TotalRMPruntime', 'TotalSPruntime', 'TotalRuntime',
        # rSum specific attributes
        'counter', 'TotalNoSol', 'TotalPercOpt', 'TotalNumInst',
    )

    def __init__(self):
        for attr in self._ZEROED_ATTRS:
            setattr(self, attr, 0)
        # one bucket per Benders cut family (NG, GUB, IC, IC2, IC3, LC);
        # fresh list per instance so accumulators never share state
        self.TCutList = [0, 0, 0, 0, 0, 0]
# functions to write types of rows
def write_benders_row_simple_averages(TableFile,ResultsList,rSum,SPsolver):
    """Average one (class, alpha, creator) group of Benders results, write the
    corresponding table row, and fold the group's totals into rSum.

    TableFile   -- open .tex output file
    ResultsList -- list of per-instance result dicts (csv.DictReader rows)
    rSum        -- RowTotal carrying the running class summary; mutated and
                   returned
    SPsolver    -- sub-problem solver name (currently unused in the body)

    Raises ZeroDivisionError when ResultsList is empty (the caller traps it).
    """
    # initialise totals for output
    r = RowTotal()
    numInst = len(ResultsList)
    for instanceResult in ResultsList:
        # nodes
        r.TotalNodes += int(instanceResult['nodes'])
        # SP nodes
        r.TotalSPNodes += int(instanceResult['SPnodes'])
        # iters
        r.TotalIters += int(instanceResult['iters'])
        # cuts total
        r.TotalCuts += int(instanceResult['cuts'])
        # each cut total ('-' marks a cut family disabled for this run)
        if instanceResult['numNG'] != '-':
            r.TCutList[0] += int(instanceResult['numNG'])
        if instanceResult['numGUB'] != '-':
            r.TCutList[1] += int(instanceResult['numGUB'])
        if instanceResult['numIC'] != '-':
            r.TCutList[2] += int(instanceResult['numIC'])
        if instanceResult['numIC2'] != '-':
            r.TCutList[3] += int(instanceResult['numIC2'])
        if instanceResult['numIC3'] != '-':
            r.TCutList[4] += int(instanceResult['numIC3'])
        if instanceResult['numLC'] != '-':
            r.TCutList[5] += int(instanceResult['numLC'])
        # feasibility
        feasible = int(instanceResult['feasible'])
        r.TotalFeas += feasible
        # optimality
        optimal = int(instanceResult['optimal'])
        r.TotalOpt += optimal
        # gap percentage
        # only add gap to running total if instance is sub-optimal
        if feasible == 1 and optimal != 1:
            r.TotalSubOptimal += 1
            r.TotalPercGap += float(instanceResult['gap'])
        # RMP runtime
        r.TotalRMPruntime += float(instanceResult['RMPtime'])
        # SP runtime
        r.TotalSPruntime += float(instanceResult['SPtime'])
        # total runtime
        r.TotalRuntime += float(instanceResult['runtime'])
    AvgNodes = r.TotalNodes/numInst
    AvgIters = r.TotalIters/numInst
    AvgCuts = r.TotalCuts/numInst
    AvgCutList = [ r.TCutList[i]/numInst for i in range(6) ]
    # AvgPercGap = r.TotalPercGap/numInst
    # gap is averaged over the sub-optimal instances only
    if r.TotalSubOptimal == 0:
        AvgPercGap = 0
    else:
        AvgPercGap = r.TotalPercGap/r.TotalSubOptimal
    r.TotalNoSol = numInst - r.TotalFeas
    r.TotalPercOpt = float(r.TotalOpt/numInst)*100
    AvgRMPruntime = r.TotalRMPruntime/numInst
    AvgSPruntime = r.TotalSPruntime/numInst
    AvgRuntime = r.TotalRuntime/numInst
    # if SPsolver == "MIP":
    # one data row: nodes, iters, cuts, per-family cut vector, gap,
    # no-solution count, opt/total, %opt, RMP/SP/total runtimes
    TableFile.write('{:,.0f} & {:,.0f} & {:,.0f} & [{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f}] & {:.2f} & {} & {}/{} & {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\\n'.format(AvgNodes,
        AvgIters,AvgCuts,
        AvgCutList[0],AvgCutList[1],AvgCutList[2],AvgCutList[3],AvgCutList[4],AvgCutList[5],
        AvgPercGap,r.TotalNoSol,r.TotalOpt,numInst,r.TotalPercOpt,
        AvgRMPruntime,AvgSPruntime,AvgRuntime))
    # keep track of running totals for this class
    rSum.counter += 1
    rSum.TotalNumInst += numInst
    rSum.TotalNodes += r.TotalNodes
    rSum.TotalSPNodes += r.TotalSPNodes
    rSum.TotalIters += r.TotalIters
    rSum.TotalCuts += r.TotalCuts
    rSum.TCutList[0] += r.TCutList[0]
    rSum.TCutList[1] += r.TCutList[1]
    rSum.TCutList[2] += r.TCutList[2]
    rSum.TCutList[3] += r.TCutList[3]
    rSum.TCutList[4] += r.TCutList[4]
    rSum.TCutList[5] += r.TCutList[5]
    rSum.TotalPercGap += r.TotalPercGap
    rSum.TotalNoSol += r.TotalNoSol
    rSum.TotalSubOptimal += r.TotalSubOptimal
    rSum.TotalOpt += r.TotalOpt
    rSum.TotalPercOpt += r.TotalPercOpt/100
    rSum.TotalRMPruntime += r.TotalRMPruntime
    rSum.TotalSPruntime += r.TotalSPruntime
    rSum.TotalRuntime += r.TotalRuntime
    return rSum
def write_benders_row_simple_summary(TableFile,rSum,SPsolver):
    """Convert rSum's running totals into per-instance averages (mutating
    rSum IN PLACE) and write the class's 'Overall' summary row.

    Must be called at most once per rSum: after this call the accumulator
    holds averages, not totals.
    """
    rSum.TotalPercOpt = (rSum.TotalOpt/rSum.TotalNumInst)*100
    if rSum.TotalNumInst > rSum.TotalOpt:
        # totalSubOptimal = rSum.TotalNumInst - rSum.TotalOpt - rSum.TotalNoSol
        # average gap over the sub-optimal instances only
        rSum.TotalPercGap = rSum.TotalPercGap/rSum.TotalSubOptimal
        # pdb.set_trace()
    else:
        rSum.TotalPercGap = 0
    rSum.TotalNodes = rSum.TotalNodes/rSum.TotalNumInst
    rSum.TotalSPNodes = rSum.TotalSPNodes/rSum.TotalNumInst
    rSum.TotalIters = rSum.TotalIters/rSum.TotalNumInst
    rSum.TotalCuts = rSum.TotalCuts/rSum.TotalNumInst
    rSum.TCutList[0] = rSum.TCutList[0]/rSum.TotalNumInst
    rSum.TCutList[1] = rSum.TCutList[1]/rSum.TotalNumInst
    rSum.TCutList[2] = rSum.TCutList[2]/rSum.TotalNumInst
    rSum.TCutList[3] = rSum.TCutList[3]/rSum.TotalNumInst
    rSum.TCutList[4] = rSum.TCutList[4]/rSum.TotalNumInst
    rSum.TCutList[5] = rSum.TCutList[5]/rSum.TotalNumInst
    # rSum.TotalPercGap = rSum.TotalPercGap/rSum.TotalNumInst
    # rSum.TotalPercOpt = rSum.TotalPercOpt/100
    rSum.TotalRMPruntime = rSum.TotalRMPruntime/rSum.TotalNumInst
    rSum.TotalSPruntime = rSum.TotalSPruntime/rSum.TotalNumInst
    rSum.TotalRuntime = rSum.TotalRuntime/rSum.TotalNumInst
    # if SPsolver == "MIP":
    # NOTE(review): this row reports TotalSPNodes in the first numeric column
    # while the per-creator rows report master-problem nodes -- confirm the
    # asymmetry is intentional.
    TableFile.write('\t\t\t\t\\midrule & & Overall\t& {:,.0f} & {:,.0f} & {:,.0f} & [{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f},\\:{:,.0f}] & {:.2f} & {} & {}/{} & {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\\n'.format(rSum.TotalSPNodes,
        rSum.TotalIters,rSum.TotalCuts,
        rSum.TCutList[0],rSum.TCutList[1],rSum.TCutList[2],rSum.TCutList[3],rSum.TCutList[4],rSum.TCutList[5],
        rSum.TotalPercGap,rSum.TotalNoSol,rSum.TotalOpt,rSum.TotalNumInst,rSum.TotalPercOpt,
        rSum.TotalRMPruntime,rSum.TotalSPruntime,rSum.TotalRuntime))
def write_mip_row_simple_averages(TableFile,ResultsList,rSum):
    """Average one creator's pure-MIP results, write the table row, and fold
    the totals into the running class summary rSum (which is returned).

    Raises ZeroDivisionError when ResultsList is empty (the caller traps it).
    """
    # initialise totals for output
    r = RowTotal()
    numInst = len(ResultsList)
    for instanceResult in ResultsList:
        # nodes
        r.TotalNodes += int(instanceResult['nodes'])
        feasible = int(instanceResult['feasible'])
        # gap percentage -- only reported when a feasible solution exists
        if feasible == 1:
            r.TotalPercGap += float(instanceResult['gap'])
        # feasibility
        r.TotalFeas += feasible
        # optimality
        r.TotalOpt += int(instanceResult['optimal'])
        # total runtime
        r.TotalRuntime += float(instanceResult['runtime'])
    AvgNodes = r.TotalNodes/numInst
    # BUG FIX: count the no-solution instances *before* averaging the gap.
    # The original computed AvgPercGap first, while TotalNoSol was still 0,
    # so it always divided by numInst instead of by the feasible count (and
    # would have divided by zero had the intended order been used with no
    # feasible instances).
    r.TotalNoSol = numInst - r.TotalFeas
    if r.TotalFeas == 0:
        AvgPercGap = 0
    else:
        AvgPercGap = r.TotalPercGap/r.TotalFeas
    r.TotalPercOpt = float(r.TotalOpt/numInst)*100
    AvgRuntime = r.TotalRuntime/numInst
    TableFile.write('{:,.0f} & {:.2f} & {} & {}/{} & {:.2f} & {:.2f} \\\\\n'.format(AvgNodes,
        AvgPercGap,r.TotalNoSol,r.TotalOpt,numInst,r.TotalPercOpt,AvgRuntime))
    # keep track of running totals for this class
    rSum.counter += 1
    rSum.TotalNumInst += numInst
    rSum.TotalNodes += r.TotalNodes
    rSum.TotalPercGap += AvgPercGap
    rSum.TotalNoSol += r.TotalNoSol
    rSum.TotalOpt += r.TotalOpt
    rSum.TotalPercOpt += r.TotalPercOpt/100
    rSum.TotalRuntime += r.TotalRuntime
    return rSum
def write_mip_row_simple_summary(TableFile,rSum):
    """Average rSum's running totals (mutating rSum IN PLACE) and write the
    class's 'Overall' summary row for the pure-MIP tables.

    rSum.counter is the number of creator rows folded into the summary; the
    gap column is the mean of the per-row average gaps, not a per-instance
    average.
    """
    rSum.TotalPercOpt = (rSum.TotalOpt/rSum.TotalNumInst)*100
    rSum.TotalPercGap = rSum.TotalPercGap/(rSum.counter)
    rSum.TotalNodes = rSum.TotalNodes/rSum.TotalNumInst
    # NOTE(review): TotalIters is never accumulated on the MIP path, so this
    # division looks vestigial -- confirm before removing.
    rSum.TotalIters = rSum.TotalIters/rSum.TotalNumInst
    # rSum.TotalPercGap = rSum.TotalPercGap/rSum.TotalNumInst
    # rSum.TotalPercOpt = rSum.TotalPercOpt/100
    rSum.TotalRuntime = rSum.TotalRuntime/rSum.TotalNumInst
    TableFile.write('\t\t\t\t\\midrule & & Overall\t& {:,.0f} & {:.2f} & {} & {}/{} & {:.2f} & {:.2f} \\\\\\midrule\n'.format(rSum.TotalNodes,
        rSum.TotalPercGap,rSum.TotalNoSol,
        rSum.TotalOpt,rSum.TotalNumInst,rSum.TotalPercOpt,rSum.TotalRuntime))
#----------------------------------------------------------------------------------------#
# DATA PROCESSING DRIVERS
def main_basic_data_processing():
    """Build the LaTeX results tables for the Benders decomposition runs.

    For each configured cut type, reads the per-instance results of every
    (class, alpha, creator, search) combination and writes one .tex file
    containing a table with per-creator average rows and an overall summary
    row per class.
    """
    debugging = 0
    # ~~~~~~~~~~~~~~~~~~~~~~
    # Input Parameters
    TIMELIMIT = 1800
    Model = "BD"
    # SPsolver = "MIP"
    SPsolver = "CP3"
    # ClassNumList = [1 ,2, 3]
    ClassNumList = [2]
    CPmodeltype = 3
    alphaList = ["1.00", "0.75", "0.50", "0.25"]
    # alphaList=["0.25"]
    if SPsolver == 'MIP':
        SearchList = ["default_s"]
        SearchAbvrList = ['na']
    elif SPsolver == 'CP2' or SPsolver == 'CP3':
        # SearchList=["default_s", "priority_smallest"]
        # SearchAbvrList = ["def", "pris"]
        SearchList = ["priority_first_fail"]
        SearchAbvrList = ["priff"]
    else:
        SearchList = ["default_s"]
        SearchAbvrList = ['na']
    # Define Benders cutting procedure (one flag string per cut family)
    # CuttingList = ["001000", "000100", "000010", "011000", "010100", "010010", "010101", "011001"]
    CuttingList = ["010010"]
    # ~~~~~~~~~~~~~~~~~~~~~~
    # Output Directories
    TableDir = "tables-" + Model + "/"
    # initialise output instance file
    filename = TableDir + "table_{}_{}.tex".format(Model, SPsolver)
    TableFile = open(filename, 'w')
    # define the lowercase version of current solver for the data processing
    lcSPsolver = define_lowercase_SP_solver(SPsolver, CPmodeltype)
    write_tex_file_preamble(TableFile)
    for cutType in CuttingList:
        # Find directory containing the desired results
        if Model == "BD":
            ResultsDir = Model + "/" + SPsolver + "-" + cutType + "/results/"
        else:
            ResultsDir = Model + "/results/"
        # write the start of the current tex table
        if Model == 'BD':
            write_tex_table_benders_preamble(TableFile, SPsolver, CPmodeltype, cutType)
            write_tex_table_benders_header_row_simple(TableFile)
        else:
            write_tex_table_mip_preamble(TableFile, Model)
            write_tex_table_mip_header_row_simple(TableFile)
        for classNum in ClassNumList:
            # find the right creators
            CreatorList = define_creator_list(classNum)
            # store all results for this class
            ResultsDict = store_all_instances_dict(ResultsDir, Model, lcSPsolver, classNum, alphaList, CreatorList, SearchAbvrList)
            # store the overall result for this class
            rSum = RowTotal()
            for alpha in alphaList:
                for creator in CreatorList:
                    # only print the class / alpha columns on the first row of
                    # their group, giving merged-looking cells in the table
                    if alphaList.index(alpha) == 0:
                        if CreatorList.index(creator) == 0:
                            TableFile.write("\\midrule\t%s\t& %s\t& {\\tt %s}\t& " % (classNum, alpha, creator))
                        else:
                            TableFile.write("\t\t\t\t&\t\t& {\\tt %s}\t& " % (creator))
                    else:
                        if CreatorList.index(creator) == 0:
                            TableFile.write("\t\t\t\t& %s\t& {\\tt %s}\t& " % (alpha, creator))
                        else:
                            TableFile.write("\t\t\t\t&\t\t& {\\tt %s}\t& " % (creator))
                    for SearchAbrv in SearchAbvrList:
                        ResultsList = ResultsDict[alpha][creator][SearchAbrv][0]
                        try:
                            # write the data cells of the current row
                            if Model == 'BD':
                                rSum = write_benders_row_simple_averages(TableFile, ResultsList, rSum, SPsolver)
                            else:
                                rSum = write_mip_row_simple_averages(TableFile, ResultsList, rSum)
                        except ZeroDivisionError:
                            # empty results file -- drop into the debugger
                            pdb.set_trace()
            # write overall averages for this class
            if Model == 'BD':
                write_benders_row_simple_summary(TableFile, rSum, SPsolver)
            else:
                write_mip_row_simple_summary(TableFile, rSum)
        # write the end of the current tex table
        if Model == 'BD':
            write_tex_table_benders_post(TableFile)
        else:
            write_tex_table_mip_post(TableFile)
    # close the tex file
    write_tex_file_post(TableFile)
    # BUG FIX: the original said `TableFile.close` (attribute access, no
    # call), so the handle was never closed and the tail of the buffer could
    # stay unflushed.
    TableFile.close()
def main_basic_data_processing_CP():
    """Build the LaTeX tables comparing CP search strategies for the Benders
    runs: one row group per search abbreviation, one summary per class.
    """
    debugging = 0
    # ~~~~~~~~~~~~~~~~~~~~~~
    # Input Parameters
    TIMELIMIT = 1800
    Model = "BD"
    # SPsolver = "na"
    SPsolver = "CP"
    # ClassNumList = [1 ,2, 3]
    ClassNumList = [2]
    CPmodeltype = 3
    alphaList = ["1.00", "0.75", "0.50", "0.25"]
    # alphaList=["1.00"]
    if SPsolver == 'MIP':
        SearchList = ["default_s"]
        SearchAbvrList = ['def']
    elif SPsolver == 'CP':
        # other strategies previously compared: start_s, start_Then_spair,
        # priority_input_order, priority_smallest, priority_smallest_largest
        SearchList = ["priority_first_fail"]
        SearchAbvrList = ["priff"]
    else:
        SearchList = ["default_s"]
        SearchAbvrList = ['na']
    # Define Benders cutting procedure
    # CuttingList = ["001000", "000100", "000010", "011000", "010100", "010010", "010101", "011001"]
    CuttingList = ["010010"]
    # ~~~~~~~~~~~~~~~~~~~~~~
    # Output Directories
    TableDir = "tables-" + Model + "/"
    # initialise output instance file (name hard-coded for this comparison round)
    # filename = TableDir + "table_{}_{}.tex".format(Model,SPsolver)
    filename = TableDir + "table_BD_CP3_round2.tex"
    TableFile = open(filename, 'w')
    # define the lowercase version of current solver for the data processing
    lcSPsolver = define_lowercase_SP_solver(SPsolver, CPmodeltype)
    write_tex_file_preamble(TableFile)
    for cutType in CuttingList:
        # Find directory containing the desired results
        if Model == "BD":
            if SPsolver == "MIP":
                ResultsDir = Model + "/" + SPsolver + "-" + cutType + "/results/"
            else:
                # ResultsDir = Model + "/CP-010010-cp23" + "/results/"
                ResultsDir = Model + "/CP3-010010" + "/results/"
        else:
            ResultsDir = Model + "/results/"
        # write the start of the current tex table
        if Model == 'BD':
            if SPsolver == "MIP":
                write_tex_table_benders_preamble(TableFile, SPsolver, CPmodeltype, cutType)
                write_tex_table_benders_header_row_simple(TableFile)
            else:
                write_tex_table_benders_CP_preamble(TableFile, SPsolver, CPmodeltype, cutType)
                write_tex_table_benders_CP_header_row_simple(TableFile)
        else:
            write_tex_table_mip_preamble(TableFile, Model)
            write_tex_table_mip_header_row_simple(TableFile)
        for SearchAbrv in SearchAbvrList:
            TableFile.write("\t\t\t%s " % (SearchAbrv))
            for classNum in ClassNumList:
                # only prefix the search column on its first class row
                if ClassNumList.index(classNum) == 0:
                    TableFile.write("& %s & " % (classNum))
                else:
                    TableFile.write("\t\t\t & %s & " % (classNum))
                # find the right creators
                CreatorList = define_creator_list(classNum)
                # store all results for this class
                ResultsDict = store_all_instances_dict(ResultsDir, Model, lcSPsolver, classNum, alphaList, CreatorList, SearchAbvrList)
                # store the overall result for this class
                rSum = RowTotal()
                for alpha in alphaList:
                    for creator in CreatorList:
                        ResultsList = ResultsDict[alpha][creator][SearchAbrv][0]
                        try:
                            # accumulate this (alpha, creator) cell into rSum
                            if Model == 'BD':
                                rSum = write_benders_row_simple_averages(TableFile, ResultsList, rSum, SPsolver)
                            else:
                                rSum = write_mip_row_simple_averages(TableFile, ResultsList, rSum)
                        except ZeroDivisionError:
                            # empty results file -- drop into the debugger
                            pdb.set_trace()
                # write overall averages for this class
                if Model == 'BD':
                    write_benders_row_simple_summary(TableFile, rSum, SPsolver)
                else:
                    write_mip_row_simple_summary(TableFile, rSum)
        # write the end of the current tex table
        if Model == 'BD':
            write_tex_table_benders_post(TableFile)
        else:
            write_tex_table_mip_post(TableFile)
    # close the tex file
    write_tex_file_post(TableFile)
    # BUG FIX: the original said `TableFile.close` (no call); the handle was
    # never actually closed.
    TableFile.close()
if __name__ == "__main__":
    # Default driver builds the Benders tables; swap in the CP-search
    # comparison driver below when needed.
    main_basic_data_processing()
    # main_basic_data_processing_CP()
import base64
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import os,sys
def decrypt_met(private_key, cipher_text):
    """Decrypt one urlsafe-base64 RSA-OAEP chunk; returns plaintext bytes."""
    # OAEP configured with an SHA-256 MGF1 mask but an SHA-512 main digest --
    # the encrypting side must use this exact combination.
    oaep_padding = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA512(),
        label=None,
    )
    raw_cipher_bytes = base64.urlsafe_b64decode(cipher_text)
    return private_key.decrypt(
        ciphertext=raw_cipher_bytes,
        padding=oaep_padding,
    )
def read_private_key(password_bytes):
    """Load the password-protected PEM private key from res/private_key.pem."""
    with open("res/private_key.pem", "rb") as key_file:
        pem_bytes = key_file.read()
    return serialization.load_pem_private_key(
        data=pem_bytes,
        password=password_bytes,
        backend=default_backend(),
    )
def read_cipher():
    """Return the cipher text stored in res/key.txt.y4h.

    Reads under a context manager -- the original left the handle from
    open() to be collected by the GC.
    """
    with open('res/key.txt.y4h') as cipher_file:
        return cipher_file.read()
def d_main(cipher):
    """Decrypt every 'Y$H4'-delimited chunk of *cipher* and join the results."""
    # NOTE(review): the key password is hard-coded in source -- consider
    # moving it to configuration or an environment variable.
    password_bytes = 'Th1sAV3ryS3cureP4sswD'.encode('utf-8')
    private_key = read_private_key(password_bytes)
    chunks = [chunk for chunk in cipher.split('Y$H4') if chunk != '']
    return ''.join(decrypt_met(private_key, chunk).decode() for chunk in chunks)
|
import imp
import importlib

import pytest

from biocommons.seqrepo import config
def test_SEQREPO_LRU_CACHE_MAXSIZE_default(monkeypatch):
    """With the env var unset, the module default of 1,000,000 applies."""
    monkeypatch.delenv("SEQREPO_LRU_CACHE_MAXSIZE", raising=False)
    # importlib.reload replaces the deprecated imp.reload (imp was removed
    # in Python 3.12)
    importlib.reload(config)
    assert config.SEQREPO_LRU_CACHE_MAXSIZE == 1000000
def test_SEQREPO_LRU_CACHE_MAXSIZE_int(monkeypatch):
    """A numeric env value is parsed as an int."""
    monkeypatch.setenv("SEQREPO_LRU_CACHE_MAXSIZE", "42")
    # importlib.reload replaces the deprecated imp.reload
    importlib.reload(config)
    assert config.SEQREPO_LRU_CACHE_MAXSIZE == 42
def test_SEQREPO_LRU_CACHE_MAXSIZE_none(monkeypatch):
    """'none' (any casing) disables the cache bound entirely."""
    monkeypatch.setenv("SEQREPO_LRU_CACHE_MAXSIZE", "nOne")
    # importlib.reload replaces the deprecated imp.reload
    importlib.reload(config)
    assert config.SEQREPO_LRU_CACHE_MAXSIZE is None
def test_SEQREPO_LRU_CACHE_MAXSIZE_invalid(monkeypatch):
    """A value that is neither an int nor 'none' raises at import time."""
    monkeypatch.setenv("SEQREPO_LRU_CACHE_MAXSIZE", "invalid")
    with pytest.raises(ValueError):
        # importlib.reload replaces the deprecated imp.reload
        importlib.reload(config)
|
from setuptools import setup
from cli import __version__

# Distribution metadata for the csvmatch command-line tool; the console
# script maps `csvmatch` to cli.main.  Dependencies are pinned to exact
# versions.
setup(
    name='csvmatch',
    version=__version__,
    description='Find (fuzzy) matches between two CSV files in the terminal.',
    long_description=open('README.md').read(),
    author='Max Harlow',
    author_email='maxharlow@gmail.com',
    url='https://github.com/maxharlow/csvmatch',
    license='Apache',
    # NOTE(review): [''] packages the repository root as a top-level package
    # -- presumably intentional for this flat layout; confirm.
    packages=[''],
    install_requires=[
        'chardet==3.0.4',
        'tqdm==4.18.0',
        'colorama==0.4.1',
        'unidecode==1.0.23',
        'dedupe==1.9.7',
        'jellyfish==0.7.2',
        'doublemetaphone==0.1'
    ],
    entry_points = {
        'console_scripts': [
            'csvmatch = cli:main'
        ]
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Utilities'
    ]
)
|
# coding: utf-8
from __future__ import absolute_import
import json
from google.appengine.ext import ndb
from api import fields
import model
import util
class Drawing(model.Base):
    """Datastore entity holding one drawing as a (compressed) JSON payload."""
    # content hash, used to identify/deduplicate drawings
    hash = ndb.StringProperty(required=True)
    json = ndb.JsonProperty(required=True, compressed=True, verbose_name=u'JSON')

    @ndb.ComputedProperty
    def size(self):
        # serialized length in characters; 0 when no payload is stored
        # (the name `json` here resolves to the module, self.json to the property)
        if self.json:
            return len(json.dumps(self.json))
        return 0

    @ndb.ComputedProperty
    def elements(self):
        # number of top-level drawing elements, when the payload has any
        if self.json and 'elements' in self.json:
            return len(self.json['elements'])
        return 0

    # serializers for the fields exposed through the REST API layer
    FIELDS = {
        'hash': fields.String,
        'json': fields.Raw,
        'size': fields.Integer,
        'elements': fields.Integer,
    }
    FIELDS.update(model.Base.FIELDS)
|
import falcon
import subprocess
from datalad_service.common.user import get_user_info
from datalad_service.tasks.files import commit_files
class ResetResource(object):
    """Falcon resource that hard-resets a dataset repository to a commit."""

    def __init__(self, store):
        # store provides get_dataset_path(dataset) -> filesystem path
        self.store = store

    def on_post(self, req, resp, dataset, hexsha):
        """Reset master to a given commit."""
        # Accept only a full 40-character alphanumeric (i.e. hex-length)
        # object id; anything else is rejected as malformed.
        if dataset and hexsha.isalnum() and len(hexsha) == 40:
            try:
                dataset_path = self.store.get_dataset_path(dataset)
                # check=True raises CalledProcessError on a non-zero exit;
                # the unused process-handle binding from the original is gone
                subprocess.run(
                    ['git', 'reset', '--hard', hexsha], check=True, cwd=dataset_path)
                resp.status = falcon.HTTP_OK
            except subprocess.CalledProcessError:
                resp.status = falcon.HTTP_INTERNAL_SERVER_ERROR
        else:
            resp.media = {
                'error': 'Missing or malformed dataset or hexsha parameter in request.'}
            resp.status = falcon.HTTP_UNPROCESSABLE_ENTITY
|
# semordnilap
def semordnilapWrapper(str1, str2):
    """Guarded entry point for the semordnilap check.

    Single-character strings and identical strings are rejected up front:
    neither counts as a semordnilap pair.
    """
    trivial = len(str1) == 1 or len(str2) == 1 or str1 == str2
    if trivial:
        return False
    return semordnilap(str1, str2)
def semordnilap(str1, str2):
    '''
    str1: a string
    str2: a string

    returns: True if str1 and str2 are semordnilap;
             False otherwise.
    '''
    # Iterative reversal check: gives exactly the same results as the old
    # character-by-character recursion, but runs in O(n) without the O(n^2)
    # slice copies and without hitting the recursion limit on long inputs.
    return len(str1) == len(str2) and str1 == str2[::-1]
'''
def semordnilap(str1, str2):
# If strings aren't the same length, they cannot be semordnilap
if len(str1) != len(str2):
return False
# Base case: if the strings are each of length 1, check if they're equal
if len(str1) == 1:
return str1 == str2
# Recursive case: check if the first letter of str1 equals the last letter
# of str2
if str1[0] == str2[-1]:
return semordnilap(str1[1:], str2[:-1])
else:
return False
''' |
#!/usr/bin/python
#crc tutorial scripts
#0_summarize_data.py
#identifies datasets and dependencies necessary for tutorial
#==========================================================================
#=============================DEPENDENCIES=================================
#==========================================================================
import sys, os, string
# Get the script's full local path
# Get the script's full local path
whereAmI = os.path.dirname(os.path.realpath(__file__)) +'/'
print(whereAmI)
# the shared pipeline code lives two directory levels up, in <repo>/pipeline/
pipeline_dir = '/'.join(whereAmI.split('/')[0:-2]) + '/pipeline/'
print(pipeline_dir)
# make this folder and the pipeline importable
sys.path.append(whereAmI)
sys.path.append(pipeline_dir)
import pipeline_dfci
import utils
import string
import numpy
import os
import re
from collections import defaultdict
import subprocess
#==========================================================================
#============================PARAMETERS====================================
#==========================================================================
projectName = 'crc_tutorial'
genome ='hg19'
# RefSeq gene annotation shipped with the pipeline for this genome build
annotFile = '%s/annotation/%s_refseq.ucsc' % (pipeline_dir,genome)
#project folders
projectFolder = '/'.join(whereAmI.split('/')[0:-2]) + '/'
# presumably normalises the path / creates the folder -- TODO confirm
# utils.formatFolder semantics
projectFolder = utils.formatFolder(projectFolder,True)
#mask Files
#==========================================================================
#============================LIST OF DATAFILES=============================
#==========================================================================
#this project will utilize multiple datatables
#data tables are organized largely by type/system
#some data tables overlap for ease of analysis
#ChIP-seq
chip_data_file = '%sdata_tables/MM1S_CHIP_TABLE.txt' % (projectFolder)
#ATAC-seq
atac_data_file = '%sdata_tables/MM1S_ATAC_TABLE.txt' % (projectFolder)
#==========================================================================
#===========================MAIN METHOD====================================
#==========================================================================
def main():
    """Sanity-check the tutorial data tables, write the genome configuration
    file, and verify the required external tools are on the PATH.

    Raises ValueError when bamliquidator, bamliquidator_batch.py, or fimo
    cannot be located.
    """
    print('main analysis for project %s' % (projectName))
    print('changing directory to project folder')
    os.chdir(projectFolder)
    print('\n\n')
    print('#======================================================================')
    print('#==================I. LOADING DATA ANNOTATION TABLES===================')
    print('#======================================================================')
    print('\n\n')
    # This section sanity checks each data table and makes sure both bam and
    # .bai files are accessible
    # for chip data file
    pipeline_dfci.summary(chip_data_file)
    # for atac data file
    pipeline_dfci.summary(atac_data_file)
    print('\n\n')
    print('#======================================================================')
    print('#=====================II. CONFIGURING GENOME BUILD=====================')
    print('#======================================================================')
    print('\n\n')
    genome_directory = '%sgenomes/Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/' % (projectFolder)
    mask_file = '%sgenomes/Homo_sapiens/UCSC/hg19/Annotation/Masks/hg19_encode_blacklist.bed' % (projectFolder)
    config_table = [['BUILD:FIELD:PATH'],
                    ['%s:%s:%s' % (genome,'genome_directory',genome_directory)],
                    ['%s:%s:%s' % (genome,'mask_file',mask_file)],
                    ]
    config_path = '%scrc_config.txt' %(whereAmI)
    utils.unParseTable(config_table,config_path,'\t')
    print('writing genome configuration to %s' % (config_path))
    print('\n\n')
    print('#======================================================================')
    print('#==================III. DETECTING DEPENDENCIES=========================')
    print('#======================================================================')
    print('\n\n')
    from distutils.spawn import find_executable
    # Try to find bamliquidator, bamliquidator_batch.py, and fimo
    bamliquidatorString = find_executable('bamliquidator')
    if bamliquidatorString is None:
        raise ValueError('bamliquidator not found in path')
    else:
        print('found bamliquidator')
    bamliquidatorBatchString = find_executable('bamliquidator_batch.py')
    # BUG FIX: the original re-tested bamliquidatorString here, so a missing
    # bamliquidator_batch.py was never reported.
    if bamliquidatorBatchString is None:
        raise ValueError('bamliquidator_batch.py not found in path')
    else:
        print('found bamliquidator_batch.py')
    fimoString = find_executable('fimo')
    # BUG FIX: likewise, the original stored fimo's path in the wrong variable
    # and re-tested bamliquidatorString, so fimo's absence went undetected.
    if fimoString is None:
        raise ValueError('fimo not found in path')
    else:
        print('found fimo')
#==========================================================================
#==================================THE END=================================
#==========================================================================
if __name__=="__main__":
    # run the data-table summary and dependency checks when invoked as a script
    main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import libcst as cst
from fixit import CstLintRule, InvalidTestCase as Invalid, ValidTestCase as Valid
class UsePlusForStringConcatRule(CstLintRule):
    """
    Enforces use of explicit string concatenations using a ``+`` sign where an implicit concatenation is detected.
    An implicit concatenation is a tuple or a call with multiple strings and a missing comma, e.g: ``("a" "b")``, and may have unexpected results.
    """

    MESSAGE: str = (
        "Implicit string concatenation detected, please add '+' to be explicit. "
        + 'E.g. a tuple or a call ("a" "b") with a missing comma results in multiple strings '
        + "being concatenated as one string and causes unexpected behaviour."
    )
    VALID = [Valid("'abc'"), Valid("'abc' + 'def'"), Valid("f'abc'")]
    INVALID = [
        Invalid(
            "'ab' 'cd'",
            expected_replacement="('ab' + 'cd')",
        ),
        # We can deal with nested concatenated strings
        Invalid(
            "'ab' 'cd' 'ef' 'gh'",
            expected_replacement="('ab' + 'cd' + 'ef' + 'gh')",
        ),
        # works for f-strings too
        Invalid(
            "f'ab' f'cd'",
            expected_replacement="(f'ab' + f'cd')",
        ),
        # arbitrary whitespace between the elements is preserved
        Invalid(
            """
            (
                # comment
                'ab'  # middle comment
                'cd'  # trailing comment
            )
            """,
            expected_replacement="""
            (
                # comment
                'ab'  # middle comment
                + 'cd'  # trailing comment
            )
            """,
        ),
    ]

    def visit_ConcatenatedString(self, node: cst.ConcatenatedString) -> None:
        """Report an implicit concatenation and offer a ``+``-joined rewrite."""
        # Skip if our immediate parent is also a ConcatenatedString, since our parent
        # should've already reported this violation.
        if isinstance(self.context.node_stack[-2], cst.ConcatenatedString):
            return
        # collect nested ConcatenatedString nodes into a flat list from outer to
        # innermost children
        children: List[cst.ConcatenatedString] = []
        el = node
        while isinstance(el, cst.ConcatenatedString):
            children.append(el)
            # left cannot be a ConcatenatedString, only right can.
            el = el.right
        # Build up a replacement by starting with the innermost child
        replacement = children[-1].right
        for el in reversed(children):
            # the original whitespace between the pieces becomes the
            # whitespace before the inserted `+`, preserving comments
            replacement = cst.BinaryOperation(
                left=el.left,  # left is never a ConcatenatedString
                operator=cst.Add(
                    whitespace_before=el.whitespace_between,
                    whitespace_after=cst.SimpleWhitespace(" "),
                ),
                right=replacement,
                lpar=el.lpar,
                rpar=el.rpar,
            )
        # A binary operation has a lower priority in the order-of-operations than an
        # implicitly concatenated string, so we need to make sure the replacement is
        # parenthesized to make our change safe.
        if not replacement.lpar:
            # There's a good chance that the formatting might be messed up by this, but
            # black should be able to sort it out when it gets run next time.
            #
            # Because of the changes needed (e.g. increased indentation of children),
            # it's not really sane/possible for us to format this any better.
            replacement = replacement.with_changes(
                lpar=[cst.LeftParen()], rpar=[cst.RightParen()]
            )
        self.report(node, replacement=replacement)
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
from unittest import mock
from click.testing import CliRunner
import pytest
import yaml
from pegleg import pegleg_main
from pegleg.cli import commands
from pegleg.engine import errorcodes
from pegleg.engine.catalog import pki_utility
from pegleg.engine.util import git
from tests.unit import test_utils
# Pinned parameters for the canonical remote repository used across the suite.
# repo_rev is a fixed commit so tests are reproducible against a known tree.
TEST_PARAMS = {
    "site_name": "seaworthy",
    "site_type": "foundry",
    "repo_rev": '48e2afa570541972d4c60719181bc2fcca84a89c',
    "repo_name": "treasuremap",
    "repo_url": "https://opendev.org/airship/treasuremap.git",
}

# Dummy PEM-shaped certificate payload used by the secrets "wrap" tests.
TEST_CERT = """
-----BEGIN CERTIFICATE-----
DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF
DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF
DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF
DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF
-----END CERTIFICATE-----
"""
@pytest.mark.skipif(
    not test_utils.is_connected(),
    reason='git clone requires network connectivity.')
class BaseCLIActionTest(object):
    """Tests end-to-end flows for all Pegleg CLI actions, with minimal mocking.

    General pattern should be to include exactly one test that uses a remote
    repo URL and as many other tests that are required that use a local repo
    path for runtime optimization.

    All tests should validate that the ``exit_code`` from the CLI is 0 (for
    positive tests).
    """

    # TODO(felipemonteiro): Need tests that validate repository overrides. Also
    # need to write tests that use a site-definition.yaml with repositories key.

    @classmethod
    def setup_class(cls):
        # Share one CliRunner and one clone of treasuremap per test class;
        # subclasses rely on these class attributes.
        cls.runner = CliRunner()
        # Pin so we know that seaworthy is a valid site.
        cls.site_name = TEST_PARAMS["site_name"]
        cls.site_type = TEST_PARAMS["site_type"]
        cls.repo_rev = TEST_PARAMS["repo_rev"]
        cls.repo_name = TEST_PARAMS["repo_name"]
        # git_handler clones (or reuses) the pinned revision and returns the
        # local path to the checkout.
        cls.treasuremap_path = git.git_handler(
            TEST_PARAMS["repo_url"], ref=TEST_PARAMS["repo_rev"])
class TestSiteCLIOptions(BaseCLIActionTest):
    """Tests site-level CLI options."""

    ### clone_path tests ###

    def test_list_sites_using_remote_repo_and_clone_path_option(self, tmpdir):
        """Validates clone_path (-p) option is working properly with site list
        action when using remote repo. Verify that the repo was cloned in the
        clone_path
        """
        # Scenario:
        #
        # 1) List sites (should clone repo automatically to `clone_path`
        #    location if `clone_path` is set)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)

        # Note that the -p option is used to specify the clone_folder
        site_list = self.runner.invoke(
            commands.site,
            ['--no-decrypt', '-p', tmpdir, '-r', repo_url, 'list'])

        assert site_list.exit_code == 0
        # Verify that the repo was cloned into the clone_path
        assert os.path.exists(os.path.join(tmpdir, self.repo_name))
        assert git.is_repository(os.path.join(tmpdir, self.repo_name))

    def test_list_sites_using_local_repo_and_clone_path_option(self, tmpdir):
        """Validates clone_path (-p) option is working properly with site list
        action when using a local repo. Verify that the clone_path has NO
        effect when using a local repo
        """
        # Scenario:
        #
        # 1) List sites (when using local repo there should be not cloning
        #    even if the clone_path is passed in)

        repo_path = self.treasuremap_path

        # Note that the -p option is used to specify the clone_folder
        site_list = self.runner.invoke(
            commands.site,
            ['--no-decrypt', '-p', tmpdir, '-r', repo_path, 'list'])

        assert site_list.exit_code == 0
        # Verify that passing in clone_path when using local repo has no effect
        assert not os.path.exists(os.path.join(tmpdir, self.repo_name))
class TestSiteCLIOptionsNegative(BaseCLIActionTest):
    """Negative Tests for site-level CLI options."""

    ### Negative clone_path tests ###

    def test_list_sites_using_remote_repo_and_reuse_clone_path_option(
            self, tmpdir):
        """Validates clone_path (-p) option is working properly with site list
        action when using remote repo. Verify that the same repo can't be
        cloned in the same clone_path if it already exists
        """
        # Scenario:
        #
        # 1) List sites (should clone repo automatically to `clone_path`
        #    location if `clone_path` is set)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)

        # Note that the -p option is used to specify the clone_folder
        site_list = self.runner.invoke(
            commands.site,
            ['--no-decrypt', '-p', tmpdir, '-r', repo_url, 'list'])

        assert git.is_repository(os.path.join(tmpdir, self.repo_name))

        # Run site list for a second time to validate that the repo can't be
        # cloned twice in the same clone_path
        site_list = self.runner.invoke(
            commands.site,
            ['--no-decrypt', '-p', tmpdir, '-r', repo_url, 'list'])

        # Expect a non-zero exit and an OS-level "File exists" complaint.
        assert site_list.exit_code == 1
        assert 'File exists' in site_list.output
class TestSiteCliActions(BaseCLIActionTest):
    """Tests site-level CLI actions."""

    ### Collect tests ###

    def _validate_collect_site_action(self, repo_path_or_url, save_location):
        # Shared driver: run ``site collect`` and verify exactly one
        # sensibly-named manifest file lands in ``save_location``.
        result = self.runner.invoke(
            commands.site, [
                '--no-decrypt', '-r', repo_path_or_url, 'collect',
                self.site_name, '-s', save_location
            ])

        collected_files = os.listdir(save_location)

        assert result.exit_code == 0, result.output
        assert len(collected_files) == 1
        # Validates that site manifests collected from cloned repositories
        # are written out to sensibly named files like airship-treasuremap.yaml
        assert collected_files[0] == ("%s.yaml" % self.repo_name)

    def test_collect_using_remote_repo_url(self, tmpdir):
        """Validates collect action using a remote URL."""
        # Scenario:
        #
        # 1) Create temporary save location
        # 2) Collect into save location (should clone repo automatically)
        # 3) Check that expected file name is there

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._validate_collect_site_action(repo_url, tmpdir)

    def test_collect_using_remote_repo_url_ending_with_dot_git(self, tmpdir):
        """Validates collect action using a remote URL ending in .git."""
        # Scenario:
        #
        # 1) Create temporary save location
        # 2) Collect into save location (should clone repo automatically)
        # 3) Check that expected file name is there

        repo_url = 'https://opendev.org/airship/%s@%s.git' % (
            self.repo_name, self.repo_rev)
        self._validate_collect_site_action(repo_url, tmpdir)

    def test_collect_using_local_path(self, tmpdir):
        """Validates collect action using a path to a local repo."""
        # Scenario:
        #
        # 1) Create temporary save location
        # 2) Collect into save location (should skip clone repo)
        # 3) Check that expected file name is there

        repo_path = self.treasuremap_path
        self._validate_collect_site_action(repo_path, tmpdir)

    ### Lint tests ###

    def _test_lint_site_action(self, repo_path_or_url, exclude=True):
        # '-x' excludes the given lint codes entirely; '-w' demotes them
        # to warnings (which still produce output).
        flag = '-x' if exclude else '-w'
        lint_command = [
            '--no-decrypt', '-r', repo_path_or_url, 'lint', self.site_name
        ]
        exclude_lint_command = [
            flag, errorcodes.SCHEMA_STORAGE_POLICY_MISMATCH_FLAG, flag,
            errorcodes.SECRET_NOT_ENCRYPTED_POLICY
        ]
        with mock.patch('pegleg.engine.site.util.deckhand') as mock_deckhand:
            mock_deckhand.deckhand_render.return_value = ([], [])
            result = self.runner.invoke(
                commands.site, lint_command + exclude_lint_command)

        assert result.exit_code == 0, result.output
        if exclude:
            # A successful result (while setting lint checks to exclude) should
            # output nothing.
            assert not result.output
        else:
            assert result.output

    def test_lint_site_using_remote_repo_url_with_exclude(self):
        """Validates site lint action using remote repo URL."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Lint site with exclude flags (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._test_lint_site_action(repo_url, exclude=True)

    def test_lint_site_using_local_path_with_exclude(self):
        """Validates site lint action using local repo path."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Lint site with exclude flags (should skip clone repo)

        repo_path = self.treasuremap_path
        self._test_lint_site_action(repo_path, exclude=True)

    def test_lint_site_using_local_path_with_warn(self):
        """Validates site lint action using local repo path."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Lint site with warn flags (should skip clone repo)

        repo_path = self.treasuremap_path
        self._test_lint_site_action(repo_path, exclude=False)

    ### List tests ###

    def _validate_list_site_action(self, repo_path_or_url, tmpdir):
        # ``site list`` writes a table to the file given by -s; verify that
        # the table mentions the expected site and type.
        mock_output = os.path.join(tmpdir, 'output')
        result = self.runner.invoke(
            commands.site, [
                '--no-decrypt', '-r', repo_path_or_url, 'list', '-s',
                mock_output
            ])

        assert result.exit_code == 0, result.output

        with open(mock_output, 'r') as f:
            table_output = f.read()
        assert self.site_name in table_output
        assert self.site_type in table_output

    def test_list_sites_using_remote_repo_url(self, tmpdir):
        """Validates list action using remote repo URL."""
        # Scenario:
        #
        # 1) List sites (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._validate_list_site_action(repo_url, tmpdir)

    def test_list_sites_using_local_path(self, tmpdir):
        """Validates list action using local repo path."""
        # Scenario:
        #
        # 1) List sites (should skip clone repo)

        repo_path = self.treasuremap_path
        self._validate_list_site_action(repo_path, tmpdir)

    ### Show tests ###

    def _validate_site_show_action(self, repo_path_or_url, tmpdir):
        # ``site show`` also writes its table to the -s file.
        mock_output = os.path.join(tmpdir, 'output')
        result = self.runner.invoke(
            commands.site, [
                '--no-decrypt', '-r', repo_path_or_url, 'show', self.site_name,
                '-s', mock_output
            ])

        assert result.exit_code == 0, result.output

        with open(mock_output, 'r') as f:
            table_output = f.read()
        assert self.site_name in table_output

    def test_show_site_using_remote_repo_url(self, tmpdir):
        """Validates show action using remote repo URL."""
        # Scenario:
        #
        # 1) Show site (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._validate_site_show_action(repo_url, tmpdir)

    def test_show_site_using_local_path(self, tmpdir):
        """Validates show action using local repo path."""
        # Scenario:
        #
        # 1) Show site (should skip clone repo)

        repo_path = self.treasuremap_path
        self._validate_site_show_action(repo_path, tmpdir)

    ### Render tests ###

    def _validate_render_site_action(self, repo_path_or_url):
        # Render with Deckhand mocked out; only assert the YAML dump happened.
        render_command = [
            '--no-decrypt', '-r', repo_path_or_url, 'render', self.site_name
        ]

        with mock.patch('pegleg.engine.site.yaml') as mock_yaml:
            with mock.patch(
                    'pegleg.engine.site.util.deckhand') as mock_deckhand:
                mock_deckhand.deckhand_render.return_value = ([], [])
                result = self.runner.invoke(commands.site, render_command)

        assert result.exit_code == 0
        mock_yaml.dump_all.assert_called_once()

    def test_render_site_using_remote_repo_url(self):
        """Validates render action using remote repo URL."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Render site (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._validate_render_site_action(repo_url)

    def test_render_site_using_local_path(self):
        """Validates render action using local repo path."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Render site (should skip clone repo)

        repo_path = self.treasuremap_path
        self._validate_render_site_action(repo_path)

    ### Upload tests ###

    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "MySecretSalt1234567890]["
        })
    def test_upload_documents_shipyard_using_local_repo_path(self):
        """Validates ShipyardHelper is called with correct arguments."""
        # Scenario:
        #
        # 1) Mock out ShipyardHelper
        # 2) Check ShipyardHelper was called with correct arguments

        repo_path = self.treasuremap_path

        with mock.patch('pegleg.pegleg_main.ShipyardHelper') as mock_obj:
            result = self.runner.invoke(
                commands.site, [
                    '--no-decrypt', '-r', repo_path, 'upload', self.site_name,
                    '--collection', 'collection'
                ])

        assert result.exit_code == 0
        mock_obj.assert_called_once()

    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "MySecretSalt1234567890]["
        })
    def test_upload_collection_callback_default_to_site_name(self):
        """Validates that collection will default to the given site_name"""
        # Scenario:
        #
        # 1) Mock out ShipyardHelper
        # 2) Check that ShipyardHelper was called with collection set to
        #    site_name

        repo_path = self.treasuremap_path

        with mock.patch('pegleg.pegleg_main.ShipyardHelper') as mock_obj:
            result = self.runner.invoke(
                commands.site,
                ['--no-decrypt', '-r', repo_path, 'upload', self.site_name])

        assert result.exit_code == 0
        mock_obj.assert_called_once()
class TestGenerateActions(BaseCLIActionTest):
    """Tests the ``generate`` command group (passphrase and salt creation)."""

    def _invoke_generate(self, subcommand):
        # Both tests share the same invocation boilerplate.
        return self.runner.invoke(commands.generate, [subcommand])

    def test_generate_passphrase(self):
        """A ``generate passphrase`` invocation exits cleanly."""
        outcome = self._invoke_generate('passphrase')
        assert outcome.exit_code == 0, outcome.output

    def test_generate_salt(self):
        """A ``generate salt`` invocation exits cleanly."""
        outcome = self._invoke_generate('salt')
        assert outcome.exit_code == 0, outcome.output
class TestRepoCliActions(BaseCLIActionTest):
    """Tests repo-level CLI actions."""

    ### Lint tests ###

    def test_lint_repo_using_remote_repo_url_with_exclude(self):
        """Validates repo lint action using remote repo URL."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Lint repo with exclude flags (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)

        lint_command = ['-r', repo_url, 'lint']
        exclude_lint_command = [
            '-x', errorcodes.SCHEMA_STORAGE_POLICY_MISMATCH_FLAG, '-x',
            errorcodes.SECRET_NOT_ENCRYPTED_POLICY
        ]
        with mock.patch('pegleg.engine.site.util.deckhand') as mock_deckhand:
            mock_deckhand.deckhand_render.return_value = ([], [])
            result = self.runner.invoke(
                commands.repo, lint_command + exclude_lint_command)

        assert result.exit_code == 0, result.output
        # A successful result (while setting lint checks to exclude) should
        # output nothing.
        assert not result.output

    def test_lint_repo_using_local_path_with_exclude(self):
        """Validates repo lint action using local repo path."""
        # Scenario:
        #
        # 1) Mock out Deckhand render (so we can ignore P005 issues)
        # 2) Lint repo with exclude flags (should skip clone repo)

        repo_path = self.treasuremap_path

        lint_command = ['-r', repo_path, 'lint']
        exclude_lint_command = [
            '-x', errorcodes.SCHEMA_STORAGE_POLICY_MISMATCH_FLAG, '-x',
            errorcodes.SECRET_NOT_ENCRYPTED_POLICY
        ]
        with mock.patch('pegleg.engine.site.util.deckhand') as mock_deckhand:
            mock_deckhand.deckhand_render.return_value = ([], [])
            result = self.runner.invoke(
                commands.repo, lint_command + exclude_lint_command)

        assert result.exit_code == 0, result.output
        # A successful result (while setting lint checks to exclude) should
        # output nothing.
        assert not result.output
class TestSiteSecretsActions(BaseCLIActionTest):
    """Tests site secrets-related CLI actions."""

    @classmethod
    def setup_class(cls):
        super(TestSiteSecretsActions, cls).setup_class()
        # Secrets subcommands require encryption credentials in the
        # environment; replace the base runner with one that supplies them.
        cls.runner = CliRunner(
            env={
                "PEGLEG_PASSPHRASE": 'ytrr89erARAiPE34692iwUMvWqqBvC',
                "PEGLEG_SALT": "MySecretSalt1234567890]["
            })

    def _validate_generate_pki_action(self, result):
        assert result.exit_code == 0

        # Scrape the CLI output for paths of generated secret files (any line
        # mentioning the repo name is assumed to be a generated file path).
        generated_files = []
        output_lines = result.output.split("\n")
        for line in output_lines:
            if self.repo_name in line:
                generated_files.append(line)

        assert len(generated_files), 'No secrets were generated'
        for generated_file in generated_files:
            with open(generated_file, 'r') as f:
                result = yaml.safe_load_all(f)  # Validate valid YAML.
                assert list(result), "%s file is empty" % generated_file

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_site_secrets_generate_pki_using_remote_repo_url(self):
        """Validates ``generate certificates`` action using remote repo URL."""
        # Scenario:
        #
        # 1) Generate PKI using remote repo URL

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)

        secrets_opts = ['secrets', 'generate', 'certificates', self.site_name]

        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_url] + secrets_opts)
        self._validate_generate_pki_action(result)

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_site_secrets_generate_pki_using_local_repo_path(self):
        """Validates ``generate certificates`` action using local repo path."""
        # Scenario:
        #
        # 1) Generate PKI using local repo path

        repo_path = self.treasuremap_path
        secrets_opts = ['secrets', 'generate', 'certificates', self.site_name]

        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)
        self._validate_generate_pki_action(result)

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "MySecretSalt1234567890]["
        })
    def test_site_secrets_encrypt_and_decrypt_local_repo_path(self):
        """Validates ``generate certificates`` action using local repo path."""
        # Scenario:
        #
        # 1) Encrypt a file in a local repo

        repo_path = self.treasuremap_path
        file_path = os.path.join(
            repo_path, "site", "seaworthy", "secrets", "passphrases",
            "ceph_fsid.yaml")
        # Mark the document as encrypted site-level so the encrypt command
        # picks it up.
        with open(file_path, "r") as ceph_fsid_fi:
            ceph_fsid = yaml.safe_load(ceph_fsid_fi)
            ceph_fsid["metadata"]["storagePolicy"] = "encrypted"
            ceph_fsid["metadata"]["layeringDefinition"]["layer"] = "site"

        with open(file_path, "w") as ceph_fsid_fi:
            yaml.dump(ceph_fsid, ceph_fsid_fi)

        secrets_opts = [
            'secrets', 'encrypt', '--save-location', repo_path, '-a', 'test',
            self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)

        assert result.exit_code == 0

        # The file on disk should now be a PeglegManagedDocument wrapper.
        with open(os.path.join(repo_path, "site", "seaworthy",
                               "secrets", "passphrases", "ceph_fsid.yaml"),
                  "r") \
                as ceph_fsid_fi:
            ceph_fsid = yaml.safe_load(ceph_fsid_fi)
            assert "encrypted" in ceph_fsid["data"]
            assert "managedDocument" in ceph_fsid["data"]

        secrets_opts = [
            'secrets', 'decrypt', '--path', file_path, self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)
        assert result.exit_code == 0, result.output

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "MySecretSalt1234567890]["
        })
    def test_site_secrets_encrypt_and_decrypt_multiple_paths(self):
        """Validates decrypt using multiple paths."""
        # Scenario:
        #
        # 1) Encrypt a file in a local repo

        repo_path = self.treasuremap_path
        file_path = os.path.join(
            repo_path, "site", "seaworthy", "secrets", "passphrases",
            "ceph_fsid.yaml")
        file_path_2 = os.path.join(
            repo_path, "site", "seaworthy", "secrets", "passphrases",
            "ucp_oslo_messaging_password.yaml")
        # Flag both documents for encryption before running the command.
        with open(file_path, "r") as ceph_fsid_fi:
            ceph_fsid = yaml.safe_load(ceph_fsid_fi)
            ceph_fsid["metadata"]["storagePolicy"] = "encrypted"
            ceph_fsid["metadata"]["layeringDefinition"]["layer"] = "site"
        with open(file_path_2, "r") as oslo_messaging_file:
            oslo_messaging = yaml.safe_load(oslo_messaging_file)
            oslo_messaging["metadata"]["storagePolicy"] = "encrypted"
            oslo_messaging["metadata"]["layeringDefinition"]["layer"] = "site"

        with open(file_path, "w") as ceph_fsid_fi:
            yaml.dump(ceph_fsid, ceph_fsid_fi)
        with open(file_path_2, "w") as oslo_messaging_file:
            yaml.dump(oslo_messaging, oslo_messaging_file)

        secrets_opts = [
            'secrets', 'encrypt', '--save-location', repo_path, '-a', 'test',
            self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)

        assert result.exit_code == 0

        with open(file_path, "r") as ceph_fsid_fi:
            ceph_fsid = yaml.safe_load(ceph_fsid_fi)
            assert "encrypted" in ceph_fsid["data"]
            assert "managedDocument" in ceph_fsid["data"]
        with open(file_path_2, "r") as oslo_messaging_file:
            oslo_messaging = yaml.safe_load(oslo_messaging_file)
            assert "encrypted" in oslo_messaging["data"]
            assert "managedDocument" in oslo_messaging["data"]

        # Decrypt both paths in place with -o (overwrite).
        secrets_opts = [
            'secrets', 'decrypt', '-o', '--path', file_path, '--path',
            file_path_2, self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['-r', repo_path] + secrets_opts)
        assert result.exit_code == 0

        with open(file_path, "r") as ceph_fsid_fi:
            ceph_fsid = yaml.safe_load(ceph_fsid_fi)
            assert "managedDocument" not in ceph_fsid["data"]
        with open(file_path_2, "r") as oslo_messaging_file:
            oslo_messaging = yaml.safe_load(oslo_messaging_file)
            assert "managedDocument" not in oslo_messaging["data"]

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_check_pki_certs_expired(self):
        # The pinned seaworthy revision's certs are expected to be expired,
        # so check-pki-certs should fail.
        repo_path = self.treasuremap_path
        secrets_opts = ['secrets', 'check-pki-certs', self.site_name]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)
        assert result.exit_code == 1, result.output

    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_check_pki_certs(self):
        # The "airsloop" site's certs are expected to be valid.
        repo_path = self.treasuremap_path
        secrets_opts = ['secrets', 'check-pki-certs', "airsloop"]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', '-r', repo_path] + secrets_opts)
        assert result.exit_code == 0, result.output

    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "123456"
        })
    def test_site_secrets_wrap(self):
        """Validates ``generate certificates`` action using local repo path."""
        # Scenario:
        #
        # 1) Encrypt a file in a local repo

        repo_path = self.treasuremap_path
        file_dir = os.path.join(
            repo_path, "site", "seaworthy", "secrets", "certificates")
        file_path = os.path.join(file_dir, "test.crt")
        save_location = os.path.join(file_dir, "test.yaml")

        with open(file_path, "w") as test_crt_fi:
            test_crt_fi.write(TEST_CERT)

        # First pass: wrap without encryption; the cert must round-trip as
        # cleartext inside a managed document.
        secrets_opts = [
            'secrets', 'wrap', "-a", "lm734y", "--filename", file_path, "-s",
            "deckhand/Certificate/v1", "-n", "test-certificate", "-l", "site",
            "--no-encrypt", self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', "-r", repo_path] + secrets_opts)
        assert result.exit_code == 0

        with open(save_location, "r") as output_fi:
            doc = yaml.safe_load(output_fi)
            assert doc["data"]["managedDocument"]["data"] == TEST_CERT
            assert doc["data"]["managedDocument"][
                "schema"] == "deckhand/Certificate/v1"
            assert doc["data"]["managedDocument"]["metadata"][
                "name"] == "test-certificate"
            assert doc["data"]["managedDocument"]["metadata"][
                "layeringDefinition"]["layer"] == "site"
            assert doc["data"]["managedDocument"]["metadata"][
                "storagePolicy"] == "cleartext"

        os.remove(save_location)

        # Second pass: wrap with encryption (default) to a chosen location.
        secrets_opts = [
            'secrets', 'wrap', "-a", "lm734y", "--filename", file_path,
            "--save-location", save_location, "-s", "deckhand/Certificate/v1",
            "-n", "test-certificate", "-l", "site", self.site_name
        ]
        result = self.runner.invoke(
            commands.site, ['--no-decrypt', "-r", repo_path] + secrets_opts)
        assert result.exit_code == 0

        with open(save_location, "r") as output_fi:
            doc = yaml.safe_load(output_fi)
            assert "encrypted" in doc["data"]
            assert "managedDocument" in doc["data"]
class TestTypeCliActions(BaseCLIActionTest):
    """Tests type-level CLI actions."""

    def setup(self):
        # Types expected to appear in the ``type list`` table output.
        self.expected_types = ['foundry']

    def _assert_table_has_expected_sites(self, table_output):
        for expected_type in self.expected_types:
            assert expected_type in table_output

    def _validate_type_list_action(self, repo_path_or_url, tmpdir):
        # ``type list`` writes its table to the file given by -s.
        mock_output = os.path.join(tmpdir, 'output')
        result = self.runner.invoke(
            commands.type, ['-r', repo_path_or_url, 'list', '-s', mock_output])
        with open(mock_output, 'r') as f:
            table_output = f.read()

        assert result.exit_code == 0, result.output
        self._assert_table_has_expected_sites(table_output)

    def test_list_types_using_remote_repo_url(self, tmpdir):
        """Validates list types action using remote repo URL."""
        # Scenario:
        #
        # 1) List types (should clone repo automatically)

        repo_url = 'https://opendev.org/airship/%s@%s' % (
            self.repo_name, self.repo_rev)
        self._validate_type_list_action(repo_url, tmpdir)

    def test_list_types_using_local_repo_path(self, tmpdir):
        """Validates list types action using local repo path."""
        # Scenario:
        #
        # 1) List types for local repo path

        repo_path = self.treasuremap_path
        self._validate_type_list_action(repo_path, tmpdir)
class TestSiteCliActionsWithSubdirectory(BaseCLIActionTest):
    """Tests site CLI actions with subdirectories in repository paths."""

    def setup(self):
        # Sites expected in airship-in-a-bottle's deployment_files subtree.
        self.expected_sites = ['demo', 'gate-multinode', 'dev', 'dev-proxy']

    def _assert_table_has_expected_sites(self, table_output):
        for expected_site in self.expected_sites:
            assert expected_site in table_output

    def _validate_list_site_action(self, repo_path_or_url, tmpdir):
        mock_output = os.path.join(tmpdir, 'output')
        result = self.runner.invoke(
            commands.site, [
                '--no-decrypt', '-r', repo_path_or_url, 'list', '-s',
                mock_output
            ])
        with open(mock_output, 'r') as f:
            table_output = f.read()

        assert result.exit_code == 0, result.output
        self._assert_table_has_expected_sites(table_output)

    def test_site_action_with_subpath_in_remote_url(self, tmpdir):
        """Validates list action with subpath in remote URL."""
        # Scenario:
        #
        # 1) List sites for https://opendev.org/airship/airship-in-a-bottle
        #    deployment_files (subpath in remote URL)

        # Perform site action using remote URL.
        repo_name = 'airship-in-a-bottle'
        repo_rev = '7a0717adc68261c7adb3a3db74a9326d6103519f'
        repo_url = 'https://opendev.org/airship/%s/deployment_files@%s' % (
            repo_name, repo_rev)

        self._validate_list_site_action(repo_url, tmpdir)

    def test_site_action_with_subpath_in_local_repo_path(self, tmpdir):
        """Validates list action with subpath in local repo path."""
        # Scenario:
        #
        # 1) List sites for local repo at /tmp/.../airship-in-a-bottle/
        #    deployment_files

        # Perform site action using local repo path.
        repo_name = 'airship-in-a-bottle'
        repo_rev = '7a0717adc68261c7adb3a3db74a9326d6103519f'
        repo_url = 'https://opendev.org/airship/%s' % repo_name
        _repo_path = git.git_handler(repo_url, ref=repo_rev)
        repo_path = os.path.join(_repo_path, 'deployment_files')

        self._validate_list_site_action(repo_path, tmpdir)
@pytest.mark.usefixtures('monkeypatch')
class TestCliSiteSubcommandsWithDecryptOption(BaseCLIActionTest):
@classmethod
def setup_class(cls):
super(TestCliSiteSubcommandsWithDecryptOption, cls).setup_class()
cls.runner = CliRunner(
env={
"PEGLEG_PASSPHRASE": 'ytrr89erARAiPE34692iwUMvWqqBvC',
"PEGLEG_SALT": "MySecretSalt1234567890][",
"PROMENADE_ENCRYPTION_KEY": "test"
})
for file in glob.iglob(os.path.join(cls.treasuremap_path, 'site',
'seaworthy', 'secrets', '**',
'*.yaml'), recursive=True):
args = [
'sed', '-i',
's/storagePolicy: cleartext/storagePolicy: encrypted/g', file
]
sed_output = subprocess.check_output(args, shell=False)
assert not sed_output
@mock.patch.dict(
os.environ, {
"PEGLEG_PASSPHRASE": 'ytrr89erARAiPE34692iwUMvWqqBvC',
"PEGLEG_SALT": "MySecretSalt1234567890]["
})
def setup(self):
pegleg_main.run_config(
self.treasuremap_path, None, None, None, [], True, False)
pegleg_main.run_encrypt('zuul-tester', None, self.site_name)
@staticmethod
def _validate_no_files_encrypted(path):
for file in glob.iglob(os.path.join(path, '**', '*.yaml'),
recursive=True):
with open(file, 'r') as f:
data = f.read()
if 'pegleg/PeglegManagedDocument/v1' in data:
return False
return True
def test_collect_using_decrypt_option(self, tmpdir):
"""Validates collect action using a path to a local repo."""
# Scenario:
#
# 1) Create temporary save location
# 2) Collect into save location (should skip clone repo)
# 3) Check that expected file name is there
repo_path = self.treasuremap_path
result = self.runner.invoke(
commands.site, [
'--decrypt', '-r', repo_path, 'collect', self.site_name, '-s',
tmpdir
])
collected_files = os.listdir(tmpdir)
assert result.exit_code == 0, result.output
assert len(collected_files) == 1
# Validates that site manifests collected from cloned repositories
# are written out to sensibly named files like airship-treasuremap.yaml
assert collected_files[0] == ("%s.yaml" % self.repo_name)
assert self._validate_no_files_encrypted(tmpdir)
def test_render_site_using_decrypt_option(self, tmpdir):
"""Validates render action using local repo path."""
# Scenario:
#
# 1) Mock out Deckhand render (so we can ignore P005 issues)
# 2) Render site (should skip clone repo)
repo_path = self.treasuremap_path
render_command = [
'--decrypt', '-p', tmpdir, '-r', repo_path, 'render',
self.site_name
]
with mock.patch('pegleg.engine.site.yaml') as mock_yaml:
with mock.patch(
'pegleg.engine.site.util.deckhand') as mock_deckhand:
mock_deckhand.deckhand_render.return_value = ([], [])
result = self.runner.invoke(commands.site, render_command)
assert result.exit_code == 0
mock_yaml.dump_all.assert_called_once()
assert self._validate_no_files_encrypted(
os.path.join(
tmpdir, 'treasuremap.git', 'site', 'seaworthy', 'secrets'))
def test_lint_site_using_decrypt_option(self, tmpdir):
"""Validates site lint action using local repo path."""
# Scenario:
#
# 1) Mock out Deckhand render (so we can ignore P005 issues)
# 2) Lint site with warn flags (should skip clone repo)
repo_path = self.treasuremap_path
lint_command = [
'--decrypt', '-p', tmpdir, '-r', repo_path, 'lint', self.site_name
]
exclude_lint_command = [
'-w', errorcodes.SCHEMA_STORAGE_POLICY_MISMATCH_FLAG, '-w',
errorcodes.SECRET_NOT_ENCRYPTED_POLICY
]
with mock.patch('pegleg.engine.site.util.deckhand') as mock_deckhand:
mock_deckhand.deckhand_render.return_value = ([], [])
result = self.runner.invoke(
commands.site, lint_command + exclude_lint_command)
assert result.exit_code == 0, result.output
assert self._validate_no_files_encrypted(
os.path.join(
tmpdir, 'treasuremap.git', 'site', 'seaworthy', 'secrets'))
    @mock.patch.dict(
        os.environ, {
            "PEGLEG_PASSPHRASE": "123456789012345678901234567890",
            "PEGLEG_SALT": "MySecretSalt1234567890]["
        })
    def test_upload_collection_callback_default_to_site_name(self, tmpdir):
        """Validates that collection will default to the given site_name"""
        # Scenario:
        #
        # 1) Mock out ShipyardHelper
        # 2) Check that ShipyardHelper was called with collection set to
        #    site_name
        repo_path = self.treasuremap_path
        with mock.patch('pegleg.pegleg_main.ShipyardHelper') as mock_obj:
            result = self.runner.invoke(
                commands.site, [
                    '--decrypt', '-p', tmpdir, '-r', repo_path, 'upload',
                    self.site_name
                ])
        assert result.exit_code == 0
        mock_obj.assert_called_once()
        # --decrypt must leave the site secrets unencrypted on disk.
        assert self._validate_no_files_encrypted(
            os.path.join(
                tmpdir, 'treasuremap.git', 'site', 'seaworthy', 'secrets'))
    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_site_secrets_generate_pki_using_decrypt_option(self, tmpdir):
        """Validates ``generate certificates`` action using local repo path."""
        # Scenario:
        #
        # 1) Generate PKI using local repo path
        repo_path = self.treasuremap_path
        secrets_opts = ['secrets', 'generate', 'certificates', self.site_name]
        result = self.runner.invoke(
            commands.site,
            ['--decrypt', '-p', tmpdir, '-r', repo_path] + secrets_opts)
        assert result.exit_code == 0
        # Collect the generated file paths echoed in the command output.
        generated_files = []
        output_lines = result.output.split("\n")
        for line in output_lines:
            if self.repo_name in line:
                generated_files.append(line)
        assert len(generated_files), 'No secrets were generated'
        # Each generated file must parse as non-empty YAML.
        for generated_file in generated_files:
            with open(generated_file, 'r') as f:
                result = yaml.safe_load_all(f)  # Validate valid YAML.
                assert list(result), "%s file is empty" % generated_file
        # --decrypt must leave the site secrets unencrypted on disk.
        assert self._validate_no_files_encrypted(
            os.path.join(
                tmpdir, 'treasuremap.git', 'site', 'seaworthy', 'secrets'))
    @pytest.mark.skipif(
        not pki_utility.PKIUtility.cfssl_exists(),
        reason='cfssl must be installed to execute these tests')
    def test_check_pki_certs_expired_using_decrypt_option(self, tmpdir):
        """Validates ``secrets check-pki-certs`` fails on expired certs."""
        repo_path = self.treasuremap_path
        secrets_opts = ['secrets', 'check-pki-certs', self.site_name]
        result = self.runner.invoke(
            commands.site,
            ['--decrypt', '-r', repo_path, '-p', tmpdir] + secrets_opts)
        # Exit code 1 is the expected outcome here — per the test name the
        # site's certificates are expired, so the check must fail.
        assert result.exit_code == 1, result.output
        # --decrypt must leave the site secrets unencrypted on disk.
        assert self._validate_no_files_encrypted(
            os.path.join(tmpdir, 'site', 'seaworthy', 'secrets'))
    def test_genesis_bundle_using_decrypt_option(self, tmpdir):
        """Validates ``genesis_bundle`` action with the ``--decrypt`` flag."""
        repo_path = self.treasuremap_path
        args = [
            '--decrypt', '-p', tmpdir, '-r', repo_path, 'genesis_bundle', '-b',
            tmpdir, self.site_name
        ]
        # Stub out the (expensive) bundle build itself; only the command
        # wiring and the on-disk decryption are under test.
        with mock.patch(
                'pegleg.pegleg_main.bundle.build_genesis') as mock_build:
            result = self.runner.invoke(commands.site, args)
        assert result.exit_code == 0
        # --decrypt must leave everything under tmpdir unencrypted.
        assert self._validate_no_files_encrypted(tmpdir)
        mock_build.assert_called_once()
    def test_generate_passphrases_using_decrypt_option(self, tmpdir):
        """Validates ``secrets generate passphrases`` with ``--decrypt``."""
        repo_path = self.treasuremap_path
        args = [
            '--decrypt', '-p', tmpdir, '-r', repo_path, 'secrets', 'generate',
            'passphrases', '-s', repo_path, '-a', 'zuul_tester', self.site_name
        ]
        # Stub the passphrase generator; only command wiring is under test.
        with mock.patch(
                'pegleg.pegleg_main.engine.secrets.generate_passphrases'
        ) as mock_generator:
            result = self.runner.invoke(commands.site, args)
        assert result.exit_code == 0
        # --decrypt must leave everything under tmpdir unencrypted.
        assert self._validate_no_files_encrypted(tmpdir)
        mock_generator.assert_called_once()
|
import glob

import jinja2

# Build the presentation: render slides/base.html with the variable groups
# defined below and write the result to index.html.
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader("slides"))
template = jinja_env.get_template("base.html")

# General presentation metadata.
general_vars = {
    "presentation_date": "2021-09-29",
    # BUG FIX: corrected typo "disoders" -> "disorders" in displayed title.
    "presentation_title": "Investigating the determinants of social communication differences in neurodevelopmental disorders",
    "subtitle": "Thesis Defense",
    "contact_text": "Contact Me",
    "presentation_width": 1300,
    "presentation_height": 700,
    "email": "nguyenpjenny@gmail.com",
}

# Named colors used throughout the deck.
color_vars = {
    "blue": "#5073B3",
    "aqua_blue": "#015D8E",
    "baby_blue": "#80DEEA",
    "mustard": "#E58D05",
    "pink": "#E91E63",
    "light_pink": "#EB9B94",
    "red": "#FC3E00",
    "tan": "#EDE6D4",
    "peach": "#E5CEAE",
    "box_color": "#294F7C",
    "purple": "#563D7C",
    "light_purple": "#B39DDB",
    "green": "#4CAF50",
    "light_green": "#8BC34A",
    "dark_grey": "#414042",
    # Method colours
    "data_processing_color": "#93C8C9",
    "pcorr_color": "#F3A095",
    "sem_color": "#FDCB58",
    # Factor colours
    "demo_color": "#3F4077",
    "anx_color": "#47A5A2",
    "att_color": "#FDB715",
    "iq_color": "#E77E24",
    "rrb_color": "#897BB6",
    "lang_color": "#CE5F57",
    "sen_color": "#60AD45",
}

# Collect logo images; normalise Windows path separators for the browser.
logos = glob.glob(r"assets/images/logos/logo_*.png")
logos = [i.replace("\\", "/") for i in logos]

home_vars = {
    "logos": logos,
    "jenny_avatar": "assets/images/jennyn_theme/jenny_icon.svg",
    "mugsy_avatar": "assets/images/jennyn_theme/mugsy_icon.svg",
    "home_bg": "assets/images/jennyn_theme/geometric_orange_bg.svg",
}

image_vars = {
    # Image folder
    "image_folder": "assets/images/",
    # Introduction images
    "asd_img": "assets/images/ASDDomains.svg",
    "cliff_img": "assets/images/GapCliff.svg",
    "path_model_img": "assets/images/PathModel.svg",
    "twelve_models_img": "assets/images/TwelveSemModels.svg",
    "factor_img": "assets/images/Factors.svg",
    # Method images
    "pond_img": "assets/images/logos/logo_POND.png",
    "sex_dx_plot": "assets/images/SexDx.svg",
    "measures_img": "assets/images/Measures.svg",
    "methods_img": "assets/images/Methods-03.svg",
    "data_cleaning_img": "assets/images/DataCleaning.svg",
    "dat_viz_img": "assets/images/DataViz.svg",
    "pcorr_img": "assets/images/PCorr.svg",
    "ttest_img": "assets/images/TTest.svg",
    "sem_img": "assets/images/SEM.svg",
    "labels_img": "assets/images/Labels.svg",
    "distill_img": "assets/images/Distill.svg",
    # SEM
    "sem_scq_combined_img": "assets/sem/SCQ_Combined.svg",
    "sem_abas_combined_img": "assets/sem/ABAS_Combined.svg",
    "sem_cbcl_combined_img": "assets/sem/CBCL_Combined.svg",
    "multigroup_plot": "assets/sem/sem_multigroup.html",
    "sem_3_measures_plot": "assets/sem/sem_3_measures.html",
    "sem_scq_plot": "assets/sem/sem_scq.html",
    "sem_abas_plot": "assets/sem/sem_abas.html",
    "sem_cbcl_plot": "assets/sem/sem_cbcl.html",
    # Distribution Plots
    "sex_race_plt": "assets/plots/sex_race.svg",
    "sc_dist_plt": "assets/plots/sc_dist.svg",
    "factor_dist_plt": "assets/plots/factor_dist.svg",
    "scq_soc_dist_plt": "assets/plots/scq_soc_dist.svg",
    "scq_com_dist_plt": "assets/plots/scq_com_dist.svg",
    "abas_soc_dist_plt": "assets/plots/abas_soc_dist.svg",
    "abas_com_dist_plt": "assets/plots/abas_com_dist.svg",
    "cbcl_soc_dist_plt": "assets/plots/cbcl_soc_dist.svg",
    # gifs
    "rainbow_path": "assets/gifs/RainbowPath.mp4",
    "support_gif": "https://giphy.com/embed/eiADi71GIXWWc2Z8X8",
    "path_gif": "https://giphy.com/embed/CUehYc3mmGfUDXO6pH",
    "stair_gif": "https://giphy.com/embed/lrA5TYCRUjVUFDbJFz",
    "microscope_gif": "https://giphy.com/embed/jSO9vpciheZYmiormq",
    "measure_gif": "https://giphy.com/embed/fo4Wm6nsWAUbdaAIfD",
    "faces_gif": "https://giphy.com/embed/LRncOtkTDSjqeJEWmQ",
    # "_img": "assets/images/.svg",
}

table_vars = {
    "par_char_table_full": "assets/tables/full_participant_char_table.svg",
    "par_char_table": "assets/tables/participant_char_table.svg",
    "ttest_table": "assets/tables/ttest_table.svg",
    "pcorr_table": "assets/tables/corr_table.svg",
    "fit_table": "assets/tables/fit_table.svg",
    # "_table": "assets/tables/_table.htm",
}

# Merge all variable groups (later groups win on key collisions).
jinja_vars = {**general_vars, **color_vars, **home_vars, **image_vars, **table_vars}
html_str = template.render(jinja_vars)
# BUG FIX: use a context manager so the file handle is closed even if the
# write fails; write UTF-8 explicitly for platform independence.
with open("index.html", "w", encoding="utf-8") as html_file:
    html_file.write(html_str)
|
# Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test-Suite to ensure that the Solr Service is working as expected for all queries."""
import time
from http import HTTPStatus
import pytest
from flask import Flask
from search_api.services import solr
from search_api.services.solr import Solr, SolrDoc, SolrFields
from tests import integration_solr
from . import SOLR_TEST_DOCS
@integration_solr
@pytest.mark.parametrize('test_name,query,expected', [
    ('test-doesnt-match-identifier', 'CP00', []),
    ('test-doesnt-match-bn', 'BN00012334', []),
    ('test-name-exact', 'tests 2222', ['tests 2222']),
    ('test-case', 'not case sensitive', ['NOt Case SENSitive']),
    ('test-partial-1', 'tester', ['tester 1111']),
    ('test-partial-2', 'tester 11', ['tester 1111']),
    ('test-partial-3', 'lots of wor', ['lots of words in here']),
    ('test-all-words-match', 'tests oops 2222', []),
    ('test-stem-matches', 'test 2222', ['tests 2222']),
    ('test-multiple-matches', 'test', ['test 1234', 'tester 1111', 'tests 2222', 'test 3333', '4444 test']),
])
def test_solr_suggest_name(test_name, query, expected):
    """Assert that solr suggest call works as expected."""
    # Reset the core to the known fixture documents before querying.
    solr.delete_all_docs()
    solr.create_or_replace_docs(SOLR_TEST_DOCS)
    time.sleep(0.5)  # wait for solr to register update
    # Ask the suggester for up to 10 suggestions.
    suggestions = solr.suggest(query, 10, True)
    # Exactly the expected names must come back (compared upper-cased).
    assert len(suggestions) == len(expected)
    assert all(name.upper() in suggestions for name in expected)
@integration_solr
@pytest.mark.parametrize('test_name,query,query_field,expected_field,expected', [
    ('test-identifier', 'CP00', SolrFields.IDENTIFIER_SELECT, SolrFields.IDENTIFIER, ['CP0034567']),
    ('test-bn', '0012334', SolrFields.BN_SELECT, SolrFields.BN, ['BN00012334']),
    ('test-name-exact', 'tests 2222', SolrFields.NAME_SINGLE, SolrFields.NAME, ['tests 2222']),
    ('test-case', 'not case sensitive', SolrFields.NAME_SINGLE, SolrFields.NAME, ['NOt Case SENSitive']),
    ('test-partial-1', 'tester', SolrFields.NAME_SINGLE, SolrFields.NAME, ['tester 1111']),
    ('test-partial-2', 'tester 11', SolrFields.NAME_SINGLE, SolrFields.NAME, ['tester 1111']),
    ('test-partial-3', 'lots of wor', SolrFields.NAME_SINGLE, SolrFields.NAME, ['lots of words in here']),
    ('test-partial-4', 'ots of ords', SolrFields.NAME_SINGLE, SolrFields.NAME, ['lots of words in here']),
    ('test-all-words-match', 'tests oops 2222', SolrFields.NAME_SINGLE, SolrFields.NAME, []),
    ('test-multiple-matches', 'test 1', SolrFields.NAME_SINGLE, SolrFields.NAME, ['test 1234', 'tester 1111']),
])
def test_solr_select(test_name, query, query_field, expected_field, expected):
    """Assert that solr select call works as expected."""
    # Reset the core to the known fixture documents before querying.
    solr.delete_all_docs()
    solr.create_or_replace_docs(SOLR_TEST_DOCS)
    time.sleep(0.5)  # wait for solr to register update
    # Build the query string against the requested field, then select.
    search_params = Solr.build_split_query(query, query_field)
    docs = solr.select(search_params, 10)
    # Exactly the expected documents must come back.
    assert len(docs) == len(expected)
    assert all(doc[expected_field] in expected for doc in docs)
@integration_solr
@pytest.mark.parametrize('test_name,query,expected', [
    ('test-identifier', 'CP00', ['<b>CP00</b>34567']),
    ('test-bn', '0012334', ['BN0<b>0012334</b>']),
    ('test-name-exact', 'tests 2222', ['<b>TESTS 2222</b>']),
    ('test-case', 'not case sensitive', ['<b>NOT CASE SENSITIVE</b>']),
    ('test-partial-1', 'tester', ['<b>TESTER</b> 1111']),
    ('test-partial-2', 'tester 11', ['<b>TESTER 11</b>11']),
    ('test-partial-3', 'ots of ords', ['LOTS OF WORDS IN HERE']),
    ('test-all-words-match', 'tests oops 2222', []),
    ('test-multiple-matches', 'test 1', ['<b>TEST 1</b>234','TESTER 1111',]),
    ('test-bn-identifier-name', '123', [
        'TEST <b>123</b>4',
        'CP<b>123</b>4567',
        'BN000<b>123</b>34']),
])
def test_solr_business_suggest(test_name, query, expected):
    """Assert that solr business suggest call works as expected."""
    # Reset the core to the known fixture documents before querying.
    solr.delete_all_docs()
    solr.create_or_replace_docs(SOLR_TEST_DOCS)
    time.sleep(0.5)  # wait for solr to register update
    # Query the combined (name/identifier/bn) business suggester.
    suggestions = solr.business_suggest(query)
    # Exactly the expected highlighted values must come back.
    assert len(suggestions) == len(expected)
    assert all(suggestion['value'] in expected for suggestion in suggestions)
|
import datetime
import json
import math

import scrapy

from scrapy_official_newspapers.items import ScrapyOfficialNewspapersItem
from scrapy_official_newspapers.spiders import BaseSpider
class ElPeruano(BaseSpider):
    """Spider for 'El Peruano', the official gazette of Peru.

    Queries the gazette's search API page by page and yields one item per
    norm whose text matches the configured filtering keywords.
    """
    name = "ElPeruano"
    country = "El Peruano, Diario oficial del Perú"
    country_code = "PE"  # You can find the ISO3166 country code here: https://gist.github.com/ssskip/5a94bfcd2835bf1dea52
    state_name = "Federal"
    state_code = ""  # As per the Holidays package, you can find the code here https://pypi.org/project/holidays/ if available.
    source = "El peruano"
    spider_builder = "Ignacio Fernandez"
    scrapable = "True"
    allowed_domains = ["elperuano.pe"]
    start_date = "2020-09-01"

    def __init__(self):
        # NOTE(review): super().__init__() is not called — confirm that
        # BaseSpider does not require it.
        self.keyword_dict, self.negative_keyword_dict = self.import_filtering_keywords()
        self.from_date, self.today = self.create_date_span(self.start_date)
        # Page 0 is fetched first only to learn the total number of hits.
        self.start_urls = [
            f'https://busquedas.elperuano.pe/api/v1/elvis?from_date={self.from_date}&page=0&scope=false&to_date={self.today}']

    def parse(self, response):
        """Read the hit count from page 0 and schedule the remaining pages."""
        hits = json.loads(response.text)['totalHits']
        hits = math.ceil(hits / 10)  # API returns 10 results per page
        # NOTE(review): range(1, hits) never re-requests page 0, so items on
        # the first page are not passed through parse_other — confirm intended.
        URLs = [
            f'https://busquedas.elperuano.pe/api/v1/elvis?from_date={self.from_date}&page={i}&scope=false&to_date={self.today}'
            for i in range(1, hits)]
        ref_lst = []
        self.ref_lst = ref_lst
        for url in URLs:
            yield scrapy.Request(url, dont_filter=True, callback=self.parse_other)

    def parse_other(self, response):
        """Yield one filtered item per norm on a result page."""
        item = ScrapyOfficialNewspapersItem()
        for norm in json.loads(response.text)['hits']:
            # Organization code is optional in the API payload.
            if 'subjectOrganizationCode' in norm['metadata']:
                ref = norm['metadata']['subjectOrganizationCode']
            else:
                ref = ""
            try:
                item['reference'] = ref
                item['doc_url'] = 'https://busquedas.elperuano.pe/download/url/' + str(norm['metadata']['slug'])
                # Match keywords against description, slug and highlighted text.
                text_to_search = self.clean_text(norm['metadata']['description']) + " " + self.clean_text(
                    norm['metadata']['slug']) + " " + self.clean_text(norm['highlightedText'])
                if self.search_keywords(text_to_search, self.keyword_dict, self.negative_keyword_dict):
                    item['country'] = self.country
                    item['state'] = self.state_name
                    item["law_class"] = ""  # TODO: look at the right field when adjusted.
                    item['data_source'] = self.source
                    item['authorship'] = norm['metadata']['editionName']
                    item['summary'] = self.clean_text(norm['metadata']['description'])
                    item['title'] = self.clean_text(norm['metadata']['description'])
                    item['publication_date'] = norm['metadata']['publicationDate']['formatted']
                    item['enforcement_date'] = item['publication_date']
                    item['url'] = 'https://busquedas.elperuano.pe' + str(norm['url_link'])
                    # BUG FIX: `doc_url` was an undefined local name (NameError);
                    # the document URL is stored on the item.
                    item['doc_name'] = self.HSA1_encoding(item['doc_url'])
                    yield item
            except Exception as e:
                # Best-effort: log the failure and continue with the next norm.
                print(e)
                pass
|
from fastapi import APIRouter, Request
from typing import Optional
import requests
# from fastapi import FastAPI
from fastapi.responses import RedirectResponse
import sys
import time
import jwt # JWT library for python available from https://github.com/progrium/pyjwt
import uuid
import urllib
import urllib.parse
import urllib.request
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
router = APIRouter()
templates = Jinja2Templates(directory="website")


@router.get("/sisensesso", response_class=HTMLResponse)
async def read_item(request: Request, return_to: str = "", hostname: str = "sample", shared_key: str = "", username: str = "sso-test-user"):
    """Build a Sisense SSO redirect URL from a signed JWT and render it.

    Query parameters:
        return_to: optional path on the Sisense host to land on after SSO
        hostname: base URL of the Sisense instance
        shared_key: key shared with Sisense used to sign the JWT
        username: SSO subject to embed in the token
    """
    payload = {
        "iat": int(time.time()),   # issued-at timestamp
        "sub": username,           # SSO subject
        "jti": str(uuid.uuid4())   # unique token id
        # "exp": int(time.time()) + 10000,  # optional - expiration time
    }
    # shared_key = "0356d5471b73bf08a3f518474bedcc418366a9245b56ad2fecea9b73c5300261"
    jwt_string = jwt.encode(payload, shared_key)
    encoded_jwt = urllib.parse.quote_plus(jwt_string)  # url-encode the jwt string
    # hostname = "https://sisense.dataflix.com"
    location = hostname + "/jwt?jwt=" + encoded_jwt
    # BUG FIX: return_to defaults to "" and is never None, so the previous
    # `is not None` check always appended a bogus "&return_to=<hostname>".
    # Append only when a non-empty return path was actually supplied.
    if return_to:
        location += "&return_to=" + hostname + return_to
    return templates.TemplateResponse("sisense.html", {'request': request, 'location': location, 'hostname': hostname})
|
from gpytranslate import SyncTranslator

# Synthesize English speech for a fixed phrase and save it as an MP3 file.
tts_client = SyncTranslator()
with open("test.mp3", "wb") as audio_file:
    tts_client.tts("Hello world!", file=audio_file, targetlang="en")
|
from .dataset import directory_dataset, loader, torchvision_dataset
from .model import model_checkpointer
from .trainer import trainer_checkpointer
from .optim import optimizer, scheduler, optimizer_checkpointer
from .opts import print_opts, parse, add_extras, make_subsets |
"""Class to control openMVS behaviour."""
from pathlib import Path
import subprocess
from . import utils
class OpenMVSControllerError(RuntimeError):
    """Generic openMVS error raised when an openMVS tool fails."""
class OpenMVSController():
    """Controls behaviour of openMVS data processing.

    Wraps the openMVS command-line tools (DensifyPointCloud, ReconstructMesh,
    RefineMesh, TextureMesh) and tracks the intermediate scene files between
    pipeline stages.
    """

    def __init__(self, res_dir, ext_logger, openMVS_dir=None):
        """Locate the openMVS executables and remember the results directory.

        :param res_dir: directory where pipeline results are written
        :param ext_logger: logger used for progress and error reporting
        :param openMVS_dir: optional explicit path to the openMVS binaries;
            when None, the bundled build locations are searched
        :raises OpenMVSControllerError: if no executables directory is found
        """
        self.logger = ext_logger
        root_dir = Path(__file__).parent.parent.parent
        if openMVS_dir is None:
            self.openMVS_dir = root_dir / "software" / "openMVS" / "build_openMVS"
            # The binaries end up in different subdirs depending on platform.
            if (self.openMVS_dir / "bin" / "x64" / "Release").is_dir():
                self.openMVS_dir = self.openMVS_dir / "bin" / "x64" / "Release"
            elif (self.openMVS_dir / "install" / "bin").is_dir():
                self.openMVS_dir = self.openMVS_dir / "install" / "bin"
            else:
                raise OpenMVSControllerError("Could not find executables dir!")
        else:
            self.openMVS_dir = openMVS_dir

        self.res_dir = res_dir

    def densify_pointcloud(self,
                           p_prio=-1,
                           max_threads=0,
                           res_lvl=1,
                           res_min=640,
                           num_views=0,
                           num_views_fuse=3,
                           est_colors=False,
                           est_normals=False,
                           sample_mesh=0):
        """Increases number of points to make 3D model smoother."""
        self.logger.debug("Densify point cloud to make model smoother")

        self.export_dir = utils.check_dir(self.res_dir / "export")
        self.export_scene = self.export_dir / "scene.mvs"
        working_dir = utils.check_dir(self.res_dir / "dense")
        self.dense_scene = working_dir / "scene_densified.mvs"

        args = [str(self.openMVS_dir / "DensifyPointCloud")]
        args.extend(["-i", str(self.export_scene)])
        args.extend(["-o", str(self.dense_scene)])
        args.extend(["-w", str(working_dir)])
        args.extend(["--process-priority", str(p_prio)])
        args.extend(["--max-threads", str(max_threads)])
        args.extend(["--resolution-level", str(res_lvl)])
        args.extend(["--min-resolution", str(res_min)])
        args.extend(["--number-views", str(num_views)])
        args.extend(["--number-views-fuse", str(num_views_fuse)])
        args.extend(["--estimate-colors", str(int(est_colors))])
        args.extend(["--estimate-normals", str(int(est_normals))])
        args.extend(["--sample-mesh", str(sample_mesh)])

        try:
            utils.execute(args, self.logger, OpenMVSControllerError)
        except OpenMVSControllerError as err:
            # BUG FIX: densification is best-effort (create_mesh can fall back
            # to the exported scene), but the failure was silently swallowed.
            # Log it so the degraded result is traceable.
            self.logger.warning("DensifyPointCloud failed, continuing: %s", err)

    def create_mesh(self,
                    export_type="obj",
                    p_prio=-1,
                    max_threads=0,
                    const_weight=1,
                    free_space=0,
                    thickness=1,
                    quality=1,
                    decimate=1,
                    remove_spurious=20,
                    remove_spikes=True,
                    close_holes=30,
                    smooth=2):
        """Create a mesh from a 3D point cloud.

        :raises OpenMVSControllerError: if neither a dense nor an exported
            point cloud is available
        """
        self.logger.debug("Create mesh from point cloud")

        working_dir = utils.check_dir(self.res_dir / "mesh")
        self.mesh_scene = working_dir / "mesh.mvs"

        # If no dense point cloud exists, use exported scene.
        # BUG FIX: guard with getattr — the attributes only exist once
        # densify_pointcloud() has run; a bare access raised AttributeError.
        args = [str(self.openMVS_dir / "ReconstructMesh")]
        if getattr(self, "dense_scene", None) and self.dense_scene.is_file():
            args.extend(["-i", str(self.dense_scene)])
        elif getattr(self, "export_scene", None) and self.export_scene.is_file():
            self.logger.debug("Using exported scene instead of dense scene.")
            args.extend(["-i", str(self.export_scene)])
        else:
            raise OpenMVSControllerError("No pointcloud found, will not mesh")
        args.extend(["-o", str(self.mesh_scene)])
        args.extend(["-w", str(working_dir)])
        args.extend(["--export-type", str(export_type)])
        args.extend(["--process-priority", str(p_prio)])
        args.extend(["--max-threads", str(max_threads)])
        args.extend(["--constant-weight", str(const_weight)])
        args.extend(["-f", str(free_space)])
        args.extend(["--thickness-factor", str(thickness)])
        args.extend(["--quality-factor", str(quality)])
        args.extend(["--decimate", str(decimate)])
        args.extend(["--remove-spurious", str(remove_spurious)])
        args.extend(["--remove-spikes", str(int(remove_spikes))])
        args.extend(["--close-holes", str(close_holes)])
        args.extend(["--smooth", str(smooth)])

        utils.execute(args, self.logger, OpenMVSControllerError)

    def refine_mesh(self,
                    export_type="obj",
                    p_prio=-1,
                    max_threads=0,
                    res_lvl=0,
                    res_min=640,
                    max_views=8,
                    decimate=1,
                    holes=30,
                    ensure_edge_size=1,
                    max_face_area=64,
                    scales=3,
                    scale_step=0.5,
                    reduce_memory=True,
                    alt_pair=0,
                    reg_weight=0.2,
                    rig_ela_r=0.9,
                    grad_step=45.05,
                    vertex_ratio=0,
                    use_cuda=False):
        """
        Refine 3D mesh.

        Despite being used by default, CUDA is specifically disabled as default
        since it is known to cause problems. See also
        https://github.com/cdcseacave/openMVS/issues/378
        https://github.com/cdcseacave/openMVS/issues/230
        """
        self.logger.debug("Refine 3D mesh")

        working_dir = utils.check_dir(self.res_dir / "refined_mesh")
        self.refined_mesh = working_dir / "mesh_refined.mvs"

        args = [str(self.openMVS_dir / "RefineMesh")]
        args.extend(["-i", str(self.mesh_scene)])
        args.extend(["-o", str(self.refined_mesh)])
        args.extend(["-w", str(working_dir)])
        args.extend(["--export-type", str(export_type)])
        args.extend(["--process-priority", str(p_prio)])
        args.extend(["--max-threads", str(max_threads)])
        args.extend(["--resolution-level", str(res_lvl)])
        args.extend(["--min-resolution", str(res_min)])
        args.extend(["--max-views", str(max_views)])
        args.extend(["--decimate", str(decimate)])
        args.extend(["--close-holes", str(holes)])
        args.extend(["--ensure-edge-size", str(ensure_edge_size)])
        args.extend(["--max-face-area", str(max_face_area)])
        args.extend(["--scales", str(scales)])
        args.extend(["--scale-step", str(scale_step)])
        args.extend(["--reduce-memory", str(int(reduce_memory))])
        args.extend(["--alternate-pair", str(alt_pair)])
        args.extend(["--regularity-weight", str(reg_weight)])
        args.extend(["--rigidity-elasticity-ratio", str(rig_ela_r)])
        args.extend(["--gradient-step", str(grad_step)])
        args.extend(["--planar-vertex-ratio", str(vertex_ratio)])
        args.extend(["--use-cuda", str(int(use_cuda))])

        try:
            utils.execute(args, self.logger, OpenMVSControllerError)
        except OpenMVSControllerError as err:
            # BUG FIX: refinement is best-effort (texture_mesh can fall back
            # to the unrefined mesh), but the failure was silently swallowed.
            self.logger.warning("RefineMesh failed, continuing: %s", err)

    def texture_mesh(self,
                     export_type="obj",
                     p_prio=-1,
                     max_threads=0,
                     res_lvl=0,
                     res_min=640,
                     outlier_thres=0.6,
                     cost_smooth_r=0.1,
                     seam_level_global=1,
                     seam_level_local=1,
                     texture_size_multiple=0,
                     patch_heuristic=3,
                     empty_color=16744231,
                     orthographic_res=0):
        """Add texture to mesh using images.

        :raises OpenMVSControllerError: if neither a refined nor a regular
            mesh is available
        """
        self.logger.debug("Add texture to mesh using images")

        working_dir = utils.check_dir(self.res_dir / "textured_mesh")
        self.textured_obj = working_dir / "textured_model.obj"

        # If no refined mesh exists, use regular mesh.
        # BUG FIX: guard with getattr — the attributes only exist once the
        # corresponding pipeline stage has run.
        args = [str(self.openMVS_dir / "TextureMesh")]
        if getattr(self, "refined_mesh", None) and self.refined_mesh.is_file():
            args.extend(["-i", str(self.refined_mesh)])
        elif getattr(self, "mesh_scene", None) and self.mesh_scene.is_file():
            self.logger.debug("Using regular mesh instead of refined mesh.")
            args.extend(["-i", str(self.mesh_scene)])
        else:
            raise OpenMVSControllerError("No mesh found, will not texture")
        args.extend(["-o", str(self.textured_obj)])
        args.extend(["-w", str(working_dir)])
        args.extend(["--export-type", str(export_type)])
        args.extend(["--process-priority", str(p_prio)])
        args.extend(["--max-threads", str(max_threads)])
        args.extend(["--resolution-level", str(res_lvl)])
        args.extend(["--min-resolution", str(res_min)])
        args.extend(["--outlier-threshold", str(outlier_thres)])
        args.extend(["--cost-smoothness-ratio", str(cost_smooth_r)])
        args.extend(["--global-seam-leveling", str(seam_level_global)])
        args.extend(["--local-seam-leveling", str(seam_level_local)])
        args.extend(["--texture-size-multiple", str(texture_size_multiple)])
        args.extend(["--patch-packing-heuristic", str(patch_heuristic)])
        args.extend(["--empty-color", str(empty_color)])
        args.extend(["--orthographic-image-resolution", str(orthographic_res)])

        utils.execute(args, self.logger, OpenMVSControllerError)
|
"""
Figure: network spreading
@auth: Yu-Hsiang Fu
@date: 2015/12/10
@update: 2018/03/22
"""
# --------------------------------------------------------------------------------
# 1.Import modular
# --------------------------------------------------------------------------------
# import modular
import matplotlib.pyplot as plt
import numpy as np
# import custom-modular
import util.handler.pickle_handler as ph
# import constant
from util.constant.constant_folder import FOLDER_IMAGE
# --------------------------------------------------------------------------------
# 2.Define variable
# --------------------------------------------------------------------------------
# simulation
NUM_SIMULATION = 100  # simulation runs averaged into each curve
NUM_SPREADER = 1      # initial spreaders (appears as "topk" in file names)
NUM_TIME_STEP = 50    # spreading time steps per run
# plot
# COLOR_LIST = ['gray', 'orange', 'y', 'b', 'c', 'm', 'r', 'k']
# MARKER_LIST = ['o', '^', 'v', '8', 'H', 's', 'D', 'x', '+']
PLOT_DPI = 300         # output image resolution
PLOT_FORMAT = 'png'    # output image format
PLOT_LAYOUT_PAD = 0.1  # tight_layout padding
PLOT_X_SIZE = 4        # figure width in inches
PLOT_Y_SIZE = 4        # figure height in inches
# --------------------------------------------------------------------------------
# 3.Define function
# --------------------------------------------------------------------------------
def draw_spreading_result(net_name, spreading_result):
    """Plot averaged spreading curves per r0 for one network and save PNGs.

    :param net_name: network name, used in the output image file name
    :param spreading_result: dict r0 -> measure -> list of per-run
        {time_step: infected_count} dicts
    """
    for r0 in spreading_result:
        # BUG FIX: typo in progress message ("Daw" -> "Draw").
        print(" --- Draw plot of r0={0}".format(r0))

        # Average the simulation runs per measure over all time steps.
        data = {}
        for measure in sorted(spreading_result[r0].keys()):
            simulation = [0] * (NUM_TIME_STEP + 1)
            for s in spreading_result[r0][measure]:
                for t in sorted(s):
                    simulation[t] += s[t]
            data[measure] = [s/NUM_SIMULATION for s in simulation]

        # --------------------------------------------------
        # create a figure
        fig, ax = plt.subplots(figsize=(PLOT_X_SIZE, PLOT_Y_SIZE), facecolor='w')

        # plot setting
        ax.grid(color="gray", linestyle="dotted", linewidth=0.5)
        ax.locator_params(axis="x", nbins=11)
        ax.set_xlim(-0.01, NUM_TIME_STEP + 1.01)
        ax.set_ylim(-0.01, 1.01)
        ax.set_xlabel('Time step', fontdict={'fontsize': 10})
        ax.set_ylabel('% infected-nodes', fontdict={'fontsize': 10})
        # NOTE(review): these labels (0.0..1.0) relabel an axis whose data
        # range is 0..NUM_TIME_STEP — confirm the relabelling is intentional.
        ax.set_xticklabels([str(round(p, 1)) for p in np.arange(0, 1.1, 0.1)])
        ax.tick_params(axis="both", direction="in", which="major", labelsize=8)

        # draw plot
        marker_index = 0
        legend_text = sorted(data)
        for measure in legend_text:
            _marker = "o"
            # _marker = MARKER_LIST[marker_index]
            # marker_index += 1
            ax.axes.plot(data[measure], linewidth=1, marker=_marker, markersize=4, markevery=1, fillstyle="none")

        # legend-text
        ax.legend(legend_text, loc=2, fontsize='medium', prop={'size': 6}, ncol=1, framealpha=0.5)

        # save image
        image_path = "{0}{1}, spreading-r0={2}-topk={3}-sim={4}-t={5}.png"
        image_path = image_path.format(FOLDER_IMAGE, net_name, r0, NUM_SPREADER, NUM_SIMULATION, NUM_TIME_STEP)
        plt.tight_layout(pad=PLOT_LAYOUT_PAD)
        plt.savefig(image_path, dpi=PLOT_DPI, format=PLOT_FORMAT)
        plt.close()
# --------------------------------------------------------------------------------
# 4.Main function
# --------------------------------------------------------------------------------
def main_function():
    """Load the spreading pickle for each test network and plot its curves."""
    # test networks
    # filename_list = ["regular_n=1000_k=5"]
    #
    filename_list = [
        "ba_n=1000_k=5",
        "random_n=1000_k=5",
        "sw_n=1000_k=5_p=0.1",
    ]

    # global-variable setting
    global NUM_SIMULATION, NUM_SPREADER, NUM_TIME_STEP
    NUM_SIMULATION, NUM_SPREADER, NUM_TIME_STEP = 100, 1, 50

    for net_name in filename_list:
        print(" - [Net] {0}:".format(net_name))

        print(" -- Read pickle file")
        file_path = "{0}{1}, spreading-topk={2}-sim={3}-t={4}.pickle".format(
            FOLDER_IMAGE, net_name, NUM_SPREADER, NUM_SIMULATION, NUM_TIME_STEP)
        spreading_result = ph.read_pickle_file(file_path)

        print(" -- Draw spreading results")
        draw_spreading_result(net_name, spreading_result)
        print(" - [/Net]\n")
print(" - [/Net]\n")
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main_function()
|
from flask import Flask, render_template, url_for
from forms import RegistrationForm, LoginForm
app = Flask(__name__)
# NOTE(review): SECRET_KEY is blank — Flask sessions/CSRF tokens are not
# protected. Set a real random value (e.g. from an environment variable)
# before deploying.
app.config['SECRET_KEY'] = ''

# Static placeholder posts rendered on the home page.
posts = [
    {
        'author': 'Johnny Nguyen',
        'title': 'Blog post 1',
        'content': 'First post content',
        'date_posted': 'Oct 3, 2018',
    },
    {
        'author': 'Jane Nguyen',
        'title': 'Blog post 2',
        'content': 'second post content',
        'date_posted': 'Oct 4, 2018',
    },
]
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route('/register')
def register():
    """Render the registration form.

    NOTE(review): the route only accepts GET, so the form cannot POST back
    to it — confirm whether methods=['GET', 'POST'] is needed.
    """
    form = RegistrationForm()
    return render_template('register.html', title='Register', form=form)
@app.route('/login')
def login():
    """Render the login form.

    NOTE(review): the route only accepts GET, so the form cannot POST back
    to it — confirm whether methods=['GET', 'POST'] is needed.
    """
    form = LoginForm()
    return render_template('login.html', title='login', form=form)
if __name__ == '__main__':
    # Development server only — debug=True must not be used in production.
    app.run(debug=True)
|
def nextPermutation(nums):
    """Rearrange `nums` in place into the next lexicographic permutation.

    If `nums` is already the largest permutation (fully descending), it is
    rearranged into the smallest (ascending) one. Returns None.
    """
    if not nums:
        return None
    # Scan right-to-left for the first index whose value is smaller than its
    # right neighbour; pivot stays -1 for a fully descending sequence, which
    # makes the swap/sort below reverse the whole list into ascending order.
    pivot = -1
    for k in range(len(nums) - 1, 0, -1):
        if nums[k - 1] < nums[k]:
            pivot = k - 1
            break
    # Swap the pivot with the rightmost element larger than it, then sort
    # the suffix to make it the minimal arrangement.
    for k in range(len(nums) - 1, -1, -1):
        if nums[k] > nums[pivot]:
            nums[k], nums[pivot] = nums[pivot], nums[k]
            nums[pivot + 1:] = sorted(nums[pivot + 1:])
            return
nums = [1, 2, 3]
# BUG FIX: nextPermutation mutates `nums` in place and returns None, so the
# old `print(nextPermutation(nums))` printed None. Call it, then print the
# mutated list.
nextPermutation(nums)
print(nums)  # Output: [1, 3, 2]
import json
import sys,os
import threading
from typing import List
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from request.RequestPostJson import RequestPostJson
from session.Session import Session
from request.RequestGetOwnPlayerTrades import RequestGetOwnPlayerTrades
class RequestCancelTrade(RequestPostJson):
    """POST request that cancels a single trade, identified by its id."""

    def __init__(self, session: Session, trade: int) -> None:
        """Store the id of the trade to cancel.

        :param session: active session used to issue the request
        :param trade: id of the trade to cancel
        """
        super().__init__(session)
        self.trade = trade

    def get_body(self):
        """Build the JSON body for the TradeService 'cancelTrade' call."""
        # Single ServerRequestVO envelope; requestData carries the trade id.
        di_request = [{
            '__clazz__': 'ServerRequestVO',
            'requestClass': 'TradeService',
            'requestData': self.trade,
            'requestId': self.session.get_post_request_id(),
            'requestMethod': 'cancelTrade'
        }]
        return self.build_body(di_request)
def clear_all_trades(sess: Session):
    """Query the player's open trades and cancel each one concurrently."""
    # Query existing trades.
    response = RequestGetOwnPlayerTrades(sess).post()
    trades = []
    for res in json.loads(response.text):
        if res['requestMethod'] == 'getOwnPlayerTrades':
            trades = res['responseData']

    # Cancel all trades; the server requires one request per trade, so fire
    # them from separate threads and wait for all of them.
    def fire(req: RequestPostJson):
        req.post()

    workers = [
        threading.Thread(target=fire, args=[RequestCancelTrade(sess, trade['id'])])
        for trade in trades
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # NOTE(review): argv[1] appears to identify a stored session loaded from
    # file — confirm expected CLI usage.
    sess = Session(sys.argv[1])
    if sess.load_from_file():
        clear_all_trades(sess)
|
import matplotlib.pyplot as plt
import numpy as np
from ECDF import ecdf

# Draw empirical CDFs for three normal samples sharing a mean of 20 but with
# increasing spread. Samples are generated in the same order as before, so
# the RNG stream (and thus the figure) is unchanged.
for std in (1, 3, 10):
    samples = np.random.normal(20, std, size=100000)
    x_vals, y_vals = ecdf(samples)
    plt.plot(x_vals, y_vals, marker='.', linestyle='none')

# Make a legend and show the plot
plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
|
from tinydb import Query
from .database import Database
from .document import Document
from .mixins import ModelMixin
from .models import BaseModel
from .table import Table
|
class Buffer(object):
    """
    A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() them back. You can also peek() or drain() data.
    """

    def __init__(self, data=b''):
        """
        Initialize a buffer with 'data' (any bytes-like object).
        """
        self.buffer = bytes(data)

    def read(self, n=-1):
        """
        Read and return 'n' bytes from the buffer.

        If 'n' is negative, read and return the whole buffer.
        If 'n' is larger than the size of the buffer, read and return
        the whole buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            the_whole_buffer = self.buffer
            self.buffer = b''
            return the_whole_buffer

        data = self.buffer[:n]
        self.buffer = self.buffer[n:]
        return data

    def write(self, data):
        """
        Append 'data' (bytes) to the buffer.
        """
        self.buffer = self.buffer + data

    def peek(self, n=-1):
        """
        Return 'n' bytes from the buffer, without draining them.

        If 'n' is negative, return the whole buffer.
        If 'n' is larger than the size of the buffer, return the whole
        buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            return self.buffer
        return self.buffer[:n]

    def drain(self, n=-1):
        """
        Drain 'n' bytes from the buffer.

        If 'n' is negative, drain the whole buffer.
        If 'n' is larger than the size of the buffer, drain the whole
        buffer.
        """
        if (n < 0) or (n > len(self.buffer)):
            self.buffer = b''
            return
        self.buffer = self.buffer[n:]
        return

    def __len__(self):
        """Returns length of buffer. Used in len()."""
        return len(self.buffer)

    def __bool__(self):
        """
        Returns True if the buffer is non-empty.
        Used in truth-value testing.

        FIX: Python 3 looks up __bool__, not the Python 2 name __nonzero__
        (truth testing previously fell back to __len__, which happened to
        agree). Defining __bool__ makes the intent explicit on Python 3.
        """
        return len(self.buffer) > 0

    # Python 2 compatibility alias for the same predicate.
    __nonzero__ = __bool__
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model as lm # Used for solving linear regression problems
def fit_ar(y, p):
    """Fit an AR(p) model by least squares.

    The loss function is the sum of squared one-step errors from t=p+1 to t=n.

    :param y: array (n,), training data points
    :param p: int, AR model order
    :return: array (p,), learnt AR coefficients
    """
    n = len(y)
    # Row t of the regression matrix holds (y_{t-1}, ..., y_{t-p});
    # column lag-1 is therefore y delayed by `lag` samples.
    Phi = np.empty((n - p, p))
    for lag in range(1, p + 1):
        Phi[:, lag - 1] = y[p - lag:n - lag]
    # Targets: drop the first p values of y.
    targets = y[p:]
    # fit_intercept=False because the AR model has no constant/offset term.
    model = lm.LinearRegression(fit_intercept=False)
    model.fit(Phi, targets)
    return model.coef_
def predict_ar_1step(theta, y_target):
    r"""Predict y_t for t = p+1, ..., n with an AR(p) model via one-step-ahead
    prediction on the data in y_target.

    :param theta: array (p,), AR coefficients, theta=(a1,a2,...,ap).
    :param y_target: array (n,), the data points used to compute the predictions.
    :return: array (n-p,), the one-step predictions (\hat y_{p+1}, ..., \hat y_n)
    """
    p = len(theta)
    n = len(y_target)
    predictions = np.zeros(n - p)
    for step, t in enumerate(range(p, n)):
        # Regressor vector (y_{t-1}, ..., y_{t-p})^T: the most recent p
        # observations, newest first.
        recent_past = np.flip(y_target[t - p:t])
        predictions[step] = np.dot(recent_past, theta)
    return predictions
def plot_history(history, start_at):
    """Plot validation and training loss curves from a Keras-style History
    object, skipping the first `start_at` epochs."""
    epochs = history.epoch[start_at:]
    plt.plot(epochs, history.history['val_loss'][start_at:])
    plt.plot(epochs, history.history['loss'][start_at:])
    plt.legend(['Test error', 'Training error'])
    plt.xlabel('Epoch')
#-------------------------------
# Statistical functions for computing the bounds
#-------------------------------
from Source.experiments import *
from scipy.optimize import fsolve, brentq
# Years for the exposure
years = 10.
# Chi2 threshold corresponding to the desired confidence level
# (exactly one of the following lines should be active):
#chi2_th = 1. # 68.27 CL (1 sigma)
chi2_th = 2.71 # 90 % CL
#chi2_th = 3.84 # 95 % CL
#chi2_th = 4. # 95.45 % CL (2 sigma)
# Poisson chi2
# s: signal, b: background, d: data
def chi2_Poisson(s, b, d):
    """Poisson log-likelihood-ratio chi2 for expected counts s+b and observed d."""
    expected = s + b
    return 2. * (expected - d + d * np.log(d / expected))
    # Gaussian approximation would be: (s + b - d)**2./b
# From a list of PBH masses, compute the chi2 and the bound on the abundance for a given experiment
def pbh_bounds_chi2(Mpbhs, fpbhs, exp, is_DM, mass_spec, sig):
    """For each PBH mass, scan the abundance grid `fpbhs`, build the total
    Poisson chi2 against the experiment's background, and solve for the
    abundance where chi2 crosses the chi2_th threshold.

    Mpbhs: iterable of PBH masses; fpbhs: abundance grid for the chi2 scan;
    exp: experiment object providing Eback_bin, backrate_bin and name;
    is_DM: if True the bound is on f_pbh, otherwise on beta'.
    Returns a list with one bound per mass.
    NOTE(review): `sufix` and `interp1d` are presumably provided by the
    star import from Source.experiments — confirm.
    """
    # Sufix for outputs depending on the mass function
    sufx = sufix(mass_spec, sig)
    Eback, eventback = exp.Eback_bin, exp.backrate_bin
    # Scale the per-year background rate by the exposure time.
    eventback = eventback*years
    eventdat = eventback # for forecasts, take data as background
    fpbh_bounds = []
    for Mpbh in Mpbhs:
        # Pre-computed event rates for this mass/experiment on disk.
        folder = "fluxes/{:.1e}/event_rate_{}".format(Mpbh,exp.name)+sufx
        Evec, events = np.loadtxt(folder, unpack=True)
        chi2_fpbh = []
        for fpbh in fpbhs:
            signal = fpbh*events*years
            chi2_tot = 0.
            # Sum the per-bin Poisson chi2 over the background energy bins.
            for i, element in enumerate(Eback):
                chi2_tot += chi2_Poisson(signal[i], eventback[i], eventdat[i])
            chi2_fpbh.append(chi2_tot)
        #chi2int = interp1d(np.log10(fpbhs), chi2_fpbh, fill_value="extrapolate")
        chi2int = interp1d(fpbhs, chi2_fpbh, fill_value="extrapolate")
        #fpbhvec = np.logspace(np.log10(fpbhs[0]), np.log10(fpbhs[-1]))
        minchi2 = 0. #np.amin(chi2int(np.log10(fpbhvec)))
        # If is DM, obtain f_pbh, else obtain \beta'
        if is_DM:
            fpbh_bounds.append( fsolve( lambda fpbh: chi2int(fpbh) - (minchi2 + chi2_th), 1.e-2 ) )
            #fpbh_bounds.append( 10.**fsolve( lambda logfpbh: chi2int(logfpbh) - (minchi2 + chi2_th), -2 ) )
            #fpbh_bounds.append( brentq( lambda fpbh: chi2int(fpbh) - (minchi2 + chi2_th), fpbhs[0], 1.e5 ) )
            #fpbh_bounds.append( 10.**brentq( lambda logfpbh: chi2int(logfpbh) - (minchi2 + chi2_th), np.log10(fpbhs[0]), 2. ) )
        else:
            fpbh_bounds.append( fsolve( lambda fpbh: chi2int(fpbh) - (minchi2 + chi2_th), 1.e-16 ) )
    return fpbh_bounds
# Bound from Poisson chi2 for one single bin
def bound_anal_chi2_Poi(back, sig):
    """Solve for the abundance at which the single-bin Poisson chi2 equals
    chi2_th. `sig` is the signal evaluated at fpbh=1, `back` the background."""
    snr = sig / back

    def threshold_excess(fpbh):
        # chi2_Poisson(fpbh*sig, back, back) = 2*back*(fpbh*snr - log(1+fpbh*snr)),
        # rearranged so the root is the chi2_th crossing.
        return chi2_th / (2. * back) - (fpbh * snr - np.log(1. + fpbh * snr))

    return brentq(threshold_excess, 1.e-7, 1.e5)
# Bound from approximated chi2 for one single bin, valid for low snr
def bound_anal_chi2_apr(back, sig):
    """Gaussian-limit bound: the fpbh for which (fpbh*snr)^2 * back = chi2_th.

    `sig` is the signal evaluated at fpbh=1, `back` the background.
    """
    signal_to_noise = sig / back
    return np.sqrt(chi2_th / back) / signal_to_noise
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import itertools
# switch to Physical Review compatible font
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = r'\usepackage{amsmath,amssymb,txfonts}'
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = 'Times New Roman'
matplotlib.rcParams['font.weight'] = 'roman'
matplotlib.rcParams['font.size'] = 8
# Number of Monte Carlo samples to read from the simulation output.
num_data = 1000000
step, num_iter, energy, polar = np.loadtxt("simulation.txt", skiprows=5, unpack=True, max_rows=num_data)
#fig, [ax1, ax2, ax3] = plt.subplots(3, 1, gridspec_kw={'height_ratios': [2, 2, 1]}, figsize=(8.75/2.54,6.95/2.54), constrained_layout=True)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(8.75/2.54,4.6/2.54), constrained_layout=True)
# remove beta prefactor from energy
energy *= 0.5
# Running accumulators for block averaging (energy_sum/polar_sum are unused).
num_block = 0
energy_block = 0.0
polar_block = 0.0
energy_sum = 0.0
polar_sum = 0.0
energy_ave0 = 0.0
energy_var0 = 0.0
polar_ave0 = 0.0
polar_var0 = 0.0
# Sub-sampled cumulative averages and standard errors to be plotted.
step_block = []
energy_ave = []
energy_err = []
polar_ave = []
polar_err = []
block_size = 1000
sub_sample = 25
# block averaging & subsampling of data
for i in range(num_data):
    energy_block += energy[i]
    polar_block += polar[i]
    if i%block_size == block_size-1:
        # A block of block_size samples is complete: fold its mean into the
        # running first and second moments, then reset the block accumulators.
        num_block += 1
        energy_block /= block_size
        polar_block /= block_size
        energy_ave0 += energy_block
        energy_var0 += energy_block*energy_block
        polar_ave0 += polar_block
        polar_var0 += polar_block*polar_block
        energy_block = 0.0
        polar_block = 0.0
        if num_block%sub_sample == 0:
            # Every sub_sample blocks, record the cumulative mean and its
            # standard error sqrt((<x^2> - <x>^2)/num_block).
            step_block.append(block_size*num_block)
            energy_ave.append(energy_ave0/num_block)
            energy_err.append(np.sqrt((energy_var0/num_block - (energy_ave0/num_block)**2)/num_block))
            polar_ave.append(polar_ave0/num_block)
            polar_err.append(np.sqrt((polar_var0/num_block - (polar_ave0/num_block)**2)/num_block))
# exact reference values (energy also rescaled by the 0.5 beta factor)
energy0 = -20.08850 * 0.5
polar0 = -0.5856016
# Top panel: polarization estimate vs number of Metropolis operations,
# with the exact reference value drawn as a horizontal gray line.
ax1.errorbar(step_block, polar_ave, yerr=polar_err, marker='o', color='black', ms=2.5, ls='', elinewidth=1.25, zorder=100)
ax1.plot([0,num_data+block_size*sub_sample],[polar0,polar0], marker='', color='darkgray')
ax1.set_ylabel(r'polarization')
ax1.set_xlim([0,num_data+block_size*sub_sample])
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_locator(ticker.AutoMinorLocator(2))
ax1.yaxis.set_minor_locator(ticker.AutoMinorLocator(10))
# Bottom panel: energy estimate with reference line and LaTeX tick labels.
ax2.errorbar(step_block, energy_ave, yerr=energy_err, marker='o', color='black', ms=2.5, ls='', elinewidth=1.25, zorder=100)
ax2.plot([0,num_data+block_size*sub_sample],[energy0,energy0], marker='', color='darkgray')
ax2.set_xlabel(r'number of Metropolis operations')
ax2.set_ylabel(r'energy')
ax2.set_xlim([0,num_data+block_size*sub_sample])
ax2.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(2))
ax2.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(5))
ax2.xaxis.set_major_locator(ticker.FixedLocator([0,2e5,4e5,6e5,8e5,1e6]))
ax2.xaxis.set_major_formatter(ticker.FixedFormatter([r"$0$",r"$2 \! \times \! 10^5$",r"$4 \! \times \! 10^5$",r"$6 \! \times \! 10^5$",r"$8 \! \times \! 10^5$",r"$10^6$"]))
# Optional third panel (histogram of GQPE operations) kept for reference:
#ax3.hist(num_iter, bins=np.logspace(start=np.log10(1), stop=np.log10(1e7), num=36), log=True, color='darkgray', edgecolor='black', lw=1)
#ax3.set_xscale('log')
#ax3.set_xlim([1,1e7])
#ax3.set_ylim([1,1e6])
#ax3.set_xlabel(r'number of GQPE operations per Metropolis operation')
#ax3.set_ylabel('count')
#ax3.xaxis.set_minor_locator(matplotlib.ticker.LogLocator(numticks=10))
#ax3.yaxis.set_minor_locator(matplotlib.ticker.LogLocator(numticks=10))
#ax3.xaxis.set_minor_formatter(plt.NullFormatter())
#ax3.yaxis.set_minor_formatter(plt.NullFormatter())
plt.savefig('figure5.pdf', bbox_inches='tight', pad_inches=0.01)
|
import click
import toml
import sys
import os
@click.command()
@click.option('--pydss_path',
              default=r'C:\Users\alatif\Desktop\PyDSS_2.0\PyDSS')
@click.option('--sim_path',
              default=r'C:\Users\alatif\Desktop\PyDSS_2.0\PyDSS\examples\external_interfaces\Python_example')
@click.option('--sim_file',
              default=r'simulation.toml')
def run_pyDSS(pydss_path, sim_path, sim_file):
    """Run a short PyDSS example simulation: load TOML settings, step the
    instance 5 times with a fixed load override, print the resulting power,
    export results and tear the instance down."""
    sys.path.append(pydss_path)
    sys.path.append(os.path.join(pydss_path, 'PyDSS'))
    # `with` guarantees the settings file is closed (the original leaked
    # the file handle).
    with open(os.path.join(sim_path, sim_file), "r") as settings_file:
        sim_args = toml.loads(settings_file.read())
    # Import only after pydss_path has been added to sys.path.
    from pyDSS import instance as dssInstance
    wrapper = dssInstance()
    # Distinct name so the created instance does not shadow the imported
    # `dssInstance` class (the original reused the same name).
    sim = wrapper.create_dss_instance(sim_args)
    for t in range(5):
        x = {'Load.mpx000635970': {'kW': 7.28}}
        results = sim.RunStep(t, x)
        print(results['Load.mpx000635970']['Powers']['E']['value'])
    sim.ResultContainer.ExportResults()
    sim.DeleteInstance()
    del wrapper

if __name__ == '__main__':
    # Guard so importing this module does not launch a simulation.
    run_pyDSS()
|
"""Evaluation measures at the level of document orderings."""
from repro_eval.config import TRIM_THRESH, PHI
from scipy.stats.stats import kendalltau
from tqdm import tqdm
from repro_eval.measure.external.rbo import rbo
from repro_eval.util import break_ties
def _rbo(run, ideal, p, depth):
# Implementation taken from the TREC Health Misinformation Track with modifications
# see also: https://github.com/claclark/Compatibility
run_set = set()
ideal_set = set()
score = 0.0
normalizer = 0.0
weight = 1.0
for i in range(depth):
if i < len(run):
run_set.add(run[i])
if i < len(ideal):
ideal_set.add(ideal[i])
score += weight*len(ideal_set.intersection(run_set))/(i + 1)
normalizer += weight
weight *= p
return score/normalizer
def _ktau_union(orig_run, rep_run, trim_thresh=TRIM_THRESH, pbar=False):
    """
    Helping function returning a generator to determine Kendall's tau Union (KTU) for all topics.
    @param orig_run: The original run.
    @param rep_run: The reproduced/replicated run.
    @param trim_thresh: Threshold values for the number of documents to be compared.
    @param pbar: Boolean value indicating if progress bar should be printed.
    @return: Generator with KTU values.
    """
    generator = tqdm(rep_run.items()) if pbar else rep_run.items()
    for topic, docs in generator:
        orig_docs = list(orig_run.get(topic).keys())[:trim_thresh]
        rep_docs = list(rep_run.get(topic).keys())[:trim_thresh]
        union = sorted(set(orig_docs + rep_docs))
        # Build a position map once: the original called union.index(doc)
        # per document, which is O(n) each and O(n^2) per topic overall.
        position = {doc: idx for idx, doc in enumerate(union)}
        orig_idx = [position[doc] for doc in orig_docs]
        rep_idx = [position[doc] for doc in rep_docs]
        yield topic, round(kendalltau(orig_idx, rep_idx).correlation, 14)
def ktau_union(orig_run, rep_run, trim_thresh=TRIM_THRESH, pbar=False):
    """Kendall's tau Union (KTU) between the original and reproduced document
    orderings, as defined in:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
    Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param orig_run: The original run.
    @param rep_run: The reproduced/replicated run.
    @param trim_thresh: Number of documents to be compared per topic.
    @param pbar: Print a progress bar if True.
    @return: Dictionary mapping each topic to its KTU value.
    """
    # Safety check for runs that were not added via pytrec_eval.
    untied_orig = break_ties(orig_run)
    untied_rep = break_ties(rep_run)
    pairs = _ktau_union(untied_orig, untied_rep, trim_thresh=trim_thresh, pbar=pbar)
    return {topic: value for topic, value in pairs}
def _RBO(orig_run, rep_run, phi, trim_thresh=TRIM_THRESH, pbar=False, misinfo=True):
    """Generator yielding (topic, RBO) pairs for all topics of the runs.

    @param orig_run: The original run.
    @param rep_run: The reproduced/replicated run.
    @param phi: Top-heaviness parameter of the RBO.
    @param trim_thresh: Number of documents to be compared per topic.
    @param pbar: Print a progress bar if True.
    @param misinfo: Use the TREC Health Misinformation Track implementation
                    (see https://github.com/claclark/Compatibility) instead of
                    the external rbo package.
    @return: Generator with RBO values.
    """
    topics = tqdm(rep_run.items()) if pbar else rep_run.items()
    for topic, _docs in topics:
        rep_docs = list(rep_run.get(topic).keys())[:trim_thresh]
        orig_docs = list(orig_run.get(topic).keys())[:trim_thresh]
        if misinfo:
            value = _rbo(rep_docs, orig_docs, p=phi, depth=trim_thresh)
        else:
            value = rbo(rep_docs, orig_docs, p=phi).ext
        yield topic, value
def RBO(orig_run, rep_run, phi=PHI, trim_thresh=TRIM_THRESH, pbar=False, misinfo=True):
    """Rank-Biased Overlap (RBO) between the original and reproduced document
    orderings, as defined in:
    Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai,
    Philipp Schaer, Ian Soboroff.
    How to Measure the Reproducibility of System-oriented IR Experiments.
    Proceedings of SIGIR, pages 349-358, 2020.

    @param orig_run: The original run.
    @param rep_run: The reproduced/replicated run.
    @param phi: Top-heaviness parameter of the RBO.
    @param trim_thresh: Number of documents to be compared per topic.
    @param pbar: Print a progress bar if True.
    @param misinfo: Use the TREC Health Misinformation Track implementation
                    (see https://github.com/claclark/Compatibility).
    @return: Dictionary mapping each topic to its RBO value.
    """
    # Safety check for runs that were not added via pytrec_eval.
    untied_orig = break_ties(orig_run)
    untied_rep = break_ties(rep_run)
    pairs = _RBO(untied_orig, untied_rep, phi=phi, trim_thresh=trim_thresh, pbar=pbar, misinfo=misinfo)
    return {topic: value for topic, value in pairs}
|
import struct
from . import codes
from . import _data
class PDU:
    """A Modbus protocol data unit: a function code plus its payload bytes."""

    @property
    def functionCode(self):
        """The Modbus function code of this PDU."""
        return self._functionCode

    @property
    def bytes(self):
        """The raw payload bytes following the function code."""
        return self._bytes

    def __init__(self, functionCode, bytes_=b''):
        self._functionCode = functionCode
        self._bytes = bytes_

    def exception(self, code):
        """Build the exception-response PDU for this request.

        Uses `|` (not `+`) so the exception mask bit is set idempotently and
        cannot corrupt a function code that already carries the mask; this
        also matches RequestHandler._exceptionPDU.
        """
        return PDU(self._functionCode | codes.Exception.Mask, struct.pack('>B', code))
class IllegalFunction(Exception):
    """Raised when a request uses a function code this server does not implement."""
    code = codes.Exception.IllegalFunction
class RequestHandler:
    """Dispatches Modbus request PDUs to per-function handler coroutines.

    Subclasses override the per-function coroutines; the base implementations
    raise IllegalFunction so unimplemented functions produce the proper
    Modbus exception response.
    """

    @staticmethod
    def _exceptionPDU(exceptionCode, functionCode):
        """Build an exception response: masked function code + exception byte."""
        return PDU(
            functionCode | codes.Exception.Mask,
            struct.pack('>B', exceptionCode)
        )

    def __init__(self, dataModel, logCallback):
        self._dataModel = dataModel
        self._logCallback = logCallback

    async def handle(self, pdu):
        """Decode `pdu`, validate the addressed region(s), dispatch to the
        matching handler coroutine and return the response PDU.

        IllegalFunction / IllegalDataAddress raised during handling is logged
        and converted into the corresponding Modbus exception response.
        """
        try:
            code = pdu.functionCode
            if code == codes.Function.ReadMultipleHoldingRegisters:
                fromRegion = _data.Region(
                    *struct.unpack('>HH', pdu.bytes), max=125
                )
                self._dataModel.holdingBlock.validRegion(fromRegion)
                bytes_ = await self.ReadMultipleHoldingRegisters(fromRegion)
            elif code == codes.Function.WriteSingleHoldingRegister:
                format = '>HH'
                toAddress, value = struct.unpack(
                    format, pdu.bytes[:struct.calcsize(format)]
                )
                self._dataModel.holdingBlock.validRegion(
                    _data.Region(toAddress, 1, 1)
                )
                bytes_ = await self.WriteSingleHoldingRegister(toAddress, value)
            elif code == codes.Function.WriteMultipleHoldingRegisters:
                format = '>HHB'
                toAddress, toCount, byteCount = struct.unpack(
                    format, pdu.bytes[:struct.calcsize(format)]
                )
                toRegion = _data.Region(toAddress, toCount, max=0x7B)
                self._dataModel.holdingBlock.validRegion(toRegion)
                values = tuple(struct.unpack(
                    '>%dH' % toCount, pdu.bytes[struct.calcsize(format):]
                ))
                bytes_ = await self.WriteMultipleHoldingRegisters(toRegion, values)
            elif code == codes.Function.ReadWriteMultipleRegisters:
                format = '>HHHHB'
                (
                    fromAddress, fromCount, toAddress, toCount, byteCount
                ) = struct.unpack(format, pdu.bytes[:struct.calcsize(format)])
                # BUGFIX: was bare `Region(...)` (NameError at runtime);
                # use the _data module like every other branch.
                fromRegion = _data.Region(fromAddress, fromCount, max=0x7D)
                self._dataModel.holdingBlock.validRegion(fromRegion)
                toRegion = _data.Region(toAddress, toCount, max=0x79)
                self._dataModel.holdingBlock.validRegion(toRegion)
                values = tuple(struct.unpack(
                    '>%dH' % toCount, pdu.bytes[struct.calcsize(format):]
                ))
                bytes_ = await self.ReadWriteMultipleRegisters(
                    fromRegion, toRegion, values
                )
            else:
                raise IllegalFunction()
            return PDU(code, bytes_)
        except IllegalFunction as exception:
            self._logCallback(
                'Function code=%d %s not implemented',
                pdu.functionCode, str(exception)
            )
            # BUGFIX: arguments were swapped — _exceptionPDU takes the
            # exception code first, then the function code.
            return RequestHandler._exceptionPDU(exception.code, pdu.functionCode)
        except _data.IllegalDataAddress as exception:
            self._logCallback(
                'Function code=%d %s', pdu.functionCode, str(exception)
            )
            return RequestHandler._exceptionPDU(exception.code, pdu.functionCode)

    # Default handlers raise IllegalFunction so handle() replies with the
    # proper exception PDU. BUGFIX: signatures now match the calls in
    # handle() — the stray `dataModel` parameter made every dispatch a
    # TypeError even for overriding subclasses that mirrored it.
    async def ReadMultipleHoldingRegisters(self, fromRegion):
        raise IllegalFunction("ReadMultipleHoldingRegisters")

    async def WriteSingleHoldingRegister(self, toAddress, value):
        raise IllegalFunction("WriteSingleHoldingRegister")

    async def WriteMultipleHoldingRegisters(self, toRegion, values):
        raise IllegalFunction("WriteMultipleHoldingRegisters")

    # BUGFIX: renamed from ReadWriteMultipleHoldingRegisters so the default
    # matches the name handle() actually dispatches to.
    async def ReadWriteMultipleRegisters(self, fromRegion, toRegion, values):
        raise IllegalFunction("ReadWriteMultipleRegisters")
|
from keras.layers.core import Dense, Dropout
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import collections
import nltk
import numpy as np
from make_tensorboard import make_tensorboard
import codecs
# Sentiment classifier demo: represent each sentence as the sum of its GloVe
# word embeddings and train a small dense network on top.
np.random.seed(42)
# Input corpus: one "<label>\t<sentence>" line per example.
INPUT_FILE = "data/umich-sentiment-train.txt"
GLOVE_MODEL = "data/glove.6B.100d.txt"
VOCAB_SIZE = 5000
EMBED_SIZE = 100
BATCH_SIZE = 64
NUM_EPOCHS = 10
print("reading data...")
# First pass: count word frequencies and find the longest sentence.
counter = collections.Counter()
fin = codecs.open(INPUT_FILE, "r", encoding='utf-8')
maxlen = 0
for line in fin:
    _, sent = line.strip().split("\t")
    words = [x.lower() for x in nltk.word_tokenize(sent)]
    if len(words) > maxlen:
        maxlen = len(words)
    for word in words:
        counter[word] += 1
fin.close()
print("creating vocabulary...")
# Map the VOCAB_SIZE most frequent words to ids 1..VOCAB_SIZE; the defaultdict
# sends every out-of-vocabulary word to id 0, which is mapped to _UNK_.
word2index = collections.defaultdict(int)
for wid, word in enumerate(counter.most_common(VOCAB_SIZE)):
    word2index[word[0]] = wid + 1
vocab_sz = len(word2index) + 1
index2word = {v: k for k, v in word2index.items()}
index2word[0] = "_UNK_"
print("creating word sequences...")
# Second pass: convert each sentence to a sequence of word ids plus its label.
ws, ys = [], []
fin = codecs.open(INPUT_FILE, "r", encoding='utf-8')
for line in fin:
    label, sent = line.strip().split("\t")
    ys.append(int(label))
    words = [x.lower() for x in nltk.word_tokenize(sent)]
    wids = [word2index[word] for word in words]
    ws.append(wids)
fin.close()
W = pad_sequences(ws, maxlen=maxlen)
Y = np_utils.to_categorical(ys)
# load GloVe vectors
print("loading GloVe vectors...")
# defaultdict(int): a missing word yields 0, which broadcasts to a zero
# embedding column in the transfer loop below.
word2emb = collections.defaultdict(int)
fglove = open(GLOVE_MODEL, "rb")
for line in fglove:
    cols = line.strip().split()
    word = cols[0].decode('utf-8')
    embedding = np.array(cols[1:], dtype="float32")
    word2emb[word] = embedding
fglove.close()
print("transferring embeddings...")
# Bag-of-embeddings features: each sentence becomes the sum over its word
# embedding columns.
X = np.zeros((W.shape[0], EMBED_SIZE))
for i in range(W.shape[0]):
    E = np.zeros((EMBED_SIZE, maxlen))
    words = [index2word[wid] for wid in W[i].tolist()]
    for j in range(maxlen):
        E[:, j] = word2emb[words[j]]
    X[i, :] = np.sum(E, axis=1)
Xtrain, Xtest, Ytrain, Ytest = \
    train_test_split(X, Y, test_size=0.3, random_state=42)
print(Xtrain.shape, Xtest.shape, Ytrain.shape, Ytest.shape)
# Small dense classifier on top of the fixed bag-of-embeddings features.
model = Sequential()
model.add(Dense(32, input_dim=EMBED_SIZE, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(2, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
tensorboard, log_dir = make_tensorboard(
    set_dir_name='keras_transfer_glove_embeddings',
    embeddings_freq=1,
    embeddings_layer_names='dense_1',
)
history = model.fit(Xtrain, Ytrain, batch_size=BATCH_SIZE,
                    epochs=NUM_EPOCHS,
                    callbacks=[tensorboard],
                    validation_data=(Xtest, Ytest))
# evaluate model
score = model.evaluate(Xtest, Ytest, verbose=1)
print("Test score: {:.3f}, accuracy: {:.3f}".format(score[0], score[1]))
|
from LosmliProxyPool.settings import default_settings
from LosmliProxyPool.utils.load import mysql2redis, update2mysql
from LosmliProxyPool.utils.log import root_logger
from LosmliProxyPool.validator.amazon import check_proxy_amazon
def check_proxy():
    """Proxy validation pipeline.

    Order matters: first stage the waiting proxies from MySQL into Redis,
    then run the Amazon validator over them, and finally persist the
    validated proxies back to MySQL.
    """
    root_logger.info('Start to check proxy.')
    mysql2redis(default_settings.PROXY_WAIT_CHECK_AMAZON)
    check_proxy_amazon()
    update2mysql(default_settings.PROXY_IS_VAILD_AMAZON)
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import INFORMIX_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.informix.enumeration import Enumeration
from plugins.dbms.informix.filesystem import Filesystem
from plugins.dbms.informix.fingerprint import Fingerprint
from plugins.dbms.informix.syntax import Syntax
from plugins.dbms.informix.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class InformixMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines Informix methods
    """
    def __init__(self):
        # Informix system databases are excluded from enumeration by default.
        self.excludeDbsList = INFORMIX_SYSTEM_DBS
        # Initialize every mixin base explicitly; the bases are independent
        # plugin classes, not a cooperative super().__init__ chain.
        for cls in self.__class__.__bases__:
            cls.__init__(self)
# Register the Informix-specific escape routine with the global unescaper.
unescaper[DBMS.INFORMIX] = Syntax.escape
|
from metamapper import CoreIRContext
from metamapper.irs.coreir import gen_CoreIRNodes
import metamapper.coreir_util as cutil
from metamapper.common_passes import SMT, print_dag, prove_equal
def test_dag_to_smt():
    """A DAG loaded from the CoreIR example must be provably equal to itself."""
    CoreIRContext(reset=True)
    nodes = gen_CoreIRNodes(16)
    module = cutil.load_from_json("examples/coreir/add1_const.json")
    dag = cutil.coreir_to_dag(nodes, module)
    print_dag(dag)
    # Proving a dag equal to itself must yield no counterexample.
    assert prove_equal(dag, dag) is None
|
#!/usr/bin/python
import sys
# Streaming mapper: emit the request field (6th space-separated column) of
# each log line read from stdin.
for line in sys.stdin:
    fields = line.strip().split(" ")
    # Skip malformed lines that have no request column instead of crashing
    # the whole job with an IndexError.
    if len(fields) > 5:
        request = fields[5]
        # print() works on both Python 2 and 3; the original `print request`
        # statement is a SyntaxError on Python 3.
        print(request)
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2001-2017 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL/__init__
#
# Purpose
# Initialize package `TFL`
#
# Revision Dates
# 3-Jul-2001 (CT) Creation (of comment)
# 22-Feb-2002 (CT) `_Export` for `Package_Namespace` added
# 27-Feb-2002 (CT) `TFL.Package_Namespace` assigned instead of using
# `_Export` (which leads to circular import again)
# 24-Jun-2002 (CT) Import `Package_Namespace` absolutely (i.e., from `_TFL`)
# 10-Feb-2010 (MG) `BREAK` added
# 8-Apr-2010 (CT) `BREAK` removed
# 18-Aug-2015 (CT) Remove assignment of `TFL.Package_Namespace`
# 10-Oct-2016 (CT) Add `__version__`
# 22-Feb-2017 (CT) Remove `__version__`
# ««revision-date»»···
#--
from _TFL.Package_Namespace import Package_Namespace
# Create the `TFL` package namespace object, then drop the class name so the
# module exposes only `TFL` itself.
TFL = Package_Namespace ()
del Package_Namespace
### __END__ TFL/__init__
|
import ml_colorise
import image_convert
# IMAGE ML
# Pipeline: split the video into frames, colorize them (TensorFlow model),
# reassemble a colour video, then upscale the colorized frames (DeepAI
# super-resolution) and reassemble the final video.
image_convert.video_to_images("input/vid.mp4","input")
ml_colorise.test("input","output",ml_colorise.ML_TYPE_TF,ml_colorise.ML_MODE_COLORIZE)
image_convert.images_to_video("output",output_file="vid_colour.avi")
ml_colorise.test("output","output_super",ml_colorise.ML_TYPE_DEEPAI,ml_colorise.ML_MODE_SUPER_RES)
image_convert.images_to_video("output_super",output_file="vid_colour_super.avi")
import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from db.helper.forms import convert_date
from db.models import JobType, DateMode, Branch
@pytest.mark.django_db
def test_employment_date_range(login, user_student, student_employment, job_type_objects_date_range,
                               branch_objects):
    """A student on profile step 2 can save an employment with a valid month range."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_range[0], '01.1337',
                                      '02.1337', branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    assert data.get('studentProfileEmployment').get('success')
    user = get_user_model().objects.get(pk=user_student.id)
    # The mutation persisted the employment fields and advanced the wizard step.
    assert user.student.job_type.id == job_type_objects_date_range[0].id
    assert user.student.branch.id == branch_objects[0].id
    assert user.student.job_from_date == convert_date('01.1337', '%m.%Y')
    assert user.student.job_to_date == convert_date('02.1337', '%m.%Y')
    assert user.student.profile_step == 3
@pytest.mark.django_db
def test_employment_without_valid_date_range(login, user_student, student_employment,
                                             job_type_objects_date_range, branch_objects):
    """Omitting both dates for a date-range job type fails and persists nothing."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_range[0], None, None,
                                      branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    assert data.get('studentProfileEmployment').get('success') is False
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobFromDate' in errors
    assert 'jobToDate' in errors
    user = get_user_model().objects.get(pk=user_student.id)
    assert user.student.job_type is None
    assert user.student.branch is None
    assert user.student.job_from_date is None
    assert user.student.job_to_date is None
    assert user.student.profile_step == 2
@pytest.mark.django_db
def test_employment_with_from_date_only(login, user_student, student_employment,
                                        job_type_objects_date_range, branch_objects):
    """A date-range job type with only a from date fails on the missing to date."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_range[0], '01.1337', None,
                                      branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    assert data.get('studentProfileEmployment').get('success') is False
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobToDate' in errors
    user = get_user_model().objects.get(pk=user_student.id)
    assert user.student.job_type is None
    assert user.student.branch is None
    assert user.student.job_from_date is None
    assert user.student.job_to_date is None
    assert user.student.profile_step == 2
@pytest.mark.django_db
def test_employment_date_from(login, user_student, student_employment, job_type_objects_date_from,
                              branch_objects):
    """A date-from job type succeeds with only a from date; to date stays unset."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_from[0], '01.1337', None,
                                      branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    assert data.get('studentProfileEmployment').get('success')
    user = get_user_model().objects.get(pk=user_student.id)
    assert user.student.job_type.id == job_type_objects_date_from[0].id
    assert user.student.branch.id == branch_objects[0].id
    assert user.student.job_from_date == convert_date('01.1337', '%m.%Y')
    assert user.student.job_to_date is None
    assert user.student.profile_step == 3
@pytest.mark.django_db
def test_employment_without_login(user_student, student_employment, job_type_objects_date_range,
                                  branch_objects):
    """An anonymous user cannot run the mutation; the student stays untouched."""
    data, errors = student_employment(AnonymousUser(), job_type_objects_date_range[0], '01.1337',
                                      '02.1337', branch_objects[0])
    assert errors is not None
    assert data is not None
    assert data.get('studentProfileEmployment') is None
    user = get_user_model().objects.get(pk=user_student.id)
    assert user.student.job_type is None
    assert user.student.branch is None
    assert user.student.job_from_date is None
    assert user.student.job_to_date is None
    assert user.student.profile_step == 1
@pytest.mark.django_db
def test_employment_as_company(login, user_employee, student_employment,
                               job_type_objects_date_range, branch_objects):
    """A company user is rejected with a user-type error."""
    login(user_employee)
    data, errors = student_employment(user_employee, job_type_objects_date_range[0], '01.1337',
                                      '02.1337', branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'type' in errors
@pytest.mark.django_db
def test_employment_invalid_step(login, user_student, student_employment,
                                 job_type_objects_date_range, branch_objects):
    """The mutation is rejected when the student is not on profile step 2."""
    user_student.student.profile_step = 0
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_range[0], '01.1337',
                                      '02.1337', branch_objects[0])
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    assert data.get('studentProfileEmployment').get('success') is False
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'profileStep' in errors
    user = get_user_model().objects.get(pk=user_student.id)
    assert user.student.profile_step == 0
@pytest.mark.django_db
def test_employment_invalid_date_range(login, user_student, student_employment):
    """Unsaved (nonexistent) job type and branch ids produce lookup errors."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, JobType(id=1337, mode=DateMode.DATE_RANGE),
                                      '03.1337', '02.1337', Branch(id=1337))
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobType' in errors
    assert 'branch' in errors
@pytest.mark.django_db
def test_employment_invalid_date_from(login, user_student, student_employment):
    """Invalid ids dominate: jobType/branch errors are reported, not the bad date."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, JobType(id=1337, mode=DateMode.DATE_FROM),
                                      '1337.1337', None, Branch(id=1337))
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobType' in errors
    assert 'branch' in errors
@pytest.mark.django_db
def test_employment_invalid_date_range_with_valid_job_type(login, user_student, student_employment,
                                                           job_type_objects_date_range):
    """With a valid job type, a reversed date range and bad branch both error."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_range[0], '03.1337',
                                      '02.1337', Branch(id=1337))
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobToDate' in errors
    assert 'branch' in errors
@pytest.mark.django_db
def test_employment_invalid_date_from_with_valid_job_type(login, user_student, student_employment,
                                                          job_type_objects_date_from):
    """With a valid job type, a malformed from date and bad branch both error."""
    user_student.student.profile_step = 2
    user_student.student.save()
    login(user_student)
    data, errors = student_employment(user_student, job_type_objects_date_from[0], '1337.1337',
                                      None, Branch(id=1337))
    assert errors is None
    assert data is not None
    assert data.get('studentProfileEmployment') is not None
    errors = data.get('studentProfileEmployment').get('errors')
    assert errors is not None
    assert 'jobFromDate' in errors
    assert 'branch' in errors
|
import argparse
import codecs
import json
import logging
import os
import sys
import time
#BASE_DIR = "/Users/cbardas/instapy-log/"
# Log directory; the commented line above is the author's local dev path.
BASE_DIR = "/home/instapy-log/"
# Keep a reference to the original stdout, then force UTF-8 output.
# NOTE(review): this codecs-wrapping idiom is Python 2 era — confirm it
# still behaves as intended on the interpreter this runs under.
stdout = sys.stdout
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Make the parent directory importable.
sys.path.append(os.path.join(sys.path[0], '../'))
# One log file per day, e.g. resize25.12.2020.log.
fileName = "resize" + time.strftime("%d.%m.%Y") + ".log"
logging.basicConfig(format='%(asctime)s %(message)s', filename=BASE_DIR + fileName, level=logging.DEBUG)
# Mirror all log records to the console as well.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger = logging.getLogger('[schedule]')
logger.setLevel(logging.DEBUG)
logger.addHandler(ch)
import requests
# CLI: both arguments are required in practice (validated below).
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-id_droplet', type=str, help="id_droplet")
parser.add_argument('-size', type=str, help="size")
args = parser.parse_args()
# REST proxy in front of the DigitalOcean API.
apiUrl = 'https://rest.angie.one/doapi/'
# NOTE(review): hard-coded credential committed to source — should be
# moved to an environment variable / secret store and rotated.
authKey = 'b5a42bd29ebc5697adcec0adf446c26e'
#EIGHT_GB_SIZE = "s-4vcpu-8gb"
#TWO_GB_SIZE = "s-1vcpu-2gb"
#args.id_droplet = "144926024"
#args.size = EIGHT_GB_SIZE
if args.id_droplet is None:
    exit("dispatcher: Error: id_droplet is not specified !")
if args.size is None:
    exit("dispatcher: Error: size is not specified !")
def getDropletStatus(id_droplet):
    """Fetch the droplet's current status string from the API.

    Prints the status (preserving the original behaviour) and, as a fix,
    also returns it — the original computed the value and threw it away,
    making the function useless to callers.
    """
    url = apiUrl + "droplet?id_droplet=" + id_droplet
    logger.info("Requesting: %s", url)
    r = requests.get(url, headers={'Authorization': authKey})
    result = json.loads(r.content)
    status = result['droplet']['status']
    print(status)
    return status
def shutdownDroplet(id_droplet):
    """Request a graceful shutdown of the droplet; return the action id."""
    url = apiUrl + "shutdown"
    payload = json.dumps({"id_droplet": id_droplet})
    logger.info("Shutdown: requesting: %s, with data: %s" % (url, payload))
    response = requests.post(url, headers={'Authorization': authKey}, data=payload)
    action = json.loads(response.content)['action']
    logger.info("Shutdown: Action Status: %s, id: %s" % (action['status'], action['id']))
    return action['id']
def checkStatus(id_action):
    """Return the current status string ('completed', ...) of an API action."""
    url = apiUrl + "status?id_action=" + str(id_action)
    logger.info("Requesting: %s", url)
    response = requests.get(url, headers={'Authorization': authKey})
    parsed = json.loads(response.content)
    logger.info("checkStatus: response: %s", parsed)
    return parsed['action']['status']
def powerOn(id_droplet):
    """Power the droplet back on; return the id of the started action."""
    url = apiUrl + "power"
    payload = json.dumps({"id_droplet": id_droplet})
    logger.info("Requesting: %s, with data: %s" % (url, payload))
    response = requests.post(url, headers={'Authorization': authKey}, data=payload)
    action = json.loads(response.content)['action']
    logger.info("Action Status: %s, id: %s" % (action['status'], action['id']))
    return action['id']
def powerOff(id_droplet):
    """Force power-off of the droplet; return the id of the started action."""
    url = apiUrl + "powerOff"
    payload = json.dumps({"id_droplet": id_droplet})
    logger.info("Requesting: %s, with data: %s" % (url, payload))
    response = requests.post(url, headers={'Authorization': authKey}, data=payload)
    action = json.loads(response.content)['action']
    logger.info("Action Status: %s, id: %s" % (action['status'], action['id']))
    return action['id']
def resize(id_droplet, size):
    """Start a resize of the droplet to the given size slug; return the action id."""
    url = apiUrl + "resize"
    payload = json.dumps({"id_droplet": id_droplet, "size": size})
    logger.info("Requesting: %s, with data: %s" % (url, payload))
    response = requests.post(url, headers={'Authorization': authKey}, data=payload)
    parsed = json.loads(response.content)
    logger.info("Resize response: %s", parsed)
    action = parsed['action']
    logger.info("Action Status: %s, id: %s" % (action['status'], action['id']))
    return action['id']
# Main flow: shutdown (fall back to hard power-off) -> resize -> power on.
# Each step starts an API action, sleeps a fixed budget, then verifies the
# action completed.
logger.info("STARTING RESIZE PROCESS FOR DROPLET: %s, SIZE: %s" % (args.id_droplet, args.size))
waitForShutdownMin = 1
# SHUTDOWN THE DROPLET
shutdownId = shutdownDroplet(args.id_droplet)
logger.info("Going to wait %s minutes for shutdown", waitForShutdownMin)
time.sleep(waitForShutdownMin * 60)
logger.info("Done waiting, going to check the status.")
status = checkStatus(shutdownId)
if status != "completed":
    # Graceful shutdown failed: force a power-off before resizing.
    logger.info("Shutdown failed, status: %s. Going to powerOff", status)
    powerOffId = powerOff(args.id_droplet)
    logger.info("Going to wait %s minute for powerOff", waitForShutdownMin)
    time.sleep(waitForShutdownMin * 60)
    powerOffStatus = checkStatus(powerOffId)
    if powerOffStatus != 'completed':
        logger.info("PowerOff failed, status: %s", powerOffStatus)
        raise Exception('Could not powerOff the droplet.')
logger.info("Done shutting down, going to resize the droplet...")
resizeWaitMin = 5
resizeId = resize(args.id_droplet, args.size)
logger.info("Going to wait %s minutes for resize", resizeWaitMin)
time.sleep(resizeWaitMin * 60)
logger.info("done waiting, going to check the resize status")
resizeStatus = checkStatus(resizeId)
if resizeStatus != 'completed':
    logger.info('Resize failed, status: %s', resizeStatus)
    raise Exception('Resize failed, status: %s', resizeStatus)
logger.info("Done resizing, going to startup the machine")
startupId = powerOn(args.id_droplet)
startUpWait = 2
logger.info("Going to wait %s minutes for powerOn" % (startUpWait))
# Bug fix: this used to sleep resizeWaitMin (5 min) even though the log
# line above announces startUpWait (2 min).
time.sleep(startUpWait * 60)
logger.info("Done waiting... going to check the startup status")
startupStatus = checkStatus(startupId)
if startupStatus != 'completed':
    logger.info("Startup failed, status: %s", startupStatus)
    raise Exception("Startup failed...")
logger.info("Done resizing the machine, going to exit !")
|
from setuptools import find_packages, setup

# The package's long description is the README, verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()

# All metadata gathered in one mapping, then expanded into setup().
_metadata = dict(
    name="lambdata-NealWhitlock",
    version="0.1.7",
    author="NealWhitlock",
    author_email="nealwhitlock@gmail.com",
    description="Example package for lambda school DS Unit 3",
    long_description=long_description,
    long_description_content_type="text/markdown",  # required if using a md file for long desc
    license="MIT",
    url="https://github.com/NealWhitlock/lambdata_ds11",
    keywords="lambda school",
    packages=find_packages(),  # ["my_lambdata"]
)

setup(**_metadata)
"""Unit test package for concurrency."""
|
from miflora.miflora_poller import MiFloraPoller, \
MI_CONDUCTIVITY, MI_MOISTURE, MI_LIGHT, MI_TEMPERATURE, MI_BATTERY
class FlowerSensor:
    """In-memory snapshot of a Mi Flora plant sensor.

    Readings stay at their zero defaults until poll() succeeds.
    """

    # (attribute name, miflora parameter key) pairs read by poll(), in order.
    _READINGS = (
        ("temp", "temperature"),
        ("moisture", "moisture"),
        ("light", "light"),
        ("conductivity", "conductivity"),
        ("battery", "battery"),
    )

    def __init__(self, name, mac):
        self.name = name
        self.mac = mac
        self.temp = 0
        self.light = 0
        self.battery = 0
        self.conductivity = 0
        self.moisture = 0
        self.firmware_version = ""

    def poll(self):
        """Read fresh values from the physical sensor via miflora."""
        poller = MiFloraPoller(self.mac)
        self.firmware_version = poller.firmware_version()
        # Same read order as before: temp, moisture, light, conductivity, battery.
        for attribute, key in (
            ("temp", MI_TEMPERATURE),
            ("moisture", MI_MOISTURE),
            ("light", MI_LIGHT),
            ("conductivity", MI_CONDUCTIVITY),
            ("battery", MI_BATTERY),
        ):
            setattr(self, attribute, poller.parameter_value(key))

    def to_json(self):
        """Return the current readings as a plain dict (not a JSON string)."""
        return {
            "temp": self.temp,
            "moisture": self.moisture,
            "light": self.light,
            "conductivity": self.conductivity,
            "battery": self.battery,
        }
|
#import Minsung.Face_list as fl
import Minsung.Face as fa
import os
import re
#import picamera
#Initial Settings
def setName(name):
    """Persist the lower-cased user name to name.txt and confirm on stdout."""
    lowered = name.lower()
    with open('name.txt', 'w') as out:
        out.write(lowered)
    print("이름이 성공적으로 저장되었습니다.")
def Picapture():
    """Capture one 1024x768 still to 2.jpg using the Raspberry Pi camera.

    NOTE(review): `import picamera` at the top of this module is commented
    out, so this function raises NameError until it is restored — confirm
    before use.
    """
    # Bug fix: the class is picamera.PiCamera — the original spelled it
    # `Picamera`, which raises AttributeError.
    with picamera.PiCamera() as camera:
        camera.resolution = (1024, 768)
        camera.start_preview()
        camera.capture('2.jpg')
def regex(name):
    """Return *name* with every non-Hangul, non-space character removed.

    Bug fix: the original body assigned the undefined name ``name8``
    (always a NameError) and neither used nor returned the compiled
    pattern. It now applies the pattern and returns the filtered string
    (the original implicitly returned None, so callers that ignored the
    result are unaffected).
    """
    res = re.compile('[^ ㄱ-ㅣ가-힣]+')
    return res.sub('', name)
def createMainImage(image):
    """Detect the face in *image* and cache its faceId in ps1.txt."""
    face_id = fa.detect(image)[0]["faceId"]
    print(face_id)
    with open('ps1.txt', 'w') as out:
        out.write(face_id)
#Verify Image
def getSecondImage(image):
    """Detect the face in the verification *image* and cache its faceId in ps2.txt."""
    face_id = fa.detect(image)[0]["faceId"]
    print(face_id)
    with open('ps2.txt', 'w') as out:
        out.write(face_id)
def compare():
    """Return the verification confidence between the two cached faceIds.

    Reads the ids written by createMainImage()/getSecondImage(). Bug fix:
    the original opened ps1.txt and ps2.txt without ever closing them;
    both are now read through context managers.
    """
    with open('ps1.txt', 'r') as f:
        face_id = f.readline()
    with open('ps2.txt', 'r') as f:
        another_face_id = f.readline()
    return fa.verify(face_id, another_face_id)['confidence']
#Secure number setting
def saveNumbers(num):
    """Store the secure number (a string) in number.txt."""
    with open('number.txt', 'w') as out:
        out.write(num)
def chkNumbers(num):
    """Check *num* against the secure number stored in number.txt.

    Prints the verdict, always deletes the temporary capture 2.jpg, and
    returns True on a match. Fixes the original's unclosed file handle
    and hoists the os.remove duplicated in both branches.
    """
    with open('number.txt', 'r') as f:
        stored = f.readline()
    matched = stored == str(num)
    if matched:
        print("올바른 사용자입니다.")
    else:
        print("올바르지 않은 사용자입니다.")
    os.remove('2.jpg')
    return matched
#Delete settings
def deleteDatas():
    """Remove every file created by the enrolment flow and report completion."""
    # Same removal order as before; any missing file raises FileNotFoundError.
    for leftover in ('ps1.txt', 'ps2.txt', 'number.txt', 'name.txt'):
        os.remove(leftover)
    print("초기화가 끝났습니다.")
|
import glob
import pandas as pd
import argparse
from gensim.models import Word2Vec
import gensim.downloader as api
from scipy.stats import pearsonr
# Command line interface: an optional word2vec model path (or the literal
# "pretrained" to pull Google News vectors), a required folder of test CSVs,
# and a required results file.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-w',
    '--w2v',
    action='store',
    default=None,
    dest='model_path',
    help='File with the word2vec model'
)
parser.add_argument(
    '--test_input',
    dest='testFolder',
    action='store',
    required=True,
    help='path to folder containing test files'
)
parser.add_argument(
    '--outputFile',
    '-o',
    dest='results_path',
    action='store',
    required=True,
    help='Path to store results'
)
args = parser.parse_args()
'''
Read Files to test for similarities
'''
print('Loading Test Datasets.')
# Every *.csv in the folder is one similarity benchmark.
# NOTE(review): rows appear to be (word1, word2, gold score) — column 2 is
# used as the gold score in the evaluation loop below; confirm the format.
test_files = glob.glob(args.testFolder+'*.csv')
test_dataset = []
for f in test_files:
    dataset = pd.read_csv(f, header=None).values
    test_dataset.append(dataset)
'''
Loading/ Training the model.
'''
# load model: either the published Google News vectors or a locally
# trained gensim Word2Vec model (only its KeyedVectors are kept).
print('Loading previously trained model.')
if args.model_path == "pretrained":
    model = api.load("word2vec-google-news-300")
else:
    model = Word2Vec.load(args.model_path).wv
'''
Testing the model.
'''
print('Testing the trained model.')
# Context manager fixes the original's never-closed results file.
with open(args.results_path, 'w') as result:
    for d in range(0, len(test_dataset)):
        predictions = []
        result.write("---------- " + str(test_files[d]) + " ----------\n")
        for pair in test_dataset[d]:
            if pair[0] in model and pair[1] in model:
                sim = model.similarity(pair[0], pair[1])
                predictions.append(sim)
                result.write(str(sim) + "\n")
            else:
                # Out-of-vocabulary pair: record a placeholder so indices
                # stay aligned with the gold scores.
                print("Missing one of the words in the model: ", pair[0], pair[1])
                predictions.append(None)
                result.write("None\n")
        # Bug fix: filter on `is not None`, not truthiness — a similarity of
        # exactly 0.0 is a valid prediction; dropping it also desynchronised
        # the prediction list from the gold scores.
        test_removed = [x for i, x in enumerate(test_dataset[d][:, 2]) if predictions[i] is not None]
        predictions_removed = [x for x in predictions if x is not None]
        print("Pearson Correlation Coefficient: ", pearsonr(predictions_removed, test_removed)[0])
        result.write("Pearson Correlation Coefficient: " + str(pearsonr(predictions_removed, test_removed)[0]) + "\n")
        result.write("--------------------\n")
|
import FWCore.ParameterSet.Config as cms

process = cms.Process("CastorShowerLibraryMaker")
# Shared upper limits on tracked-particle times (ns); the vstring/vdouble
# vectors are empty, i.e. no per-volume overrides.
process.common_maximum_timex = cms.PSet(
    MaxTrackTime = cms.double(500.0),
    MaxTimeNames = cms.vstring(),
    MaxTrackTimes = cms.vdouble()
)
# Particle-gun species as PDG codes: 11 = electron, 211 = pi+.
process.common_pgun_particleID = cms.PSet(
    PartID = cms.vint32(11,211)
    #PartID = cms.vint32(211)
)
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
# process.load("Configuration.StandardSequences.GeometryExtended_cff")
process.load("SimG4CMS.Forward.castorGeometryXML_cfi")
#process.load("Geometry.CMSCommonData.cmsAllGeometryXML_cfi")
#process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
#process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
#process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
# Plain cout logging; the commented block kept below shows how to re-enable
# the ForwardSim debug output.
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cout')
#    categories = cms.untracked.vstring('ForwardSim'),
#    debugModules = cms.untracked.vstring('*'),
#    cout = cms.untracked.PSet(
#        threshold = cms.untracked.string('DEBUG'),
#        DEBUG = cms.untracked.PSet(
#            limit = cms.untracked.int32(0)
#        ),
#        ForwardSim = cms.untracked.PSet(
#            limit = cms.untracked.int32(0)
#        )
#    )
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
    moduleSeeds = cms.PSet(
        g4SimHits = cms.untracked.uint32(8245), # std: 9784
        VtxSmeared = cms.untracked.uint32(123456789),
        generator = cms.untracked.uint32(536870912) # std: 135799753
    )
    #sourceSeed = cms.untracked.uint32(135799753) # std: 135799753
)
# Effectively "run until the job is stopped externally".
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000000000)
)
# Geant4 simulation settings for the shower-library production run.
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.DefaultCutValue = 10.
process.g4SimHits.Generator.MinEtaCut = -7.0
process.g4SimHits.Generator.MaxEtaCut = 7.0
process.g4SimHits.Generator.Verbosity = 0
process.g4SimHits.CaloTrkProcessing.TestBeam = True
process.CaloSD = cms.PSet(
    DetailedTiming = cms.bool(False),
    EminTrack = cms.double(1.0),
    Verbosity = cms.int32(0),
    UseMap = cms.bool(True),
    CheckHits = cms.int32(25),
    TmaxHit = cms.int32(500) # L.M. testing
)
process.g4SimHits.StackingAction = cms.PSet(
    # NOTE(review): process.common_heavy_suppression is not defined anywhere
    # in this file — confirm a loaded cfi provides it, otherwise this line
    # raises an AttributeError when the configuration is built.
    process.common_heavy_suppression,
    process.common_maximum_timex, # need to be localy redefined
    TrackNeutrino = cms.bool(False),
    KillHeavy = cms.bool(False),
    KillDeltaRay = cms.bool(False),
    SaveFirstLevelSecondary = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInTracker = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInCalo = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInMuon = cms.untracked.bool(True)
)
process.g4SimHits.SteppingAction = cms.PSet(
    process.common_maximum_timex, # need to be localy redefined
    KillBeamPipe = cms.bool(True),
    CriticalEnergyForVacuum = cms.double(2.0),
    CriticalDensity = cms.double(1e-15),
    EkinNames = cms.vstring(),
    EkinThresholds = cms.vdouble(),
    EkinParticles = cms.vstring(),
    Verbosity = cms.untracked.int32(0)
)
# Flat-energy particle gun aimed at the CASTOR acceptance (negative eta,
# first phi octant).
process.generator = cms.EDProducer("FlatRandomEGunProducer",
    PGunParameters = cms.PSet(
        process.common_pgun_particleID,
        MinEta = cms.double(-6.6),
        MaxEta = cms.double(-5.2),
        MinPhi = cms.double(0.),
        MaxPhi = cms.double(0.7854), # PI/4 = 0.7854
        MinE = cms.double(12.00),
        #MeanE = cms.double(12.00),
        MaxE = cms.double(14.00)
        #Energybins = cms.vdouble(1.,2.,3.,5.,7.,10.,20.,30.,45.,60.,75.,100.,140.,200.,300.,600.,1000.,1500.)
    ),
    AddAntiParticle = cms.bool(False),
    Verbosity = cms.untracked.int32(False) # False == 0, i.e. silent
)
# We are *producing* the shower library here, so do not consume one.
process.g4SimHits.CastorSD.useShowerLibrary = False
process.source = cms.Source("EmptySource")
#process.o1 = cms.OutputModule("PoolOutputModule",
#    fileName = cms.untracked.string('sim_pion_1events-ppON.root')
#)
# The CastorShowerLibraryMaker watcher records showers binned in energy,
# eta and phi for both the em (electron) and hadronic (pion) samples.
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
    type = cms.string('CastorShowerLibraryMaker'),
    CastorShowerLibraryMaker = cms.PSet(
        process.common_pgun_particleID,
        EventNtupleFileName = cms.string('SL_had_E12GeV_eta-6.0phi0.3_1events-ppON.root'),
        Verbosity = cms.int32(0),
        DeActivatePhysicsProcess = cms.bool(False),
        StepNtupleFileName = cms.string('stepNtuple_pion_electron_E12GeV_1event-ppON.root'),
        StepNtupleFlag = cms.int32(0),
        EventNtupleFlag = cms.int32(0),
        # for shower library
        nemEvents = cms.int32(5),
        SLemEnergyBins = cms.vdouble(10.),
        SLemEtaBins = cms.vdouble(-6.6,-6.4,-6.2,-6.0,-5.8,-5.6,-5.4),
        SLemPhiBins = cms.vdouble(0.,0.07854,0.15708,0.23562,0.31416,0.3927,0.47124,0.54978,0.62832,0.70686),
        nhadEvents = cms.int32(5),
        SLhadEnergyBins = cms.vdouble(10.),
        #SLhadEnergyBins = cms.vdouble(1.,2.,3.,5.,7.,10.,20.,30.,45.,60.,75.,100.,140.,200.),
        SLhadEtaBins = cms.vdouble(-6.6,-6.4,-6.2,-6.0,-5.8,-5.6,-5.4),
        SLhadPhiBins = cms.vdouble(0.,0.07854,0.15708,0.23562,0.31416,0.3927,0.47124,0.54978,0.62832,0.70686),
        SLMaxPhi = cms.double(0.7854),
        SLMaxEta = cms.double(-5.2)
    )
))
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits)
#process.outpath = cms.EndPath(process.o1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: mohit jain
"""
'''
We will use genetic algorithum to optimize hyperparameters for XGboost.
'''
# Importing the libraries
import numpy as np
import pandas as pd
import geneticXGboost # genetic algorithm module (note: its function names keep their original spellings)
import xgboost as xgb

# Fixed seed so the GA runs are reproducible.
np.random.seed(723)
'''
The dataset is from https://archive.ics.uci.edu/ml/machine-learning-databases/musk/
It contains a set of 102 molecules, out of which 39 are identified by humans as
having odor that can be used in perfumery and 69 not having the desired odor.
The dataset contains 6,590 low-energy conformations of these molecules, contianing 166 features.
'''
# Importing the dataset
dataset = pd.read_csv('clean2.data', header=None)
X = dataset.iloc[:, 2:168].values # discard first two columns: molecule name and conformation name
y = dataset.iloc[:, 168].values # extract last column as class (1 => desired odor, 0 => undesired odor)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 97)
# Feature Scaling (fit on train only, then apply to test)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#XGboost Classifier
#model xgboost
#use xgboost API now
xgDMatrix = xgb.DMatrix(X_train, y_train) #create Dmatrix
xgbDMatrixTest = xgb.DMatrix(X_test, y_test)
'''
Let's find optimized parameters using genetic algorithms
'''
numberOfParents = 8 #number of parents to start
numberOfParentsMating = 4 #number of parents that will mate
numberOfParameters = 7 #number of hyperparameters that will be optimized
numberOfGenerations = 4 #number of generations that will be created
#define the population size
populationSize = (numberOfParents, numberOfParameters)
#initialize the population with randomly generated parameters
population = geneticXGboost.initilialize_poplulation(numberOfParents)
#define an array to store the fitness history (one row per generation + final)
fitnessHistory = np.empty([numberOfGenerations+1, numberOfParents])
#define an array to store the value of each parameter for each parent and generation
populationHistory = np.empty([(numberOfGenerations+1)*numberOfParents, numberOfParameters])
#insert the value of initial parameters to history
populationHistory[0:numberOfParents, :] = population
# Main GA loop: evaluate, select, crossover, mutate, then rebuild the
# population in place (elitist parents first, mutated children after).
for generation in range(numberOfGenerations):
    print("This is number %s generation" % (generation))
    #train the dataset and obtain fitness
    fitnessValue = geneticXGboost.train_population(population=population, dMatrixTrain=xgDMatrix, dMatrixtest=xgbDMatrixTest, y_test=y_test)
    fitnessHistory[generation, :] = fitnessValue
    #best score in the current iteration
    print('Best F1 score in the this iteration = {}'.format(np.max(fitnessHistory[generation, :])))
    #survival of the fittest - take the top parents, based on the fitness value and number of parents needed to be selected
    parents = geneticXGboost.new_parents_selection(population=population, fitness=fitnessValue, numParents=numberOfParentsMating)
    #mate these parents to create children having parameters from these parents (we are using uniform crossover)
    children = geneticXGboost.crossover_uniform(parents=parents, childrenSize=(populationSize[0] - parents.shape[0], numberOfParameters))
    #add mutation to create genetic diversity
    children_mutated = geneticXGboost.mutation(children, numberOfParameters)
    '''
    We will create new population, which will contain parents that where selected previously based on the
    fitness score and rest of them will be children
    '''
    population[0:parents.shape[0], :] = parents #fittest parents
    population[parents.shape[0]:, :] = children_mutated #children
    populationHistory[(generation+1)*numberOfParents : (generation+1)*numberOfParents+ numberOfParents , :] = population #store parent information
#Best solution from the final iteration
# NOTE: `generation` below is the value left over from the loop above
# (numberOfGenerations-1), so this fills the last row of the history.
fitness = geneticXGboost.train_population(population=population, dMatrixTrain=xgDMatrix, dMatrixtest=xgbDMatrixTest, y_test=y_test)
fitnessHistory[generation+1, :] = fitness
#index of the best solution
bestFitnessIndex = np.where(fitness == np.max(fitness))[0][0]
#Best fitness
print("Best fitness is =", fitness[bestFitnessIndex])
#Best parameters (column order fixed by the geneticXGboost module)
print("Best parameters are:")
print('learning_rate', population[bestFitnessIndex][0])
print('n_estimators', population[bestFitnessIndex][1])
print('max_depth', int(population[bestFitnessIndex][2]))
print('min_child_weight', population[bestFitnessIndex][3])
print('gamma', population[bestFitnessIndex][4])
print('subsample', population[bestFitnessIndex][5])
print('colsample_bytree', population[bestFitnessIndex][6])
#visualize the change in fitness of the various generations and parents
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, fitnessHistory, "fitness (F1-score)")
#Look at individual parameters change with generation
#Create array for each parameter history (Generation x Parents)
learnigRateHistory = populationHistory[:, 0].reshape([numberOfGenerations+1, numberOfParents])
nEstimatorHistory = populationHistory[:, 1].reshape([numberOfGenerations+1, numberOfParents])
maxdepthHistory = populationHistory[:, 2].reshape([numberOfGenerations+1, numberOfParents])
minChildWeightHistory = populationHistory[:, 3].reshape([numberOfGenerations+1, numberOfParents])
gammaHistory = populationHistory[:, 4].reshape([numberOfGenerations+1, numberOfParents])
subsampleHistory = populationHistory[:, 5].reshape([numberOfGenerations+1, numberOfParents])
colsampleByTreeHistory = populationHistory[:, 6].reshape([numberOfGenerations+1, numberOfParents])
#generate heatmap for each parameter
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, learnigRateHistory, "learning rate")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, nEstimatorHistory, "n_estimator")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, maxdepthHistory, "maximum depth")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, minChildWeightHistory, "minimum child weight")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, gammaHistory, "gamma")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, subsampleHistory, "subsample")
geneticXGboost.plot_parameters(numberOfGenerations, numberOfParents, colsampleByTreeHistory, "col sample by history")
|
import logging
import os
import json
from elasticsearch import Elasticsearch
from traceback import format_exc
### Default configuration ###
# Don't edit these values directly:
# either set them on import with velociwrapper.config.varname = value
# or through the environment VW_VARNAME=value

# Data source node names as a list.
# VW_DSN environment variable can be JSON or a comma separated list.
dsn = ['localhost']
# Additional parameters to pass to connection Transport class;
# if you need a special transport class these parameters mostly
# get passed to it via the underlying library.
connection_params = {}
# Default index to find models
default_index = 'es_model'
# Default number of entries on bulk requests
bulk_chunk_size = 1000
# Default number of matches to return per page
results_per_page = 50
# Should we enforce strict types
strict_types = False
## you shouldn't edit this file at all but definitely don't edit below here! ##
# Set the logger
logger = logging.getLogger('Velociwrapper')

# Check for environment overrides. Each VW_* variable, when present,
# replaces the corresponding default defined above; parse failures are
# logged and the default is kept.
if os.environ.get('VW_DSN'):
    try:
        dsn = json.loads(os.environ.get('VW_DSN'))
    except ValueError:
        # Narrowed from a bare `except:` — only a JSON parse failure should
        # trigger the comma-separated fallback.
        logger.debug('VW_DSN was not JSON. Trying to split string')
        dsn = os.environ.get('VW_DSN').split(",")
    logger.debug('dsn set from environment')

if os.environ.get('VW_DEFAULT_INDEX'):
    default_index = os.environ.get('VW_DEFAULT_INDEX')
    logger.debug('default_index set from environment')

if os.environ.get('VW_BULK_CHUNK_SIZE'):
    try:
        bulk_chunk_size = int(os.environ.get('VW_BULK_CHUNK_SIZE'))
        logger.debug('bulk_chunk_size set from environment')
    except ValueError:
        # logger.warn() is a deprecated alias — use warning().
        logger.warning('invalid value for VW_BULK_CHUNK_SIZE, expected integer. Using default')

if os.environ.get('VW_CONNECTION_PARAMS'):
    try:
        connection_params = json.loads(os.environ.get('VW_CONNECTION_PARAMS'))
        logger.debug('connection_params set from environment')
    except ValueError:
        # Narrowed from a bare `except:`; the traceback is still logged.
        logger.warning('Failed to parse VW_CONNECTION_PARAMS from environment. Using default.')
        logger.debug(format_exc())

if os.environ.get('VW_STRICT_TYPES'):
    try:
        strict_types = int(os.environ.get('VW_STRICT_TYPES'))
        logger.debug('strict_types set from environment')
    except ValueError:
        logger.warning('Invalid value for VW_STRICT_TYPES, expected 0 or 1. Using default')

if os.environ.get('VW_RESULTS_PER_PAGE'):
    try:
        results_per_page = int(os.environ.get('VW_RESULTS_PER_PAGE'))
        logger.debug('results_per_page set from environment')
    except ValueError:
        logger.warning('Invalid value for VW_RESULTS_PER_PAGE. Expected integer. Using default.')
|
# coding: utf-8
import os
def cd(arg, display_cwd=True):
    """chdir to *arg*; unless suppressed, echo the resulting directory."""
    os.chdir(arg)
    if not display_cwd:
        return
    print(os.getcwd())
|
"""The mqtt_room component."""
|
import os
import time
from argparse import ArgumentParser, Namespace
from multiprocessing.pool import ThreadPool
def get_time_in_seconds(testing_time):
    """Convert a duration like '6h', '30m' or '45s' into seconds.

    Units are matched in the order h, m, s (same as the original chain);
    an unrecognised string warns and yields 0.
    """
    for unit, multiplier in (('h', 3600), ('m', 60), ('s', 1)):
        if unit in testing_time:
            return int(testing_time[:-1]) * multiplier
    print("Warning: the given time is ZERO seconds!!")
    return 0  # error!
def run_monkey(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Monkey on the given emulator via run_monkey.sh (blocking)."""
    cmd = 'bash -x run_monkey.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute monkey: %s' % cmd)
    os.system(cmd)
def run_ape(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Ape on the given emulator via run_ape.sh (blocking)."""
    cmd = 'bash run_ape.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute ape: %s' % cmd)
    os.system(cmd)
def run_combodroid(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run ComboDroid on the given emulator via run_combodroid.sh (blocking)."""
    cmd = 'bash -x run_combodroid.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute combodroid: %s' % cmd)
    os.system(cmd)
def run_combodroid_login(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run ComboDroid with a login script via run_combodroid_login.sh (blocking)."""
    cmd = 'bash -x run_combodroid_login.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute combodroid with login: %s' % cmd)
    os.system(cmd)
def run_timemachine(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script, adb_port):
    """Run TimeMachine via run_timemachine.sh (blocking).

    TimeMachine's script expects the budget in seconds and an explicit
    adb port, unlike the other runners.
    """
    budget_secs = get_time_in_seconds(testing_time)
    cmd = 'bash run_timemachine.sh %s %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, budget_secs, screen_option, login_script, adb_port)
    print('execute timemachine: %s' % cmd)
    os.system(cmd)
def run_humanoid(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Humanoid on the given emulator via run_humanoid.sh (blocking)."""
    cmd = 'bash -x run_humanoid.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute humanoid: %s' % cmd)
    os.system(cmd)
def run_weighted(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run weighted exploration via run_weighted.sh (blocking)."""
    cmd = 'bash -x run_weighted.sh %s %s %s %s %s %s %s' % (
        apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script)
    print('execute weighted exploration: %s' % cmd)
    os.system(cmd)
def run_stoat(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Stoat via run_stoat.sh (blocking); its script needs absolute paths."""
    cmd = 'bash -x run_stoat.sh %s %s %s %s %s %s %s' % (
        os.path.abspath(apk), avd_serial, avd_name, os.path.abspath(output_dir),
        testing_time, screen_option, login_script)
    print('execute stoat: %s' % cmd)
    os.system(cmd)
def run_sapienz(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Sapienz via run_sapienz.sh (blocking); its script needs absolute paths."""
    cmd = 'bash -x run_sapienz.sh %s %s %s %s %s %s %s' % (
        os.path.abspath(apk), avd_serial, avd_name, os.path.abspath(output_dir),
        testing_time, screen_option, login_script)
    print('execute sapienz: %s' % cmd)
    os.system(cmd)
def run_qtesting(apk, avd_serial, avd_name, output_dir, testing_time, screen_option, login_script):
    """Run Q-testing via run_qtesting.sh (blocking); its script needs absolute paths."""
    cmd = 'bash -x run_qtesting.sh %s %s %s %s %s %s %s' % (
        os.path.abspath(apk), avd_serial, avd_name, os.path.abspath(output_dir),
        testing_time, screen_option, login_script)
    print('execute Q-testing: %s' % cmd)
    os.system(cmd)
def get_all_apks(apk_list_file):
    """Parse the apk list file into parallel (paths, login scripts) lists.

    Each non-comment line is either "apk_path,login_script" or a bare apk
    path, in which case the login script defaults to the literal '""'
    placeholder the shell scripts expect.

    Fixes: the file handle is now closed via a context manager (the
    original leaked it) and the builtin name `file` is no longer shadowed.
    """
    apk_paths = []
    apk_login_scripts = []
    with open(apk_list_file, 'r') as list_file:
        for line in list_file:
            if line.strip().startswith('#'):
                # skip commented apk files
                continue
            if "," in line:
                content = line.split(",")
                apk_paths.append(content[0].strip())
                apk_login_scripts.append(content[1].strip())
            else:
                apk_paths.append(line.strip())
                apk_login_scripts.append("\"\"")
    print("Total %s apks under test" % len(apk_paths))
    return apk_paths, apk_login_scripts
def main(args: Namespace):
    """Allocate emulators and fan the apk list out over the selected fuzzer.

    Builds the emulator serial list from --offset, expands the apk list
    (single-apk or list mode, optionally repeated), then walks the list in
    chunks of `number_of_devices`, dispatching one apk per emulator through
    a thread pool and joining before starting the next chunk.
    """
    if not os.path.exists(args.o):
        os.mkdir(args.o)
    # allocate emulators for an apk; adb serials advance in steps of two
    start_avd_serial = 5554 + args.offset * 2
    avd_serial_list = []
    for apk_index in range(args.number_of_devices):
        avd_serial = 'emulator-' + str(start_avd_serial + apk_index * 2)
        avd_serial_list.append(avd_serial)
        print('allocate emulators: %s' % avd_serial)
    # '""' is the placeholder the shell scripts expect for "no option".
    if args.no_headless:
        screen_option = "\"\""
    else:
        screen_option = "-no-window"
    if args.apk is not None:
        # single apk mode
        all_apks = [args.apk]
        if args.login_script is None:
            all_apks_login_scripts = ["\"\""]
        else:
            all_apks_login_scripts = [args.login_script]
    else:
        # multiple apks mode
        all_apks, all_apks_login_scripts = get_all_apks(args.apk_list)
    # Repeat the whole list --repeat times, keeping login scripts aligned.
    if args.repeat > 1:
        copy_all_apks = all_apks.copy()
        copy_all_apks_login_scripts = all_apks_login_scripts.copy()
        for i in range(1, args.repeat):
            all_apks = all_apks + copy_all_apks
            all_apks_login_scripts = all_apks_login_scripts + copy_all_apks_login_scripts
    print("the apk list to fuzz: %s" % str(all_apks))
    number_of_apks = len(all_apks)
    apk_index = 0
    while 0 <= apk_index < number_of_apks:
        # Fresh pool per chunk: one task per emulator, then join.
        p = ThreadPool(args.number_of_devices)
        for avd_serial in avd_serial_list:
            # Stagger launches so emulators don't race on host resources.
            time.sleep(10)
            if apk_index >= number_of_apks:
                break
            current_apk = all_apks[apk_index]
            print(os.path.exists(current_apk))
            print("Now allocate the apk: %s on %s" % (current_apk, avd_serial))
            login_script = all_apks_login_scripts[apk_index]
            print("its login script: %s" % login_script)
            # Dispatch to whichever tool flag was set (flags are mutually
            # exclusive by convention; the first matching one wins).
            if args.monkey:
                p.apply_async(run_monkey, args=(current_apk, avd_serial, args.avd_name,
                                                args.o, args.time, screen_option,
                                                login_script,))
            elif args.ape:
                p.apply_async(run_ape, args=(current_apk, avd_serial, args.avd_name,
                                             args.o, args.time, screen_option,
                                             login_script,))
            elif args.timemachine:
                # TimeMachine additionally needs the emulator's adb port.
                avd_port = avd_serial.split('-')[1]
                p.apply_async(run_timemachine, args=(current_apk, avd_serial, args.avd_name,
                                                     args.o, args.time, screen_option,
                                                     login_script, avd_port,))
            elif args.combo:
                p.apply_async(run_combodroid, args=(current_apk, avd_serial, args.avd_name,
                                                    args.o, args.time, screen_option,
                                                    login_script,))
            elif args.combo_login:
                p.apply_async(run_combodroid_login, args=(current_apk, avd_serial, args.avd_name,
                                                          args.o, args.time, screen_option,
                                                          login_script,))
            elif args.humanoid:
                p.apply_async(run_humanoid, args=(current_apk, avd_serial, args.avd_name,
                                                  args.o, args.time, screen_option,
                                                  login_script,))
            elif args.weighted:
                p.apply_async(run_weighted, args=(current_apk, avd_serial, args.avd_name,
                                                  args.o, args.time, screen_option,
                                                  login_script,))
            elif args.stoat:
                p.apply_async(run_stoat, args=(current_apk, avd_serial, args.avd_name,
                                               args.o, args.time, screen_option,
                                               login_script,))
            elif args.sapienz:
                p.apply_async(run_sapienz, args=(current_apk, avd_serial, args.avd_name,
                                                 args.o, args.time, screen_option,
                                                 login_script,))
            elif args.qtesting:
                p.apply_async(run_qtesting, args=(current_apk, avd_serial, args.avd_name,
                                                  args.o, args.time, screen_option,
                                                  login_script,))
            else:
                # No tool flag given: the apk is skipped but still consumed.
                pass
            apk_index += 1
        print("wait the allocated devices to finish...")
        p.close()
        p.join()
if __name__ == '__main__':
    ap = ArgumentParser()
    # by default, we run each bug/tool for 6h & 5r.
    # Each emulator is configured as 2GB RAM, 1GB internal storage and 1GB SDCard
    ap.add_argument('--avd', type=str, dest='avd_name', help="the device name")
    ap.add_argument('--apk', type=str, dest='apk')
    ap.add_argument('-n', type=int, dest='number_of_devices', default=1,
                    help="number of emulators created for testing, default: 1")
    ap.add_argument('--apk-list', type=str, dest='apk_list', help="list of apks under test")
    ap.add_argument('-o', required=True, help="output dir")
    ap.add_argument('--time', type=str, default='6h', help="the fuzzing time in hours (e.g., 6h), minutes (e.g., 6m),"
                                                           " or seconds (e.g., 6s), default: 6h")
    ap.add_argument('--repeat', type=int, default=1, help="the repeated number of runs, default: 1")
    ap.add_argument('--max-emu', type=int, default=16, help="the maximum allowed number of emulators")
    ap.add_argument('--no-headless', dest='no_headless', default=False, action='store_true', help="show gui")
    ap.add_argument('--login', type=str, dest='login_script', help="the script for app login")
    ap.add_argument('--wait', type=int, dest='idle_time',
                    help="the idle time to wait before starting the fuzzing")
    # supported fuzzing tools
    ap.add_argument('--monkey', default=False, action='store_true')
    ap.add_argument('--ape', default=False, action='store_true')
    ap.add_argument('--timemachine', default=False, action='store_true')
    ap.add_argument('--combo', default=False, action='store_true')
    ap.add_argument('--combo-login', default=False, dest='combo_login', action='store_true')
    ap.add_argument('--humanoid', default=False, action='store_true')
    ap.add_argument('--stoat', default=False, action='store_true')
    ap.add_argument('--sapienz', default=False, action='store_true')
    ap.add_argument('--qtesting', default=False, action='store_true')
    ap.add_argument('--weighted', default=False, action='store_true')
    ap.add_argument('--offset', type=int, default=0, help="device offset number w.r.t emulator-5554")
    args = ap.parse_args()
    # Sanity checks before any emulator is touched.
    if args.number_of_devices + args.offset > 16:
        if not args.timemachine:
            # TimeMachine is allowed to run more than 16 instances due to it runs in the docker containers.
            ap.error('n + offset should not be ge 16')
    if args.apk is None and args.apk_list is None:
        ap.error('please specify an apk or an apk list')
    if args.apk_list is not None and not os.path.exists(args.apk_list):
        ap.error('No such file: %s' % args.apk_list)
    if 'h' not in args.time and 'm' not in args.time and 's' not in args.time:
        ap.error('incorrect time format, should be appended with h, m, or s')
    if args.idle_time is not None:
        # Bug fix (off-by-one): range(1, idle_time) slept only idle_time-1
        # minutes and skipped the wait entirely for --wait 1; now the full
        # requested number of minutes is waited.
        for i in range(int(args.idle_time)):
            print("%d minutes remaining to wait ..." % (args.idle_time - i))
            time.sleep(60)
    main(args)
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vertex AI deployment monitoring jobs delete command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ai import operations
from googlecloudsdk.api_lib.ai.model_monitoring_jobs import client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import endpoint_util
from googlecloudsdk.command_lib.ai import flags
from googlecloudsdk.command_lib.ai import model_monitoring_jobs_util
from googlecloudsdk.command_lib.ai import operations_util
from googlecloudsdk.core.console import console_io
DETAILED_HELP = {
'EXAMPLES':
"""
To delete a model deployment monitoring job `123` of project `example` in region `us-central1`, run:
$ {command} 123 --project=example --region=us-central1
""",
}
def _Run(args, version):
  """Delete the model deployment monitoring job referenced by *args*.

  Prompts the user for confirmation, issues the delete RPC against the
  regional endpoint, then waits on the resulting long-running operation.
  """
  job_ref = args.CONCEPTS.monitoring_job.Parse()
  ref_parts = job_ref.AsDict()
  job_id = ref_parts['modelDeploymentMonitoringJobsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version, region=ref_parts['locationsId']):
    console_io.PromptContinue(
        'This will delete model deployment monitoring job [{}]...'.format(
            job_id),
        cancel_on_no=True)
    operation = client.ModelMonitoringJobsClient(version=version).Delete(
        job_ref)
    return operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=operation,
        op_ref=model_monitoring_jobs_util.ParseMonitoringJobOperation(
            operation.name))
@base.ReleaseTracks(base.ReleaseTrack.GA)
class DeleteGa(base.DeleteCommand):
  """Delete an existing Vertex AI model deployment monitoring job."""

  @staticmethod
  def Args(parser):
    # Registers the positional monitoring-job resource argument.
    flags.AddModelMonitoringJobResourceArg(parser, 'to delete')

  def Run(self, args):
    # GA track delegates to the shared _Run with the GA API version.
    return _Run(args, constants.GA_VERSION)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Delete(base.DeleteCommand):
  """Delete an existing Vertex AI model deployment monitoring job."""

  @staticmethod
  def Args(parser):
    # Registers the positional monitoring-job resource argument.
    flags.AddModelMonitoringJobResourceArg(parser, 'to delete')

  def Run(self, args):
    # Alpha/Beta tracks delegate to the shared _Run with the beta API version.
    return _Run(args, constants.BETA_VERSION)
# Both release tracks share the same example-based help text.
Delete.detailed_help = DETAILED_HELP
DeleteGa.detailed_help = DETAILED_HELP
|
# coding:utf-8
import requests
import time
from math import fabs
from base64 import b64encode
# from demjson import decode
class RaspberryMonitorNetSpeed:
    """Derive WAN up/down speeds from an ASUS/N56U-firmware router status page.

    Speeds are computed from the byte-counter deltas between two successive
    polls; the constructor performs the first poll so later getter calls
    have a baseline to diff against.
    """

    url = 'http://192.168.123.1/update.cgi?output=netdev'
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'max-age=0',
        # 'Connection':'keep-alive',
        'Connection': 'close',
        'Cookie': 'n56u_cookie_bw_rt_tab=WAN',
        'Host': '192.168.123.1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
    }
    # Timestamp (epoch seconds) of the most recent successful poll.
    last_time = 0
    # WAN rx (download) byte counter at the most recent poll.
    last_rbytes = 0
    # WAN tx (upload) byte counter at the most recent poll.
    last_tbytes = 0

    def __init__(self, username, passwd):
        """Store HTTP basic-auth credentials and take the baseline reading."""
        self.headers['Authorization'] = 'Basic '+b64encode((username+':'+passwd).encode()).decode()
        # NOTE(review): if the first poll fails __get_wan_rx_and_tx returns
        # False and the subscripts below raise TypeError — confirm whether a
        # clearer error is wanted here.
        data = self.__get_wan_rx_and_tx()
        self.last_rbytes = data[0]
        self.last_tbytes = data[1]
        self.last_time = data[2]

    def set_auth(self, username, passwd):
        """Replace the stored HTTP basic-auth credentials."""
        self.headers['Authorization'] = 'Basic '+b64encode((username+':'+passwd).encode()).decode()

    def __get_wan_rx_and_tx(self):
        """Poll the router and return [rx_bytes, tx_bytes, timestamp].

        Returns False when the response cannot be parsed. The rx/tx fields
        are hex values at fixed comma-split offsets; note lstrip/rstrip strip
        *character sets* ('r', 'x', ':'), not prefixes — this works for this
        firmware's output format but is not general prefix removal.
        """
        text = requests.get(self.url, headers=self.headers).text
        try:
            rx = int(text.split(',')[25].lstrip('rx:').strip(), 16)
            tx = int(text.split(',')[26].lstrip('tx:').rstrip('}\n').strip(), 16)
            new_time = time.time()
        except (IndexError, ValueError, TypeError):
            return False
        return [rx, tx, new_time]

    def get_human_speed(self):
        """Return (download, upload) formatted in bytes/s ("KB/s"/"M/s"/"G/s").

        Returns None (implicitly) when the router poll fails; returns (0, 0)
        when two polls land in the same clock tick.
        """
        data = self.__get_wan_rx_and_tx()
        if data:
            down_speed = 0
            up_speed = 0
            try:
                down_speed = self.__bytes_to_humanspeed((data[0] - self.last_rbytes) / (data[2] - self.last_time))
                up_speed = self.__bytes_to_humanspeed((data[1] - self.last_tbytes) / (data[2] - self.last_time))
            except ZeroDivisionError:
                # No measurable interval between the two polls.
                pass
            self.last_rbytes = data[0]
            self.last_tbytes = data[1]
            self.last_time = data[2]
            return down_speed, up_speed

    def __bytes_to_humanspeed(self, B):
        """Format a bytes-per-second value using binary (1024) unit steps."""
        absval = fabs(B) / 1024
        megabyte = 1024
        gigabyte = megabyte * 1024
        terabyte = gigabyte * 1024
        # petabyte = terabyte * 1024
        if absval < megabyte:
            return str(round(absval, 2)) + ' KB/s'
        elif absval < gigabyte:
            return str(round(absval / megabyte, 2)) + ' M/s'
        else:
            return str(round(absval / gigabyte, 2)) + ' G/s'

    def get_bits_speed(self):
        """Return (download, upload) formatted in bits/s ("Kbps"/"Mbps"/"Gbps").

        Returns None (implicitly) when the router poll fails.
        """
        data = self.__get_wan_rx_and_tx()
        if data:
            down_speed = 0
            up_speed = 0
            # BUG FIX: unlike get_human_speed, this method had no guard for a
            # zero time delta and crashed with ZeroDivisionError when polled
            # twice within the same clock tick. Now it returns (0, 0), which
            # is consistent with get_human_speed.
            try:
                down_speed = self.__bytes_to_bitrate((data[0] - self.last_rbytes) / (data[2] - self.last_time))
                up_speed = self.__bytes_to_bitrate((data[1] - self.last_tbytes) / (data[2] - self.last_time))
            except ZeroDivisionError:
                pass
            self.last_rbytes = data[0]
            self.last_tbytes = data[1]
            self.last_time = data[2]
            return down_speed, up_speed

    def __bytes_to_bitrate(self, B):
        """Format a bytes-per-second value as a bit rate with decimal (1000) steps."""
        bits = B * 8
        absval = fabs(bits)
        kilobit = 1000
        megabit = kilobit * 1000
        gigabit = megabit * 1000
        if absval < megabit:
            return str(round(bits / kilobit, 2)) + ' Kbps'
        elif absval < gigabit:
            return str(round(bits / megabit, 2)) + ' Mbps'
        else:
            return str(round(bits / gigabit, 2)) + ' Gbps'
#if __name__ == '__main__':
# a = RaspberryMonitorNetSpeed('admin', 'admin')
# while True:
# tmp = a.get_human_speed()
#
# print('U: ' + str(ns[1]))
# draw.text((0,0), 'U: ' + str(ns[1]), font=font)
# # Display image.
# disp.image(image)
# disp.display()
# time.sleep(2)
|
#!/usr/bin/python
"""
Name : create_smgr_db.py
Author : rishiv@juniper.net
Description : This program is a simple cli interface to
create server manager database with objects.
Objects can be cluster, server or image.
Mandatory Parameter : testbed.py
Optional Parameter : cluster_id
Optional Parameter : server Manager specific config file
"""
import subprocess
import json
import string
import textwrap
import tempfile
import os
import re
import fabric
import ConfigParser
import argparse
import sys
from datetime import datetime as dt
from os.path import expanduser
from smgr_add import get_default_object as get_default_object
import smgr_client_def
import imp
def svrmgr_add_all():
    """Validate CLI input, regenerate the JSON payloads, then populate the
    server-manager DB with cluster, image, package and server objects."""
    verify_user_input()
    create_json()
    # Order matters: the cluster must exist before the servers that join it.
    for add_step in (add_cluster, add_image, add_pkg, add_server):
        add_step()
def create_json():
    """Regenerate the server and cluster JSON payloads from testbed.py."""
    modify_server_json()
    modify_cluster_json()
def modify_server_json():
    """Back up the server JSON file, then rewrite it with the roles, bond
    and multi-interface data taken from testbed.py.

    Returns:
        The updated server dict, or None when no server_file is configured.
    """
    params = read_ini_file(sys.argv[1:])
    if not params:
        return None
    if 'server_file' not in params:
        return None
    server_file = params['server_file']
    # Keep a timestamped backup so a bad rewrite can be recovered.
    timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S")
    subprocess.call('cp %s %s.org.%s' % (server_file, server_file, timestamp), shell=True)
    # BUG FIX: the original opened both files without closing the reader
    # (and leaked the writer on a json error); use context managers.
    with open(server_file, 'r') as in_file:
        server_dict = json.loads(in_file.read())
    update_roles_from_testbed_py(server_dict)
    update_bond_from_testbed_py(server_dict)
    update_multi_if_from_testbed_py(server_dict)
    with open(server_file, 'w') as out_file:
        out_file.write(json.dumps(server_dict, indent=4))
    return server_dict
def update_roles_from_testbed_py(server_dict):
    """Assign roles to every server entry based on testbed.py roledefs.

    Servers not named in any roledef default to the compute role; the
    testbed role 'cfgm' is stored as 'config'.
    """
    testbed = get_testbed()
    if not testbed.env.has_key('roledefs'):
        return server_dict
    for node in server_dict['server']:
        roles = []
        for role_name in testbed.env.roledefs:
            # 'all' and 'build' are meta-groups, not real roles.
            if role_name == 'all' or role_name == 'build':
                continue
            for host_string in testbed.env.roledefs[role_name]:
                if node['ip_address'] == getIp(host_string):
                    roles.append("config" if role_name == 'cfgm' else role_name)
        node['roles'] = roles if roles else ["compute"]
    for node in server_dict['server']:
        node['cluster_id'] = get_pref_cluster_id()
    return server_dict
# end update_roles_from_testbed_py
def update_bond_from_testbed_py(server_dict):
    """Populate per-server bond-interface config from testbed.py.

    For every server whose management ip appears in testbed.bond, marks the
    server for interface setup and records the bond name, members and
    options.
    """
    testbed = get_testbed()
    # BUG FIX: the original tested for 'control_data' (copy-pasted from the
    # multi-interface updater) and then read testbed.bond, which raised
    # AttributeError when control_data was set without bond, and silently
    # skipped bond config when only bond was set.
    if 'bond' in dir(testbed):
        for node in server_dict['server']:
            for key in testbed.bond:
                ip = getIp(key)
                if node['ip_address'] == ip:
                    node['parameters']['setup_interface'] = "Yes"
                    #node['parameters']['compute_non_mgmt_ip'] = ""
                    #node['parameters']['compute_non_mgmt_gw'] = ""
                    name = testbed.bond[key]['name']
                    mode = testbed.bond[key]['mode']
                    member = testbed.bond[key]['member']
                    # miimon and xmit_hash_policy are fixed defaults.
                    option = {}
                    option['miimon'] = '100'
                    option['mode'] = mode
                    option['xmit_hash_policy'] = 'layer3+4'
                    node['bond'] = {}
                    node['bond'][name] = {}
                    node['bond'][name]['bond_options'] = "%s" % option
                    node['bond'][name]['member'] = "%s" % member
    return server_dict
#End update_bond_from_testbed_py(server_dict):
def update_multi_if_from_testbed_py(server_dict):
    """Copy control-plane interface settings (ip/gw/device) from
    testbed.control_data onto the matching server entries."""
    testbed = get_testbed()
    if 'control_data' not in dir(testbed):
        return server_dict
    for node in server_dict['server']:
        for host_key in testbed.control_data:
            if node['ip_address'] != getIp(host_key):
                continue
            node['parameters']['setup_interface'] = "Yes"
            #node['parameters']['compute_non_mgmt_ip'] = ""
            #node['parameters']['compute_non_mgmt_gway'] = ""
            ctrl = testbed.control_data[host_key]
            device = ctrl['device']
            node['control'] = {}
            node['control'][device] = {}
            node['control'][device]['ip'] = ctrl['ip']
            node['control'][device]['gw'] = ctrl['gw']
    return server_dict
#End update_multi_if_from_testbed_py(server_dict):
def getIp(string):
    """Return the first dotted-quad IPv4 address found in *string*, or None.

    Matching is purely syntactic (1-3 digit octets), so out-of-range values
    like 999.0.0.1 would also match.
    """
    # FIX: raw string — a non-raw "\d" is an invalid escape sequence that
    # warns on modern Python versions.
    regEx = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
    result = regEx.search(string)
    if result:
        return result.group()
    return None
# end getIp()
def get_image_id():
    """Read the configured image JSON file and return its first image_id."""
    params = read_ini_file(sys.argv[1:])
    # FIX: the original shadowed the path variable with the file handle and
    # leaked the handle when json.loads raised; a with-block closes it always.
    with open(params['image_file'], 'r') as image_file:
        image_json = json.loads(image_file.read())
    return image_json['image'][0]['image_id']
# end get_image_id()
def get_pkg_id():
    """Read the configured package JSON file and return its first image_id.

    Packages share the image JSON schema, hence the 'image' key.
    """
    params = read_ini_file(sys.argv[1:])
    # FIX: use a context manager so the handle is closed even when parsing
    # fails (the original also shadowed the path with the handle).
    with open(params['pkg_file'], 'r') as pkg_file:
        pkg_json = json.loads(pkg_file.read())
    return pkg_json['image'][0]['image_id']
# end get_pkg_id()
def get_cluster_id():
    """Read the configured cluster JSON file and return its first cluster id."""
    params = read_ini_file(sys.argv[1:])
    # FIX: context manager closes the handle even on a parse error; the
    # original also shadowed the path variable with the handle.
    with open(params['cluster_file'], 'r') as cluster_file:
        cluster_json = json.loads(cluster_file.read())
    return cluster_json['cluster'][0]['id']
# end get_cluster_id()
def add_cluster():
    """Create or update the cluster object in server-manager.

    Two paths: with a configured cluster_file the file is backed up, its id
    patched in-place via sed, and submitted; without one, a cluster JSON is
    synthesized (from the DB entry if present, otherwise from defaults),
    written to ~/cluster.json, and submitted.
    """
    cluster_file = None
    params=read_ini_file(sys.argv[1:])
    if params:
        try:
            cluster_file = params['cluster_file']
        except KeyError:
            pass
    cluster_id = get_pref_cluster_id()
    if not cluster_file:
        cluster_dict = {}
        # Reuse the DB definition for this cluster when one already exists.
        cluster_dict = get_cluster_with_cluster_id_from_db()
        if not len(cluster_dict['cluster']):
            cluster_dict = new_cluster()
        else:
            # DB entry exists: keep only a bare skeleton; testbed overlays
            # fill the parameters below.
            cluster_dict = {
                  "cluster" : [
                      {
                          "id" : "",
                          "parameters" : {
                           }
                      }
                  ]
               }
        cluster_dict['cluster'][0]['id'] = cluster_id
        modify_cluster_from_testbed_py(cluster_dict)
        temp_dir= expanduser("~")
        cluster_file = '%s/cluster.json' %temp_dir
        subprocess.call('touch %s' %cluster_file, shell = True)
        out_file = open(cluster_file, 'w')
        out_data = json.dumps(cluster_dict, indent=4)
        out_file.write(out_data)
        out_file.close()
    else :
        # Timestamped backup, then patch the cluster id inside the user's
        # file so it matches the preferred id.
        timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S")
        subprocess.call( 'cp %s %s.org.%s' %(cluster_file, cluster_file, timestamp), shell=True )
        subprocess.call("sed -i 's/\"id\":.*,/\"id\":\"%s\",/' %s" %(cluster_id,cluster_file), shell=True )
    subprocess.call('server-manager add cluster -f %s' %(cluster_file), shell=True )
# end add_cluster()
def add_server():
    """Submit servers from the configured JSON, then overwrite their roles,
    cluster id and storage params with data from testbed.py."""
    add_server_using_json()
    update_server_in_db_with_testbed_py()
def add_image():
    """Register the configured OS image with server-manager, then dump the DB."""
    params = read_ini_file(sys.argv[1:])
    if not params or 'image_file' not in params:
        return None
    image_file = params['image_file']
    subprocess.call('server-manager add image -f %s' %(image_file), shell=True )
    subprocess.call('server-manager show all ', shell=True)
def add_pkg():
    """Register the configured package (stored as an image) with server-manager."""
    params = read_ini_file(sys.argv[1:])
    if not params or 'pkg_file' not in params:
        return None
    pkg_file = params['pkg_file']
    subprocess.call('server-manager add image -f %s' %(pkg_file), shell=True )
    subprocess.call('server-manager show image ', shell=True)
def add_server_using_json():
    """Submit the configured server JSON file to server-manager."""
    params = read_ini_file(sys.argv[1:])
    if not params or 'server_file' not in params:
        return None
    server_file = params['server_file']
    subprocess.call('server-manager add server -f %s' %(server_file), shell=True )
    subprocess.call('server-manager show server ', shell=True)
def modify_cluster_json():
    """Back up the cluster JSON file, then rewrite it with testbed.py data.

    Returns None when no cluster_file is configured.
    """
    params = read_ini_file(sys.argv[1:])
    if not params:
        return None
    if 'cluster_file' not in params:
        return None
    cluster_file = params['cluster_file']
    # Keep a timestamped backup so a bad rewrite can be recovered.
    timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S")
    subprocess.call('cp %s %s.org.%s' % (cluster_file, cluster_file, timestamp), shell=True)
    # BUG FIX: the original never closed the input handle; context managers
    # close both files even when json parsing raises.
    with open(cluster_file, 'r') as in_file:
        cluster_dict = json.loads(in_file.read())
    modify_cluster_from_testbed_py(cluster_dict)
    with open(cluster_file, 'w') as out_file:
        out_file.write(json.dumps(cluster_dict, indent=4))
def modify_cluster_from_testbed_py(cluster_dict):
    """Overlay testbed.py settings onto the first cluster entry, in place."""
    testbed = get_testbed()
    cluster = cluster_dict['cluster'][0]
    cluster_params = cluster['parameters']
    if testbed.env.has_key('mail_to'):
        cluster['email'] = testbed.env.mail_to
    if testbed.env.has_key('encap_priority'):
        cluster_params['encapsulation_priority'] = testbed.env.encap_priority
    #if 'multi_tenancy' in dir(testbed):
    #    cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = testbed.multi_tenancy
    if 'multi_tenancy' in dir(testbed):
        # Stored as the strings "True"/"False", not booleans; any value other
        # than the boolean True maps to "False".
        cluster_params['multi_tenancy'] = "True" if testbed.multi_tenancy == True else "False"
    if 'os_username' in dir(testbed):
        cluster_params['keystone_username'] = testbed.os_username
    if 'os_password' in dir(testbed):
        cluster_params['keystone_password'] = testbed.os_password
    if 'os_tenant_name' in dir(testbed):
        cluster_params['keystone_tenant'] = testbed.os_tenant_name
    if 'router_asn' in dir(testbed):
        cluster_params['router_asn'] = testbed.router_asn
def new_cluster():
    """Build a fresh cluster dict from hard-coded defaults merged with the
    cluster defaults of the server-manager config file.

    The cluster id comes from --cluster_id, falling back to the ini file.
    """
    params=read_ini_file(sys.argv[1:])
    cluster_id = get_user_cluster_id()
    if not cluster_id:
        cluster_id = params['cluster_id']
    cluster_dict={
          "cluster" : [
              {
                  "id" : cluster_id,
                  "parameters" : {
                      "router_asn": "64512",
                      "database_dir": "/home/cassandra",
                      "database_token": "",
                      "openstack_mgmt_ip": "",
                      "use_certificates": "False",
                      "multi_tenancy": "False",
                      "encapsulation_priority": "'MPLSoUDP','MPLSoGRE','VXLAN'",
                      "service_token": "contrail123",
                      "keystone_user": "admin",
                      "keystone_password": "contrail123",
                      "keystone_tenant": "admin",
                      "openstack_password": "contrail123",
                      "analytics_data_ttl": "168",
                      "subnet_mask": "255.255.255.0",
                      "gateway": "1.1.1.254",
                      "password": "c0ntrail123",
                      "domain": "contrail.juniper.net",
                      "haproxy": "disable"
                  }
              }
          ]
       }
    config = ConfigParser.SafeConfigParser()
    config.read([smgr_client_def._DEF_SMGR_CFG_FILE])
    default_config_object = get_default_object("cluster", config)
    # Merge the config-file defaults over the hard-coded values. The
    # items()-addition form is Python 2 style; later items win on clashes.
    cluster_params_dict = dict(cluster_dict["cluster"][0]["parameters"].items() + default_config_object["parameters"].items())
    tmp_cluster_dict = dict(cluster_dict["cluster"][0].items() + default_config_object.items())
    tmp_cluster_dict["parameters"] = cluster_params_dict
    cluster_dict["cluster"][0] = tmp_cluster_dict
    return cluster_dict
# End new_cluster()
# End new_cluster()
def parse_arguments(args_str=None):
    """Build and apply the CLI argument parser.

    Args:
        args_str: argument list to parse; defaults to sys.argv[1:].
    Returns:
        argparse.Namespace with config_file, cluster_id and testbed_py.
    """
    arg_parser = argparse.ArgumentParser(
        description='''Server Manager Tool to generate json from testbed.py .
                       Value specified in --cluster_id will override value in
                       server.json and vns.json .
                    ''',
        usage= '''server-manager [-f <config_file>] [-c <cluster_id>] -t testbed.py '''
    )
    #group1 = parser.add_mutually_exclusive_group()
    arg_parser.add_argument("--config_file", "-f",
                            help="Server manager client config file ")
    arg_parser.add_argument("--cluster_id", "-c",
                            help="user specified preferred cluster_id ")
    arg_parser.add_argument("--testbed_py", "-t",
                            help="your testbed.py file")
    return arg_parser.parse_args(args_str)
# End parse_arguments
def read_ini_file(args_str=None):
    """Return the [SERVER-MANAGER] section of the config file as a dict.

    Returns None when no --config_file was given or when the file cannot be
    read/parsed (the ini file is an optional input).
    """
    args = parse_arguments(args_str)
    if not args.config_file:
        # BUG FIX: the original fell through to `return smgr_config` with the
        # name unbound, raising NameError whenever -f was omitted.
        return None
    try:
        config = ConfigParser.SafeConfigParser()
        config.read([args.config_file])
        return dict(config.items("SERVER-MANAGER"))
    except Exception:
        # ini file not mandatory input (narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are no longer swallowed).
        return None
# End read_ini_file
def get_testbed_py(args_str=None):
    """Return the testbed.py path from the CLI args, or None when absent."""
    args = parse_arguments(args_str)
    return args.testbed_py or None
# End get_testbed_py
def get_user_cluster_id(args_str=None):
    """Return the --cluster_id CLI value, or None when not supplied."""
    args = parse_arguments(args_str)
    return args.cluster_id or None
def get_server_with_cluster_id_from_db():
    """Fetch all servers of the preferred cluster from server-manager.

    The CLI output is flattened and stripped of its non-JSON preamble via
    tr/sed into a temp file, which is then parsed.
    """
    cluster_id = get_pref_cluster_id()
    temp_dir= expanduser("~")
    file_name = '%s/server_with_cluster_id_from_db.json' %(temp_dir)
    subprocess.call('server-manager show server --cluster_id %s --detail \
                    | tr -d "\n" \
                    | sed "s/[^{]*//" \
                    > %s' %(cluster_id, file_name), shell=True )
    # FIX: close the file deterministically (the original leaked the handle).
    with open(file_name, 'r') as in_file:
        server_dict = json.loads(in_file.read())
    return server_dict
def get_cluster_with_cluster_id_from_db():
    """Fetch the preferred cluster's definition from server-manager.

    The CLI output is flattened and stripped of its non-JSON preamble via
    tr/sed into ~/cluster.json, which is then parsed.
    """
    cluster_id = get_user_cluster_id()
    if not cluster_id:
        params = read_ini_file(sys.argv[1:])
        cluster_id = params['cluster_id']
    # (dead `cluster_dict = {"cluster": []}` initializer removed — it was
    # unconditionally overwritten below)
    temp_dir= expanduser("~")
    file_name = '%s/cluster.json' %(temp_dir)
    subprocess.call('server-manager show cluster --cluster_id %s --detail \
                    | tr -d "\n" \
                    | sed "s/[^{]*//" \
                    > %s' %(cluster_id, file_name), shell=True )
    # FIX: close the file deterministically (the original leaked the handle).
    with open(file_name, 'r') as in_file:
        cluster_dict = json.loads(in_file.read())
    return cluster_dict
def get_server_with_ip_from_db(ip=None):
    """Fetch the server with the given management *ip* from server-manager.

    Returns the parsed JSON dict, or *ip* unchanged (i.e. None) when no ip
    was given.
    """
    # (dead `params = read_ini_file(...)` and `server_dict = {}` removed —
    # neither value was used)
    if not ip:
        # FIX: parenthesized print for consistency with the other messages
        # in this file and Python 3 compatibility.
        print("Please provide an ip as input arg")
        return ip
    temp_dir= expanduser("~")
    file_name = '%s/server.json' %(temp_dir)
    subprocess.call('server-manager show server --ip %s --detail \
                    | tr -d "\n" \
                    | sed "s/[^{]*//" \
                    > %s' %(ip, file_name), shell=True )
    # FIX: close the file deterministically (the original leaked the handle).
    with open(file_name, 'r') as in_file:
        server_dict = json.loads(in_file.read())
    return server_dict
def get_host_roles_from_testbed_py():
    """Map each testbed host ip to its role list ('cfgm' becomes 'config')."""
    testbed = get_testbed()
    node = {}
    if not testbed.env.has_key('roledefs'):
        return node
    for role_name in testbed.env.roledefs:
        # 'all' and 'build' are meta-groups, not real roles.
        if role_name == 'all' or role_name == 'build':
            continue
        for host_string in testbed.env.roledefs[role_name]:
            ip = getIp(host_string)
            roles = node.setdefault(ip, [])
            roles.append('config' if role_name == 'cfgm' else role_name)
    return node
# end get_host_roles_from_testbed_py
def get_storage_node_config_from_testbed_py():
    """Collect per-host storage disk config from testbed.py.

    Only disk types in the allow-list are copied; anything else is reported
    and skipped.
    """
    testbed = get_testbed()
    storage_config = {}
    allowed_disk_types = ['disks']
    if not testbed.env.has_key('storage_node_config'):
        return storage_config
    for host_key in testbed.env.storage_node_config:
        node_config_dict = dict(testbed.env.storage_node_config[host_key])
        ip = getIp(host_key)
        host_config = storage_config.setdefault(ip, {})
        for disk_type in node_config_dict:
            if disk_type not in allowed_disk_types:
                print ("ERROR: An invalid disk type has been specified in the testbed.py storage node config")
            else:
                host_config[disk_type] = node_config_dict[disk_type]
    return storage_config
# end get_storage_node_config_from_testbed_py
def update_server_in_db_with_testbed_py():
    """Re-submit every testbed host to server-manager with its roles,
    cluster id and storage config, then print each updated server."""
    cluster_id = get_pref_cluster_id()
    node = get_host_roles_from_testbed_py()
    storage_config = get_storage_node_config_from_testbed_py()
    if not node:
        return
    u_server_dict = {}
    u_server_dict['server'] = []
    for key in node:
        server_dict = {}
        # The server must already exist in the DB; unknown ips are skipped.
        server_dict = get_server_with_ip_from_db(key)
        if not server_dict or not server_dict['server']:
            print ("ERROR: Server with ip %s not present in Server Manager" % key)
            continue
        server_id = server_dict['server'][0]['id']
        u_server = {}
        u_server['id'] = server_id
        u_server['cluster_id'] = cluster_id
        u_server['roles'] = node[key]
        u_server['server_params'] = {}
        if key in storage_config:
            for disk_type in storage_config[key]:
                u_server['server_params'][disk_type] = storage_config[key][disk_type]
        u_server_dict['server'].append(u_server)
    # Write the merged update payload to ~/server.json and submit it.
    temp_dir= expanduser("~")
    server_file = '%s/server.json' %temp_dir
    subprocess.call('touch %s' %server_file, shell=True)
    out_file = open(server_file, 'w')
    out_data = json.dumps(u_server_dict, indent=4)
    out_file.write(out_data)
    out_file.close()
    subprocess.call('server-manager add server -f %s' %(server_file), shell=True )
    for u_server in u_server_dict['server']:
        subprocess.call('server-manager show server --server_id %s --detail' \
            % u_server['id'], shell=True )
#End update_server_in_db_with_cluster_id
def get_pref_cluster_id():
    """Return the preferred cluster id: CLI --cluster_id wins over the ini file."""
    cluster_id = get_user_cluster_id()
    if cluster_id:
        return cluster_id
    params = read_ini_file(sys.argv[1:])
    return params['cluster_id']
def verify_user_input():
    """Exit unless a config.ini (-f) or a --cluster_id was supplied."""
    params = read_ini_file(sys.argv[1:])
    cluster_id = get_user_cluster_id()
    if not (params or cluster_id):
        sys.exit(" User should either provide --cluster_id or config.ini ")
def get_testbed():
    """Import and return the testbed.py module named on the command line.

    Returns None (implicitly) when the path does not end in .py.
    """
    filepath = get_testbed_py(sys.argv[1:])
    if not filepath:
        sys.exit("tesbed.py missing in commandline args ")  # (sic)
    mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
    if file_ext.lower() == '.py':
        return imp.load_source(mod_name, filepath)
if __name__ == "__main__" :
    import cgitb
    # Render uncaught tracebacks as plain text instead of CGI HTML.
    cgitb.enable(format='text')
    svrmgr_add_all()
|
import asyncio
from userbot import CMD_HELP, StartTime, bot
from userbot.utils import bash, edit_or_reply, zelda_cmd
@zelda_cmd(pattern="cm (.*)")
async def amireallycontent(content):
    # Posts a promo message: `.cm <caption> <link>` deletes the command, then
    # sends a fixed thumbnail with the caption (dots -> spaces) and the link.
    user = await bot.get_me()  # NOTE(review): unused — confirm before removing
    # First whitespace-separated token is the caption, second the link.
    # NOTE(review): raises IndexError when the link is omitted — confirm
    # whether a usage message is wanted instead.
    capt = str(content.pattern_match.group(1).split(" ", 2)[0])
    link = str(content.pattern_match.group(1).split(" ", 2)[1])
    capti = capt.replace(".", " ")
    thumb = "https://telegra.ph/file/412f0f8da4a86a6fa75ff.jpg"
    await asyncio.sleep(2)
    output = (
        f"**{capti}**\n\n"
        f"⬇️ KLIK UNTUK MENONTON ⬇️\n"
        f"{link}"
    )
    if thumb:
        try:
            logo = thumb
            await content.delete()
            msg = await bot.send_file(content.chat_id, logo, caption=output)
            # Keep the post for 5 minutes; auto-delete is currently disabled.
            await asyncio.sleep(300)
            # await msg.delete()
        except BaseException:
            # Fall back to a plain text edit when sending the file fails.
            await content.edit(
                output + "\n\n ***Logo yang diberikan tidak valid."
                "\nPastikan link diarahkan ke gambar logo**"
            )
            # await asyncio.sleep(100)
            # await content.delete()
    else:
        await edit_or_reply(content, output)
CMD_HELP.update(
{
"content": f"**Plugin : **`asupan`\
\n\n**KHUSUS UNTUK OWNER BOT. BELUM TERSEDIA UNTUK USER**\
"
}
)
|
#-*-coding:utf-8-*-
import re
import random
from utils.tools import ch_count
# Messages that must NOT be treated as a greeting: questions (吗, ?/?),
# how-much/where/how/what words, refund (退) and invoice (发票) requests.
not_match_pattern = re.compile(r"吗|\?|?|多|哪|怎|什么|啥|退|发票")
# Openers that may start a dialog: links, greetings (您好/你好), presence
# checks (在/那个/有人), transfer-to-agent (转人工), "hi", or any message
# mentioning both 咨询 (consult) and 订单 (order).
start_pattern = re.compile(
    r"http|"
    r"您好|"
    r"你好|"
    r"在|"
    r"那个|"
    r"有人|"
    r"转人工|"
    r"hi|"
    r"(?=.*咨询)(?=.*订单)")
def intent_update(msg, dialog_status):
    """Flag *msg* as a dialog-starting greeting when it is short, not a
    question/complaint, arrives as the first turn, and no start was seen
    before. Mutates and returns *dialog_status*."""
    is_short = ch_count(msg) <= 8
    is_first_turn = len(dialog_status.context) == 1
    if (is_short
            and is_first_turn
            and not dialog_status.start_flag
            and not not_match_pattern.search(msg)
            and start_pattern.search(msg)):
        dialog_status.intent = "start"
        dialog_status.start_flag = True
    return dialog_status
def start_handle(sentence, dialog_status):
    """Return one canned greeting at random; both arguments are ignored."""
    canned_replies = [
        "您好小主,在的哈,美好的一天从遇见您开始,请问有什么可以为您服务的呢(^_^)",
        "您好,有什么问题我可以帮您处理或解决呢(^_^)",
        "亲,请问有什么可以帮助您呢 ^_^",
        "亲爱哒,小妹在的呢,有什么需要帮助的呢#E-s[数字x](^_^)",
        "亲,有缘又相见了,有什么是我可以帮您的呢(^_^)"]
    return random.sample(canned_replies, 1)[0]
|
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.models import NaiveModel
from etna.transforms.missing_values import TimeSeriesImputerTransform
from etna.transforms.missing_values.imputation import _OneSegmentTimeSeriesImputerTransform
def test_wrong_init_one_segment():
    """Check that imputer for one segment fails to init with wrong imputing strategy."""
    with pytest.raises(ValueError):
        _OneSegmentTimeSeriesImputerTransform(strategy="wrong_strategy")
def test_wrong_init_two_segments(all_date_present_df_two_segments):
    """Check that imputer for two segments fails to fit_transform with wrong imputing strategy."""
    with pytest.raises(ValueError):
        TimeSeriesImputerTransform(strategy="wrong_strategy")
@pytest.mark.smoke
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_all_dates_present_impute(all_date_present_df: pd.DataFrame, fill_strategy: str):
    """Check that imputer does nothing with series without gaps."""
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy=fill_strategy)
    transformed = imputer.fit_transform(all_date_present_df)
    np.testing.assert_array_equal(all_date_present_df["target"], transformed["target"])
@pytest.mark.smoke
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_all_dates_present_impute_two_segments(all_date_present_df_two_segments: pd.DataFrame, fill_strategy: str):
    """Check that imputer does nothing with series without gaps."""
    imputer = TimeSeriesImputerTransform(strategy=fill_strategy)
    transformed = imputer.fit_transform(all_date_present_df_two_segments)
    for segment in transformed.columns.get_level_values("segment"):
        np.testing.assert_array_equal(
            all_date_present_df_two_segments[segment]["target"], transformed[segment]["target"]
        )
def test_all_missing_impute_zero(df_all_missing: pd.DataFrame):
    """Check that imputer fills zero value if all values are nans and strategy is zero."""
    filled = _OneSegmentTimeSeriesImputerTransform(strategy="zero").fit_transform(df_all_missing)
    assert np.all(filled == 0)
def test_all_missing_impute_zero_two_segments(df_all_missing_two_segments: pd.DataFrame):
    """Check that imputer fills zero value if all values are nans and strategy is zero."""
    filled = TimeSeriesImputerTransform(strategy="zero").fit_transform(df_all_missing_two_segments)
    assert np.all(filled == 0)
@pytest.mark.parametrize("fill_strategy", ["mean", "running_mean", "forward_fill"])
def test_all_missing_impute_fail(df_all_missing: pd.DataFrame, fill_strategy: str):
    """Check that imputer can't fill nans if all values are nans."""
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy=fill_strategy)
    # Without at least one observed value there is nothing to impute from.
    with pytest.raises(ValueError, match="It isn't possible to make imputation"):
        imputer.fit_transform(df_all_missing)
@pytest.mark.parametrize("fill_strategy", ["mean", "running_mean", "forward_fill"])
def test_all_missing_impute_fail_two_segments(df_all_missing_two_segments: pd.DataFrame, fill_strategy: str):
    """Check that imputer can't fill nans if all values are nans."""
    imputer = TimeSeriesImputerTransform(strategy=fill_strategy)
    # Without at least one observed value there is nothing to impute from.
    with pytest.raises(ValueError, match="It isn't possible to make imputation"):
        imputer.fit_transform(df_all_missing_two_segments)
def test_one_missing_value_zero(df_with_missing_value_x_index: pd.DataFrame):
    """Check that imputer with zero-strategy works correctly in case of one missing value in data."""
    df, idx = df_with_missing_value_x_index
    filled = _OneSegmentTimeSeriesImputerTransform(strategy="zero").fit_transform(df)["target"]
    assert filled.loc[idx] == 0
    assert not filled.isna().any()
def test_range_missing_zero(df_with_missing_range_x_index: pd.DataFrame):
    """Check that imputer with zero-strategy works correctly in case of range of missing values in data."""
    df, rng = df_with_missing_range_x_index
    filled = _OneSegmentTimeSeriesImputerTransform(strategy="zero").fit_transform(df)["target"]
    expected_series = pd.Series(index=rng, data=[0] * len(rng), name="target")
    np.testing.assert_array_almost_equal(filled.loc[rng].reset_index(drop=True), expected_series)
    assert not filled.isna().any()
def test_one_missing_value_mean(df_with_missing_value_x_index: pd.DataFrame):
    """Check that imputer with mean-strategy works correctly in case of one missing value in data."""
    df, idx = df_with_missing_value_x_index
    # The single gap must be filled with the mean of the observed values.
    expected_value = df["target"].mean()
    filled = _OneSegmentTimeSeriesImputerTransform(strategy="mean").fit_transform(df)["target"]
    assert filled.loc[idx] == expected_value
    assert not filled.isna().any()
def test_range_missing_mean(df_with_missing_range_x_index):
    """Check that imputer with mean-strategy works correctly in case of range of missing values in data."""
    df, rng = df_with_missing_range_x_index
    # Every gap in the range must be filled with the observed mean.
    expected_value = df["target"].mean()
    filled = _OneSegmentTimeSeriesImputerTransform(strategy="mean").fit_transform(df)["target"]
    expected_series = pd.Series(index=rng, data=[expected_value] * len(rng), name="target")
    np.testing.assert_array_almost_equal(filled.loc[rng].reset_index(drop=True), expected_series)
    assert not filled.isna().any()
def test_one_missing_value_forward_fill(df_with_missing_value_x_index):
    """Check that imputer with forward-fill-strategy works correctly in case of one missing value in data."""
    df, idx = df_with_missing_value_x_index
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy="forward_fill")
    result = imputer.fit_transform(df)["target"]

    # Forward fill must copy the value of the timestamp immediately before
    # the gap, so locate the gap's position in the sorted index.
    timestamps = np.array(sorted(df.index))
    timestamp_idx = np.where(timestamps == idx)[0][0]
    expected_value = df.loc[timestamps[timestamp_idx - 1], "target"]
    assert result.loc[idx] == expected_value
    assert not result.isna().any()
def test_range_missing_forward_fill(df_with_missing_range_x_index: pd.DataFrame):
    """Check that imputer with forward-fill-strategy works correctly in case of range of missing values in data."""
    df, rng = df_with_missing_range_x_index
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy="forward_fill")
    result = imputer.fit_transform(df)["target"]

    # The whole gap must be filled with the last value observed before it:
    # find the first timestamp of the gap and take its predecessor.
    timestamps = np.array(sorted(df.index))
    rng = [pd.Timestamp(x) for x in rng]
    timestamp_idx = min(np.where([x in rng for x in timestamps])[0])
    expected_value = df.loc[timestamps[timestamp_idx - 1], "target"]
    expected_series = pd.Series(index=rng, data=[expected_value for _ in rng], name="target")
    np.testing.assert_array_almost_equal(result.loc[rng], expected_series)
    assert not result.isna().any()
@pytest.mark.parametrize("window", [1, -1, 2])
def test_one_missing_value_running_mean(df_with_missing_value_x_index: pd.DataFrame, window: int):
    """Check that imputer with running-mean-strategy works correctly in case of one missing value in data."""
    df, idx = df_with_missing_value_x_index
    timestamps = np.array(sorted(df.index))
    timestamp_idx = np.where(timestamps == idx)[0][0]
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy="running_mean", window=window)
    # window == -1 means "mean of everything before the gap"; otherwise the
    # mean of the `window` values immediately preceding it.
    if window == -1:
        expected_value = df.loc[: timestamps[timestamp_idx - 1], "target"].mean()
    else:
        expected_value = df.loc[timestamps[timestamp_idx - window] : timestamps[timestamp_idx - 1], "target"].mean()
    result = imputer.fit_transform(df)["target"]
    assert result.loc[idx] == expected_value
    assert not result.isna().any()
@pytest.mark.parametrize("window", [1, -1, 2])
def test_range_missing_running_mean(df_with_missing_range_x_index: pd.DataFrame, window: int):
    """Check that imputer with running-mean-strategy works correctly in case of range of missing values in data."""
    df, rng = df_with_missing_range_x_index
    timestamps = np.array(sorted(df.index))
    timestamp_idxs = np.where([x in rng for x in timestamps])[0]
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy="running_mean", window=window)
    result = imputer.fit_transform(df)["target"]
    assert not result.isna().any()
    # Each gap value is checked against the running mean computed over the
    # *already imputed* series, since earlier gap fills feed later windows.
    for idx in timestamp_idxs:
        if window == -1:
            expected_value = result.loc[: timestamps[idx - 1]].mean()
        else:
            expected_value = result.loc[timestamps[idx - window] : timestamps[idx - 1]].mean()
        assert result.loc[timestamps[idx]] == expected_value
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_inverse_transform_one_segment(df_with_missing_range_x_index: pd.DataFrame, fill_strategy: str):
    """Check that transform + inverse_transform don't change original df for one segment."""
    df, rng = df_with_missing_range_x_index
    imputer = _OneSegmentTimeSeriesImputerTransform(strategy=fill_strategy)
    round_trip = imputer.inverse_transform(imputer.fit_transform(df))
    np.testing.assert_array_equal(df, round_trip)
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_inverse_transform_many_segments(df_with_missing_range_x_index_two_segments: pd.DataFrame, fill_strategy: str):
    """Check that transform + inverse_transform don't change original df for two segments."""
    df, rng = df_with_missing_range_x_index_two_segments
    imputer = TimeSeriesImputerTransform(strategy=fill_strategy)
    round_trip = imputer.inverse_transform(imputer.fit_transform(df))
    np.testing.assert_array_equal(df, round_trip)
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_inverse_transform_in_forecast(df_with_missing_range_x_index_two_segments: pd.DataFrame, fill_strategy: str):
    """Check that inverse_transform doesn't change anything in forecast."""
    df, rng = df_with_missing_range_x_index_two_segments
    ts = TSDataset(df, freq=pd.infer_freq(df.index))
    model = NaiveModel()
    ts.fit_transform(transforms=[TimeSeriesImputerTransform(strategy=fill_strategy)])
    model.fit(ts)
    ts_test = ts.make_future(3)
    assert np.all(ts_test[:, :, "target"].isna())
    ts_forecast = model.forecast(ts_test)
    # NaiveModel repeats the last observed value, so every forecasted point of a
    # segment should equal that segment's final training value.
    for segment in ts.segments:
        last_value = ts[:, segment, "target"].values[-1]
        assert np.all(ts_forecast[:, segment, "target"] == last_value)
@pytest.mark.parametrize("fill_strategy", ["mean", "zero", "running_mean", "forward_fill"])
def test_fit_transform_with_nans(fill_strategy, ts_diff_endings):
    """Check that transform correctly works with NaNs at the end."""
    ts_diff_endings.fit_transform([TimeSeriesImputerTransform(in_column="target", strategy=fill_strategy)])
    target_df = ts_diff_endings[:, :, "target"]
    # After imputation no NaN may remain in any segment's target column.
    assert target_df.isna().sum().sum() == 0
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""Run the main body of this code to execute experiments."""
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.misc.flatten import flatten
from code.plotting import create_figure_and_axs, setup_ax_and_save
from code.optimizers import adam, opt_params
from code.data_loader import load_mnist
from code.neural_network import init_random_params, log_gaussian, get_loss_functions
# Global recording buffers: experiment()'s optimizer callback (which only
# receives the optimizer's own state) writes per-iteration metrics into these.
graph_iters = 100000 # The number of iterations to do the optimization for (integer).
log_likelihoods = np.zeros(graph_iters) # An array of the log likelihoods (test loss) for each iteration.
train_performance = np.zeros(graph_iters) # An array of the training performance (no regularization) for each
# iteration.
valid_loss, test_loss = np.zeros(graph_iters), np.zeros(graph_iters) # An array of the validation/test losses.
grad_norms_hypernet, grad_norms_hyper = np.zeros(graph_iters), np.zeros(graph_iters) # An array of the hypernet
# gradient norm for each iteration.
train_hypers = np.zeros(graph_iters) # An array of the hyperparameter for each hyperparameter iteration.
global_opt_iteration, global_hyperopt_iteration = 0, 0 # A record of the current hypernet/hyperparameter iteration.
hyper_cur = None # A record of the current hyperparameter; experiment() sets it to a float.
# NOTE - THESE HAVE GLOBAL SCOPE, SO THE CALLBACK CAN MODIFY THEM!!!
def experiment(train_data, valid_data, test_data, init_scale, batch_size, num_iters_hypernet, step_size_hypernet,
               graph_mod, global_seed=0):
    """Run the first experiment, which consists of fitting a hypernetwork, which outputs neural network parameters.
    These neural network parameters try to fit the training data with some additional loss for the hyperparameters.
    We observe how the hypernetwork performs on the training and testing, by graphing it against the true loss.
    The true loss is found by training a neural network to convergence at a discrete number of points.
    :param train_data: The training data which is a tuple of (train_input, train_target).
    :param valid_data: The validation data which is a tuple of (valid_input, valid_target).
    :param test_data: The testing data which is a tuple of (test_input, test_target).
    :param init_scale: The scale (positive float) for the hypernetwork initialization.
    :param batch_size: The number of hyperparameters to sample for each iteration.
    :param num_iters_hypernet: The number of iterations (integer) to run the hypernetwork optimizer for.
    :param step_size_hypernet: The step size (positive float) for the hypernetwork optimizer.
    :param graph_mod: How many iterations (integer) to wait between each graph of the loss.
    :param global_seed: The seed (integer) to use when choosing a constant seed.
    :return: None.
    """
    assert init_scale > 0
    assert step_size_hypernet > 0
    assert num_iters_hypernet > 0
    global hyper_cur
    hyper_cur = 1.0 # Initialize the hyperparameter (float).
    # Define information about hyper loss and how hyper parameters are sampled.
    hyper_sample_var = 3.0 # The variance to use when sampling hyperparameters from a Gaussian distribution.
    def sample_hypers(hyper, rs):
        """Sample a hyperparameter.
        :param hyper: The current hyperparameter ([float]).
        :param rs: A numpy randomstate.
        :return: A sampled hyperparameter (float), clipped to the graphed range.
        """
        ret = np.array([rs.randn() * hyper_sample_var + hyper]).reshape(1, -1)
        return np.clip(ret, -100.0, 4.0) # The bounds on our graph.
    def hyper_loss(weights, hyper):
        """Find the loss for neural network that is dependant on the hyperparameter.
        :param weights: The weights ([[float]]) of the neural network.
        :param hyper: The hyperparameter (float) input to the hypernetwork.
        :return: The loss (float) of network dependant on the hyperparameter.
        """
        # hyper is a log-variance: exp(hyper) is the Gaussian prior's variance.
        return -log_gaussian(weights, np.exp(hyper))
    example_hyper = sample_hypers(hyper_cur, npr.RandomState(global_seed)) # Test the sample function.
    assert example_hyper is not None
    train_inputs, train_targets = train_data
    valid_inputs, valid_targets = valid_data
    test_inputs, test_targets = test_data
    batch_ind, feature_ind = 0, 1
    elementary_input_size = np.shape(train_inputs)[feature_ind]
    elementary_output_size = np.shape(train_targets)[feature_ind]
    # A linear (no hidden layer) elementary network: input -> output.
    elementary_layer_sizes = [elementary_input_size, elementary_output_size]
    num_hypers = example_hyper.shape[feature_ind] # The dimensionality of the hyperparameter space (integer).
    # Define neural network and function to turn a vector into its weight structure.
    example_elementary_params = init_random_params(init_scale, elementary_layer_sizes, npr.RandomState(global_seed))
    flat_elementary_params, unflatten_vector_to_network_weights = flatten(example_elementary_params)
    assert hyper_loss(example_elementary_params, example_hyper) is not None
    num_elementary_params = len(flat_elementary_params)
    # Define a hypernetwork parametrized by some hyperparameters.
    # NOTE(review): one hidden layer of 50 units between the hyperparameter and
    # the flattened elementary weights (the original comment claimed none).
    hypernet_layer_sizes = [num_hypers, 50, num_elementary_params]
    objective_functions = get_loss_functions(unflatten_vector_to_network_weights, sample_hypers, hyper_loss, batch_size,
                                             train_inputs, train_targets, test_inputs, test_targets, valid_inputs,
                                             valid_targets, global_seed)
    hypernet, train_objective, valid_objective, test_objective = objective_functions[:4]
    hyper_train_objective, hyper_valid_objective, hyper_test_objective = objective_functions[4:-1]
    hyper_train_stochastic_objective = objective_functions[-1]
    # Next, train a neural network from scratch with different hyperparameter values.
    real_step_size = 0.0001 # The step size to use to find the real loss (float).
    real_num_iters = 1000 # The number of iterations to use to find the real loss (integer).
    range_min = -2.0 # The min log variance for the hyper parameter of the variance of weight distribution to graph.
    range_max = 4.0 # The max log variance for the hyper parameter of the variance of weight distribution to graph.
    num_visual_points = 10 # The number of points to test the real loss of - expensive (integer).
    real_hyper_range = np.linspace(range_min + 1.0, range_max - 1.0, num_visual_points)
    real_train_loss = np.zeros(real_hyper_range.shape)
    real_train_performance = np.zeros(real_hyper_range.shape)
    real_valid_loss = np.zeros(real_hyper_range.shape)
    real_test_loss = np.zeros(real_hyper_range.shape)
    # NOTE(review): min_real_hyper is never updated below — looks vestigial.
    min_real_valid_loss, min_real_hyper = 10e32, 10e32
    for i, hypers in enumerate(real_hyper_range):
        print("Optimizing network parameters: ", i)
        init_params = init_random_params(init_scale, elementary_layer_sizes, npr.RandomState(global_seed))
        def cur_obj(w, seed):
            """The current objective function of the neural network.
            :param w: The weights ([float]) of the neural network.
            :param seed: The seed (integer) for sampling a hyperparameter.
            :return: The current objective value (float).
            """
            return train_objective(w, hypers, seed)
        optimized_params, _, _, _ = adam(grad(cur_obj), init_params, step_size=real_step_size, num_iters=real_num_iters)
        real_train_loss[i] = train_objective(optimized_params, hypers, global_seed)
        # Performance = loss minus the hyperparameter regularization term.
        real_train_performance[i] = real_train_loss[i] - hyper_loss(optimized_params, hypers)
        real_valid_loss[i] = valid_objective(optimized_params, hypers, global_seed)
        if real_valid_loss[i] < min_real_valid_loss:
            min_real_valid_loss = real_valid_loss[i]
            print("Best hyperparameter found = ", hypers)
        real_test_loss[i] = test_objective(optimized_params, hypers, global_seed)
    fig, axs = create_figure_and_axs()
    # Set up the arrays to store information for plotting.
    num_hyper_test_points = 200 # Test a large number of hyperparameters with the learned function - cheap (integer)!
    learned_hyper_range = np.linspace(range_min, range_max, num_hyper_test_points) # Hyperparameters to test.
    hyper_train_loss = np.zeros(learned_hyper_range.shape) # Hypernetwork training loss per hyperparameter.
    hyper_train_performance = np.zeros(learned_hyper_range.shape) # Hypernetwork training performance per
    # hyperparameter. Note that performance is loss - regularization loss.
    hyper_valid_loss, hyper_test_loss = np.zeros(learned_hyper_range.shape), np.zeros(learned_hyper_range.shape)
    def callback(hyper_weights, opt_iteration, g):
        """Do whatever work is desired on each optimization iteration.
        Draws graphs, prints information, and stores information.
        :param hyper_weights: The weights ([[float]]) of the hypernetwork.
        :param opt_iteration: The current iteration of optimization.
        :param g: The gradient ([[float]]) of the optimizer.
        :return: None.
        """
        global log_likelihoods, valid_loss, test_loss, grad_norms_hyper, grad_norms_hypernet, global_opt_iteration
        global hyper_cur
        log_likelihood = hyper_train_objective(hyper_weights, hyper_cur)
        log_likelihoods[global_opt_iteration] = log_likelihood # Store the training loss.
        weights_cur = hypernet(hyper_weights, hyper_cur)
        train_performance[global_opt_iteration] = log_likelihood - hyper_loss(weights_cur, hyper_cur)
        valid_loss[global_opt_iteration] = hyper_valid_objective(hyper_weights, hyper_cur)
        test_loss[global_opt_iteration] = hyper_test_objective(hyper_weights, hyper_cur)
        # L1 norm of the full gradient, summed over every layer's weights and biases.
        grad_norm = np.sum([np.sum([np.sum(np.abs(weight_or_bias)) for weight_or_bias in layer]) for layer in g])
        grad_norms_hypernet[global_opt_iteration] = grad_norm
        # NOTE(review): carries the previous value forward; on the first
        # iteration this reads index -1 (the array's last element, 0.0) — confirm
        # this wrap-around is intended.
        grad_norms_hyper[global_opt_iteration] = grad_norms_hyper[global_opt_iteration-1]
        global_opt_iteration += 1
        print("Iteration {} Loss {} Grad L1 Norm {}".format(opt_iteration, log_likelihood, grad_norm))
        if global_opt_iteration % graph_mod == 0: # Only print on every iteration that is a multiple of graph_mod.
            [ax.cla() for ax in axs] # Clear all of the axes.
            axs[0].set_xlabel('Hyperparameter $\lambda$')
            axs[0].set_ylabel('Validation Loss $\mathcal{L}_{\mathrm{Valid.}}$')
            # Evaluate the learned loss surface on the dense hyperparameter grid.
            for cur, hyper in enumerate(learned_hyper_range):
                hyper_train_loss[cur] = hyper_train_objective(hyper_weights, hyper)
                weights = hypernet(hyper_weights, hyper)
                hyper_train_performance[cur] = hyper_train_loss[cur] - hyper_loss(weights, hyper)
                hyper_valid_loss[cur] = hyper_valid_objective(hyper_weights, hyper)
                hyper_test_loss[cur] = hyper_test_objective(hyper_weights, hyper)
            axs[0].plot(real_hyper_range, real_valid_loss, 'kx', ms=28, label="Cross-validation")
            axs[0].plot(learned_hyper_range, hyper_valid_loss, 'r-', label="Optimized hypernetwork")
            min_hyper_found = 1.8 # Known minimum from doing a search with ~1000 points over this range.
            axs[0].axvline(x=min_hyper_found, ymax=0.66, c='k', linestyle='dashed',
                           label='Optimal hyperparameter $\lambda^{*}$')
            [ax.legend(loc='upper left', borderaxespad=0.0, fancybox=True, framealpha=0.0, fontsize=28) for ax in axs]
            setup_ax_and_save(axs, fig, 'hypernets_global_small')
    def hyper_train_stochastic_objective_current(hyper_weights, seed):
        """The objective for the hypernetwork, with a fixed hyperparameter.
        :param hyper_weights: The weights ([[float]]) of the hypernetwork.
        :param seed: The seed (integer) for sampling a hyperparameter.
        :return: The hypernetwork's loss (float).
        """
        return hyper_train_stochastic_objective(hyper_cur, hyper_weights, seed)
    init_hypernet_params = init_random_params(init_scale, hypernet_layer_sizes, npr.RandomState(global_seed))
    adam(grad(hyper_train_stochastic_objective_current), init_hypernet_params, step_size=step_size_hypernet,
         num_iters=num_iters_hypernet, callback=callback)
if __name__ == '__main__':
    # Shared experiment settings (dataset sizes, step sizes, seeds) come from
    # one opt_params dict so the experiment scripts stay consistent.
    params = opt_params(graph_iters)
    _, train_images, train_labels, test_images, test_labels = load_mnist()
    n_data, n_data_val, n_data_test = params['n_data'], params['n_data_val'], params['n_data_test']
    # Carve train/validation out of MNIST's training split; test uses the test split.
    train_data = (train_images[:n_data], train_labels[:n_data])
    valid_data = (train_images[n_data:n_data + n_data_val], train_labels[n_data:n_data + n_data_val])
    test_data = (test_images[:n_data_test], test_labels[:n_data_test])
    num_iters_hypernet = graph_iters # The number of iterations to train the hypernet for (int > 0).
    experiment(train_data, valid_data, test_data, params['init_scale'], params['batch_size'],
               num_iters_hypernet, params['step_size_hypernet'], params['graph_mod'], params['global_seed'])
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # This target runs a functionally empty lint to create or update the
      # API versions cache if necessary. This prevents racy creation of the
      # cache while linting java targets in lint_action.gypi.
      'target_name': 'android_lint_cache',
      'type': 'none',
      'actions': [
        {
          'action_name': 'prepare_android_lint_cache',
          'message': 'Preparing Android lint cache',
          'variables': {
            # Stamp file written by lint.py once the cache has been populated.
            'android_lint_cache_stamp': '<(PRODUCT_DIR)/android_lint_cache/android_lint_cache.stamp',
            'android_manifest_path': '<(DEPTH)/build/android/AndroidManifest.xml',
            'result_path': '<(PRODUCT_DIR)/android_lint_cache/result.xml',
            'platform_xml_path': '<(android_sdk_root)/platform-tools/api/api-versions.xml',
          },
          'inputs': [
            '<(DEPTH)/build/android/gyp/util/build_utils.py',
            '<(DEPTH)/build/android/gyp/lint.py',
            '<(android_manifest_path)',
            '<(platform_xml_path)',
          ],
          'outputs': [
            '<(android_lint_cache_stamp)',
            '<(result_path)',
          ],
          # --create-cache makes lint.py only warm the cache: no java sources
          # are actually linted by this action.
          'action': [
            'python', '<(DEPTH)/build/android/gyp/lint.py',
            '--lint-path', '<(android_sdk_root)/tools/lint',
            '--cache-dir', '<(PRODUCT_DIR)/android_lint_cache',
            '--android-sdk-version=<(android_sdk_version)',
            '--platform-xml-path', '<(platform_xml_path)',
            '--manifest-path', '<(android_manifest_path)',
            '--product-dir', '<(PRODUCT_DIR)',
            '--result-path', '<(result_path)',
            '--stamp', '<(android_lint_cache_stamp)',
            '--create-cache',
            '--silent',
            '--enable'
          ],
        },
      ],
    },
  ],
}
|
# --------------
# Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Reading of the file
# NOTE(review): `path` is not defined in this file — presumably injected by the
# hosting environment (notebook/grader). Confirm before running standalone.
data=pd.read_csv(path)
# Code starts here
#1 Bar chart of gender counts; '-' placeholder values are relabelled 'Agender'.
data['Gender'] = data['Gender'].replace('-','Agender')
data['Gender'].value_counts().plot(kind="bar")
plt.title('Gender Distribution of Superheros')
plt.xlabel("Gender")
plt.ylabel("Frequency")
plt.show()
#2 Pie chart of the alignment (good/bad/neutral) distribution.
data.Alignment.value_counts().plot(kind="pie")
plt.title('Alignment Distribution of Superheros')
#plt.xlabel("Alignment")
#plt.ylabel("Frequency")
plt.show()
#3 Pearson correlation of Combat with Strength and Intelligence;
#  .iloc[1,0] picks the off-diagonal entry of each 2x2 correlation matrix.
pearson_strength = data[["Combat","Strength"]].corr(method='pearson')
print("Pearson coefficient between Combat and Strength: ",pearson_strength.iloc[1,0])
pearson_Intel = data[["Combat","Intelligence"]].corr(method='pearson')
print("Pearson coefficient between Combat and Intelligence: ",pearson_Intel.iloc[1,0])
#4 Names of heroes whose Total power is above the 99th percentile.
q =data['Total'].quantile([0.99])
#print(q.get(key=0.99))
d = data[['Name','Total']][(data.Total > q.get(key=0.99))]
print(d)
super_best_names = d.Name.tolist()
print(super_best_names)
|
import sys,os,subprocess,json,argparse
def main():
    """Build a BORIS project JSON describing every .mp4 under --videopath.

    For each video, ffprobe stream metadata is cached next to the file as
    ``<video>.json`` and used to fill in fps and duration. Observations are
    merged into an optional --template project (optionally cleared first with
    --removeobs); the result is written to --output.
    """
    # Fixes vs. original: Python 2 print statements (SyntaxError under py3),
    # json.dump into a binary-mode handle (TypeError under py3), and file
    # handles that were opened but never closed.
    duration = 0  # total seconds of video seen (accumulated, informational)
    parser = argparse.ArgumentParser(description='Build BORIS from data')
    parser.add_argument('--template', help="template BORIS file to load")
    parser.add_argument('--videopath', help="path with mp4 to scan")
    parser.add_argument('--output', help="output JSON BORIS")
    parser.add_argument('--removeobs', help="remove observations from template", action="store_true")
    args = parser.parse_args()
    d = args.videopath
    if args.template is not None:
        # Start from an existing project; optionally drop its observations.
        with open(args.template, "r") as template_file:
            j = json.load(template_file)
        jos = j["observations"]
        if args.removeobs:
            jos.clear()
    else:
        # Minimal empty BORIS project skeleton.
        j = {}
        j["independent_variables"] = {}
        jos = {}
        j["observations"] = jos
        j["subjects_conf"] = {}
        j["project_name"] = "Vigilante"
        j["behavioral_categories"] = []
        j["project_description"] = ""
        j["coding_map"] = {}
        j["project_format_version"] = "1.6"
        j["time_format"] = "hh:mm:ss"
        j["project_date"] = "2017-04-28T23:25:55"
        j["behaviors_conf"] = {}
    for x in os.listdir(d):
        if not x.endswith(".mp4"):
            continue
        fp = os.path.join(d, x)
        jfp = fp + ".json"
        if not os.path.isfile(jfp):
            # Cache ffprobe's stream metadata next to the video.
            os.system("ffprobe -v quiet -print_format json -show_format -show_streams \"%s\" > \"%s\"" % (fp, jfp))
        if not os.path.isfile(jfp):
            print("cannot scan", fp)
            continue
        with open(jfp, "r") as probe_file:
            q = json.load(probe_file)
        if "streams" not in q:
            print("issue with json of", fp)
            continue
        du = float(q["streams"][0]["duration"])
        fa, fb = [float(ff) for ff in q["streams"][0]["avg_frame_rate"].split("/")]
        fps = fa / fb
        duration += du
        name = x.split("_")[0]  # observation key: filename up to the first underscore
        jo = {}
        jos[name] = jo
        jo["time offset"] = 0.0
        jo["independent_variables"] = {}
        jo["visualize_spectrogram"] = False
        jo["time offset second player"] = 0
        jo["file"] = {}
        jo["date"] = "2017-04-28T23:31:17"
        jo["events"] = []
        jo["media_info"] = dict(fps={}, hasVideo={}, length={}, hasAudio={})
        jo["media_info"]["fps"][fp] = fps
        jo["media_info"]["hasVideo"][fp] = True
        jo["media_info"]["length"][fp] = du
        jo["media_info"]["hasAudio"][fp] = False
        jo["file"]["1"] = [fp]
        jo["file"]["2"] = []
        jo["description"] = x
        jo["type"] = "MEDIA"
        jo["close_behaviors_between_videos"] = False
    # Text mode is required: json.dump writes str, not bytes.
    with open(args.output, "w") as out_file:
        json.dump(j, out_file, sort_keys=True, indent=4, separators=(',', ': '))


if __name__ == '__main__':
    main()
|
"""
The flask application package.
"""
from configparser import ConfigParser
from flask import Flask,render_template,Config,session,request,abort
import base64
from functools import wraps
from flask import g,request,redirect,url_for
import itsdangerous
class LibConfig(Config):
    """
    LibServer inherits this Config class so that there is an interface to the underlying ConfigParser held by the config global handed around from Flask.
    The purpose here is to keep the configuration file a handle away. The admin panel completely ignores this and handles the configuration files directly.
    """
    tainted = False  # class-level default; mirrored per instance in __init__

    def __init__(self, *args, **kwargs):
        """
        LibConfig is essentially just a wrapper around
        a ConfigParser that reads the combined configuration
        files from the command line (typically).
        """
        self.localconf = ""
        self.baseconf = ""
        self.parser = None  # populated by load(); get()/getBool() assume it is set
        self.tainted = False
        super().__init__(*args, **kwargs)

    def get(self, section, option, default=None):
        """
        Get a configuration item from the loaded configuration
        files.
        If the section or configuration is not declared, the
        default value is returned.
        """
        # `not has_section(...)` replaces the `== False` comparison and matches
        # the style of getBool below.
        if not self.parser.has_section(section):
            return default
        if not self.parser.has_option(section, option):
            return default
        return self.parser.get(section, option)

    def getBool(self, section, option, default=True):
        """
        Gets a boolean value, with the default default being `True`
        """
        if not self.parser.has_section(section):
            return default
        if not self.parser.has_option(section, option):
            return default
        return self.parser.getboolean(section, option)

    def load(self, baseconfig, localconfig):
        """
        Load a set of configuration files.
        Later reads win, so localconfig overrides baseconfig.
        """
        self.parser = ConfigParser()
        self.parser.read(baseconfig)
        self.parser.read(localconfig)
        self.localconf = localconfig
        self.baseconf = baseconfig
class LibFlask(Flask):
    """Thin Flask subclass whose only job is pointing config_class at LibConfig."""
    config_class = LibConfig

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
# Module-level application instance; the route decorators below register on it.
app = LibFlask(__name__)
def needs_authentication():
    """
    Decorator: Attach this to a route and it will require that the session has been
    previously authenticated.
    """
    def auth_chk_wrapper(f):
        # Wraps the decorated view function.
        @wraps(f)
        def deco(*args, **kwargs):
            # Authenticated sessions proceed; everyone else is bounced to the
            # login flow carrying a signed copy of the page they wanted.
            if is_authenticated():
                return f(*args, **kwargs)
            return redirect(sign_auth_path(request.full_path))
        return deco
    return auth_chk_wrapper
def is_authenticated():
    """Gets the authentication status of the current session."""
    # Single lookup via .get instead of `in` + index; `is True` keeps the check
    # strict (this app only ever stores the booleans True/False in the flag).
    return session.get("authenticated") is True
def sign_auth_path(next_path):
    """returns a URL-safe signed next_path"""
    # Refuse anything that is not a site-relative path.
    if not next_path.startswith('/'):
        abort(503)
    signer = itsdangerous.URLSafeSerializer(app.secret_key)
    return url_for('authenticate', next=signer.dumps(next_path))
def unsign_auth_path(path_signed):
    """returns the path from a signed/sealed next_path"""
    signer = itsdangerous.URLSafeSerializer(app.secret_key)
    return signer.loads(path_signed)
@app.route("/auth/",methods=["GET","POST"])
def authenticate():
    """Login view: verify the admin passphrase and redirect to the signed
    `next` path (or home). GET renders the login form, short-circuiting if the
    session is already authenticated.
    """
    next = None
    if 'next' in request.args:
        next = request.args["next"]
    elif 'next' in request.form:
        next = request.form['next']
    if request.method == "POST":
        passphrase = app.config.get("general","admin_key")
        chkpass = request.form["password"]
        # SECURITY: the original printed both the configured passphrase and the
        # submitted password to stdout — never log credentials.
        if passphrase == chkpass:
            print("Successful login!")
            session["authenticated"]=True
            # redirect off
            if next is None:
                print("No redirect specified. We're going home.")
                return redirect(url_for("home"))
            try:
                return redirect(unsign_auth_path(next))
            except itsdangerous.BadData:
                # Tampered/garbled `next` token (was a bare `except:`).
                abort(500)
        else:
            session["authenticated"]=False
            return render_template("login.html",fail=True,next=next)
    else:
        # are we already authenticated?
        if is_authenticated():
            if next is not None:
                try:
                    return redirect(unsign_auth_path(next))
                except itsdangerous.BadData:
                    return redirect(url_for('home'))
            return redirect(url_for("home"))
        else:
            return render_template("login.html",next=next)
@app.route("/auth/logout")
def logout():
    """Clear the session's authentication flag and send the user home."""
    session["authenticated"] = False
    return redirect(url_for('home'))
@app.route('/')
def home():
    """Render the landing page."""
    # TODO: flesh this out — content listings, disk usage, collection lists
    # (ideas tracked in TODO).
    return render_template('index.html',year=2017,title="Hello!")
|
def mensagem(msg):
    """Print msg framed above and below by a tilde ruler two chars wider on each side."""
    borda = '~' * (len(msg) + 4)
    print(borda)
    print(f'  {msg}')
    print(borda)
# Programa Principal
# The f-string wrappers around constant strings (f'{"..."}') were redundant;
# passing the literals directly produces identical output.
mensagem('Gustavo Guanabara')
mensagem('Curso de Python no Youtube')
mensagem('CeV')
|
from cabi.prepare_data.complete import complete
from cabi.techniques.ensemble import random_forest
import pandas as pd
from sklearn.externals import joblib
def create_model(
        file_prefix, db_engine, station_id, start, end, prep=complete,
        technique=random_forest):
    """Train and persist 'empty' and 'full' availability models for one station.

    Both timestamps are floored to the previous 5-minute boundary before the
    data-prep step; the fitted models are dumped to `<file_prefix>_empty` and
    `<file_prefix>_full`.
    """
    def _floor_to_five_minutes(ts):
        # Clamp to the enclosing 5-minute bucket, dropping seconds/microseconds.
        return ts.replace(minute=ts.minute - (ts.minute % 5), second=0, microsecond=0)

    start = _floor_to_five_minutes(pd.to_datetime(start, infer_datetime_format=True))
    end = _floor_to_five_minutes(pd.to_datetime(end, infer_datetime_format=True))
    data = prep(db_engine, station_id, start, end)
    for label in ("empty", "full"):
        model = technique(data["X"], data["y" + label])
        joblib.dump(model, "{}_{}".format(file_prefix, label), compress=True)
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from unittest import TestCase
from traceml.processors.importance_processors import calculate_importance_correlation
@pytest.mark.processors_mark
class TestFeatureImportance(TestCase):
    """Unit tests for calculate_importance_correlation."""

    def test_empty_value(self):
        """None inputs yield None."""
        assert calculate_importance_correlation(None, None) is None

    def test_single_value(self):
        """A single observation gets zero importance and no correlation."""
        expected = {"param1": {"correlation": None, "importance": 0.0}}
        assert calculate_importance_correlation([{"param1": 3}], [4]) == expected

    def test_correct_values(self):
        """A perfectly correlated single param takes all the importance."""
        params = [{"param1": value} for value in (3, 4, 5)]
        expected = {"param1": {"correlation": 1.0, "importance": 1.0}}
        assert calculate_importance_correlation(params, [3, 4, 5]) == expected

    def test_multiple_params(self):
        """Importance is split across params; correlations keep their signs."""
        params = [
            {"param1": 1, "param2": 3},
            {"param1": 2, "param2": 2},
            {"param1": 3, "param2": 1},
        ]
        expected = {
            "param1": {"correlation": 1.0, "importance": 0.464},
            "param2": {"correlation": -1.0, "importance": 0.536},
        }
        assert calculate_importance_correlation(params, [1, 2, 3]) == expected

    def test_wrong_string_params(self):
        """Bare strings instead of param dicts are rejected."""
        assert calculate_importance_correlation(["foo", "bar"], []) is None

    def test_complex_params(self):
        """String values get one-hot expanded into param_value keys."""
        params = [{"param1": "str1", "param2": 1}, {"param1": 2, "param2": 2}]
        expected = {
            "param1_2": {"correlation": 1.0, "importance": 0.348},
            "param1_str1": {"correlation": -1.0, "importance": 0.308},
            "param2": {"correlation": 1.0, "importance": 0.344},
        }
        assert calculate_importance_correlation(params, [1, 2]) == expected

    def test_nan_value(self):
        """A NaN metric invalidates the whole computation."""
        params = [{"param1": 3, "param2": 1}, {"param1": 2, "param2": 2}]
        assert calculate_importance_correlation(params, [np.nan, 2]) is None

    def test_empty_metrics(self):
        """No metrics means nothing to correlate."""
        assert calculate_importance_correlation([{"foo": 2, "bar": 4}], []) is None
|
def increment_string(s):
    """Return s with its trailing integer incremented by one.

    The numeric suffix keeps its zero-padded width unless the increment
    overflows it (e.g. 'foo099' -> 'foo100'). Strings without a trailing
    number simply get '1' appended.
    """
    digits = '0123456789'
    prefix = s.rstrip(digits)
    suffix = s[len(prefix):]
    if not suffix:
        return s + '1'
    return prefix + str(int(suffix) + 1).zfill(len(suffix))
|
"""Test Scrape component setup process."""
from __future__ import annotations
from unittest.mock import patch
from homeassistant.components.scrape.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from . import MockRestData
from tests.common import MockConfigEntry
# Config-entry options shared by the tests below: scrape the Home Assistant
# homepage's current-version header into a sensor titled "Release".
TEST_CONFIG = {
    "resource": "https://www.home-assistant.io",
    "name": "Release",
    "select": ".current-version h1",
    "value_template": "{{ value.split(':')[1] }}",
    "index": 0,
    "verify_ssl": True,
}
async def test_setup_entry(hass: HomeAssistant) -> None:
    """Test setup entry."""
    entry = MockConfigEntry(domain=DOMAIN, data={}, options=TEST_CONFIG, title="Release")
    entry.add_to_hass(hass)
    mocked_rest = MockRestData("test_scrape_sensor")
    with patch(
        "homeassistant.components.scrape.RestData",
        return_value=mocked_rest,
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Setup should have created the scraped sensor.
    assert hass.states.get("sensor.release")
async def test_setup_entry_no_data_fails(hass: HomeAssistant) -> None:
    """Test setup entry no data fails."""
    entry = MockConfigEntry(
        domain=DOMAIN, data={}, options=TEST_CONFIG, title="Release", entry_id="1"
    )
    entry.add_to_hass(hass)
    mocked_rest = MockRestData("test_scrape_sensor_no_data")
    with patch(
        "homeassistant.components.scrape.RestData",
        return_value=mocked_rest,
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # No sensor should exist and the entry must be scheduled for retry.
    assert hass.states.get("sensor.ha_version") is None
    reloaded = hass.config_entries.async_get_entry("1")
    assert reloaded.state == ConfigEntryState.SETUP_RETRY
async def test_remove_entry(hass: HomeAssistant) -> None:
    """Test remove entry."""
    entry = MockConfigEntry(domain=DOMAIN, data={}, options=TEST_CONFIG, title="Release")
    entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.scrape.RestData",
        return_value=MockRestData("test_scrape_sensor"),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Sensor exists after setup...
    assert hass.states.get("sensor.release")
    await hass.config_entries.async_remove(entry.entry_id)
    await hass.async_block_till_done()
    # ...and is gone once the entry has been removed.
    assert not hass.states.get("sensor.release")
|
from __future__ import print_function
"""
Do a mouseclick somewhere, move the mouse to some destination, release
the button. This class gives click- and release-events and also draws
a line or a box from the click-point to the actual mouseposition
(within the same axes) until the button is released. Within the
method 'self.ignore()' it is checked wether the button from eventpress
and eventrelease are the same.
"""
from matplotlib.widgets import RectangleSelector
import numpy as np
import matplotlib.pyplot as plt
def line_select_callback(eclick, erelease):
    """Report the press/release corners and the buttons of a rubber-band drag."""
    press_point = (eclick.xdata, eclick.ydata)
    release_point = (erelease.xdata, erelease.ydata)
    print ("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (press_point + release_point))
    print (" The button you used were: %s %s" % (eclick.button, erelease.button))
def toggle_selector(event):
    """Key handler: 'q' deactivates the attached RectangleSelector, 'a' reactivates it."""
    print (' Key pressed.')
    selector = toggle_selector.RS
    if event.key in ['Q', 'q'] and selector.active:
        print (' RectangleSelector deactivated.')
        selector.set_active(False)
    if event.key in ['A', 'a'] and not selector.active:
        print (' RectangleSelector activated.')
        selector.set_active(True)
current_ax = plt.subplot(111)    # make a new plotting axes
N = 100000                       # If N is large one can see
x = np.linspace(0.0, 10.0, N)    # improvement by use blitting!
plt.plot(x, +np.sin(.2*np.pi*x), lw=3.5, c='b', alpha=.7)  # plot something
plt.plot(x, +np.cos(.2*np.pi*x), lw=3.5, c='r', alpha=.5)
plt.plot(x, -np.sin(.2*np.pi*x), lw=3.5, c='g', alpha=.3)
print ("\n click --> release")
# drawtype is 'box' or 'line' or 'none'
# NOTE(review): `drawtype` was deprecated and later removed in newer Matplotlib
# releases — confirm the pinned Matplotlib version before upgrading.
toggle_selector.RS = RectangleSelector(current_ax, line_select_callback,
                                       drawtype='box', useblit=True,
                                       button=[1,3], # don't use middle button
                                       minspanx=5, minspany=5,
                                       spancoords='pixels')
plt.connect('key_press_event', toggle_selector)
plt.show()
|
import numpy as np
import soundfile as sf
from scipy import signal
import random
import os
def load_audio(path):
    """Read an audio file and return it as a mono float32 array scaled to roughly [-1, 1]."""
    sound, sample_rate = sf.read(path, dtype='int16')
    # TODO this should be 32768.0 to get twos-complement range.
    # TODO the difference is negligible but should be fixed for new models.
    sound = sound.astype('float32') / 32767  # normalize audio
    if len(sound.shape) > 1:
        # Collapse to mono: drop a singleton channel axis, otherwise average channels.
        sound = sound.squeeze() if sound.shape[1] == 1 else sound.mean(axis=1)
    return sound
class Augment_wave(object):
    """Waveform augmentation by reverberation with measured room impulse responses (RIRs)."""
    def __init__(self, fs, rir_path, split='train', shift_output=True, normalize=True):
        """
        :param fs: sample rate (Hz), used to convert seconds to sample counts.
        :param rir_path: directory scanned recursively for *.wav RIR files.
        :param split: one of 'train'/'dev'/'test'; currently only validated, not otherwise used here.
        :param shift_output: if True, align output to the RIR peak and keep the input length.
        :param normalize: if True, rescale output back to the input's average power.
        """
        self.fs = fs
        self.shift_output = shift_output
        self.normalize = normalize
        assert split in ['train', 'dev', 'test']
        self.rir_path = rir_path
        self.rir = None
        self.init()
    def init(self):
        # construct rir dict: room name -> shuffled list of RIR filenames.
        # Filenames are expected to look like *_*_<room>-*.wav; the room token
        # must be one of self.rooms (asserted below).
        self.rooms = ['ConferenceRoom2', 'C236', 'D105', 'E112', 'L207', 'L212', 'Q301']
        rir_all = {}
        for room in self.rooms:
            rir_all[room] = []
        for _, _, files in os.walk(self.rir_path):
            for rir in files:
                if rir.endswith('.wav'):
                    rir_room = rir.split('_')[2].split('-')[0]
                    assert rir_room in self.rooms
                    rir_all[rir_room].append(rir)
        for room in self.rooms:
            random.shuffle(rir_all[room])
        self.rir_dict = rir_all
    def compute_early_reverb_power(self, wave, rir):
        """Average power of `wave` convolved with only the early part of the RIR
        (1 ms before to 50 ms after the direct-path peak)."""
        peak_index = rir.argmax()
        before_peak = int(0.001 * self.fs)  # 1 ms before the peak
        after_peak = int(0.05 * self.fs)  # 50 ms of early reflections
        early_rir_start = max(0, peak_index - before_peak)
        early_rir_end = min(rir.shape[0], peak_index + after_peak)
        early_rir = rir[early_rir_start: early_rir_end]
        early_reverb = signal.fftconvolve(wave, early_rir, mode="full")
        early_power = np.dot(early_reverb, early_reverb) / early_reverb.shape[0]
        return early_power
    def do_reverb(self, wave, rir):
        """Convolve `wave` with the peak-normalized RIR; also return the early-reverb power."""
        rir = rir.astype("float32")
        rir = rir / np.max(np.abs(rir))
        early_power = self.compute_early_reverb_power(wave, rir)
        wave = signal.fftconvolve(wave, rir)
        return wave, early_power
    def augment(self, wave, rir=None):
        """Reverberate `wave` with `rir`, optionally power-normalizing and
        peak-aligning the result; returns `wave` unchanged when rir is None."""
        if rir is None:
            return wave
        wave = wave.astype("float32")
        # Target output length: same as input when shifting, else full convolution length.
        if self.shift_output is True:
            dur2len = len(wave)
        else:
            dur2len = len(wave) + len(rir) - 1
        before_power = np.dot(wave, wave) / wave.shape[0]
        peak_index = 0
        # NOTE(review): early_power is computed but not used for normalization
        # below (before_power is used instead) — confirm this is intended.
        early_power = before_power
        # reverberate the wave
        if rir is not None:
            wave, early_power = self.do_reverb(wave, rir)
            if self.shift_output is True:
                peak_index = rir.argmax()
        # normalize wave
        if self.normalize is True:
            # compute the power of wave after reverberation and possibly noise
            after_power = np.dot(wave, wave) / wave.shape[0]
            wave = wave * (np.sqrt(before_power / after_power))
        # extend wave if duration is larger than the length of wave
        if dur2len > wave.shape[0]:
            wave = np.pad(wave, pad_width=(0, dur2len - wave.shape[0]), mode="wrap")
        # shift the output wave by shift_output
        wave = wave[peak_index: peak_index + dur2len]
        return wave
    def inject(self, wave):
        # sample a room and a rir of the room based on P(r): uniform distribution
        room = np.random.choice(self.rooms)
        rir_name = np.random.choice(self.rir_dict[room])
        rir = load_audio(os.path.join(self.rir_path, rir_name))
        return self.augment(wave, rir=rir)
|
# -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class medical_medication_dosage(models.Model):
    # Master-data model for medication dosage frequencies (e.g. "once daily").
    _name = 'medical.medication.dosage'
    # Human-readable frequency label shown in the UI.
    name = fields.Char(string="Frequency",required=True)
    # Optional short form of the frequency (e.g. "b.i.d.").
    abbreviation = fields.Char(string="Abbreviation")
    # Optional external/interop code for this dosage frequency.
    code = fields.Char(string="Code")
|
from .emailService import Email
from .sms import Sms
from .console import Console
from .mqtt import Mqtt
from .attachment import Attachment
from .notification import Notification
class Services:
    """Facade that lazily builds per-service clients bound to one API token."""

    def __init__(self, token):
        self.token = token  # credential shared by every service client

    @property
    def attachment(self):
        """Attachment service client."""
        return Attachment(self.token)

    @property
    def console(self):
        """Console service client."""
        return Console(self.token)

    @property
    def email(self):
        """E-mail service client."""
        return Email(self.token)

    @property
    def MQTT(self):
        """MQTT service client (capitalised name kept for callers)."""
        return Mqtt(self.token)

    @property
    def notification(self):
        """Notification service client."""
        return Notification(self.token)

    @property
    def sms(self):
        """SMS service client."""
        return Sms(self.token)
|
import sys
import logging
# Lazily-initialised singleton loggers, one per data source; each is populated
# by its corresponding *_log() accessor on first use.
logger_bike = None
logger_bus = None
logger_app = None
logger_emergency = None
logger_parkings = None
logger_population = None
def bike_log():
    """Return the module-wide bike-API logger, creating it on first use."""
    global logger_bike
    if logger_bike is not None:
        return logger_bike
    logger_bike = create_logger('bike_api')
    return logger_bike
def bus_log():
    """Return the module-wide bus-API logger, creating it on first use."""
    global logger_bus
    if logger_bus is not None:
        return logger_bus
    logger_bus = create_logger('bus_api')
    return logger_bus
def app_log():
    """Return the module-wide application logger, creating it on first use."""
    global logger_app
    if logger_app is not None:
        return logger_app
    logger_app = create_logger('application')
    return logger_app
def emergency_service_log():
    """Return the module-wide emergency-service logger, creating it on first use."""
    global logger_emergency
    if logger_emergency is not None:
        return logger_emergency
    logger_emergency = create_logger('emergency_service_api')
    return logger_emergency
def parkings_log():
    """Return the module-wide parkings logger, creating it on first use."""
    global logger_parkings
    if logger_parkings is not None:
        return logger_parkings
    logger_parkings = create_logger('parkings')
    return logger_parkings
def population_log():
    """Return the module-wide population logger, creating it on first use."""
    global logger_population
    if logger_population is not None:
        return logger_population
    logger_population = create_logger('population_api')
    return logger_population
def create_logger(file_name):
    """Create (or fetch) a logger named ``file_name``.

    Errors are written to ``./main_project/Logs/<file_name>.log``; everything
    (DEBUG and up) is also echoed to the console.
    """
    logger = logging.getLogger(file_name)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # Already configured earlier in this process; returning here prevents
        # stacking duplicate handlers (which would duplicate every log line).
        return logger
    formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
    file_handler = logging.FileHandler(
        './main_project/Logs/' + file_name + '.log')
    file_handler.setLevel(logging.ERROR)  # only errors go to the file
    file_handler.setFormatter(formatter)
    # Handler that prints logging info on the console.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger
|
import subprocess
def traceroute(dest: str) -> dict:
    """
    Trace the route to some destination IP and return a dict of intermediates.
    In order to prevent the daemon itself from needing to run with elevated
    privileges, this is primarily handled by the system's traceroute utility.

    Returns a mapping of hop number -> IP string ("?" for hops that did not
    answer).  Returns -1 (legacy sentinel kept for existing callers) when
    ``dest`` contains characters outside the hex-digit/dot/colon set.
    """
    # Sanitize input: only characters that can appear in an IPv4/IPv6 address
    # are allowed, since ``dest`` may come from untrusted callers.
    allowed = set("ABCDEFabcdef1234567890.:")
    if any(char not in allowed for char in dest):
        return -1
    # Run the system traceroute with one probe per hop.  The option and its
    # value are separate argv entries ("-q", "1"); the previous single
    # "-q 1" string only worked by accident of getopt parsing.  ``run``
    # also waits for the child, avoiding the zombie the old Popen left.
    proc = subprocess.run(["traceroute", "-q", "1", dest],
                          stdout=subprocess.PIPE)
    output = proc.stdout.decode("utf-8")
    hops = {}
    # Parse one "<n> <host> (<ip>) ..." line per hop.
    for line in output.split("\n"):
        try:
            tokens = line.split()
            n = int(tokens[0])
            if tokens[1] == "*":
                ip = "?"  # hop did not answer the probe
            else:
                ip = tokens[2].strip("(").strip(")")
            hops[n] = ip
        except (IndexError, ValueError):
            # Header/blank lines do not parse as hops; skip them.
            continue
    return hops
|
class TextExtractor:
    """
    Extract text from a txt file
    """
    def extract_text(self, file, encoding = None):
        """
        Extract text from a plain-text file.
        :param file: file_path of text file
        :param encoding: text encoding handed to ``open`` (default: platform
            default).  BUG FIX: this parameter was previously accepted but
            silently ignored.
        :return: List of dictionary in following keys
                "text": Transcript text
                e.g.
                [
                    {
                        "text": "Hello",
                    }
                ]
        """
        with open(file, "r", encoding=encoding) as f:
            # One dict per line, trailing newline stripped.
            return [{"text": line.rstrip('\n')} for line in f]
from typing import List
from annotation.param import Param
class Jsdoc:
    """Accumulates the pieces of a JSDoc block (params, name, return type,
    line number) and renders the comment via ``str()``.

    Fixes: ``type_`` and ``line_number`` are now initialised in ``__init__``
    so ``__str__`` and the getters no longer raise AttributeError when the
    setters were never called; the builtin ``repr`` is no longer shadowed;
    ``Param`` annotations are quoted (lazy) so the class does not hard-fail
    when the annotation-only import is unavailable.
    """
    def __init__(self):
        self.params = []       # Param objects, rendered one per line
        self.name = ''         # documented function's name
        self.type_ = ''        # return type; empty until set_type()
        self.line_number = 0   # source line; 0 until set_line_number()

    def add_param(self, param: "Param"):
        """Append one @param annotation."""
        self.params.append(param)

    def set_name(self, name: str):
        self.name = name

    def set_type(self, type_: str):
        self.type_ = type_

    def set_line_number(self, line_number: int):
        self.line_number = line_number

    def __str__(self):
        # Renders: "/**\n<params...>\n* @return {type}\n*/\n"
        lines = ["/**"]
        lines.extend(str(param) for param in self.params)
        lines.append("* @return {{{}}}".format(self.type_))
        lines.append("*/")
        return "\n".join(lines) + "\n"

    def get_params(self) -> "List[Param]":
        return self.params

    def get_name(self) -> str:
        return self.name

    def get_line_number(self) -> int:
        return self.line_number

    def get_type(self) -> str:
        return self.type_
#
# STEP Part 21 Parser
#
# Copyright (c) 2011, Thomas Paviot (tpaviot@gmail.com)
# Copyright (c) 2014, Christopher HORLER (cshorler@googlemail.com)
#
# All rights reserved.
#
# This file is part of the StepClassLibrary (SCL).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import logging
import ply.lex as lex
import ply.yacc as yacc
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
####################################################################################################
# Common Code for Lexer / Parser
####################################################################################################
class Base:
    # Token names shared by the lexer and the parser; PLY requires both
    # sides to agree on this tuple.
    tokens = ('INTEGER', 'REAL', 'USER_DEFINED_KEYWORD', 'STANDARD_KEYWORD', 'STRING', 'BINARY',
              'ENTITY_INSTANCE_NAME', 'ENUMERATION', 'PART21_END', 'PART21_START', 'HEADER_SEC',
              'ENDSEC', 'DATA_SEC')
####################################################################################################
# Lexer
####################################################################################################
class Lexer(Base):
    # NOTE: PLY builds the tokenizer from the ``t_*`` member names, their
    # docstring/attribute regexes, and their declaration order — do not
    # reorder the rules or add docstrings to the t_* methods.
    def __init__(self, debug=0, optimize=0, compatibility_mode=False, header_limit=1024):
        # Construct the PLY lexer from this instance's t_* rules.
        self.lexer = lex.lex(module=self, debug=debug, debuglog=logger, optimize=optimize,
                             errorlog=logger)
        self.entity_keywords = []
        self.compatibility_mode = compatibility_mode
        # Only the first ``header_limit`` characters are searched for the
        # 'ISO-10303-21;' marker in input().
        self.header_limit = header_limit
    # Extra lexer state; 'inclusive' keeps the INITIAL rules active as well.
    states = (('compatibility', 'inclusive'),)
    def __getattr__(self, name):
        # Delegate position attributes to the underlying PLY lexer object.
        if name == 'lineno':
            return self.lexer.lineno
        elif name == 'lexpos':
            return self.lexer.lexpos
        else:
            raise AttributeError
    def input(self, s):
        # Skip any preamble before the mandatory Part 21 start marker, but
        # keep the line count consistent for error messages.
        startidx = s.find('ISO-10303-21;', 0, self.header_limit)
        if startidx == -1:
            sys.exit('Aborting... ISO-10303-21; header not found')
        self.lexer.input(s[startidx:])
        self.lexer.lineno += s[0:startidx].count('\n')
        if self.compatibility_mode:
            self.lexer.begin('compatibility')
        else:
            self.lexer.begin('INITIAL')
    def token(self):
        # PLY's iterator raises StopIteration at EOF; callers expect None.
        try:
            return next(self.lexer)
        except StopIteration:
            return None
    def register_entities(self, entities):
        # Entity keywords get their own token types (see the keyword rules).
        self.entity_keywords.extend(entities)
    # Comment (ignored)
    def t_ANY_COMMENT(self, t):
        r'/\*(.|\n)*?\*/'
        t.lexer.lineno += t.value.count('\n')
    def t_ANY_PART21_START(self, t):
        r'ISO-10303-21;'
        return t
    def t_ANY_PART21_END(self, t):
        r'END-ISO-10303-21;'
        return t
    def t_ANY_HEADER_SEC(self, t):
        r'HEADER;'
        return t
    def t_ANY_ENDSEC(self, t):
        r'ENDSEC;'
        return t
    # Keywords
    # Compatibility mode additionally accepts lower-case keywords and
    # upper-cases them before classification.
    def t_compatibility_STANDARD_KEYWORD(self, t):
        r'(?:!|)[A-Z_][0-9A-Za-z_]*'
        t.value = t.value.upper()
        if t.value == 'DATA':
            t.type = 'DATA_SEC'
        elif t.value.startswith('!'):
            t.type = 'USER_DEFINED_KEYWORD'
        elif t.value in self.entity_keywords:
            t.type = t.value
        return t
    def t_ANY_STANDARD_KEYWORD(self, t):
        r'(?:!|)[A-Z_][0-9A-Z_]*'
        if t.value == 'DATA':
            t.type = 'DATA_SEC'
        elif t.value.startswith('!'):
            t.type = 'USER_DEFINED_KEYWORD'
        elif t.value in self.entity_keywords:
            t.type = t.value
        return t
    def t_ANY_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)
    # Simple Data Types
    t_ANY_REAL = r'[+-]*[0-9][0-9]*\.[0-9]*(?:E[+-]*[0-9][0-9]*)?'
    t_ANY_INTEGER = r'[+-]*[0-9][0-9]*'
    t_ANY_STRING = r"'(?:[][!\"*$%&.#+,\-()?/:;<=>@{}|^`~0-9a-zA-Z_\\ ]|'')*'"
    t_ANY_BINARY = r'"[0-3][0-9A-F]*"'
    t_ANY_ENTITY_INSTANCE_NAME = r'\#[0-9]+'
    t_ANY_ENUMERATION = r'\.[A-Z_][A-Z0-9_]*\.'
    # Punctuation
    literals = '()=;,*$'
    t_ANY_ignore = ' \t'
####################################################################################################
# Simple Model
####################################################################################################
class P21File:
    """A complete Part 21 exchange file: one header plus its data sections."""
    def __init__(self, header, *sections):
        self.header = header
        # NOTE: ``list(*sections)`` expects at most ONE iterable argument
        # (the parser passes a single list of sections); two or more
        # positional section arguments would raise TypeError.
        self.sections = list(*sections)
class P21Header:
    """Header of a Part 21 file: the three mandatory header entities plus
    any additional ones collected in ``extra_headers``."""
    def __init__(self, file_description, file_name, file_schema):
        self.file_description = file_description
        self.file_name = file_name
        self.file_schema = file_schema
        self.extra_headers = []  # optional extra header entities, in file order
class HeaderEntity:
    """One HEADER-section entity: a keyword plus its parameter list."""
    def __init__(self, type_name, *params):
        self.type_name = type_name
        # ``list(*params)`` expects at most one iterable in ``params``;
        # with no params the entity gets an empty list.
        self.params = list(*params) if params else []
class Section:
    """A single DATA section: an ordered collection of entity instances."""
    def __init__(self, entities):
        self.entities = entities  # entity instances in declaration order
class SimpleEntity:
    """A simple entity instance: ``#ref = TYPE(params);``."""
    def __init__(self, ref, type_name, *params):
        self.ref = ref
        self.type_name = type_name
        # ``list(*params)`` expects at most one iterable in ``params``.
        self.params = list(*params) if params else []
class ComplexEntity:
    """A complex (subsuper) entity instance: ``#ref = (REC1 REC2 ...);``."""
    def __init__(self, ref, *params):
        self.ref = ref
        # ``list(*params)`` expects at most one iterable in ``params``.
        self.params = list(*params) if params else []
class TypedParameter:
    """A typed parameter: a keyword with a nested parameter list."""
    def __init__(self, type_name, *params):
        self.type_name = type_name
        # NOTE(review): defaults to None (not []) when no params are given,
        # unlike HeaderEntity/SimpleEntity — confirm callers expect this.
        self.params = list(*params) if params else None
####################################################################################################
# Parser
####################################################################################################
class Parser(Base):
    # NOTE: the grammar lives in the p_* method docstrings (PLY/yacc
    # convention); those docstrings are program text, not prose.
    def __init__(self, lexer=None, debug=0):
        # Build the LALR parser from this instance's p_* rules.
        self.parser = yacc.yacc(module=self, debug=debug, debuglog=logger, errorlog=logger)
        if lexer is None:
            lexer = Lexer()
        self.lexer = lexer
    def parse(self, p21_data, **kwargs):
        # ``self.refs`` records entity instance names seen so far so that
        # duplicates can be rejected (see p_check_entity_instance_name).
        self.lexer.input(p21_data)
        self.refs = {}
        if 'debug' in kwargs:
            # Route PLY's debug output to our module logger.
            result = self.parser.parse(lexer=self.lexer, debug=logger,
                                       **{ k: kwargs[k] for k in kwargs if k != 'debug'})
        else:
            result = self.parser.parse(lexer=self.lexer, **kwargs)
        return result
    def p_exchange_file(self, p):
        """exchange_file : PART21_START header_section data_section_list PART21_END"""
        p[0] = P21File(p[2], p[3])
    # TODO: Specialise the first 3 header entities
    def p_header_section(self, p):
        """header_section : HEADER_SEC header_entity header_entity header_entity ENDSEC"""
        p[0] = P21Header(p[2], p[3], p[4])
    def p_header_section_with_entity_list(self, p):
        """header_section : HEADER_SEC header_entity header_entity header_entity header_entity_list ENDSEC"""
        p[0] = P21Header(p[2], p[3], p[4])
        p[0].extra_headers.extend(p[5])
    def p_header_entity(self, p):
        """header_entity : keyword '(' parameter_list ')' ';'"""
        p[0] = HeaderEntity(p[1], p[3])
    def p_check_entity_instance_name(self, p):
        """check_entity_instance_name : ENTITY_INSTANCE_NAME"""
        # Abort on a duplicate #name — each instance name must be unique.
        if p[1] in self.refs:
            logger.error('Line %i, duplicate entity instance name: %s', p.lineno(1), p[1])
            sys.exit('Aborting...')
        else:
            self.refs[p[1]] = None
            p[0] = p[1]
    def p_simple_entity_instance(self, p):
        """simple_entity_instance : check_entity_instance_name '=' simple_record ';'"""
        p[0] = SimpleEntity(p[1], *p[3])
    def p_complex_entity_instance(self, p):
        """complex_entity_instance : check_entity_instance_name '=' subsuper_record ';'"""
        p[0] = ComplexEntity(p[1], p[3])
    def p_subsuper_record(self, p):
        """subsuper_record : '(' simple_record_list ')'"""
        p[0] = [TypedParameter(*x) for x in p[2]]
    # The list rules below share one pattern: IndexError means the
    # single-item alternative matched, so a fresh list is started.
    def p_data_section_list(self, p):
        """data_section_list : data_section_list data_section
                             | data_section"""
        try: p[0] = p[1] + [p[2],]
        except IndexError: p[0] = [p[1],]
    def p_header_entity_list(self, p):
        """header_entity_list : header_entity_list header_entity
                              | header_entity"""
        try: p[0] = p[1] + [p[2],]
        except IndexError: p[0] = [p[1],]
    def p_parameter_list(self, p):
        """parameter_list : parameter_list ',' parameter
                          | parameter"""
        try: p[0] = p[1] + [p[3],]
        except IndexError: p[0] = [p[1],]
    def p_keyword(self, p):
        """keyword : USER_DEFINED_KEYWORD
                   | STANDARD_KEYWORD"""
        p[0] = p[1]
    def p_parameter_simple(self, p):
        """parameter : STRING
                     | INTEGER
                     | REAL
                     | ENTITY_INSTANCE_NAME
                     | ENUMERATION
                     | BINARY
                     | '*'
                     | '$'
                     | typed_parameter
                     | list_parameter"""
        p[0] = p[1]
    def p_list_parameter(self, p):
        """list_parameter : '(' parameter_list ')'"""
        p[0] = p[2]
    def p_typed_parameter(self, p):
        """typed_parameter : keyword '(' parameter ')'"""
        p[0] = TypedParameter(p[1], p[3])
    def p_parameter_empty_list(self, p):
        """parameter : '(' ')'"""
        p[0] = []
    # DATA section headers (with or without parameters) carry no value.
    def p_data_start(self, p):
        """data_start : DATA_SEC '(' parameter_list ')' ';'"""
        pass
    def p_data_start_empty(self, p):
        """data_start : DATA_SEC '(' ')' ';'
                      | DATA_SEC ';'"""
        pass
    def p_data_section(self, p):
        """data_section : data_start entity_instance_list ENDSEC"""
        p[0] = Section(p[2])
    def p_entity_instance_list(self, p):
        """entity_instance_list : entity_instance_list entity_instance
                                | empty"""
        try: p[0] = p[1] + [p[2],]
        except IndexError: pass # p[2] doesn't exist, p[1] is None
        except TypeError: p[0] = [p[2],] # p[1] is None, p[2] is valid
    def p_entity_instance(self, p):
        """entity_instance : simple_entity_instance
                           | complex_entity_instance"""
        p[0] = p[1]
    def p_simple_record_empty(self, p):
        """simple_record : keyword '(' ')'"""
        p[0] = (p[1], [])
    def p_simple_record_with_params(self, p):
        """simple_record : keyword '(' parameter_list ')'"""
        p[0] = (p[1], p[3])
    def p_simple_record_list(self, p):
        """simple_record_list : simple_record_list simple_record
                              | simple_record"""
        try: p[0] = p[1] + [p[2],]
        except IndexError: p[0] = [p[1],]
    def p_empty(self, p):
        """empty :"""
        pass
def test_debug():
    """Parse the sample STEP file with debug logging; return (parser, result)."""
    logging.basicConfig()
    logger.setLevel(logging.DEBUG)
    # Close the sample file promptly instead of leaking the handle.
    with open('io1-tu-203.stp', 'r') as f:
        s = f.read()
    parser = Parser()
    r = None  # stays None when parsing aborts via sys.exit()
    try:
        r = parser.parse(s, debug=1)
    except SystemExit:
        # BUG FIX: ``r`` was previously unbound here, so the intended
        # soft-failure became an UnboundLocalError on the return below.
        pass
    return (parser, r)
def test():
    """Parse the sample STEP file quietly; return (parser, result)."""
    logging.basicConfig()
    logger.setLevel(logging.ERROR)
    # Close the sample file promptly instead of leaking the handle.
    with open('io1-tu-203.stp', 'r') as f:
        s = f.read()
    parser = Parser()
    r = None  # stays None when parsing aborts via sys.exit()
    try:
        r = parser.parse(s)
    except SystemExit:
        # BUG FIX: ``r`` was previously unbound here, so the intended
        # soft-failure became an UnboundLocalError on the return below.
        pass
    return (parser, r)
# Run the quiet self-test when executed as a script.
if __name__ == '__main__':
    test()
|
#!/usr/bin/env python
#
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A tool for constructing class loader context."""
from __future__ import print_function
import argparse
import sys
from manifest import compare_version_gt
def parse_args(args):
    """Parse commandline arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--target-sdk-version', default='', dest='sdk',
        help='specify target SDK version (as it appears in the manifest)')
    arg_parser.add_argument(
        '--host-context-for-sdk', dest='host_contexts',
        action='append', nargs=2, metavar=('sdk','context'),
        help='specify context on host for a given SDK version or "any" version')
    arg_parser.add_argument(
        '--target-context-for-sdk', dest='target_contexts',
        action='append', nargs=2, metavar=('sdk','context'),
        help='specify context on target for a given SDK version or "any" version')
    return arg_parser.parse_args(args)
# Special keyword that means that the context should be added to class loader
# context regardless of the target SDK version.
any_sdk = 'any'

# We assume that the order of context arguments passed to this script is
# correct (matches the order computed by package manager). It is possible to
# sort them here, but Soong needs to use deterministic order anyway, so it can
# as well use the correct order.
def construct_context(versioned_contexts, target_sdk):
    """Select the contexts applying to ``target_sdk``, preserving input order."""
    return [ctx for sdk, ctx in versioned_contexts
            if sdk == any_sdk or compare_version_gt(sdk, target_sdk)]
def construct_contexts(args):
    """Build the combined host/target class-loader-context argument string."""
    host_context = construct_context(args.host_contexts, args.sdk)
    target_context = construct_context(args.target_contexts, args.sdk)
    context_sep = '#'
    host_part = 'class_loader_context_arg=--class-loader-context=PCL[]{%s} ; ' % context_sep.join(host_context)
    target_part = 'stored_class_loader_context_arg=--stored-class-loader-context=PCL[]{%s}' % context_sep.join(target_context)
    return host_part + target_part
def main():
    """Program entry point."""
    try:
        args = parse_args(sys.argv[1:])
        # Reject missing mandatory inputs up front (SystemExit is not an
        # Exception subclass, so it propagates past the handler below).
        required = ((args.sdk, 'target sdk version is not set'),
                    (args.host_contexts, 'host context is not set'),
                    (args.target_contexts, 'target context is not set'))
        for value, message in required:
            if not value:
                raise SystemExit(message)
        print(construct_contexts(args))
    # pylint: disable=broad-except
    except Exception as err:
        print('error: ' + str(err), file=sys.stderr)
        sys.exit(-1)
# Script entry point.
if __name__ == '__main__':
    main()
|
import base64
import requests
import urllib3
from urllib.parse import quote,unquote
# Silence the InsecureRequestWarning emitted for unverified HTTPS requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Account credentials: one [student_id, password] pair per user.
userAccounts = [
    # ['userStudentId', 'password'],
    ['123456', 'password']
]
def func(username, pwd):
    """Perform one daily check-in for the given account.

    Flow: GET the portal to obtain the session cookie and token, POST
    base64-encoded credentials to log in, then POST the fixed check-in form.
    """
    # Headers prepared for the initialisation request.
    # NOTE(review): ``h1`` is built but never passed to ``requests.get``
    # below — confirm whether the first request should send these headers.
    url1 = 'http://xgxt.bjut.edu.cn/'
    h1 = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Connection': 'keep-alive',
        'Host': 'xgxt.bjut.edu.cn',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/85.0.4183.83 Safari/537.36 '
    }
    # Initial GET to receive the Set-Cookie header carrying JSESSIONID + token.
    r1 = requests.get(url=url1)
    print('r1状态码:', r1.status_code)
    setcookie = r1.headers['Set-Cookie']
    print(setcookie)
    print('r1cookie:', setcookie)
    # The session id is everything before the first ';'.
    strJSID = setcookie[:setcookie.index(';')]
    # The login token is embedded in the second Set-Cookie entry.
    token = setcookie[setcookie.index(',') + 8:setcookie.index('; Expires')]
    # Headers for the login request.
    url2 = 'http://xgxt.bjut.edu.cn//login/Login.htm'
    h2 = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Connection': 'keep-alive',
        'Cookie': strJSID,
        'Host': 'xgxt.bjut.edu.cn',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/85.0.4183.83 Safari/537.36 '
    }
    # Credentials are sent base64-encoded (not encrypted).
    u = base64.b64encode(username.encode('utf-8')).decode('utf-8')
    p = base64.b64encode(pwd.encode('utf-8')).decode('utf-8')
    loginData = {
        'username': u,
        'password': p,
        'verification': '',
        'token': token
    }
    # Login POST.
    r2 = requests.post(url=url2, headers=h2, data=loginData)
    print('\nr2状态码:', r2.status_code)
    print('r2headers:', r2.headers)
    # Headers for the check-in request.
    url3 = 'http://xgxt.bjut.edu.cn:80/syt/zzapply/operation.htm'
    h3 = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
        'Origin': 'http://xgxt.bjut.edu.cn',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, '
                      'like Gecko) Mobile/15E148 wxwork/3.1.16 MicroMessenger/7.0.1 Language/zh ColorScheme/Dark',
        'Referer': 'http://xgxt.bjut.edu.cn/webApp/xuegong/index.html',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Connection': 'keep-alive',
        'X-Requested-With': 'XMLHttpRequest',
        'Cookie': 'menuVisible=1; username='+username+'; ' + strJSID,
        'Host': 'xgxt.bjut.edu.cn',
        'Content-length': '1204'
    }
    # Fixed check-in payload; percent-encode, then restore the '=' separators.
    data3=quote('data={"xmqkb":{"id":"2c95de297d4f8bfa017d85f53d267613"},"c16":"在校且住宿","c17":"在京","c18":"低风险地区","c15":"无情况","c1":"已准确登记","location_address":"北京市朝阳区平乐园100号北京工业大学","type":"YQSJSB"}&msgUrl=syt/zzglappro/index.htm?type=yqsjsb&xmid=2c95de297d4f8bfa017d85f53d267613&multiSelectData={}')
    data3=data3.replace('%3D','=')
    # POST the check-in payload.
    r3 = requests.post(url=url3, headers=h3,
                       data=data3
                       )
    print(data3)
    print('\nr3状态码:', r3.status_code)
    # Response text: 'success' -> checked in, 'Applied today' -> already
    # checked in today, anything else -> failure.
    if r3.text == 'success':
        print('成功打卡')
    else:
        if r3.text == 'Applied today':
            print('今天已经打过卡')
        else:
            print('打卡失败')
            print(r3.text)
    r3.close()
# Runs the check-in for every configured account.
def process():
    """Iterate over ``userAccounts`` and perform the check-in for each."""
    for account in userAccounts:
        print('username:%s正在执行打卡' % (account[0]))
        func(account[0], account[1])
# Entry point: run the batch check-in.
if __name__ == '__main__':
    process()
|
"""Implementation of the difficulty command."""
from mcipc.rcon.client import Client
from mcipc.rcon.functions import parsed
from mcipc.rcon.response_types.difficulty import parse
from mcipc.rcon.types import Difficulty
__all__ = ['difficulty']
# pylint: disable=W0621
# ``parsed(parse)`` post-processes the raw RCON response text into the
# declared bool via the imported ``parse`` function.
@parsed(parse)
def difficulty(self: Client, difficulty: Difficulty) -> bool:
    """Sets the difficulty."""
    return self.run('difficulty', difficulty)
|
import argparse
import sys
from ttvduw import DocuPrinter, DataFeeder
from ttvduw_gui import TtvduwGui
def test():
    """Development helper: run one of the manual test entry points.

    The commented blocks below are alternative test targets kept for
    convenience; only the GUI test is currently enabled.
    """
    #########
    ## testing DataFeeder
    # from test_ttvduw import test_DataFeeder
    # test_DataFeeder()
    #########
    ## testing DocuPrinter
    # from test_ttvduw import test_DocuPrinter
    # test_DocuPrinter()
    #########
    # from test_ttvduw import test_all_base
    # test_all_base()
    #########
    from test_ttvduw import test_gui
    test_gui()
def main():
    """Entry point: CLI mode when arguments are given, GUI mode otherwise."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--template', required=True, help='模板文件。docx格式')
    parser.add_argument('-f', '--data-feeder-file', required=True, help='键值数据表文件。目前只支持xlsx')
    parser.add_argument('-o', '--out-path', help='输出目录。如果不提供则根据 -t 指定的模板文件名生成')
    parser.add_argument('--tab-start-from-row', type=int, default=1, help='键值数据表文件从第几行开始有数据(default: 1)')
    parser.add_argument('--tab-start-from-col', type=int, default=1, help='键值数据表文件从第几列开始有数据(default: 1)')
    parser.add_argument('--custom-out-names-with-keys', nargs='+', help='使用哪些键的值作为输出文件名')
    if len(sys.argv) > 1:
        # command line mode: render one document per row of the data table.
        args = parser.parse_args()
        the_doc = DocuPrinter(args.template, out_path=args.out_path)
        with DataFeeder(args.data_feeder_file,
                        tab_start_from_row=args.tab_start_from_row,
                        tab_start_from_col=args.tab_start_from_col,
                        ) as df:
            for c in df.context_feed():
                the_doc.set_context(c)
                the_doc.write(keys=args.custom_out_names_with_keys)
    else:
        # GUI mode: no CLI arguments given, fall back to the Tk application.
        ttvduw_app = TtvduwGui()
        ttvduw_app.mainloop()
if __name__ == '__main__':
    # test()
    main()
    print('Done')  # signal completion on the console
import unittest
from anchore_engine.util.rpm import compare_versions
class TestRpmVersionHandling(unittest.TestCase):
    """
    Tests for version comparisons of rpm package versions maintained in anchore engine.
    Note that this does not exactly confirm the official rpm spec
    """
    def test_version_comparison(self):
        """Each case is (left, right, expected comparison sign)."""
        # Cases exercising epoch prefixes ("N:version-release").
        test_epoch = [
            ('1:0', '0:1', 1),
            ('1:0', '1', -1),
            ('1:2', '1', 1),
            ('2:4.19.01-1.el7_5', '4.19.1-1.el7_5', 0),
            ('4.19.01-1.el7_5', '2:4.19.1-1.el7_5', 0),
            ('0:4.19.1-1.el7_5', '2:4.19.1-1.el7_5', -1),
            ('4.19.0-1.el7_5', '12:4.19.0-1.el7', 1),
            ('3:4.19.0-1.el7_5', '4.21.0-1.el7', -1),
            ('4:1.2.3-3-el7_5', '1.2.3-el7_5~snapshot1', 1)
        ]
        # Cases without an epoch, including tilde pre-release ordering.
        test_no_epoch = [
            ('1', '1', 0),
            ('4.19.0a-1.el7_5', '4.19.0c-1.el7', -1),
            ('4.19.0-1.el7_5', '4.21.0-1.el7', -1),
            ('4.19.01-1.el7_5', '4.19.10-1.el7_5', -1),
            ('4.19.0-1.el7_5', '4.19.0-1.el7', 1),
            ('4.19.0-1.el7_5', '4.17.0-1.el7', 1),
            ('4.19.01-1.el7_5', '4.19.1-1.el7_5', 0),
            ('4.19.1-1.el7_5', '4.19.1-01.el7_5', 0),
            ('4.19.1', '4.19.1', 0),
            ('1.2.3-el7_5~snapshot1', '1.2.3-3-el7_5', -1)
        ]
        for case in test_epoch + test_no_epoch:
            left, right, expected = case
            self.assertEqual(expected, compare_versions(left, right),
                             'comparison between {} and {}'.format(left, right))
            print('Tested: {}'.format(case))
|
"""Class decorators and other helpers."""
import numpy as np
import inspect
from vectorbt.utils import checks
from vectorbt.utils.config import merge_dicts
def get_kwargs(func):
    """Get names and default values of keyword arguments from the signature of `func`."""
    defaults = {}
    for name, param in inspect.signature(func).parameters.items():
        # Only parameters with an explicit default qualify.
        if param.default is not inspect.Parameter.empty:
            defaults[name] = param.default
    return defaults
def add_nb_methods(nb_funcs, module_name=None):
    """Class decorator to wrap Numba functions methods of an accessor class.

    `nb_funcs` should contain tuples of Numba functions, whether they are reducing, and optionally `index_or_name`.

    Requires the instance to have attribute `wrapper` of type `vectorbt.base.array_wrapper.ArrayWrapper`."""
    def wrapper(cls):
        for info in nb_funcs:
            checks.assert_type(info, tuple)
            if len(info) == 3:
                nb_func, is_reducing, name_or_index = info
            elif len(info) == 2:
                nb_func, is_reducing = info
                name_or_index = None
            else:
                raise ValueError("Each tuple should have either length 2 or 3")

            def nb_method(self,
                          *args,
                          _nb_func=nb_func,
                          _is_reducing=is_reducing,
                          _name_or_index=name_or_index,
                          wrap_kwargs=None,
                          **kwargs):
                # BUG FIX: use the early-bound `_nb_func` here, not the loop
                # variable `nb_func`: closures capture loop variables by
                # reference, so after the loop every generated method would
                # otherwise read the defaults of the LAST function in
                # `nb_funcs`.
                default_kwargs = get_kwargs(_nb_func)
                wrap_kwargs = merge_dicts({}, wrap_kwargs)
                if '_1d' in _nb_func.__name__:
                    # One-dimensional array as input
                    a = _nb_func(self.to_1d_array(), *args, **{**default_kwargs, **kwargs})
                else:
                    # Two-dimensional array as input
                    a = _nb_func(self.to_2d_array(), *args, **{**default_kwargs, **kwargs})
                if _is_reducing:
                    return self.wrapper.wrap_reduced(a, name_or_index=_name_or_index, **wrap_kwargs)
                return self.wrapper.wrap(a, **wrap_kwargs)

            # Replace the function's signature with the original one
            # (here the per-iteration use of `nb_func` is safe: this code
            # runs inside the loop, not in the deferred closure).
            sig = inspect.signature(nb_func)
            self_arg = tuple(inspect.signature(nb_method).parameters.values())[0]
            sig = sig.replace(parameters=(self_arg,) + tuple(sig.parameters.values())[1:])
            nb_method.__signature__ = sig
            if module_name is not None:
                nb_method.__doc__ = f"See `{module_name}.{nb_func.__name__}`"
            else:
                nb_method.__doc__ = f"See `{nb_func.__name__}`"
            setattr(cls, nb_func.__name__.replace('_1d', '').replace('_nb', ''), nb_method)
        return cls
    return wrapper
def add_binary_magic_methods(np_funcs, translate_func):
    """Class decorator to add binary magic methods using NumPy to the class."""
    def wrapper(cls):
        for method_name, ufunc in np_funcs:
            # Bind the current ufunc as a default argument so each generated
            # method keeps its own function (avoids the late-binding trap).
            def magic_func(self, other, np_func=ufunc):
                return translate_func(self, other, np_func)
            setattr(cls, method_name, magic_func)
        return cls
    return wrapper
def add_unary_magic_methods(np_funcs, translate_func):
    """Class decorator to add unary magic methods using NumPy to the class."""
    def wrapper(cls):
        for method_name, ufunc in np_funcs:
            # Default-argument binding keeps each method's own ufunc.
            def magic_func(self, np_func=ufunc):
                return translate_func(self, np_func)
            setattr(cls, method_name, magic_func)
        return cls
    return wrapper
# (method name, NumPy ufunc) pairs consumed by add_binary_magic_methods;
# reflected variants swap the operand order via small lambdas.
binary_magic_methods = [
    # comparison ops
    ('__eq__', np.equal),
    ('__ne__', np.not_equal),
    ('__lt__', np.less),
    ('__gt__', np.greater),
    ('__le__', np.less_equal),
    ('__ge__', np.greater_equal),
    # arithmetic ops
    ('__add__', np.add),
    ('__sub__', np.subtract),
    ('__mul__', np.multiply),
    ('__pow__', np.power),
    ('__mod__', np.mod),
    ('__floordiv__', np.floor_divide),
    ('__truediv__', np.true_divide),
    ('__radd__', lambda x, y: np.add(y, x)),
    ('__rsub__', lambda x, y: np.subtract(y, x)),
    ('__rmul__', lambda x, y: np.multiply(y, x)),
    ('__rpow__', lambda x, y: np.power(y, x)),
    ('__rmod__', lambda x, y: np.mod(y, x)),
    ('__rfloordiv__', lambda x, y: np.floor_divide(y, x)),
    ('__rtruediv__', lambda x, y: np.true_divide(y, x)),
    # mask ops
    ('__and__', np.bitwise_and),
    ('__or__', np.bitwise_or),
    ('__xor__', np.bitwise_xor),
    ('__rand__', lambda x, y: np.bitwise_and(y, x)),
    ('__ror__', lambda x, y: np.bitwise_or(y, x)),
    ('__rxor__', lambda x, y: np.bitwise_xor(y, x))
]
# (method name, NumPy ufunc) pairs consumed by add_unary_magic_methods.
unary_magic_methods = [
    ('__neg__', np.negative),
    ('__pos__', np.positive),
    ('__abs__', np.absolute),
    ('__invert__', np.invert)
]
|
# Generated by Django 3.2.5 on 2021-07-21 10:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ``students`` table with a
    # one-to-one link to ``users.user`` as its primary key.  Edit with care —
    # applied migrations must stay immutable.
    initial = True
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='users.user')),
                ('form', models.IntegerField()),
                ('enrollment_year', models.IntegerField()),
            ],
            options={
                'db_table': 'students',
            },
        ),
    ]
|
import time
from zxcvbn.matching import omnimatch
from zxcvbn.scoring import minimum_entropy_match_sequence
def password_strength(password, user_inputs=None):
    """Score ``password`` with zxcvbn and report the calculation time.

    :param password: password string to evaluate.
    :param user_inputs: optional list of user-specific words (name, e-mail,
        ...) treated as weak.  BUG FIX: the former mutable default ``[]``
        was shared across calls; ``None`` now stands in for an empty list.
    :return: zxcvbn result dict with ``calc_time`` (seconds) added.
    """
    if user_inputs is None:
        user_inputs = []
    start = time.time()
    matches = omnimatch(password, user_inputs)
    result = minimum_entropy_match_sequence(password, matches)
    result['calc_time'] = time.time() - start
    return result
|
# -*- coding: utf-8 -*-
# properties.py
#
# Copyright 2019 Hiroshi Miura <miurahr@linux.com>
from __future__ import unicode_literals
import os
import pkg_resources
# This class is Borg: all instances share one state dict.
class Configurations(object):
    """Shared configuration holding the bundled dictionary filenames."""
    _shared_state = {}  # single state dict shared by every instance (Borg)
    data_path = pkg_resources.resource_filename(__name__, 'data')
    jisyo_hepburn_hira = 'hepburnhira3.db'
    jisyo_passport_hira = 'passporthira3.db'
    jisyo_kunrei_hira = 'kunreihira3.db'
    jisyo_itaiji = 'itaijidict3.db'
    jisyo_kanwa = 'kanwadict4.db'
    jisyo_hepburn = 'hepburndict3.db'
    jisyo_passport = 'passportdict3.db'
    jisyo_kunrei = 'kunreidict3.db'
    def __new__(cls, *p, **k):
        # BUG FIX: object.__new__ must be called WITHOUT the extra
        # arguments — forwarding *p/**k raises TypeError on Python 3 when
        # the class does not override __init__.
        self = object.__new__(cls)
        self.__dict__ = cls._shared_state
        return self
    def dictpath(self, dbfile):
        """Return the absolute path of ``dbfile`` inside the bundled data dir."""
        return os.path.join(self.data_path, dbfile)
class Ch:
    """Unicode code-point constants used for character classification."""
    # ASCII range markers
    space = 0x20
    at_mark = 0x40
    alphabet_A = 0x41
    alphabet_Z = 0x5a
    square_bra = 0x5b
    back_quote = 0x60
    alphabet_a = 0x61
    alphabet_z = 0x7a
    bracket_bra = 0x7b
    tilda = 0x7e
    delete = 0x7f
    # CJK symbols and punctuation (U+3000 block)
    ideographic_space = 0x3000
    postal_mark_face = 0x3020
    wavy_dash = 0x3030
    ideographic_half_fill_space = 0x303f
    # Greek letters (Greek and Coptic block, U+0391-U+03C9)
    greece_Alpha = 0x0391
    # BUG FIX: these were 0x30a1/0x30a3 (Katakana block), which broke the
    # Alpha <= Rho < Sigma <= Omega ordering used for range checks.
    greece_Rho = 0x03a1
    greece_Sigma = 0x03a3
    greece_Omega = 0x03a9
    greece_alpha = 0x03b1
    greece_omega = 0x03c9
    # Cyrillic letters
    cyrillic_A = 0x0410
    cyrillic_E = 0x0401
    cyrillic_e = 0x0451
    cyrillic_ya = 0x044f
    # Fullwidth (zenkaku) forms (U+FF00 block)
    zenkaku_exc_mark = 0xff01
    zenkaku_slash_mark = 0xff0f
    zenkaku_number_zero = 0xff10
    # NOTE(review): 0xff1a is one past fullwidth '9' (0xff19) — presumably an
    # exclusive upper bound; confirm against comparison sites before changing.
    zenkaku_number_nine = 0xff1a
    zenkaku_A = 0xff21
    zenkaku_a = 0xff41
    # Code points that may terminate a clause/sentence.
    endmark = [ord(a) for a in [")", "]", "!", ",", ".", u'\u3001', u'\u3002']]
class Convert_Tables():
    '''
    convert symbols to alphabet
    based on Original KAKASI's EUC_JP - alphabet converter table
    --------------------------------------------------------------------------
    a1 a0 | 、 。 , . ・ : ; ? ! ゛ ゜ ´ ` ¨
     " ",",",".",",",".",".",":",";","?",
     "!","\"","(maru)","'","`","..",
    a1 b0 | ^  ̄ _ ヽ ヾ ゝ ゞ 〃 仝 々 〆 〇 ー ― ‐ /
     "~","~","_","(kurikaesi)","(kurikaesi)","(kurikaesi)",
     "(kurikaesi)","(kurikaesi)","(kurikaesi)","(kurikaesi)",
     "sime","(maru)","^","-","-","/",
    a1 c0 | \ ~ ∥ | … ‥ ‘ ’ “ ” ( ) 〔 〕 [ ]
     "\\","~","||","|","...","..","`","'","\"","\"","(",")","[","]","[","]",
     "{","}","<",">","<<",">>","(",")","(",")","(",")","+","-","+-","X",
    a1 d0 | { } 〈 〉 《 》 「 」 『 』 【 】 + - ± ×
    a1 e0 | ÷ = ≠ < > ≦ ≧ ∞ ∴ ♂ ♀ ° ′ ″ ℃ ¥
     "/","=","!=","<",">","<=",">=","(kigou)","...",
     "(osu)","(mesu)","(do)","'","\"","(Sessi)","\\",
    a1 f0 | $ ¢ £ % # & * @ § ☆ ★ ○ ● ◎ ◇
     "$","(cent)","(pound)","%","#","&","*","@",
     "(setu)","(hosi)","(hosi)","(maru)","(maru)","(maru)","(diamond)"
    ---------------------------------------------------------------------------
    ----------------------------------------------------------
    a2 a0 | ◆ □ ■ △ ▲ ▽ ▼ ※ 〒 → ← ↑ ↓ 〓
    a2 b0 | ∈ ∋ ⊆ ⊇ ⊂ ⊃ a2 c0 | ∪ ∩ ∧ ∨ ¬ ⇒ ⇔ ∀
    a2 d0 | ∃ ∠ ⊥ ⌒ ∂
    a2 e0 | ∇ ≡ ≒ ≪ ≫ √ ∽ ∝ ∵ ∫ ∬
    a2 f0 | Å ‰ ♯ ♭ ♪ † ‡ ¶ ◯
    ----------------------------------------------------------
    Greek conversion table
    ----------------------------------------------------------
    "Alpha", "Beta", "Gamma", "Delta", "Epsilon", "Zeta", "Eta", "Theta",
    "Iota", "Kappa", "Lambda", "Mu", "Nu", "Xi", "Omicron", "Pi", "Rho",
    "Sigma", "Tau", "Upsilon", "Phi", "Chi", "Psi", "Omega",
    "", "", "", "", "", "", "", "",
    "alpha", "beta", "gamma", "delta", "epsilon", "zeta", "eta", "theta",
    "iota", "kappa", "lambda", "mu", "nu", "xi", "omicron", "pi", "rho",
    "sigma", "tau", "upsilon", "phi", "chi", "psi", "omega"
    ----------------------------------------------------------
    '''
    # Each table below is indexed by (code point - block start); None marks
    # code points that have no romanized replacement.
    # U3000 - 301F
    # \u3000、。〃〄〇〆々〈〉《》「」『』【】〒〓〔〕〖〗〘〙
    # 〚〛〜〝〞〟〠
    symbol_table_1 = [" ", ",", ".", '"', "(kigou)", "(kurikaesi)", "(sime)", "(maru)", "<", ">",
                      "<<", ">>", "(", ")", "(", ")", "(", ")", "(kigou)", "(geta)",
                      "(", ")", "(", ")", "(", ")", "(", ")", "~", "(kigou)", "\"",
                      "(kigou)", "(kigou)"]
    # U3030 - 3040
    # 〰〱〲〳〴〵〶〷〼〽〾〿
    symbol_table_2 = ["-", "(kurikaesi)",
                      "(kurikaesi)", "(kurikaesi)", "(kurikaesi)", "(kurikaesi)",
                      "(kigou)", "XX", None, None, None, None, "(masu)", "(kurikaesi)", " ", " "]
    # U0391-03A9 (uppercase Greek; None at U+03A2, which is unassigned)
    symbol_table_3 = ["Alpha", "Beta", "Gamma", "Delta", "Epsilon", "Zeta", "Eta", "Theta",
                      "Iota", "Kappa", "Lambda", "Mu", "Nu", "Xi", "Omicron", "Pi", "Rho", None,
                      "Sigma", "Tau", "Upsilon", "Phi", "Chi", "Psi", "Omega"]
    # U03B1-03C9 (lowercase Greek, including final sigma at U+03C2)
    symbol_table_4 = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "eta", "theta",
                      "iota", "kappa", "lambda", "mu", "nu", "xi", "omicron", "pi", "rho", "final sigma",
                      "sigma", "tau", "upsilon", "phi", "chi", "psi", "omega"]
    # UFF01-FF0F (fullwidth ASCII punctuation)
    symbol_table_5 = ["!", "\"", "#", "$", "%", "&", "'", "(", ")", "*", "+",
                      ",", "-", ".", "/"]
    # cyrillic to Latin transliteration
    cyrillic_table = {  # basic cyrillic characters
        '\u0410': 'A', '\u0411': 'B', '\u0412': 'V',  # АБВ
        '\u0413': 'G', '\u0414': 'D', '\u0415': 'E',  # ГДЕ
        '\u0401': 'E', '\u0416': 'Zh', '\u0417': 'Z',  # ЁЖЗ
        '\u0418': 'I', '\u0419': 'Y', '\u041a': 'K',  # ИЙК
        '\u041b': 'L', '\u041c': 'M', '\u041d': 'N',  # ЛМН
        '\u041e': 'O', '\u041f': 'P', '\u0420': 'R',  # ОПР
        '\u0421': 'S', '\u0422': 'T', '\u0423': 'U',  # СТУ
        '\u0424': 'F', '\u0425': 'H', '\u0426': 'Ts',  # ФХЦ
        '\u0427': 'Ch', '\u0428': 'Sh', '\u0429': 'Sch',  # ЧШЩ
        '\u042a': '', '\u042b': 'Y', '\u042c': '',  # ЪЫЬ
        '\u042d': 'E', '\u042e': 'Yu', '\u042f': 'Ya',  # ЭЮЯ
        '\u0430': 'a', '\u0431': 'b', '\u0432': 'v',  # абв
        '\u0433': 'g', '\u0434': 'd', '\u0435': 'e',  # где
        '\u0451': 'e', '\u0436': 'zh', '\u0437': 'z',  # ёжз
        '\u0438': 'i', '\u0439': 'y', '\u043a': 'k',  # ийк
        '\u043b': 'l', '\u043c': 'm', '\u043d': 'n',  # лмн
        '\u043e': 'o', '\u043f': 'p', '\u0440': 'r',  # опр
        '\u0441': 's', '\u0442': 't', '\u0443': 'u',  # сту
        '\u0444': 'f', '\u0445': 'h', '\u0446': 'ts',  # фхц
        '\u0447': 'ch', '\u0448': 'sh', '\u0449': 'sch',  # чшщ
        '\u044a': '', '\u044b': 'y', '\u044c': '',  # ъыь
        '\u044d': 'e', '\u044e': 'yu', '\u044f': 'ya'  # эюя
    }
    # Fullwidth forms grouped by the ASCII ranges they mirror.
    alpha_table_1 = ["\u3000", "\uff01", "\uff02", "\uff03", "\uff04", "\uff05", "\uff06",
                     "\uff07", "\uff08", "\uff09", "\uff0a", "\uff0b", "\uff0c", "\uff0d",
                     "\uff0e", "\uff0f",  # !"#$%&'()*+,-./
                     "\uff10", "\uff11", "\uff12", "\uff13", "\uff14", "\uff15", "\uff16",
                     "\uff17", "\uff18", "\uff19",  # 0...9
                     "\uff1a", "\uff1b", "\uff1c", "\uff1d",
                     "\uff1e", "\uff1f", "\uff20"]  # :;<=>?@
    alpha_table_2 = ["\uff3b", "\uff3c", "\uff3d", "\uff3e", "\uff3f", "\uff40"]  # [\]^_`
    alpha_table_3 = ["\uff5b", "\uff5c", "\uff5d", "\uff5e"]  # {|}~
|
from my_enc_dec2 import encrypt, decrypt
KEY = 'small_key'
def test_encrypt():
    """Check encrypt() against known plaintext/ciphertext pairs for KEY."""
    for plaintext, expected in (('cat', 'WOV'), ('dog', 'X]I')):
        assert encrypt(plaintext, KEY) == expected
def test_decrypt():
    """Check decrypt() reverses the known ciphertexts back to plaintext."""
    for ciphertext, expected in (('WOV', 'cat'), ('X]I', 'dog')):
        assert decrypt(ciphertext, KEY) == expected
# Run both checks whenever this script is executed (or imported).
test_encrypt()
test_decrypt()
|
import h5py
import numpy as np
import scipy
from typing import List, Tuple
from tensorflow.keras.utils import to_categorical
# from ml4h.tensor_writer_ukbb import tensor_path
from ml4h.normalizer import ZeroMeanStd1, Standardize
from ml4h.tensormap.general import tensor_path
from ml4h.TensorMap import TensorMap, Interpretation, no_nans, make_range_validator
from ml4h.defines import ECG_REST_LEADS, ECG_REST_MEDIAN_LEADS, ECG_REST_AMP_LEADS, ECG_SEGMENTED_CHANNEL_MAP, ECG_CHAR_2_IDX
from ml4h.tensormap.general import get_tensor_at_first_date, normalized_first_date, pass_nan, build_tensor_from_file
from ml4h.metrics import weighted_crossentropy, ignore_zeros_logcosh
from ml4h.tensormap.ukb.demographics import age_in_years_tensor
_HRR_SENTINEL = -1000
# BIKE ECG
def _check_phase_full_len(hd5: h5py.File, phase: str):
    """Validate that the named bike-ECG phase ran for its full expected duration.

    Raises:
        ValueError: if *phase* is not a known phase name, or its recorded
            duration differs from the expected length (seconds).
    """
    measured = get_tensor_at_first_date(hd5, 'ecg_bike', f'{phase}_duration')
    expected_durations = {'pretest': 15, 'exercise': 360, 'rest': 60}
    if phase not in expected_durations:
        raise ValueError(f'Phase {phase} is not a valid phase.')
    if measured != expected_durations[phase]:
        raise ValueError(f'{phase} phase is not full length')
def _first_date_bike_recovery(tm: TensorMap, hd5: h5py.File, dependents=None):
    """Return the trailing (recovery) segment of the bike ECG, shaped for tm."""
    _check_phase_full_len(hd5, 'rest')
    full_signal = get_tensor_at_first_date(hd5, tm.path_prefix, tm.name)
    tail = full_signal[-tm.shape[0]:]
    return tail.reshape(tm.shape)
def _first_date_bike_pretest(tm: TensorMap, hd5: h5py.File, dependents=None):
    """Return the leading (pretest) segment of the bike ECG, shaped for tm."""
    _check_phase_full_len(hd5, 'pretest')
    full_signal = get_tensor_at_first_date(hd5, tm.path_prefix, tm.name)
    head = full_signal[:tm.shape[0]]
    return head.reshape(tm.shape)
def _first_date_hrr(tm: TensorMap, hd5: h5py.File, dependents=None):
    """Heart-rate recovery: max HR minus the final heart-rate trend sample."""
    _check_phase_full_len(hd5, 'rest')
    peak_hr = get_tensor_at_first_date(hd5, 'ecg_bike', 'max_hr')
    final_hr = get_tensor_at_first_date(hd5, 'ecg_bike', 'trend_heartrate')[-1]
    return peak_hr - final_hr
def _healthy_check(hd5):
    """Raise ValueError unless every phase is full length and peak load >= 60."""
    for stage in ('pretest', 'exercise', 'rest'):
        _check_phase_full_len(hd5, stage)
    peak_load = max(get_tensor_at_first_date(hd5, 'ecg_bike', 'trend_load'))
    if peak_load < 60:
        raise ValueError('Max load not high enough')
def _healthy_bike(tm: TensorMap, hd5: h5py.File, dependents=None):
    # Normalized first-date tensor, restricted to full-length "healthy" tests.
    _healthy_check(hd5)
    return normalized_first_date(tm, hd5)
def _healthy_hrr(tm: TensorMap, hd5: h5py.File, dependents=None):
    # HRR restricted to full-length "healthy" tests (see _healthy_check).
    _healthy_check(hd5)
    return _first_date_hrr(tm, hd5)
def _median_pretest(tm: TensorMap, hd5: h5py.File, dependents=None):
    """Median absolute trend value within the pretest window (first 15 seconds)."""
    _healthy_check(hd5)
    trend_times = get_tensor_at_first_date(hd5, 'ecg_bike', 'trend_time')
    values = get_tensor_at_first_date(hd5, tm.path_prefix, 'float_array', tm.name)
    magnitudes = np.abs(values)
    return np.median(magnitudes[trend_times <= 15])
def _new_hrr(tm: TensorMap, hd5: h5py.File, dependents=None):
    """HRR computed as max HR minus the minimum HR during the rest phase.

    Raises:
        ValueError: when the HR-achieved ratio or the HRR itself is rejected.
    """
    _check_phase_full_len(hd5, 'rest')
    heart_rates = get_tensor_at_first_date(hd5, 'ecg_bike', 'trend_heartrate')
    phase_labels = get_tensor_at_first_date(hd5, 'ecg_bike', 'trend_phasename')
    rest_min_hr = heart_rates[phase_labels == 2].min()  # phase 2 == rest
    peak_hr = get_tensor_at_first_date(hd5, 'ecg_bike', 'max_hr')
    predicted_peak = get_tensor_at_first_date(hd5, 'ecg_bike', 'max_pred_hr')
    recovery = peak_hr - rest_min_hr
    # NOTE(review): peak_hr / predicted_peak is a ratio near 1, so a threshold
    # of 150 looks unreachable — confirm whether 1.5 was intended.
    if peak_hr / predicted_peak > 150:
        raise ValueError('Max hr / max pred hr too high.')
    if recovery > 80:
        raise ValueError('HRR too high.')
    return recovery
def _sentinel_hrr(tm: TensorMap, hd5: h5py.File, dependents=None):
    # Best-effort HRR: fall back to the sentinel when _new_hrr rejects the test.
    try:
        return _new_hrr(tm, hd5)
    except ValueError:
        return _HRR_SENTINEL
def _hr_achieved(tm: TensorMap, hd5: h5py.File, dependents=None):
    """Fraction of the predicted maximum heart rate actually reached."""
    _check_phase_full_len(hd5, 'rest')
    achieved = get_tensor_at_first_date(hd5, 'ecg_bike', 'max_hr')
    predicted = get_tensor_at_first_date(hd5, 'ecg_bike', 'max_pred_hr')
    return achieved / predicted
def _warp_ecg(ecg):
i = np.arange(ecg.shape[0])
warped = i + (
np.random.rand() * 100 * np.sin(i / (500 + np.random.rand() * 100))
+ np.random.rand() * 100 * np.cos(i / (500 + np.random.rand() * 100))
)
warped_ecg = np.zeros_like(ecg)
for j in range(ecg.shape[1]):
warped_ecg[:, j] = np.interp(i, warped, ecg[:, j])
return warped_ecg
def _make_ecg_rest(
instance: int = 2, downsample_steps: int = 0,
short_time_nperseg: int = 0, short_time_noverlap: int = 0,
):
def ecg_rest_from_file(tm, hd5, dependents={}):
tensor = np.zeros(tm.shape, dtype=np.float32)
for k in hd5[tm.path_prefix]:
if k in tm.channel_map:
data = tm.hd5_first_dataset_in_group(
hd5, f'{tm.path_prefix}/{k}/instance_{instance}',
)
if short_time_nperseg > 0 and short_time_noverlap > 0:
f, t, short_time_ft = scipy.signal.stft(
data, nperseg=short_time_nperseg, noverlap=short_time_noverlap,
)
tensor[..., tm.channel_map[k]] = short_time_ft
elif downsample_steps > 1:
tensor[:, tm.channel_map[k]] = np.array(data, dtype=np.float32)[ ::downsample_steps]
tensor[:, tm.channel_map[k]] = data
return tensor
return ecg_rest_from_file
def _get_lead_cm(length):
lead_cm = {}
lead_weights = []
for i in range(length):
wave_val = i - (length//2)
lead_cm['w'+str(wave_val).replace('-', '_')] = i
lead_weights.append((np.abs(wave_val+1)/(length/2)) + 1.0)
return lead_cm, lead_weights
def _make_rhythm_tensor(skip_poor=True):
def rhythm_tensor_from_file(tm, hd5, dependents={}):
categorical_data = np.zeros(tm.shape, dtype=np.float32)
ecg_interpretation = str(
tm.hd5_first_dataset_in_group(
hd5, 'ukb_ecg_rest/ecg_rest_text/',
)[()],
)
if skip_poor and 'Poor data quality' in ecg_interpretation:
raise ValueError(f'Poor data quality skipped by {tm.name}.')
for channel in tm.channel_map:
if channel.replace('_', ' ') in ecg_interpretation:
categorical_data[tm.channel_map[channel]] = 1.0
return categorical_data
for rhythm in ['sinus', 'Sinus']:
if rhythm in ecg_interpretation:
categorical_data[tm.channel_map['Other_sinus_rhythm']] = 1.0
return categorical_data
categorical_data[tm.channel_map['Other_rhythm']] = 1.0
return categorical_data
return rhythm_tensor_from_file
def label_from_ecg_interpretation_text(tm, hd5, dependents={}):
    """One-hot encode a categorical label by scanning the ECG interpretation.

    The first channel-map phrase found in the report wins; when nothing
    matches, the 'no_<tm.name>' channel is set if present, otherwise a
    ValueError is raised.
    """
    one_hot = np.zeros(tm.shape, dtype=np.float32)
    report = str(
        tm.hd5_first_dataset_in_group(hd5, 'ukb_ecg_rest/ecg_rest_text/')[()],
    )
    for label in tm.channel_map:
        if label in report:
            one_hot[tm.channel_map[label]] = 1.0
            return one_hot
    negative_label = 'no_' + tm.name
    if negative_label not in tm.channel_map:
        raise ValueError(
            f"ECG categorical interpretation could not find any of these keys: {tm.channel_map.keys()}",
        )
    one_hot[tm.channel_map[negative_label]] = 1.0
    return one_hot
# Extract RAmplitude and SAmplitude for LVH criteria
def _make_ukb_ecg_rest(population_normalize: float = None):
    """Build a tensor_from_file callback for R/S amplitude tensors.

    Args:
        population_normalize: If given, divide amplitudes by this constant;
            otherwise apply per-tensor zero-mean/std-1 normalization.

    Returns:
        A callback (tm, hd5, dependents) -> normalized amplitude tensor.
        The callback raises ValueError when the amplitudes group is absent
        or normalization fails.
    """
    def ukb_ecg_rest_from_file(tm, hd5, dependents={}):
        if 'ukb_ecg_rest' not in hd5:
            raise ValueError(
                'Group with R and S amplitudes not present in hd5',
            )
        tensor = get_tensor_at_first_date(
            hd5, tm.path_prefix, tm.name, pass_nan,
        )
        try:
            if population_normalize is None:
                tensor = tm.zero_mean_std1(tensor)
            else:
                tensor /= population_normalize
        except Exception as e:
            # BUG FIX: the bare `except:` used to construct the ValueError but
            # never raise it, silently returning an unnormalized tensor.
            raise ValueError(f'Cannot normalize {tm.name}') from e
        return tensor
    return ukb_ecg_rest_from_file
def _make_ukb_ecg_rest_lvh():
    """Build a tensor_from_file callback that one-hot encodes an LVH criterion.

    The criterion is selected by tm.name ('avl_lvh', 'sokolow_lyon_lvh' or
    'cornell_lvh'); output index 1 means LVH positive, index 0 negative.
    Raises ValueError when amplitudes or sex information are missing.
    """
    def ukb_ecg_rest_lvh_from_file(tm, hd5, dependents={}):
        # Lead order seems constant and standard throughout, but we could eventually tensorize it from XML
        lead_order = ECG_REST_AMP_LEADS
        # Voltage thresholds for each criterion, in the stored amplitude units.
        avl_min = 1100.0
        sl_min = 3500.0
        cornell_female_min = 2000.0
        cornell_male_min = 2800.0
        if 'ukb_ecg_rest' not in hd5:
            raise ValueError(
                'Group with R and S amplitudes not present in hd5',
            )
        tensor_ramp = get_tensor_at_first_date(
            hd5, tm.path_prefix, 'ramplitude', pass_nan,
        )
        tensor_samp = get_tensor_at_first_date(
            hd5, tm.path_prefix, 'samplitude', pass_nan,
        )
        criteria_sleads = [lead_order[l] for l in ['V1', 'V3']]
        criteria_rleads = [lead_order[l] for l in ['aVL', 'V5', 'V6']]
        # union1d just pools all required readings for a single NaN check.
        if np.any(np.isnan(np.union1d(tensor_ramp[criteria_rleads], tensor_samp[criteria_sleads]))):
            raise ValueError(
                'Missing some of the R and S amplitude readings needed to evaluate LVH criteria',
            )
        is_female = 'Genetic-sex_Female_0_0' in hd5['categorical']
        is_male = 'Genetic-sex_Male_0_0' in hd5['categorical']
        # If genetic sex not available, try phenotypic
        if not(is_female or is_male):
            is_female = 'Sex_Female_0_0' in hd5['categorical']
            is_male = 'Sex_Male_0_0' in hd5['categorical']
        # If neither available, raise error
        if not(is_female or is_male):
            raise ValueError('Sex info required to evaluate LVH criteria')
        if tm.name == 'avl_lvh':
            is_lvh = tensor_ramp[lead_order['aVL']] > avl_min
        elif tm.name == 'sokolow_lyon_lvh':
            is_lvh = tensor_samp[lead_order['V1']] +\
                np.maximum(tensor_ramp[lead_order['V5']], tensor_ramp[lead_order['V6']]) > sl_min
        elif tm.name == 'cornell_lvh':
            # Cornell voltage is sex-specific: compute the sum first, then
            # compare against the female or male threshold.
            is_lvh = tensor_ramp[lead_order['aVL']] + \
                tensor_samp[lead_order['V3']]
            if is_female:
                is_lvh = is_lvh > cornell_female_min
            if is_male:
                is_lvh = is_lvh > cornell_male_min
        else:
            raise ValueError(
                f'{tm.name} criterion for LVH is not accounted for',
            )
        # Following convention from categorical TMAPS, positive has cmap index 1
        tensor = np.zeros(tm.shape, dtype=np.float32)
        index = 0
        if is_lvh:
            index = 1
        tensor[index] = 1.0
        return tensor
    return ukb_ecg_rest_lvh_from_file
def _ecg_rest_to_segment(population_normalize=None, hertz=500, random_offset_seconds=0):
    """Build a tensor_from_file callback pairing an ECG section with its
    per-sample segmentation labels (delivered via *dependents*).

    Args:
        population_normalize: If set, divide the signal by this constant;
            otherwise request zero-mean/std-1 normalization on tm.
        hertz: Sampling rate used to convert second offsets into samples.
        random_offset_seconds: If > 0, jitter the section start randomly.
    """
    def ecg_rest_section_to_segment(tm, hd5, dependents={}):
        tensor = np.zeros(tm.shape, dtype=np.float32)
        segmented = tm.dependent_map.hd5_first_dataset_in_group(
            hd5, tm.dependent_map.hd5_key_guess(),
        )
        # Stored offset (seconds) of the segmentation relative to the strip.
        offset_seconds = float(segmented.attrs['offset_seconds'])
        random_offset_samples = 0
        if random_offset_seconds > 0:
            # NOTE(review): np.random.uniform(x) binds low=x (high stays 1.0),
            # so this does NOT sample [0, x) — confirm whether
            # np.random.uniform(0, random_offset_seconds) was intended.
            random_offset_begin = np.random.uniform(random_offset_seconds)
            offset_seconds += random_offset_begin
            random_offset_samples = int(random_offset_begin * hertz)
        offset_begin = int(offset_seconds * hertz)
        segment_index = np.array(
            segmented[random_offset_samples:random_offset_samples+tm.dependent_map.shape[0]], dtype=np.float32,
        )
        # One-hot the integer segment labels for the dependent map.
        dependents[tm.dependent_map] = to_categorical(
            segment_index, tm.dependent_map.shape[-1],
        )
        for k in hd5[tm.path_prefix]:
            if k in tm.channel_map:
                tensor[:, tm.channel_map[k]] = np.array(hd5[tm.path_prefix][k], dtype=np.float32)[
                    offset_begin:offset_begin+tm.shape[0]
                ]
        if population_normalize is None:
            tm.normalization = {'zero_mean_std1': 1.0}
        else:
            tensor /= population_normalize
        return tensor
    return ecg_rest_section_to_segment
# --- Exercise (bike) ECG TensorMaps -------------------------------------------
ecg_bike_hrr = TensorMap(
    'hrr', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': 30.55, 'std': 12.81},
    tensor_from_file=_first_date_hrr,
)
ecg_bike_healthy_max_hr = TensorMap(
    'max_hr', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'],
    normalization={'mean': 113.7, 'std': 13.3}, shape=(1,),
    tensor_from_file=_healthy_bike,
)
ecg_bike_healthy_hrr = TensorMap(
    'hrr', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': 30.47, 'std': 11.76},
    tensor_from_file=_healthy_hrr,
)
ecg_bike_healthy_resting = TensorMap(
    'resting_hr', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': 70.0, 'std': 11.62},
    tensor_from_file=_healthy_bike,
)
# Median-of-pretest scalar maps (heart rate, ST amplitude, J-point, ST@20ms).
ecg_bike_med_pretest_hr = TensorMap(
    'trend_heartrate', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': 70., 'std': 11.},
    tensor_from_file=_median_pretest,
)
ecg_bike_med_pretest_stamp = TensorMap(
    'trend_stamplitude', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': .03, 'std': .03},
    tensor_from_file=_median_pretest,
)
ecg_bike_med_pretest_jpoint = TensorMap(
    'trend_jpointamplitude', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': .032, 'std': .46},
    tensor_from_file=_median_pretest,
)
ecg_bike_med_pretest_stamp20 = TensorMap(
    'trend_stamplitude20ms', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': .03, 'std': .03},
    tensor_from_file=_median_pretest,
)
ecg_bike_recovery = TensorMap(
    'full', shape=(30000, 1), path_prefix='ecg_bike', validator=no_nans,
    tensor_from_file=_first_date_bike_recovery,
)
ecg_bike_pretest = TensorMap(
    'full', shape=(500 * 15 - 4, 3), path_prefix='ecg_bike', validator=no_nans,
    normalization={
        'mean': np.array(
            [7, -7, 3.5],
        )[np.newaxis], 'std': np.array([31, 30, 16])[np.newaxis],
    },
    tensor_from_file=_first_date_bike_pretest,
)
ecg_bike_pretest_5k = TensorMap(
    'full', shape=(5000, 3), path_prefix='ecg_bike', validator=no_nans,
    normalization={
        'mean': np.array(
            [7, -7, 3.5],
        )[np.newaxis], 'std': np.array([31, 30, 16])[np.newaxis],
    },
    tensor_from_file=_first_date_bike_pretest,
)
ecg_bike_new_hrr = TensorMap(
    'hrr', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': 31, 'std': 12},
    tensor_from_file=_new_hrr,
)
ecg_bike_hrr_sentinel = TensorMap(
    'hrr', path_prefix='ecg_bike', metrics=['mae'], shape=(1,),
    normalization={'mean': 31, 'std': 12}, sentinel=_HRR_SENTINEL,
    tensor_from_file=_sentinel_hrr,
)
# Student/teacher target: HRR predictions read back from an inference TSV.
ecg_bike_hrr_student = TensorMap(
    'hrr', path_prefix='ecg_bike', metrics=['mae'], shape=(1,),
    normalization={'mean': 31, 'std': 12}, sentinel=_HRR_SENTINEL,
    tensor_from_file=build_tensor_from_file(
        'inference.tsv', 'ecg_bike_hrr-sentinel_prediction',
    ),
)
ecg_bike_hr_achieved = TensorMap(
    'hr_achieved', path_prefix='ecg_bike', loss='logcosh', metrics=['mae'], shape=(1,),
    normalization={'mean': .68, 'std': .1},
    tensor_from_file=_hr_achieved,
)
# --- Resting 12-lead ECG strip and median-beat TensorMaps ---------------------
ecg_rest_raw = TensorMap(
    'ecg_rest_raw', Interpretation.CONTINUOUS, shape=(5000, 12), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    channel_map=ECG_REST_LEADS, normalization=Standardize(mean=0, std=2000),
)
ecg_rest_raw_100 = TensorMap(
    'ecg_rest_raw_100', Interpretation.CONTINUOUS, shape=(5000, 12), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    channel_map=ECG_REST_LEADS, normalization=Standardize(mean=0, std=100),
)
ecg_rest_raw_10 = TensorMap(
    'ecg_rest_raw_10', Interpretation.CONTINUOUS, shape=(5000, 12), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    channel_map=ECG_REST_LEADS, normalization=Standardize(mean=0, std=10),
)
ecg_rest = TensorMap(
    'strip', Interpretation.CONTINUOUS, shape=(5000, 12), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    channel_map=ECG_REST_LEADS, normalization=ZeroMeanStd1(),
)
ecg_rest_2500_ukb = TensorMap(
    'ecg_rest_2500', Interpretation.CONTINUOUS, shape=(2500, 12), path_prefix='ukb_ecg_rest', channel_map=ECG_REST_LEADS,
    tensor_from_file=_make_ecg_rest(downsample_steps=2), normalization=ZeroMeanStd1(),
)
# Short-time Fourier transform variants of the resting strip.
ecg_rest_stft = TensorMap(
    'ecg_rest_stft', Interpretation.CONTINUOUS, shape=(33, 158, 12), path_prefix='ukb_ecg_rest', channel_map=ECG_REST_LEADS,
    tensor_from_file=_make_ecg_rest(short_time_nperseg=64, short_time_noverlap=32), normalization=ZeroMeanStd1()
)
ecg_rest_stft_512 = TensorMap(
    'ecg_rest_stft_512', shape=(257, 314, 12), path_prefix='ukb_ecg_rest', channel_map=ECG_REST_LEADS,
    tensor_from_file=_make_ecg_rest(short_time_nperseg=512, short_time_noverlap=496), normalization=ZeroMeanStd1(),
)
ecg_rest_stack = TensorMap(
    'strip', Interpretation.CONTINUOUS, shape=(600, 12, 8), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    channel_map=ECG_REST_LEADS, normalization=ZeroMeanStd1(),
)
ecg_rest_median_raw = TensorMap(
    'median', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(600, 12), loss='logcosh', activation='linear', tensor_from_file=_make_ecg_rest(),
    metrics=['mse', 'mae', 'logcosh'], channel_map=ECG_REST_MEDIAN_LEADS, normalization=Standardize(mean=0, std=2000),
)
ecg_rest_median = TensorMap(
    'median', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(600, 12), loss='logcosh', activation='linear', tensor_from_file=_make_ecg_rest(),
    metrics=['mse', 'mae', 'logcosh'], channel_map=ECG_REST_MEDIAN_LEADS, normalization=ZeroMeanStd1(),
)
ecg_rest_median_stack = TensorMap(
    'median', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(600, 12, 1), activation='linear', tensor_from_file=_make_ecg_rest(),
    metrics=['mse', 'mae', 'logcosh'], loss='logcosh', loss_weight=1.0,
    channel_map=ECG_REST_MEDIAN_LEADS, normalization=ZeroMeanStd1(),
)
# Single-lead variants; the 1-lead strips regress/classify their median beat.
ecg_median_1lead = TensorMap(
    'median', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(600, 1), loss='logcosh', loss_weight=10.0, tensor_from_file=_make_ecg_rest(),
    activation='linear', metrics=['mse', 'mae', 'logcosh'], channel_map={'lead': 0}, normalization=ZeroMeanStd1(),
)
ecg_rest_1lead = TensorMap(
    'strip', Interpretation.CONTINUOUS, shape=(600, 8), path_prefix='ukb_ecg_rest', channel_map={'lead': 0}, tensor_from_file=_make_ecg_rest(),
    dependent_map=ecg_median_1lead, normalization=ZeroMeanStd1(),
)
ecg_median_1lead_categorical = TensorMap(
    'median', Interpretation.CATEGORICAL, shape=(600, 32), activation='softmax', tensor_from_file=_make_ecg_rest(),
    channel_map=_get_lead_cm(32)[0], normalization=ZeroMeanStd1(),
    loss=weighted_crossentropy(
        np.array(_get_lead_cm(32)[1]), 'ecg_median_categorical',
    ),
)
ecg_rest_1lead_categorical = TensorMap(
    'strip', shape=(600, 8), path_prefix='ukb_ecg_rest', tensor_from_file=_make_ecg_rest(),
    normalization=ZeroMeanStd1(),
    channel_map={
        'window0': 0, 'window1': 1, 'window2': 2, 'window3': 3,
        'window4': 4, 'window5': 5, 'window6': 6, 'window7': 7,
    },
    dependent_map=ecg_median_1lead_categorical,
)
# --- Rhythm classification and age regression ---------------------------------
ecg_rhythm = TensorMap(
    'ecg_rhythm', Interpretation.CATEGORICAL, tensor_from_file=_make_rhythm_tensor(),
    loss=weighted_crossentropy([1.0, 2.0, 3.0, 3.0, 20.0, 20.0], 'ecg_rhythm'),
    channel_map={
        'Normal_sinus_rhythm': 0, 'Sinus_bradycardia': 1, 'Marked_sinus_bradycardia': 2,
        'Other_sinus_rhythm': 3, 'Atrial_fibrillation': 4, 'Other_rhythm': 5,
    },
)
# Same map, but poor-quality reports are labeled instead of skipped.
ecg_rhythm_poor = TensorMap(
    'ecg_rhythm', Interpretation.CATEGORICAL, tensor_from_file=_make_rhythm_tensor(False),
    loss=weighted_crossentropy(
        [1.0, 2.0, 3.0, 3.0, 20.0, 20.0], 'ecg_rhythm_poor',
    ),
    channel_map={
        'Normal_sinus_rhythm': 0, 'Sinus_bradycardia': 1, 'Marked_sinus_bradycardia': 2,
        'Other_sinus_rhythm': 3, 'Atrial_fibrillation': 4, 'Other_rhythm': 5,
    },
)
ecg_rest_age = TensorMap(
    'ecg_rest_age', Interpretation.CONTINUOUS, tensor_from_file=age_in_years_tensor('ecg_rest_date'), loss='logcosh',
    channel_map={'ecg_rest_age': 0}, validator=make_range_validator(0, 110), normalization={'mean': 65, 'std': 7.7},
)
# --- Labels parsed from the ECG interpretation text, plus amplitude/LVH maps --
acute_mi = TensorMap(
    'acute_mi', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text, channel_map={'no_acute_mi': 0, 'ACUTE MI': 1},
    loss=weighted_crossentropy([0.1, 10.0], 'acute_mi'),
)
anterior_blocks = TensorMap(
    'anterior_blocks', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_anterior_blocks': 0, 'Left anterior fascicular block': 1,
        'Left posterior fascicular block': 2,
    },
    loss=weighted_crossentropy([0.1, 10.0, 10.0], 'anterior_blocks'),
)
# NOTE(review): 'st degree AV block' substring-matches only '1st degree AV
# block' ('2nd'/'3rd' end in 'nd'/'rd') — confirm whether that is intended.
av_block = TensorMap(
    'av_block', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text, channel_map={'no_av_block': 0, 'st degree AV block': 1},
    loss=weighted_crossentropy([0.1, 10.0], 'av_block'),
)
incomplete_right_bundle_branch_block = TensorMap(
    'incomplete_right_bundle_branch_block', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_incomplete_right_bundle_branch_block': 0,
        'Incomplete right bundle branch block': 1,
    },
    loss=weighted_crossentropy(
        [0.1, 10.0], 'incomplete_right_bundle_branch_block',
    ),
)
infarcts = TensorMap(
    'infarcts', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_infarcts': 0, 'Anterior infarct': 1, 'Anteroseptal infarct': 2,
        'Inferior infarct': 3, 'Lateral infarct': 4, 'Septal infarct': 5,
    },
    loss=weighted_crossentropy([0.1, 4.0, 6.0, 7.0, 6.0, 4.0], 'infarcts'),
)
left_atrial_enlargement = TensorMap(
    'left_atrial_enlargement', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_left_atrial_enlargement': 0,
        'Left atrial enlargement': 1,
    },
    loss=weighted_crossentropy([0.1, 10.0], 'left_atrial_enlargement'),
)
left_ventricular_hypertrophy = TensorMap(
    'left_ventricular_hypertrophy', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_left_ventricular_hypertrophy': 0,
        'Left ventricular hypertrophy': 1,
    },
    loss=weighted_crossentropy([0.1, 10.0], 'left_ventricular_hypertrophy'),
)
lvh_fine = TensorMap(
    'lvh_fine', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text, loss=weighted_crossentropy([0.5, 12.0, 16.0, 30.0, 36.0], 'lvh_fine'),
    channel_map={
        'no_lvh_fine': 0, 'Minimal voltage criteria for LVH may be normal variant': 1,
        'Moderate voltage criteria for LVH may be normal variant': 2, 'Voltage criteria for left ventricular hypertrophy': 3,
        'Left ventricular hypertrophy': 4,
    },
)
premature_atrial_complexes = TensorMap(
    'premature_atrial_complexes', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_premature_atrial_complexes': 0,
        'premature atrial complexes': 1,
    },
    loss=weighted_crossentropy([0.1, 10.0], 'premature_atrial_complexes'),
)
premature_supraventricular_complexes = TensorMap(
    'premature_supraventricular_complexes', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_premature_supraventricular_complexes': 0,
        'premature supraventricular complexes': 1,
    },
    loss=weighted_crossentropy(
        [0.1, 10.0], 'premature_supraventricular_complexes',
    ),
)
premature_ventricular_complexes = TensorMap(
    'premature_ventricular_complexes', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text,
    channel_map={
        'no_premature_ventricular_complexes': 0,
        'premature ventricular complexes': 1,
    },
    loss=weighted_crossentropy([0.1, 10.0], 'premature_ventricular_complexes'),
)
prolonged_qt = TensorMap(
    'prolonged_qt', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text, channel_map={'no_prolonged_qt': 0, 'Prolonged QT': 1},
    loss=weighted_crossentropy([0.1, 10.0], 'prolonged_qt'),
)
# R and S amplitude tensors (raw uses population_normalize=1.0, i.e. identity).
ecg_rest_ramplitude_raw = TensorMap(
    'ramplitude', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(12,), tensor_from_file=_make_ukb_ecg_rest(1.0),
    loss='logcosh', metrics=['mse', 'mape', 'mae'], loss_weight=1.0,
)
ecg_rest_samplitude_raw = TensorMap(
    'samplitude', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(12,), tensor_from_file=_make_ukb_ecg_rest(1.0),
    loss='logcosh', metrics=['mse', 'mape', 'mae'], loss_weight=1.0,
)
ecg_rest_ramplitude = TensorMap(
    'ramplitude', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(12,), tensor_from_file=_make_ukb_ecg_rest(),
    loss='logcosh', metrics=['mse', 'mape', 'mae'], loss_weight=1.0,
)
ecg_rest_samplitude = TensorMap(
    'samplitude', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', shape=(12,), tensor_from_file=_make_ukb_ecg_rest(),
    loss='logcosh', metrics=['mse', 'mape', 'mae'], loss_weight=1.0,
)
ecg_rest_lvh_avl = TensorMap(
    'avl_lvh', Interpretation.CATEGORICAL, path_prefix='ukb_ecg_rest', tensor_from_file=_make_ukb_ecg_rest_lvh(),
    channel_map={'no_avl_lvh': 0, 'aVL LVH': 1},
    loss=weighted_crossentropy([0.006, 1.0], 'avl_lvh'),
)
ecg_rest_lvh_sokolow_lyon = TensorMap(
    'sokolow_lyon_lvh', Interpretation.CATEGORICAL, path_prefix='ukb_ecg_rest', tensor_from_file=_make_ukb_ecg_rest_lvh(),
    channel_map={'no_sokolow_lyon_lvh': 0, 'Sokolow Lyon LVH': 1},
    # NOTE(review): 'sokolov' (vs 'sokolow') looks like a typo in the loss name.
    loss=weighted_crossentropy([0.005, 1.0], 'sokolov_lyon_lvh'),
)
ecg_rest_lvh_cornell = TensorMap(
    'cornell_lvh', Interpretation.CATEGORICAL, path_prefix='ukb_ecg_rest', tensor_from_file=_make_ukb_ecg_rest_lvh(),
    channel_map={'no_cornell_lvh': 0, 'Cornell LVH': 1},
    loss=weighted_crossentropy([0.003, 1.0], 'cornell_lvh'),
)
# --- Segmentation maps: ECG sections paired with per-sample segment labels ----
ecg_segmented = TensorMap(
    'ecg_segmented', Interpretation.CATEGORICAL, shape=(1224, len(ECG_SEGMENTED_CHANNEL_MAP)), path_prefix='ecg_rest',
    cacheable=False, channel_map=ECG_SEGMENTED_CHANNEL_MAP,
)
ecg_section_to_segment = TensorMap(
    'ecg_section_to_segment', shape=(1224, 12), path_prefix='ecg_rest', dependent_map=ecg_segmented,
    channel_map=ECG_REST_LEADS, tensor_from_file=_ecg_rest_to_segment(),
)
# Same map with random time-warping augmentation applied during training.
ecg_section_to_segment_warp = TensorMap(
    'ecg_section_to_segment', shape=(1224, 12), path_prefix='ecg_rest', dependent_map=ecg_segmented,
    cacheable=False, channel_map=ECG_REST_LEADS, tensor_from_file=_ecg_rest_to_segment(),
    augmentations=[_warp_ecg],
)
ecg_segmented_second = TensorMap(
    'ecg_segmented', Interpretation.CATEGORICAL, shape=(496, len(ECG_SEGMENTED_CHANNEL_MAP)), path_prefix='ecg_rest',
    cacheable=False, channel_map=ECG_SEGMENTED_CHANNEL_MAP,
)
ecg_second_to_segment = TensorMap(
    'ecg_second_to_segment', shape=(496, 12), path_prefix='ecg_rest', dependent_map=ecg_segmented_second,
    cacheable=False, channel_map=ECG_REST_LEADS, tensor_from_file=_ecg_rest_to_segment(random_offset_seconds=1.5),
)
ecg_second_to_segment_warp = TensorMap(
    'ecg_second_to_segment', shape=(496, 12), path_prefix='ecg_rest', dependent_map=ecg_segmented_second,
    cacheable=False, channel_map=ECG_REST_LEADS, tensor_from_file=_ecg_rest_to_segment(random_offset_seconds=1.5),
    augmentations=[_warp_ecg],
)
poor_data_quality = TensorMap(
    'poor_data_quality', Interpretation.CATEGORICAL, tensor_from_file=label_from_ecg_interpretation_text, channel_map={'no_poor_data_quality': 0, 'Poor data quality': 1},
    loss=weighted_crossentropy([0.1, 3.0], 'poor_data_quality'),
)
#### Categorical rhythm / interpretation labels.  Cross-entropy weights
#### up-weight the rarer classes (e.g. atrial fibrillation).
ecg_semi_coarse = TensorMap(
    'ecg_semi_coarse', Interpretation.CATEGORICAL, loss=weighted_crossentropy([1.0, 1.0, 2.0, 4.0, 16.0, 20.0], 'ecg_semi_coarse'),
    channel_map={'Normal_sinus_rhythm': 0, 'Sinus_bradycardia': 1, 'Marked_sinus_bradycardia': 2, 'Other_sinus_rhythm': 3, 'Atrial_fibrillation': 4, 'Other_rhythm': 5},
)
# Same channel layout as ecg_semi_coarse but with a different weight schedule.
ecg_semi_coarse_with_poor = TensorMap(
    'ecg_semi_coarse_with_poor', Interpretation.CATEGORICAL, loss=weighted_crossentropy([1.0, 2.0, 3.0, 3.0, 20.0, 20.0], 'ecg_semi_coarse_with_poor'),
    channel_map={'Normal_sinus_rhythm': 0, 'Sinus_bradycardia': 1, 'Marked_sinus_bradycardia': 2, 'Other_sinus_rhythm': 3, 'Atrial_fibrillation': 4, 'Other_rhythm': 5},
)
# 4-way overall-interpretation label.
ecg_normal = TensorMap(
    'ecg_normal', Interpretation.CATEGORICAL, loss=weighted_crossentropy([2.0, 3.0, 3.0, 3.0], 'ecg_normal'),
    channel_map={'Normal_ECG': 0, 'Abnormal_ECG': 1, 'Borderline_ECG': 2, 'Otherwise_normal_ECG': 3},
)
# Binary findings; positive class up-weighted 8x in each.
ecg_infarct = TensorMap(
    'ecg_infarct', Interpretation.CATEGORICAL, channel_map={'no_infarct': 0, 'infarct': 1},
    loss=weighted_crossentropy([1.0, 8.0], 'ecg_infarct'),
)
ecg_poor_data = TensorMap(
    'ecg_poor_data', Interpretation.CATEGORICAL, channel_map={'no_poor_data_quality': 0, 'poor_data_quality': 1},
    loss=weighted_crossentropy([1.0, 8.0], 'ecg_poor_data'),
)
ecg_block = TensorMap(
    'ecg_block', Interpretation.CATEGORICAL, channel_map={'no_block': 0, 'block': 1},
    loss=weighted_crossentropy([1.0, 8.0], 'ecg_block'),
)
# Character-level language modelling over the ECG interpretation text:
# ecg_rest_text windows (length 100) predict ecg_rest_next_char.
ecg_rest_next_char = TensorMap('ecg_rest_next_char', Interpretation.LANGUAGE, shape=(len(ECG_CHAR_2_IDX),), channel_map=ECG_CHAR_2_IDX, activation='softmax', loss='categorical_crossentropy', loss_weight=2.0)
ecg_rest_text = TensorMap('ecg_rest_text', Interpretation.LANGUAGE, shape=(100, len(ECG_CHAR_2_IDX)), path_prefix='ukb_ecg_rest', channel_map={'context': 0, 'alphabet': 1}, dependent_map=ecg_rest_next_char)
# Continuous ECG interval/axis measurements from the UKB resting ECG.
# Each validator rejects values outside a physiologically plausible range and
# each normalization holds the population mean/std used for z-scoring.
# Units are presumably milliseconds for intervals/durations and degrees for
# axes -- TODO confirm against the UKB field definitions.
p_axis = TensorMap(
    'PAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PAxis': 0}, loss='logcosh', validator=make_range_validator(-50, 130),
    normalization={'mean': 48.7, 'std': 23.1},
)
p_duration = TensorMap(
    'PDuration', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PDuration': 0}, loss='logcosh', validator=make_range_validator(30, 140),
    normalization={'mean': 96.1, 'std': 18.85},
)
p_offset = TensorMap(
    'POffset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'POffset': 0}, loss='logcosh', validator=make_range_validator(200, 500),
    normalization={'mean': 369.1, 'std': 28.42},
)
p_onset = TensorMap(
    'POnset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'POnset': 0}, loss='logcosh', validator=make_range_validator(120, 400),
    normalization={'mean': 275.1, 'std': 26.420},
)
pp_interval = TensorMap(
    'PPInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PPInterval': 0}, loss='logcosh', validator=make_range_validator(300, 1800),
    normalization={'mean': 1036.1, 'std': 185.0},
)
pq_interval = TensorMap(
    'PQInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PQInterval': 0}, loss='logcosh', validator=make_range_validator(70, 400),
    normalization={'mean': 165.9, 'std': 26.3},
)
q_offset = TensorMap(
    'QOffset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QOffset': 0}, loss='logcosh', validator=make_range_validator(300, 600),
    normalization={'mean': 525.1, 'std': 13.52},
)
q_onset = TensorMap(
    'QOnset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QOnset': 0}, loss='logcosh', validator=make_range_validator(370, 600),
    normalization={'mean': 435.1, 'std': 11.420},
)
qrs_complexes = TensorMap(
    'QRSComplexes', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QRSComplexes': 0}, loss='logcosh', validator=make_range_validator(0, 60),
    normalization={'mean': 8.0, 'std': 20.0},
)
qrs_duration = TensorMap(
    'QRSDuration', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QRSDuration': 0}, loss='logcosh', validator=make_range_validator(45, 175),
    normalization={'mean': 89.53, 'std': 12.21},
)
qrs_num = TensorMap(
    'QRSNum', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QRSNum': 0}, loss='logcosh', validator=make_range_validator(2, 30),
    normalization={'mean': 9.61, 'std': 1.64},
)
qt_interval = TensorMap(
    'QTInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTInterval': 0}, loss='logcosh', validator=make_range_validator(300, 600),
    normalization={'mean': 426.1, 'std': 32.24},
)
# Discretized variant of QTInterval; the bounds are z-score cut points that
# split a normal distribution into five equal-probability bins (quintiles).
qt_interval_quintiles = TensorMap(
    'QTInterval', Interpretation.DISCRETIZED, path_prefix='ukb_ecg_rest',
    channel_map={'QTInterval': 0}, normalization={'mean': 426.1, 'std': 32.24},
    discretization_bounds=[-0.842, -0.253, 0.253, 0.842],
)
qtc_interval = TensorMap(
    'QTCInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTCInterval': 0}, loss='logcosh', validator=make_range_validator(300, 600),
    normalization={'mean': 419.1, 'std': 20.7},
)
r_axis = TensorMap(
    'RAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'RAxis': 0}, loss='logcosh', validator=make_range_validator(-100, 200),
    normalization={'mean': 25.7, 'std': 36.6},
)
rr_interval = TensorMap(
    'RRInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'RRInterval': 0}, loss='logcosh', validator=make_range_validator(400, 2000),
    normalization={'mean': 1040.61, 'std': 175.5},
)
ventricular_rate = TensorMap(
    'VentricularRate', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'VentricularRate': 0}, validator=make_range_validator(30, 150),
    loss='logcosh', normalization={'mean': 59.3, 'std': 10.6},
)
t_offset = TensorMap(
    'TOffset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'TOffset': 0}, loss='logcosh', validator=make_range_validator(700, 1000),
    normalization={'mean': 860.7, 'std': 32.52},
)
t_axis = TensorMap(
    'TAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'TAxis': 0}, loss='logcosh', validator=make_range_validator(-100, 200),
    normalization={'mean': 40.8, 'std': 32.6},
)
# Atrial-fibrillation polygenic risk score (LD-score based); no validator.
af_prs = TensorMap('AF_PRS_LDscore', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'AF_PRS_LDscore': 0}, normalization={'mean': -1.0, 'std': 0.4})
charge = TensorMap(
    'charge', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'charge': 0}, normalization={'mean': 12.0, 'std': 2.0},
    validator=make_range_validator(0, 20),
)
# 'p'-suffixed variants declare parents, i.e. maps that must be computed first
# (QTc depends on QT and RR).  Note the wider (100, 900) validator range.
qtc_intervalp = TensorMap(
    'QTCInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTCInterval': 0}, loss='logcosh', validator=make_range_validator(100, 900),
    parents=[qt_interval, rr_interval], normalization={'mean': 419.1, 'std': 20.7},
)
qrs_durationpp = TensorMap(
    'QRSDuration', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QRSDuration': 0}, loss='logcosh', validator=make_range_validator(45, 175),
    normalization={'mean': 89.53, 'std': 12.21},
    parents=[qtc_intervalp],
)
# '_sentinel' variants replace missing values with sentinel=0 instead of
# rejecting them with a range validator; logcosh moves from loss to metrics.
p_axis_sentinel = TensorMap(
    'PAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PAxis': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 48.7, 'std': 23.1},
)
p_duration_sentinel = TensorMap(
    'PDuration', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PDuration': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 96.1, 'std': 18.85},
)
p_offset_sentinel = TensorMap(
    'POffset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'POffset': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 369.1, 'std': 28.42},
)
p_onset_sentinel = TensorMap(
    'POnset', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'POnset': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 275.1, 'std': 26.420},
)
pp_interval_sentinel = TensorMap(
    'PPInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PPInterval': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 1036.1, 'std': 185.0},
)
pq_interval_sentinel = TensorMap(
    'PQInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'PQInterval': 0}, sentinel=0, metrics=['logcosh'],
    normalization={'mean': 165.9, 'std': 26.3},
)
# NOTE(review): the QRS/QT/QTC sentinel maps below omit metrics=['logcosh']
# unlike the P-wave sentinels above -- confirm whether that is intentional.
qrs_duration_sentinel = TensorMap(
    'QRSDuration', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QRSDuration': 0}, sentinel=0,
    normalization={'mean': 89.53, 'std': 12.21},
)
qt_interval_sentinel = TensorMap(
    'QTInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTInterval': 0}, sentinel=0,
    normalization={'mean': 426.1, 'std': 32.24},
)
qtc_interval_sentinel = TensorMap(
    'QTCInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTCInterval': 0}, sentinel=0,
    normalization={'mean': 419.1, 'std': 20.7},
)
# Sentinel variant of the parented QTc map (depends on QT and RR).
# NOTE: this assignment previously appeared twice back-to-back with byte-for-byte
# identical arguments; the second binding was dead code and has been removed.
qtc_intervalp_sentinel = TensorMap(
    'QTCInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'QTCInterval': 0}, sentinel=0,
    normalization={'mean': 419.1, 'std': 20.7},
    parents=[qt_interval, rr_interval],
)
# Remaining sentinel-valued continuous maps (missing values become 0).
r_axis_sentinel = TensorMap('RAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'RAxis': 0}, sentinel=0, normalization={'mean': 25.7, 'std': 36.6})
rr_interval_sentinel = TensorMap(
    'RRInterval', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'RRInterval': 0}, sentinel=0,
    normalization={'mean': 1040.61, 'std': 175.5},
)
t_axis_sentinel = TensorMap('TAxis', Interpretation.CONTINUOUS, path_prefix='ukb_ecg_rest', channel_map={'TAxis': 0}, sentinel=0, normalization={'mean': 40.8, 'std': 32.6})
# Binary drug-exposure labels (baseline / follow-up (_fu) / any visit).
# Weights passed to weighted_crossentropy up-weight the rare positive class;
# every map also carries loss_weight=100.0.
# NOTE: an earlier group of definitions for the seven *_baseline maps
# (identical channel maps and weights, but without loss_weight=100.0) was dead
# code -- each name was unconditionally rebound by the definitions below --
# so those duplicates have been removed.
class1_baseline = TensorMap(
    'class1_baseline', Interpretation.CATEGORICAL, channel_map={'no_class1_baseline': 0, 'class1_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0023, 0.9977], 'class1_baseline'),
)
bb_baseline = TensorMap(
    'bb_baseline', Interpretation.CATEGORICAL, channel_map={'no_bb_baseline': 0, 'bb_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0453, 0.9547], 'bb_baseline'),
)
class3_baseline = TensorMap(
    'class3_baseline', Interpretation.CATEGORICAL, channel_map={'no_class3_baseline': 0, 'class3_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0011, 0.9989], 'class3_baseline'),
)
ccb_baseline = TensorMap(
    'ccb_baseline', Interpretation.CATEGORICAL, channel_map={'no_ccb_baseline': 0, 'ccb_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0044, 0.9956], 'ccb_baseline'),
)
qtc_drug_def_baseline = TensorMap(
    'qtc_drug_def_baseline', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_def_baseline': 0, 'qtc_drug_def_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0210, 0.9790], 'qtc_drug_def_baseline'),
)
qtc_drug_poss_baseline = TensorMap(
    'qtc_drug_poss_baseline', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_poss_baseline': 0, 'qtc_drug_poss_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0189, 0.9811], 'qtc_drug_poss_baseline'),
)
class1_fu = TensorMap(
    'class1_fu', Interpretation.CATEGORICAL, channel_map={'no_class1_fu': 0, 'class1_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0018, 0.9982], 'class1_fu'),
)
bb_fu = TensorMap(
    'bb_fu', Interpretation.CATEGORICAL, channel_map={'no_bb_fu': 0, 'bb_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0306, 0.9694], 'bb_fu'),
)
class3_fu = TensorMap(
    'class3_fu', Interpretation.CATEGORICAL, channel_map={'no_class3_fu': 0, 'class3_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0006, 0.9994], 'class3_fu'),
)
ccb_fu = TensorMap(
    'ccb_fu', Interpretation.CATEGORICAL, channel_map={'no_ccb_fu': 0, 'ccb_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0035, 0.9965], 'ccb_fu'),
)
qtc_drug_def_fu = TensorMap(
    'qtc_drug_def_fu', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_def_fu': 0, 'qtc_drug_def_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0140, 0.9860], 'qtc_drug_def_fu'),
)
qtc_drug_poss_fu = TensorMap(
    'qtc_drug_poss_fu', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_poss_fu': 0, 'qtc_drug_poss_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0127, 0.9873], 'qtc_drug_poss_fu'),
)
qtc_drug_def_any = TensorMap(
    'qtc_drug_def_any', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_def_any': 0, 'qtc_drug_def_any': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0302, 0.9698], 'qtc_drug_def_any'),
)
qtc_drug_poss_any = TensorMap(
    'qtc_drug_poss_any', Interpretation.CATEGORICAL, channel_map={'no_qtc_drug_poss_any': 0, 'qtc_drug_poss_any': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0267, 0.9733], 'qtc_drug_poss_any'),
)
any_class1 = TensorMap(
    'any_class1', Interpretation.CATEGORICAL, channel_map={'no_any_class1': 0, 'any_class1': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0031, 0.9969], 'any_class1'),
)
any_bb = TensorMap(
    'any_bb', Interpretation.CATEGORICAL, channel_map={'no_any_bb': 0, 'any_bb': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0602, 0.9398], 'any_bb'),
)
any_class3 = TensorMap(
    'any_class3', Interpretation.CATEGORICAL, channel_map={'no_any_class3': 0, 'any_class3': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0013, 0.9987], 'any_class3'),
)
any_ccb = TensorMap(
    'any_ccb', Interpretation.CATEGORICAL, channel_map={'no_any_ccb': 0, 'any_ccb': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0062, 0.9938], 'any_ccb'),
)
combined_qtc_drug_baseline = TensorMap(
    'combined_qtc_drug_baseline', Interpretation.CATEGORICAL, channel_map={'no_combined_qtc_drug_baseline': 0, 'combined_qtc_drug_baseline': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0389, 0.9611], 'combined_qtc_drug_baseline'),
)
combined_qtc_drug_fu = TensorMap(
    'combined_qtc_drug_fu', Interpretation.CATEGORICAL, channel_map={'no_combined_qtc_drug_fu': 0, 'combined_qtc_drug_fu': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0260, 0.9740], 'combined_qtc_drug_fu'),
)
combined_qtc_drug_any = TensorMap(
    'combined_qtc_drug_any', Interpretation.CATEGORICAL, channel_map={'no_combined_qtc_drug_any': 0, 'combined_qtc_drug_any': 1},
    loss_weight=100.0, loss=weighted_crossentropy([0.0546, 0.9454], 'combined_qtc_drug_any'),
)
# Exercise (bike) ECG summary values.  The '_no0' variants use
# ignore_zeros_logcosh so missing (zero) targets do not contribute to the loss.
ecg_bike_max_hr_no0 = TensorMap(
    'bike_max_hr', Interpretation.CONTINUOUS, channel_map={'bike_max_hr': 0},
    loss=ignore_zeros_logcosh, metrics=['logcosh'], normalization={'mean': 110.03, 'std': 20.04},
)
ecg_bike_resting_hr_no0 = TensorMap(
    'bike_resting_hr', Interpretation.CONTINUOUS, channel_map={'bike_resting_hr': 0},
    loss=ignore_zeros_logcosh, metrics=['logcosh'], normalization={'mean': 71.2, 'std': 12.57},
)
ecg_bike_max_pred_hr_no0 = TensorMap(
    'bike_max_pred_hr', Interpretation.CONTINUOUS, channel_map={'bike_max_pred_hr': 0},
    loss=ignore_zeros_logcosh, metrics=['logcosh'], normalization={'mean': 167.5, 'std': 5.78},
)
# NOTE(review): unlike its siblings this map omits an explicit Interpretation
# argument -- presumably it falls back to a CONTINUOUS default; confirm.
ecg_bike_max_hr = TensorMap(
    'max_hr', path_prefix='ecg_bike', loss='logcosh', metrics=['mape'],
    normalization={'mean': 110.03, 'std': 20.04}, shape=(1,),
    tensor_from_file=normalized_first_date,
)
ecg_bike_resting_hr = TensorMap(
    'resting_hr', Interpretation.CONTINUOUS, path_prefix='ecg_bike', loss='logcosh', shape=(1,),
    metrics=['mape'], normalization={'mean': 71.2, 'std': 12.57},
    tensor_from_file=normalized_first_date,
)
ecg_bike_age = TensorMap(
    'age', Interpretation.CONTINUOUS, path_prefix='ecg_bike', loss='logcosh', metrics=['mape'], shape=(1,),
    normalization={'mean': 60, 'std': 7.65},
    tensor_from_file=normalized_first_date,
)
# NOTE(review): std here is 5.81 while ecg_bike_max_pred_hr_no0 above uses
# 5.78 for the same quantity -- verify which statistic is current.
ecg_bike_max_pred_hr = TensorMap(
    'max_pred_hr', Interpretation.CONTINUOUS, path_prefix='ecg_bike', loss='logcosh', metrics=['mape'], shape=(1,),
    normalization={'mean': 167.5, 'std': 5.81},
    tensor_from_file=normalized_first_date,
)
# Per-protocol trend curves sampled to 106 points under 'ecg_bike'.
ecg_bike_trend_hr = TensorMap(
    'trend_heartrate', Interpretation.CONTINUOUS, shape=(106, 1), path_prefix='ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_trend_load = TensorMap(
    'trend_load', Interpretation.CONTINUOUS, shape=(106, 1), path_prefix='ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_trend_grade = TensorMap(
    'trend_grade', Interpretation.CONTINUOUS, shape=(106, 1), path_prefix='ecg_bike',
    tensor_from_file=normalized_first_date,
)
# 'raw' trend series: 87 samples, flat shape, stored under 'ukb_ecg_bike'.
ecg_bike_raw_trend_hr = TensorMap(
    'trend_heartrate', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_load = TensorMap(
    'trend_load', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_grade = TensorMap(
    'trend_grade', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_artifact = TensorMap(
    'trend_artifact', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_mets = TensorMap(
    'trend_mets', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_pacecount = TensorMap(
    'trend_pacecount', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_phasename = TensorMap(
    'trend_phasename', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_phasetime = TensorMap(
    'trend_phasetime', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_time = TensorMap(
    'trend_time', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
ecg_bike_raw_trend_vecount = TensorMap(
    'trend_vecount', Interpretation.CONTINUOUS, shape=(87,), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
# Full raw recording: 216500 samples x 3 channels.
ecg_bike_raw_full = TensorMap(
    'full', Interpretation.CONTINUOUS, shape=(216500, 3), path_prefix='ukb_ecg_bike',
    tensor_from_file=normalized_first_date,
)
|
# RLE Source: https://www.kaggle.com/rakhlin/fast-run-length-encoding-python
import os
import numpy as np
import pandas as pd
from skimage.morphology import label
def rle_encoding(x):
    """Run-length encode a binary mask.

    Pixels are numbered in column-major (Fortran) order starting at 1, per the
    Kaggle submission convention.  Returns a flat list alternating
    [start_1, length_1, start_2, length_2, ...].
    """
    pixel_ids = np.where(x.T.flatten() == 1)[0]
    runs = []
    previous = -2
    for pixel in pixel_ids:
        if pixel > previous + 1:
            # gap detected: open a new run at this (1-indexed) pixel
            runs.extend((pixel + 1, 0))
        runs[-1] += 1
        previous = pixel
    return runs
def prob_to_rles(x, cutoff):
    """Yield one run-length encoding per connected component of ``x > cutoff``."""
    labeled = label(x > cutoff)
    for component_id in range(1, labeled.max() + 1):
        yield rle_encoding(labeled == component_id)
def label_to_rles(lab_img):
    """Yield one run-length encoding per labeled object in ``lab_img``."""
    # np.unique is sorted, so index 0 is the background label (value 0); skip it
    object_ids = np.unique(lab_img)[1:]
    for object_id in object_ids:
        yield rle_encoding(lab_img == object_id)
def generate_submission(test_ids, predictions, threshold, submission_file):
    """Generates a submission RLE CSV file based on the input predicted masks.
    Args:
        test_ids: List of all test_ids that were predicted.
        predictions: List of all full-size predicted masks, where each pixel is
            represented as a probability between 0 and 1.
        threshold: The threshold above which a predicted probability is considered 'True'.
        submission_file: The output submission file name.
    """
    image_ids = []
    encodings = []
    for index, image_id in enumerate(test_ids):
        # one RLE row per connected component; repeat the image id for each
        component_rles = list(prob_to_rles(predictions[index], threshold))
        encodings.extend(component_rles)
        image_ids.extend([image_id] * len(component_rles))
    sub = pd.DataFrame()
    sub['ImageId'] = image_ids
    sub['EncodedPixels'] = pd.Series(encodings).apply(lambda rle: ' '.join(str(value) for value in rle))
    sub.to_csv(submission_file, index=False)
def generate_submission_from_df_prediction(dfs, threshold, submission_file):
    """Generates a submission RLE CSV file based on the input data frame's predicted image.
    Args:
        dfs: List of all data frames that contain predicted masks to submit.
        threshold: The threshold above which a predicted probability is considered 'True'.
        submission_file: The output submission file name.
    """
    # os.path.split returns (directory, filename); the directory part may be ''.
    # (The original bound it to a variable misleadingly named ``basename``.)
    out_dir, _ = os.path.split(submission_file)
    if out_dir:
        # exist_ok avoids the check-then-create race of isdir() followed by makedirs()
        os.makedirs(out_dir, exist_ok=True)
    new_test_ids = []
    rles = []
    for df in dfs:
        for img_id, prediction in zip(df.imageID, df.prediction):
            # predictions are stored (H, W, 1); take the single channel
            mask = prediction[:, :, 0]
            rle = list(prob_to_rles(mask, threshold))
            rles.extend(rle)
            new_test_ids.extend([img_id] * len(rle))
    sub = pd.DataFrame()
    sub['ImageId'] = new_test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
    sub.to_csv(submission_file, index=False)
def generate_submission_from_df(dfs, submission_file):
    """Generates a submission RLE CSV file based on the labeled mask in a data frame.
    Args:
        dfs: List of all data frames that contain labeled masks to submit.
        submission_file: The output submission file name.
    """
    out_dir, _ = os.path.split(submission_file)
    if out_dir:
        # exist_ok avoids the check-then-create race of isdir() followed by makedirs()
        os.makedirs(out_dir, exist_ok=True)
    new_test_ids = []
    rles = []
    for df in dfs:
        # loop variable renamed from ``label`` to avoid shadowing the
        # skimage.morphology.label function imported at module level
        for img_id, labeled_mask in zip(df.imageID, df.label):
            if len(np.unique(labeled_mask)) == 1:
                # only background present: emit a single empty encoding for this image
                rles.extend([''])
                new_test_ids.extend([img_id])
            else:
                rle = list(label_to_rles(labeled_mask))
                rles.extend(rle)
                new_test_ids.extend([img_id] * len(rle))
    sub = pd.DataFrame()
    sub['ImageId'] = new_test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
    sub.to_csv(submission_file, index=False)
# Interactive score calculator: repeatedly reads comma-separated integers and
# prints the mean of three pairwise differences, until an empty line is entered.
while True:
    # Prompt (Russian): "Results separated by commas (no spaces)"
    up_c = input('Результаты через запятую (без пробелов):\n').rstrip().split(',')
    if up_c == ['']:
        # empty input -> quit
        exit(0)
    # Mean of (v2-v1) + (v4-v3) + (v6-v5).  NOTE(review): the third pair is
    # parenthesized oddly but evaluates identically by left-associativity;
    # up_c[0] is read but never used -- presumably a label field; confirm.
    up_ball = ((int(up_c[2]) - int(up_c[1])) + (int(up_c[4]) - int(up_c[3])) + (int(up_c[6])) - int(up_c[5])) / 3
    up_ball = round(up_ball, 2)
    print(up_ball)
|
"""Init strategies to be used for running jobs."""
import logging
import os
from copy import deepcopy
import numpy as np
import yaml
from sklearn.experimental import (enable_hist_gradient_boosting,
enable_iterative_imputer)
from sklearn.ensemble import (HistGradientBoostingClassifier,
HistGradientBoostingRegressor)
from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer
from sklearn.linear_model import LogisticRegressionCV, RidgeCV
from sklearn.model_selection import (GridSearchCV, KFold, ShuffleSplit,
StratifiedShuffleSplit)
from .strategy import Strategy
logger = logging.getLogger(__name__)
strategies = list()
# Load some params from custom file
filepath = 'custom/strategy_params.yml'
if os.path.exists(filepath):
    with open(filepath, 'r') as file:
        params = yaml.safe_load(file)
else:
    # no custom file: every parameter falls back to its default below
    params = dict()
# Load or defaults
n_outer_splits = params.get('n_outer_splits', 2)
n_inner_splits = params.get('n_inner_splits', 2)
n_jobs = params.get('n_jobs', 1)
n_iter = params.get('n_iter', 1)
n_repeats = params.get('n_repeats', 1)
compute_importance = params.get('compute_importance', False)
learning_curve = params.get('learning_curve', False)
n_learning_trains = params.get('n_learning_trains', 5)
iterative_imputer_max_iter = params.get('iterative_imputer_max_iter', 10)
roc = params.get('roc', False)
param_space = params.get('param_space', None)
train_set_steps = params.get('train_set_steps', [])
min_test_set = params.get('min_test_set', 0.2)
n_splits = params.get('n_splits', 5)
# Default RS
RS = 42
# NOTE(review): this message is logged even when the YAML file was absent and
# defaults were used, which can mislead -- consider guarding it.
logger.info(f'Loaded strategy_params.yml with following parameters:')
logger.info(f'n_outer_splits: {n_outer_splits}')
logger.info(f'n_inner_splits: {n_inner_splits}')
logger.info(f'n_jobs: {n_jobs}')
logger.info(f'n_iter: {n_iter}')
logger.info(f'n_repeats: {n_repeats}')
logger.info(f'compute_importance: {compute_importance}')
logger.info(f'learning_curve: {learning_curve}')
logger.info(f'n_learning_trains: {n_learning_trains}')
logger.info(f'iterative_imputer_max_iter: {iterative_imputer_max_iter}')
logger.info(f'roc: {roc}')
logger.info(f'param_space: {param_space}')
logger.info(f'RS: {RS}')
logger.info(f'train_set_steps: {train_set_steps}')
logger.info(f'min_test_set: {min_test_set}')
# Default hyperparameter grid for the HistGradientBoosting estimators.
if param_space is None:
    param_space = {
        'learning_rate': [0.05, 0.1, 0.3],
        'max_depth': [3, 6, 9]
    }
# A strategy to run a classification: gradient-boosted trees with a grid
# search over param_space inside a stratified inner split, evaluated by
# weighted one-vs-rest ROC AUC, wrapped in a shuffled outer K-fold.
strategies.append(Strategy(
    name='Classification',
    estimator=HistGradientBoostingClassifier(random_state=RS),
    inner_cv=StratifiedShuffleSplit(n_splits=n_inner_splits, train_size=0.8, random_state=RS),
    search=GridSearchCV,
    param_space=param_space,
    search_params={
        'scoring': 'roc_auc_ovr_weighted',
        'verbose': 0,
        'n_jobs': n_jobs,
        'return_train_score': True,
    },
    # Alternative randomized-search configuration, kept for reference:
    # search=RandomizedSearchCV,
    # param_space={
    #     'learning_rate': uniform(1e-5, 1),
    #     'max_iter': range(10, 500)
    # },
    # search_params={
    #     'scoring': 'recall',
    #     'verbose': 0,
    #     'n_jobs': n_jobs,
    #     'return_train_score': True,
    #     'n_iter': n_iter
    # },
    outer_cv=KFold(n_splits=n_outer_splits, shuffle=True, random_state=RS),
    compute_importance=compute_importance,
    importance_params={
        'n_jobs': n_jobs,
        'n_repeats': n_repeats,
    },
    learning_curve=learning_curve,
    learning_curve_params={
        'scoring': 'roc_auc_ovr_weighted',
        'train_sizes': np.linspace(0.1, 1, n_learning_trains)
    },
    roc=roc,
    train_set_steps=train_set_steps,
    min_test_set=min_test_set,
    n_splits=n_splits,
))
# Logistic-regression baseline: LogisticRegressionCV tunes its own
# regularization internally, so no outer search/inner_cv is configured.
strategies.append(Strategy(
    name='Classification_Logit',
    estimator=LogisticRegressionCV(random_state=RS, cv=StratifiedShuffleSplit(n_splits=n_inner_splits, train_size=0.8, random_state=RS),),
    inner_cv=None,
    search=None,
    param_space=None,
    # NOTE(review): unlike Regression_Ridge below, search_params=None is not
    # passed here -- presumably Strategy defaults it; confirm.
    outer_cv=KFold(n_splits=n_outer_splits, shuffle=True, random_state=RS),
    compute_importance=compute_importance,
    importance_params={
        'n_jobs': n_jobs,
        'n_repeats': n_repeats,
    },
    learning_curve=learning_curve,
    learning_curve_params={
        'scoring': 'roc_auc_ovr_weighted',
        'train_sizes': np.linspace(0.1, 1, n_learning_trains)
    },
    roc=roc,
    train_set_steps=train_set_steps,
    min_test_set=min_test_set,
    n_splits=n_splits,
))
# A strategy to run a regression: gradient-boosted trees optimizing absolute
# error, grid-searched and refit on r2.
# NOTE(review): loss='least_absolute_deviation' was renamed to
# 'absolute_error' in scikit-learn 1.0 -- verify against the pinned version.
strategies.append(Strategy(
    name='Regression',
    estimator=HistGradientBoostingRegressor(loss='least_absolute_deviation', random_state=RS),
    inner_cv=ShuffleSplit(n_splits=n_inner_splits, train_size=0.8, random_state=RS),
    search=GridSearchCV,
    param_space=param_space,
    search_params={
        'scoring': ['r2', 'neg_mean_absolute_error'],
        'refit': 'r2',
        'verbose': 0,
        'n_jobs': n_jobs,
        'return_train_score': True,
    },
    # Alternative randomized-search configuration, kept for reference:
    # search=RandomizedSearchCV,
    # param_space={
    #     'learning_rate': uniform(1e-5, 1),
    #     'max_depth': range(3, 11)
    # },
    # search_params={
    #     'scoring': ['r2', 'neg_mean_absolute_error'],
    #     'refit': 'r2',
    #     'verbose': 0,
    #     'return_train_score': True,
    #     'n_iter': n_iter,
    #     'n_jobs': n_jobs
    # },
    outer_cv=KFold(n_splits=n_outer_splits, shuffle=True, random_state=RS),
    compute_importance=compute_importance,
    importance_params={
        'n_jobs': n_jobs,
        'n_repeats': n_repeats,
    },
    learning_curve=learning_curve,
    learning_curve_params={
        'scoring': 'r2',
        'train_sizes': np.linspace(0.1, 1, n_learning_trains)
    },
    roc=roc,
    train_set_steps=train_set_steps,
    min_test_set=min_test_set,
    n_splits=n_splits,
))
# Ridge baseline: RidgeCV tunes its own alpha internally, so no outer
# search is configured.
strategies.append(Strategy(
    name='Regression_Ridge',
    estimator=RidgeCV(cv=ShuffleSplit(n_splits=n_inner_splits, train_size=0.8, random_state=RS)),
    inner_cv=None,
    search=None,
    param_space=None,
    search_params=None,
    outer_cv=KFold(n_splits=n_outer_splits, shuffle=True, random_state=RS),
    compute_importance=compute_importance,
    importance_params={
        'n_jobs': n_jobs,
        'n_repeats': n_repeats,
    },
    learning_curve=learning_curve,
    learning_curve_params={
        'scoring': 'r2',
        'train_sizes': np.linspace(0.1, 1, n_learning_trains)
    },
    roc=roc,
    train_set_steps=train_set_steps,
    min_test_set=min_test_set,
    n_splits=n_splits,
))
# Add imputation to the previous strategies.  '+mask' variants append a
# missingness-indicator column per imputed feature (add_indicator=True).
imputers = {
    'Mean': SimpleImputer(strategy='mean'),
    'Mean+mask': SimpleImputer(strategy='mean', add_indicator=True),
    'Med': SimpleImputer(strategy='median'),
    'Med+mask': SimpleImputer(strategy='median', add_indicator=True),
    'Iterative': IterativeImputer(max_iter=iterative_imputer_max_iter,
                                  random_state=RS),
    'Iterative+mask': IterativeImputer(add_indicator=True,
                                       max_iter=iterative_imputer_max_iter,
                                       random_state=RS),
    'KNN': KNNImputer(),
    'KNN+mask': KNNImputer(add_indicator=True),
}
# Derive an imputing variant of every strategy for each configured imputer,
# then expose everything as a name -> Strategy mapping.
def _with_imputer(base_strategy, imputer, imputer_name):
    # Clone a strategy, attach the imputer, and tag the clone's name.
    variant = deepcopy(base_strategy)
    variant.imputer = imputer
    variant.name = f'{variant.name}_imputed_{imputer_name}'
    return variant

imputed_strategies = [
    _with_imputer(base, imputer, imputer_name)
    for imputer_name, imputer in imputers.items()
    for base in strategies
]
strategies = strategies + imputed_strategies
strategies = {strategy.name: strategy for strategy in strategies}
|
import os
import shutil
import re
from fontInfoData import getAttrWithFallback, intListToNum, normalizeStringForPostscript
from outlineOTF import OutlineOTFCompiler
from featureTableWriter import FeatureTableWriter, winStr, macStr
from kernFeatureWriter import KernFeatureWriter
# Backwards-compatibility shim from the Python 2.3 era: ``sorted`` became a
# builtin in Python 2.4, so on any modern interpreter the ``try`` succeeds and
# the fallback definition is never created.
try:
    sorted
except NameError:
    def sorted(l):
        # NOTE(review): fallback ignores the real builtin's key/cmp/reverse args
        l = list(l)
        l.sort()
        return l
class MakeOTFPartsCompiler(object):
    """
    This object will create the "parts" needed by the FDK.
    The only external method is :meth:`ufo2fdk.tools.makeotfParts.compile`.
    There is one attribute, :attr:`ufo2fdk.tools.makeotfParts.paths`,
    that may be referenced externally. That is a dictionary of
    paths to the various parts.
    When creating this object, you must provide a *font*
    object and a *path* indicating where the parts should
    be saved. Optionally, you can provide a *glyphOrder*
    list of glyph names indicating the order of the glyphs
    in the font. You may also provide an *outlineCompilerClass*
    argument that will serve as the outline source compiler.
    The class passed for this argument must be a subclass of
    :class:`ufo2fdk.tools.outlineOTF.OutlineOTFCompiler`.
    """
    def __init__(self, font, path, features=None, glyphOrder=None, outlineCompilerClass=OutlineOTFCompiler):
        """
        Store the *font* and destination *path*, and precompute the paths
        of every part file that :meth:`compile` will write.
        """
        self.font = font
        self.path = path
        # accumulated messages from the outline compiler and part writers
        self.log = []
        self.outlineCompilerClass = outlineCompilerClass
        # store the path to an eventual custom feature file
        self.features = features
        # store the glyph order
        if glyphOrder is None:
            # default to the font's glyph names in sorted order
            glyphOrder = sorted(font.keys())
        self.glyphOrder = glyphOrder
        # make the paths for all files
        self.paths = dict(
            outlineSource=os.path.join(path, "font.otf"),
            menuName=os.path.join(path, "menuname"),
            glyphOrder=os.path.join(path, "glyphOrder"),
            fontInfo=os.path.join(path, "fontinfo"),
            features=os.path.join(path, "features")
        )
def compile(self):
"""
Compile the parts.
"""
# set up the parts directory removing
# an existing directory if necessary.
if os.path.exists(self.path):
shutil.rmtree(self.path)
os.mkdir(self.path)
# build the parts
self.setupFile_outlineSource(self.paths["outlineSource"])
self.setupFile_menuName(self.paths["menuName"])
self.setupFile_glyphOrder(self.paths["glyphOrder"])
self.setupFile_fontInfo(self.paths["fontInfo"])
self.setupFile_features(self.paths["features"])
    def setupFile_outlineSource(self, path):
        """
        Make the outline source file.
        **This should not be called externally.** Subclasses
        may override this method to handle the file creation
        in a different way if desired.
        """
        # delegate to the configured outline compiler (default:
        # OutlineOTFCompiler) and fold its log into this compiler's log
        c = self.outlineCompilerClass(self.font, path, self.glyphOrder)
        c.compile()
        self.log += c.log
    def setupFile_menuName(self, path):
        """
        Make the menu name source file. This gets the values for
        the file using the fallback system as described below:

        ==== ===
        [PS] postscriptFontName
        f=   openTypeNamePreferredFamilyName
        s=   openTypeNamePreferredSubfamilyName
        l=   styleMapFamilyName
        m=1, openTypeNameCompatibleFullName
        ==== ===

        Mac-specific (``f=1,`` / ``s=1,``) records are only written when
        the Windows encoding of a name differs from the raw name.

        **This should not be called externally.** Subclasses
        may override this method to handle the file creation
        in a different way if desired.
        """
        psName = getAttrWithFallback(self.font.info,"postscriptFontName")
        lines = [
            "[%s]" % psName
        ]
        # family name
        familyName = getAttrWithFallback(self.font.info,"openTypeNamePreferredFamilyName")
        encodedFamilyName = winStr(familyName)
        lines.append("f=%s" % encodedFamilyName)
        if encodedFamilyName != familyName:
            lines.append("f=1,%s" % macStr(familyName))
        # style name
        styleName = getAttrWithFallback(self.font.info,"openTypeNamePreferredSubfamilyName")
        encodedStyleName = winStr(styleName)
        lines.append("s=%s" % encodedStyleName)
        if encodedStyleName != styleName:
            lines.append("s=1,%s" % macStr(styleName))
        # compatible name
        winCompatible = getAttrWithFallback(self.font.info,"styleMapFamilyName")
        ## the second qualification here is in place for Mac Office <= 2004.
        ## in that app the menu name is pulled from name ID 18. the font
        ## may have standard naming data that combines to a length longer
        ## than the app can handle (see Adobe Tech Note #5088). the designer
        ## may have created a specific openTypeNameCompatibleFullName to
        ## get around this problem. sigh, old app bugs live long lives.
        if winCompatible != familyName or self.font.info.openTypeNameCompatibleFullName is not None:
            # windows
            l = "l=%s" % normalizeStringForPostscript(winCompatible)
            lines.append(l)
            # mac
            macCompatible = getAttrWithFallback(self.font.info,"openTypeNameCompatibleFullName")
            l = "m=1,%s" % macStr(macCompatible)
            lines.append(l)
        text = "\n".join(lines) + "\n"
        # NOTE(review): opening in "wb" but writing a str is a Python 2
        # idiom; under Python 3 this would raise TypeError -- confirm the
        # target interpreter version.
        f = open(path, "wb")
        f.write(text)
        f.close()
    def setupFile_glyphOrder(self, path):
        """
        Make the glyph order source file: one record per glyph, of the
        form ``finalName friendlyName [uniXXXX]`` (apparently the FDK
        GlyphOrderAndAliasDB format -- both names are identical here).

        **This should not be called externally.** Subclasses
        may override this method to handle the file creation
        in a different way if desired.
        """
        lines = []
        for glyphName in self.glyphOrder:
            if glyphName in self.font and self.font[glyphName].unicode is not None:
                code = self.font[glyphName].unicode
                code = "%04X" % code
                # values that fit in four hex digits use the "uniXXXX"
                # form; larger (supplementary plane) values use "uXXXXX"
                if len(code) <= 4:
                    code = "uni%s" % code
                else:
                    code = "u%s" % code
                line = "%s %s %s" % (glyphName, glyphName, code)
            else:
                # no unicode value: just the two name columns
                line = "%s %s" % (glyphName, glyphName)
            lines.append(line)
        text = "\n".join(lines) + "\n"
        # NOTE(review): "wb" with a str payload is a Python 2 idiom.
        f = open(path, "wb")
        f.write(text)
        f.close()
    def setupFile_fontInfo(self, path):
        """
        Make the font info source file. This gets the values for
        the file using the fallback system as described below:

        ========================== ===
        IsItalicStyle              styleMapStyleName
        IsBoldStyle                styleMapStyleName
        PreferOS/2TypoMetrics      openTypeOS2Selection
        IsOS/2WidthWeigthSlopeOnly openTypeOS2Selection
        IsOS/2OBLIQUE              openTypeOS2Selection
        ========================== ===

        **This should not be called externally.** Subclasses
        may override this method to handle the file creation
        in a different way if desired.
        """
        lines = []
        # style mapping
        styleMapStyleName = getAttrWithFallback(self.font.info,"styleMapStyleName")
        if styleMapStyleName in ("italic", "bold italic"):
            lines.append("IsItalicStyle true")
        else:
            lines.append("IsItalicStyle false")
        if styleMapStyleName in ("bold", "bold italic"):
            lines.append("IsBoldStyle true")
        else:
            lines.append("IsBoldStyle false")
        # fsSelection bits
        selection = getAttrWithFallback(self.font.info,"openTypeOS2Selection")
        # bit 7: USE_TYPO_METRICS
        if 7 in selection:
            lines.append("PreferOS/2TypoMetrics true")
        else:
            lines.append("PreferOS/2TypoMetrics false")
        # bit 8: WWS (weight/width/slope family naming only)
        if 8 in selection:
            lines.append("IsOS/2WidthWeigthSlopeOnly true")
        else:
            lines.append("IsOS/2WidthWeigthSlopeOnly false")
        # bit 9: OBLIQUE
        if 9 in selection:
            lines.append("IsOS/2OBLIQUE true")
        else:
            lines.append("IsOS/2OBLIQUE false")
        # write the file (skipped entirely when there is nothing to write)
        if lines:
            f = open(path, "wb")
            f.write("\n".join(lines))
            f.close()
    def setupFile_features(self, path):
        """
        Make the features source file. If any tables
        or the kern feature are defined in the font's
        features, they will not be overwritten.

        **This should not be called externally.** Subclasses
        may override this method to handle the file creation
        in a different way if desired.
        """
        # gather the existing feature text, forcing include statements to
        # absolute paths so they still resolve from the parts directory
        if self.font.path is None:
            # font exists only in memory: there is no base directory, so
            # includes cannot be made absolute.
            # NOTE(review): a custom self.features path is ignored in this
            # branch -- confirm that is intended.
            existingFeaturePath = None
            existing = self.font.features.text
            if existing is None:
                existing = ""
        elif self.features is not None:
            # a custom feature file was supplied, relative to the font path
            existingFeaturePath = os.path.normpath(os.path.join(self.font.path, self.features))
            with open(existingFeaturePath, "r") as fea:
                text = fea.read()
            existing = forceAbsoluteIncludesInFeatures(text, os.path.dirname(existingFeaturePath))
        else:
            # default: the features.fea stored inside the UFO
            existingFeaturePath = os.path.join(self.font.path, "features.fea")
            # NOTE(review): includes are resolved against the UFO's parent
            # directory here, unlike the branch above which resolves them
            # against the feature file's own directory -- confirm intended.
            existing = forceAbsoluteIncludesInFeatures(self.font.features.text, os.path.dirname(self.font.path))
        # break the features into parts
        features, tables = extractFeaturesAndTables(existing, scannedFiles=[existingFeaturePath])
        # build tables that are not in the existing features
        autoTables = {}
        if "head" not in tables:
            autoTables["head"] = self.writeFeatures_head()
        if "hhea" not in tables:
            autoTables["hhea"] = self.writeFeatures_hhea()
        if "OS/2" not in tables:
            autoTables["OS/2"] = self.writeFeatures_OS2()
        if "name" not in tables:
            autoTables["name"] = self.writeFeatures_name()
        # build the kern feature if necessary
        autoFeatures = {}
        if "kern" not in features and len(self.font.kerning):
            autoFeatures["kern"] = self.writeFeatures_kern()
        # write the features: existing text first, then generated
        # features and tables in sorted-name order
        features = [existing]
        for name, text in sorted(autoFeatures.items()):
            features.append(text)
        for name, text in sorted(autoTables.items()):
            features.append(text)
        features = "\n\n".join(features)
        # write the result
        # NOTE(review): "wb" with a str payload is a Python 2 idiom.
        f = open(path, "wb")
        f.write(features)
        f.close()
    def writeFeatures_kern(self):
        """
        Write the kern feature to a string and return it.  The text is
        produced entirely by KernFeatureWriter from the font's kerning.

        **This should not be called externally.** Subclasses
        may override this method to handle the string creation
        in a different way if desired.
        """
        writer = KernFeatureWriter(self.font)
        return writer.write()
def writeFeatures_head(self):
"""
Write the head to a string and return it.
This gets the values for the file using the fallback
system as described below:
===== ===
X.XXX versionMajor.versionMinor
===== ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
versionMajor = getAttrWithFallback(self.font.info, "versionMajor")
versionMinor = getAttrWithFallback(self.font.info, "versionMinor")
value = "%d.%s" % (versionMajor, str(versionMinor).zfill(3))
writer = FeatureTableWriter("head")
writer.addLineWithKeyValue("FontRevision", value)
return writer.write()
def writeFeatures_hhea(self):
"""
Write the hhea to a string and return it.
This gets the values for the file using the fallback
system as described below:
=========== ===
Ascender openTypeHheaAscender
Descender openTypeHheaDescender
LineGap openTypeHheaLineGap
CaretOffset openTypeHheaCaretOffset
=========== ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
ascender = getAttrWithFallback(self.font.info, "openTypeHheaAscender")
descender = getAttrWithFallback(self.font.info, "openTypeHheaDescender")
lineGap = getAttrWithFallback(self.font.info, "openTypeHheaLineGap")
caret = getAttrWithFallback(self.font.info, "openTypeHheaCaretOffset")
writer = FeatureTableWriter("hhea")
writer.addLineWithKeyValue("Ascender", _roundInt(ascender))
writer.addLineWithKeyValue("Descender", _roundInt(descender))
writer.addLineWithKeyValue("LineGap", _roundInt(lineGap))
writer.addLineWithKeyValue("CaretOffset", _roundInt(caret))
return writer.write()
def writeFeatures_name(self):
"""
Write the name to a string and return it.
This gets the values for the file using the fallback
system as described below:
========= ===
nameid 0 copyright
nameid 7 trademark
nameid 8 openTypeNameManufacturer
nameid 9 openTypeNameDesigner
nameid 10 openTypeNameDescription
nameid 11 openTypeNameManufacturerURL
nameid 12 openTypeNameDesignerURL
nameid 13 openTypeNameLicense
nameid 14 openTypeNameLicenseURL
nameid 19 openTypeNameSampleText
========= ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
idToAttr = [
(0 , "copyright"),
(7 , "trademark"),
(8 , "openTypeNameManufacturer"),
(9 , "openTypeNameDesigner"),
(10 , "openTypeNameDescription"),
(11 , "openTypeNameManufacturerURL"),
(12 , "openTypeNameDesignerURL"),
(13 , "openTypeNameLicense"),
(14 , "openTypeNameLicenseURL"),
(19 , "openTypeNameSampleText")
]
multilineNameTableEntries = {}
lines = []
for id, attr in idToAttr:
value = getAttrWithFallback(self.font.info, attr)
if value is None:
continue
s = 'nameid %d "%s";' % (id, winStr(value))
lines.append(s)
s = 'nameid %d 1 "%s";' % (id, macStr(value))
lines.append(s)
if not lines:
return ""
writer = FeatureTableWriter("name")
for line in lines:
writer.addLine(line)
return writer.write()
    def writeFeatures_OS2(self):
        """
        Write the OS/2 table to a string and return it.
        This gets the values for the file using the fallback
        system as described below:

        ============= ===
        FSType        openTypeOS2Type
        Panose        openTypeOS2Panose
        UnicodeRange  openTypeOS2UnicodeRanges
        CodePageRange openTypeOS2CodePageRanges
        TypoAscender  openTypeOS2TypoAscender
        TypoDescender openTypeOS2TypoDescender
        TypoLineGap   openTypeOS2TypoLineGap
        winAscent     openTypeOS2WinAscent
        winDescent    openTypeOS2WinDescent
        XHeight       xHeight
        CapHeight     capHeight
        WeightClass   openTypeOS2WeightClass
        WidthClass    openTypeOS2WidthClass
        Vendor        openTypeOS2VendorID
        ============= ===

        **This should not be called externally.** Subclasses
        may override this method to handle the string creation
        in a different way if desired.
        """
        # maps ulCodePageRange bit numbers to the Windows codepage numbers
        # expected by the FDK syntax; bits without a mapping are dropped.
        codePageBitTranslation = {
            0  : "1252",
            1  : "1250",
            2  : "1251",
            3  : "1253",
            4  : "1254",
            5  : "1255",
            6  : "1256",
            7  : "1257",
            8  : "1258",
            16 : "874",
            17 : "932",
            18 : "936",
            19 : "949",
            20 : "950",
            21 : "1361",
            48 : "869",
            49 : "866",
            50 : "865",
            51 : "864",
            52 : "863",
            53 : "862",
            54 : "861",
            55 : "860",
            56 : "857",
            57 : "855",
            58 : "852",
            59 : "775",
            60 : "737",
            61 : "708",
            62 : "850",
            63 : "437"
        }
        # writer
        writer = FeatureTableWriter("OS/2")
        # type: the fsType bit list is packed into a 16 bit number
        writer.addLineWithKeyValue("FSType", intListToNum(getAttrWithFallback(self.font.info, "openTypeOS2Type"), 0, 16))
        # panose
        panose = [str(i) for i in getAttrWithFallback(self.font.info, "openTypeOS2Panose")]
        writer.addLineWithKeyValue("Panose", " ".join(panose))
        # unicode ranges (omitted entirely when empty)
        unicodeRange = [str(i) for i in getAttrWithFallback(self.font.info, "openTypeOS2UnicodeRanges")]
        if unicodeRange:
            writer.addLineWithKeyValue("UnicodeRange", " ".join(unicodeRange))
        # code page ranges (omitted entirely when empty)
        codePageRange = [codePageBitTranslation[i] for i in getAttrWithFallback(self.font.info, "openTypeOS2CodePageRanges") if i in codePageBitTranslation]
        if codePageRange:
            writer.addLineWithKeyValue("CodePageRange", " ".join(codePageRange))
        # vertical metrics; winDescent is written as a magnitude
        writer.addLineWithKeyValue("TypoAscender", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoAscender")))
        writer.addLineWithKeyValue("TypoDescender", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoDescender")))
        writer.addLineWithKeyValue("TypoLineGap", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoLineGap")))
        writer.addLineWithKeyValue("winAscent", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2WinAscent")))
        writer.addLineWithKeyValue("winDescent", abs(_roundInt(getAttrWithFallback(self.font.info, "openTypeOS2WinDescent"))))
        writer.addLineWithKeyValue("XHeight", _roundInt(getAttrWithFallback(self.font.info, "xHeight")))
        writer.addLineWithKeyValue("CapHeight", _roundInt(getAttrWithFallback(self.font.info, "capHeight")))
        writer.addLineWithKeyValue("WeightClass", getAttrWithFallback(self.font.info, "openTypeOS2WeightClass"))
        writer.addLineWithKeyValue("WidthClass", getAttrWithFallback(self.font.info, "openTypeOS2WidthClass"))
        writer.addLineWithKeyValue("Vendor", '"%s"' % getAttrWithFallback(self.font.info, "openTypeOS2VendorID"))
        return writer.write()
# matches feature-file include statements: group 1 is "include(" plus any
# surrounding whitespace, group 2 the include path, group 3 the close paren.
# raw strings: "\s" / "\)" in plain literals are invalid escape sequences
# (SyntaxWarning/DeprecationWarning on modern Python).
includeRE = re.compile(
    r"(include\s*\(\s*)"
    r"([^\)]+)"
    r"(\s*\))" # this won't actually capture a trailing space.
)
forceAbsoluteIncludesInFeaturesTestText = """
# absolute path
include(/Users/bob/foo1/bar1/default.fea);
# relative path
include(foo2/bar2/default.fea);
# . syntax
include(./foo3/bar3/default.fea);
# .. syntax
include(../foo4/bar4/default.fea);
# spaces around path
include( foo5/bar5/default.fea );
"""
forceAbsoluteIncludesInFeaturesTestResult = """
# absolute path
include(/Users/bob/foo1/bar1/default.fea);
# relative path
include(/test1/test2/foo2/bar2/default.fea);
# . syntax
include(/test1/test2/foo3/bar3/default.fea);
# .. syntax
include(/test1/foo4/bar4/default.fea);
# spaces around path
include( /test1/test2/foo5/bar5/default.fea );
"""
def forceAbsoluteIncludesInFeatures(text, directory):
    """
    Convert relative includes in the *text*
    to absolute includes.

    >>> result = forceAbsoluteIncludesInFeatures(forceAbsoluteIncludesInFeaturesTestText, "/test1/test2")
    >>> result == forceAbsoluteIncludesInFeaturesTestResult
    True
    """
    # process matches back to front so the offsets of earlier matches stay
    # valid while the text is rewritten
    for match in reversed(list(includeRE.finditer(text))):
        start, includePath, close = match.groups()
        # absolute path: nothing to do
        if os.path.isabs(includePath):
            continue
        # relative path: walk leading "."/".." components, adjusting the
        # base directory, until the first real path component
        currentDirectory = directory
        parts = includePath.split(os.sep)
        for index, part in enumerate(parts):
            part = part.strip()
            if not part:
                continue
            # .. = up one level
            if part == "..":
                currentDirectory = os.path.dirname(currentDirectory)
            # . = current level
            elif part == ".":
                continue
            else:
                break
        # NOTE(review): `index` is the leaked loop variable from above; if a
        # path consisted only of "."/".." parts the slice would keep the
        # final component -- confirm such paths cannot occur here.
        subPath = os.sep.join(parts[index:])
        srcPath = os.path.join(currentDirectory, subPath)
        includeText = start + srcPath + close
        # splice the rewritten include back into the text
        text = text[:match.start()] + includeText + text[match.end():]
    return text
def _roundInt(value):
return int(round(value))
# ----------------------
# Basic Feature Splitter
# ----------------------

# raw strings throughout: "\s" / "\S" / "\{" in plain literals are invalid
# escape sequences (SyntaxWarning/DeprecationWarning on modern Python).

# a double quoted string containing neither '"' nor '$'
stringRE = re.compile(
    r"(\"[^$\"]*\")"
)
# the opening of either a feature block or a table block
featureTableStartRE = re.compile(
    r"("
    r"feature"
    r"\s+"
    r"\S{4}"
    r"\s*"
    r"\{"
    r"|"
    r"table"
    r"\s+"
    r"\S{4}"
    r"\s*"
    r"\{"
    r")",
    re.MULTILINE
)
# captures the four character tag of a feature block
featureNameRE = re.compile(
    r"feature"
    r"\s+"
    r"(\S{4})"
    r"\s*"
    r"\{"
)
# captures the four character tag of a table block
tableNameRE = re.compile(
    r"table"
    r"\s+"
    r"(\S{4})"
    r"\s*"
    r"\{"
)
def extractFeaturesAndTables(text, scannedFiles=None):
    """
    Parse feature file *text* and return a ``(features, tables)`` pair of
    dicts mapping four character feature/table tags to their full source
    text.  Included files that exist on disk are parsed recursively;
    *scannedFiles* lists paths already processed so include cycles and
    duplicates are skipped.
    """
    # a fresh list per call: the old mutable default ([]) accumulated
    # scanned paths across unrelated invocations, silently skipping
    # includes on later calls.
    if scannedFiles is None:
        scannedFiles = []
    # strip all comments
    decommentedLines = [line.split("#")[0] for line in text.splitlines()]
    text = "\n".join(decommentedLines)
    # replace all strings with temporary placeholders so string contents
    # cannot confuse the block-structure regexes.
    destringedLines = []
    stringReplacements = {}
    for line in text.splitlines():
        if "\"" in line:
            # protect escaped quotes while the string regex runs
            line = line.replace("\\\"", "__ufo2fdk_temp_escaped_quote__")
            for found in stringRE.findall(line):
                temp = "__ufo2fdk_temp_string_%d__" % len(stringReplacements)
                line = line.replace(found, temp, 1)
                stringReplacements[temp] = found.replace("__ufo2fdk_temp_escaped_quote__", "\\\"")
            line = line.replace("__ufo2fdk_temp_escaped_quote__", "\\\"")
        destringedLines.append(line)
    text = "\n".join(destringedLines)
    # extract all includes
    includes = []
    for match in includeRE.finditer(text):
        start, includePath, close = match.groups()
        includes.append(includePath)
    # slice off the text that comes before
    # the first feature/table definition
    startMatch = featureTableStartRE.search(text)
    if startMatch is not None:
        start, end = startMatch.span()
        text = text[start:]
    else:
        text = ""
    # break the features
    broken = _textBreakRecurse(text)
    # organize into tables and features
    features = {}
    tables = {}
    for text in broken:
        text = text.strip()
        if not text:
            continue
        # restore the string literals. iterate over a snapshot because
        # entries are deleted as they are consumed -- mutating a dict while
        # iterating its items() raises RuntimeError on Python 3.
        finalText = text
        for temp, original in list(stringReplacements.items()):
            if temp in finalText:
                del stringReplacements[temp]
                finalText = finalText.replace(temp, original, 1)
        finalText = finalText.strip()
        # grab feature or table names and store
        featureMatch = featureNameRE.search(text)
        if featureMatch is not None:
            features[featureMatch.group(1)] = finalText
        else:
            tableMatch = tableNameRE.search(text)
            tables[tableMatch.group(1)] = finalText
    # scan all includes
    for path in includes:
        if path in scannedFiles:
            continue
        scannedFiles.append(path)
        if os.path.exists(path):
            f = open(path, "r")
            text = f.read()
            f.close()
            f, t = extractFeaturesAndTables(text, scannedFiles)
            features.update(f)
            tables.update(t)
    return features, tables
def _textBreakRecurse(text):
    """
    Recursively split *text* into a list of chunks, each one beginning
    at a feature/table opening (as found by featureTableStartRE).

    **This should not be called externally.**
    """
    matched = []
    match = featureTableStartRE.search(text)
    if match is None:
        matched.append(text)
    else:
        start, end = match.span()
        # add any preceding text to the previous item
        # NOTE(review): when start != 0, `matched` is still empty here, so
        # pop(0) would raise IndexError.  Callers always pass text that
        # begins at a match (start == 0), so this branch appears to be
        # dead -- confirm before relying on it.
        if start != 0:
            precedingText = matched.pop(0)
            precedingText += text[:start]
            matched.insert(0, precedingText)
        # look ahead to see if there is another feature
        next = text[end:]
        nextMatch = featureTableStartRE.search(next)
        if nextMatch is None:
            # if nothing has been found, add
            # the remaining text to the feature
            matchedText = text[start:]
            matched.append(matchedText)
        else:
            # if one has been found, grab all text
            # from before the feature start and add
            # it to the current feature.
            nextStart, nextEnd = nextMatch.span()
            matchedText = text[:end + nextStart]
            matched.append(matchedText)
            # recurse through the remaining text
            matched += _textBreakRecurse(next[nextStart:])
    return matched
extractFeaturesAndTablesTestText = """
@foo = [bar];
# test commented item
#feature fts1 {
# sub foo by bar;
#} fts1;
feature fts2 {
sub foo by bar;
} fts2;
table tts1 {
nameid 1 "feature this { is not really a \\\"feature that { other thing is";
} tts1;feature fts3 { sub a by b;} fts3;
"""
extractFeaturesAndTablesTestResult = (
{
'fts2': 'feature fts2 {\n sub foo by bar;\n} fts2;',
'fts3': 'feature fts3 { sub a by b;} fts3;'
},
{
'tts1': 'table tts1 {\n nameid 1 "feature this { is not really a \\"feature that { other thing is";\n} tts1;'
}
)
def testBreakFeaturesAndTables():
    """
    Doctest-only wrapper exercising extractFeaturesAndTables against the
    module-level fixtures; the function body is intentionally empty.

    >>> r = extractFeaturesAndTables(extractFeaturesAndTablesTestText)
    >>> r == extractFeaturesAndTablesTestResult
    True
    """
if __name__ == "__main__":
    # run the module's doctests when executed directly
    import doctest
    doctest.testmod()
|
import logging
from rastervision.pytorch_learner.dataset import (ImageDataset, TransformType,
SlidingWindowGeoDataset,
RandomWindowGeoDataset)
log = logging.getLogger(__name__)
class ClassificationImageDataset(ImageDataset):
    """An ImageDataset preconfigured with classification transforms."""

    def __init__(self, *args, **kwargs):
        super().__init__(
            *args, **kwargs, transform_type=TransformType.classification)
class ClassificationSlidingWindowGeoDataset(SlidingWindowGeoDataset):
    """A SlidingWindowGeoDataset preconfigured with classification transforms."""

    def __init__(self, *args, **kwargs):
        super().__init__(
            *args, **kwargs, transform_type=TransformType.classification)

    def init_windows(self):
        # after the windows are laid out, have the scene's label source
        # generate labels for exactly those cells
        super().init_windows()
        self.scene.label_source.populate_labels(cells=self.windows)
class ClassificationRandomWindowGeoDataset(RandomWindowGeoDataset):
    """A RandomWindowGeoDataset preconfigured with classification transforms."""

    def __init__(self, *args, **kwargs):
        super().__init__(
            *args, **kwargs, transform_type=TransformType.classification)
|
from flask_restful import Resource
from flask import request
from main.server import app, cache, db
from main.server.models import Games, GameSchema
games_schema = GameSchema(many=True)
game_schema = GameSchema()
@app.after_request
def add_header(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Credentials': 'true',
        'Access-Control-Allow-Methods': 'GET,POST',
        'Access-Control-Allow-Headers': (
            'Access-Control-Allow-Headers, Origin,Accept, X-Requested-With, '
            'Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers'),
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
    return response
class GameListResource(Resource):
    @cache.cached(timeout=100)
    def get(self):
        """Get all games on the server."""
        games = Games.query.all()
        games = games_schema.dump(games)
        # an empty collection is reported with 206 rather than 200
        if not games:
            return {'status': 'success', 'games': games}, 206  # Partial Content Served
        return {'status': 'success', 'games': games}, 200
class GameCount(Resource):
    @cache.cached(timeout=100)
    def get(self):
        """Get the number of games available on the server."""
        return {'status': 'success', 'count': Games.query.count()}, 200
|
import gdsfactory as gf
def test_transition_ports() -> None:
    """Extruding a linear width transition keeps the endpoint cross sections on the ports."""
    width1 = 0.5
    width2 = 1.0
    x1 = gf.cross_section.strip(width=width1)
    x2 = gf.cross_section.strip(width=width2)
    xt = gf.path.transition(cross_section1=x1, cross_section2=x2, width_type="linear")
    path = gf.path.straight(length=5)
    c = gf.path.extrude(path, xt)
    # port "o1" carries the start cross section, "o2" the end cross section
    assert c.ports["o1"].cross_section.cross_section1.width == width1
    assert c.ports["o2"].cross_section.cross_section2.width == width2
if __name__ == "__main__":
    # test_transition_ports()
    # mirror the test inline for interactive inspection; reuse the named
    # widths in the cross sections so the asserts and the geometry cannot
    # drift apart (they previously repeated the literals 0.5 and 1.0).
    width1 = 0.5
    width2 = 1.0
    x1 = gf.cross_section.strip(width=width1)
    x2 = gf.cross_section.strip(width=width2)
    xt = gf.path.transition(cross_section1=x1, cross_section2=x2, width_type="linear")
    path = gf.path.straight(length=5)
    c = gf.path.extrude(path, xt)
    assert c.ports["o1"].cross_section.cross_section1.width == width1
    assert c.ports["o2"].cross_section.cross_section2.width == width2
    c.show()
|
from esl._esl._economics._finance import *
|
###################################################
# header_meshes.py
# This file contains declarations for meshes
# DO NOT EDIT THIS FILE!
###################################################
render_order_plus_1 = 0x00000001
|
import random
from app.fiftycents.entities.card import Card
class Deck():
    """A shuffled pile of Cards supporting draw/bury/cover operations.

    The deck holds ``size`` copies of every rank, each rank appearing
    ``Card(rank).count`` times, and is shuffled on creation unless
    ``empty`` is requested.
    """

    def __init__(self, size=1, empty=False):
        self.cards = []
        if not empty:
            self.cards = [Card(rank) for rank in Card.RANKS
                          for _count in range(Card(rank).count)
                          for _copy in range(size)]
            self._shuffle()

    def __repr__(self):
        return '<Deck {}>'.format(len(self.cards))

    def _shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.cards)

    def draw(self):
        """Remove and return the card on top (end) of the deck."""
        return self.cards.pop()

    def bury(self, card):
        """Place *card* at the bottom (front) of the deck."""
        self.cards.insert(0, card)

    def cover(self, card):
        """Place *card* on top (end) of the deck."""
        self.cards.append(card)
if __name__ == "__main__":
    # Deck has no _reset method (the previous `d._reset(3)` raised
    # AttributeError); build a fresh 3-pack deck directly instead.
    d = Deck(size=3)
    from collections import Counter
    counter = Counter([c.rank for c in d.cards])
    print(counter)
def double(arg: int) -> int:
    """Return *arg* multiplied by two."""
    return 2 * arg
# NOTE(review): `List`, `length_of_str`, and the `@` function-composition
# operator are not defined in this view -- presumably provided elsewhere
# (e.g. a helper that makes `double @ length_of_str` a composed callable).
# Confirm those definitions/imports exist before relying on this snippet.
x: List[str] = ['a', 'bb']
x1 = list(map(double @ length_of_str, x))
x2 = list(map(double, list(map(length_of_str, x))))
assert x1 == x2  # both values are [2, 4]
|
import logging
from corona_data_collector.tests.common import get_db_test_row, run_full_db_data_test
logging.basicConfig(level=logging.INFO)
# maps source DB column names to the corresponding corona_bot_answers
# output field names checked by run_full_db_data_test
TEST_FIELDS = {
    # db field corona_bot_answers field
    "version": "questionare_version",
    "routine_uses_public_transportation": "public_transportation_last_week",
    "routine_uses_public_transportation_bus": "public_transportation_bus",
    "routine_uses_public_transportation_train": "public_transportation_train",
    "routine_uses_public_transportation_taxi": "public_transportation_taxi",
    "routine_uses_public_transportation_other": "public_transportation_other",
    "routine_visits_prayer_house": "habits_prayer_house",
    "routine_wears_mask": "last_week_wear_mask",
    "routine_wears_gloves": "last_week_wear_gloves",
    "routine_last_asked": "routine_last_asked",
}
TEST_DATA = {
# id in DB:
# "db field": ("value in db", "value_in_corona_bot_answers")
94: {
"version": ("0.1.0",),
"routine_uses_public_transportation": (None, ""),
"routine_uses_public_transportation_bus": (None, ""),
"routine_uses_public_transportation_train": (None, ""),
"routine_uses_public_transportation_taxi": (None, ""),
"routine_uses_public_transportation_other": (None, ""),
"routine_visits_prayer_house": (None, ""),
"routine_wears_mask": (None, ""),
"routine_wears_gloves": (None, ""),
"routine_last_asked": (None, ""),
},
400000: {
"version": ("2.2.2",),
"routine_uses_public_transportation": (None, ""),
"routine_uses_public_transportation_bus": (None, ""),
"routine_uses_public_transportation_train": (None, ""),
"routine_uses_public_transportation_taxi": (None, ""),
"routine_uses_public_transportation_other": (None, ""),
"routine_visits_prayer_house": (None, ""),
"routine_wears_mask": (None, ""),
"routine_wears_gloves": (None, ""),
"routine_last_asked": (None, ""),
},
# get_db_test_row("3.0.0", "routine_uses_public_transportation", "false", show_fields=[
# "routine_uses_public_transportation", "routine_uses_public_transportation_bus", "routine_uses_public_transportation_train", "routine_uses_public_transportation_taxi", "routine_uses_public_transportation_other"
# ]) # 734885
734885: {
"version": ("3.0.0",),
"routine_uses_public_transportation": (False, "0"),
"routine_uses_public_transportation_bus": (None, "0"),
"routine_uses_public_transportation_train": (None, "0"),
"routine_uses_public_transportation_taxi": (None, "0"),
"routine_uses_public_transportation_other": (None, "0"),
"routine_visits_prayer_house": (False, "0"),
"routine_wears_mask": ("always", "3"),
"routine_wears_gloves": ("never", "0"),
"routine_last_asked": (1588837240349, "2020-05-07T07:40:40.000Z"),
},
# get_db_test_row("3.0.0", "routine_uses_public_transportation", "true", show_fields=[
# "routine_uses_public_transportation", "routine_uses_public_transportation_bus", "routine_uses_public_transportation_train", "routine_uses_public_transportation_taxi", "routine_uses_public_transportation_other"
# ]) # 732749 (taxi=null, train=null, bus=true, other=null)
732749: {
"version": ("3.0.0",),
"routine_uses_public_transportation": (True, "1"),
"routine_uses_public_transportation_bus": (True, "1"),
"routine_uses_public_transportation_train": (None, "0"),
"routine_uses_public_transportation_taxi": (None, "0"),
"routine_uses_public_transportation_other": (None, "0"),
"routine_visits_prayer_house": (False, "0"),
"routine_wears_mask": ("always", "3"),
"routine_wears_gloves": ("never", "0"),
"routine_last_asked": (1588795349500, "2020-05-06T20:02:29.000Z"),
},
# 732802 (taxi=true, train=null, bus=true, other=null)
732802: {
"version": ("3.0.0",),
"routine_uses_public_transportation": (True, "1"),
"routine_uses_public_transportation_bus": (True, "1"),
"routine_uses_public_transportation_train": (None, "0"),
"routine_uses_public_transportation_taxi": (True, "1"),
"routine_uses_public_transportation_other": (None, "0"),
"routine_visits_prayer_house": (False, "0"),
"routine_wears_mask": ("mostly_yes", "2"),
"routine_wears_gloves": ("mostly_no", "1"),
"routine_last_asked": (1588795887517, "2020-05-06T20:11:27.000Z"),
},
}
run_full_db_data_test(TEST_FIELDS, TEST_DATA)
|
def top3(products, amounts, prices):
|
#!/usr/bin/env python3
# Synthesise AnkleSoC on iCEStick, with some given RAM size and RAM init binary.
import sys
import os.path
sys.path.append(os.path.abspath(os.path.dirname(sys.argv[0]) + "/../hdl"))
import argparse
from nmigen import *
from nmigen_boards.icestick import ICEStickPlatform
from anklesoc import AnkleSoC
def anyint(x):
    """Parse *x* as an int, honoring 0x/0o/0b prefixes (base 0 semantics)."""
    return int(x, base=0)
def main(argv):
    """Parse command line options, then build (and optionally program) the SoC bitstream."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--ramload", "-r", help="Optional binary file to preload RAM")
    # sizes/addresses accept decimal, hex (0x...), octal or binary via base-0 parsing
    parser.add_argument("--memsize", default=4096, type=anyint)
    parser.add_argument("--resetvector", default=0x440000, type=anyint)
    parser.add_argument("--program", "-p", action="store_true")
    args = parser.parse_args(argv)
    mem_init = None
    if args.ramload is not None:
        # use a context manager so the preload image file is closed promptly
        # (the previous open(...).read() leaked the file handle)
        with open(args.ramload, "rb") as ramfile:
            mem_init = ramfile.read()
    top = AnkleSoC(ram_size_bytes=args.memsize, ram_init=mem_init, cpu_reset_vector=args.resetvector)
    ICEStickPlatform().build(top, program=args.program, synth_opts="-abc2")
if __name__ == "__main__":
main(sys.argv[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.