from pathlib import Path
from ...graphs import Graph003
from ...utils import BaseGraphSystemTester
from ....engine_input import SubprefixHijack
from ....engine import BGPSimpleAS
from ....engine import BGPAS
from ....engine import ROVSimpleAS
from ....engine import ROVAS
class BaseFig2Tester(BaseGraphSystemTester):
GraphInfoCls = Graph003
EngineInputCls = SubprefixHijack
base_dir = Path(__file__).parent
adopting_asns = [3, 4]
class Test007Fig2BGPSimple(BaseFig2Tester):
BaseASCls = BGPSimpleAS
AdoptASCls = BGPSimpleAS
class Test008Fig2BGP(BaseFig2Tester):
BaseASCls = BGPAS
AdoptASCls = BGPAS
class Test009Fig2ROVSimple(BaseFig2Tester):
BaseASCls = BGPSimpleAS
AdoptASCls = ROVSimpleAS
class Test010ROV(BaseFig2Tester):
BaseASCls = BGPAS
AdoptASCls = ROVAS
|
import subprocess as sub
import sys
import os
import uuid
import calendar
import datetime
import cPickle as pickle
import shutil
#input_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/input_info.txt', 'r')
input_file=open('/home/kploof/STICS_data_processing/input_info/input_info.txt', 'r')
file_lines = input_file.readlines()
input_file.close()
batch_info = [x.strip('\n') for x in file_lines]
#variables determined from batch info text file
start_date = ''
stop_date = ''
ion_list = []
coincidence = ''
save_dir = '/tmp/'
wtdc_write_dir = '/shrg1/wind/LV2_development/v2/'
ctr = 0
#handling input from text file
for batch_line in batch_info:
ctr = ctr + 1
if batch_line[0] == '#':
continue
elif 'ion list' in batch_line:
if ion_list:
print ('Input file has incorrect format')
print ('Multiple ion lists provided')
sys.exit(1)
name_and_vars = batch_line.split(':')
unfiltered_ion_list = name_and_vars[1]
ion_list = [ion.strip() for ion in unfiltered_ion_list.split(',')]
elif 'start date' in batch_line:
if start_date:
print ('Input file has incorrect format')
print ('Multiple start dates provided')
sys.exit(1)
name_and_vars = batch_line.split(':')
start_date = name_and_vars[1].strip()
elif 'stop date' in batch_line:
if stop_date:
print ('Input file has incorrect format')
print ('Multiple stop dates provided')
sys.exit(1)
name_and_vars = batch_line.split(':')
stop_date = name_and_vars[1].strip()
elif 'coincidence' in batch_line:
if coincidence:
print ('Input file has incorrect format')
print ('Multiple coincidences provided')
sys.exit(1)
name_and_vars = batch_line.split(':')
coincidence_full = name_and_vars[1].strip().lower()
if coincidence_full == 'double':
coincidence = '0'
elif coincidence_full == 'triple':
coincidence = '1'
else:
print ('Input file has incorrect format')
print ('Coincidence must be either double or triple. Different method selected in input')
sys.exit(1)
else:
print ('Invalid line (' + str(ctr) + ') in input file, skipping over')
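#Illustrative example (hypothetical values, not from a real run) of an
#input_info.txt this parser accepts; dates are yyyymmdd strings and lines
#starting with '#' are skipped:
#    # STICS batch configuration
#    ion list: He2+, O6+, Fe10+
#    start date: 20100101
#    stop date: 20100107
#    coincidence: double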
#make sure all necessary variables are included
if not ion_list:
print ('Input file has missing parameters')
print ('Must provide ion list')
sys.exit(1)
if not start_date:
print ('Input file has missing parameters')
print ('Must provide start date')
sys.exit(1)
if not stop_date:
print ('Input file has missing parameters')
print ('Must provide stop date')
sys.exit(1)
if not coincidence:
print ('Input file has missing parameters')
    print ('Must provide coincidence')
sys.exit(1)
#create directories for run
cur_time = datetime.datetime.now()
cur_time_str = cur_time.isoformat()
cur_date = str(cur_time_str.split('T')[0])
cur_date = cur_date[0:4]+cur_date[5:7]+cur_date[8:10]
processing_year = start_date[0:4]
salt = str(uuid.uuid4())
save_dir = save_dir + cur_date + '_' + processing_year + '_' + salt + '/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
wtdc_write_dir = wtdc_write_dir + processing_year + '/'
if not os.path.exists(wtdc_write_dir):
os.makedirs(wtdc_write_dir)
    #Create the per-coincidence output tree. os.makedirs builds any missing
    #parent directories, so creating the leaf directories yields the same
    #tree as listing every level explicitly.
    for coin_dir in ('double_coincidence/', 'triple_coincidence/'):
        for sub_dir in ('qsub/', 'AFM/msph/', 'AFM/sw/',
                        'AFM_plots/msph/', 'AFM_plots/sw/',
                        'ERPA/msph/', 'ERPA/sw/',
                        'ERPA_plots/msph/', 'ERPA_plots/sw/',
                        'moments/msph/', 'moments/sw/',
                        'VDF/msph/', 'VDF/sw/', 'pha_dump/'):
            os.makedirs(wtdc_write_dir + coin_dir + sub_dir)
#Specify Accumulation Time------------
# accum_time=30*60 #sec
accum_time=3*60 #sec
#Data type output options--------------
#0-counts, 1-PSD, 2-dJ/dE
# data_type_list=['C','DF','dJ']
data_type_list=['DF'] #just PSD, this should be the only one we have to run with the new reprocessed output files
data_type_dict=dict(C=0, DF=1, dJ=2)
#Create moment files?-----------
moment_toggle=1 #1= create file, 0=don't create
#########################End of inputs###############################
#preallocate list to store resultant filenames
dateFile_list=[];
#Generate list of start and stop dates
num_days=(datetime.date(int(stop_date[0:4]), int(stop_date[4:6]), int(stop_date[6:8])) -
datetime.date(int(start_date[0:4]), int(start_date[4:6]), int(start_date[6:8])) ).days+1
count = num_days * len(ion_list) - 1 # 0 index so subtract 1
base = datetime.date(int(start_date[0:4]), int(start_date[4:6]), int(start_date[6:8]))
date_list = [base + datetime.timedelta(days=x) for x in range(0, num_days)] #range cuts off last one
string_list=[temp.strftime('%Y%m%d') for temp in date_list]
year_month_day_list=string_list
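#Worked example (illustrative dates): start_date='20100101' and stop_date='20100103'
#give num_days=3, so year_month_day_list == ['20100101', '20100102', '20100103']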
if coincidence[0] == '0':
wtdc_write_dir = wtdc_write_dir + 'double_coincidence/'
else:
wtdc_write_dir = wtdc_write_dir + 'triple_coincidence/'
for k in xrange(len(year_month_day_list)):
for j in xrange(len(ion_list)):
for i in xrange(len(data_type_list)):
year_month_day=year_month_day_list[k]
ion=ion_list[j]
data_type=data_type_dict[data_type_list[i]]
#open pipe to "driver.pl" so I can make an output file
pipe=sub.Popen("/home/kploof/STICS_data_processing/scripts/batch_file_setup.pl", stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE) #setup piping
pipe.stdin.write(ion+"\n") #select ion
pipe.stdin.write(str(accum_time)+"\n") #select accumulation time
pipe.stdin.write("0.0\n") #selection efficiency threshold
pipe.stdin.write("0\n") #select dimensionality of data
pipe.stdin.write(str(coincidence)+"\n") #select TOF binning #edited to TOF (was previous MOQ) Vishnu 10-06-2018 0 = double coincidence, 1 = triple coincidence
pipe.stdin.write(str(data_type)+"\n") #select output data type
if data_type == 1: #if data type set to PSD
if moment_toggle:
pipe.stdin.write("1\n") #toggle for moment file creation, only asked this if output data type =1
#set lower SW velocity bound to 0.0
pipe.stdin.write("0.0\n") #set lower bound for SW velocity filer, only asks this if moment file
#create toggle is =1
else:
pipe.stdin.write("0\n")
pipe.stdin.write("/shrg1/wind/sw/wtdcLV2_Lite/SWE_Files/"+
year_month_day[0:4]+"_WIND_SWE_90Sec.dat"+"\n") #SWE data location
#ex. -> ../SWE_Files/2000_WIND_SWE_90Sec.dat, access the "year" portion of the date string
pipe.stdin.write(wtdc_write_dir + "\n") #data storage directory
pipe.stdin.write("2\n") #time range input select
pipe.stdin.write(year_month_day+"\n") #select day of year (yyyymmdd)
pipe.stdin.close() #close off input to file
while pipe.returncode is None: #this part seems to be included in online samples...
pipe.poll()
print(pipe.stdout.read())
print(pipe.stderr.read())
uniq_filename=('dateFile_'+year_month_day+'_'+ion+'_'+
data_type_list[i]+'_'+str(uuid.uuid4())+'.dat')
shutil.move('dateFile.dat', save_dir+uniq_filename) #had to change from os.rename since wind is mounted, and python renaming from one file system to another is not allowed
#want to save list of filenames generated
dateFile_list.append(save_dir+uniq_filename)
#save final list of file names
save_name='driver_file_list'
f1=open(save_dir+save_name+'.pkl', 'wb')
pickle.dump(dateFile_list, f1) #save to file
f1.close() #close file
print '# of dateFiles produced = '
print str(len(dateFile_list))
count = len(dateFile_list) - 1
#save information for qsub to use
count_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/counts.txt', 'w+')
count_file.write(str(count))
count_file.close()
savedir_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/input_dir.txt', 'w+')
savedir_file.write(save_dir)
savedir_file.close()
wtdc_write_dir_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/wtdc_write_dir.txt', 'w+')
wtdc_write_dir_file.write(wtdc_write_dir)
wtdc_write_dir_file.close()
coincidence_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/coincidence.txt', 'w+')
coincidence_file.write(coincidence[0])
coincidence_file.close()
year_file = open('/shrg1/wind/sw/STICS_data_processing/input_info/year.txt', 'w+')
year_file.write(processing_year)
year_file.close()
|
from CommonCode.convertJSONTOPb import ConvertJSONToPb
from CommonCode.convertPbToJSON import ConvertPbToJSON
from Services.sendPushNotificationService import SendPushNotificationService
from protobuff import pushnotification_pb2
class SendPushNotificationHandler:
@staticmethod
def snedNotification(builder):
service = SendPushNotificationService()
m_converter = ConvertJSONToPb()
m_convertPbtoJson = ConvertPbToJSON()
builder = m_converter.converjsontoPBProper(response=builder,
instanceType=pushnotification_pb2.PushNotificationRequestPb())
return m_convertPbtoJson.converPbtojson(service.sendNotification(pushNotificationRequestPb=builder))
|
#!/usr/bin/env python3
import argparse
from aircrushcore.cms import compute_node, compute_node_collection, session_collection, task_instance_collection
from aircrushcore.controller.configuration import AircrushConfig
from aircrushcore.dag import Workload
from aircrushcore.cms import *
from aircrushcore.datacommons.data_commons import DataCommons
from os.path import exists,dirname
import os,sys
import importlib
import getpass
#from operators import invoke_operator
import datetime
import ast
import subprocess
import socket
aircrush=None
crush_host=None
args=None
def ready():
#Sense readiness to do something
return True
def getOperatorClassDefinition(task_uuid:str):
#This function uses the task uuid to load the associated operator module
    #and return a class definition
task = TaskCollection(cms_host=crush_host).get_one(task_uuid) #fetch task definition
operator=task.field_operator #identify the operator to execute
module=f"{task.field_operator}_operator" #build filename for dynamic load
operator_module=importlib.import_module(f"operators.{module}") #dynamically import identified operator
    op_class=getattr(operator_module,operator) #get class definition
return op_class
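#Illustrative resolution (the operator name is hypothetical): a task whose
#field_operator is "bidsify" loads the module operators/bidsify_operator.py
#and returns the class named "bidsify" defined inside it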
def getMyComputeNodeUUID():
cluster=aircrush.config['COMPUTE']['cluster']
account=aircrush.config['COMPUTE']['account']
working_dir=aircrush.config['COMPUTE']['working_directory'] #os.environ.get("SCRATCH")
username=getpass.getuser()
metadata={
"title":f"{cluster}/{username}",
"field_host":cluster,
"field_username":username,
"field_password":"",
"field_working_directory":working_dir,
"cms_host":crush_host
}
cn_col = ComputeNodeCollection(cms_host=crush_host)
matching_cn = cn_col.get(filter=f"&filter[field_username][value]={username}&filter[field_host][value]={cluster}")
for match in matching_cn:
metadata['uuid']=match
break
n = ComputeNode(metadata=metadata)
nuid=n.upsert()
return nuid
def pullContainer(uri:str):
#return "requirements.txt" ##TODO
if (args.container):
return args.container
container_dir=aircrush.config['COMPUTE']['singularity_container_location']
sif = f"{container_dir}/{uri[uri.rfind('/')+1:len(uri)].replace(':','_')}.sif"
if os.path.isfile(sif):
print(f"Container exists - will not overwrite ({sif})")
return sif
cmdArray=["singularity","pull","--dir",container_dir,uri]
print(cmdArray)
ret = subprocess.call(cmdArray)
if not os.path.isfile(sif):
raise Exception(f"Failed to pull container specified. {sif} not found")
return sif
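#Illustrative URI-to-.sif mapping performed above (the URI is hypothetical):
#   uri = "docker://library/ubuntu:20.04"  ->  <container_dir>/ubuntu_20.04.sif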
def pull_source_data(project,subject,session):
wd=aircrush.config['COMPUTE']['working_directory']
datacommons=aircrush.config['COMPUTE']['commons_path']
#Test if we are on an HCP node, use sbatch to perform rsync if so
root=f"/projects/{project.field_path_to_exam_data}/datasets/source"
#If none of the if statements below match, the entire source will be cloned
print(f"start root:{datacommons}/{root}")
if os.path.isdir(f"{datacommons}/{root}/{subject.title}"):
root=f"{root}/{subject.title}"
if os.path.isdir(f"{datacommons}/{root}/{session.title}"):
root=f"{datacommons}/{root}/{session.title}"
if os.path.isdir(f"{datacommons}/{root}/sub-{subject.title}"):
root=f"{root}/sub-{subject.title}"
if os.path.isdir(f"{datacommons}/{root}/ses-{session.title}"):
root=f"{root}/ses-{session.title}"
print(f"new root: {root}")
source_session_dir=f"{datacommons}/{root}"
target_session_dir=f"{wd}/{root}"
# if os.path.isdir(target_session_dir):
# #It's already there, ignore quietly
# print(f"{target_session_dir} Already exists")
# return
# else:
print(f"Cloning ({source_session_dir}) to local working directory ({target_session_dir})")
os.makedirs(target_session_dir,exist_ok=True)
# ret = subprocess.getstatusoutput("which sbatch")
# if ret[0]==0:
# print("sbatch exists, starting asynchronous copy")
# else:
# print("SBATCH doesn't exist, performing synchronous copy")
if not os.path.isdir(source_session_dir):
raise Exception(f"Subject/session not found on data commons ({source_session_dir})")
rsync_cmd=f"rsync -r {source_session_dir} {target_session_dir}"
print(rsync_cmd)
ret = subprocess.getstatusoutput(rsync_cmd)
if ret[0]!=0:
raise Exception(f"Failed to copy session directory: {ret[1]}")
def pull_data(stage,project,subject,session):
if stage=="source":
pull_source_data(project,subject,session)
else:
wd=aircrush.config['COMPUTE']['working_directory']
##Get the hostname of cluster hosting data commons for remote rsync
##user must have setup known_hosts for unattended rsync
if aircrush.config['COMMONS']['data_transfer_node']:
data_transfer_node=aircrush.config['COMMONS']['data_transfer_node']
if not data_transfer_node=="":
if not data_transfer_node[-1]==":": #Add a colon to the end for rsync syntax if necessary
data_transfer_node=f"{data_transfer_node}:"
print(f"The data commons is found on another cluster {data_transfer_node} User must have setup unattended rsync using ssh-keygen for this process to be scheduled.")
else:
data_transfer_node=""
datacommons=aircrush.config['COMMONS']['commons_path']
root=f"projects/{project.field_path_to_exam_data}/datasets/{stage}/sub-{subject.title}/ses-{session.title}/"
source_session_dir=f"{data_transfer_node}{datacommons}/{root}"
target_session_dir=f"{wd}/{root}"
print(f"Cloning ({source_session_dir}) to local working directory ({target_session_dir})")
os.makedirs(target_session_dir,exist_ok=True)
print(f"DTN:[{data_transfer_node}]")
if data_transfer_node=="":
if not os.path.isdir(source_session_dir):
raise Exception(f"Subject/session not found on data commons ({source_session_dir})")
rsync_cmd=["rsync","-r",f"{source_session_dir}",f"{target_session_dir}"]
ret,out = getstatusoutput(rsync_cmd)
if ret!=0:
raise Exception(f"Failed to copy session directory: {out}")
def getstatusoutput(command):
print(command)
process = subprocess.Popen(command, stdout=subprocess.PIPE)
out, _ = process.communicate()
return (process.returncode, out)
def push_data(stage,project,subject,session,**kwargs):
if stage=="source":
print("ERROR: Source data is read-only. It cannot be pushed back to the data commons")
return
else:
wd=aircrush.config['COMPUTE']['working_directory']
##Get the hostname of cluster hosting data commons for remote rsync
##user must have setup known_hosts for unattended rsync
if aircrush.config['COMMONS']['data_transfer_node']:
data_transfer_node=aircrush.config['COMMONS']['data_transfer_node']
if not data_transfer_node=="":
if not data_transfer_node[-1]==":": #Add a colon to the end for rsync syntax if necessary
data_transfer_node=f"{data_transfer_node}:"
print(f"The data commons is found on another cluster {data_transfer_node} User must have setup unattended rsync using ssh-keygen for this process to be scheduled.")
else:
data_transfer_node=""
datacommons=aircrush.config['COMMONS']['commons_path']
#Test if we are on an HCP node, use sbatch to perform rsync if so
if stage=="derivatives":
if "pipelines" in kwargs:
pipelines=kwargs['pipelines']
for pipeline in pipelines:
root=f"projects/{project.field_path_to_exam_data}/datasets/{pipeline.field_id}/{stage}/sub-{subject.title}/ses-{session.title}/"
source_session_dir=f"{wd}/{root}"
target_session_dir=f"{data_transfer_node}{datacommons}/{root}"
print(f"Cloning ({source_session_dir}) back to data commons ({target_session_dir})")
print(f"DTN:[{data_transfer_node}]")
rsync_cmd=["rsync","-r",f"{source_session_dir}",f"{target_session_dir}"]
ret,out = getstatusoutput(rsync_cmd)
if ret!=0:
raise Exception(f"Failed to copy session directory: {out}")
else:
raise Exception("WARNING: You attepted to return derivatives to the data commons but did not specify which pipelines.")
else:
root=f"projects/{project.field_path_to_exam_data}/datasets/{stage}/sub-{subject.title}/ses-{session.title}/"
source_session_dir=f"{wd}/{root}"
target_session_dir=f"{data_transfer_node}{datacommons}/{root}"
print(f"Cloning ({source_session_dir}) back to data commons ({target_session_dir})")
print(f"DTN:[{data_transfer_node}]")
rsync_cmd=["rsync","-r",f"{source_session_dir}",f"{target_session_dir}"]
ret,out = getstatusoutput(rsync_cmd)
if ret!=0:
raise Exception(f"Failed to copy session directory: {out}")
def parameter_expansion(cmdArray,parms_to_add,**kwargs):
project=None
subject=None
session=None
workingdir=""
datacommons=""
pipeline=""
if 'project' in kwargs:
project=kwargs['project']
if 'subject' in kwargs:
subject=kwargs['subject']
if 'session' in kwargs:
session=kwargs['session']
if 'workingdir' in kwargs:
workingdir=kwargs['workingdir']
if 'datacommons' in kwargs:
datacommons=kwargs['datacommons']
if 'pipeline' in kwargs:
pipeline=kwargs['pipeline']
for k in parms_to_add:
parm= parms_to_add[k]
parm = parm.replace("#workingdir",workingdir)
parm = parm.replace("#datacommons",datacommons)
parm = parm.replace("#pipeline",pipeline)
parm = parm.replace("#subject",subject.title)
parm = parm.replace("#session",session.title)
parm = parm.replace('#project',project.field_path_to_exam_data)
parm = parm.replace('#datasetdir',f"{workingdir}/projects/{project.field_path_to_exam_data}/datasets/")
if not k[0:7]=="sbatch-":
cmdArray.append(f"--{k}")
cmdArray.append(parm)
return cmdArray
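#Illustrative expansion (parameter names and paths are hypothetical): given
#   parms_to_add = {"output": "#workingdir/sub-#subject/ses-#session", "sbatch-time": "01:00:00"}
#with workingdir="/scratch/aircrush", subject.title="001" and session.title="01",
#cmdArray gains ["--output", "/scratch/aircrush/sub-001/ses-01"]; the "sbatch-"
#prefixed key is skipped here and consumed by createJob instead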
def ini_settings():
homedir=os.path.expanduser('~')
crush_config=f"{homedir}/.crush.ini"
if not os.path.isfile(crush_config):
settings={}
settings['REST']={}
settings['COMPUTE']={}
settings['COMMONS']={}
print(f"Looks like this is your first time here. Let's get setup, settings will be stored in ~/.crush.ini")
conf = open(crush_config, "w")
        settings['REST']['endpoint']=input("What is the URL of your Aircrush CMS [http://20.63.59.9/]:")
if settings['REST']['endpoint'] == "":
settings['REST']['endpoint']= "http://20.63.59.9/"
settings['REST']['username']=input("Aircrush username:")
while settings['REST']['username']=="":
settings['REST']['username']=input("Aircrush username:")
settings['REST']['password']=input("Aircrush password:")
while settings['REST']['password']=="":
settings['REST']['password']=input("Aircrush password:")
hostname=os.environ.get("CC_CLUSTER")
if hostname==None:
hostname=socket.gethostname()
settings['COMPUTE']['cluster']=input(f"Cluster name [{hostname}]")
if settings['COMPUTE']['cluster']=="":
settings['COMPUTE']['cluster']=hostname
settings['COMPUTE']['account']=input("SLURM account to charge (e.g. def-username):")
while settings['COMPUTE']['account']=="":
settings['COMPUTE']['account']=input("SLURM account to charge (e.g. def-username):")
scratch=os.environ.get("SCRATCH")
if scratch==None:
scratch="~/scratch"
settings['COMPUTE']['working_directory']=input(f"Working directory for scratch [{scratch}/aircrush]:")
if settings['COMPUTE']['working_directory']=="":
settings['COMPUTE']['working_directory']=f"{scratch}/aircrush"
os.makedirs(settings['COMPUTE']['working_directory'])
settings['COMPUTE']['concurrency_limit']=input("Max concurrent jobs [10]:")
if settings['COMPUTE']['concurrency_limit']=="":
settings['COMPUTE']['concurrency_limit']=10
settings['COMPUTE']['seconds_between_failures']=input("Seconds to wait between failures[18000](default 5 hrs, providing time for mitigation):")
if settings['COMPUTE']['seconds_between_failures']=="":
settings['COMPUTE']['seconds_between_failures']=18000
settings['COMMONS']['commons_path']=input(f"Location of data commons. If DC is remote, provide path on that host. (e.g. ...[HERE]/projects/project-id/datasets/source):")
while settings['COMMONS']['commons_path']=="":
print("\thint: /home/username/projects/def-username/shared/")
settings['COMMONS']['commons_path']=input(f"Location of data commons (e.g. ...[HERE]/projects/project-id/datasets/source):")
        settings['COMMONS']['data_transfer_node']=input(f"Data transfer node of cluster hosting data commons (leave blank if the data commons is on this cluster):")
settings['COMPUTE']['singularity_container_location']=input(f"Location for storing active singularity containers [{settings['COMMONS']['commons_path']}/code/containers]:")
if settings['COMPUTE']['singularity_container_location']=="":
settings['COMPUTE']['singularity_container_location']=f"{settings['COMMONS']['commons_path']}/code/containers"
os.makedirs(settings['COMPUTE']['singularity_container_location'])
print("Writing file")
L = [
"[REST]\n",
f"username={settings['REST']['username']}\n",
f"password={settings['REST']['password']}\n",
f"endpoint={settings['REST']['endpoint']}\n\n",
"[COMPUTE]\n",
f"cluster={settings['COMPUTE']['cluster']}\n",
f"account={settings['COMPUTE']['account']}\n",
f"working_directory={settings['COMPUTE']['working_directory']}\n",
f"concurrency_limit={settings['COMPUTE']['concurrency_limit']}\n",
f"seconds_between_failures={settings['COMPUTE']['seconds_between_failures']}\n"
f"singularity_container_location={settings['COMPUTE']['singularity_container_location']}\n\n",
"[COMMONS]\n",
f"commons_path={settings['COMMONS']['commons_path']}\n",
f"data_transfer_node={settings['COMMONS']['data_transfer_node']}\n"
]
conf.writelines(L)
conf.close()
return AircrushConfig(crush_config)
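#Sketch of the ~/.crush.ini written above (all values are hypothetical):
#   [REST]
#   username=alice
#   password=********
#   endpoint=http://20.63.59.9/
#
#   [COMPUTE]
#   cluster=cedar
#   account=def-alice
#   working_directory=/scratch/alice/aircrush
#   concurrency_limit=10
#   seconds_between_failures=18000
#   singularity_container_location=/project/shared/code/containers
#
#   [COMMONS]
#   commons_path=/project/shared
#   data_transfer_node=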
def createJob(cmdArray,parms_to_add,**kwargs):
taskinstance_uid=kwargs['taskinstance_uid'] if 'taskinstance_uid' in kwargs else None
project=kwargs['project'].field_path_to_exam_data if 'project' in kwargs else ""
subject=kwargs['subject'].title if 'subject' in kwargs else ""
session=kwargs['session'].title if 'session' in kwargs else ""
workingdir=kwargs['workingdir'] if 'workingdir' in kwargs else aircrush.config['COMPUTE']['working_directory']
datacommons=kwargs['datacommons'] if 'datacommons' in kwargs else aircrush.config['COMPUTE']['commons_path']
pipeline=kwargs['pipeline'] if 'pipeline' in kwargs else ""
sbatch_time = parms_to_add['sbatch-time'] if 'sbatch-time' in parms_to_add else ""
sbatch_account = aircrush.config['COMPUTE']['account']
sbatch_cpus_per_task = parms_to_add['sbatch-cpus-per-task'] if 'sbatch-cpus-per-task' in parms_to_add else ""
sbatch_mem_per_cpu = parms_to_add['sbatch-mem-per-cpu'] if 'sbatch-mem-per-cpu' in parms_to_add else ""
if not os.path.exists(f"{workingdir}/jobs/{project}/{subject}"):
os.makedirs(f"{workingdir}/jobs/{project}/{subject}")
attempt=1
basefile=f"{workingdir}/jobs/{project}/{subject}/{session}_{taskinstance_uid}_{attempt}"
while os.path.isfile(f"{basefile}.sl"):
attempt+=1
basefile=f"{workingdir}/jobs/{project}/{subject}/{session}_{taskinstance_uid}_{attempt}"
jobfile=f"{basefile}.sl"
stdoutfile=f"{basefile}.out"
stderrfile=f"{basefile}.err"
conf = open(jobfile, "w")
L = [
"#!/bin/bash",
f"#SBATCH -e {stderrfile}",
f"#SBATCH -o {stdoutfile}",
f"#SBATCH --time {sbatch_time}" if not sbatch_time=="" else "",
f"#SBATCH --account {sbatch_account}" if not sbatch_account=="" else "",
f"#SBATCH --cpus-per-task {sbatch_cpus_per_task}" if not sbatch_cpus_per_task=="" else "",
f"#SBATCH --mem-per-cpu {sbatch_mem_per_cpu}" if not sbatch_mem_per_cpu=="" else "",
"module load singularity/3.8",
' '.join(cmdArray),
]
job_script = '\n'.join(L)
conf.write(job_script)
conf.close()
print(f"Slurm job written to {jobfile}")
toreturn={}
toreturn['jobfile']=jobfile
toreturn['stdout']=stdoutfile
toreturn['stderr']=stderrfile
return toreturn
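#Sketch of a generated job script (paths and resource values are hypothetical):
#   #!/bin/bash
#   #SBATCH -e /scratch/aircrush/jobs/proj/001/01_<taskinstance_uid>_1.err
#   #SBATCH -o /scratch/aircrush/jobs/proj/001/01_<taskinstance_uid>_1.out
#   #SBATCH --time 01:00:00
#   #SBATCH --account def-alice
#   module load singularity/3.8
#   singularity run --app <operator> -B <bind_dirs> <container>.sif --<param> <expanded value>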
def get_slurm_id(stdout:str):
print(f"given {stdout}")
lines=stdout.split('\n')
if len(lines)>0:
if lines[0][:20]=="Submitted batch job ":
tokens=lines[0].split()
jobid=tokens[-1]
return jobid
return 0
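#Example: on success sbatch prints "Submitted batch job 18982311", so this
#function returns "18982311"; any other output yields 0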
def get_seff_completion_state(stdout:str):
lines=stdout.split('\n')
for line in lines:
if line[:6]=="State:":
tokens=line.split()
status=tokens[1]
return status
return None
########### PENDING
# Job ID: 18982311
# Cluster: cedar
# User/Group: dmattie/dmattie
# State: PENDING
# Cores: 1
# Efficiency not available for jobs in the PENDING state.
return 'COMPLETED'
def check_running_jobs(node_uuid):
w=Workload(aircrush)
tis =w.get_running_tasks(node_uuid)
active_tis=len(tis)
reviewed_tis=active_tis
if active_tis>0:
print(f"Checking for status on {active_tis} jobs thought to be running on this compute node.")
for ti in tis:
if tis[ti].field_jobid:
#seff_cmd=f"/usr/bin/local/seff {tis[ti].field_jobid}"
seff_cmd=['seff',f"{tis[ti].field_jobid}"]
try:
                ret = subprocess.run(seff_cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, universal_newlines=True) #shell=True dropped: with a list argument it would discard the job id
# ret = subprocess.call(cmdArray)
if ret.returncode==0:
status=get_seff_completion_state(ret.stdout)
if status=='COMPLETED':
tis[ti].field_seff=ret.stdout
if tis[ti].field_logfile and os.path.isfile(tis[ti].field_logfile):
logfile = open(tis[ti].field_logfile,'r')
log_contents = logfile.read()
if len(log_contents)>2000:
log_contents=f"log file has been truncated. see output log for complete detail\n\n{log_contents[-2000:]}"
tis[ti].body=log_contents
updateStatus(tis[ti],"completed")
if status=='FAILED':
if tis[ti].field_errorfile and os.path.isfile(tis[ti].field_errorfile):
logfile = open(tis[ti].field_errorfile,'r')
error_contents = logfile.read()
if len(error_contents)>2000:
error_contents=f"log file has been truncated. see error log for complete detail\n\n{error_contents[-2000:]}"
tis[ti].field_errorlog=error_contents
                        if tis[ti].field_remaining_retries is None: #unset, not exhausted
tis[ti].field_remaining_retries=5
tis[ti].field_seff=ret.stdout
updateStatus(tis[ti],"failed")
else:
                            if tis[ti].field_remaining_retries==0:
tis[ti].field_seff=ret.stdout
updateStatus(tis[ti],"halted","Too many failed retries. This job will not continue without manual intervention")
else:
tis[ti].field_remaining_retries-=1
tis[ti].field_seff=ret.stdout
updateStatus(tis[ti],"failed")
reviewed_tis=reviewed_tis-1
except Exception as e:
print(f"Failed to execute seff, {e}")
if reviewed_tis > 0:
print(f"\t{reviewed_tis} jobs not accounted for")
else:
print("\tAll running jobs on this node accounted for and updated in CMS")
def doSomething():
nuid = "4d065840-dd33-44dc-be97-623e7d743bce" #dmattie on narval
#nuid = getMyComputeNodeUUID()
# check_running_jobs(nuid)
cascade_status_to_subject(nuid)
if args.statusonly:
return
w=Workload(aircrush) #The list of things to do
todo = w.get_next_task(node_uuid=nuid) #Do whatever the big brain tells us to do next
if(todo): #Todo is a TaskInstance
print("----------Got one-------------")
task_col = TaskCollection(cms_host=crush_host)
task = task_col.get_one(uuid=todo.field_task)
#execute(operator=task.field_operator,params=task.field_parameters)
#Invocation INFO
messages=[]
now = datetime.datetime.now()
try:
#updateStatus(todo,"running","","")
messages.append(f"Invoking operator: {task.field_operator} =====================")
messages.append(f"Current date and time: {str(now)}")
session_col = SessionCollection(cms_host=crush_host)
session = session_col.get_one(todo.field_associated_participant_ses)
subject=None
project=None
if not session == None:
subject = session.subject()
if not subject == None:
project = subject.project()
container = pullContainer(task.field_singularity_container)
workingdir=aircrush.config['COMPUTE']['working_directory']
datacommons=aircrush.config['COMMONS']['commons_path']
bindings=""
if args.bind:
mounts=args.bind.split()
for mount in mounts:
bindings=bindings+f"-B {mount} "
cmdArray=["singularity","run","--app",task.field_operator,bindings,container]
try:
parms = ast.literal_eval(task.field_parameters)
except:
msg=f"Parameters for task {task.field_operator} are malformed. Expected valid JSON string:{task.field_parameters}"
print(msg)
sys.exit(1)
# pullSession(project,subject,session)
# pulldata
print(f"Pulling any necessary data for operation")
#pull_data("source",project,subject,session)
pull_data("rawdata",project,subject,session)
#pull_data("derivatives",project,subject,session)
#
# for k in parms:
# if parms[k]=="source":
# pull_data("source",project,subject,session)
# if parms[k]=="rawdata":
# pull_data("rawdata",project,subject,session)
            # if parms[k]=="derivatives":
# pull_data("derivatives",project,subject,session)
print("Performing Parameter Keyword Expansion")
cmdArray = parameter_expansion(cmdArray,parms,
datacommons=datacommons,
workingdir=workingdir,
project=project,
subject=subject,
session=session)
#messages.append(f"cmdArray:{cmdArray}")
print("Creating SLURM job")
jobfiles = createJob(cmdArray,parms,
datacommons=datacommons,
workingdir=workingdir,
project=project,
subject=subject,
session=session,
taskinstance_uid=todo.uuid)
sbatch_cmd=["sbatch",jobfiles['jobfile']]
print(sbatch_cmd)
            ret = subprocess.run(sbatch_cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, universal_newlines=True) #shell=True dropped: with a list argument sbatch would run without the job file
if ret.returncode==0:
jobid=get_slurm_id(ret.stdout)
print(f"SLURM assingned job id:{jobid}")
if jobid!=0:
todo.field_jobid=jobid
todo.field_seff=""
todo.field_errorfile=jobfiles['stderr']
todo.field_logfile=jobfiles['stdout']
if os.path.isfile(jobfiles['jobfile']):
sbatch_contents_handle = open(jobfiles['jobfile'],'r')
sbatch_contents = sbatch_contents_handle.read()
messages.append(sbatch_contents)
updateStatus(todo,"running",'<br/>\n'.join(messages),ret.stderr)
else:
messages.append(f"\nERROR: SLURM ID returned was unexpectedly 0.")
updateStatus(todo,"failed",'<br/>\n'.join(messages),ret.stderr)
else:
updateStatus(todo,"failed",'<br/>\n'.join(messages),ret.stderr)
except Exception as e:
print(e)
print("there")
if hasattr(e, 'message'):
new_errors=e.message
else:
new_errors=str(e)
messages.append(f"Current date and time: {str(now)}")
updateStatus(todo,"failed",'<br/>\n'.join(messages),new_errors)
# op = invoke_operator(id=todo.uuid,cms_host=crush_host)
#op_class = getOperatorClassDefinition(todo.field_task) #use the task UUID to determine which operator to use
#op = op_class(id=todo.uuid,cms_host=crush_host) #instantiate it based on task instance
# op.execute(aircrush) #Let's do this!
else:
print("Nothing to do.")
# if(todo): #Todo is a TaskInstance
# print("-----------------------")
# op_class = getOperatorClassDefinition(todo.field_task) #use the task UUID to determine which operator to use
# op = op_class(id=todo.uuid,cms_host=crush_host) #instantiate it based on task instance
# op.execute(aircrush) #Let's do this!
# else:
# print("Nothing to do.")
#def execute(operator:str,params:str):#**kwargs):
def updateStatus(task_instance,status:str,detail:str="",new_errors:str=""):
# Valid statuses
###########################
# notstarted,running,failed,completed
task_instance.field_status=status
task_instance.body = detail
task_instance.field_errorlog = new_errors
print(f"Updating job status to CMS:{task_instance.field_jobid} ({task_instance.title}")
uuid=task_instance.upsert()
def cascade_status_to_subject(node_uuid):
node_col=ComputeNodeCollection(cms_host=crush_host);
node=node_col.get_one(uuid=node_uuid)
attached_sessions=node.allocated_sessions()
subjects_of_attached_sessions={}
for session_uuid in attached_sessions:
session=attached_sessions[session_uuid]
count_running=0
count_failed=0
count_completed=0
count_notstarted=0
pipelines={}
ti_col=TaskInstanceCollection(cms_host=crush_host,session=session.uuid)
tis_for_session=ti_col.get()
for ti in tis_for_session:
if tis_for_session[ti].field_status=='completed':
count_completed+=1
continue
if tis_for_session[ti].field_status=='running':
count_running+=1
continue
if tis_for_session[ti].field_status=='failed':
count_failed+=1
continue
count_notstarted+=1
if ti.field_pipeline:
pipelines[ti.field_pipeline]=ti.pipeline()
session.field_status=derive_parent_status(count_failed,count_running,count_completed,count_notstarted)
subject=session.subject()
subjects_of_attached_sessions[subject.uuid]=subject
project=subject.project()
if session.field_status=='completed':
push_data("rawdata",project,subject,session)
push_data("derivatives",project,subject,session,pipelines=pipelines)
session.field_responsible_compute_node=None #Free up a slot on compute node for more
session.upsert()
for subject in subjects_of_attached_sessions:
count_running=0
count_failed=0
count_completed=0
count_notstarted=0
ses_col=SessionCollection(cms_host=crush_host,subject=subject)
sessions_for_subject=ses_col.get()
for sess in sessions_for_subject:
if sessions_for_subject[sess].field_status=='completed':
count_completed+=1
continue
if sessions_for_subject[sess].field_status=='running':
count_running+=1
continue
if sessions_for_subject[sess].field_status=='failed':
count_failed+=1
continue
count_notstarted+=1
subjects_of_attached_sessions[subject].field_status=derive_parent_status(count_failed,count_running,count_completed,count_notstarted)
subjects_of_attached_sessions[subject].upsert()
def derive_parent_status(failed,running,completed,notstarted):
if running>0:
if failed>0:
return "limping"
if failed==0:
return "running"
if failed>0:
if notstarted==0:
return "failed"
else:
return "limping"
if completed>0:
if notstarted==0:
return "completed"
else:
return "running"
return "notstarted"
def doSync():
dc=DataCommons(aircrush)
dc.initialize()
dc.SyncWithCMS()
def purge():
endpoint = aircrush.config['REST']['endpoint']
    yn = input(f"Are you sure you want to delete all task instances, sessions and subjects from {endpoint} ? [N|y]")
if yn=='y' or yn=='Y':
print("Purging task instances")
ti_collection = TaskInstanceCollection(cms_host=crush_host)
tis = ti_collection.get()
print(f"\tfound {len(tis)} to delete")
for ti in tis:
tis[ti].delete()
print("Purging Subjects")
sub_collection=SubjectCollection(cms_host=crush_host)
subjects = sub_collection.get()
print(f"\tfound {len(subjects)} to delete")
for sub in subjects:
subjects[sub].delete()
print("Purging sessions")
ses_collection = SessionCollection(cms_host=crush_host)
sessions = ses_collection.get()
print(f"\tfound {len(sessions)} to delete")
for ses in sessions:
sessions[ses].delete()
def main():
global aircrush,crush_host,args
parser = argparse.ArgumentParser(
description="CRUSH client command line utility. Start all tasks with this command")
parser.add_argument('-sync',action='store_true',
help="Synchronize subjects and exams in the data commons with the CMS")
parser.add_argument('-container',action='store',type=str,
help="Specify a local container to override whatever the pipeline task intends to use.")
parser.add_argument('-purge',action='store_true',
help='Permanently remove all task instances, sessions, and subjects from the CMS' )
parser.add_argument('-republish',action='store_true',
help='For any objects in CMS that are unpublished, re-publish them if they probably should be')
parser.add_argument('-bind',action='store',type=str,
help='A comma separated list of directories that should be bound to the singularity container so files are accessible to the container')
parser.add_argument('-statusonly',action='store_true',
help="Update the status of running jobs, do not invoke more work")
args = parser.parse_args()
aircrush=ini_settings()
try:
crush_host=Host(
endpoint=aircrush.config['REST']['endpoint'],
username=aircrush.config['REST']['username'],
password=aircrush.config['REST']['password']
)
except Exception as e:
print("[ERROR] Unable to connect to CMS. Check host and settings found in ~/.crush.ini; in section [REST]. Is the CMS accessible?")
print(e)
sys.exit(1)
try:
os.putenv('AIRCRUSH_CONTAINERS',aircrush.config['COMPUTE']['singularity_container_location'])
except:
print("[ERROR] .ini setting now found; expected 'singularity_container_location' in section [COMPUTE]")
sys.exit(1)
if (args.sync):
doSync()
exit()
if (args.purge):
purge()
exit()
if ready():
doSomething()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Zenine
import urllib2
import sys
import sqlite3
import cookielib
from lxml import etree
from bs4 import BeautifulSoup
import time
#change the default string encoding (Python 2)
reload(sys)
#SQL_RESULT: SQL queries for cleaning the database; one more module is still unimplemented
#1.SELECT * FROM company58 WHERE email !='' AND LENGTH(PHONE_NO)==11 AND SUBSTR(PHONE_NO,1,1)=='1'
#2.SELECT * FROM company58 WHERE email =='' AND LENGTH(PHONE_NO)==11 AND SUBSTR(PHONE_NO,1,1)=='1'
#3.SELECT * FROM company58 WHERE not (LENGTH(PHONE_NO)==11 AND SUBSTR(PHONE_NO,1,1)=='1') and hr!=""
#print sys.getdefaultencoding()
sys.setdefaultencoding("utf-8")
headers = {'User-Agent': '"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0"'}
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
#set the database name and the city
city = "sh"
database="HR_hunter_%s.db"%city
conn = sqlite3.connect(database)
#settings section#
business_list={'zplvyoujiudian':'餐饮','jiazhengbaojiexin':'家政保洁/安保','meirongjianshen':'美容/美发','zpjiudian':'酒店/旅游','zpwentiyingshi':'娱乐/休闲','zpanmo':'保健按摩','zpjianshen':'运动健身','renli':'人事/行政/后勤','siji':'司机','zpguanli':'高级管理','yewu':'销售','kefu':'客服','zpshangwumaoyi':'贸易/采购','chaoshishangye':'超市/百货/零售','zptaobao':'淘宝职位','zpfangchan':'房产中介','shichang':'市场/媒介/公关','zpguanggao':'广告/会展/咨询','zpmeishu':'美术/设计/创意','zpshengchankaifa':'普工/技工','zpshengchan':'生产管理/研发','zpwuliucangchu':'物流/仓储','xiaofeipin':'服装/纺织/食品','zhikonganfang':'质控/安防','zpqiche':'汽车制造/服务','tech':'计算机/互联网/通信','zpjixieyiqi':'电子/电气','zpjixie':'机械/仪器仪表','zpfalvzixun':'法律','zhuanye':'教育培训','fanyizhaopin':'翻译','zpxiezuochuban':'编辑/出版/印刷','zpcaiwushenji':'财务/审计/统计','jinrongtouzi':'金融/银行/证券/投资','zpjinrongbaoxian':'保险','zpyiyuanyiliao':'医院/医疗/护理','zpzhiyao':'制药/生物工程','huanbao':'环保','zpfangchanjianzhu':'建筑','zpwuye':'物业管理','nonglinmuyu':'农/林/牧/渔业','zhaopin':'其他职位'}
city_list = {'bj': '北京', 'sh': '上海', 'gz': '广州', 'sz': '深圳', 'nj': '南京', 'hz': '杭州', 'su': '苏州', 'tj': '天津', 'cq':'重庆', 'cd':'成都', 'wh':'武汉','qd':'青岛','wx':'无锡','dl':'大连'}
def get_company(city ,business ,page):
    #scrape company listings from the desktop site
url = "http://" + city + ".58.com/" + business + "/pn" + str(page) + "/?PGTID=0d30365b-0004-f8b5-cb93-88ed5156515e&ClickID=1"
req = urllib2.Request(url, headers = headers)
content = urllib2.urlopen(req, timeout=60).read()
if isinstance(content, unicode):
pass
else:
content = content.encode('utf-8')
bsobj = BeautifulSoup(content, "lxml")
company = bsobj.find_all(["a"], class_="fl")
for each in company:
company_name = each.get("title")
company_url = each.get("href")
for i in range(2, len(company_url)):
if company_url[-i] == '/':
company_id = company_url[-i+1: -1]
break
if company_name == None or company_id.isdigit() != True:
continue
save_company(city,company_name,company_id,company_url,business,page)
# print company_url, company_id, company_name, get_info(company_id)
def get_info(id):
    #return contact info scraped from the mobile site
url="http://qy.m.58.com/m_detail/"+str(id)
req = urllib2.Request(url, headers=headers)
content = urllib2.urlopen(req, timeout=60).read()
# if isinstance(content, unicode):
# pass
# else:
# content = content.encode('GBK')
htmlSource = etree.HTML(content)
#company_name=htmlSource.xpath("/html/body/div[1]/div[3]/h1")[0].text
try:
phone=htmlSource.xpath("/html/body/div[1]/div[5]/div/div[2]/dl/dd[1]/p/span[1]")[0].text
hr=htmlSource.xpath("/html/body/div[1]/div[5]/div/div[2]/dl/dd[1]/p/span[2]")[0].text.encode('raw_unicode_escape')
except Exception as e:
print id
return ["" ,"" ,""]
try:
email = htmlSource.xpath("/html/body/div[1]/div[5]/div/div[2]/dl/dd[2]")[0].text
if email == "\r\n\t\t\t\t\t\t":
email = ""
except Exception as e:
email = ""
return [phone, hr, email]
def save_company(city, company_name, company_id, company_url, business, page):
    #save to the database
sql_inq="select count(*) from company58 WHERE COMPANY_ID='" +company_id+"'"
cu = conn.cursor()
cu.execute(sql_inq)
result = cu.fetchone()
if result[0] :
return
[phone, hr, email]=get_info(company_id)
sql_insert = "insert into company58(CITY,COMPANY_NAME,COMPANY_ID,COMPANY_URL,BUSINESS,PAGE,HR,PHONE_NO,EMAIL)values("\
+ "'" + city + "'," + "'" + company_name + "'" + ",'" + str(company_id) + "','" + \
company_url + "','" + business_list[business]+ "','" +str(page) + "','"+str(hr)+"','"+phone+"','"+email+"')"
cu.execute(sql_insert)
conn.commit()
time.sleep(1)
def patch():
    #patch: backfill rows whose HR field is empty
sql_inq = "select * from company58 where HR='' Order by company_ID"
cu = conn.cursor()
cu.execute(sql_inq)
result = cu.fetchall()
length = len(result)
count = 0
for i in range(length):
try:
[phone, hr, email] = get_info(result[i][6])
        except Exception as e :
            print e, result[i][6]
            continue #skip this row; otherwise stale values from the previous iteration would be written
if hr == "":
continue
sql_update = "update company58 set HR= '" + hr + "', PHONE_NO= '" + phone + "', EMAIL= '" + email + "'WHERE COMPANY_ID= '" + result[i][6] + "'"
cu.execute(sql_update)
conn.commit()
count += 1
print sql_update
time.sleep(4)
print "updated:" + str(count) + " all :" + str(length)
# print result
def patch2():
sql_inq = "select * from company58 where email like '% %' "
cu = conn.cursor()
cu.execute(sql_inq)
result = cu.fetchall()
length = len(result)
for i in range(length):
sql_update = "update company58 set EMAIL= '' WHERE COMPANY_ID= '" + result[i][6] + "'"
cu.execute(sql_update)
conn.commit()
print str(length) + "EMAIL have been updated"
for business in business_list:
for page in range(1, 100):
try:
get_company(city, business, page)
except urllib2.HTTPError, e:
print('HTTPError: ', e.code, city, business, page,)
except Exception as e:
print e, city, business, page,
patch()
patch2()
conn.close()
|
import argparse
import json
import os
from tqdm import tqdm
from experiment import *
from results import MultipleResults
from joblib.parallel import Parallel, delayed
parser = None
folder = None
run_times = None
random_seed = None
def create_model(model_params):
if 'ridge_model' in model_params:
model_name = 'ridge_model'
model = lambda : ridge_model(**model_params['ridge_model'])
print(f"Using model: {model_name}")
yield model, model_name
def create_dataset(dataset_params):
if 'make_regression' in dataset_params:
data_name = "make_regression"
print(f"Using dataset: {data_name}")
dataset = lambda : get_synthetic_dataset(**dataset_params['make_regression'])
yield dataset, data_name
def single_model(dataset_params, model_params, params):
print(f"Running single-model experiment")
for dataset, data_name in create_dataset(dataset_params):
X, y, _ = dataset()
for model, model_name in create_model(model_params):
for trial in tqdm(range(0, run_times)):
save_to = f"{folder}/{data_name}/{trial}"
os.makedirs(f"{save_to}", exist_ok=True)
prepare_params = {k: params[k] for k in params.keys() & {'train_size'}}
prepare_params.update({k: dataset_params[data_name][k]
for k in dataset_params[data_name].keys() & {'use_log'}})
single_model_experiment(X, y, model,
model_name=f"{save_to}/{model_name}", **prepare_params)
def hidden_loop(dataset_params, model_params, params):
print(f"Running hidden-loop experiment")
for dataset, data_name in create_dataset(dataset_params):
X, y, _ = dataset()
for model, model_name in create_model(model_params):
hle_results = MultipleResults(model_name, **HiddenLoopExperiment.default_state)
def process_trial(trial):
hle_local = MultipleResults(model_name, **HiddenLoopExperiment.default_state)
hle = HiddenLoopExperiment(X, y, model, model_name)
prepare_params = {k: params[k] for k in params.keys() & {'train_size', 'A'}}
prepare_params.update({k: dataset_params[data_name][k]
for k in dataset_params[data_name].keys() & {'use_log'}})
hle.prepare_data(**prepare_params)
loop_params = {k: params[k] for k in params.keys() & {'adherence', 'usage', 'step'}}
hle.run_experiment(**loop_params)
hle_local.add_state(trial=trial, **vars(hle))
return hle_local
results = Parallel(n_jobs=-1, verbose=10)(delayed(process_trial)(trial) for trial in range(0, run_times))
for hle_result in results:
hle_results.add_results(**hle_result.get_state)
target_folder = f"{folder}/{data_name}"
os.makedirs(target_folder, exist_ok=True)
hle_results.plot_multiple_results(target_folder, **HiddenLoopExperiment.default_figures)
hle_results.save_state(f"{target_folder}")
def init_random(random_seed):
return init_random_state(random_seed)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("kind", type=str, help="Kind of experiment: single-model, hidden-loop")
parser.add_argument("--params", type=str, help="A json string with experiment parameters")
parser.add_argument("--model_params", type=str, help="A json string with model name and parameters")
parser.add_argument("--folder", type=str, help="Save results to this folder", default="./results")
parser.add_argument("--random_seed", type=int, help="Use the provided value to init the random state", default=42)
parser.add_argument("--run_times", type=int, help="How many time to repeat the trial", default=1)
parser.add_argument("--dataset", type=str, help="Name of the dataset ('boston') or json for make_regression", default='\"boston\"')
args = parser.parse_args()
model_str = args.model_params
params_str = args.params
dataset_str = args.dataset
kind = args.kind
folder = args.folder
random_seed = args.random_seed
run_times = args.run_times
os.makedirs(folder, exist_ok=True)
model_dict = json.loads(model_str)
params_dict = json.loads(params_str)
dataset_params = json.loads(dataset_str)
init_random_state(random_seed)
if kind == "single-model":
single_model(dataset_params, model_dict, params_dict)
elif kind == "hidden-loop":
hidden_loop(dataset_params, model_dict, params_dict)
else:
parser.error("Unknown experiment kind: " + kind)
|
import unittest
import os
from DockerBuildSystem import DockerComposeTools
from DockerFeed.Handlers.StackHandler import StackHandler
from tests import TestUtilities
class TestStackHandler(unittest.TestCase):
def test_DeployRemove(self):
handler: StackHandler = TestUtilities.CreateStackHandler()
handler.Init()
TestUtilities.AssertInfrastructureExists(True)
handler.Deploy(['nginx_test'])
TestUtilities.AssertStacksExists(["nginx_test"], True)
handler.Remove(['nginx_test'])
TestUtilities.AssertStacksExists(["nginx_test"], False)
handler.Deploy(['nginx_test>=1.1.0'])
TestUtilities.AssertStacksExists(["nginx_test"], True)
handler.Remove(['nginx_test'])
TestUtilities.AssertStacksExists(["nginx_test"], False)
self.assertRaises(Exception, handler.Deploy, ['nginx_test>=2.1.0'])
self.assertRaises(Exception, handler.Deploy, ['non_existent_nginx_test>=2.1.0'])
def test_DeployOnlyValidStacks(self):
handler: StackHandler = TestUtilities.CreateStackHandler(source="tests/invalidTestStacks", verifyStacksOnDeploy=True)
handler.Init()
TestUtilities.AssertInfrastructureExists(True)
handler.Deploy(['nginx_test_invalid_config'])
TestUtilities.AssertStacksExists(["nginx_test_invalid_config"], False)
handler.Deploy(['nginx_test_invalid_secret'])
TestUtilities.AssertStacksExists(["nginx_test_invalid_secret"], False)
handler.Deploy(['nginx_test_invalid_volume'])
TestUtilities.AssertStacksExists(["nginx_test_invalid_volume"], False)
handler.Deploy(['nginx_test_invalid_port'])
TestUtilities.AssertStacksExists(["nginx_test_invalid_port"], False)
def test_InitPrune(self):
handler: StackHandler = TestUtilities.CreateStackHandler()
handler.Init()
TestUtilities.AssertInfrastructureExists(True)
handler.Deploy()
TestUtilities.AssertStacksExists(['nginx_test'], True)
handler.Prune()
TestUtilities.AssertStacksExists(['nginx_test'], False)
stacks = handler.List()
self.assertGreater(len(stacks), 0)
TestUtilities.AssertStacksExists(stacks, False)
def test_List(self):
handler: StackHandler = TestUtilities.CreateStackHandler()
stacks = handler.List()
self.assertFalse('nginx_test==1.0.0' in stacks)
self.assertFalse('nginx_test==1.1.0' in stacks)
self.assertTrue('nginx_test==1.1.1' in stacks)
self.assertTrue('nginx_test_digest==1.1.0' in stacks)
stacks = handler.List(['nginx_test>1.0.0'])
self.assertFalse('nginx_test==1.0.0' in stacks)
self.assertFalse('nginx_test==1.1.0' in stacks)
self.assertTrue('nginx_test==1.1.1' in stacks)
def test_Verify(self):
handler: StackHandler = TestUtilities.CreateStackHandler()
self.assertFalse(handler.Verify())
self.assertTrue(handler.Verify(['nginx_test_digest']))
def test_Run(self):
DockerComposeTools.DockerComposeBuild(["tests/testBatchStacks/docker-compose.batch.1.0.0.yml"])
handler: StackHandler = TestUtilities.CreateStackHandler(source="tests/testBatchStacks",
                                                                swmInfrastructureFiles=["tests/testBatchStacks/swarm.management.yml"])
handler.Init()
os.environ['SHOULD_FAIL'] = 'false'
self.assertTrue(handler.Run())
os.environ['SHOULD_FAIL'] = 'true'
self.assertFalse(handler.Run(['batch==1.0.0']))
if __name__ == '__main__':
unittest.main()
|
import h5py
import numpy as np
import matplotlib.pyplot as plt
original_results = '../output/cell_vars.h5'
def plot_figs(nrn_results=None):
orig_h5 = h5py.File(original_results, 'r')
new_h5 = h5py.File(nrn_results, 'r')
gids = np.array(new_h5['/mapping/gids'])
indx_ptrs = np.array(new_h5['/mapping/index_pointer'])
for plot_num, (gid, indx) in enumerate(zip(gids, indx_ptrs)):
new_voltage = np.array(new_h5['/v/data']) if len(new_h5['/v/data'].shape) == 1 else np.array(new_h5['/v/data'][:, indx])
orig_voltage = np.array(orig_h5['/v/data'][:, gid])
plt.subplot(1, 1, 1)
times = np.linspace(0.0, 4000.0, len(orig_voltage))
plt.plot(times, new_voltage[1:], label='NEURON')
plt.plot(times, orig_voltage, label='AIBS')
plt.legend()
plt.title('gid = {}'.format(gid))
plt.show()
#plot_figs('cellvar_gid0_472363762_Scnn1a.h5')
#plot_figs('cellvar_gid1_473863510_Rorb.h5')
#plot_figs('cellvar_gid2_473863035_Nr5a1.h5')
#plot_figs('cellvar_gid3_472912177_PV1.h5')
plot_figs('cellvar_gid4_473862421_PV2.h5')
|
import setuptools
with open("README.md", "rb") as fh:
long_description = fh.read().decode('utf-8')
setuptools.setup(
name="packel",
version="1.0.1",
author="kiwec",
author_email="c.wolf@kiwec.net",
description="Packet serialization/deserialization in a Pythonic way.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kiwec/packel",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: Public Domain",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
class RemovedInWagtailMenus31Warning(DeprecationWarning):
pass
removed_in_next_version_warning = RemovedInWagtailMenus31Warning
class RemovedInWagtailMenus32Warning(PendingDeprecationWarning):
pass
class RemovedInWagtailMenus33Warning(PendingDeprecationWarning):
pass
|
## Copyright (c) Microsoft Corporation.
## Licensed under the MIT License.
# empty __init__.py file to allow this directory to be imported from elsewhere
|
import datetime
import re
import gws
import gws.tools.misc
import gws.types as t
_default_editor = {
t.AttributeType.bool: 'checkbox',
t.AttributeType.bytes: 'file',
t.AttributeType.date: 'date',
t.AttributeType.datetime: 'datetime',
t.AttributeType.float: 'float',
t.AttributeType.geometry: '',
t.AttributeType.int: 'int',
t.AttributeType.intlist: 'list',
t.AttributeType.str: 'str',
t.AttributeType.text: 'text',
t.AttributeType.time: 'time',
}
#:export
class ModelEditor(t.Data):
accept: t.Optional[str]
items: t.Optional[t.Any]
max: t.Optional[float]
min: t.Optional[float]
multiple: t.Optional[bool]
pattern: t.Optional[str]
type: str
#:export
class ModelRule(t.Data):
"""Attribute conversion rule"""
editable: bool = True #: attribute is editable
editor: t.Optional[ModelEditor]
expression: str = '' #: attribute conversion expression
format: t.FormatStr = '' #: attribute formatter
name: str = '' #: target attribute name
source: str = '' #: source attribute name
title: str = '' #: target attribute title
type: t.AttributeType = 'str' #: target attribute type
value: t.Optional[str] #: constant value
class Config(t.Config):
"""Data model"""
crs: t.Optional[t.Crs] #: CRS for this model
geometryType: t.Optional[t.GeometryType] #: specific geometry type
rules: t.List[ModelRule] #: attribute conversion rules
class ModelRuleProps(t.Props):
editable: bool
editor: ModelEditor
name: str
title: str
type: str
class ModelProps(t.Props):
geometryType: str
crs: str
rules: t.List[ModelRuleProps]
#:export IModel
class Object(gws.Object, t.IModel):
def configure(self):
super().configure()
p = self.var('rules', default=[])
self.rules: t.List[t.ModelRule] = [self._configure_rule(r) for r in p]
self.geometry_type: t.GeometryType = self.var('geometryType')
self.geometry_crs: t.Crs = self.var('crs')
@property
def props(self):
return ModelProps(
rules=[
ModelRuleProps(
name=r.name,
editable=r.editable,
editor=r.editor,
title=r.title,
type=r.type,
) for r in self.rules
],
geometryType=self.geometry_type,
crs=self.geometry_crs,
)
def apply(self, atts: t.List[t.Attribute]) -> t.List[t.Attribute]:
return self.apply_to_dict({a.name: a.value for a in atts})
def apply_to_dict(self, d: dict) -> t.List[t.Attribute]:
return [t.Attribute(
title=r.title,
name=r.name,
value=self._apply_rule(r, d),
type=r.type,
editable=r.editable,
) for r in self.rules]
@property
def attribute_names(self) -> t.List[str]:
"""List of attributes used by the model."""
names = set()
for r in self.rules:
if r.get('value'):
continue
if r.get('source'):
names.add(r.source)
continue
if r.get('format'):
names.update(re.findall(r'{([\w.]+)', r.format))
continue
names.add(r.name)
return sorted(names)
def _apply_rule(self, rule: t.ModelRule, d: dict):
s = rule.get('value')
if s is not None:
return s
s = rule.get('source')
if s:
# @TODO
if rule.get('type') == 'bytes':
return None
return d.get(s)
s = rule.get('format')
if s:
if '{' not in s:
return s
return gws.tools.misc.format_placeholders(s, d)
# no value/source/format present - return values[name]
return d.get(rule.name, '')
def _configure_rule(self, r):
rule = t.ModelRule(r)
name = rule.get('name')
title = rule.get('title')
if not name:
name = gws.as_uid(title) if title else rule.get('source')
if not name:
raise gws.Error('missing attribute name')
rule.name = name
rule.title = title or name
rule.type = rule.get('type') or t.AttributeType.str
if not rule.editor:
rule.editor = ModelEditor(type=_default_editor.get(rule.type, 'str'))
return rule
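# Illustrative rule application (rule and attribute values are hypothetical):
# a rule configured as {"name": "label", "title": "Label", "format": "{name} ({id})"}
# applied via apply_to_dict({"name": "Lake", "id": 7}) yields an Attribute named
# "label" whose value comes from gws.tools.misc.format_placeholders substituting
# {name} and {id} from the dict.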
|
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
import sys # to write the output while running unit tests
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is not available"""
# override the behavior of the ConnectionHandler
# use patch to mock the ConnectionHandler
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            # __getitem__ is called whenever a database connection is retrieved
            # patch mocks it and binds the mock object to the variable 'gi'
            # during the test the mock overrides Django's normal behavior,
            # returning 'True' instead of a real connection, and lets us check
            # how many times the command tried to access the database
gi.return_value = True
call_command('wait_for_db') # testing the command 'wait_for_db'
self.assertEqual(gi.call_count, 1) # testing the command is called
# once
sys.stderr.write(repr('test 1 is done') + '\n')
@patch('time.sleep', return_value=True) # in the test it won't wait the
    # actual seconds; replacing time.sleep's behavior just speeds up the test
    # the decorator form works like the 'with patch(...)' block in the first
    # test and passes the mock in as an argument to test_wait_for_db, which is
    # why the unused 'ts' parameter exists
def test_wait_for_db(self, ts):
"""test waiting for db"""
        # the number 6 keeps the test reasonable in real-time execution;
        # any number higher than 1 can be used here
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
# 5 times raise the operational error, and the 6th time doesn't
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
sys.stderr.write(repr('test 2 is done') + '\n')
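# For reference, a minimal sketch of the 'wait_for_db' command these tests
# exercise (an assumption about its implementation, not the project's actual
# code; it would live in a management/commands/wait_for_db.py module):
#
#   import time
#   from django.core.management.base import BaseCommand
#   from django.db import connections
#   from django.db.utils import OperationalError
#
#   class Command(BaseCommand):
#       """Pause execution until the database connection is available."""
#       def handle(self, *args, **options):
#           self.stdout.write('Waiting for database...')
#           db_conn = None
#           while not db_conn:
#               try:
#                   db_conn = connections['default']
#               except OperationalError:
#                   self.stdout.write('Database unavailable, waiting 1 second...')
#                   time.sleep(1)
#           self.stdout.write('Database available!')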
|
from .__main__ import FFTTransform
__all__ = [
'FFTTransform'
]
|
import argparse
import json
from . import categories, category, palette, search
def main() -> int:
parser = argparse.ArgumentParser(
description='Search for emoji information')
group = parser.add_mutually_exclusive_group()
group.add_argument('--search', type=str, required=False, default="",
help='Emoji to search for')
group.add_argument('--category', type=str, required=False, default="",
help='Category to get list of emojis')
group.add_argument('--categories', required=False, action='store_true', default=False,
help='Get list of emoji categories')
group.add_argument('--palette', required=False, action='store_true', default=False,
help='Get JSON object of all categories and their emojis')
args = parser.parse_args()
if args.search:
print(json.dumps(dict(search(args.search)), ensure_ascii=False))
elif args.category:
print(category(args.category))
elif args.palette:
print(json.dumps(dict(palette()), ensure_ascii=False))
elif args.categories:
print(categories)
else:
parser.error("Invalid command")
return 1
return 0
if __name__ == "__main__":
exit(main())
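# Illustrative invocations (the module name "emoji_tool" and the arguments are
# placeholder assumptions; substitute the real package name):
#   python -m emoji_tool --search grinning
#   python -m emoji_tool --categories
#   python -m emoji_tool --category smileys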
|
from app.main import db
import jwt
from app.main.models.ChallengesModel import ChallengesModel
from app.main.models.TestCasesModel import TestCasesModel
import json
def save_changes(data):
try:
db.session.add(data)
db.session.commit()
except Exception as e:
db.session.rollback()
def change(row):
row = dict(row)
row["created_at"] = str(row["created_at"])
return row
def get_all_challenge():
challenges = db.engine.execute(
"select challenges.challenge_name,challenges.id, count(test_cases.id) as count from challenges join test_cases on challenges.id=test_cases.challenge_id group by challenges.id")
# print(challenges.created_at)
challenges = [dict(row) for row in challenges]
return {
"challenges": challenges
}
def get_contest_challenge():
print("helllo")
|
import numpy as np
from progress.bar import Bar
import time
from constants import MAX_COLOR_VALUE
def normalize(arr):
"""
Normalize a vector using numpy.
Args:
arr(darray): Input vector
Returns:
darray: Normalized input vector
"""
norm = np.linalg.norm(arr)
if norm == 0:
return arr
return arr / norm
def distance(p1, p2):
"""
Get the distance between points p1 and p2
Args:
p1(ndarray): Point 1
p2(ndarray): Point 2
Returns:
float: Distance
"""
dist = np.linalg.norm(p1 - p2)
return dist
def humanize_time(secs):
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hours, mins, secs)
def degrees2radians(degrees):
return (degrees / 360) * 2 * np.pi
def normalize_color(color):
return color / MAX_COLOR_VALUE
def blerp(img_arr, x, y):
# Interpolate values of pixel neighborhood of x and y
i = int(np.round(x))
j = int(np.round(y))
# But not in the borders
height, width, _ = img_arr.shape
# Flip y value to go from top to bottom
y = height - y
if i == 0 or j == 0 or i == width or j == height:
if i == width:
i -= 1
if j == height:
j -= 1
return img_arr[j][i]
# t and s are interpolation parameters that go from 0 to 1
t = x - i + 0.5
s = y - j + 0.5
# Bilinear interpolation
color = (
img_arr[j - 1][i - 1] * (1 - t) * (1 - s)
+ img_arr[j - 1][i] * t * (1 - s)
+ img_arr[j][i - 1] * (1 - t) * s
+ img_arr[j][i] * t * s
)
return color
def adjust(normal_map_pixel):
"""
Adjust the normal map pixel so that it can have negative values.
Args:
normal_map_pixel(ndarray): Three channels RGB pixel
representing a normal vector.
Returns:
ndarray: A ready to use unit normal vector
"""
r, g, b = normal_map_pixel
# x and y will be from [-1 to 1] and z from [0 to 1]
x = 2 * r - 1
y = 2 * g - 1
z = b
normal_vector = np.array([x, y, z])
normalized = normalize(normal_vector)
return normalized
def progress_bar(iterations, msg):
step_size = np.round(iterations / 100).astype(int)
bar = Bar(msg, max=100, suffix='%(percent)d%%')
return bar, step_size
class Timer:
def __init__(self):
self.start_time = 0
self.end_time = 0
self.elapsed_time = 0
def start(self):
self.start_time = time.time()
def stop(self):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
def __str__(self):
return humanize_time(self.elapsed_time)
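if __name__ == '__main__':
    # Small self-check of the helpers above (a sketch with arbitrary values,
    # not part of the original module):
    timer = Timer()
    timer.start()
    unit = normalize(np.array([3.0, 4.0, 0.0]))                  # -> [0.6, 0.8, 0.0]
    dist = distance(np.array([0.0, 0.0]), np.array([3.0, 4.0]))  # -> 5.0
    rads = degrees2radians(180)                                  # -> pi
    timer.stop()
    print(unit, dist, rads, timer)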
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train rednet30."""
import argparse
import os
import time
import mindspore.nn as nn
from mindspore import context
from mindspore import dataset as ds
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.common import set_seed
from mindspore.train.model import Model
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from src.dataset import Dataset
from src.model import REDNet30
def train_net(opt):
"""train"""
device_id = int(os.getenv('DEVICE_ID', '0'))
rank_id = int(os.getenv('RANK_ID', '0'))
device_num = int(os.getenv('DEVICE_NUM', '1'))
if opt.platform == "GPU":
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
else:
context.set_context(mode=context.GRAPH_MODE, save_graphs=False,
device_target="Ascend", device_id=device_id)
# if distribute:
if opt.is_distributed:
init()
rank_id = get_rank()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
device_num=device_num, gradients_mean=True)
# dataset
print("============== Loading Data ==============")
train_dataset = Dataset(opt.dataset_path, opt.patch_size)
train_de_dataset = ds.GeneratorDataset(train_dataset, ["input", "label"], num_shards=device_num,
shard_id=rank_id, shuffle=True)
train_de_dataset = train_de_dataset.batch(opt.batch_size, drop_remainder=True)
step_size = train_de_dataset.get_dataset_size()
print("============== Loading Model ==============")
model = REDNet30()
optimizer = nn.Adam(model.trainable_params(), learning_rate=opt.lr)
loss = nn.MSELoss()
loss_scale_manager = DynamicLossScaleManager(init_loss_scale=opt.init_loss_scale, scale_window=1000)
model = Model(model, loss_fn=loss, optimizer=optimizer, loss_scale_manager=loss_scale_manager, amp_level="O3")
time_cb = TimeMonitor(data_size=step_size)
loss_cb = LossMonitor()
cb = [time_cb, loss_cb]
config_ck = CheckpointConfig(keep_checkpoint_max=opt.ckpt_save_max)
ckpt_cb = ModelCheckpoint(prefix='RedNet30_{}'.format(rank_id),
directory=os.path.join("ckpt", 'ckpt_' + str(rank_id) + '/'), config=config_ck)
cb += [ckpt_cb]
print("============== Starting Training ==============")
model.train(opt.num_epochs, train_de_dataset, callbacks=cb, dataset_sink_mode=True)
print("================== Finished ==================")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='./data/BSD300', help='training image path')
parser.add_argument('--platform', type=str, default='GPU', choices=('Ascend', 'GPU'), help='run platform')
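    # NOTE: argparse's type=bool converts any non-empty string, including "False",
    # to True; only omitting the flag (or passing an empty string) keeps this False.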
parser.add_argument('--is_distributed', type=bool, default=False, help='distributed training')
parser.add_argument('--patch_size', type=int, default=50, help='training patch size')
parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
parser.add_argument('--num_epochs', type=int, default=1000, help='epoch number')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--ckpt_save_max', type=int, default=5, help='maximum number of checkpoint files can be saved')
parser.add_argument('--init_loss_scale', type=float, default=65536., help='initialize loss scale')
option = parser.parse_args()
set_seed(option.seed)
time_start = time.time()
train_net(option)
time_end = time.time()
print('train time: %f' % (time_end - time_start))
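# Example invocation (a sketch; the script name and the DEVICE_ID variable are
# assumptions inferred from the code above, not documented commands):
#   DEVICE_ID=0 python train.py --platform GPU --dataset_path ./data/BSD300 \
#       --batch_size 16 --num_epochs 1000 --lr 1e-4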
|
# Test mod_wsgi.
# Hello-world test application to make sure mod_wsgi runs. Point your Apache config at this file to test.
def application(environ, start_response):
status = '200 OK'
output = 'Hello World!'
#print >> sys.stderr, "sys.stderr"
#print >> environ["wsgi.errors"], "wsgi.errors"
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
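# A hedged example of the Apache directive that could point at this file
# (the URL prefix and filesystem path are assumptions, not part of this app):
#   WSGIScriptAlias /test /var/www/wsgi/test_modwsgi.py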
|
import importlib
import itertools
from collections import namedtuple
import pytest
from dsch import exceptions, frontend, schema
from dsch.backends import inmem
backend_data = namedtuple('backend_data', ('module', 'storage_path'))
@pytest.fixture(params=('hdf5', 'mat', 'npz'))
def backend(request, tmpdir):
backend = backend_data(
module=importlib.import_module('dsch.backends.' + request.param),
storage_path=str(tmpdir.join('test_frontend.' + request.param))
)
return backend
@pytest.fixture(params=('hdf5', 'inmem', 'mat', 'npz'))
def foreign_backend(request, tmpdir):
if request.param == 'inmem':
storage_path = '::inmem::'
else:
storage_path = str(tmpdir.join('test_frontend_foreign.' +
request.param))
backend = backend_data(
module=importlib.import_module('dsch.backends.' + request.param),
storage_path=storage_path
)
return backend
def test_create(backend):
schema_node = schema.Bool()
storage = frontend.create(backend.storage_path, schema_node)
assert isinstance(storage, backend.module.Storage)
assert storage.schema_node == schema_node
def test_create_inmem():
schema_node = schema.Bool()
storage = frontend.create('::inmem::', schema_node)
assert isinstance(storage, inmem.Storage)
assert storage.schema_node == schema_node
def test_create_from(backend, foreign_backend):
schema_node = schema.Bool()
source_storage = frontend.create(foreign_backend.storage_path, schema_node)
source_storage.data.value = True
dest_storage = frontend.create_from(backend.storage_path, source_storage)
assert dest_storage.schema_node.hash() == source_storage.schema_node.hash()
assert dest_storage.data.value is True
def test_load(backend):
schema_node = schema.Bool()
storage = frontend.create(backend.storage_path, schema_node)
storage.data.value = True
storage.save()
new_storage = frontend.load(backend.storage_path)
assert isinstance(new_storage, backend.module.Storage)
assert new_storage.data.value is True
def test_load_require_schema(backend):
schema_node = schema.Bool()
storage = frontend.create(backend.storage_path, schema_node)
storage.data.value = True
storage.save()
frontend.load(backend.storage_path, required_schema=schema_node)
def test_load_required_schema_hash(backend):
schema_node = schema.Bool()
storage = frontend.create(backend.storage_path, schema_node)
storage.data.value = True
storage.save()
schema_hash = storage.schema_hash()
frontend.load(backend.storage_path, required_schema_hash=schema_hash)
def test_load_validation_fail(backend):
schema_node = schema.String(max_length=3)
storage = frontend.create(backend.storage_path, schema_node)
storage.data.value = 'spam'
storage.save(force=True)
with pytest.raises(exceptions.ValidationError):
frontend.load(backend.storage_path)
# With force=True, no exception must be raised.
frontend.load(backend.storage_path, force=True)
class TestPseudoStorageNode:
"""Tests for the PseudoStorage class when given a data node."""
@pytest.fixture
def storage(self, foreign_backend):
schema_node = schema.Compilation({
'spam': schema.Bool(),
'eggs': schema.Bytes(),
})
return frontend.create(foreign_backend.storage_path, schema_node)
def test_context(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bool(), True)
with pseudo as p:
assert p.data is not None
assert p.storage is None
assert pseudo.schema_node == storage.data.spam.schema_node
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_init(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bool(), False)
assert pseudo.data is not None
assert pseudo.storage is None
assert pseudo.schema_node is not None
def test_init_deferred(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bool(), True)
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_open_close(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bool(), True)
pseudo.open()
assert pseudo.data is not None
assert pseudo.storage is None
assert pseudo.schema_node == storage.data.spam.schema_node
pseudo.close()
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_open_fail(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bytes(),
True)
with pytest.raises(exceptions.InvalidSchemaError):
pseudo.open()
def test_schema_alternative(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bytes(),
True, (schema.Bool(),))
pseudo.open()
assert pseudo.schema_node == storage.data.spam.schema_node
def test_schema_alternative_fail(self, storage):
pseudo = frontend.PseudoStorage(storage.data.spam, schema.Bytes(),
True, (schema.String(),))
with pytest.raises(exceptions.InvalidSchemaError):
pseudo.open()
class TestPseudoStorageStr:
"""Tests for the PseudoStorage class when given a string."""
@pytest.fixture(params=tuple(
(b, e) for b, e in itertools.product(
('hdf5', '::inmem::', 'npz', 'mat'), (False, True)
) if not (b == '::inmem::' and e)
))
def storage_path(self, request, tmpdir):
"""Prepare storage path for all valid variants.
Variants are tuples ``(backend, existing)``, where ``existing``
indicates whether a storage should be created before the test.
"""
backend, existing = request.param
if backend in ('hdf5', 'npz', 'mat'):
# File backends
storage_path = str(tmpdir.join('test_pseudo.' + backend))
elif backend == '::inmem::':
storage_path = '::inmem::'
if existing:
storage = frontend.create(storage_path, schema.Bool())
storage.data.value = True
if hasattr(storage, 'save') and callable(storage.save):
storage.save()
del storage
return storage_path
@pytest.fixture(params=('hdf5', 'npz', 'mat'))
def storage_path_existing(self, request, tmpdir):
storage_path = str(tmpdir.join('test_pseudo.' + request.param))
storage = frontend.create(storage_path, schema.Bool())
storage.data.value = True
storage.save()
del storage
return storage_path
def test_context(self, storage_path):
pseudo = frontend.PseudoStorage(storage_path, schema.Bool(), True)
with pseudo as p:
assert p.data is not None
assert p.storage is not None
assert p.schema_node == p.storage.data.schema_node
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_init(self, storage_path):
pseudo = frontend.PseudoStorage(storage_path, schema.Bool(), False)
assert pseudo.data is not None
assert pseudo.storage is not None
assert pseudo.storage.storage_path == storage_path
assert pseudo.schema_node is not None
def test_init_deferred(self, storage_path):
pseudo = frontend.PseudoStorage(storage_path, schema.Bool(), True)
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_open_close(self, storage_path):
pseudo = frontend.PseudoStorage(storage_path, schema.Bool(), True)
pseudo.open()
assert pseudo.data is not None
assert pseudo.storage is not None
assert pseudo.schema_node == pseudo.storage.data.schema_node
pseudo.close()
assert pseudo.data is None
assert pseudo.storage is None
assert pseudo.schema_node is None
def test_open_fail(self, storage_path_existing):
pseudo = frontend.PseudoStorage(storage_path_existing, schema.Bytes(), True)
with pytest.raises(exceptions.InvalidSchemaError):
pseudo.open()
def test_schema_alternative(self, storage_path_existing):
pseudo = frontend.PseudoStorage(storage_path_existing, schema.Bytes(),
True, (schema.Bool(),))
pseudo.open()
assert pseudo.schema_node == pseudo.storage.data.schema_node
def test_schema_alternative_fail(self, storage_path_existing):
pseudo = frontend.PseudoStorage(storage_path_existing, schema.Bytes(),
True, (schema.String(),))
with pytest.raises(exceptions.InvalidSchemaError):
pseudo.open()
|
import importlib
import subprocess
import sys
def _package_exists(config: dict) -> bool:
"""Check to see whether a package exists."""
try:
importlib.import_module(config["import"])
except ModuleNotFoundError:
return False
return True
def package_from_config(config: dict) -> str:
"""Given a config return the pip install string."""
install_str = config["url"]
if install_str.endswith(".git"):
install_str = install_str[:-4]
install_branch = config.get("github_branch", "main")
return f"git+{install_str}@{install_branch}#egg={config['import']}"
def _install(package):
"""Install a package using pip."""
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def install_package(config: dict, allow_install: bool = False):
"""Install the python package if it doesn't exist."""
# now check to see whether the package exists
if not _package_exists(config):
package = package_from_config(config)
if allow_install:
_install(package)
else:
raise Exception(
"Package does not exist. Try installing it with: \n"
f"`!pip install -e {package}`"
)
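if __name__ == "__main__":
    # Usage sketch: the package name, URL and branch below are illustrative
    # assumptions, not requirements of this module.
    example_config = {
        "import": "example_pkg",
        "url": "https://github.com/example/example_pkg.git",
        "github_branch": "main",
    }
    # Only build and print the pip target here; install_package(example_config,
    # allow_install=True) would actually run pip if the import is missing.
    print(package_from_config(example_config))
    # -> git+https://github.com/example/example_pkg@main#egg=example_pkg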
|
"""
Autogenerated by ghenerate script, part of Quantarhei
http://github.com/tmancal74/quantarhei
Tomas Mancal, tmancal74@gmai.com
Generated on: 2018-11-12 14:35:11
Edit the functions below to give them desired functionality.
In the present version of `ghenerate`, no edits or replacements
are performed in the feature file text.
"""
import numpy
from behave import given
from behave import when
from behave import then
import quantarhei as qr
#
# Given ...
#
@given('that I have an aggregate of three molecules with no dephasing set')
def step_given_1(context):
"""
Given that I have an aggregate of three molecules with no dephasing set
"""
agg = qr.TestAggregate("trimer-2")
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1, 100.0)
agg.build()
context.agg = agg
#
# When ...
#
@when('I ask the aggregate to return PureDephasing object')
def step_when_2(context):
"""
When I ask the aggregate to return PureDephasing object
"""
agg = context.agg
pd = agg.get_PureDephasing()
context.pd = pd
#
# Then ...
#
@then('I get an empty PureDephasing object')
def step_then_3(context):
"""
Then I get an empty PureDephasing object
"""
pd = context.pd
agg = context.agg
Ntot = agg.Ntot
comp = numpy.zeros((Ntot,Ntot), dtype=qr.REAL)
numpy.testing.assert_allclose(comp, pd.data)
#
# Given ...
#
@given('that I have an aggregate of three molecules with dephasing rates set')
def step_given_4(context):
"""
Given that I have an aggregate of three molecules with dephasing rates set
"""
agg = qr.TestAggregate("trimer-2")
for mol in agg.monomers:
mol.set_transition_width((0,1), 1.0/100.0)
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1, 100.0)
agg.build()
context.agg = agg
#
# Then ...
#
@then('I get a PureDephasing object with some rates')
def step_then_5(context):
"""
Then I get a PureDephasing object with some rates
"""
pd = context.pd
agg = context.agg
Ntot = agg.Ntot
comp = numpy.zeros((Ntot,Ntot), dtype=qr.REAL)
a1 = numpy.array(comp.shape)
a2 = numpy.array(pd.data.shape)
numpy.testing.assert_allclose(a1, a2)
#
# Given ...
#
@given('that I have an aggregate of two identical molecules with dephasing rates set')
def step_given_6(context):
"""
Given that I have an aggregate of two identical molecules with dephasing rates set
"""
agg = qr.TestAggregate("homodimer-2")
for mol in agg.monomers:
mol.set_transition_width((0,1), 1.0/100.0)
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1, 100.0)
agg.build()
context.agg = agg
#
# Then ...
#
@then('I get a PureDephasing object with electronic dephasing rates equal to zero')
def step_then_7(context):
"""
Then I get a PureDephasing object with electronic dephasing rates equal to zero
"""
pd = context.pd
a1 = pd.data
a2 = numpy.zeros(pd.data.shape, dtype=qr.REAL)
a2[:,:] = a1
a2[1,2] = 0.0
a2[2,1] = 0.0
numpy.testing.assert_allclose(a1, a2)
#
# Given ...
#
@given('that I have an aggregate of two molecules with nuclear mode each and with electronic dephasing rates set')
def step_given_8(context):
"""
Given that I have an aggregate of two molecules with nuclear mode each and with electronic dephasing rates set
"""
agg = qr.TestAggregate("dimer-2-vib")
for mol in agg.monomers:
mol.set_transition_width((0,1), 1.0/100.0)
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1, 100.0)
agg.build()
context.agg = agg
|
import csv
import dateutil.parser
import os
import splparser.parser
from user import *
from query import *
from logging import getLogger as get_logger
from os import path
from splparser.exceptions import SPLSyntaxError, TerminatingSPLSyntaxError
BYTES_IN_MB = 1048576
LIMIT = 2000*BYTES_IN_MB
logger = get_logger("queryutils")
def get_users_from_file(filename, users):
"""Populate the users dictionary with users and their queris from the given file.
:param filename: The .csv file containing user queries
:type filename: str
:param users: The user dict into which to place the users
:type users: dict
:rtype: None
"""
logger.debug("Reading from file:" + filename)
first = True
with open(filename) as datafile:
reader = csv.DictReader(datafile)
for row in reader:
logger.debug("Attempting to read row.")
# Get basic user information.
username = row.get('user', None)
if username is not None:
username = unicode(username.decode("utf-8"))
case = row.get('case_id', None)
if case is not None:
case = unicode(case.decode("utf-8"))
# Check if we've seen this user before.
user = None
userhash = None
if username is not None and case is not None:
userhash = ".".join([username, case])
user = users.get(userhash, None)
elif username is not None and case is None:
userhash = username
user = users.get(userhash, None)
else:
userhash = ""
user = users.get(userhash, None)
if user is None:
user = User(username)
users[userhash] = user
user.case_id = case
# Get basic query information.
timestamp = row.get('_time', None)
if timestamp is not None:
timestamp = float(dateutil.parser.parse(timestamp).strftime('%s.%f'))
querystring = row.get('search', None)
if querystring is not None:
querystring = unicode(querystring.decode("utf-8")).strip()
# Tie the query and the user together.
query = Query(querystring, timestamp)
user.queries.append(query)
query.user = user
# Get additional query information and add it to the query.
runtime = row.get('runtime', None)
if runtime is None:
runtime = row.get('total_run_time', None)
if runtime is not None:
try:
runtime = float(runtime.decode("utf-8"))
except:
runtime = None
query.execution_time = runtime
search_et = row.get('search_et', None)
if search_et is not None:
try:
search_et = float(search_et.decode("utf-8"))
except:
search_et = None
query.earliest_event = search_et
search_lt = row.get('search_lt', None)
if search_lt is not None:
try:
search_lt = float(search_lt.decode("utf-8"))
except:
search_lt = None
query.latest_event = search_lt
range = row.get('range', None)
if range is not None:
try:
range = float(range.decode("utf-8"))
except:
range = None
query.range = range
is_realtime = row.get('is_realtime', None)
if is_realtime is not None and is_realtime == "false":
is_realtime = False
if is_realtime is not None and is_realtime == "true":
is_realtime = True
query.is_realtime = is_realtime
searchtype = row.get('searchtype', None)
if searchtype is None:
searchtype = row.get('search_type', None)
if searchtype is not None:
searchtype = unicode(searchtype.decode("utf-8"))
query.search_type = searchtype
if query.search_type == "adhoc":
query.is_interactive = True
splunk_id = row.get('search_id', None)
if splunk_id is not None:
splunk_id = unicode(splunk_id.decode("utf-8"))
query.splunk_search_id = splunk_id
savedsearch_name = row.get('savedsearch_name', None)
if savedsearch_name is not None:
savedsearch_name = unicode(savedsearch_name.decode("utf-8"))
query.saved_search_name = savedsearch_name
logger.debug("Successfully read query.")
def get_users_from_directory(directory, users, limit=LIMIT):
"""Populate the users dict with users from the .csv files.
:param directory: The path to the directory containing the .csv files
:type directory: str
:param users: The dict to contain the users read from the .csv files
:type users: dict
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: None
"""
raw_data_files = get_csv_files(directory, limit=limit)
for f in raw_data_files:
get_users_from_file(f, users)
def get_csv_files(dir, limit=LIMIT):
"""Return the paths to all the .csv files in the given directory.
:param dir: The path to the given directory
:type dir: str
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: list
"""
csv_files = []
bytes_added = 0.
for (dirpath, dirnames, filenames) in os.walk(dir):
for filename in filenames:
if filename[-4:] == '.csv':
full_filename = path.join(path.abspath(dir), filename)
csv_files.append(full_filename)
bytes_added += path.getsize(full_filename)
if bytes_added > limit:
return csv_files
return csv_files
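# Usage sketch (the directory path below is an illustrative assumption):
#   users = {}
#   get_users_from_directory("/path/to/query_csvs", users)
#   print("%d users loaded" % len(users))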
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import urllib
import urlparse
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import config
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2 import sigver
from saml2 import s_utils
from saml2.assertion import Assertion
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.client import Saml2Client
from saml2.config import SPConfig
from saml2.response import LogoutResponse
from saml2.saml import NAMEID_FORMAT_PERSISTENT, EncryptedAssertion
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NameID
from saml2.server import Server
from saml2.sigver import pre_encryption_part, rm_xmltag
from saml2.s_utils import do_attribute_statement
from saml2.s_utils import factory
from saml2.time_util import in_a_while
from fakeIDP import FakeIDP
from fakeIDP import unpack_form
AUTHN = {
"class_ref": INTERNETPROTOCOLPASSWORD,
"authn_auth": "http://www.example.com/login"
}
def add_subelement(xmldoc, node_name, subelem):
s = xmldoc.find(node_name)
if s > 0:
x = xmldoc.rindex("<", 0, s)
tag = xmldoc[x+1:s-1]
c = s+len(node_name)
spaces = ""
while xmldoc[c] == " ":
spaces += " "
c += 1
xmldoc = xmldoc.replace(
"<%s:%s%s/>" % (tag, node_name, spaces),
"<%s:%s%s>%s</%s:%s>" % (tag, node_name, spaces, subelem, tag,
node_name))
return xmldoc
def for_me(condition, me):
for restriction in condition.audience_restriction:
audience = restriction.audience
if audience.text.strip() == me:
return True
def ava(attribute_statement):
result = {}
for attribute in attribute_statement.attribute:
# Check name_format ??
name = attribute.name.strip()
result[name] = []
for value in attribute.attribute_value:
result[name].append(value.text.strip())
return result
def _leq(l1, l2):
return set(l1) == set(l2)
# def test_parse_3():
# xml_response = open(XML_RESPONSE_FILE3).read()
# response = samlp.response_from_string(xml_response)
# client = Saml2Client({})
# (ava, name_id, real_uri) = \
# client.do_response(response, "xenosmilus.umdc.umu.se")
# print 40*"="
# print ava
# print 40*","
# print name_id
# assert False
REQ1 = {"1.2.14": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery Destination="https://idp.example.com/idp/" ID="id1"
IssueInstant="%s" Version="2.0" xmlns:ns0="urn:oasis:names:tc:SAML:2
.0:protocol"><ns1:Issuer Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:entity" xmlns:ns1="urn:oasis:names:tc:SAML:2
.0:assertion">urn:mace:example.com:saml:roland:sp</ns1:Issuer><ns1:Subject
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion"><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>""",
"1.2.16": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion" Destination="https://idp
.example.com/idp/" ID="id1" IssueInstant="%s" Version="2.0"><ns1:Issuer
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">urn:mace:example
.com:saml:roland:sp</ns1:Issuer><ns1:Subject><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>"""}
nid = NameID(name_qualifier="foo", format=NAMEID_FORMAT_TRANSIENT,
text="123456")
class TestClient:
def setup_class(self):
self.server = Server("idp_conf")
conf = config.SPConfig()
conf.load_file("server_conf")
self.client = Saml2Client(conf)
def test_create_attribute_query1(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
reqstr = "%s" % req.to_string()
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
issuer = req.issuer
assert issuer.text == "urn:mace:example.com:saml:roland:sp"
attrq = samlp.attribute_query_from_string(reqstr)
print attrq.keyswv()
assert _leq(attrq.keyswv(), ['destination', 'subject', 'issue_instant',
'version', 'id', 'issuer'])
assert attrq.destination == req.destination
assert attrq.id == req.id
assert attrq.version == req.version
assert attrq.issuer.text == issuer.text
assert attrq.issue_instant == req.issue_instant
assert attrq.subject.name_id.format == name_id.format
assert attrq.subject.name_id.text == name_id.text
def test_create_attribute_query2(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
attribute={
("urn:oid:2.5.4.42",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"givenName"): None,
("urn:oid:2.5.4.4",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"surname"): None,
("urn:oid:1.2.840.113549.1.9.1",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri"): None,
},
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
print req.to_string()
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
assert len(req.attribute) == 3
# one is givenName
seen = []
for attribute in req.attribute:
if attribute.name == "urn:oid:2.5.4.42":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "givenName"
seen.append("givenName")
elif attribute.name == "urn:oid:2.5.4.4":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "surname"
seen.append("surname")
elif attribute.name == "urn:oid:1.2.840.113549.1.9.1":
assert attribute.name_format == saml.NAME_FORMAT_URI
if getattr(attribute, "friendly_name"):
assert False
seen.append("email")
assert _leq(seen, ["givenName", "surname", "email"])
def test_create_attribute_query_3(self):
req_id, req = self.client.create_attribute_query(
"https://aai-demo-idp.switch.ch/idp/shibboleth",
"_e7b68a04488f715cda642fbdd90099f5",
format=saml.NAMEID_FORMAT_TRANSIENT,
message_id="id1")
assert isinstance(req, samlp.AttributeQuery)
assert req.destination == "https://aai-demo-idp.switch" \
".ch/idp/shibboleth"
assert req.id == "id1"
assert req.version == "2.0"
assert req.issue_instant
assert req.issuer.text == "urn:mace:example.com:saml:roland:sp"
nameid = req.subject.name_id
assert nameid.format == saml.NAMEID_FORMAT_TRANSIENT
assert nameid.text == "_e7b68a04488f715cda642fbdd90099f5"
def test_create_auth_request_0(self):
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso", message_id="id1")[1]
ar = samlp.authn_request_from_string(ar_str)
print ar
assert ar.assertion_consumer_service_url == ("http://lingon.catalogix"
".se:8087/")
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_TRANSIENT
def test_create_auth_request_vo(self):
assert self.client.config.vorg.keys() == [
"urn:mace:example.com:it:tek"]
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso",
"urn:mace:example.com:it:tek", # vo
nameid_format=NAMEID_FORMAT_PERSISTENT,
message_id="666")[1]
ar = samlp.authn_request_from_string(ar_str)
print ar
assert ar.id == "666"
assert ar.assertion_consumer_service_url == "http://lingon.catalogix" \
".se:8087/"
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_PERSISTENT
assert nid_policy.sp_name_qualifier == "urn:mace:example.com:it:tek"
def test_sign_auth_request_0(self):
#print self.client.config
req_id, areq = self.client.create_authn_request(
"http://www.example.com/sso", sign=True, message_id="id1")
ar_str = "%s" % areq
ar = samlp.authn_request_from_string(ar_str)
assert ar
assert ar.signature
assert ar.signature.signature_value
signed_info = ar.signature.signed_info
#print signed_info
assert len(signed_info.reference) == 1
assert signed_info.reference[0].uri == "#id1"
assert signed_info.reference[0].digest_value
print "------------------------------------------------"
try:
assert self.client.sec.correctly_signed_authn_request(
ar_str, self.client.config.xmlsec_binary,
self.client.config.metadata)
except Exception: # missing certificate
self.client.sec.verify_signature(ar_str, node_name=class_name(ar))
def test_response(self):
IDP = "urn:mace:example.com:saml:roland:idp"
ava = {"givenName": ["Derek"], "sn": ["Jeter"],
"mail": ["derek@nyy.mlb.com"], "title": ["The man"]}
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id_policy=nameid_policy,
userid="foba0001@example.com",
authn=AUTHN)
resp_str = "%s" % resp
resp_str = base64.encodestring(resp_str)
authn_response = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
assert authn_response is not None
assert authn_response.issuer() == IDP
assert authn_response.response.assertion[0].issuer.text == IDP
session_info = authn_response.session_info()
print session_info
assert session_info["ava"] == {'mail': ['derek@nyy.mlb.com'],
'givenName': ['Derek'],
'sn': ['Jeter'],
'title': ["The man"]}
assert session_info["issuer"] == IDP
assert session_info["came_from"] == "http://foo.example.com/service"
response = samlp.response_from_string(authn_response.xmlstr)
assert response.destination == "http://lingon.catalogix.se:8087/"
# One person in the cache
assert len(self.client.users.subjects()) == 1
subject_id = self.client.users.subjects()[0]
print "||||", self.client.users.get_info_from(subject_id, IDP)
# The information I have about the subject comes from one source
assert self.client.users.issuers_of_info(subject_id) == [IDP]
# --- authenticate another person
ava = {"givenName": ["Alfonson"], "sn": ["Soriano"],
"mail": ["alfonson@chc.mlb.com"], "title": ["outfielder"]}
resp_str = "%s" % self.server.create_authn_response(
identity=ava,
in_response_to="id2",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id_policy=nameid_policy,
userid="also0001@example.com",
authn=AUTHN)
resp_str = base64.encodestring(resp_str)
self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id2": "http://foo.example.com/service"})
# Two persons in the cache
assert len(self.client.users.subjects()) == 2
issuers = [self.client.users.issuers_of_info(s) for s in
self.client.users.subjects()]
# The information I have about the subjects comes from the same source
print issuers
assert issuers == [[IDP], [IDP]]
def test_init_values(self):
entityid = self.client.config.entityid
print entityid
assert entityid == "urn:mace:example.com:saml:roland:sp"
print self.client.metadata.with_descriptor("idpsso")
location = self.client._sso_location()
print location
assert location == 'http://localhost:8088/sso'
my_name = self.client._my_name()
print my_name
assert my_name == "urn:mace:example.com:saml:roland:sp"
def test_sign_then_encrypt_assertion(self):
# Begin with the IdPs side
_sec = self.server.sec
assertion = s_utils.assertion_factory(
subject=factory(saml.Subject, text="_aaa",
name_id=factory(
saml.NameID,
format=saml.NAMEID_FORMAT_TRANSIENT)),
attribute_statement=do_attribute_statement(
{
("", "", "surName"): ("Jeter", ""),
("", "", "givenName"): ("Derek", ""),
}
),
issuer=self.server._issuer(),
)
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file="pki/mykey.pem",
node_id=assertion.id)
# Create an Assertion instance from the signed assertion
_ass = saml.assertion_from_string(sigass)
response = sigver.response_factory(
in_response_to="_012345",
destination="https:#www.example.com",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
assertion=_ass
)
enctext = _sec.crypto.encrypt_assertion(response, _sec.cert_file,
pre_encryption_part())
seresp = samlp.response_from_string(enctext)
# Now over to the client side
_csec = self.client.sec
if seresp.encrypted_assertion:
decr_text = _csec.decrypt(enctext)
seresp = samlp.response_from_string(decr_text)
resp_ass = []
sign_cert_file = "pki/mycert.pem"
for enc_ass in seresp.encrypted_assertion:
assers = extension_elements_to_elements(
enc_ass.extension_elements, [saml, samlp])
for ass in assers:
if ass.signature:
if not _csec.verify_signature("%s" % ass,
sign_cert_file,
node_name=class_name(ass)):
continue
resp_ass.append(ass)
seresp.assertion = resp_ass
seresp.encrypted_assertion = None
#print _sresp
assert seresp.assertion
def test_sign_then_encrypt_assertion2(self):
# Begin with the IdPs side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser = Assertion({"givenName": "Derek", "sn": "Jeter"})
assertion = asser.construct(
self.client.config.entityid, "_012345",
"http://lingon.catalogix.se:8087/",
factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT),
policy=self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
attrconvs=self.server.config.attribute_converters,
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login")
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file=self.client.sec.key_file,
node_id=assertion.id)
sigass = rm_xmltag(sigass)
response = sigver.response_factory(
in_response_to="_012345",
destination="https://www.example.com",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
encrypted_assertion=EncryptedAssertion()
)
xmldoc = "%s" % response
# strangely enough I get different tags if I run this test separately
# or as part of a bunch of tests.
xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass)
enctext = _sec.crypto.encrypt_assertion(xmldoc, _sec.cert_file,
pre_encryption_part())
#seresp = samlp.response_from_string(enctext)
resp_str = base64.encodestring(enctext)
# Now over to the client side
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
#assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.ava == {'givenName': ['Derek'], 'sn': ['Jeter']}
# Below can only be done with dummy Server
IDP = "urn:mace:example.com:saml:roland:idp"
class TestClientWithDummy():
def setup_class(self):
self.server = FakeIDP("idp_all_conf")
conf = SPConfig()
conf.load_file("servera_conf")
self.client = Saml2Client(conf)
self.client.send = self.server.receive
def test_do_authn(self):
binding = BINDING_HTTP_REDIRECT
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
IDP, "http://www.example.com/relay_state",
binding=binding, response_binding=response_binding)
assert isinstance(sid, basestring)
assert len(http_args) == 4
assert http_args["headers"][0][0] == "Location"
assert http_args["data"] == []
redirect_url = http_args["headers"][0][1]
_, _, _, _, qs, _ = urlparse.urlparse(redirect_url)
qs_dict = urlparse.parse_qs(qs)
req = self.server.parse_authn_request(qs_dict["SAMLRequest"][0],
binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
def test_do_attribute_query(self):
response = self.client.do_attribute_query(
IDP, "_e7b68a04488f715cda642fbdd90099f5",
attribute={"eduPersonAffiliation": None},
nameid_format=NAMEID_FORMAT_TRANSIENT)
def test_logout_1(self):
""" one IdP/AA logout from"""
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"surName": "Andersson",
"mail": "anders.andersson@example.com"
}
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.global_logout(nid, "Tired", in_a_while(minutes=5))
print resp
assert resp
assert len(resp) == 1
assert resp.keys() == entity_ids
response = resp[entity_ids[0]]
assert isinstance(response, LogoutResponse)
def test_post_sso(self):
binding = BINDING_HTTP_POST
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
"urn:mace:example.com:saml:roland:idp", relay_state="really",
binding=binding, response_binding=response_binding)
_dic = unpack_form(http_args["data"][3])
req = self.server.parse_authn_request(_dic["SAMLRequest"], binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
# Normally a response would now be sent back to the users web client
# Here I fake what the client will do
# create the form post
http_args["data"] = urllib.urlencode(_dic)
http_args["method"] = "POST"
http_args["dummy"] = _dic["SAMLRequest"]
http_args["headers"] = [('Content-type',
'application/x-www-form-urlencoded')]
response = self.client.send(**http_args)
print response.text
_dic = unpack_form(response.text[3], "SAMLResponse")
resp = self.client.parse_authn_request_response(_dic["SAMLResponse"],
BINDING_HTTP_POST,
{sid: "/"})
ac = resp.assertion.authn_statement[0].authn_context
assert ac.authenticating_authority[0].text == \
'http://www.example.com/login'
assert ac.authn_context_class_ref.text == INTERNETPROTOCOLPASSWORD
# if __name__ == "__main__":
# tc = TestClient()
# tc.setup_class()
# tc.test_response()
if __name__ == "__main__":
tc = TestClientWithDummy()
tc.setup_class()
tc.test_do_attribute_query()
|
# https://colab.research.google.com/drive/16B56hyMC-apzhoYOjFnF--ejHUpS0f77?usp=sharing
# -*- coding: utf-8 -*-
"""bigo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16B56hyMC-apzhoYOjFnF--ejHUpS0f77
# Big-O Notation: Plots
The purpose of this notebook is to visualize the order of growth of some functions used frequently in algorithm analysis. Note that this is an interactive notebook: besides just running all the code below, you may also experiment with it. Try plugging in your favorite functions and/or changing the ranges below and see what happens. Proceed by repeatedly clicking the Run button. To start over, select Kernel -> Restart and Clear Output.
## Definitions
We start by recalling the definitions. Consider two functions $f(n)$ and $g(n)$ that are defined for all positive integers and take on non-negative real values. (Some functions frequently used in algorithm design: $\log n$, $\sqrt{n}$, $n\log n$, $n^3$, $2^n$.) We say that **$f$ grows slower than $g$** and write $f \prec g$, if $\frac{f(n)}{g(n)}$ goes to 0 as $n$ grows. We say that **$f$ grows no faster than $g$** and write $f \preceq g$, if there exists a constant $c$ such that $f(n) \le c \cdot g(n)$ for all $n$.
Three important remarks.
1. $f \prec g$ is the same as $f=o(g)$ (small-o) and $f \preceq g$ is the same as $f=O(g)$ (big-O). In this notebook, we've decided to stick to the $\preceq$ notation, since many learners find this notation more intuitive. One source of confusion is the following: many learners are confused by the statement like "$5n^2=O(n^3)$". When seeing such a statement, they claim: "But this is wrong! In fact, $5n^2=O(n^2)$!" At the same time, both these statements are true: $5n^2=O(n^3)$ and also $5n^2=O(n^2)$. They both just say that $5n^2$ grows no faster than both $n^2$ and $n^3$. In fact, $5n^2$ grows no faster than $n^2$ and grows slower than $n^3$. In $\preceq$ notation, this is expressed as follows: $5n^2 \preceq n^2$ and $5n^2 \preceq n^3$. This resembles comparing integers: if $x=2$, then both statements $x \le 2$ and $x \le 3$ are correct.
2. Note that if $f \prec g$, then also $f \preceq g$. In plain English: if $f$ grows slower than $g$, then $f$ certainly grows no faster than $g$.
3. Note that we need to use a fancy $\preceq$ symbol instead of the standard less-or-equal sign $\le$, since the latter one is typically used as follows: $f \le g$ if $f(n) \le g(n)$ for all $n$. Hence, for example, $5n^2 \not \le n^2$, but $5n^2 \preceq n^2$.
## Plotting: two simple examples
We start by loading two modules responsible for plotting.
"""
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""Now, plotting a function is as easy as the following three lines of code. It shows the plot of a function $7n^2+6n+5$ in the range $1 \le n \le 100$. Note that the scale of the $y$-axis adjusts nicely."""
n = np.linspace(1, 100)
plt.plot(n, 7 * n * n + 6 * n + 5)
plt.show()
"""Now, let us add a function $20n$ to the previous example to visualize that $20n$ grows slower than $7n^2+6n+5$."""
n = np.linspace(1, 100)
plt.plot(n, 7 * n * n + 6 * n + 5, label="7n^2+6n+5")
plt.plot(n, 20 * n, label="20n")
plt.legend(loc='upper left')
plt.show()
"""## Common rules
Before proceeding with visualizations, let's review the common rules of comparing the order of growth of functions arising frequently in algorithm analysis.
1. Multiplicative constants can be omitted: $c \cdot f \preceq f$. Examples: $5n^2 \preceq n^2$, $\frac{n^2}{3} \preceq n^2$.
2. Out of two polynomials, the one with larger degree grows faster: $n^a \preceq n^b$ for $0 \le a \le b$. Examples: $n \prec n^2$, $\sqrt{n} \prec n^{2/3}$, $n^2 \prec n^3$, $n^0 \prec \sqrt{n}$.
3. Any polynomial grows slower than any exponential: $n^a \prec b^n$ for $a \ge 0, b>1$. Examples: $n^3 \prec 2^n$, $n^{10} \prec 1.1^n$.
4. Any polylogarithm grows slower than any polynomial: $(\log n)^a \prec n^b$ for $a, b>0$. Examples: $(\log n)^3 \prec \sqrt{n}$, $n\log n \prec n^2$.
5. Smaller terms can be omitted: if $f \prec g$, then $f+g\preceq g$. Examples: $n^2+n \preceq n^2$, $2^n+n^9 \preceq 2^n$.
## Rule 5: Smaller terms can be omitted
Consider $7n^2+6n+5$ again. Both $6n$ and $5$ grow slower than $7n^2$. For this reason, they can be omitted. To visualize this, let's first plot the functions $7n^2+6n+5$ and $7n^2$ for $1 \le n \le 5$.
"""
n = np.linspace(1, 5)
plt.plot(n, 7 * n * n + 6 * n + 5, label="7n^2+6n+5")
plt.plot(n, 7 * n * n, label="7n^2")
plt.legend(loc='upper left')
plt.show()
"""As expected, $7n^2+6n+5$ is always larger than $7n^2$ (as $n$ is positive). Next, we plot the same two functions but for $1 \le n \le 100$."""
n = np.linspace(1, 100)
plt.plot(n, 7 * n * n + 6 * n + 5, label="7n^2+6n+5")
plt.plot(n, 7 * n * n, label="7n^2")
plt.legend(loc='upper left')
plt.show()
"""We see that as $n$ grows, the contribution of $6n+5$ becomes more and more negligible.
Another way of justifying this, is to plot the function $\frac{7n^2+6n+5}{7n^2}$.
"""
n = np.linspace(1, 100)
plt.plot(n, (7 * n * n + 6 * n + 5)/(7 * n * n))
plt.show()
"""As we see, as $n$ grows, the fraction approaches 1.
## Rule 1: Multiplicative constants can be ommitted
In terms of big-O notation, $7n^2+6n+5=O(n^2)$, i.e., $7n^2+6n+5$ grows no faster than $n^2$. This again can be visualized by plotting their fraction. As we see, their fraction is always at most 18 and approaches 7. In other words, $7n^2+6n+5 \le 18n^2$ for all $n \ge 1$.
"""
n = np.linspace(1, 100)
plt.plot(n, (7 * n * n + 6 * n + 5)/(n * n))
plt.show()
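"""A quick numeric spot-check of the claim above (a sketch): verify that $7n^2+6n+5 \le 18n^2$ holds for every integer $1 \le n < 10^4$."""
n_check = np.arange(1, 10 ** 4)
assert np.all(7 * n_check ** 2 + 6 * n_check + 5 <= 18 * n_check ** 2)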
"""## Rule 2: Out of two polynomials, the one with larger degree grows faster
For constants $a > b > 0$, $n^a$ grows faster than $n^b$. This, in particular, means that $n^b=O(n^a)$. To visualize it, let's plot $n$, $n^2$, and $n^3$.
"""
n = np.linspace(1, 10)
plt.plot(n, n, label="n")
plt.plot(n, n * n, label="n^2")
plt.plot(n, n * n * n, label="n^3")
plt.legend(loc='upper left')
plt.show()
"""Let's now see what happens on a bigger scale: instead of the range $1 \le n \le 10$, consider the range $1 \le n \le 100$."""
n = np.linspace(1, 100)
plt.plot(n, n, label="n")
plt.plot(n, n * n, label="n^2")
plt.plot(n, n * n * n, label="n^3")
plt.legend(loc='upper left')
plt.show()
"""## Rule 3: Any polynomial grows slower than any exponential
Let's plot $n^4$ and $2^n$ in the range $1 \le n \le 10$.
"""
n = np.linspace(1, 10)
plt.plot(n, n ** 4, label="n^4")
plt.plot(n, 2 ** n, label="2^n")
plt.legend(loc='upper left')
plt.show()
"""The plot reveals that in this range $n^4$ is always greater than $2^n$. This however does not mean that $n^4$ grows faster than $2^n$! To ensure this, let's take a look at a larger range $1 \le n \le 20$."""
n = np.linspace(1, 20)
plt.plot(n, n ** 4, label="n^4")
plt.plot(n, 2 ** n, label="2^n")
plt.legend(loc='upper left')
plt.show()
"""## Rule 4: Any polylogarithm grows slower than any polynomial
To visualize this rule, we start by plotting the two most standard representatives: $\log n$ and $n$. The following plot shows that $\log n$ indeed grows slower than $n$.
"""
n = np.linspace(1, 20)
plt.plot(n, n, label="n")
plt.plot(n, np.log(n), label="log n")
plt.legend(loc='upper left')
plt.show()
"""Now consider a more exotic example: $(\log n)^3$ versus $\sqrt{n}$ (recall that $\sqrt{n}$ is a polynomial function since $\sqrt{n}=n^{0.5}$)."""
n = np.linspace(1, 100)
plt.plot(n, n ** .5, label="n^.5")
plt.plot(n, np.log(n) ** 3, label="(log n)^3")
plt.legend(loc='upper left')
plt.show()
"""This looks strange: it seems that $(\log n)^3$ grows faster than $\sqrt{n}$. Let's do the standard trick: increase the range from $[1,100]$ to, say, $[1, 1\,000\,000]$."""
n = np.linspace(1, 10 ** 6)
plt.plot(n, n ** .5, label="n^.5")
plt.plot(n, np.log(n) ** 3, label="(log n)^3")
plt.legend(loc='upper left')
plt.show()
"""Surprisingly, the logaritmic function is still above the polynomial one! This shows that it is in fact dangerous to decide which function grows faster just by looking at how they behave for some not so large values of $n$. The rule "any polynomial grows faster than any polylogarithm" means that **eventually** the polynomial function will become larger and larger than polylogarithmic. But the rule does not specify for what value of $n$ this happens for the first time.
To finally ensure that $\sqrt{n}$ outperforms $(\log n)^3$ eventually, let's increase the range to $10^8$.
"""
n = np.linspace(1, 10 ** 8)
plt.plot(n, n ** .5, label="n^.5")
plt.plot(n, np.log(n) ** 3, label="(log n)^3")
plt.legend(loc='upper left')
plt.show()
"""Also, let's consider an even large interval to make sure that these two functions don't switch back."""
n = np.linspace(1, 10 ** 15)
plt.plot(n, n ** .5, label="n^.5")
plt.plot(n, np.log(n) ** 3, label="(log n)^3")
plt.legend(loc='upper left')
plt.show()
"""## Exercise
As the final exercise, try to find the value of $n$ where $n^{0.1}$ becomes larger than $(\log n)^5$.
"""
n = np.linspace(1, 100)
plt.plot(n, n ** .1, label="n^.1")
plt.plot(n, np.log(n) ** 5, label="(log n)^5")
plt.legend(loc='upper left')
plt.show()
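"""One possible way to approach the exercise numerically (a sketch, with guessed grid bounds): scan a logarithmic grid of candidate values and report the first $n$ at which $n^{0.1}$ exceeds $(\log n)^5$. Widen the bounds if no crossover is found."""
n = np.logspace(1, 200, num=2000)
poly = n ** 0.1
polylog = np.log(n) ** 5
crossover = np.where(poly > polylog)[0]
if crossover.size:
    print("n^0.1 first exceeds (log n)^5 near n = %.3e" % n[crossover[0]])
else:
    print("No crossover found in the scanned range; try larger bounds.")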
|
def getResizedLeftSMPLX_UpperLeg(vertices, customerEstimatedUpperLegLenInches, customerEstimatedHeightInches, customerEstimatedMaxLowerLegWidthInches_X, customerEstimatedMaxLowerLegDepthInches_Z, customerEstimatedMaxUpperLegWidthInches_X, customerEstimatedMaxUpperLegDepthInches_Z):
# Idea: have this spit out all the body parts with centroids at (0,0,0) ? Then it's a different function's job to "put Humpty-Dumpty back together again" -NXB, August 15, 2020
# NOTE: generalize this to be "getResizedLeftSMPLX_BodyPartNameHere(..., otherParams, ...)" ?
# -nxb, August 15, 2020
'''
# Idea: This function can be generalized to each body part? Code reuse would prevent some headaches, make it so changing error-catching in just the general function would fix the error(s) for all the body parts rather than just the one,
# -nxb, August 15, 2020
'''
def scaleLegLinearlyWithYHeight(verts, yTop, yBot, xWidthAtTopYHeight_RealCustomerInches, xWidthAtBotYHeight_RealCustomerInches, zDepthAtTopYHeight_RealCustomerInches, zDepthAtBotYHeight_RealCustomerInches):
'''
This old code was pasted here at
4:47 P.M. on August 31, 2020
-NXB
'''
# TODO: make this fast (AKA "performant"). (vectorize it) -nxb; August 24, 2020 at 5:15 P.M.
# NOTE: the code is fairly performant. (about 5 secs. O(5 seconds) ) At least while I'm only scaling the LowerLeg, most of the time is spent on file-IO rather than in this method. (see output from "cProfile" below ) :
#============================================================================================================
''' Some output from the "cProfile" command (sort by total time taken)
=============================================================================================================
Some output from the "cProfile" command (sort by total time taken)
=============================================================================================================
"`p3 -m cProfile -s tottime examples/nxb_demo.py --model-folder /home/nathan_bendich/Downloads/SMPL-X_Models/models/ --plot-joints=True --gender="male" `" -nxb, August 28, 2020
=============================================================================================================
==========================================================================
ncalls tottime percall cumtime percall filename:lineno(function)
==========================================================================
======================================================================================================
1336 0.596 0.000 0.596 0.000 {method 'read' of '_io.FileIO' objects}
91/88 0.558 0.006 0.562 0.006 {built-in method _imp.create_dynamic}
322 0.316 0.001 0.316 0.001 {method 'decompress' of 'zlib.Decompress' objects}
1336 0.209 0.000 0.209 0.000 {built-in method marshal.loads}
5603 0.201 0.000 0.201 0.000 {built-in method posix.stat}
396 0.157 0.000 0.157 0.000 {method 'read' of '_io.BufferedReader' objects}
2292 0.154 0.000 0.154 0.000 {method 'format' of 'str' objects}
286/207 0.147 0.001 0.181 0.001 {built-in method numpy.core._multiarray_umath.implement_array_function}
1336 0.115 0.000 0.711 0.001 <frozen importlib._bootstrap_external>:830(get_data)
1081 0.114 0.000 0.114 0.000 {built-in method builtins.compile}
2725/2666 0.113 0.000 0.289 0.000 {built-in method builtins.__build_class__}
======================================================================================================
'''
# TODO: make this fast (AKA "performant"). (vectorize it) -nxb; August 24, 2020 at 5:15 P.M.
# TODO: fix all these "hell-names" while also making sure the names are actually descriptive to future readers. Perhaps you should just document? -nxb, August 27, 2020 at 6:12 P.M.
# There's no way to make everything PERFECT, Bendich.
# By "hell-names," I mean shit like "xWidthAtTopYHeight_RealCustomerInches", "xWidthAtBotYHeight_RealCustomerInches", -nxb, August 27, 2020 at 6:12 P.M.
# It was even worse at other time(s): ridiculously long names and shit like "xWidth_RealCustomerMeasure_InInches_atTopYHeightTheYValueOfWhichIsInPixelInches"
# -nxb, August 27, 2020 at 6:12 P.M.
# TODO: find the bug(s) in this function and/or approach. -nxb; Aug 24, 3:39 P.M.
# TODO: rename variables so it's clear to future-NXB that xWidthAtBotYHeight_RealCustomerInches, xWidthAtTopYHeight_RealCustomerInches, zDepthAtBotYHeight_RealCustomerInches, and zDepthAtTopYHeight_RealCustomerInches are REAL_CUSTOMER_MEASURES_IN_INCHES.
# TODO: write this function with proper error-checking. rewrite this with proper error-checking, proper input of parameters into the function "scaleLegLinearlyWithYHeight(verts, ... , ... , ... , ... , ... , ... )" instead of just "moreParams" as we put in the funccall to scaleLegLinearlyWithYHeight() on line 619 in the function entitled "makeLeftLeg()" -nxb, August 27, 2020
# TODO
# TODO: make sure "`np.isclose()`" has a high enough tolerance. (ie. the assertion shouldn't fail when everything is fine) -nxb, August 24, 2020
# TODO
#assert np.isclose(verts[:,Y].max(), yTop) and np.isclose(verts[:,Y].min(), yBot) # TODO: rewrite this with proper error-checking, proper input of parameters into the function "scaleLegLinearlyWithYHeight(verts, ... , ... , ... , ... , ... , ... )" instead of just "moreParams" as we put in the funccall to scaleLegLinearlyWithYHeight() on line 619 in the function entitled "makeLeftLeg()"
# TODO
# TODO: make sure "`np.isclose()`" has a high enough tolerance. (ie. the assertion shouldn't fail when everything is fine) -nxb, August 24, 2020
# TODO
#===================================================================================================
# in main()
# This stub/function header was written on August 17, 2020: -nxb
"""
leftLegVerts = makeLeftLeg(
resizedLeftUpperLegVerts,
resizedLeftLowerLegVerts,
mergingParams={"leftUpperLegParams":leftUpperLegParams, "leftLowerLegParams":leftLowerLegParams}
)
vs = ...
"""
""" DON'T DELETE! this information is always always always pretty useful.
DON'T DELETE! this information is always always always pretty useful.
DON'T DELETE! this information is always always always pretty useful.
DON'T DELETE! this information is always always always pretty useful.
DON'T DELETE! this information is always always always pretty useful.
maxesAndMins: NOTE: was this from "`verts`" or from "`joints`" ? -nxb, August 14, 2020
{'xMax': 0.8656285,
'xMin': -0.8656085,
'yMax': 0.43483356,
'yMin': -1.3589503,
'zMax': 0.15359592,
'zMin': -0.1527159}
"""
#===================================================================================================
# in main()
# TODO: automate estimating customer's lowerLeg length from video(s) and OpenPose. -nxb, August 14, 2020
#====================================================
# Move the lowerLeg back and forth and
# left and right until it's directly
# under the upperLeg
#
# -nxb, August 18, 2020
#====================================================
# (I begun a more technical way of saying it,
# but I figure the code is the technically arcane, hard-to-understand version anyway.
# In case you want to see my "failures," here's the more "arcane" diction-style comment:
# "Calculate the "lateral" shift to align the vertices:" -nxb, August 18, 2020
#====================================================
#===================================================================================================
#===================================================================================================
# old docstring for "leftLowerLeg(...)"
'''
This lowerLeg function SHOULD do the following: (August 18, 2020)
(This docstring was written on August 18, 2020)
"""
Expected, desired, non-buggy behavior:
1. Returns vertices **__MOSTLY__** centered on (0,0,0), except the yMin is now 0.
2. The truth is a little more detailed:
a. Basically, the SMPL-X base model I'm using has the center of the chest at (0,0,0).
(ie. The sternum is at the right xy position. The anatomical location of z==0 is about as "deep" in the human body as the armpit(s))
b. Therefore, the output mesh should have the "bottom of the lowerLeg" at y==0 for easier matching
This is only the way it should be for bodyParts like the upperLeg-lowerLeg boundary where the translation-before-smoothing is all along the **__Y__** axis. For instance, for the foreArm-upperArm boundary in T-pose, the boundary should be along the **__X__** axis **__INSTEAD__**
"""
'''
#===================================================================================================
    #raise Exception("TODO: finish implementing this function.")
# Idea: have this spit out all the body parts with centroids at (0,0,0) ? Then it's a different function's job to "put Humpty-Dumpty back together again" -NXB, August 15, 2020
# NOTE: generalize this to be "getResizedLeftSMPLX_BodyPartNameHere(..., otherParams, ...)" ?
# -nxb, August 15, 2020
'''
This function can be generalized to each body part?
Code reuse would prevent some headaches, make it so changing error-catching in just the general function would fix the error(s) for all the body parts rather than just the one,
August 15, 2020
'''
# TODO: find the EXACT RIGHT VERTEX in SMPL-X that will let us scale the lowerLeg correctly
#===================================================================================================
#===================================================================================================
#===================================================================================================
#===================================================================================================
#===================================================================================================
# NOTE NOTE NOTE NOTE NOTE this iteration isn't perfect. Especially when it comes to the measurement detail(s) from just a few "regular" RGB images
# NOTE: NOTE: NOTE: NOTE: NOTE: slight assumption that causes a problem: I can't really scale the lowerLeg directly to yHeight==1, because the SMPL-X lowerLeg we get in T-Pose IS SLANTED, not completely "vertical," EVEN WHEN the pose is the "canonical T-Pose"
# TODO: find the exact right vertex (multiple vertic(es), ESPECIALLY when the WHOLE BODY comes into play) in SMPL-X that will let us scale the lowerLeg correctly
# NOTE NOTE NOTE NOTE NOTE
#===================================================================================================
#===================================================================================================
#===================================================================================================
#===================================================================================================
#===================================================================================================
|
# encoding:utf-8
import urllib2
import cookielib
"""
The following program extracts the cookie information from the HTTP response.
"""
# Create a CookieJar object to store cookies
cookie = cookielib.CookieJar()
# Handler that automatically extracts cookies from responses
header = urllib2.HTTPCookieProcessor(cookie)
# Build an opener that uses the cookie handler
opener = urllib2.build_opener(header)
response = opener.open("http://www.renren.com/")
cookies = ""
for data in cookie:
cookies = cookies + data.name + "=" + data.value + "\r\n"
print(cookies)
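# For reference, the same flow in Python 3 (a sketch: urllib2 and cookielib
# were merged into urllib.request and http.cookiejar). The version guard keeps
# this block inert when the script is run under Python 2.
import sys
if sys.version_info[0] >= 3:
    import urllib.request
    import http.cookiejar

    jar = http.cookiejar.CookieJar()
    opener3 = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
    response3 = opener3.open("http://www.renren.com/")
    cookies3 = ""
    for item in jar:
        cookies3 += item.name + "=" + item.value + "\r\n"
    print(cookies3)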
|
N = int(input())
# Collect all divisors of N.
div = []
i = 1
while i * i <= N:
    if N % i == 0:
        div.append(i)
        if N // i != i:
            div.append(N // i)
    i += 1
div.sort()
# If floor(N / m) == N % m == q, then N = q * (m + 1), so every valid m has
# the form N // d - 1 for some divisor d of N. Check each candidate directly
# and sum the valid values of m.
ans = 0
for d in div:
    m = N // d - 1
    if m > 0 and N // m == N % m:
        ans += m
print(ans)
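# Sanity check (a sketch, not part of the original solution): for small N the
# answer can be verified directly, since it is the sum of every m > 0 with
# floor(N / m) == N % m.
if N <= 10 ** 6:
    assert ans == sum(m for m in range(1, N + 1) if N // m == N % m)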
|
from models.base.EntityBase import EntityBase
from models.base.integration.DataIntegrationConnectionDatabaseBase import DataIntegrationConnectionDatabaseBase
from models.base.integration.DataIntegrationConnectionFileBase import DataIntegrationConnectionFileBase
from models.base.integration.DataIntegrationConnectionQueueBase import DataIntegrationConnectionQueueBase
from infrastructor.json.BaseConverter import BaseConverter
@BaseConverter.register
class DataIntegrationConnectionBase(EntityBase):
def __init__(self,
SourceOrTarget: int = None,
DataIntegrationId: int = None,
ConnectionId: int = None,
DataIntegration=None,
Connection=None,
                 Database: DataIntegrationConnectionDatabaseBase = None,
                 File: DataIntegrationConnectionFileBase = None,
                 Queue: DataIntegrationConnectionQueueBase = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.SourceOrTarget: int = SourceOrTarget
        self.DataIntegrationId: int = DataIntegrationId
        self.ConnectionId: int = ConnectionId
self.DataIntegration = DataIntegration
self.Connection = Connection
self.Database = Database
self.File = File
self.Queue = Queue
|
"""
This is the Journal Module
"""
import os
def load_journal(name):
"""
    This method creates and loads a new journal
:param name: the name of the journal
:return: a list with entries in the journal as its elements
"""
data = []
filename = get_journal_path(name)
if os.path.exists(filename):
with open(filename) as fin:
for entry in fin.readlines():
data.append(entry.rstrip())
return data
def add_entry(entry, data):
"""
Appends entry to journal data
:param entry: The entry of our journal
:param data: The journal data structure
:return: None
"""
data.append(entry)
def save_journal(name, data):
"""
    Writes and saves the journal data to the journal file
    :param name: the name of the journal to be written to
    :param data: the journal data structure
:return: None
"""
filename = get_journal_path(name)
print("....saving to '{}'".format(filename))
with open(filename, 'w') as fout:
for entry in data:
fout.write(entry + "\n")
def get_journal_path(name):
"""
Gets the full path to the journal
    :param name: the name of the journal
:return: The filepath to the journal
"""
filename = os.path.abspath(os.path.join(".", "journals", name + '.jrl'))
return filename
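if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): load a journal,
    # add an entry, and save it back. The journals directory is created first
    # so that save_journal() has somewhere to write.
    os.makedirs(os.path.join(".", "journals"), exist_ok=True)
    journal = load_journal("default")
    add_entry("First entry.", journal)
    save_journal("default", journal)
    print(load_journal("default"))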
|
import re
import datetime
import decimal
from twisted.internet import defer
from scrapy.spider import BaseSpider
from scrapy.http import Request, Response
from scrapy.utils.py26 import json
class SpiderReferencer(object):
"""Class to serialize (and deserialize) objects (typically dicts)
containing references to running spiders (ie. Spider objects). This is
required because simplejson fails to serialize dicts containing
non-primitive types as keys, even when you override
ScrapyJSONEncoder.default() with a custom encoding mechanism.
"""
spider_ref_re = re.compile('^spider:([0-9a-f]+)?:?(.+)?$')
def __init__(self, crawler):
self.crawler = crawler
def get_reference_from_spider(self, spider):
return 'spider:%x:%s' % (id(spider), spider.name)
def get_spider_from_reference(self, ref):
"""Returns the Spider referenced by text, if text is a spider
reference. Otherwise it returns the text itself. If the text references
a non-running spider it raises a RuntimeError.
"""
m = self.spider_ref_re.search(ref)
if m:
spid, spname = m.groups()
for spider in self.crawler.engine.open_spiders:
if "%x" % id(spider) == spid or spider.name == spname:
return spider
raise RuntimeError("Spider not running: %s" % ref)
return ref
def encode_references(self, obj):
"""Look for Spider objects and replace them with spider references"""
if isinstance(obj, BaseSpider):
return self.get_reference_from_spider(obj)
elif isinstance(obj, dict):
d = {}
for k, v in obj.items():
k = self.encode_references(k)
v = self.encode_references(v)
d[k] = v
return d
elif isinstance(obj, (list, tuple)):
return [self.encode_references(x) for x in obj]
else:
return obj
def decode_references(self, obj):
"""Look for spider references and replace them with Spider objects"""
if isinstance(obj, basestring):
return self.get_spider_from_reference(obj)
elif isinstance(obj, dict):
d = {}
for k, v in obj.items():
k = self.decode_references(k)
v = self.decode_references(v)
d[k] = v
return d
elif isinstance(obj, (list, tuple)):
return [self.decode_references(x) for x in obj]
else:
return obj
class ScrapyJSONEncoder(json.JSONEncoder):
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def __init__(self, *a, **kw):
crawler = kw.pop('crawler', None)
self.spref = kw.pop('spref', None) or SpiderReferencer(crawler)
super(ScrapyJSONEncoder, self).__init__(*a, **kw)
def encode(self, o):
if self.spref:
o = self.spref.encode_references(o)
return super(ScrapyJSONEncoder, self).encode(o)
def default(self, o):
if isinstance(o, datetime.datetime):
return o.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
elif isinstance(o, datetime.date):
return o.strftime(self.DATE_FORMAT)
elif isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, defer.Deferred):
return str(o)
elif isinstance(o, Request):
return "<%s %s %s>" % (type(o).__name__, o.method, o.url)
elif isinstance(o, Response):
return "<%s %s %s>" % (type(o).__name__, o.status, o.url)
else:
return super(ScrapyJSONEncoder, self).default(o)
class ScrapyJSONDecoder(json.JSONDecoder):
def __init__(self, *a, **kw):
crawler = kw.pop('crawler', None)
self.spref = kw.pop('spref', None) or SpiderReferencer(crawler)
super(ScrapyJSONDecoder, self).__init__(*a, **kw)
def decode(self, s):
o = super(ScrapyJSONDecoder, self).decode(s)
if self.spref:
o = self.spref.decode_references(o)
return o
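if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): with no running
    # crawler, plain strings pass through the spider-reference machinery
    # untouched, while default() still handles datetime and Decimal values.
    encoder = ScrapyJSONEncoder()
    print(encoder.encode({"when": datetime.datetime(2011, 1, 1),
                          "price": decimal.Decimal("9.99")}))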
|
from bokeh.io import output_file, show
from bokeh.models import CustomJS, Slider, Div, Column
# NOTE: the JS function that provides string formatting below is based on the answer
# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format
callback = CustomJS(code="""
var s1 = slider1.get('value')
var s2 = slider2.get('value')
var s3 = slider3.get('value')
if (!String.prototype.format) {
String.prototype.format = function() {
var args = arguments;
return this.replace(/{(\d+)}/g, function(match, number) {
return typeof args[number] != 'undefined'
? args[number]
: match
;
});
};
}
para.set('text', "<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}".format(s1, s2, s3))
""")
para = Div(text="<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0")
s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous")
s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
s3 = Slider(title="Slider 3 (Mouse Up)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="mouseup")
callback.args['para'] = para
callback.args['slider1'] = s1
callback.args['slider2'] = s2
callback.args['slider3'] = s3
output_file('slider_callback_policy.html')
show(Column(s1, s2, s3, para))
|
from .fluent import Fluent
__red_end_user_data_statement__ = (
    "This cog does not persistently store data about users. This cog uses Google's free translation API."
)
def setup(bot):
cog = Fluent(bot)
bot.add_cog(cog)
|
from django.urls import path
from . import views
app_name = "sheets"
urlpatterns = [
path('feature_values', views.get_feature, name="feature_values"),
path('feature_name_tree', views.get_feature_name_tree, name="feature_name_tree"),
path('feature_search_list', views.get_feature_search_list, name="feature_search_list")
]
|
# Copyright (c) 2017 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest import test
import testscenarios
from testscenarios.scenarios import multiply_scenarios
from neutron.tests.tempest.common import ssh
from neutron.tests.tempest import config
from neutron.tests.tempest.scenario import base
from neutron.tests.tempest.scenario import constants
CONF = config.CONF
load_tests = testscenarios.load_tests_apply_scenarios
class FloatingIpTestCasesMixin(object):
credentials = ['primary', 'admin']
@classmethod
@test.requires_ext(extension="router", service="network")
def resource_setup(cls):
super(FloatingIpTestCasesMixin, cls).resource_setup()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router_by_client()
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.keypair = cls.create_keypair()
cls.secgroup = cls.os_primary.network_client.create_security_group(
name=data_utils.rand_name('secgroup-'))['security_group']
cls.security_groups.append(cls.secgroup)
cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
if cls.same_network:
cls._dest_network = cls.network
else:
cls._dest_network = cls._create_dest_network()
@classmethod
def _create_dest_network(cls):
network = cls.create_network()
subnet = cls.create_subnet(network,
cidr=netaddr.IPNetwork('10.10.0.0/24'))
cls.create_router_interface(cls.router['id'], subnet['id'])
return network
def _create_server(self, create_floating_ip=True, network=None):
if network is None:
network = self.network
port = self.create_port(network, security_groups=[self.secgroup['id']])
if create_floating_ip:
fip = self.create_and_associate_floatingip(port['id'])
else:
fip = None
server = self.create_server(
flavor_ref=CONF.compute.flavor_ref,
image_ref=CONF.compute.image_ref,
key_name=self.keypair['name'],
networks=[{'port': port['id']}])['server']
waiters.wait_for_server_status(self.os_primary.servers_client,
server['id'],
constants.SERVER_STATUS_ACTIVE)
return {'port': port, 'fip': fip, 'server': server}
def _test_east_west(self):
# The proxy VM is used to control the source VM when it doesn't
# have a floating-ip.
if self.src_has_fip:
proxy = None
proxy_client = None
else:
proxy = self._create_server()
proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
# Source VM
if self.src_has_fip:
src_server = self._create_server()
src_server_ip = src_server['fip']['floating_ip_address']
else:
src_server = self._create_server(create_floating_ip=False)
src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
ssh_client = ssh.Client(src_server_ip,
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'],
proxy_client=proxy_client)
# Destination VM
if self.dest_has_fip:
dest_server = self._create_server(network=self._dest_network)
else:
dest_server = self._create_server(create_floating_ip=False,
network=self._dest_network)
# Check connectivity
self.check_remote_connectivity(ssh_client,
dest_server['port']['fixed_ips'][0]['ip_address'])
if self.dest_has_fip:
self.check_remote_connectivity(ssh_client,
dest_server['fip']['floating_ip_address'])
class FloatingIpSameNetwork(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
scenarios = multiply_scenarios([
('SRC with FIP', dict(src_has_fip=True)),
('SRC without FIP', dict(src_has_fip=False)),
], [
('DEST with FIP', dict(dest_has_fip=True)),
('DEST without FIP', dict(dest_has_fip=False)),
])
same_network = True
@decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
def test_east_west(self):
self._test_east_west()
class FloatingIpSeparateNetwork(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
scenarios = multiply_scenarios([
('SRC with FIP', dict(src_has_fip=True)),
('SRC without FIP', dict(src_has_fip=False)),
], [
('DEST with FIP', dict(dest_has_fip=True)),
('DEST without FIP', dict(dest_has_fip=False)),
])
same_network = False
@decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
def test_east_west(self):
self._test_east_west()
|
from redbot.core import commands
from .loot import Loot
def setup(bot: commands.Bot):
bot.add_cog(Loot(bot))
|
from .delete import random_deletion
from .swap import random_swap
|
from flask import Flask, render_template, request, session
import os
from flask_session import Session
# Configure app
app = Flask(__name__)
# Configure session
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
images = os.path.join("static", "images")
app.config["UPLOAD_FOLDER"] = images
chocolate_Images = os.listdir("static/images/chocolates")
print(len(chocolate_Images))
@app.route("/")
def index():
return render_template("index.html")
@app.route("/favorites", methods=["GET", "POST"])
def favorites():
    # Ensure the favorites dict exists in the session
    if "favourite" not in session:
        session["favourite"] = {}
    if request.method == "POST":
        image_path = request.form.get("id")
        favorite_dic = session["favourite"]
        if image_path not in favorite_dic:
            favorite_dic[image_path] = image_path
            # Store back explicitly so the change is persisted regardless of
            # how the session backend tracks nested modifications
            session["favourite"] = favorite_dic
        print(favorite_dic)
        return render_template("favorites.html")
return render_template("favorites.html")
@app.route("/pricelist")
def pricelist():
return render_template("/pricelist.html")
@app.route("/chocolates")
def chocolates():
# chocolate_images = os.listdir("static/images/chocolates")
chocolate_images = ["images/chocolates/" + images for images in chocolate_Images]
# len_chocolates = len(chocolate_images)
# chocolate = {}
# for k,v in enumerate(chocolate_images):
# key = k
# image = v
# chocolate[key] = image
return render_template("/chocolates.html", chocolate=chocolate_images)
@app.route("/decorations")
def decorations():
return render_template("/decorations.html")
@app.route("/gifts")
def gifts():
return render_template("/gifts.html")
|
#!/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Test script that exercises the different ways BLE advertisements can run
concurrently. This test was designed to be run in a shield box.
"""
import concurrent
import os
import time
from queue import Empty
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.test_utils.bt.BleEnum import AdvertiseSettingsAdvertiseMode
from acts.test_utils.bt.BleEnum import ScanSettingsCallbackType
from acts.test_utils.bt.BleEnum import ScanSettingsScanMode
from acts.test_utils.bt.bt_test_utils import adv_succ
from acts.test_utils.bt.bt_test_utils import generate_ble_advertise_objects
from acts.test_utils.bt.bt_test_utils import generate_ble_scan_objects
from acts.test_utils.bt.bt_test_utils import get_advanced_droid_list
from acts.test_utils.bt.bt_test_utils import reset_bluetooth
from acts.test_utils.bt.bt_test_utils import scan_result
from acts.test_utils.bt.bt_test_utils import take_btsnoop_logs
class ConcurrentBleAdvertisingTest(BluetoothBaseTest):
default_timeout = 10
max_advertisements = 4
def __init__(self, controllers):
BluetoothBaseTest.__init__(self, controllers)
self.droid_list = get_advanced_droid_list(self.android_devices)
self.scn_ad = self.android_devices[0]
self.adv_ad = self.android_devices[1]
self.max_advertisements = self.droid_list[1]['max_advertisements']
if self.max_advertisements == 0:
self.tests = ()
return
def on_fail(self, test_name, begin_time):
self.log.debug(
"Test {} failed. Gathering bugreport and btsnoop logs".format(
test_name))
take_btsnoop_logs(self.android_devices, self, test_name)
reset_bluetooth(self.android_devices)
def setup_test(self):
return reset_bluetooth(self.android_devices)
def _verify_n_advertisements(self, num_advertisements, filter_list):
test_result = False
address_list = []
self.scn_ad.droid.bleSetScanSettingsCallbackType(
ScanSettingsCallbackType.CALLBACK_TYPE_ALL_MATCHES.value)
self.scn_ad.droid.bleSetScanSettingsScanMode(
ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(
AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)
advertise_data = self.adv_ad.droid.bleBuildAdvertiseData()
advertise_settings = self.adv_ad.droid.bleBuildAdvertiseSettings()
advertise_callback_list = []
for i in range(num_advertisements):
advertise_callback = self.adv_ad.droid.bleGenBleAdvertiseCallback()
advertise_callback_list.append(advertise_callback)
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
self.log.info("Advertisement {} started.".format(i + 1))
except Empty as error:
self.log.info("Advertisement {} failed to start.".format(i +
1))
self.log.debug("Test failed with Empty error: {}".format(
error))
return False
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test failed, filtering callback onSuccess never occurred: "
"{}".format(error))
return False
scan_settings = self.scn_ad.droid.bleBuildScanSetting()
scan_callback = self.scn_ad.droid.bleGenScanCallback()
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
start_time = time.time()
while (start_time + self.default_timeout) > time.time():
event = None
try:
event = self.scn_ad.ed.pop_event(
scan_result.format(scan_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with: {}".format(error))
return test_result
except concurrent.futures._base.TimeoutError as error:
self.log.debug("Test failed with: {}".format(error))
return test_result
address = event['data']['Result']['deviceInfo']['address']
if address not in address_list:
address_list.append(address)
if len(address_list) == num_advertisements:
test_result = True
break
for callback in advertise_callback_list:
self.adv_ad.droid.bleStopBleAdvertising(callback)
self.scn_ad.droid.bleStopBleScan(scan_callback)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_defaults(self):
"""Testing max advertisements.
Test that a single device can have the max advertisements
concurrently advertising.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Start scanning on the max_advertisements as defined in the script.
4. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 0
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleBuildScanFilter(filter_list)
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_include_device_name_and_filter_device_name(
self):
"""Testing max advertisement variant.
Test that a single device can have the max advertisements
concurrently advertising. Include the device name as a part of the filter
and advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include device name in each advertisement.
4. Include device name filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleBuildScanFilter(filter_list)
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_exclude_device_name_and_filter_device_name(
self):
"""Test max advertisement variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the device name as a part of the filter but not the
advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include device name filter in the scanner.
4. Start scanning on the max_advertisements as defined in the script.
5. Verify that no advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(False)
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleBuildScanFilter(filter_list)
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return not test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_manufacturer_data(self):
"""Test max advertisement variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the manufacturer data as a part of the filter and
advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include manufacturer data in each advertisement.
4. Include manufacturer data filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterManufacturerData(1, "1")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleAddAdvertiseDataManufacturerId(1, "1")
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_manufacturer_data_mask(self):
"""Test max advertisements variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the manufacturer data mask as a part of the filter
and advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include manufacturer data in each advertisement.
4. Include manufacturer data mask filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterManufacturerData(1, "1", "1")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleAddAdvertiseDataManufacturerId(1, "1")
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_service_data(self):
"""Test max advertisement variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the service data as a part of the filter and
advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include service data in each advertisement.
4. Include service data filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterServiceData(
"0000110A-0000-1000-8000-00805F9B34FB", "11,17,80")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleAddAdvertiseDataServiceData(
"0000110A-0000-1000-8000-00805F9B34FB", "11,17,80")
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_manufacturer_data_mask_and_include_device_name(
self):
"""Test max advertisement variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the device name and manufacturer data as a part of
the filter and advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include device name and manufacturer data in each advertisement.
4. Include device name and manufacturer data filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleSetScanFilterManufacturerData(1, "1", "1")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleAddAdvertiseDataManufacturerId(1, "1")
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_service_uuids(self):
"""Test max advertisement variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the service uuid as a part of the filter and
advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include service uuid in each advertisement.
4. Include service uuid filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 1
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterServiceUuid(
"00000000-0000-1000-8000-00805f9b34fb")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleSetAdvertiseDataSetServiceUuids(
["00000000-0000-1000-8000-00805f9b34fb"])
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_with_service_uuid_and_service_mask(self):
"""Test max advertisements variant.
Test that a single device can have the max advertisements concurrently
advertising. Include the service mask as a part of the filter and
advertisement data.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Include service uuid in each advertisement.
4. Include service mask filter in the scanner.
5. Start scanning on the max_advertisements as defined in the script.
6. Verify that all advertisements are found.
Expected Result:
All advertisements should start without errors.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterServiceUuid(
"00000000-0000-1000-8000-00805f9b34fb",
"00000000-0000-1000-8000-00805f9b34fb")
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.adv_ad.droid.bleSetAdvertiseDataSetServiceUuids(
["00000000-0000-1000-8000-00805f9b34fb"])
test_result = self._verify_n_advertisements(self.max_advertisements,
filter_list)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_advertisements_plus_one(self):
"""Test max advertisements plus one.
Test that a single device can have the max advertisements concurrently
        advertising, but fails to start the max advertisements plus one.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Start max_advertisements + 1.
Expected Result:
The last advertisement should fail.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 0
"""
test_result = True
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleBuildScanFilter(filter_list)
test_result = self._verify_n_advertisements(
self.max_advertisements + 1, filter_list)
return not test_result
@BluetoothBaseTest.bt_test_wrap
def test_start_two_advertisements_on_same_callback(self):
"""Test invalid advertisement scenario.
Test that a single device cannot have two advertisements start on the
same callback.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Call start ble advertising on the same callback.
Expected Result:
The second call of start advertising on the same callback should fail.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 1
"""
test_result = True
advertise_callback, advertise_data, advertise_settings = (
generate_ble_advertise_objects(self.adv_ad.droid))
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with Empty error: {}".format(error))
return False
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test failed, filtering callback onSuccess never occurred: {}"
.format(error))
try:
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
test_result = False
except Empty as error:
self.log.debug("Test passed with Empty error: {}".format(error))
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test passed, filtering callback onSuccess never occurred: {}"
.format(error))
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_toggle_advertiser_bt_state(self):
"""Test forcing stopping advertisements.
Test that a single device resets its callbacks when the bluetooth state is
reset. There should be no advertisements.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Call start ble advertising.
4. Toggle bluetooth on and off.
5. Scan for any advertisements.
Expected Result:
No advertisements should be found after toggling Bluetooth on the
advertising device.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 2
"""
test_result = True
advertise_callback, advertise_data, advertise_settings = (
generate_ble_advertise_objects(self.adv_ad.droid))
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with Empty error: {}".format(error))
return False
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test failed, filtering callback onSuccess never occurred: {}".format(
error))
filter_list, scan_settings, scan_callback = generate_ble_scan_objects(
self.scn_ad.droid)
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
try:
self.scn_ad.ed.pop_event(
scan_result.format(scan_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with: {}".format(error))
return False
except concurrent.futures._base.TimeoutError as error:
self.log.debug("Test failed with: {}".format(error))
return False
self.scn_ad.droid.bleStopBleScan(scan_callback)
test_result = reset_bluetooth([self.android_devices[1]])
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
if not test_result:
return test_result
try:
self.scn_ad.ed.pop_event(
scan_result.format(scan_callback), self.default_timeout)
return False
except Empty as error:
self.log.debug("Test passed with: {}".format(error))
except concurrent.futures._base.TimeoutError as error:
self.log.debug("Test passed with: {}".format(error))
self.scn_ad.droid.bleStopBleScan(scan_callback)
self.adv_ad.droid.bleStopBleAdvertising(advertise_callback)
return test_result
@BluetoothBaseTest.bt_test_wrap
def test_restart_advertise_callback_after_bt_toggle(self):
"""Test starting an advertisement on a cleared out callback.
Test that a single device resets its callbacks when the bluetooth state
is reset.
Steps:
1. Setup the scanning android device.
2. Setup the advertiser android device.
3. Call start ble advertising.
4. Toggle bluetooth on and off.
5. Call start ble advertising on the same callback.
Expected Result:
Starting an advertisement on a callback id after toggling bluetooth
should fail.
Returns:
Pass if True
Fail if False
TAGS: LE, Advertising, Concurrency
Priority: 1
"""
test_result = True
advertise_callback, advertise_data, advertise_settings = (
generate_ble_advertise_objects(self.adv_ad.droid))
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with Empty error: {}".format(error))
test_result = False
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test failed, filtering callback onSuccess never occurred: {}".format(
error))
test_result = reset_bluetooth([self.android_devices[1]])
if not test_result:
return test_result
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
except Empty as error:
self.log.debug("Test failed with Empty error: {}".format(error))
test_result = False
except concurrent.futures._base.TimeoutError as error:
self.log.debug(
"Test failed, filtering callback onSuccess never occurred: {}".format(
error))
return test_result
|
#!/usr/bin/python3
# coding: utf-8
import numpy as np
import plotly.graph_objects as go
from scipy import fftpack
def fft_denoise(x, y, showFigure=True, freq_int=0.15, freq_th=0.18, freq_min_A=0.03):
n = len(x)
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y))
fig.show()
    # FFT, normalized so that coefficient magnitudes approximate signal amplitudes
    y_hat = fftpack.fft(y) / (n/2)
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y_hat.real))
fig.show()
    # Frequency associated with each FFT bin; freq_int is the assumed sample spacing
    freq = fftpack.fftfreq(n, freq_int)
    if showFigure and False:  # frequency plot disabled
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=x, y=freq))
        fig.show()
    # Keep only positive frequencies up to freq_th whose amplitude is at least freq_min_A
    y_hat[freq < 0] = 0
    y_hat[freq > freq_th] = 0
    y_hat[np.abs(y_hat) < freq_min_A] = 0
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y_hat.real))
fig.show()
y2 = np.real(fftpack.ifft(y_hat) * (n))
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y, mode='lines',
line=dict(width=.5, color='red')))
fig.add_trace(go.Scatter(x=x, y=y2, mode='lines+markers',
marker=dict(size=1, color='blue')))
fig.show()
return y2
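if __name__ == '__main__':
    # Hedged usage sketch (synthetic data, not from the original author):
    # denoise a slow sine wave with added Gaussian noise. The sample spacing
    # of x roughly matches the default freq_int of 0.15.
    x = np.linspace(0, 15, 100)
    y = np.sin(2 * np.pi * 0.1 * x) + 0.2 * np.random.randn(len(x))
    y_denoised = fft_denoise(x, y, showFigure=False)
    print(y_denoised[:5])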
|
import argparse
import boto3
import configparser
import datetime
import math
import json
import time
from decimal import Decimal
from binance.exceptions import BinanceAPIException
from binance.client import Client
def get_timestamp():
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(
description="""
This is a basic Binance DCA buying/selling bot.
ex:
        ETHBTC SELL 0.00125 BTC (sell 0.00125 BTC worth of ETH)
        ETHBTC SELL 0.1 ETH (sell 0.1 ETH)
""",
formatter_class=argparse.RawTextHelpFormatter
)
# Required positional arguments
parser.add_argument('market_name', help="(e.g. BTCUSDT, ETHBTC, etc)")
parser.add_argument('order_side',
type=str,
choices=["BUY", "SELL"])
parser.add_argument('amount',
type=Decimal,
help="The quantity to buy or sell in the amount_currency")
parser.add_argument('amount_currency',
help="The currency the amount is denominated in")
# Optional switches
parser.add_argument('-c', '--settings_config',
default="settings.conf",
dest="settings_config_file",
help="Override default settings config file location")
parser.add_argument('-d', '--dynamic_dca',
action='store_true',
default=False,
dest="dynamic_dca",
help="""Scale the trade amount up or down depending on 24hr price change""")
parser.add_argument('-l', '--live',
action='store_true',
default=False,
dest="live_mode",
help="""Submit live orders. When omitted, just tests API connection
and amount without submitting actual orders""")
parser.add_argument('-j', '--job',
action='store_true',
default=False,
dest="job_mode",
help="""Suppress the confirmation step before submitting
actual orders""")
if __name__ == "__main__":
args = parser.parse_args()
market_name = args.market_name
order_side = args.order_side.lower()
amount = args.amount
amount_currency = args.amount_currency
is_dynamic_dca = args.dynamic_dca
live_mode = args.live_mode
job_mode = args.job_mode
print("%s: STARTED: %s" % (get_timestamp(), args))
if not live_mode:
print("\n")
print("\t================= NOT in Live mode =================")
print("\t* *")
print("\t* No actual trades being submitted! *")
print("\t* *")
print("\t====================================================")
print("\n")
# Read settings
config = configparser.ConfigParser()
config.read(args.settings_config_file)
api_key = config.get('API', 'API_KEY')
api_secret = config.get('API', 'SECRET_KEY')
try:
sns_topic = config.get('AWS', 'SNS_TOPIC')
aws_access_key_id = config.get('AWS', 'AWS_ACCESS_KEY_ID')
aws_secret_access_key = config.get('AWS', 'AWS_SECRET_ACCESS_KEY')
except configparser.NoSectionError:
sns_topic = None
if sns_topic:
# Prep boto SNS client for email notifications
sns = boto3.client(
"sns",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name="us-east-1" # N. Virginia
)
# Instantiate API client
client = Client(api_key, api_secret)
# Get exchange info (pairs available, min order sizes, etc.)
exchange_info = client.get_exchange_info()
"""
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"quoteAssetPrecision": 8,
"baseCommissionPrecision": 8,
"quoteCommissionPrecision": 8,
"orderTypes": [
"LIMIT",
"LIMIT_MAKER",
"MARKET",
"STOP_LOSS_LIMIT",
"TAKE_PROFIT_LIMIT"
],
"icebergAllowed": true,
"ocoAllowed": true,
"quoteOrderQtyMarketAllowed": true,
"isSpotTradingAllowed": true,
"isMarginTradingAllowed": true,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
},
{
"filterType": "PERCENT_PRICE",
"multiplierUp": "5",
"multiplierDown": "0.2",
"avgPriceMins": 5
},
{
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
},
{
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00010000",
"applyToMarket": true,
"avgPriceMins": 5
},
{
"filterType": "ICEBERG_PARTS",
"limit": 10
},
{
"filterType": "MARKET_LOT_SIZE",
"minQty": "0.00000000",
"maxQty": "8949.33836294",
"stepSize": "0.00000000"
},
{
"filterType": "MAX_NUM_ORDERS",
"maxNumOrders": 200
},
{
"filterType": "MAX_NUM_ALGO_ORDERS",
"maxNumAlgoOrders": 5
}
],
"permissions": [
"SPOT",
"MARGIN"
]
}
"""
for market in exchange_info.get("symbols"):
if market.get("symbol") == market_name:
base_currency = market.get("baseAsset")
quote_currency = market.get("quoteAsset")
quote_asset_precision = market.get("quoteAssetPrecision")
# What's this asset's minimum purchase?
for filter in market.get("filters"):
if filter.get('filterType') == 'MIN_NOTIONAL':
base_min_size = Decimal(filter.get("minNotional")).normalize()
elif filter.get('filterType') == 'LOT_SIZE':
base_increment = Decimal(filter.get("stepSize")).normalize()
elif filter.get('filterType') == 'PRICE_FILTER':
quote_increment = Decimal(filter.get("tickSize")).normalize()
if not base_min_size:
raise Exception("MIN_NOTIONAL.minNotional not found in %s info" % market_name)
if not base_increment:
raise Exception("LOT_SIZE.stepSize not found in %s info" % market_name)
if not quote_increment:
raise Exception("PRICE_FILTER.tickSize not found in %s info" % market_name)
if amount_currency == quote_currency:
amount_currency_is_quote_currency = True
elif amount_currency == base_currency:
amount_currency_is_quote_currency = False
else:
raise Exception("amount_currency %s not in market %s" % (amount_currency,
market_name))
print(market)
print("base_min_size: %s" % base_min_size)
print("base_increment: %s" % base_increment)
print("quote_increment: %s" % quote_increment)
if live_mode and not job_mode:
print("\n================================================\n")
response = input("\tLive purchase! Confirm Y/[n]: ")
if response != 'Y':
print("Exiting without submitting orders.")
exit()
purchase_summary = ""
if is_dynamic_dca:
step_size = Decimal("5.0")
amount_multiplier = Decimal("1.0")
amount_divider = Decimal("0.5")
orig_amount = amount
# Get the current 24hr price diff
ticker = client.get_ticker(symbol=market_name)
percent_change = Decimal(ticker.get("priceChangePercent"))
steps = int(math.floor(abs(percent_change / step_size)))
print(f"\tDynamic DCA\n" +
f"\tpercent_change: {percent_change}%\n" +
f"\tsteps: {steps}")
if steps > 0:
if (order_side == 'buy' and percent_change < 0.0) or \
(order_side == 'sell' and percent_change > 0.0):
# We want to multiply up our trade amount
amount += amount * amount_multiplier * Decimal(steps)
print(f"Dynamic DCA scaling amount up {steps}x to {amount}")
else:
# Divide down the trade amount
amount -= amount * amount_divider * Decimal(steps)
if amount <= 0.0:
print(f"Dynamic DCA canceling trade at {percent_change}%")
amount = Decimal("0.0")
else:
print(f"Dynamic DCA scaling amount down {steps}x to {amount}")
else:
# No changes to apply
is_dynamic_dca = False
# What's the current best offer?
# Binance maker/taker fees are the same so just do a market order for fast order
# fills.
depth = client.get_order_book(symbol=market_name, limit=5)
if order_side == 'buy':
market_price = Decimal(depth.get("bids")[0][0])
else:
market_price = Decimal(depth.get("asks")[0][0])
print("market_price: %s %s" % (market_price, quote_currency))
# Denominate our target amount as necessary, then quantize to base_increment
if amount_currency_is_quote_currency:
base_currency_amount = (amount / market_price).quantize(base_increment)
else:
base_currency_amount = Decimal(amount).quantize(base_increment)
print("base_currency_amount: %s %s" % (base_currency_amount, base_currency))
order_value = (base_currency_amount * market_price).quantize(
Decimal('1e-%d' % quote_asset_precision)
)
if order_value < base_min_size:
message = f"Cannot purchase {float(base_currency_amount)} {base_currency} @ {market_price} {quote_currency}. " + \
f"Resulting order of {order_value:.8f} {quote_currency} " + \
f"is below the minNotional value of {base_min_size} {quote_currency}"
print(message)
if is_dynamic_dca:
purchase_summary = "Dynamic DCA: %0.2f%% (%dx): %s %s order of %s (%s) %s CANCELED" % (
percent_change,
steps,
market_name,
order_side,
amount,
orig_amount,
amount_currency
)
print(purchase_summary)
if sns_topic and live_mode:
sns.publish(
TopicArn=sns_topic,
Subject=purchase_summary,
Message=message
)
exit()
else:
print("order_value: %s %s" % (order_value, quote_currency))
if not live_mode:
if order_side == 'buy':
side = Client.SIDE_BUY
else:
side = Client.SIDE_SELL
order = client.create_test_order(
symbol=market_name,
side=side,
type=Client.ORDER_TYPE_MARKET,
quantity=float(base_currency_amount)
)
if order:
print(order)
else:
try:
if order_side == 'buy':
order = client.order_market_buy(
symbol=market_name,
quantity=float(base_currency_amount))
else:
order = client.order_market_sell(
symbol=market_name,
quantity=float(base_currency_amount))
except BinanceAPIException as e:
print(f'Unable to place {market_name} order: {e}')
if sns_topic and live_mode:
sns.publish(
TopicArn=sns_topic,
Subject=f'Unable to place {market_name} order',
Message=str(e)
)
exit()
print(json.dumps(order, indent=4))
"""
{
"symbol": "ADABTC",
"orderId": 194439891,
"orderListId": -1,
"clientOrderId": "jfsd09eijfsdkl",
"transactTime": 1596984553336,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"cummulativeQuoteQty": "0.00012380",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL",
"fills": [
{
"price": "0.00001238",
"qty": "10.00000000",
"commission": "0.00004701",
"commissionAsset": "BNB",
"tradeId": 40016638
}
]
}
"""
market_price = order.get("fills")[0].get("price")
if is_dynamic_dca:
purchase_summary = "Dynamic DCA: %0.2f%% (%dx): %s %s order of %s (%s) %s %s @ %s %s" % (
percent_change,
steps,
market_name,
order_side,
amount,
orig_amount,
amount_currency,
order.get("status"),
market_price,
quote_currency
)
else:
purchase_summary = "%s %s order of %s %s %s @ %s %s" % (
market_name,
order_side,
amount,
amount_currency,
order.get("status"),
market_price,
quote_currency
)
if sns_topic and live_mode:
sns.publish(
TopicArn=sns_topic,
Subject=purchase_summary,
Message=json.dumps(order, indent=4)
)
print("\n================================================")
print(purchase_summary)
if not live_mode:
print("(NOT in live mode - no actual orders placed!)")
|
'''
Created on Apr 19, 2015
@author: tristan
'''
import inspect
from sqlalchemy.orm.collections import InstrumentedList
def obj_map(src, dest, rules=None, mapper=None):
'''
    dest could be 1) an instance => copy the data onto dest and return it
                  2) a class    => create an instance, copy the data onto it and return it
    rules: dict mapping an attribute name to a conversion function that is called with src
    mapper: dict mapping a source type to the destination class used for nested objects and list items
'''
if inspect.isclass(dest):
target = dest()
else:
target = dest
if hasattr(target, '_sa_instance_state'):
columns = target._sa_instance_state.attrs._data.keys()
else:
columns = target.__dict__.keys()
src_columns = dir(src)
if rules is not None:
src_columns = src_columns + rules.keys()
for prop_name in src_columns:
if not prop_name.startswith('_') and prop_name in columns:
# if it is list then map each
# else map itself
if rules is not None and rules.has_key(prop_name):
setattr(target, prop_name, None if rules[prop_name] is None else rules[prop_name](src))
continue
elif hasattr(src, prop_name):
srcValue = getattr(src,prop_name)
else:
srcValue = None
if type(srcValue) is list or type(srcValue) is InstrumentedList:
c = list()
for v in srcValue:
c.append(obj_map(v, mapper[type(v)], rules, mapper))
setattr(target, prop_name, c)
elif type(srcValue) is object:
setattr(target, prop_name, obj_map(srcValue,
mapper[type(srcValue)], rules=rules, mapper=mapper))
else:
setattr(target,prop_name, srcValue)
return target
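if __name__ == '__main__':
    # Hedged usage sketch (hypothetical classes, not part of the original
    # module): copy matching attributes from a plain source object onto a
    # fresh instance of a destination class, overriding one field via a rule.
    class _SourcePerson(object):
        def __init__(self, name, age):
            self.name = name
            self.age = age

    class _TargetPerson(object):
        def __init__(self):
            self.name = None
            self.age = None

    mapped = obj_map(_SourcePerson("Ada", 36), _TargetPerson,
                     rules={"age": lambda s: s.age + 1})
    # mapped.name == "Ada", mapped.age == 37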
|
import csv
import hashlib
import inspect
import logging
from numbers import Number
from namespace import *
import codecs
import os
import petl as etl
import re
from loader.prefixes import PREFIX_LANGUAGE, PREFIX_MULTIMEDIA
from lxml import etree
from petl.util.base import Table
from rdflib import Literal, RDF, RDFS, XSD
# A logger to be used for logging warnings or errors detected during loading.
warning_log = logging.getLogger("load_warnings")
warning_log.setLevel(logging.WARNING)
def num_to_str(num):
"""
Converts a number to a string.
If the number is already a string, then just returns.
"""
if isinstance(num, Number):
return str(int(num))
return num
def join_if_not_empty(items, sep=" "):
"""
Joins a list of items with a provided separator.
Skips an empty item.
"""
joined = ""
for item in items:
if item and len(item) > 0:
if joined != "":
joined += sep
joined += item
return joined
def to_hash_identifier(prefix, parts):
"""
Return an identifier composed of the prefix and hash of the parts.
"""
hash_parts = hashlib.md5("".join([unicode(part) for part in parts if part]).encode("utf-8"))
return "%s-%s" % (prefix, hash_parts.hexdigest())
def season_to_month(season):
"""
Converts a season to the corresponding month.
"""
return {
"Spring": 1,
"Summer": 5,
"Fall": 8
}[season]
months = ("January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December")
def month_str_to_month_int(month_str):
"""
Converts a month name to the corresponding month number.
If already a number, returns the number.
Also, tries to convert the string to a number.
"""
if isinstance(month_str, Number):
return month_str
try:
return int(month_str)
except ValueError:
pass
return months.index(month_str)+1
def month_int_to_month_str(month_int):
if isinstance(month_int, basestring):
return month_int
return months[month_int-1]
def add_date(date_uri, year, g, month=None, day=None, label=None):
"""
Adds triples for a date.
Return True if date was added.
"""
# Date
# Filtering out dates that are set to 1900.
if year and str(year) != "1900":
g.add((date_uri, RDF.type, VIVO.DateTimeValue))
# Day, month, and year
if day and month:
g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthDayPrecision))
g.add((date_uri, VIVO.dateTime,
Literal("%s-%02d-%02dT00:00:00" % (
year, month_str_to_month_int(month), day),
datatype=XSD.dateTime)))
g.add((date_uri,
RDFS.label,
Literal(label or "%s %s, %s" % (month_int_to_month_str(month), num_to_str(day), num_to_str(year)))))
# Month and year
elif month:
g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthPrecision))
g.add((date_uri, VIVO.dateTime,
Literal("%s-%02d-01T00:00:00" % (
year, month_str_to_month_int(month)),
datatype=XSD.dateTime)))
g.add((date_uri,
RDFS.label,
Literal(label or "%s %s" % (month, num_to_str(year)))))
else:
# Just year
g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearPrecision))
g.add((date_uri, VIVO.dateTime,
Literal("%s-01-01T00:00:00" % (
year),
datatype=XSD.dateTime)))
g.add((date_uri, RDFS.label, Literal(label or num_to_str(year))))
return True
return False
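# Hedged usage sketch (comment only, not executed): adding a month/year date to
# an rdflib Graph. The URI below is hypothetical; VIVO comes from the namespace
# star-import above.
#   from rdflib import Graph, URIRef
#   g = Graph()
#   add_date(URIRef("http://example.org/date/2014-05"), 2014, g, month="May")
#   # adds 4 triples: rdf:type vivo:DateTimeValue, vivo:dateTimePrecision
#   # yearMonthPrecision, vivo:dateTime "2014-05-01T00:00:00", rdfs:label "May 2014"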
term_re = re.compile(r"(Spring|Summer|Fall) (\d\d\d\d)")
def add_season_date(date_uri, date_str, g):
    """
    Parses a season date (e.g., Spring 2012) and adds triples.
    Returns True if the parse was successful.
    """
if date_str:
m = term_re.match(date_str)
if m:
season = m.group(1)
year = m.group(2)
return add_date(date_uri, year, g, season_to_month(season), label=date_str)
return False
def add_date_interval(interval_uri, subject_uri, g, start_uri=None, end_uri=None):
"""
Adds triples for a date interval.
"""
if start_uri or end_uri:
g.add((interval_uri, RDF.type, VIVO.DateTimeInterval))
g.add((subject_uri, VIVO.dateTimeInterval, interval_uri))
if start_uri:
g.add((interval_uri, VIVO.start, start_uri))
if end_uri:
g.add((interval_uri, VIVO.end, end_uri))
language_map = {
"ARAB": "Arabic",
"BENG": "Bengali",
"CHIN": "Chinese",
"FREN": "French",
"GERM": "German",
"HIND": "Hindi/Urdu",
"ITAL": "Italian",
"JAPN": "Japanese",
"KREN": "Korean",
"MAND": "Mandarin",
"PORT": "Portuguese",
"PUNJ": "Punjabi",
"RUSS": "Russian",
"SPAN": "Spanish"
}
def add_language(language, person_uri, g):
language_uri = D[to_hash_identifier(PREFIX_LANGUAGE, (language,))]
g.add((language_uri, RDF.type, LINKVOJ.Lingvo))
g.add((language_uri, RDFS.label, Literal(language)))
g.add((person_uri, LINKVOJ.expertUnderstanding, language_uri))
def add_multimedia(multimedia, person_uri, multimedia_predicate, g):
if not multimedia.endswith(","):
multimedia += ","
for multimedia_string in re.findall(r".\|.+?\|.+?,", multimedia):
(multimedia_type, multimedia_label, multimedia_url) = multimedia_string[:-1].split("|")
multimedia_uri = D[to_hash_identifier(PREFIX_MULTIMEDIA, multimedia_url)]
if multimedia_type == "A":
multimedia_class = BIBO.AudioDocument
elif multimedia_type == "O":
multimedia_class = BIBO.Webpage
else:
multimedia_class = VIVO.Video
g.add((multimedia_uri, RDF.type, multimedia_class))
g.add((person_uri, multimedia_predicate, multimedia_uri))
g.add((multimedia_uri, RDFS.label, Literal(multimedia_label)))
g.add((multimedia_uri, VCARD.url, Literal(multimedia_url, datatype=XSD.anyURI)))
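# Hedged illustration (comment only): the multimedia argument is a comma-separated
# run of "type|label|url" entries matched by the regex above; "A" maps to
# bibo:AudioDocument, "O" to bibo:Webpage, anything else to vivo:Video. The
# predicate and URLs below are hypothetical.
#   add_multimedia("A|Podcast interview|http://example.org/ep1.mp3,"
#                  "V|Campus talk|http://example.org/talk.mp4",
#                  person_uri, some_multimedia_predicate, g)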
def strip_gw_prefix(string):
if isinstance(string, basestring) and string.startswith("GW_"):
return string[3:]
return string
def xml_result_generator(filepath):
"""
Returns a generator that provides maps of field names to values read from
xml produced by mysql --xml.
"""
# Using lxml because recover=True makes it tolerant of unicode encoding problems.
for event, row_elem in etree.iterparse(filepath, tag="row", recover=True):
result = {}
for field_elem in row_elem.iter("field"):
if "xsi:nil" in field_elem.attrib or not field_elem.text:
value = None
else:
# Strip whitespace
value = field_elem.text.strip()
result[field_elem.get("name")] = value
row_elem.clear()
yield result
def remove_extra_args(func_args, func):
"""
Removes values from map of function arguments that are not necessary to invoke the function.
"""
(arg_names, varargs, keywords, defaults) = inspect.getargspec(func)
for key in list(func_args.keys()):
if key not in arg_names:
del func_args[key]
def valid_department_name(name):
if name and name not in ("No Department", "University-level Dept"):
return True
return False
def valid_college_name(name):
if name and name not in ("University", "No College Designated"):
return True
return False
# Register banner dialect
csv.register_dialect("banner", delimiter="|")
# Map of banner position codes to VIVO classes
pos_code_to_classes = {
# Research scientist or related
"28101": "NonFacultyAcademic",
"28301": "NonFacultyAcademic",
"28302": "NonFacultyAcademic",
"28502": "NonFacultyAcademic",
"283R2": "NonFacultyAcademic",
"283R1": "NonFacultyAcademic",
"28102": "NonFacultyAcademic",
"19S01": "NonFacultyAcademic",
"28501": "NonFacultyAcademic",
"27401": "NonFacultyAcademic",
# Postdoc
"289A1": "Postdoc",
"289A2": "Postdoc",
# Librarian
"OC221": "Librarian",
"OC231": "Librarian",
"OD311": "Librarian",
"OC241": "Librarian",
"OC211": "Librarian",
"30401": "Librarian",
"OC341": "Librarian",
"OA411": "Librarian",
"OC321": "Librarian"
}
def get_netid_lookup(data_dir):
"""
Returns a map of gwids to netids.
"""
netid_map = {}
with codecs.open(os.path.join(data_dir, "vivo_demographic.txt"), 'r', encoding="utf-8") as csv_file:
reader = csv.DictReader(csv_file, dialect="banner")
for row in reader:
netid_map[row["EMPLOYEEID"]] = row["NETID"]
return netid_map
def demographic_intersection(gwids, data_dir):
"""
Returns the intersection of a provided list of gwids and the gwids in banner
demographic data.
"""
demo_gwids = set()
with codecs.open(os.path.join(data_dir, "vivo_demographic.txt"), 'r', encoding="utf-8") as csv_file:
reader = csv.DictReader(csv_file, dialect="banner")
for row in reader:
demo_gwids.add(row["EMPLOYEEID"])
return list(demo_gwids.intersection(gwids))
def get_non_faculty_gwids(data_dir, non_fac_limit=None):
"""
Returns the list of non-faculty gwids.
This is determined by taking the intersection of gwids in banner
demographic data and gwids in mygw data and
removing all faculty gwids.
"""
mygw_gwids = []
for result in xml_result_generator(os.path.join(data_dir, "mygw_users.xml")):
mygw_gwids.append(result["gw_id"])
# Only gwids with demographic data
demo_gwids = demographic_intersection(mygw_gwids, data_dir)
# Not faculty gwids
fac_gwids = get_faculty_gwids(data_dir)
gwids = [gw_id for gw_id in demo_gwids if gw_id not in fac_gwids]
if non_fac_limit is not None and len(gwids) > non_fac_limit:
return gwids[:non_fac_limit]
else:
return gwids
def get_faculty_gwids(data_dir, fac_limit=None):
"""
Returns the list of faculty gwids.
This is determined by taking the intersection of gwids in banner
demographic data and fis_faculty in certain roles.
"""
gwids = set()
# fis faculty
for result in xml_result_generator(os.path.join(data_dir, "fis_faculty.xml")):
if result["role"] in ("Dean", "Dep Head", "Provost", "Faculty", "Faculty-COI", "CLAD"):
gwids.add(result["gw_id"])
demo_gwids = demographic_intersection(gwids, data_dir)
if fac_limit is not None and len(demo_gwids) > fac_limit:
return demo_gwids[:fac_limit]
else:
return demo_gwids
def mediaexpert_intersection(gwids, data_dir):
"""
Returns the intersection of a provided list of gwids and the gwids in mediaexpert data.
"""
mediaexpert_gwids = set()
for result in xml_result_generator(os.path.join(data_dir, "mygw_mediaexperts.xml")):
mediaexpert_gwids.add(result["gw_id"])
return list(mediaexpert_gwids.intersection(gwids))
def get_skip_name_gwids(data_dir):
"""
Returns the list of gwids for mediaexperts that have names.
"""
skip_name_gwids = set()
for result in xml_result_generator(os.path.join(data_dir, "mygw_mediaexperts.xml")):
if result["last_name"]:
skip_name_gwids.add(result["gw_id"])
return list(skip_name_gwids)
def format_phone_number(phone_number):
if phone_number:
clean_phone_number = phone_number.replace("-", "").replace(" ", "")
if len(clean_phone_number) == 10:
return "%s-%s-%s" % (clean_phone_number[0:3], clean_phone_number[3:6], clean_phone_number[6:])
return None
def frommysqlxml(filename):
return MySqlXmlView(filename)
etl.frommysqlxml = frommysqlxml
class MySqlXmlView(Table):
def __init__(self, filename):
self.filename = filename
def __iter__(self):
yielded_field_names = False
for event, row_elem in etree.iterparse(self.filename, tag="row", recover=True):
field_names = []
values = []
for field_elem in row_elem.iter("field"):
if "xsi:nil" in field_elem.attrib or not field_elem.text:
value = None
else:
# Strip whitespace
value = unicode(field_elem.text).strip()
field_names.append(field_elem.get("name"))
values.append(value)
row_elem.clear()
if not yielded_field_names:
yield field_names
yielded_field_names = True
yield values
|
def swap(arr, index_1, index_2):
temp = arr[index_1]
arr[index_1] = arr[index_2]
arr[index_2] = temp
def bubble_sort(arr):
for i in range(len(arr)):
# iterate through unplaced elements
for idx in range(len(arr) - i - 1):
if arr[idx] > arr[idx + 1]:
# swap elements
arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
nums = [5, 2, 9, 1, 5, 6]
print("Pre-Sort: {0}".format(nums))
bubble_sort(nums)
print("Post-Sort: {0}".format(nums))
|
from bake import *
from scheme import *
from spire.schema import Schema
from spire.support.task import SpireTask
from spire.util import get_package_data
try:
import alembic
except ImportError:
alembic = None
else:
from alembic import command
class CreateSchema(SpireTask):
name = 'spire.schema.create'
description = 'creates a spire schema within a database'
parameters = {
'incremental': Boolean(description='only create new tables', default=False),
'schemas': Sequence(Text(nonnull=True), description='the schemas to create'),
}
def run(self, runtime):
schemas = self['schemas']
if schemas is None:
schemas = []
for token, conf in self.assembly.filter_configuration('schema').iteritems():
schemas.append(conf['schema'])
if not schemas:
runtime.report('no schemas specified or configured; aborting')
return
from spire.schema import Schema
for name in schemas:
interface = Schema.interface(name)
if not self['incremental']:
interface.drop_schema()
runtime.report('creating %r schema' % name)
interface.create_schema()
class DeploySchema(SpireTask):
name = 'spire.schema.deploy'
description = 'deploys a spire schema'
parameters = {
'drop': Boolean(default=False),
'schema': Text(nonempty=True),
}
def run(self, runtime):
from spire.schema import Schema
name = self['schema']
interface = Schema.interface(name)
if self['drop']:
runtime.report('dropping schema %r' % name)
interface.drop_schema()
runtime.report('deploying schema %r to %r' % (name, interface.url))
interface.deploy_schema()
class MigrationTask(SpireTask):
supported = bool(alembic)
parameters = {
'path': Path(description='path to migrations directory', required=True),
'schema': Text(description='name of target schema'),
}
@property
def config(self):
from alembic.config import Config
config = Config()
config.set_main_option('script_location', str(self['path']))
return config
@property
def schema(self):
schema = self['schema']
if schema:
return schema
candidates = Schema.schemas.keys()
if len(candidates) == 1:
return candidates[0]
raise TaskError('no schema specified')
def prepare_environment(self):
root = self['path']
(root / 'versions').mkdir_p()
class InitializeMigrations(MigrationTask):
name = 'spire.migrations.init'
description = 'initialize a migrations directory for a schema'
def run(self, runtime):
root = self['path']
schema = self['schema']
if root.exists():
runtime.report('migrations directory exists for %r; aborting' % schema)
return
root.makedirs_p()
(root / 'versions').mkdir()
script = get_package_data('spire.schema:templates/script.py.mako.tmpl')
(root / 'script.py.mako').write_bytes(script)
env = get_package_data('spire.schema:templates/env.py.tmpl')
(root / 'env.py').write_bytes(env % {
'schema': self.schema,
})
runtime.report('created migrations directory for %r' % self.schema)
class CreateMigration(MigrationTask):
name = 'spire.migrations.create'
description = 'creates a new schema migration'
parameters = {
'autogenerate': Boolean(description='autogenerate migration', default=False),
'title': Text(description='short title for migration', required=True),
}
def run(self, runtime):
self.prepare_environment()
command.revision(self.config, message=self['title'], autogenerate=self['autogenerate'])
class Downgrade(MigrationTask):
name = 'spire.migrations.downgrade'
description = 'downgrade to an older version of the schema'
parameters = {
'revision': Text(description='revision to downgrade to', default='base'),
'sql': Boolean(description='generate sql instead of downgrading database', default=False),
}
def run(self, runtime):
command.downgrade(self.config, revision=self['revision'], sql=self['sql'])
class Upgrade(MigrationTask):
name = 'spire.migrations.upgrade'
    description = 'upgrade to a newer version of the schema'
parameters = {
'revision': Text(description='revision to upgrade to', default='head'),
'sql': Boolean(description='generate sql instead of upgrading database', default=False),
}
def run(self, runtime):
command.upgrade(self.config, revision=self['revision'], sql=self['sql'])
class ShowBranches(MigrationTask):
name = 'spire.migrations.branches'
description = 'show un-spliced branch points'
def run(self, runtime):
command.branches(self.config)
class ShowCurrent(MigrationTask):
name = 'spire.migrations.current'
description = 'show current migration revision'
def run(self, runtime):
command.current(self.config)
class ShowHistory(MigrationTask):
name = 'spire.migrations.history'
description = 'show changeset history'
def run(self, runtime):
command.history(self.config)
|
import caffe
import scipy.io as sio
import os
import cv2
import numpy as np
import yaml
from multiprocessing import Process, Queue
import random
import h5py
import fast_rcnn.bbox_transform
from utils.cython_bbox import bbox_overlaps
import numpy as np
import utils.zl_utils as zl
import random
def bbox_transform_inv(boxes, deltas):
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def bbox_transform(ex_rois, gt_rois):
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
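# Hedged sanity check (comment only, not executed): bbox_transform() encodes a
# target box relative to an anchor box and bbox_transform_inv() decodes it back.
# With made-up boxes:
#   ex = np.array([[10., 10., 50., 50.]])
#   gt = np.array([[12., 14., 56., 60.]])
#   bbox_transform_inv(ex, bbox_transform(ex, gt))
#   # -> [[12. 14. 57. 61.]]: x1/y1 are recovered exactly, while x2/y2 land one
#   #    pixel past gt because widths/heights use the x2 - x1 + 1 convention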
class RelationDatalayer(caffe.Layer):
def get_minibatch(self):
blobs = {}
data = []
visual = []
classeme = []
classeme_s = []
classeme_o = []
visual_s = []
visual_o = []
loc_s = []
loc_o = []
location = []
labels = []
cnt = 0
while cnt < self._batch_size:
if self.imidx >=len(self.imids):
random.shuffle(self.imids)
self.imidx = 0
imid = self.imids[self.imidx]
self.imidx += 1
gt_rlp_labels = self.gt_labels[imid]['rlp_labels']
gt_sub_boxes= self.gt_labels[imid]['sub_boxes']
gt_obj_boxes = self.gt_labels[imid]['obj_boxes']
classemes = self.vgg_data[imid]['classemes']
visuals = self.vgg_data[imid]['visuals']
locations = self.vgg_data[imid]['locations']
cls_confs = self.vgg_data[imid]['cls_confs']
for i in xrange(gt_rlp_labels.shape[0]):
gt_rlp_label = gt_rlp_labels[i]
gt_sub_box = gt_sub_boxes[i]
gt_obj_box= gt_obj_boxes[i]
overlaps = bbox_overlaps(
np.array([gt_sub_box, gt_obj_box]),
locations.astype(np.float))
if overlaps.shape[0] == 0:
continue
sub_sorted = overlaps[0].argsort()[-30:][::-1]
obj_sorted = overlaps[1].argsort()[-30:][::-1]
while len(sub_sorted) > 0 and overlaps[0][sub_sorted[-1]] < .7: sub_sorted = sub_sorted[:-1]
while len(obj_sorted) > 0 and overlaps[1][obj_sorted[-1]] < .7: obj_sorted = obj_sorted[:-1]
if len(sub_sorted) <= 0 or len(obj_sorted) <= 0:
continue
for s in sub_sorted[:1]:
for o in obj_sorted[:1]:
if s != o and cnt < self._batch_size:
sub_visual = visuals[s]
obj_visual = visuals[o]
sub_clsmemes = classemes[s]
obj_clsmemes = classemes[o]
sub_box_encoded = bbox_transform(np.array([locations[o]]), np.array([locations[s]]))[0]
obj_box_encoded = bbox_transform(np.array([locations[s]]), np.array([locations[o]]))[0]
pre_lbl = gt_rlp_label[1]
labels.append(np.float32(pre_lbl))
classeme_s.append(sub_clsmemes)
classeme_o.append(obj_clsmemes)
visual_s.append(sub_visual)
visual_o.append(obj_visual)
loc_s.append(sub_box_encoded)
loc_o.append(obj_box_encoded)
visual.append(np.hstack((sub_visual, obj_visual)))
classeme.append(np.hstack((sub_clsmemes, obj_clsmemes)))
location.append(np.hstack((sub_box_encoded, obj_box_encoded)))
cnt += 1
if cnt >= self._batch_size:
break
blobs['classeme'] = np.array(classeme)
blobs['visual'] = np.array(visual)
blobs['location'] = np.array(location)
blobs['label'] = np.array(labels)
return blobs
def setup(self, bottom, top):
self._cur_idx = 0
self.vgg_data = {}
self.gt_labels = {}
vgg_h5 = h5py.File("output/precalc/vg1_2_2016_train.hdf5", 'r')
if os.path.exists('output/cache/vg1_2_2016_train.pkl'):
self.vgg_data = zl.load('output/cache/vg1_2_2016_train.pkl')
print 'loaded train data from cache'
else:
print 'Preloading training data'
zl.tick()
for k in vgg_h5.keys():
classemes = vgg_h5[k]['classemes'][...]
visuals = vgg_h5[k]['visuals'][...]
locations = vgg_h5[k]['locations'][...]
cls_confs = vgg_h5[k]['cls_confs'][...]
self.vgg_data[k]={}
self.vgg_data[k]['classemes']=classemes
self.vgg_data[k]['visuals']=visuals
self.vgg_data[k]['cls_confs']=cls_confs
self.vgg_data[k]['locations']=locations
print 'done preloading training data %f'%zl.tock()
zl.save('output/cache/vg1_2_2016_train.pkl',self.vgg_data)
vgg_h5.close()
self.meta = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5', 'r')
if os.path.exists('output/cache/vg1_2_2016_gt.pkl'):
self.gt_labels = zl.load('output/cache/vg1_2_2016_gt.pkl')
print 'loaded gt data from cache'
else:
print 'Preloading gt'
zl.tick()
for k in self.meta['gt/train'].keys():
rlp_labels = self.meta['gt/train/%s/rlp_labels'%k][...]
sub_boxes = self.meta['gt/train/%s/sub_boxes'%k][...].astype(np.float)
obj_boxes = self.meta['gt/train/%s/obj_boxes'%k][...].astype(np.float)
self.gt_labels[k] = {}
self.gt_labels[k]['rlp_labels']=rlp_labels
self.gt_labels[k]['sub_boxes']=sub_boxes
self.gt_labels[k]['obj_boxes']=obj_boxes
print 'done preloading gt %f'%zl.tock()
zl.save('output/cache/vg1_2_2016_gt.pkl',self.gt_labels)
self.imids = []
for k in self.vgg_data.keys():
self.imids.append(k)
self.imidx = 0
random.shuffle(self.imids)
layer_params = yaml.load(self.param_str_)
self._batch_size = layer_params['batch_size']
self.train_data = []
self._name_to_top_map = {}
top[0].reshape(self._batch_size, 201*2)
top[1].reshape(self._batch_size, 4096*2)
top[2].reshape(self._batch_size, 4*2)
top[3].reshape(self._batch_size)
self._name_to_top_map['classeme'] = 0
self._name_to_top_map['visual'] = 1
self._name_to_top_map['location'] = 2
self._name_to_top_map['label'] = 3
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self.get_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
|
# Generated by Django 3.1.2 on 2020-10-02 10:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('audiobook', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='audiobook',
old_name='file',
new_name='file_field',
),
]
|
# Write a program that reads a student's two grades,
# then calculates and displays their average.
not1 = float(input('Digite a nota 1: '))
not2 = float(input('Digite a nota 2: '))
média = (not1 + not2)/2
print('A média do aluno entre {} e {} será de {:.1f}'.format(not1, not2, média))
|
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
import sys
class Solution(object):
#accepted
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
res = -sys.maxsize
low, high = 1, n
while low <= high:
mid = low + (high-low) // 2
g = guess(mid)
if g == 0:
res = mid
break
elif g < 0:
high = mid -1
else:
low = mid + 1
return res
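# Hedged local test harness (not part of the LeetCode submission): the judge
# normally supplies guess(); the stand-in below targets a hand-picked number.
if __name__ == '__main__':
    picked = 6
    def guess(num):
        # -1: picked is lower than num, 1: picked is higher, 0: equal
        return 0 if num == picked else (-1 if num > picked else 1)
    print(Solution().guessNumber(10))  # 6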
|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2020 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision: 477 $
# Date: $Date: 2019-12-01 20:16:10 -0500 (Sun, 01 Dec 2019) $
import string
import sys
import MySQLdb
from isfdb import *
from common import *
from login import *
from SQLparsing import *
from library import *
from xml.dom import minidom
from xml.dom import Node
if __name__ == '__main__':
PrintHeader('Recent Activity')
PrintNavbar('recent_activity', 0, 0, 'recent_activity.cgi', 0)
print '<ul>'
print '<li>%s' % ISFDBLink('recent.cgi', '', 'Recent Edits')
print '<li>%s' % ISFDBLink('recent_primary_ver.cgi', '', 'Recent Primary Verifications')
print '<li>%s' % ISFDBLink('recentver.cgi', '', 'Recently Added Secondary Verifications')
print '<li>%s' % ISFDBLink('removed_secondary_verifications.cgi', '', 'Recently Removed Secondary Verifications')
print '</ul>'
PrintTrailer('recent_activity', 0, 0)
|
import logging
class PowerSourceItem:
def __init__(self, device_id, DeviceClass, device_instance=None):
self.device_id = device_id
self.DeviceClass = DeviceClass
self.capacity = None
self.load = 0.0
self.price = None
self.device_instance = device_instance
self.capacity_changed = False
self.load_changed = False
self.logger = logging.LoggerAdapter(logging.getLogger("lpdm"), {"sim_seconds": "", "device_id": "psi"})
def __repr__(self):
return "id: {}, class: {}, capacity: {}, load: {}, price: {}".format(
self.device_id,
self.DeviceClass,
self.capacity,
self.load,
self.price
)
def update_status(self):
if not self.device_instance is None:
self.device_instance.update_status()
def is_configured(self):
"""Is this power source configured? ie has capacity and price been set?"""
return not self.capacity is None and not self.price is None
    def is_available(self):
        """Is this power source available? i.e. is it configured and does it have capacity > 0?"""
        return self.is_configured() and self.capacity > 0
    def can_handle_load(self, new_load):
        """Can this power source handle the additional load?"""
if self.capacity is None:
raise Exception("The capacity for {} has not been set.".format(self.device_id))
if self.price is None:
raise Exception("The price for {} has not been set.".format(self.device_id))
return (self.load + new_load) <= self.capacity
def add_load(self, new_load):
"""Add additional load to the power source"""
if self.can_handle_load(new_load):
self.set_load(self.load + new_load)
self.logger.debug("message: Add load {}".format(new_load))
self.logger.debug("message: Total load {}".format(self.load))
return True
else:
return False
def set_load(self, load):
"""
Set the load for the power source.
Also set a boolean flag indicating the load has changed.
"""
if load > 0 and not self.is_available():
raise Exception("Attempted to put load of {} on a power source that has not been configured.".format(load))
if load != self.load:
self.load_changed = True
self.load = load
# if there's an actual device instance connected then set that load as well
if self.device_instance:
self.device_instance.set_load(load)
def set_capacity(self, capacity):
"""
Set the capacity for the power source.
Also set a boolean flag indicating the capacity has changed.
"""
if capacity != self.capacity:
self.capacity_changed = True
self.capacity = capacity
# if self.load > self.capacity:
# raise Exception("Load > capacity ({} > {})".format(self.load, self.capacity))
def has_changed(self):
"""Has this power source been changed"""
return self.capacity_changed or self.load_changed
def reset_changed(self):
"""Set the load/capacity changed flags to False"""
self.capacity_changed = False
self.load_changed = False
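# Hedged usage sketch (illustration only, not part of the original module):
# exercising a PowerSourceItem without a concrete device class or instance;
# the "diesel_1" id and the numbers below are made up.
if __name__ == "__main__":
    psi = PowerSourceItem("diesel_1", object)
    psi.set_capacity(100.0)
    psi.price = 0.30
    print(psi.is_available())         # True: capacity and price are set
    print(psi.add_load(60.0))         # True: 0 + 60 <= 100
    print(psi.can_handle_load(50.0))  # False: 60 + 50 > 100
    psi.reset_changed()
    print(psi.has_changed())          # False after resetting the flags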
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# file: extensions.py
# authors: jonathan kelley, anthony tarola
# ---
# license: the mit license
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the above copyright notice and this permission notice shall being included
# in all copies or substantial portions of the Software.
import logging
import os
import os.path
import pipes
import re
import subprocess
from tornado import gen
from tornado.process import Subprocess
from tornado.ioloop import IOLoop
import toro
log = logging.getLogger(__name__)
class SecurityFaultDangerousUserInput(Exception):
""" this will be raised if an escape sequence is attempted in parameters
if you try to pull tricks, the json parser errors expecting a delimiter
(like a newline) but this can't be fulfilled because that triggers
this exception again, in a recurisve manner.
if you send the ANSI CR or LF characters you will also fail and be
trapped by the json decoder for invalid input as the pipes function
will escape those sort of control characters.
this should provide a reasonable layer of security unless there is
an attack on the regex, or python string libraries that is currently
unpublished.
as always, use a reasonable layer of authentication in front of this
API as absolutely no security measure is close to foolproof.
"""
def __init___(self, dErrorArguments):
Exception.__init__(self, "{0}".format(dErrArguments))
self.dErrorArguments = dErrorArguements
class ExtensionCollection(dict):
""" load the collection of extensions """
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def metadata(self, tags):
""" return the metadata for all of the extensions, keyed by name """
output = {}
for key, value in self.items():
if (tags['tags']) or (tags['not_tags']) or (tags['any_tags']):
if (set(tags['tags']).issubset(value.tags)) and (tags['tags']):
output[key] = value.metadata()
continue
if tags['not_tags']:
output[key] = value.metadata()
for tag in tags['not_tags']:
if (tag in value.tags):
output.pop(key, None)
break
for tag in tags['any_tags']:
if tag in value.tags:
output[key] = value.metadata()
break
else:
output[key] = value.metadata()
return output
def name(self, tags):
""" return a list of just the names of all extensions """
output = []
for key, value in self.items():
if (tags['tags']) or (tags['not_tags']) or (tags['any_tags']):
if (set(tags['tags']).issubset(value.tags)) and (tags['tags']):
output.append(value.name)
continue
if tags['not_tags']:
output.append(value.name)
for tag in tags['not_tags']:
if (tag in value.tags):
output.remove(value.name)
break
for tag in tags['any_tags']:
if tag in value.tags:
output.append(value.name)
break
else:
output.append(value.name)
return output
class Extension(object):
""" a single route extension in the directory """
def __init__(self, filename, name, description, params, filtered_params, tags, http_method, output, needs_lock):
self.lock = toro.Lock()
self.filename = filename
self.name = name
self.description = description
self.params = params
self.filtered_params = filtered_params
self.tags = tags
self.http_method = http_method
self.needs_lock = needs_lock
self.output = output
def filter_params(self, params):
filtered_params = dict(params)
for k, v in filtered_params.items():
if k in self.filtered_params:
filtered_params[k] = 'FILTERED'
return filtered_params
@gen.engine
def execute(self, params, callback):
log.info("Executing extension: {0} with params: {1}".format(
self.filename, self.filter_params(params)))
        if self.needs_lock:
            with (yield self.lock.acquire()):
                response = yield gen.Task(self.do_execute, params)
else:
response = yield gen.Task(self.do_execute, params)
callback(response)
@gen.engine
def do_execute(self, params, callback):
env = self.create_env(params)
if env == False:
# If the environment has manipulated string input, raise
raise SecurityFaultDangerousUserInput(params)
extension_dir = os.path.dirname(self.filename)
os.chdir(extension_dir)
if self.output == 'combined':
child = Subprocess(
self.filename,
env=env,
stdout=Subprocess.STREAM,
stderr=subprocess.STDOUT,
io_loop=IOLoop.instance()
)
retcode, stdout = yield [
gen.Task(child.set_exit_callback),
gen.Task(child.stdout.read_until_close)
]
callback((child.returncode, stdout.split()))
else:
child = Subprocess(
self.filename,
env=env,
stdout=Subprocess.STREAM,
stderr=Subprocess.STREAM,
io_loop=IOLoop.instance()
)
retcode, stdout, stderr = yield [
gen.Task(child.set_exit_callback),
gen.Task(child.stdout.read_until_close),
gen.Task(child.stderr.read_until_close)
]
callback((child.returncode, stdout.splitlines(), stderr.splitlines()))
def param_attempts_escape(self, parameter):
"""
Perform a scan to see if this is an escaping parameter.
"""
empty_parameter_value = "''\"'\"''\"'\"''"
open_sequence = len(tuple(re.finditer(r"''\"'\"'", parameter)))
close_sequence = len(tuple(re.finditer(r"'\"'\"''", parameter)))
escapes = open_sequence + close_sequence
if escapes > 0 and parameter != empty_parameter_value:
return True
else:
return False
def create_env(self, input):
output = {}
stop = False
# add all the parameters as env variables
for param in self.params:
name = param['name']
value = input.get(name, '')
real_param = pipes.quote(pipes.quote(value))
output[name.upper()] = real_param
if self.param_attempts_escape(real_param):
return False
return output
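    # Hedged illustration (comment only, not executed): applying pipes.quote()
    # twice, as create_env() does, leaves plain values untouched but turns any
    # embedded quote character into the ''"'"' marker sequences that
    # param_attempts_escape() rejects.
    #   pipes.quote(pipes.quote('hello'))  ->  hello            (passes)
    #   pipes.quote(pipes.quote("it's"))   ->  contains ''"'"'  (rejected)
    #   pipes.quote(pipes.quote(''))       ->  ''"'"''"'"''     (the whitelisted empty value)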
def metadata(self):
return {
"filename": self.filename,
"http_method": self.http_method,
"name": self.name,
"description": self.description,
"params": self.params,
"filtered_params": self.filtered_params,
"tags": self.tags,
"output": self.output,
"lock": self.needs_lock
}
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__, self.metadata())
def create_collection(directory):
""" create the extension collection for the directory """
log.info("Getting extension from directory {0}".format(directory))
collection = ExtensionCollection()
for (dirpath, _, filenames) in os.walk(directory):
for filename in filenames:
# grab the file's absolute path, and name
path = os.path.join(dirpath, filename)
full_path = os.path.abspath(path)
# format the name for sanity
name = path.replace(directory + os.sep, '')
name = '.'.join(name.split(".")[:-1])
name = re.sub(r'(\W)+', '_', name)
log.info("Adding extension with name: {0} and path: {1}".format(
name, full_path))
extension = create_extension(name, full_path)
if extension is not None:
collection[name] = extension
return collection
def create_extension(extension_name, filename):
""" parse a extension, returning a Extension object """
# script defaults
description = None
params = []
filtered_params = []
tags = []
http_method = 'post'
output = 'split'
lock = False
# warn the user if we can't execute this file
if not os.access(filename, os.X_OK):
log.error("Filename {0} not executable, file ignored".format(filename))
return None
# grab file contents
with open(filename) as f:
contents = list(f)
in_block = False
# loop over the contents of the file
for line in contents:
# all lines should be bash style comments
if not line.startswith("#"):
continue
        # we don't need the leading comment character, or extraneous whitespace
line = line.replace("#", "").strip()
# start of the extension block
if not in_block and line.startswith("-- config --"):
in_block = True
continue
# end of the extension block, so we'll stop here
if in_block and line.startswith("-- config --") \
or in_block and line.startswith("-- end config --"):
in_block = False
break
# make sure the line is good
if not ':' in line:
continue
# prep work for later
        key, value = [item.strip() for item in line.split(':', 1)]
# description
if in_block and key == "description":
description = value
continue
# http_method
if in_block and key == "http_method":
if value.lower() in ['get', 'post', 'put', 'delete']:
http_method = value.lower()
continue
else:
log.warn(
"Unrecognized http_method type in -- config -- block: {0}".format(value.lower()))
continue
        # output
        if in_block and key == "output":
            if value.lower() in ['split', 'combined']:
                output = value.lower()
                continue
            else:
                log.warn(
                    "Unrecognized output type in -- config -- block: {0}".format(value.lower()))
                continue
# param
if in_block and key == "param":
# handle the optional description
if "-" in value:
name, desc = [item.strip() for item in value.split('-')]
params.append({'name': name, 'description': desc})
continue
params.append({'name': value})
continue
# filtered_params
if in_block and key == "filtered_params":
filter_values = [filter_value.strip()
for filter_value in value.split(',')]
if len(filter_values) > 1:
for filter_value in filter_values:
filtered_params.append(filter_value)
continue
filtered_params.append(value)
continue
# tags
if in_block and key == "tags":
tag_values = [tag_value.strip() for tag_value in value.split(',')]
if len(tag_values) > 1:
for tag_value in tag_values:
tags.append(tag_value)
continue
tags.append(value)
continue
# lock
if in_block and key == "lock":
lock = (value == "True")
continue
# ignore PEP 263 Source Code Encodings
if line.startswith("-*- coding:") or line.startswith("coding=") or line.startswith("vim: set fileencoding="):
continue
# pass for license heading or config block comment lines
if line.lower().startswith("license:") or line.startswith("---"):
continue
log.warn("Unrecognized line in -- config -- block: {0}".format(line))
    # if in_block is true, then we never got an end to the block, which is bad
if in_block:
log.error(
"File with filename {0} missing a -- config -- end block, ignoring".format(filename))
return None
return Extension(filename, extension_name, description, params, filtered_params, tags, http_method, output, lock)
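# Hedged example (illustration only): a hypothetical extension script that
# create_extension() would accept. Every header line is a bash-style comment,
# the block is delimited by "-- config --" / "-- end config --", and the
# declared param is exposed to the script as the env variable SERVICE_NAME.
#   #!/bin/bash
#   # -- config --
#   # description: restart the example service
#   # param: service_name - name of the service to restart
#   # filtered_params: api_token
#   # tags: ops, restart
#   # http_method: post
#   # output: combined
#   # lock: True
#   # -- end config --
#   sudo service "$SERVICE_NAME" restart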
|
from django.contrib import admin
from .models import Cases
class Cases_Admin(admin.ModelAdmin):
readonly_fields = ('date_created','date_modified')
admin.site.register(Cases, Cases_Admin)
|
# this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates:
C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
C(template_host) contains the node name of the template's machine.
C(template_uid) the numeric user id of the owner.
C(template_path) the path of the template.
C(template_fullpath) is the absolute path of the template.
C(template_run_date) is the date that the template was rendered."
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
choices: [ "yes", "no" ]
default: "no"
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
choices: [ '\n', '\r', '\r\n' ]
default: '\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- If this is set to True the first newline after a block is removed (block, not variable tag!).
default: "no"
version_added: '2.4'
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
choices: [ "yes", "no" ]
default: "yes"
notes:
- For Windows you can use M(win_template) which uses '\r\n' as C(newline_sequence).
- Including a string that uses a date in the template will result in the template being marked 'changed' each time
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = r'''
# Example from Ansible Playbooks
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: 0644
# The same example, but using symbolic modes equivalent to 0644
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: "u=rw,g=r,o=r"
# Create a DOS-style text file from a template
- template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
# Copy a new "sudoers" file into place, after passing validation with visudo
- template:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
# Update sshd configuration safely, avoid locking yourself out
- template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
import torch
import torch.nn as nn
class SeqModel(nn.Module):
def __init__(
self,
embedding_size=200,
vocab_size=300,
output_size=5,
hidden_size=200,
num_layers=2,
nonlin="tanh",
dropout_rate=0.7,
mode=0,
unit="lstm",
more_features=False,
):
# add glove param here? ^^
super(SeqModel, self).__init__()
self.mode = mode
self.unit = unit
self.more_features = more_features
self.embedding = nn.Embedding(
num_embeddings=vocab_size, embedding_dim=embedding_size
)
        self.embedding.weight.data.normal_(0.0, 0.05)  # mean=0.0, std=0.05
if mode == 0:
if unit == "lstm":
self.lstm_rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
)
elif unit == "gru":
self.gru_rnn = nn.GRU(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
)
else:
# baseline: unidirectional rnn
self.rnn = nn.RNN(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlin,
)
if more_features:
self.linear_layer = nn.Linear(
hidden_size + len(numeric_features), output_size
)
else:
self.linear_layer = nn.Linear(hidden_size, output_size)
# model with dropout:
if mode == 1:
if unit == "lstm":
self.lstm_rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout_rate,
)
elif unit == "gru":
self.gru_rnn = nn.GRU(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout_rate,
)
else:
self.rnn = nn.RNN(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlin,
dropout=dropout_rate,
)
if more_features:
self.linear_layer = nn.Linear(
hidden_size + len(numeric_features), output_size
)
else:
self.linear_layer = nn.Linear(hidden_size, output_size)
# Bidirectional model
if mode == 2:
if unit == "lstm":
self.lstm_rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout_rate,
bidirectional=True,
)
elif unit == "gru":
self.gru_rnn = nn.GRU(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout_rate,
bidirectional=True,
)
else:
self.rnn = nn.RNN(
input_size=embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlin,
dropout=dropout_rate,
bidirectional=True,
)
if more_features:
self.linear_layer = nn.Linear(
hidden_size * 2 + len(numeric_features), output_size
)
else:
self.linear_layer = nn.Linear(hidden_size * 2, output_size)
self.activation_fn = nonlin
self.softmax_layer = nn.LogSoftmax(dim=1)
def forward(self, x, x_concat=None):
# permute x?
out = self.embedding(x)
if self.unit == "lstm":
out, (h_state, c_state) = self.lstm_rnn(out)
elif self.unit == "gru":
out, h_state = self.gru_rnn(out)
else:
out, h_state = self.rnn(out)
out = out[-1]
if self.more_features:
out = torch.cat((out, x_concat.permute(1, 0)), dim=1)
out = self.linear_layer(out)
out = self.softmax_layer(out)
return out
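# Hedged usage sketch (illustration only): instantiating the baseline LSTM
# configuration and running a dummy batch; the sizes below are made up. The
# demo keeps more_features=False because the more_features branches reference
# a numeric_features list that is not defined in this file.
if __name__ == "__main__":
    model = SeqModel(embedding_size=200, vocab_size=300, output_size=5,
                     hidden_size=200, num_layers=2, mode=0, unit="lstm")
    dummy = torch.randint(0, 300, (15, 4))  # (seq_len=15, batch=4) token ids
    log_probs = model(dummy)
    print(log_probs.shape)  # torch.Size([4, 5]) of log-probabilities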
|
# https://leetcode.com/problems/binary-tree-maximum-path-sum/
#
# algorithms
# Hard (29.46%)
# Total Accepted: 177,254
# Total Submissions: 601,635
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
res = [root.val]
def recursive(node):
if not node:
return float('-inf')
left, right = recursive(node.left), recursive(node.right)
l, r = left + node.val, right + node.val
a = l + r - node.val
res[0] = max(res[0], left, right, l, r, node.val, a)
return max(l, r, node.val)
recursive(root)
return res[0]
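# Hedged local check (not part of the LeetCode submission): the TreeNode class
# below mirrors the commented definition above; for the tree
# [-10, 9, 20, null, null, 15, 7] the maximum path sum is 42 (15 + 20 + 7).
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(-10)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().maxPathSum(root))  # 42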
|
#!/usr/bin/env python
"""
Helper script to ingest generated data to a search index.
You must have access to a search index for this to work.
"""
import json
import globus_sdk
from login import load_tokens
import argparse
# 'cr3' index
INDEX = '11f4dbe5-1449-4d65-af83-72d322b117f3'
SEARCH_DATA = 'gmeta_ingest_doc.json'
def ingest():
with open(SEARCH_DATA) as f:
ingest_doc = json.loads(f.read())
tokens = load_tokens()
auther = globus_sdk.AccessTokenAuthorizer(
tokens['search.api.globus.org']['access_token'])
sc = globus_sdk.SearchClient(authorizer=auther)
preview = [ent['subject'] for ent in ingest_doc['ingest_data']['gmeta']]
print('\n'.join(preview))
print('Ingest these to "{}"?'.format(
sc.get_index(INDEX).data['display_name']))
user_input = input('Y/N> ')
if user_input in ['yes', 'Y', 'y', 'yarr']:
sc.ingest(INDEX, ingest_doc)
print('Finished')
else:
print('Aborting')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input Gmet json data file")
args = parser.parse_args()
if args.i:
SEARCH_DATA = args.i
ingest()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 15:25:20 2020
@author: babraham
"""
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.graph_objs as go
import plotly.figure_factory as ff
import os
import numpy as np
import datetime as dt
from django_plotly_dash import DjangoDash
from va_explorer.va_data_management.models import Location, VerbalAutopsy
from django.forms.models import model_to_dict
import re
import time
# ================APP DEFINITION===============#
# NOTE: to include external stylesheets, set external_stylesheets parameter in constructor
# app = dash.Dash(__name__) # Dash constructor
app = DjangoDash(name="va_dashboard", serve_locally=True, add_bootstrap_links=True)
# TODO: We should eventually move this mapping to someplace where it's more configurable
# ===========INITIAL CONFIG VARIABLES=============#
# initial timeframe for map data to display
INITIAL_TIMEFRAME = "1 year"
# folder where geojson is kept
JSON_DIR = "va_explorer/va_analytics/dash_apps/geojson"
# Zambia Geojson pulled from: https://adr.unaids.org/dataset/zambia-geographic-data-2019
JSON_FILE = "zambia_geojson.json"
# initial granularity
INITIAL_GRANULARITY = "province"
# initial metric to plot on map
INITIAL_MAP_METRIC = "Total Deaths"
# ============Lookup dictionaries =================#
def load_lookup_dicts():
lookup = dict()
# dictionary mapping time labels to days (or all)
lookup["time_dict"] = {"1 week": 7, "1 month": 30, "1 year": 365, "all": "all"}
# dictionary mapping demographic variable names to corresponding VA survey columns
lookup["demo_to_col"] = {
"age group": "age_group",
"sex": "Id10019",
"place of death": "Id10058",
}
# colors used for plotting
lookup["color_list"] = [
"rgb(24,162,185)",
"rgb(201,0,1)",
"rgb(8,201,0)",
"rgb(240,205,21)",
"rgb(187,21,240)",
"rgb(250,250,248)",
"rgb(162,162,162)",
]
# colorscale used for map
lookup["map_colorscale"] = [(0.0, 'rgb(255,255,255)'),
(0.01, 'rgb(255,255,255)'),
(0.01, 'rgb(0, 147, 146)'),
(0.16666666666666666, 'rgb(0, 147, 146)'),
(0.16666666666666666, 'rgb(57, 177, 133)'),
(0.3333333333333333, 'rgb(57, 177, 133)'),
(0.3333333333333333, 'rgb(156, 203, 134)'),
(0.5, 'rgb(156, 203, 134)'),
(0.5, 'rgb(233, 226, 156)'),
(0.6666666666666666, 'rgb(233, 226, 156)'),
(0.6666666666666666, 'rgb(238, 180, 121)'),
(0.8333333333333333, 'rgb(238, 180, 121)'),
(0.8333333333333333, 'rgb(232, 132, 113)'),
(1.0, 'rgb(232, 132, 113)')]
# dictionary mapping raw map metrics to human-readable names
lookup["metric_names"] = {
"Total Deaths": "Total Deaths",
"Mean Age of Death": "Mean Age of Death",
"HIV/AIDS related death": "HIV/AIDS",
"Diabetes mellitus": "Diabetes Mellitus",
"Acute resp infect incl pneumonia": "Pneumonia",
"Other and unspecified cardiac dis": "Other Cardiac",
"Diarrhoeal diseases": "Diarrhoeal Diseases",
"Other and unspecified neoplasms": "Unspecified Neoplasm",
"Renal failure": "Renal Failure",
"Liver cirrhosis": "Liver Cirrhosis",
"Digestive neoplasms": "Digestive Neoplasm",
"Other and unspecified infect dis": "Other",
}
# dictionary mapping place of death names to more human-readable names
lookup["death_location_names"] = {
"on_route_to_hospital_or_facility": "En Route to Facility",
"DK": "Unknown",
"other_health_facility": "Other Health Facility",
}
    # display formats for weekly, monthly, and yearly dates
lookup["date_display_formats"] = {
"week": "%d/%m/%Y",
"month": "%m/%Y",
"year": "%Y",
}
return lookup
LOOKUP = load_lookup_dicts()
# =============Geo dictionaries and global variables ========#
# load geojson data from flat file (will likely migrate to a database later)
def load_geojson_data(json_file):
geojson = None
if os.path.isfile(json_file):
geojson = json.loads(open(json_file, "r").read())
# add min and max coordinates for mapping
for i, g in enumerate(geojson["features"]):
coordinate_list = g["geometry"]["coordinates"]
coordinate_stat_tables = []
for coords in coordinate_list:
if len(coords) == 1:
coords = coords[0]
coordinate_stat_tables.append(
pd.DataFrame(coords, columns=["lon", "lat"]).describe()
)
g["properties"]["area_name"] += " {}".format(g["properties"]["area_level_label"])
g["properties"]["min_x"] = min(
[stat_df["lon"]["min"] for stat_df in coordinate_stat_tables]
)
g["properties"]["max_x"] = max(
[stat_df["lon"]["max"] for stat_df in coordinate_stat_tables]
)
g["properties"]["min_y"] = min(
[stat_df["lat"]["min"] for stat_df in coordinate_stat_tables]
)
g["properties"]["max_y"] = max(
[stat_df["lat"]["max"] for stat_df in coordinate_stat_tables]
)
geojson["features"][i] = g
# save total districts and provinces for future use
geojson["district_count"] = len([f for f in geojson["features"] \
if f["properties"]["area_level_label"] == "District"])
geojson["province_count"] = len([f for f in geojson["features"] \
if f["properties"]["area_level_label"] == "Province"])
return geojson
GEOJSON = load_geojson_data(json_file=f"{JSON_DIR}/{JSON_FILE}")
# ============ VA Data =================
def load_va_data(geographic_levels=None):
return_dict = {"data": pd.DataFrame()}
valid_vas = VerbalAutopsy.objects.exclude(causes=None).prefetch_related("location").prefetch_related("causes")
if len(valid_vas) > 0:
# Grab exactly the fields we need, including location and cause data
va_data = [
{
"id": va.id,
"Id10019": va.Id10019,
"Id10058": va.Id10058,
"ageInYears": va.ageInYears,
"location": va.location.name,
"cause": va.causes.all()[0].cause, # Don't use first() to take advantage of the prefetch
}
for va in valid_vas
]
# Build a location ancestors lookup and add location information at all levels to all vas
# TODO: This is not efficient (though it's better than 2 DB queries per VA)
# TODO: This assumes that all VAs will occur in a facility, ok?
locations, location_types = dict(), dict()
location_ancestors = { location.id:location.get_ancestors() for location in Location.objects.filter(location_type="facility") }
for i, va in enumerate(valid_vas):
# location_types[va.location.depth] = va.location.location_type
# locations.add(va.location.name)
for ancestor in location_ancestors[va.location.id]:
va_data[i][ancestor.location_type] = ancestor.name
location_types[ancestor.depth] = ancestor.location_type
locations[ancestor.name] = ancestor.location_type
va_df = pd.DataFrame.from_records(va_data)
# clean up age fields and assign to age bin
va_df["age"] = va_df["ageInYears"].replace(
to_replace=["dk"], value=np.random.randint(1, 80)
)
va_df["age"] = pd.to_numeric(va_df["age"])
va_df["age_group"] = va_df["age"].apply(assign_age_group)
cur_date = dt.datetime.today()
# TODO: This random date of death assignment needs to be correctly handled
        # NOTE: date field called -Id10023 in VA form, but no dates in current responses
va_df["date"] = [
cur_date - dt.timedelta(days=int(x))
for x in np.random.randint(3, 400, size=va_df.shape[0])
]
# convert location_types to an ordered list
location_types = [l for _,l in sorted(location_types.items(), key=lambda x: x[0])]
return_dict = {"data": va_df,
"location_types": location_types,
"max_depth": len(location_types) - 1,
"locations": locations}
return return_dict
def assign_age_group(age):
if age <= 1:
return "neonate"
elif age <= 16:
return "child"
else:
return "adult"
# ===============APP LAYOUT====================#
app.layout = html.Div(
id="app-body-container",
children=[
html.Div(
[
# global filters (affect entire dashboard)
dbc.Row(
[
html.Span("Analytics Dashboard", className="dashboard-title"),
html.Div(
className="dashboard-comp-container",
children=[
html.P("Time Frame", className="input-label"),
dcc.Dropdown(
id="timeframe",
options=[
{"label": o, "value": o.lower()}
for o in [
"1 Day ",
"1 Week",
"1 Month",
"1 Year",
"All",
]
],
value="1 year",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "120px",
},
searchable=False,
clearable=False,
),
],
style={
"display": "flex",
"margin-right": "10px",
"margin-left": "10px",
},
),
html.Div([html.A(dbc.Button("Download Data", color="info"),
href="/va_analytics/download")],
style={
"margin-top": "10px",
"margin-left": "20px"
}
)
],
style={"margin-left": "0px"},
),
html.Div(
[
dbc.Col(
[
dbc.Row(
[
html.Div(
id="callout-container",
style={
"display": "flex",
"text-align": "center",
},
)
],
style={"margin-left": "0px"},
),
dbc.Row(
[
html.Div(
className="dashboard-comp-container",
children=[
html.P(
"Map Metric",
className="input-label",
),
dcc.Dropdown(
id="map_metric",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "200px",
},
searchable=False,
clearable=False,
),
],
style={"display": "flex"},
),
                            html.Div(className='dashboard-comp-container',
id='search-container',
children = [
dcc.Dropdown(
id='map_search',
options= [],
multi=False,
placeholder='Search Locations',
clearable=True,
)],
style={
"margin-left": "10px",
"width": "220px",
}
),
html.Div(
className="dashboard-comp-container",
children=[
html.P(
"View",
id="view_label",
className="input-label",
),
dcc.Dropdown(
id="view_level",
# options = [{"value": o, "label": o.capitalize()}
# for o in [INITIAL_GRANULARITY]],
# value=INITIAL_GRANULARITY,
value="",
placeholder = "Auto",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "100px",
},
searchable=False,
clearable=False,
disabled=False
),
],
style={"display": "flex"},
),
html.Div(className='dashboard-comp-container',
children = [
dbc.Button("Reset Map",
id="reset",
color="info",
className="mr-1",
n_clicks=0)
]
)
],
style={"align-items": "center"},
),
# map container
dcc.Loading(id='map-loader',
type='circle',
children=[
html.Div(
id="choropleth-container",
children=dcc.Graph(id="choropleth"),
)
]),
html.Div(id="bounds"),
html.Div(id="va_data", style={"display": "none"}),
html.Div(id="locations", style={"display": "none"}),
html.Div(id="location_types", style={"display": "none"}),
html.Div(id="filter_dict", style={"display": "none"}),
],
width=7,
),
dbc.Col(
[
dcc.Tabs(
[ # graph tabs
dcc.Tab(
label="COD Analysis",
children=[ # tab 1: COD Analysis
html.Div(
id="cod_buttons",
children=[
html.Div(
[
html.P(
"Demographic",
className="input-label",
),
dcc.Dropdown(
id="cod_factor",
options=[
{
"label": o,
"value": o,
}
for o in [
"All",
"Age Group",
"Sex",
]
],
value="All",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "120px",
},
searchable=False,
clearable=False,
),
],
style={
"display": "flex",
"margin-right": "30px",
},
),
html.Div(
[
html.P(
"N",
className="input-label",
),
dcc.Dropdown(
id="cod_n",
options=[
{
"label": o,
"value": o,
}
for o in [5,10,15,20]
],
value=10,
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "60px",
},
searchable=False,
clearable=False,
),
],
style={"display": "flex"},
),
dbc.RadioItems(
id="cod-aggtype",
options=[
{
"label": "% of Total",
"value": "percent_total",
},
{
"label": "Counts",
"value": "counts",
},
],
value="cts",
labelStyle={
"display": "inline-block"
},
labelClassName="radio-group-labels",
labelCheckedClassName="radio-group-labels-checked",
style={
"margin-left": "30px",
"display": "flex",
},
),
],
style={
"display": "flex",
"align-items": "center",
},
),
dcc.Loading(html.Div(id="cod-container"), type='circle'),
],
),
dcc.Tab(
label="Age Distribution",
children=[dcc.Loading(html.Div(id="age-container"), type='circle')]
),
dcc.Tab(
label="Gender Distribution",
children=[dcc.Loading(html.Div(id="sex-container"))]
),
dcc.Tab(
label="Place of Death Distribution",
children=[dcc.Loading(html.Div(id="pod-container"))]
),
dcc.Tab(
label="VA Trends",
children=[
html.Div(
id="ts_buttons",
children=[
html.Div(
[
html.P(
"Aggregation",
className="input-label",
),
dcc.Dropdown(
id="group_period",
options=[
{
"label": o,
"value": o,
}
for o in [
"Day",
"Week",
"Month",
"Year",
]
],
value="Month",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "120px",
"margin-right": "30px",
},
searchable=False,
clearable=False,
),
html.P(
"Demographic",
className="input-label",
),
dcc.Dropdown(
id="ts_factor",
options=[
{
"label": o,
"value": o,
}
for o in [
"All",
"Age Group",
"Sex",
"Place of Death",
]
],
value="All",
style={
"margin-top": "5px",
"margin-bottom": "5px",
"width": "140px",
},
searchable=False,
clearable=False,
),
],
style={
"display": "flex",
"margin-right": "30px",
},
),
dcc.Loading(html.Div(id="ts-container"), type='circle'),
],
)
],
),
]
)
]
),
],
style={"display": "flex", "margin-top": "10px"},
),
]
)
],
)
#=============Reset logic (reset map to default)====================#
@app.callback(
[
Output(component_id="map_search", component_property="value"),
Output(component_id="map_metric", component_property="value")
],
[
Input(component_id="reset", component_property="n_clicks")
]
)
def reset(n_clicks=0):
return "", INITIAL_MAP_METRIC
# ============ VA data (loaded from database and shared across components) ========
@app.callback(
[
Output(component_id="va_data", component_property="children"),
Output(component_id="locations", component_property="children"),
Output(component_id="location_types", component_property="children")
],
[
Input(component_id="timeframe", component_property="value"),
]
)
def va_data(timeframe="All"):
res = load_va_data()
va_data = res["data"].to_json()
locations = json.dumps(res["locations"])
location_types = json.dumps(res["location_types"])
return va_data, locations, location_types
# ============ Location search options (loaded after load_va_data())==================
@app.callback(
Output(component_id="map_search", component_property="options"),
[Input(component_id="map_search", component_property="search_value"),
Input(component_id="locations", component_property="children")]
)
def update_options(search_value, location_json):
if search_value and location_json:
locations = json.loads(location_json).keys()
options = [{'label':l, 'value':l} for l in locations \
if search_value.lower() in l.lower()]
return options
else:
raise dash.exceptions.PreventUpdate
#============ Filter logic (update filter table used by other components)========#
@app.callback(
Output(component_id="filter_dict", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="choropleth", component_property="selectedData"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="map_search", component_property="value"),
Input(component_id="locations", component_property="children"),
Input(component_id="location_types", component_property="children")
]
)
def filter_data(
    va_data, selected_json, timeframe="All", search_terms=None, locations=None, location_types=None
):
filter_df = pd.read_json(va_data)
granularity = INITIAL_GRANULARITY
locations = json.loads(locations) if type(locations) is str else locations
search_terms = [] if search_terms is None else search_terms
location_types = json.loads(location_types) if location_types is not None else location_types
# dictionary storing data we want to share across callbacks
filter_dict = {
"geo_filter": (selected_json is not None) or (len(search_terms) > 0),
"plot_regions": []
}
if filter_dict["geo_filter"]:
# first, check if user searched anything. If yes, use that as filter.
if search_terms is not None:
if len(search_terms) > 0:
search_terms = [search_terms] if type(search_terms) is str else search_terms
granularity = locations.get(search_terms[0], granularity)
filter_df = filter_df[filter_df[granularity].isin(set(search_terms))]
# then, check for locations clicked on map.
if selected_json is not None:
point_df = pd.DataFrame(selected_json["points"])
chosen_regions = point_df["location"].tolist()
granularity = locations.get(chosen_regions[0], granularity)
filter_df = filter_df[filter_df[granularity].isin(set(chosen_regions))]
# get parent location type from current granularity
parent_location_type = shift_granularity(granularity, location_types, move_up=True)
# get all locations in parent(s) of chosen regions for plotting
plot_regions = list()
for parent_name in filter_df[parent_location_type].unique():
location_object = Location.objects.get(name=parent_name)
children = location_object.get_children()
children_names = [c.name for c in location_object.get_children()]
plot_regions = plot_regions + children_names + [parent_name]
# set final granularity to same level of children
granularity = children[0].location_type
filter_dict["plot_regions"] = plot_regions
# finally, apply time filter if necessary
if timeframe != "all":
cutoff = dt.datetime.today() - dt.timedelta(
days=LOOKUP["time_dict"][timeframe]
)
filter_df = filter_df[filter_df["date"] >= cutoff]
filter_ids = filter_df.index.tolist()
filter_dict["ids"] = filter_ids
filter_dict["granularity"] = granularity
#ret = f"{granularity} | {children_names[0]} "
return json.dumps(filter_dict)
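# Sketch of the JSON payload written to the hidden "filter_dict" div by the callback
# above (field values are illustrative, not taken from real data):
#   {"geo_filter": true, "plot_regions": ["District A", "District B"],
#    "ids": [0, 3, 17], "granularity": "district"}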
# try to move one level up or down in the geographical hierarchy. If not possible,
# return current level
def shift_granularity(current_granularity, levels, move_up=False):
    current_granularity = current_granularity.lower()
    if current_granularity in levels:
        current_idx = levels.index(current_granularity)
        if move_up:
            new_idx = max(current_idx - 1, 0)
        else:
            new_idx = min(current_idx + 1, len(levels) - 1)
        return levels[new_idx]
    return current_granularity
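# Usage sketch for shift_granularity (the level names below are assumptions for
# illustration; the real ones come from the location_types payload):
#   levels = ["province", "district"]
#   shift_granularity("district", levels, move_up=True)   -> "province"
#   shift_granularity("province", levels, move_up=False)  -> "district"
#   shift_granularity("province", levels, move_up=True)   -> "province" (already at the top)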
# =========Map Metrics =======================#
# Top metrics to track for map dropdown
@app.callback(
Output(component_id="map_metric", component_property="options"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="filter_dict", component_property="children")
]
)
def get_metrics(va_data, filter_dict=None, N=10):
# by default, start with aggregate measures
metrics = [
"Total Deaths",
"Mean Age of Death",
]
metric_data = pd.read_json(va_data)
if metric_data.size > 0:
if filter_dict is not None:
filter_dict = json.loads(filter_dict)
metric_data = metric_data.iloc[filter_dict["ids"], :]
# add top N CODs by incidence to metric list
metrics = metrics + (metric_data["cause"]
.value_counts()
.sort_values(ascending=False)
.head(N)
.index
.tolist())
return [{"label": LOOKUP["metric_names"].get(m,m),"value": m} for m in metrics]
def get_metric_display_names(map_metrics):
names = []
for metric in map_metrics:
metric_name = LOOKUP["metric_names"].get(metric, None)
if metric_name is None:
metric_name = " ".join([x.capitalize() for x in metric.strip().split(" ")])
names.append(metric_name)
return names
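# Fallback title-casing sketch for get_metric_display_names (hypothetical metric
# names, assumed not to have LOOKUP entries):
#   get_metric_display_names(["malaria", "road traffic accident"])
#   -> ["Malaria", "Road Traffic Accident"]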
# ====================Geographic Levels (View options)============#
@app.callback(
[
Output(component_id="view_level", component_property="options"),
Output(component_id="view_level", component_property="disabled"),
Output(component_id="view_label", component_property="className")
],
[
Input(component_id="filter_dict", component_property="children"),
Input(component_id="location_types", component_property="children"),
]
)
def update_view_options(filter_dict, location_types):
if filter_dict is not None:
filter_dict = json.loads(filter_dict)
# only activate this dropdown when user is zoomed out
disable = filter_dict["geo_filter"]
if not disable:
view_options = json.loads(location_types)
label_class = "input-label"
else:
view_options = []
label_class = "input-label-disabled"
options = [{'label': o.capitalize(), 'value': o} for o in view_options]
return options, disable, label_class
# when view dropdown is disabled, reset selected value to null
@app.callback(
Output(component_id="view_level", component_property="value"),
[
Input(component_id="view_level", component_property="disabled"),
]
)
def reset_view_value(is_disabled=False):
if is_disabled:
return ""
else:
raise dash.exceptions.PreventUpdate
# ====================Map Logic===================================#
@app.callback(
# [
Output(component_id="choropleth-container", component_property="children"),
# Output(component_id="bounds", component_property="children")
# ],
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="map_metric", component_property="value"),
Input(component_id="view_level", component_property="value"),
Input(component_id="location_types", component_property="children"),
Input(component_id="filter_dict", component_property="children"),
],
)
def update_choropleth( va_data, timeframe, map_metric="Total Deaths", view_level=None,\
location_types=None, filter_dict=None, geojson=GEOJSON ):
# first, see which input triggered update. If granularity change, only run
# if value is non-empty
context = dash.callback_context
trigger = context.triggered[0]
if trigger["prop_id"].split('.')[0] == "view_level" and trigger["value"] == "":
raise dash.exceptions.PreventUpdate
plot_data = pd.read_json(va_data)
return_value = html.Div(id="choropleth")
zoom_in = False
granularity = INITIAL_GRANULARITY
location_types = json.loads(location_types)
if plot_data.size > 0:
timeframe = timeframe.lower()
feature_id = "properties.area_name"
chosen_regions = geojson["features"]
# if dashboard filter applied, carry over to data
if filter_dict is not None:
filter_dict = json.loads(filter_dict)
granularity = filter_dict.get('granularity', granularity)
plot_data = plot_data.iloc[filter_dict["ids"], :]
# geo_filter is true if user clicked on map or searches for location
zoom_in = filter_dict["geo_filter"]
# if zoom in necessary, filter geojson to only chosen region(s)
if zoom_in:
plot_regions = filter_dict["plot_regions"]
chosen_regions = [
g for g in geojson["features"] if g["properties"]["area_name"] in plot_regions
]
                # if user has clicked on map and granularity is province level, change to district level
granularity = shift_granularity(granularity, location_types, move_up=False)
if map_metric not in ["Total Deaths", "Mean Age of Death"]:
plot_data = plot_data[plot_data["cause"] == map_metric]
        # if user has not chosen a view level or it's disabled, default to using granularity
        view_level = view_level if view_level else granularity
        # get map tooltips to match view level (district or province)
map_df = generate_map_data(plot_data, chosen_regions, view_level, zoom_in, map_metric)
data_value = "age_mean" if len(re.findall("[mM]ean", map_metric)) > 0 else "age_count"
figure = go.Figure(
data=go.Choropleth(
locations=map_df[view_level],
z=map_df[data_value].astype(float),
locationmode="geojson-id",
geojson=geojson,
featureidkey=feature_id,
colorscale=LOOKUP["map_colorscale"],
hovertext=map_df["tooltip"],
hoverinfo="text",
autocolorscale=False,
marker_line_color="black", # line markers between states
marker_line_width=0.25,
colorbar=dict(
title="{} by {}".format(
map_metric.capitalize(), granularity.capitalize()
),
thicknessmode="fraction",
thickness=0.03,
lenmode="fraction",
len=0.8,
yanchor="middle",
ticks="outside",
nticks=10,
),
)
)
# update figure layout
figure.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0},
clickmode="event" if zoom_in else "event+select",
#clickmode="none",
dragmode="select",
)
# additional styling
config = {"displayModeBar": True}
figure.update_geos(
fitbounds="locations",
visible=True,
showcountries=True,
showlakes=False,
countrycolor="lightgray",
showsubunits=True,
landcolor="rgb(250,250,248)",
framewidth=0,
)
return_value = dcc.Graph(id="choropleth", figure=figure, config=config)
return return_value
# ==========Map dataframe (built from va dataframe)============#
def generate_map_data(va_df, chosen_geojson, view_level="district", zoom_in=False, metric="Total Deaths"):
if va_df.size > 0:
map_df = (
va_df[[view_level, "age", "location"]]
.groupby(view_level)
.agg({"age": ["mean", "count"], "location": [pd.Series.nunique]})
)
map_df.columns = ['_'.join(tup) for tup in map_df.columns.to_flat_index()]
map_df.reset_index(inplace=True)
# generate tooltips for regions with data
metric_name = LOOKUP['metric_names'].get(metric, metric)
map_df["age_mean"] = np.round(map_df["age_mean"], 1)
map_df["tooltip"] = (
"<b>"+ map_df[view_level] + "</b>"
+ "<br>"
+ f"<b>{metric_name}: </b>" + map_df["age_count"].astype(str)
+ "<br>"
+ "<b>Average Lifespan: </b>" + map_df["age_mean"].astype(str)
+ "<br>"
+ "<b>Active Facilities: </b>" + map_df["location_nunique"].astype(str)
)
# join with all region names to ensure each region has a record
chosen_region_names = [(f['properties']['area_name'], f['properties']['area_id'])\
for f in chosen_geojson\
if f['properties']['area_level_label'] == view_level.capitalize()]
geo_df = pd.DataFrame.from_records(chosen_region_names, columns=[view_level, 'area_id'])
map_df = geo_df.merge(map_df, how='left', on=view_level)
# fill NAs with 0s and rename empty tooltips to "No Data"
map_df["tooltip"].fillna("No Data", inplace=True)
map_df.fillna(0, inplace=True)
return map_df
return pd.DataFrame()
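# Shape of the frame returned by generate_map_data (sketch derived from the
# aggregation above; column order is illustrative):
#   <view_level> | area_id | age_mean | age_count | location_nunique | tooltip
# age_count doubles as the death count, age_mean is the rounded average lifespan,
# and regions with no VA records carry zeros and a "No Data" tooltip.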
# =========Callout Boxes Logic============================================#
@app.callback(
Output(component_id="callout-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="filter_dict", component_property="children"),
],
)
def update_callouts(va_data, timeframe, filter_dict=None, geojson=GEOJSON):
plot_data = pd.read_json(va_data)
granularity = INITIAL_GRANULARITY
if plot_data.size > 0:
if filter_dict is not None:
filter_dict = json.loads(filter_dict)
plot_data = plot_data.iloc[filter_dict["ids"], :]
granularity = filter_dict.get('granularity', granularity)
# total VAs
total_vas = plot_data.shape[0]
# active facilities
active_facilities = plot_data["location"].nunique()
# TODO: get field worker data from ODK - this is just a janky hack
num_field_workers = int(1.25 * active_facilities)
# region coverage
total_regions = geojson[f"{granularity}_count"]
if filter_dict is not None:
if len(filter_dict["plot_regions"]) > 0:
total_regions = len(filter_dict["plot_regions"])
# TODO: fix this to be more generic
if granularity == 'province':
granularity = 'district'
regions_covered = (
plot_data[[granularity, "age"]].dropna()[granularity].nunique()
)
coverage = "{}%".format(np.round(100 * regions_covered / total_regions, 0))
return [
make_card(total_vas, header="Total VAs"),
make_card(active_facilities, header="Active Facilities"),
make_card(num_field_workers, header="Field Workers"),
make_card(coverage, header="Region Coverage"),
]
else:
return [[html.Div() for i in range(4)]]
# build a calloutbox with specific value
# colors: primary, secondary, info, success, warning, danger, light, dark
def make_card(
value, header=None, description="", color="light", inverse=False, style=None
):
card_content = []
if header is not None:
card_content.append(dbc.CardHeader(header))
body = dbc.CardBody(
[
html.H3(value, className="card-title"),
html.P(description, className="card-text",),
],
style={"padding": ".5rem"},
)
card_content.append(body)
if style is None:
style = {"width": "190px"}
card_obj = dbc.Card(card_content, color=color, inverse=inverse, className="mr-2")
card_container = html.Div(card_obj, style=style)
return card_container
# =========Cause of Death Plot Logic============================================#
@app.callback(
Output(component_id="cod-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="cod_factor", component_property="value"),
Input(component_id="cod_n", component_property="value"),
Input(component_id="cod-aggtype", component_property="value"),
Input(component_id="filter_dict", component_property="children"),
],
)
def cod_plot(va_data, timeframe, factor="All", N=10, agg_type="counts", filter_dict=None):
figure = go.Figure()
plot_data = pd.read_json(va_data)
if plot_data.size > 0:
if filter_dict is not None:
plot_data = plot_data.iloc[json.loads(filter_dict)["ids"], :]
factor = factor.lower()
if factor != "all":
assert factor in ["age group", "sex"]
factor_col = LOOKUP["demo_to_col"][factor]
factor_title = "by " + factor.capitalize()
counts = plot_data.pivot_table(
index="cause",
columns=factor_col,
values="id",
aggfunc=pd.Series.nunique,
fill_value=0,
margins=True,
)
plot_fn = go.Scatter
else:
counts = pd.DataFrame({"All": plot_data.cause.value_counts()})
factor_title = "Overall"
plot_fn = go.Bar
counts["cod"] = counts.index
counts = counts[counts["cod"] != "All"]
counts = counts.sort_values(by="All", ascending=False).head(N)
groups = list(set(counts.columns).difference(set(["cod"])))
if factor != "all":
groups.remove("All")
for i, group in enumerate(groups):
if agg_type != "counts":
counts[group] = 100 * counts[group] / counts[group].sum()
figure.add_trace(
plot_fn(
y=counts[group],
x=counts["cod"],
name=group.capitalize(),
orientation="v",
marker=dict(
color=LOOKUP["color_list"][i],
line=dict(color="rgb(158,158,158)", width=1),
),
)
)
figure.update_layout(
barmode="stack",
title_text="Top {} Causes of Death {}".format(N, factor_title),
xaxis_tickangle=-45,
yaxis_title="Count" if agg_type == "counts" else "Percent",
)
return dcc.Graph(id="cod_plot", figure=figure)
# =========Age Distribution Plot Logic============================================#
@app.callback(
Output(component_id="age-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="filter_dict", component_property="children"),
],
)
def age_plot(va_data, timeframe, filter_dict=None, bins=9):
figure = go.Figure()
plot_data = pd.read_json(va_data)
if plot_data.size > 0:
if filter_dict is not None:
plot_data = plot_data.iloc[json.loads(filter_dict)["ids"], :]
        histogram_data = [plot_data["age"].dropna().tolist()]
        group_labels = ["Verbal Autopsies"]  # name of the dataset
        figure = ff.create_distplot(
            histogram_data, group_labels, show_rug=False, bin_size=[bins]
)
figure.update_layout(
title_text="Verbal Autopsy Age Distribution",
xaxis_title="Age",
yaxis_title="Density",
)
return dcc.Graph(id="age_plot", figure=figure)
# =========Gender Plot Logic============================================#
@app.callback(
Output(component_id="sex-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="filter_dict", component_property="children"),
],
)
def sex_plot(va_data, timeframe, filter_dict=None):
figure = go.Figure()
plot_data = pd.read_json(va_data)
if plot_data.size > 0:
if filter_dict is not None:
plot_data = plot_data.iloc[json.loads(filter_dict)["ids"], :]
column_name = LOOKUP["demo_to_col"]["sex"]
sex_counts = plot_data[column_name].value_counts()
figure.add_trace(
go.Pie(labels=sex_counts.index.tolist(), values=sex_counts.values, hole=0.3)
)
figure.update_layout(title_text="Verbal Autopsies by Gender")
return dcc.Graph(id="sex_plot", figure=figure)
# ========= Time Series Plot Logic============================================#
@app.callback(
Output(component_id="ts-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="timeframe", component_property="value"),
Input(component_id="group_period", component_property="value"),
Input(component_id="filter_dict", component_property="children"),
Input(component_id="ts_factor", component_property="value"),
],
)
def trend_plot(va_data, timeframe, group_period, filter_dict=None, factor="All"):
figure = go.Figure()
plot_data = pd.read_json(va_data)
if plot_data.size > 0:
if filter_dict is not None:
plot_data = plot_data.iloc[json.loads(filter_dict)["ids"], :]
group_period = group_period.lower()
aggregate_title = group_period.capitalize()
plot_data["date"] = pd.to_datetime(plot_data["date"])
plot_data["timegroup"] = pd.to_datetime(plot_data["date"])
if group_period == "week":
plot_data["timegroup"] = pd.to_datetime(
plot_data["date"]
.dt.to_period("W")
.apply(lambda x: x.strftime("%Y-%m-%d"))
)
elif group_period == "month":
plot_data["timegroup"] = pd.to_datetime(
plot_data["date"].dt.to_period("M").apply(lambda x: x.strftime("%Y-%m"))
)
elif group_period == "year":
plot_data["timegroup"] = plot_data["date"].dt.to_period("Y").astype(str)
dtype = "category" if group_period == "year" else "date"
factor = factor.lower()
if factor != "all":
assert factor in LOOKUP["demo_to_col"]
factor_col = LOOKUP["demo_to_col"][factor]
trend_counts = plot_data.pivot_table(
index="timegroup",
columns=factor_col,
values="id",
aggfunc=pd.Series.nunique,
fill_value=0,
margins=False,
)
plot_fn = go.Scatter
else:
trend_counts = (
plot_data[["timegroup", "id"]]
.groupby("timegroup")
.count()
.rename(columns={"id": "all"})
)
plot_fn = go.Bar
for i, group in enumerate(trend_counts.columns.tolist()):
figure.add_trace(
plot_fn(
y=trend_counts[group],
x=trend_counts.index,
name=group.capitalize(),
marker=dict(
color=LOOKUP["color_list"][i],
line=dict(color=LOOKUP["color_list"][i], width=1),
),
)
)
figure.update_layout(
title_text="Verbal Autopsies by {}".format(aggregate_title),
xaxis_title=aggregate_title,
yaxis_title="Verbal Autopsy Count",
xaxis_type=dtype,
xaxis_tickangle=-45,
xaxis_tickformatstops=[
dict(
dtickrange=[None, None],
value=LOOKUP["date_display_formats"].get(group_period, "%d/%m/%Y"),
)
],
)
return dcc.Graph(id="trend_plot", figure=figure)
# =========Place of Death Plot Logic============================================#
@app.callback(
Output(component_id="pod-container", component_property="children"),
[
Input(component_id="va_data", component_property="children"),
Input(component_id="filter_dict", component_property="children")
],
)
def place_of_death_plt(va_data, filter_dict=None):
figure = go.Figure()
plot_data = pd.read_json(va_data)
if plot_data.size > 0:
if filter_dict is not None:
plot_data = plot_data.iloc[json.loads(filter_dict)["ids"], :]
plot_data["Id10058"] = plot_data["Id10058"].apply(
lambda x: LOOKUP["death_location_names"].get(x, x.capitalize())
)
location_counts = plot_data["Id10058"].value_counts()
figure = go.Figure(
go.Pie(
labels=location_counts.index.tolist(),
values=location_counts.values,
hole=0.3,
)
)
figure.update_layout(title_text="VAs by Place of Death")
return dcc.Graph(id="pod_plt", figure=figure)
# uncomment this if running as Dash app (as opposed to DjangoDash app)
# app.run_server(debug= True)
|
"""
Django settings for django_blog project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8q@q!u=s#vgqz7=s)2au6w4ae@4mt8i=#76x6ll-x_jbs-_zh%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.aaron-zhao.com']
# Application definition
INSTALLED_APPS = [
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'ckeditor',
'ckeditor_uploader',
'django.contrib.sites',
'django.contrib.sitemaps',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.weibo',
'crispy_forms',
'imagekit',
'robots',
'mptt',
'easy_comment',
'notifications',
'online_status',
'django_celery_results',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'online_status.middleware.OnlineStatusMiddleware',
]
ROOT_URLCONF = 'django_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'blog.views.global_setting',
],
},
},
]
WSGI_APPLICATION = 'django_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'blogdb',
'USER': 'root',
"PASSWORD": 'mangui710',
'HOST': '127.0.0.1',
'PORT': '3700',
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Media root settings
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Rich text editor (CKEditor) settings
CKEDITOR_UPLOAD_PATH = 'upload/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_ALLOW_NONIMAGE_FILES = False
CKEDITOR_RESTRICT_BY_USER = True
CKEDITOR_RESTRICT_BY_DATE = True
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono',
'language':'zh-cn',
'width': '750px',
'height': '500px',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote',
'-','JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor', '-', 'RemoveFormat']},
{'name': 'insert',
'items': ['Image', '-', 'Flash', 'Iframe', '-', 'Table', 'CodeSnippet', 'Templates']},
'/',
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'styles', 'items': ['Format', 'Font', 'FontSize']},
{'name': 'special', 'items': ['Subscript', 'Superscript', '-', 'HorizontalRule',
'SpecialChar', 'Smiley']},
{'name': 'tools', 'items': ['Undo', 'Redo', '-', 'Source', 'Preview', 'Save', '-', 'Maximize']}
],
'toolbar': 'YourCustomToolbarConfig',
'image_previewText':' ',
'tabSpaces': 4,
'extraPlugins': ','.join(
[
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath',
'codesnippet',
'uploadimage',
'uploadfile',
'prism',
]),
},
'comment': {
        # Set the editor width/height to suit your own page layout
'width': 'auto',
'height': '150px',
'image_previewText': ' ',
'tabSpaces': 4,
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline', 'RemoveFormat'],
['NumberedList', 'BulletedList'],
['CodeSnippet'],
['Image', 'Link', 'Unlink']
],
'extraPlugins': ','.join(['codesnippet', 'uploadimage', 'prism', 'widget', 'lineutils', ]),
}
}
# Custom logging configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
            'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(name)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s'}  # log format
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters':['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
'default': {
'level':'INFO',
'class':'logging.handlers.RotatingFileHandler',
            'filename': 'log/all.log',     # log output file
            'maxBytes': 1024*1024*5,       # max file size
            'backupCount': 5,              # number of backups to keep
            'formatter': 'standard',       # which formatter to use
},
'error': {
'level':'ERROR',
'class':'logging.handlers.RotatingFileHandler',
'filename': 'log/error.log',
'maxBytes':1024*1024*5,
'backupCount': 5,
'formatter':'standard',
},
'console':{
'level': 'INFO',
'filters':['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': 'log/script.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter':'standard',
},
'scripts_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename':'log/script.log',
'maxBytes': 1024*1024*5,
'backupCount': 5,
'formatter':'standard',
}
},
'loggers': {
'django': {
'handlers': ['default', 'mail_admins'],
'level': 'DEBUG',
'propagate': False
},
'django.request': {
'handlers': ['request_handler', 'console', 'mail_admins'],
'level': 'DEBUG',
'propagate': False,
},
'scripts': {
'handlers': ['scripts_handler'],
'level': 'INFO',
'propagate': False
},
'blog.views': {
'handlers': ['default', 'error', 'mail_admins'],
'level': 'DEBUG',
'propagate': True
},
}
}
# Website INFO Global Var
SITE_NAME = 'Python学习笔记、Django学习笔记 - AA的博客'
SITE_DESCP = 'AA的博客是分享学习Python技术与经验的个人博客,由Python、Django以及资源分享等分类组成,内容主要是Django博客开发。'
SITE_KEYWORDS = 'AA的博客, Python学习笔记, Django博客开发, Django学习笔记'
# Custom user model
AUTH_USER_MODEL = 'blog.user'
# django-allauth related settings
AUTHENTICATION_BACKENDS = (
    # Login used by the Django admin is unrelated to django-allauth
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# django.contrib.sites was added to INSTALLED_APPS above, so SITE_ID must be set
SITE_ID = 1
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/accounts/login'
SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_CONFIRMATION_COOLDOWN = 180
ACCOUNT_LOGOUT_ON_GET = True
# Email setting
# SMTP server; SendCloud's service was used originally
EMAIL_HOST = 'smtp.mxhichina.com'
EMAIL_HOST_USER = 'support@aaron-zhao.com'
EMAIL_HOST_PASSWORD = '1234'
EMAIL_PORT = 465
#EMAIL_HOST = 'smtpcloud.sohu.com'
#EMAIL_HOST_USER = 'Aaroon_test_N05MI9' 'bguqzpoxqlhccaig' qq
#EMAIL_HOST_PASSWORD = 'u7qFYbkRKK08PMtR'
#EMAIL_PORT = 25
# Whether SSL or TLS is used
EMAIL_USE_SSL = True
#EMAIL_USE_TLS = True
# Default sender; if unset, Django uses webmaster@localhost
DEFAULT_FROM_EMAIL = 'Support <support@aaron-zhao.com>'
ADMINS = (('Aaron', 'rudy710@qq.com'), ('Barry', 'zhruyao@163.com'))
# Send a notification to MANAGERS when a non-empty link results in a 404 error
SEND_BROKEN_LINK_EMAILS = True
MANAGERS = ADMINS
# easy_comment setting
COMMENT_ENTRY_MODEL = 'blog.post'
ROBOTS_USE_HOST = False
# notification setting
NOTIFICATIONS_USE_JSONFIELD=True
SEND_NOTIFICATION_EMAIL = True
#redis settings
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = 0
#celery settings
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/0'
# CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'django-cache'
|
from ruby.network.buffer.Encoder import Encoder
class Tests:
@staticmethod
def send(data):
encoder = Encoder()
message = encoder.encoder(data)
print(message)
|
import os
import unittest
from ....BaseTestCase import BaseTestCase
from kombi.Crawler import Crawler
from kombi.Crawler.PathHolder import PathHolder
from kombi.Crawler.Fs.Texture import TextureCrawler
class TextureCrawlerTest(BaseTestCase):
"""Test Texture crawler."""
__exrFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test_DIFF_u1_v1.exr")
__tifFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test_bump_1002.tif")
__badExrFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test_0001.exr")
def testTextureCrawler(self):
"""
        Test that the Texture crawler works properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertIsInstance(crawler, TextureCrawler)
crawler = Crawler.create(PathHolder(self.__tifFile))
self.assertIsInstance(crawler, TextureCrawler)
crawler = Crawler.create(PathHolder(self.__badExrFile))
self.assertNotIsInstance(crawler, TextureCrawler)
def testTextureVariables(self):
"""
Test that variables are set properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertEqual(crawler.var("type"), "texture")
self.assertEqual(crawler.var("category"), "texture")
self.assertEqual(crawler.var("assetName"), "test")
self.assertEqual(crawler.var("mapType"), "DIFF")
self.assertEqual(crawler.var("udim"), 1001)
self.assertEqual(crawler.var("variant"), "default")
crawler = Crawler.create(PathHolder(self.__tifFile))
self.assertEqual(crawler.var("assetName"), "test")
self.assertEqual(crawler.var("mapType"), "BUMP")
self.assertEqual(crawler.var("udim"), 1002)
self.assertEqual(crawler.var("variant"), "default")
def testTextureTags(self):
"""
Test that the tags are set properly.
"""
crawler = Crawler.create(PathHolder(self.__exrFile))
self.assertEqual(crawler.tag("group"), "test-default")
if __name__ == "__main__":
unittest.main()
|
def get_client(url):
from verta import Client
return Client(url)
|
from datetime import timedelta
from django_performance_testing.timing import TimeCollector
from freezegun import freeze_time
import pytest
from testapp.test_helpers import capture_result_collected
@pytest.mark.parametrize('seconds', [10, 5, 0.04])
def test_captures_and_measures_elapsed_time(seconds):
with capture_result_collected() as captured:
with freeze_time('2016-09-22 15:57:01') as frozen_time:
with TimeCollector():
frozen_time.tick(timedelta(seconds=seconds))
assert len(captured.calls) == 1
assert pytest.approx(seconds) == captured.calls[0]['results'][0].value
|
from enum import Enum
# List of palettes to use. Remember to wrap the first + last colors!
class Palette(Enum):
Autumn = [
[0.00, [69, 21, 21]],
[0.33, [35, 25, 25]],
[0.67, [98, 58, 26]],
[1.00, [69, 21, 21]],
]
Beach = [
[0, [0,177,56]],
[0.25, [226,168,110]],
[0.5, [5,111,142]],
[0.75, [6,73,163]],
[1, [0,177,56]],
]
Fire = [
[0, [160,0,0]],
[0.33, [240,60,5]],
[0.66, [255,245,60]],
[1, [160,0,0]],
]
Forest = [
[0, [106, 255, 61]],
[0.25, [48, 90, 34]],
[0.5, [81, 230, 68]],
[0.75, [140, 90, 40]],
[1, [106, 255, 61]],
]
Ocean = [
[0, [30,170,180]],
[0.33, [0,20,250]],
[0.66, [30,250,50]],
[1, [30,170,180]],
]
Pink = [
[0, [250,30,60]],
[0.33, [80,30,200]],
[0.66, [100,70,100]],
[1, [250,30,60]],
]
Rainbow = [
[0, [255,0,0]],
[0.33, [0,255,0]],
[0.66, [0,0,255]],
[1, [255,0,0]],
]
Sorbet = [
[0, [62, 65, 137]],
[0.25, [54, 129, 109]],
[0.5, [210, 158, 46]],
[0.75, [140, 79, 94]],
[1, [62, 65, 137]],
]
class Palettes:
# NOTE: These values will be overridden by Config
MAX_BRIGHTNESS = 20
BRIGHTNESS_SCALAR = MAX_BRIGHTNESS / 255
@classmethod
def updateBrightness(cls, newValue):
cls.MAX_BRIGHTNESS = max(0, min(20, newValue)) # safety measure to make sure we never go above 20 for all leds at once
cls.BRIGHTNESS_SCALAR = cls.MAX_BRIGHTNESS / 255
@classmethod
def hsv_to_rgb(cls, h, s, v):
        if s == 0.0:
            v *= 255
            return [v, v, v]
        i = int(h * 6.)  # XXX assume int() truncates!
        f = (h * 6.) - i
        p = int(255 * (v * (1. - s)))
        q = int(255 * (v * (1. - s * f)))
        t = int(255 * (v * (1. - s * (1. - f))))
        v *= 255
        i %= 6
        if i == 0: return [v, t, p]
        if i == 1: return [q, v, p]
        if i == 2: return [p, v, t]
        if i == 3: return [p, q, v]
        if i == 4: return [t, p, v]
        if i == 5: return [v, p, q]
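    # Worked examples for the conversion above (values follow from the formula,
    # shown only for illustration): hsv_to_rgb_int(0.0, 1.0, 1.0) yields
    # [255, 0, 0] (pure red) and hsv_to_rgb_int(0.5, 1.0, 1.0) yields
    # [0, 255, 255] (cyan).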
@classmethod
def hsv_to_rgb_int(cls, h, s, v):
rgb = cls.hsv_to_rgb(h, s, v)
return [int(rgb[0]), int(rgb[1]), int(rgb[2])]
@classmethod
def lerp(cls, a, b, t, scaled=False):
if scaled: return int((a + (b-a) * t) * cls.BRIGHTNESS_SCALAR)
else: return int(a + (b-a) * t)
@classmethod
def lerpColor(cls, a, b, t, scaled=False):
return [cls.lerp(a[0], b[0], t, scaled), cls.lerp(a[1], b[1], t, scaled), cls.lerp(a[2], b[2], t, scaled)]
@classmethod
def generatePalette(cls, gradient, LED_COUNT):
palette = []
i = 0
for step in range(len(gradient) - 1):
left = gradient[step]
right = gradient[step + 1]
percent = i / (LED_COUNT - 1)
while percent <= right[0]:
t = (percent - left[0]) / (right[0] - left[0])
# palette.append([i, step, t, lerpColor(left[1], right[1], t)])
palette.append(cls.lerpColor(left[1], right[1], t, True))
i += 1
percent = i / (LED_COUNT - 1)
return palette
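
# Minimal usage sketch (the 10-LED strip and the palette choice below are
# assumptions for illustration; real values come from the controller configuration):
if __name__ == "__main__":
    Palettes.updateBrightness(10)
    demo = Palettes.generatePalette(Palette.Fire.value, 10)
    print(len(demo), demo[0])  # 10 brightness-scaled [r, g, b] triplets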
|
"""
BPJS Calculator
"""
class Bpjs:
""" BPJS Class """
def __init__(self, employee_information, configuration):
"""
Args:
employee_information (dictionary):
base_salary -- (Integer) Gaji Pokok
fixed_allowances -- (Integer) Tunjangan Tetap
non_fixed_allowances -- (Integer) Tunjangan Tidak Tetap
is_salary_allowances -- (Boolean), True it means we calculate SALARY +
ALLOWANCE , False it means we calculate SALARY ONLY
accident_insurance_status -- (Boolean) BPJS Jaminan Kecelakaan
Enrollment Status
pension_insurance_status -- (Boolean) BPJS Jaminan Pensiun
Enrollment Status
death_insurance_status -- (Boolean) BPJS Jaminan Kematian
Enrollment Status
health_insurance_status -- (Boolean) BPJS Jaminan Kesehatan
Enrollment Status
industry_risk_rate -- (Float) Industry Risk Rate (ex : 0.24)
configuration -- (dictionary):
pension_max_fee -- (Integer) Jumlah Maksimal BPJS Pensiun
health_max_fee -- (Integer) Jumlah Maksimal BPJS
old_pension_max_fee -- (Integer) Jumlah Maksimal BPJS
individual_health_insurance_rate -- (Integer) Jumlah Maksimal BPJS
company_health_insurance_rate -- (Integer) Jumlah Maksimal BPJS
death_insurance_rate -- (Integer) Jumlah Maksimal BPJS
individual_old_age_insurance_rate -- (Integer) Jumlah Maksimal BPJS
company_old_age_insurance_rate -- (Integer) Jumlah Maksimal BPJS
individual_pension_insurance_rate -- (Integer) Jumlah Maksimal BPJS
company_pension_insurance_rate -- (Integer) Jumlah Maksimal BPJS
"""
self.base_salary = employee_information["base_salary"]
self.fixed_allowances = employee_information["fixed_allowances"]
self.non_fixed_allowances = employee_information["non_fixed_allowances"]
self.is_salary_allowances = employee_information["is_salary_allowances"]
self.accident_insurance_status = employee_information["accident_insurance_status"]
self.pension_insurance_status = employee_information["pension_insurance_status"]
self.old_age_insurance_status = employee_information["old_age_insurance_status"]
self.death_insurance_status = employee_information["death_insurance_status"]
self.health_insurance_status = employee_information["health_insurance_status"]
self.industry_risk_rate = employee_information["industry_risk_rate"]
self.health_max_fee = configuration["health_max_fee"]
self.pension_max_fee = configuration["pension_max_fee"]
self.old_pension_max_fee = configuration["old_pension_max_fee"]
self.individual_health_insurance_rate =\
configuration["individual_health_insurance_rate"]
self.company_health_insurance_rate =\
configuration["company_health_insurance_rate"]
self.death_insurance_rate =\
configuration["death_insurance_rate"]
self.individual_old_age_insurance_rate=\
configuration["individual_old_age_insurance_rate"]
self.company_old_age_insurance_rate=\
configuration["company_old_age_insurance_rate"]
self.individual_pension_insurance_rate =\
configuration["individual_pension_insurance_rate"]
self.company_pension_insurance_rate =\
configuration["company_pension_insurance_rate"]
#end def
@staticmethod
def summarize(allowances):
"""
        Function to summarize allowances
        Args:
            allowances (dictionary or int): allowances keyed by name, or a single total amount
        Returns:
            total_allowances (int): total allowances
"""
total_allowances = 0
if isinstance(allowances, dict):
for key, value in allowances.items():
total_allowances = total_allowances + int(value)
#end for
else:
total_allowances = allowances
return total_allowances
#end def
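    # Illustrative call (amounts are made up): summarize({"meal": 200000,
    # "transport": 300000}) returns 500000, while a plain integer such as
    # summarize(750000) is passed through unchanged.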
def _individual_health_insurance(self, total_salary):
"""
Function to calculate bpjs health individual
Args:
total_salary (int): The amount of base salary that person receive each month.
Returns:
            individual_health_insurance (int): the amount of BPJS health
                insurance the person has to pay monthly
"""
individual_health_insurance = 0
if total_salary <= self.health_max_fee:
individual_health_insurance = total_salary \
* self.individual_health_insurance_rate
else:
individual_health_insurance = self.health_max_fee\
* self.individual_health_insurance_rate
#end if
return individual_health_insurance
#end def
def _company_health_insurance(self, total_salary):
"""
Function to calculate bpjs health that paid by company
Args:
total_salary (int): The amount of base salary that person receive each month.
Returns:
            company_health_insurance (int): the amount of BPJS health
                insurance the company has to pay monthly
"""
company_health_insurance = 0
if total_salary <= self.health_max_fee:
company_health_insurance = total_salary *\
self.company_health_insurance_rate
else:
company_health_insurance = self.health_max_fee\
* self.company_health_insurance_rate
#end if
return company_health_insurance
#end def
@staticmethod
def _accident_insurance(total_salary, industry_risk_rate):
"""
Function to calculate bpjs accident insurance
Args:
total_salary (int): The amount of base salary that person receive each month.
            industry_risk_rate (float): the risk percentage based on the
                industry the person works in
        Returns:
            accident_insurance (int): the amount of BPJS accident insurance
                that has to be paid monthly
"""
industry_type_rate = industry_risk_rate / 100
accident_insurance_rate = industry_type_rate
accident_insurance = accident_insurance_rate * total_salary
return round(accident_insurance, 1)
#end def
def _death_insurance(self, total_salary):
"""
Function to calculate BPJS death insurance
Args:
total_salary (int): The amount of base salary that person receive each month.
Returns:
            death_insurance (int): amount of death insurance that has to be paid monthly
"""
return int(total_salary * self.death_insurance_rate)
#end def
def _company_old_age_insurance(self, total_salary):
"""
Function to calculate company old age insurance
Args:
total_salary (int): The amount of base salary that person receive each month.
Returns:
            company_old_age_insurance (int): amount of the company's old age
                insurance contribution.
"""
return self.company_old_age_insurance_rate * total_salary
#end def
def _individual_old_age_insurance(self, total_salary):
"""
Function to calculate person old age insurance
Args:
total_salary (int): The amount of base salary that person receive each month.
Returns:
            individual_old_age_insurance (int): amount of the person's old age
                insurance contribution.
"""
return self.individual_old_age_insurance_rate * total_salary
#end def
def _individual_pension_insurance(self, total_salary, month=None, year=None):
"""
Function to calculate person pension contribution fee
Args:
            total_salary (int): the total salary the person receives each month.
Returns:
individual_pension_insurance(int) : amount of individual
pension insurance.
"""
individual_pension_insurance = 0
pension_max_fee = self.pension_max_fee
# special case in 2018
if year == 2018:
if month <= 2:
pension_max_fee = self.old_pension_max_fee
#end if
#end if
if total_salary > pension_max_fee:
individual_pension_insurance = pension_max_fee \
* self.individual_pension_insurance_rate
else:
individual_pension_insurance = total_salary \
* self.individual_pension_insurance_rate
#end if
return individual_pension_insurance
#end def
def _company_pension_insurance(self, total_salary, month=None, year=None):
"""
Function to calculate company pension contribution fee
Args:
            total_salary (int): the total salary the person receives each month.
        Returns:
            company_pension_insurance (int): amount of the company's pension
                insurance contribution.
"""
company_pension_insurance = 0
pension_max_fee = self.pension_max_fee
# special case in 2018
if year == 2018:
if month <= 2:
pension_max_fee = self.old_pension_max_fee
#end if
#end if
if total_salary > pension_max_fee:
company_pension_insurance = pension_max_fee *\
self.company_pension_insurance_rate
else:
company_pension_insurance = total_salary * \
self.company_pension_insurance_rate
#end if
return company_pension_insurance
#end def
def monthly_fee(self):
"""
calculate person bpjs monthly fee
return:
old_age_insurance
pension_insurance
health_insurance
death_insurance
accident_insurance
"""
total_salary = self.base_salary
if self.is_salary_allowances is True:
fixed_allowances = self.summarize( self.fixed_allowances )
non_fixed_allowances = self.summarize( self.non_fixed_allowances )
total_salary = total_salary + non_fixed_allowances + fixed_allowances
#end if
company_old_age_insurance = 0
individual_old_age_insurance = 0
if self.old_age_insurance_status is True:
company_old_age_insurance = \
self._company_old_age_insurance(total_salary)
individual_old_age_insurance = \
self._individual_old_age_insurance(total_salary)
#end if
company_pension_insurance = 0
individual_pension_insurance = 0
if self.pension_insurance_status is True:
company_pension_insurance = \
self._company_pension_insurance(total_salary)
individual_pension_insurance = \
self._individual_pension_insurance(total_salary)
#end if
company_health_insurance = 0
individual_health_insurance = 0
if self.health_insurance_status is True:
company_health_insurance = \
self._company_health_insurance(total_salary)
individual_health_insurance = \
self._individual_health_insurance(total_salary)
#end if
death_insurance = 0
if self.death_insurance_status is True:
death_insurance = self._death_insurance(total_salary)
#end if
accident_insurance = 0
if self.accident_insurance_status is True:
accident_insurance = \
self._accident_insurance(total_salary, \
self.industry_risk_rate)
#end if
monthly = {
"old_age_insurance" : {
"company" : company_old_age_insurance,
"individual" : individual_old_age_insurance,
},
"pension_insurance" : {
"company" : company_pension_insurance,
"individual" : individual_pension_insurance,
},
"health_insurance" : {
"company" : company_health_insurance,
"individual" : individual_health_insurance,
},
"death_insurance" : death_insurance,
"accident_insurance" : accident_insurance
}
return monthly
#end def
def annual_fee(self, working_months, year, with_bpjs=True):
"""
calculate annual bpjs fee
args:
working_months
year
with_bpjs
parameter:
working_months -- working_months
year -- year
"""
monthly_bpjs = []
total_salary = self.base_salary
if self.is_salary_allowances is True:
fixed_allowances = self.summarize( self.fixed_allowances )
non_fixed_allowances = self.summarize( self.non_fixed_allowances )
total_salary = total_salary + non_fixed_allowances + fixed_allowances
#end if
# initialize variable for storing the annual bpjs
annual_c_old_age_insurance = 0
annual_i_old_age_insurance = 0
annual_c_pension_insurance = 0
annual_i_pension_insurance = 0
annual_c_health_insurance = 0
annual_i_health_insurance = 0
annual_death_insurance = 0
annual_accident_insurance = 0
if with_bpjs is True:
            # only calculate BPJS when enabled; everything stays at zero when disabled
start_month = 1
for month in range(start_month, working_months+1):
company_old_age_insurance = 0
individual_old_age_insurance = 0
if self.old_age_insurance_status is True:
company_old_age_insurance = \
self._company_old_age_insurance(total_salary)
individual_old_age_insurance = \
self._individual_old_age_insurance(total_salary)
#end if
company_pension_insurance = 0
individual_pension_insurance = 0
if self.pension_insurance_status is True:
company_pension_insurance = \
self._company_pension_insurance(total_salary, month, year)
individual_pension_insurance = \
self._individual_pension_insurance(total_salary, month, year)
#end if
company_health_insurance = 0
individual_health_insurance = 0
if self.health_insurance_status is True:
company_health_insurance = \
self._company_health_insurance(total_salary)
individual_health_insurance = \
self._individual_health_insurance(total_salary)
#end if
death_insurance = 0
if self.death_insurance_status is True:
death_insurance = self._death_insurance(total_salary)
#end if
accident_insurance = 0
if self.accident_insurance_status is True:
accident_insurance = \
self._accident_insurance(total_salary, \
self.industry_risk_rate)
#end if
monthly = {
"old_age_insurance" : {
"company" : company_old_age_insurance,
"individual" : individual_old_age_insurance,
},
"pension_insurance" : {
"company" : company_pension_insurance,
"individual" : individual_pension_insurance,
},
"health_insurance" : {
"company" : company_health_insurance,
"individual" : individual_health_insurance,
},
"death_insurance" : death_insurance,
"accident_insurance" : accident_insurance
}
monthly_bpjs.append(monthly)
annual_c_old_age_insurance = annual_c_old_age_insurance \
+ company_old_age_insurance
annual_i_old_age_insurance = annual_i_old_age_insurance \
+ individual_old_age_insurance
annual_c_pension_insurance = annual_c_pension_insurance \
+ company_pension_insurance
annual_i_pension_insurance = annual_i_pension_insurance \
+ individual_pension_insurance
annual_c_health_insurance = annual_c_health_insurance \
+ company_health_insurance
annual_i_health_insurance = annual_i_health_insurance \
+ individual_health_insurance
annual_death_insurance = annual_death_insurance\
+ death_insurance
annual_accident_insurance = annual_accident_insurance\
+ accident_insurance
#end for
annual_bpjs = {
"old_age_insurance" : {
"company" : annual_c_old_age_insurance,
"individual" : annual_i_old_age_insurance,
},
"pension_insurance" : {
"company" : annual_c_pension_insurance,
"individual" : annual_i_pension_insurance,
},
"health_insurance" : {
"company" : annual_c_health_insurance,
"individual" : annual_i_health_insurance,
},
"death_insurance" : annual_death_insurance,
"accident_insurance" : annual_accident_insurance
}
return annual_bpjs
#end def
#end class
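
# Minimal usage sketch for the calculator above. Every figure below is a made-up
# placeholder (salary, caps and rates are assumptions, not statutory values); the
# point is only to show the expected shape of the two input dictionaries.
if __name__ == "__main__":
    employee = {
        "base_salary": 10000000,
        "fixed_allowances": {"meal": 500000},
        "non_fixed_allowances": 0,
        "is_salary_allowances": True,
        "accident_insurance_status": True,
        "pension_insurance_status": True,
        "old_age_insurance_status": True,
        "death_insurance_status": True,
        "health_insurance_status": True,
        "industry_risk_rate": 0.24,
    }
    config = {
        "health_max_fee": 12000000,
        "pension_max_fee": 8939700,
        "old_pension_max_fee": 8094000,
        "individual_health_insurance_rate": 0.01,
        "company_health_insurance_rate": 0.04,
        "death_insurance_rate": 0.003,
        "individual_old_age_insurance_rate": 0.02,
        "company_old_age_insurance_rate": 0.037,
        "individual_pension_insurance_rate": 0.01,
        "company_pension_insurance_rate": 0.02,
    }
    bpjs = Bpjs(employee, config)
    print(bpjs.monthly_fee())
    print(bpjs.annual_fee(working_months=12, year=2021))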
|
from crispy_forms.layout import Layout, HTML
from django.contrib import messages
from django.http import HttpResponseRedirect
from mezzanine.pages.page_processors import processor_for
from .forms import UpdateSQLiteLayout, SeriesSelectionLayout, TimeSeriesMetaDataLayout, \
UTCOffSetLayout, UTCOffSetForm
from hs_app_timeseries.models import TimeSeriesResource
from hs_core import page_processors
from hs_core.views import add_generic_context
@processor_for(TimeSeriesResource)
def landing_page(request, page):
"""
A typical Mezzanine page processor.
"""
content_model = page.get_content_model()
edit_resource = page_processors.check_resource_mode(request)
if content_model.metadata.is_dirty and content_model.can_update_sqlite_file:
messages.info(request, "SQLite file is out of sync with metadata changes.")
extended_metadata_exists = False
if content_model.metadata.sites or \
content_model.metadata.variables or \
content_model.metadata.methods or \
content_model.metadata.processing_levels or \
content_model.metadata.time_series_results:
extended_metadata_exists = True
series_ids = content_model.metadata.series_ids_with_labels
if 'series_id' in request.GET:
selected_series_id = request.GET['series_id']
if selected_series_id not in list(series_ids.keys()):
# this will happen only in case of CSV file upload when data is written
# first time to the blank sqlite file as the series ids get changed to
# uuids
selected_series_id = list(series_ids.keys())[0]
if 'series_id' in request.session:
if selected_series_id != request.session['series_id']:
request.session['series_id'] = selected_series_id
else:
request.session['series_id'] = selected_series_id
else:
selected_series_id = list(series_ids.keys())[0] if list(series_ids.keys()) else None
request.session['series_id'] = selected_series_id
ts_result_value_count = None
if content_model.metadata.series_names and selected_series_id is not None:
sorted_series_names = sorted(content_model.metadata.series_names)
selected_series_name = sorted_series_names[int(selected_series_id)]
ts_result_value_count = int(content_model.metadata.value_counts[selected_series_name])
# view depends on whether the resource is being edited
if not edit_resource:
# resource in VIEW Mode
context = _get_resource_view_context(page, request, content_model, selected_series_id,
series_ids, extended_metadata_exists)
if isinstance(context, HttpResponseRedirect):
# sending user to login page
return context
else:
# resource in EDIT Mode
context = _get_resource_edit_context(page, request, content_model, selected_series_id,
series_ids, ts_result_value_count,
extended_metadata_exists)
# TODO: can we refactor to make it impossible to skip adding the generic context
hs_core_context = add_generic_context(request, page)
context.update(hs_core_context)
return context
def _get_resource_view_context(page, request, content_model, selected_series_id, series_ids,
extended_metadata_exists):
# get the context from hs_core
context = page_processors.get_page_context(page, request.user, resource_edit=False,
extended_metadata_layout=None, request=request)
context['extended_metadata_exists'] = extended_metadata_exists
context['selected_series_id'] = selected_series_id
context['series_ids'] = series_ids
context['sites'] = [site for site in content_model.metadata.sites if selected_series_id in
site.series_ids]
context['variables'] = [variable for variable in content_model.metadata.variables if
selected_series_id in variable.series_ids]
context['methods'] = [method for method in content_model.metadata.methods if
selected_series_id in method.series_ids]
context['processing_levels'] = [pro_level for pro_level in
content_model.metadata.processing_levels if
selected_series_id in pro_level.series_ids]
context['timeseries_results'] = [ts_result for ts_result in
content_model.metadata.time_series_results if
selected_series_id in ts_result.series_ids]
context['utc_offset'] = content_model.metadata.utc_offset.value if \
content_model.metadata.utc_offset else None
return context
def _get_resource_edit_context(page, request, content_model, selected_series_id, series_ids,
ts_result_value_count, extended_metadata_exists):
from hs_file_types.models.timeseries import create_site_form, create_variable_form, \
create_method_form, create_processing_level_form, create_timeseries_result_form
utcoffset_form = None
if content_model.has_csv_file:
utc_offset = content_model.metadata.utc_offset
utcoffset_form = UTCOffSetForm(instance=utc_offset,
res_short_id=content_model.short_id,
element_id=utc_offset.id if utc_offset else None,
selected_series_id=selected_series_id)
# create timeseries specific metadata element forms
site_form = create_site_form(target=content_model, selected_series_id=selected_series_id)
variable_form = create_variable_form(target=content_model,
selected_series_id=selected_series_id)
method_form = create_method_form(target=content_model, selected_series_id=selected_series_id)
processing_level_form = create_processing_level_form(target=content_model,
selected_series_id=selected_series_id)
timeseries_result_form = create_timeseries_result_form(target=content_model,
selected_series_id=selected_series_id)
ext_md_layout = Layout(UpdateSQLiteLayout,
SeriesSelectionLayout,
TimeSeriesMetaDataLayout,
UTCOffSetLayout)
if content_model.files.all().count() == 0:
ext_md_layout = Layout(HTML("""<div class="alert alert-warning"><strong>No resource
specific metadata is available. Add an ODM2 SQLite file or CSV file to see
metadata.</strong></div>"""))
# get the context from hs_core
context = page_processors.get_page_context(page, request.user, resource_edit=True,
extended_metadata_layout=ext_md_layout,
request=request)
# customize base context
context['extended_metadata_exists'] = extended_metadata_exists
context['resource_type'] = 'Time Series Resource'
context['selected_series_id'] = selected_series_id
context['series_ids'] = series_ids
context['utcoffset_form'] = utcoffset_form
context['site_form'] = site_form
context['variable_form'] = variable_form
context['method_form'] = method_form
context['processing_level_form'] = processing_level_form
context['timeseries_result_form'] = timeseries_result_form
return context
def _get_series_label(series_id, resource):
label = "{site_code}:{site_name}, {variable_code}:{variable_name}, {units_name}, " \
"{pro_level_code}, {method_name}"
site = [site for site in resource.metadata.sites if series_id in site.series_ids][0]
variable = [variable for variable in resource.metadata.variables if
series_id in variable.series_ids][0]
method = [method for method in resource.metadata.methods if series_id in method.series_ids][0]
pro_level = [pro_level for pro_level in resource.metadata.processing_levels if
series_id in pro_level.series_ids][0]
ts_result = [ts_result for ts_result in resource.metadata.time_series_results if
series_id in ts_result.series_ids][0]
label = label.format(site_code=site.site_code, site_name=site.site_name,
variable_code=variable.variable_code,
variable_name=variable.variable_name, units_name=ts_result.units_name,
pro_level_code=pro_level.processing_level_code,
method_name=method.method_name)
return label
def _get_element_update_form_action(element_name, resource_id, element_id):
action = "/hsapi/_internal/{}/{}/{}/update-metadata/"
return action.format(resource_id, element_name, element_id)
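# For illustration, the formatted action URL looks like (values below are hypothetical):
#   _get_element_update_form_action("site", "abc123", 42)
#   -> "/hsapi/_internal/abc123/site/42/update-metadata/"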
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Style related tests"""
from pygal import Line
from pygal.style import (
LightStyle,
LightenStyle, DarkenStyle, SaturateStyle, DesaturateStyle, RotateStyle
)
STYLES = LightenStyle, DarkenStyle, SaturateStyle, DesaturateStyle, RotateStyle
def test_parametric_styles():
"""Test that no parametric produce the same result"""
chart = None
for style in STYLES:
line = Line(style=style('#f4e83a'))
line.add('_', [1, 2, 3])
line.x_labels = 'abc'
new_chart = line.render()
assert chart != new_chart
chart = new_chart
def test_parametric_styles_with_parameters():
"""Test a parametric style with parameters"""
line = Line(style=RotateStyle(
'#de3804', step=12, max_=180, base_style=LightStyle))
line.add('_', [1, 2, 3])
line.x_labels = 'abc'
assert line.render()
|
from queue import Queue, Empty, Full
import logging
logger = logging.getLogger(__name__)
#own classes
from networking import Connection
from networking import Message
from .router_base import RouterBase
class GroupRouter(RouterBase):
participating_in_groups = set()
def __init__(self, node_id, queue, router):
super(GroupRouter, self).__init__(node_id, queue)
self.data_queues = {}
self.outgoing_timers = {}
self.unbound_connections = {}
self.groups = {}
self.router = router
def stop(self):
logger.warning("Stopping group router!")
GroupRouter.participating_in_groups = set()
super(GroupRouter, self).stop()
logger.warning("Group router successfully stopped!");
def publish(self, channel, data):
if channel not in self.outgoing_timers:
return False # we are not in any group for this channel
self._call_command({"_command": "GroupRouter__publish", "channel": channel, "data": data})
return True # data will be published
def create_group(self, channel, ip_list, interval):
self._call_command({"_command": "GroupRouter__create_group", "channel": channel, "ip_list": ip_list, "interval": interval})
def __dump_state(self):
return {
"outgoing_timers": self.outgoing_timers,
"unbound_connections": self.unbound_connections,
"groups": self.groups
}
def _add_connection_command(self, command):
con = command["connection"]
peer_ip = con.get_peer_ip()
self.connections[peer_ip] = con
# try to initialize all known groups
for channel in list(self.unbound_connections.keys()):
self._init_group(channel)
def _remove_connection_command(self, command):
con = command["connection"]
peer_ip = con.get_peer_ip()
if peer_ip in list(self.connections.keys()):
del self.connections[peer_ip]
def _init_group(self, channel):
interval = self.unbound_connections[channel]["interval"]
# test if all connections for this channel are established
all_established = True
for con in self.unbound_connections[channel]["connections"]:
if con.connection_state != "ESTABLISHED":
all_established = False
break
if all_established:
logger.info("All group connections for channel '%s' established, sending out init messages..." % channel)
msg = Message("%s_init" % self.__class__.__name__, {
"channel": channel,
"interval": self.unbound_connections[channel]["interval"],
})
for con in list(self.unbound_connections[channel]["connections"]):
logger.info("Sending group init for channel '%s' to %s..." % (channel, str(con)))
con.send_msg(msg)
self.groups[channel] = {
"connections": self.unbound_connections[channel]["connections"],
"received": {},
}
del self.unbound_connections[channel]
def _route_data(self, msg, incoming_connection=None):
if msg.get_type().endswith("_init"):
return self._route_init(msg, incoming_connection)
elif msg.get_type().endswith("_deinit"):
return self._route_deinit(msg, incoming_connection)
elif msg.get_type().endswith("_data"):
return self._route_real_data(msg, incoming_connection)
def _route_init(self, msg, incoming_connection):
logger.info("Routing init: %s coming from %s..." % (str(msg), str(incoming_connection)))
command = {"_command": "GroupRouter__publish_data", "channel": msg["channel"], "interval": msg["interval"], "connection": incoming_connection}
self._call_command(command)
    def _route_deinit(self, msg, incoming_connection):
        logger.info("Routing deinit: %s coming from %s..." % (str(msg), str(incoming_connection)))
        if msg["channel"] not in self.outgoing_timers:  # ignore unknown channels
            logger.info("Ignoring unknown outgoing_timers channel '%s'..." % str(msg["channel"]))
            return
        self._abort_timer(self.outgoing_timers[msg["channel"]]["timer"])
        del self.outgoing_timers[msg["channel"]]
        del self.data_queues[msg["channel"]]
def _route_real_data(self, msg, incoming_connection):
logger.info("Routing data: %s coming from %s..." % (str(msg), str(incoming_connection)))
if msg["channel"] not in self.groups: # ignore unknown groups
logger.info("Ignoring unknown group channel '%s'..." % str(msg["channel"]))
return
# add message to received list
if msg["counter"] not in self.groups[msg["channel"]]["received"]:
self.groups[msg["channel"]]["received"][msg["counter"]] = {}
self.groups[msg["channel"]]["received"][msg["counter"]][incoming_connection.get_peer_ip()] = msg
# check if we received all messages for any known (incomplete) counter
for counter in list(self.groups[msg["channel"]]["received"].keys()):
if len(self.groups[msg["channel"]]["received"][counter]) == len(self.groups[msg["channel"]]["connections"]):
logger.info("Received round %s for channel '%s', publishing data on behalf of group members..." % (str(counter), str(msg["channel"])))
for data_msg in self.groups[msg["channel"]]["received"][counter].values():
# extract all data messages and publish them
if data_msg["type"] == "data":
                        self.router.publish(data_msg["channel"], data_msg["data"])
del self.groups[msg["channel"]]["received"][counter] # counter was complete --> remove it from list
def __publish_data_command(self, command):
        # reschedule this command
if command["channel"] in self.outgoing_timers:
self._abort_timer(self.outgoing_timers[command["channel"]]["timer"])
self.outgoing_timers[command["channel"]] = {
# increment counter by one starting with zero
"counter": self.outgoing_timers[command["channel"]]["counter"] + 1 if command["channel"] in self.outgoing_timers else 0,
# create new timer
"timer": self._add_timer(command["interval"], command)
}
# send data to RS
try:
if command["channel"] not in self.data_queues: # treat non existent queue as empty
raise Empty
data = self.data_queues[command["channel"]].get(False)
self.data_queues[command["channel"]].task_done()
logger.info("Sending out data message for round %s in channel '%s'..." % (str(self.outgoing_timers[command["channel"]]["counter"]), str(command["channel"])))
msg = Message("%s_data" % self.__class__.__name__, {
"type": "data",
"channel": command["channel"],
"data": data,
"counter": self.outgoing_timers[command["channel"]]["counter"],
})
except Empty as err:
logger.info("Sending out dummy message for round %s in channel '%s'..." % (str(self.outgoing_timers[command["channel"]]["counter"]), str(command["channel"])))
msg = Message("%s_data" % self.__class__.__name__, {
"type": "dummy",
"channel": command["channel"],
"data": "DEADBEEF",
"counter": self.outgoing_timers[command["channel"]]["counter"],
})
command["connection"].send_msg(msg)
def __publish_command(self, command):
if command["channel"] not in self.data_queues:
self.data_queues[command["channel"]] = Queue()
self.data_queues[command["channel"]].put(command["data"])
def __create_group_command(self, command):
        # deinit the old group if necessary
if command["channel"] in self.groups:
msg = Message("%s_deinit" % self.__class__.__name__, {
"channel": command["channel"],
})
for con in self.groups[command["channel"]]["connections"]:
con.send_msg(msg)
# create connections and add them to self.unbound_connections (init message will be sent when all those connections are established)
self.unbound_connections[command["channel"]] = {"interval": command["interval"], "connections": set()}
for ip in command["ip_list"]:
if ip in self.connections:
con = self.connections[ip] # reuse connections
else:
con = Connection.connect_to("group", ip)
self.unbound_connections[command["channel"]]["connections"].add(con)
self._init_group(command["channel"])
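# A minimal usage sketch, assuming a node id, a command queue and a base router are already
# set up elsewhere (the names below are hypothetical and not part of this module):
#
#   group_router = GroupRouter("node-1", command_queue, base_router)
#   group_router.create_group("sensors", ["10.0.0.2", "10.0.0.3"], interval=5)
#   if not group_router.publish("sensors", {"temperature": 21.5}):
#       pass  # publish() returns False until a group init for this channel has been routed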
|
"""Circle package main functions."""
import math
import tkinter
def calculate_circumference(radius: float):
if radius < 0:
raise ValueError("Radius must be positive!")
return radius * 2 * math.pi
def calculate_area(radius: float):
if radius < 0:
raise ValueError("Radius must be positive!")
return radius ** 2 * math.pi
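# Quick sanity check of the formulas above (circumference = 2*pi*r, area = pi*r**2):
# >>> round(calculate_circumference(1.0), 5)
# 6.28319
# >>> round(calculate_area(2.0), 5)
# 12.56637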
def circle_data_gui():
window = tkinter.Tk()
window.title("Circle Calculations")
canvas = tkinter.Canvas(window, height=150, width=500, bg="#c2eaee")
canvas.pack()
radius = tkinter.Label(window, text="Radius: ", bg="#c2eaee")
canvas.create_window(180, 25, window=radius)
radius_input = tkinter.Entry(window, width=15)
radius_input.focus()
canvas.create_window(280, 25, window=radius_input)
circumference = tkinter.Label(window, text="Circumference: ", bg="#c2eaee")
canvas.create_window(250, 100, window=circumference)
area = tkinter.Label(window, text="Area: ", bg="#c2eaee")
canvas.create_window(250, 120, window=area)
def get_data():
try:
circint = calculate_circumference(float(radius_input.get()))
areaint = calculate_area(float(radius_input.get()))
circumference.configure(text="Circumference: " + str(circint))
area.configure(text="Area: " + str(areaint))
except ValueError:
error = tkinter.Toplevel()
error.title("ValueError")
error_canvas = tkinter.Canvas(error, height=100, width=400, bg="#c2eaee")
error_canvas.pack()
error_label = tkinter.Label(error, text="Radius must be a positive number!",
bg="#c2eaee", fg="red", font=("TKDefaultFont", 20))
error_canvas.create_window(200, 50, window=error_label)
button = tkinter.Button(text="Calculate", command=get_data)
canvas.create_window(250, 60, window=button)
window.mainloop()
|
from onmt.modules.Transformer.Models import TransformerEncoder, TransformerDecoder, Transformer
# For flake8 compatibility.
__all__ = ["TransformerEncoder", "TransformerDecoder", "Transformer"]
|
from multiprocessing import Process, Queue, Value
from yaml import safe_load
import queue
import time
import sys
def mqtt_proc(q, mqtt_basetopic, mqtt_host, mqtt_port, mqtt_user, mqtt_pass):
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
if rc == 0:
print("[+] MQTT connection successful")
mqttc.subscribe("%s#" % (mqtt_basetopic,), 0)
else:
print("[-] MQTT connection failed: "+mqtt.connack_string(rc))
time.sleep(5.0)
def on_subscribe(mqttc, obj, mid, granted_qos):
print("[+] Subscribed to MQTT with QoS: " + str(granted_qos[0]))
def on_message(mqttc, obj, msg):
try:
#Both values must be representable as integers
objid = int(msg.topic.rpartition('/')[2])
value = int(msg.payload)
#Payload must be representable as an integer between 0 and 255
if value < 0 or value > 255:
raise ValueError
except ValueError:
print("[-] Invalid MQTT message received: topic \"%s\", payload \"%s\"" % (msg.topic, str(msg.payload)))
return
print("Set object %s to value %s" % (objid, value))
try:
q.put_nowait((objid,value))
except queue.Full:
print("[-] Message queue full, discarding message!")
mqttc = mqtt.Client()
if mqtt_user and mqtt_pass:
mqttc.username_pw_set(mqtt_user, mqtt_pass)
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_subscribe = on_subscribe
while(True):
try:
mqttc.connect(mqtt_host, mqtt_port, 60)
mqttc.loop_forever(retry_first_connection=True)
except OSError:
print("[-] OSError during MQTT connection, retrying in 10 sec.")
time.sleep(10.0)
def csrmesh_proc(q, wdt_val, pin, mac_list):
import csrmesh as cm
from bluepy import btle
backoff = False
while(True):
        #Skip on 1st iteration, otherwise wait 5 sec before retrying
if backoff:
time.sleep(5.0)
backoff = True
#Try to make a connection.
#Also keep the watchdog timer from counting up
with wdt_val.get_lock():
csrconn = cm.gatt.connect(mac_list,True)
#We failed to connect, retry
if not csrconn:
continue
print("[+] Connected to mesh")
#For as long as we are connected, reset the WDT, and try to get and deliver a message
while(True):
with wdt_val.get_lock():
wdt_val.value = 0
try:
oid, value = q.get(block=True, timeout=5)
except queue.Empty:
                #Send a no-op command to make sure the connection is still alive
cm.gatt.send_packet(csrconn,0,b'00')
continue
res = cm.lightbulb.set_light(csrconn,pin,value,value,value,value,oid,True)
if not res:
#Failed, so reconnect
cm.gatt.disconnect(csrconn)
break
if __name__== '__main__':
conf = None
#Load config file
    if len(sys.argv) != 2:
        print("Usage: %s <config file>" % sys.argv[0])
        sys.exit(1)
    with open(sys.argv[1],'r') as f:
        conf = safe_load(f)
#Forever
while(1):
#Startup
q = Queue()
wdt_val = Value('i',0)
mqtt_phandle = Process(target=mqtt_proc, args=(q,conf['mqtt']['basetopic'],
conf['mqtt']['host'],
conf['mqtt']['port'],
conf['mqtt']['user'],
conf['mqtt']['pass']))
mqtt_phandle.daemon = True
csr_phandle = Process(target=csrmesh_proc, args=(q,wdt_val, conf['csrmesh']['pin'], conf['csrmesh']['mac_list']))
csr_phandle.daemon = True
#Start MQTT and csrmesh processes
print("[+] Initializing MQTT to CSRMesh gateway")
mqtt_phandle.start()
csr_phandle.start()
#Bluepy hangs when the device disappears, so restart if the WDT stops being reset
while(wdt_val.value < 3):
with wdt_val.get_lock():
wdt_val.value += 1
time.sleep(10.0)
#Terminate processes, and reinitialize the whole system
print("[!] bluepy unresponsive, reloading...")
csr_phandle.terminate()
mqtt_phandle.terminate()
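# A hypothetical YAML config sketch matching the keys read above (all values are placeholders;
# adjust to your broker and mesh):
#
#   mqtt:
#     basetopic: "csrmesh/"
#     host: "localhost"
#     port: 1883
#     user: null
#     pass: null
#   csrmesh:
#     pin: "0000"
#     mac_list: ["00:11:22:33:44:55"]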
|
import serial
ser = serial.Serial('/dev/ttyACM1',9600)
ser.close()
ser.open()
while True:
data = ser.readline()
print(data.decode())
|
#!/usr/bin/python
#coding=utf-8
#desc: take a short break and open some relaxing music
#author:gengkun123@gmail.com
import webbrowser
import time
time.sleep(2)
webbrowser.open("https://www.youtube.com/watch?v=kRaJ_9FFzBQ")
|
from unittest import TestCase
from src.core import get_message
class HelloworldTestCase(TestCase):
def test_helloworld(self):
self.assertEqual(get_message(), 'Hello World!')
|
import os  # Provides access to operating system commands and resources
print("#" * 60)
ip_or_host = input("Type the IP or host to be checked: ")
print("-" * 60)
os.system('ping -c 6 {}' .format(ip_or_host))  # -c -> sets the limit to the desired number of ping packets
print("-" * 60)
|
# Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared functions related to benchmarks.
"""
import logging
import time
from collections import OrderedDict
from contextlib import contextmanager
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
def print_timings(name: str, timings: List[float]) -> None:
"""
Format and print inference latencies.
:param name: inference engine name
:param timings: latencies measured during the inference
"""
mean_time = 1e3 * np.mean(timings)
std_time = 1e3 * np.std(timings)
min_time = 1e3 * np.min(timings)
max_time = 1e3 * np.max(timings)
median, percent_95_time, percent_99_time = 1e3 * np.percentile(timings, [50, 95, 99])
print(
f"[{name}] "
f"mean={mean_time:.2f}ms, "
f"sd={std_time:.2f}ms, "
f"min={min_time:.2f}ms, "
f"max={max_time:.2f}ms, "
f"median={median:.2f}ms, "
f"95p={percent_95_time:.2f}ms, "
f"99p={percent_99_time:.2f}ms"
)
def setup_logging(level: int = logging.INFO) -> None:
"""
Set the generic Python logger
:param level: logger level
"""
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=level)
@contextmanager
def track_infer_time(buffer: List[float]) -> None:
"""
A context manager to perform latency measures
:param buffer: a List where to save latencies for each input
"""
start = time.perf_counter()
yield
end = time.perf_counter()
buffer.append(end - start)
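# A minimal usage sketch combining track_infer_time() with print_timings() above
# (the sleep is a stand-in for real inference work):
#
#   timings: List[float] = []
#   for _ in range(10):
#       with track_infer_time(timings):
#           time.sleep(0.01)
#   print_timings(name="dummy", timings=timings)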
def generate_input(
seq_len: int, batch_size: int, input_names: List[str], device: str = "cuda"
) -> Tuple[Dict[str, torch.Tensor], Dict[str, np.ndarray]]:
"""
Generate dummy inputs.
:param seq_len: number of token per input.
:param batch_size: first dimension of the tensor
:param input_names: tensor input names to generate
:param device: where to store tensors (Pytorch only). One of [cpu, cuda]
:return: a tuple of tensors, Pytorch and numpy
"""
assert device in ["cpu", "cuda"]
shape = (batch_size, seq_len)
inputs_pytorch: OrderedDict[str, torch.Tensor] = OrderedDict()
for name in input_names:
inputs_pytorch[name] = torch.ones(size=shape, dtype=torch.int32, device=device)
inputs_onnx: Dict[str, np.ndarray] = {
k: np.ascontiguousarray(v.detach().cpu().numpy()) for k, v in inputs_pytorch.items()
}
return inputs_pytorch, inputs_onnx
def generate_multiple_inputs(
seq_len: int, batch_size: int, input_names: List[str], nb_inputs_to_gen: int, device: str
) -> Tuple[List[Dict[str, torch.Tensor]], List[Dict[str, np.ndarray]]]:
"""
Generate multiple random inputs.
:param seq_len: sequence length to generate
:param batch_size: number of sequences per batch to generate
:param input_names: tensor input names to generate
:param nb_inputs_to_gen: number of batches of sequences to generate
:param device: one of [cpu, cuda]
:return: generated sequences
"""
all_inputs_pytorch: List[Dict[str, torch.Tensor]] = list()
all_inputs_onnx: List[Dict[str, np.ndarray]] = list()
for _ in range(nb_inputs_to_gen):
inputs_pytorch, inputs_onnx = generate_input(
seq_len=seq_len, batch_size=batch_size, input_names=input_names, device=device
)
all_inputs_pytorch.append(inputs_pytorch)
all_inputs_onnx.append(inputs_onnx)
return all_inputs_pytorch, all_inputs_onnx
def compare_outputs(pytorch_output: List[torch.Tensor], engine_output: List[Union[np.ndarray, torch.Tensor]]) -> float:
"""
Compare 2 model outputs by computing the mean of absolute value difference between them.
:param pytorch_output: reference output
:param engine_output: other engine output
:return: difference between outputs as a single float
"""
if isinstance(pytorch_output[0], torch.Tensor):
pytorch_output = [t.detach().cpu().numpy() for t in pytorch_output]
pt_output = np.asarray(pytorch_output)
if isinstance(engine_output[0], torch.Tensor):
engine_output = [t.detach().cpu().numpy() for t in engine_output]
eng_output = np.asarray(engine_output)
return np.mean(np.abs(pt_output - eng_output))
|
import io_model
import numpy as np
from scipy import stats
import sequence
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
import torch
def get_loss(predictions, targets):
loss_function = torch.nn.BCELoss() # binary cross entropy
return loss_function(predictions, targets).item()
def get_learning_rate(predictions, inputs):
learning_rate = torch.empty_like(predictions)
n_time_steps = learning_rate.size(0)
prediction_prior = 0.5
learning_rate[0] = (predictions[0] - prediction_prior) / (inputs[0] - prediction_prior)
learning_rate[1:n_time_steps] = (predictions[1:n_time_steps] - predictions[0:n_time_steps-1]) / (inputs[1:n_time_steps] - predictions[0:n_time_steps-1])
return learning_rate
def get_confidence_from_sds(sds):
return -torch.log(sds)
def get_sds_from_confidence(confidence):
return torch.exp(-confidence)
def get_logits_from_ps(ps):
# logit = log-odds function
return torch.log(ps / (1. - ps))
def get_ps_from_logits(logits):
return logits.sigmoid()
def get_chance_loss(inputs, targets):
constant_outputs = torch.ones_like(inputs) * 0.5
return get_loss(constant_outputs, targets)
def get_io_loss(inputs, targets, gen_process):
io = io_model.IOModel(gen_process)
io_predictions = io.get_predictions(inputs)
return get_loss(io_predictions, targets)
def get_percent_value(values, ref_0, ref_100):
return (values - ref_0) / (ref_100 - ref_0) * 100.
def get_summary_stats(values, axis=0, ci_threshold=0.95):
n_values = values.shape[axis]
mean = values.mean(axis=axis)
sem = stats.sem(values, axis=axis)
ci_lower, ci_upper = \
stats.t.interval(ci_threshold, n_values - 1, loc=mean, scale=sem)
if np.any(sem == 0):
ci_lower[sem == 0] = mean[sem == 0]
ci_upper[sem == 0] = mean[sem == 0]
return dict(mean=mean, sem=sem, ci_lower=ci_lower, ci_upper=ci_upper)
def get_median_ci(values, ci_threshold=0.95):
# reference: https://github.com/minddrummer/median-confidence-interval/blob/master/Median_CI.py
    # requires a one-dimensional array
assert len(values.shape) == 1, "need 1D array"
N = values.shape[0]
data = np.sort(values)
low_count, up_count = stats.binom.interval(ci_threshold, N, 0.5, loc=0)
low_count -= 1 # indexing starts at 0
return data[int(low_count)], data[int(up_count)]
def stats_twoway_anova_table(data, iv1_name, iv2_name, dv_name, type=1, test="F"):
# Fit a two-way ANOVA model on the data
anova_formula = f"{dv_name} ~ C({iv1_name}) + C({iv2_name}) + C({iv1_name}):C({iv2_name})"
anova_model = ols(anova_formula, data=data).fit()
return anova_lm(anova_model, test=test, typ=type)
def cat_features(tensors):
return torch.cat(tensors, dim=2)
def get_vector(tensor):
"""
- input: torch.tensor(n_time_steps, n_samples, n_features)
- output: np.ndarray(n_time_steps * n_samples, n_features)
where:
output[0:n_samples, :] = input[0, 0:n_samples, :],
output[n_samples:2*n_samples,:] = input[1, 0:n_samples, :],
..."""
n_features = tensor.size(2) if len(tensor.shape) > 2 else 1
return tensor.reshape((-1, n_features)).numpy()
def get_tensor(vector, n_time_steps, n_samples):
"""
Inverse function of get_vector()
- input: np.ndarray(n_time_steps * n_samples, n_features)
- output: torch.tensor(n_time_steps, n_samples, n_features)
where:
output[0, 0:n_samples, :] = input[0:n_samples, :],
output[1, 0:n_samples, :] = input[n_samples:2*n_samples,:],
..."""
assert n_time_steps * n_samples == vector.shape[0], "Inconsistent sizes"
n_features = vector.shape[1] if len(vector.shape) > 1 else 1
return torch.tensor(vector).reshape(n_time_steps, n_samples, n_features)
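# Shape round-trip sketch for get_vector()/get_tensor() (shapes only, zeros as dummy data):
#
#   t = torch.zeros(4, 3, 2)                           # (n_time_steps, n_samples, n_features)
#   v = get_vector(t)                                  # np.ndarray of shape (12, 2)
#   t2 = get_tensor(v, n_time_steps=4, n_samples=3)    # back to torch.Size([4, 3, 2])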
def get_decoded_tensor(decoder, predictor_tensor):
n_time_steps, n_samples = predictor_tensor.size(0), predictor_tensor.size(1)
decoded_vector = decoder.predict(get_vector(predictor_tensor))
return get_tensor(decoded_vector, n_time_steps, n_samples)
def get_predictor_tensor_dict_from_activations(a):
# we include as predictors
# - hidden state at time t
# - hidden gates at time t+1
# - both (hidden state at time t, gates at time t+1)
# - output of network at time t (as a control)
# this requires discarding one time step:
# the first one for state, the last for gates
predictor_tensor_dict = {}
has_state = "hidden_state" in a
has_gates = "reset_gate" in a and "update_gate" in a
if has_state:
state_t = a["hidden_state"][:-1, :, :]
predictor_tensor_dict["hidden_state"] = state_t
if has_gates:
gates_tplus1 = cat_features((a["reset_gate"], a["update_gate"]))[1:, :, :]
predictor_tensor_dict["hidden_gates_tplus1"] = gates_tplus1
if has_state and has_gates:
hidden_all = cat_features((state_t, gates_tplus1))
predictor_tensor_dict["hidden_all"] = hidden_all
if "outputs" in a:
output_t = a["outputs"][:-1, :, :].detach()
predictor_tensor_dict["output"] = output_t
return predictor_tensor_dict
def get_predictor_vector_dict_from_activations(a):
predictor_tensor_dict = get_predictor_tensor_dict_from_activations(a)
predictor_vector_dict = {
key: get_vector(tensor) for key, tensor in predictor_tensor_dict.items()
}
return predictor_vector_dict
def get_decoder_target_vector_dict_from_io_outputs_inputs(io_outputs, inputs, gen_process):
decoder_target_tensor_dict = \
get_decoder_target_tensor_dict_from_io_outputs_inputs(io_outputs, inputs, gen_process)
decoder_target_vector_dict = {
key: get_vector(tensor) for key, tensor in decoder_target_tensor_dict.items()
}
return decoder_target_vector_dict
def get_decoder_target_tensor_dict_from_io_outputs_inputs(io_outputs, inputs, gen_process):
# we include as decoder targets:
# - bayes confidence at time t
# - bayes evidence at time t
# - bayes apparent learning rate at time t+1
# this requires discarding one time step:
# the first one for confidence and evidence, the last for learning rate
io_ps = io_outputs["mean"]
io_sds = io_outputs["sd"]
if gen_process.is_markov():
io_confidence = get_confidence_from_sds(io_sds)[:-1, :, :]
io_evidence = get_logits_from_ps(io_ps)[:-1, :, :]
io_lr_tplus1 = get_learning_rate(io_ps, inputs)[1:, :, :]
io_confidence_0 = io_confidence[:, :, 0]
io_confidence_1 = io_confidence[:, :, 1]
io_evidence_0 = io_evidence[:, :, 0]
io_evidence_1 = io_evidence[:, :, 1]
io_lr_tplus1_0 = io_lr_tplus1[:, :, 0]
io_lr_tplus1_1 = io_lr_tplus1[:, :, 1]
# the "relevant" estimate is the one that is conditional on the input at time t
# (i.e. the estimate that is used for prediction at time t and then is updated at time t+1)
conditional_inputs = inputs[:-1, :, :]
io_confidence_xt = sequence.get_relevant_estimates_given_inputs(io_confidence, conditional_inputs)
io_confidence_1minusxt = sequence.get_relevant_estimates_given_inputs(io_confidence, 1 - conditional_inputs)
io_evidence_xt = sequence.get_relevant_estimates_given_inputs(io_evidence, conditional_inputs)
io_evidence_1minusxt = sequence.get_relevant_estimates_given_inputs(io_evidence, 1 - conditional_inputs)
io_lr_tplus1_xt = sequence.get_relevant_estimates_given_inputs(io_lr_tplus1, conditional_inputs)
io_lr_tplus1_1minusxt = sequence.get_relevant_estimates_given_inputs(io_lr_tplus1, 1 - conditional_inputs)
decoder_target_tensor_dict = dict(
io_confidence_0=io_confidence_0,
io_confidence_1=io_confidence_1,
io_evidence_0=io_evidence_0,
io_evidence_1=io_evidence_1,
io_lr_tplus1_0=io_lr_tplus1_0,
io_lr_tplus1_1=io_lr_tplus1_1,
io_confidence_xt=io_confidence_xt,
io_confidence_1minusxt=io_confidence_1minusxt,
io_evidence_xt=io_evidence_xt,
io_evidence_1minusxt=io_evidence_1minusxt,
io_lr_tplus1_xt=io_lr_tplus1_xt,
io_lr_tplus1_1minusxt=io_lr_tplus1_1minusxt
)
else:
io_confidence = get_confidence_from_sds(io_sds)[:-1, :, :]
io_evidence = get_logits_from_ps(io_ps)[:-1, :, :]
io_lr_tplus1 = get_learning_rate(io_ps, inputs)[1:, :, :]
decoder_target_tensor_dict = dict(io_confidence=io_confidence,
io_evidence=io_evidence,
io_lr_tplus1=io_lr_tplus1)
return decoder_target_tensor_dict
|
import sqlite3 as sql
DB_PATH = "dispositivo.db"
def createDB():
conn = sql.connect(DB_PATH)
conn.commit()
conn.close()
def createTable():
conn = sql.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE dispositivo (
id interger,
nombre_de_equipo text,
tipodispositivoId text,
fecha_de_alta text,
fecha_de_actualización text,
potencia_actual interger,
statusDispositivoId text
)"""
)
conn.commit()
conn.close()
def addValues():
conn = sql.connect(DB_PATH)
cursor = conn.cursor()
data = [
(1, "1", "Celda", "1/1/2021", "2/2/2021", 1, "en mantenimiento"),
(2, "2", "aerogenerador", "1/1/2021", "2/2/2021", 1, "En operación"),
(3, "3", "turbina hidroeléctrica", "1/1/2021", "2/2/2021", 1, "en mantenimiento"),
(4, "4", "Celda", "1/1/2021", "2/2/2021", 1, "En operación"),
]
cursor.executemany("""INSERT INTO dispositivo VALUES (?, ?, ?)""", data)
cursor.commit()
cursor.close()
if __name__ == "__main__":
createDB()
##createTable()
addValues()
|
import tkinter as tk
import widgets
import client
import struct
def update(conn, root, plot, ch0, ch1, prev_init):
data = conn.send_recv(0x01)
if data is None: return
fields = struct.unpack('>ffB', data)
print(fields)
if fields[2]:
if not prev_init:
plot.clear()
plot.put(ch0, fields[0])
plot.put(ch1, fields[1])
root.after(100, update, conn, root, plot, ch0, ch1, fields[2])
def main():
conn = client.Connection('192.168.43.1', 17777)
conn.connect()
root = tk.Tk()
plot = widgets.Plot1D(root, 700, 300, 500, 0, 10)
plot.grid(column=0, row=0)
ch0 = plot.add_channel((255, 0, 0))
ch1 = plot.add_channel((0, 255, 0))
root.after(0, update, conn, root, plot, ch0, ch1, 0)
root.mainloop()
if __name__ == '__main__':
main()
|
from utils import test
def mergesort(l):
""" Implementation of the mergesort algorithm, not in-place
Sorts list l by the following steps:
1. Look at list l. If the length is less than 2 (either 1 or 0), return the
list, as it must be sorted.
2. If the list is longer than two, split it in half, and sort each half
independently starting at step 1.
3. Merge the two halves via the included merge function
4. The final, merged list will be sorted.
"""
if len(l) < 2:
return l
else:
midpoint = int(len(l)/2)
left = l[:midpoint]
right = l[midpoint:]
left = mergesort(left)
right = mergesort(right)
return merge(left, right)
def merge(left, right):
""" Merge two lists by the following method:
1. Begin iterating through both halves simultaneously
2. Compare the currently indexed value of each half
3. Add the smaller value to the final, merged list, increment
"""
worklist = []
lindex = 0
rindex = 0
while lindex < len(left) and rindex < len(right):
if left[lindex] > right[rindex]:
worklist.append(right[rindex])
rindex += 1
else:
worklist.append(left[lindex])
lindex += 1
# It is necessary to append the remainder of the lists to the end of the
# worklist because we used `and` instead of `or` for the loop's
# conditional. Using `or` would be more concise, but less efficient.
worklist += left[lindex:]
worklist += right[rindex:]
return worklist
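# Small usage sketch for the two functions above:
# >>> mergesort([5, 2, 9, 1])
# [1, 2, 5, 9]
# >>> merge([1, 4], [2, 3])
# [1, 2, 3, 4]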
if __name__ == "__main__":
test(mergesort)
|
from handlers import base_handlers
from models import User
class HandleLogin(base_handlers.BaseAction):
def handle_post(self, google_user):
user = User.query(ancestor=User.PARENT_KEY).filter(User.email == google_user.email().lower()).get()
if not user:
newUser = User(parent=User.PARENT_KEY,
name=google_user.nickname(),
email=google_user.email().lower(),
groups=[])
newUser.put()
self.redirect("/groups")
elif self.request.get("group-key"):
user.name = google_user.nickname()
user.put()
self.redirect('/groups?group-key='+self.request.get("group-key"))
else:
self.redirect("/groups")
|
try:
from typing import Dict, List, Optional, Tuple
from integral_timber_joints.process import RFLPathPlanner, RobotClampAssemblyProcess
except:
pass
import itertools
from copy import deepcopy
from compas.geometry import Frame
from integral_timber_joints.assembly import Assembly, BeamAssemblyMethod
from integral_timber_joints.process.action import *
from integral_timber_joints.process.dependency import ComputationalResult
from integral_timber_joints.process.movement import *
from integral_timber_joints.process.state import SceneState
from integral_timber_joints.tools import Clamp, Gripper, Tool
######################################################
# Beam level functions - Assigning tool_type / tool_id
######################################################
def assign_tool_type_to_joints(process, beam_id, verbose=False):
# type: (RobotClampAssemblyProcess, str, Optional[bool]) -> ComputationalResult
"""Assign tool_types to joints based on the
    (priority 1) the user's preference or
    (priority 2) the joint's preference.
    If the requested tools are not available or there are not enough of them, no tool type is assigned.
State Change
------------
This functions sets the joint attribute `tool_type`
Return
------
    `ComputationalResult.ValidCannotContinue` if no suitable clamp is found
`ComputationalResult.ValidCanContinue` otherwise (this function should not fail)
"""
# Loop through all the beams and look at their previous_built neighbour.
assembly = process.assembly
something_failed = False
something_changed = False
assembly_method = assembly.get_assembly_method(beam_id)
joint_ids = assembly.get_joint_ids_with_tools_for_beam(beam_id)
if len(joint_ids) == 0:
return ComputationalResult.ValidNoChange
def vprint(str):
if verbose:
print(str)
vprint("Beam %s (%s). Joints that need assignment: %s" % (beam_id, BeamAssemblyMethod.value_to_names_dict[assembly_method], joint_ids))
# * Function to set the assignment results back to Joint Attributes
def _set_assignments_to_joints(joint_ids, assignment):
# type: (List[Tuple[str, str]], List[Tuple[str, str]]) -> None
for (joint_id, (tool_type, tool_id)) in zip(joint_ids, assignment):
assembly.set_joint_attribute(joint_id, 'tool_type', tool_type)
assembly.set_joint_attribute(joint_id, 'tool_id', tool_id)
# * For SCREWED_WITHOUT_GRIPPER, put the grasping joint first
if assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
grasping_joint_id = process.assembly.get_grasping_joint_id(beam_id)
joint_ids.remove(grasping_joint_id)
joint_ids.insert(0, grasping_joint_id)
# * (Priority 1) Try assign according to user preference + joint request
type_requests = []
for joint_id in joint_ids:
preference = assembly.get_joint_attribute(joint_id, 'tool_type_preference')
tool_type_request_from_joint = process.assembly.joint(joint_id).assembly_tool_types(assembly_method)
if preference is not None:
if preference in tool_type_request_from_joint:
type_requests.append([preference])
else:
vprint("Warning: tool_type_preference: %s for Joint %s is no longer valid. Valid options are %s" % (preference, joint_id, tool_type_request_from_joint))
type_requests.append(tool_type_request_from_joint)
else:
type_requests.append(tool_type_request_from_joint)
assignment = assign_tool_types_by_formulated_request(process, type_requests)
vprint("Type Request with user preference: %s" % type_requests)
vprint("Assignment with user preference: %s" % assignment)
if assignment is not None:
_set_assignments_to_joints(joint_ids, assignment)
return ComputationalResult.ValidCanContinue
# (Priority 2) Ignore User Preference use joint.assembly_tool_types only
type_requests = [process.assembly.joint(joint_id).assembly_tool_types(assembly_method) for joint_id in joint_ids]
assignment = assign_tool_types_by_formulated_request(process, type_requests)
vprint("Assignment with only joint reequest: %s" % assignment)
if assignment is not None:
_set_assignments_to_joints(joint_ids, assignment)
return ComputationalResult.ValidCanContinue
else:
[assembly.set_joint_attribute(joint_id, 'tool_type', None) for joint_id in joint_ids]
return ComputationalResult.ValidCannotContinue
def assign_tool_types_by_formulated_request(process, joints_tool_types_request):
# type: (RobotClampAssemblyProcess, List[List[str]]) -> List[Tuple[str, str]]
"""Assign the tool_types to the request according to a given list of possible tool_types.
    `joints_tool_types_request` is a list (per joint) of lists (of tool_type).
Returns a list of tuple containing assigned `tool_types` and `tool_id` in the same order of the original `joints_tool_types_request`
If request tools are not available or not enough, return None.
"""
if any([len(request) == 0 for request in joints_tool_types_request]):
return None
available_tools = process.get_available_tool_type_dict()
assigned_tool_types = []
def assign_one_joint(possible_types):
"""Returns True of success."""
for tool_type in possible_types:
if tool_type in available_tools and len(available_tools[tool_type]) > 0:
assigned_tool_types.append((tool_type, available_tools[tool_type].pop(0)))
return True
return False # None of the possible type are available
# * Assigning to all the joints
for tool_types_request in joints_tool_types_request:
if not assign_one_joint(tool_types_request):
return None
    # If all assignments succeeded
return assigned_tool_types
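# A shape-only sketch of the request/assignment format described in the docstring above
# (the tool type names and ids below are hypothetical; real availability comes from
# process.get_available_tool_type_dict()):
#
#   joints_tool_types_request = [["CL3", "CL3M"], ["SL1"]]     # one inner list per joint
#   # a successful call returns e.g. [("CL3", "c1"), ("SL1", "s1")];
#   # it returns None if any joint cannot be served by the available tools.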
if __name__ == "__main__":
pass
|
from enum import Enum
import attr
class PgObjectType(Enum):
"""PostgreSQL object type."""
TABLE = 'TABLE'
SEQUENCE = 'SEQUENCE'
FUNCTION = 'FUNCTION'
LANGUAGE = 'LANGUAGE'
SCHEMA = 'SCHEMA'
DATABASE = 'DATABASE'
TABLESPACE = 'TABLESPACE'
TYPE = 'TYPE'
FOREIGN_DATA_WRAPPER = 'FOREIGN DATA WRAPPER'
FOREIGN_SERVER = 'FOREIGN SERVER'
FOREIGN_TABLE = 'FOREIGN TABLE'
LARGE_OBJECT = 'LARGE OBJECT'
@attr.s(slots=True)
class Privileges:
"""Stores information from a parsed privilege string.
.. seealso:: :func:`~.parse.parse_acl_item`
"""
grantee = attr.ib()
grantor = attr.ib()
privs = attr.ib(factory=list)
privswgo = attr.ib(factory=list)
def as_grant_statements(self, type_: PgObjectType, target, **kwargs):
"""Return array of :func:`~.sql.grant` statements that can be executed
to grant these privileges. Refer to the function documentation for the
meaning of `target` and additional keyword arguments.
.. note:: This requires installing with the ``sqlalchemy`` extra.
"""
from .sql import grant
statements = []
if self.privs:
statements.append(
grant(self.privs, type_, target, self.grantee, **kwargs))
if self.privswgo:
statements.append(grant(
self.privswgo, type_, target, self.grantee, grant_option=True,
**kwargs))
return statements
def as_revoke_statements(self, type_: PgObjectType, target, **kwargs):
"""Return array of :func:`~.sql.revoke` statements that can be executed
to revoke these privileges. Refer to the function documentation for the
meaning of `target` and additional keyword arguments.
.. note::
The statement for the `privswgo` privileges will revoke them
fully, not only their grant options.
.. note:: This requires installing with the ``sqlalchemy`` extra.
"""
from .sql import revoke
statements = []
if self.privs:
statements.append(
revoke(self.privs, type_, target, self.grantee, **kwargs))
if self.privswgo:
statements.append(revoke(
self.privswgo, type_, target, self.grantee, **kwargs))
return statements
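# A minimal usage sketch (the privilege names are illustrative; building the statements
# requires installing with the ``sqlalchemy`` extra):
#
#   p = Privileges(grantee='alice', grantor='postgres',
#                  privs=['SELECT'], privswgo=['UPDATE'])
#   p.as_grant_statements(PgObjectType.TABLE, 'my_table')
#   # -> one GRANT for SELECT plus one GRANT ... WITH GRANT OPTION for UPDATE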
@attr.s(slots=True)
class RelationInfo:
"""Holds object information and privileges as queried using the
:mod:`.query` submodule."""
#: Row identifier.
oid = attr.ib()
#: Name of the table, sequence, etc.
name = attr.ib()
#: Owner of the relation.
owner = attr.ib()
#: Access control list.
acl = attr.ib()
@attr.s(slots=True)
class SchemaRelationInfo(RelationInfo):
"""Holds object information and privileges as queried using the
:mod:`.query` submodule."""
#: The name of the schema that contains this relation.
schema = attr.ib()
@attr.s(slots=True)
class FunctionInfo(SchemaRelationInfo):
"""Holds object information and privileges as queried using the
:mod:`.query` submodule."""
#: Data types of the function arguments.
arg_types = attr.ib()
@attr.s(slots=True)
class ColumnInfo:
"""Holds object information and privileges as queried using the
:mod:`.query` submodule."""
#: Table identifier.
table_oid = attr.ib()
#: The name of the schema that contains the table.
schema = attr.ib()
#: Name of the table.
table = attr.ib()
#: Name of the column.
column = attr.ib()
#: Owner of the table.
owner = attr.ib()
#: Column access control list.
acl = attr.ib()
|
"""Changement objectif
Revision ID: b64307cd3c69
Revises: 39ed6dfe1eff
Create Date: 2021-07-22 14:55:56.245601
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b64307cd3c69'
down_revision = '39ed6dfe1eff'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
UPDATE recommandation
SET objectif = 'Aujourd’hui votre recommmandation est un conseil pratique pour ' || objectif
WHERE objectif != '';
""")
def downgrade():
pass
|
#!/usr/bin/env python
import os, sys, subprocess
is_master_commit = (
os.environ["GITHUB_REPOSITORY"] == "lihaoyi/mill" and
os.environ["GITHUB_REF"].endswith("/master")
)
if is_master_commit:
subprocess.check_call(sys.argv[1:])
|
class Solution:
"""
@param s: The first string
    @param t: The second string
@return true or false
"""
def anagram(self, s, t):
# write your code here
return sorted(s) == sorted(t)
|
# -*- coding: utf-8 -*-
"""
(Description)
Created on Dec 20, 2013
"""
from ce1sus.db.classes.definitions import AttributeHandler
from ce1sus.db.common.broker import BrokerBase
__author__ = 'Weber Jean-Paul'
__email__ = 'jean-paul.weber@govcert.etat.lu'
__copyright__ = 'Copyright 2013, GOVCERT Luxembourg'
__license__ = 'GPL v3+'
class AttributeHandlerBroker(BrokerBase):
"""
Attribute handler broker
"""
def get_broker_class(self):
"""
overrides BrokerBase.get_broker_class
"""
return AttributeHandler
|
# -*- coding: utf-8 -*-
import markdown
MD = markdown.Markdown(extensions=[
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
])
class TagStripper(object):
def __init__(self, tag):
self._tag0 = '<{0}>'.format(tag)
self._tag1 = '</{0}>'.format(tag)
self._len_tag0 = len(self._tag0)
self._len_tag1 = len(self._tag1)
def strip(self, text):
if text.startswith(self._tag0) and text.endswith(self._tag1):
return text[self._len_tag0:-self._len_tag1]
else:
return text
TS = TagStripper('p')
# Used outside this module
md_convert = MD.convert
def md_iconvert(txt):
return TS.strip(MD.convert(txt))
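# Expected difference between the two helpers (illustrative):
#
#   md_convert("**hi**")    # -> '<p><strong>hi</strong></p>'
#   md_iconvert("**hi**")   # -> '<strong>hi</strong>'  (outer <p>...</p> stripped)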
|
def encryption(text, key):
    # Caesar cipher: shift each letter by `key` positions, leaving spaces unchanged
    result = ""
    for word in text:
        if word.isupper():
            result += chr((ord(word) + key - 65) % 26 + 65)
        elif word == " ":
            result += " "
        else:
            result += chr((ord(word) + key - 97) % 26 + 97)
    print(result)
def decryption(ciphertext, key):
if(len(ciphertext)==len(key)):
plaintext = []
for i in range(len(ciphertext)):
x = (ord(ciphertext[i]) - ord(key[i]) + 26) % 26
x = x+65
plaintext.append(chr(x))
return("" . join(plaintext))
else:
print("key is not equal to the size of ciphertext")
text = "PHHW PH DIWHU WKH WRJD SDUWB"
key = -3
encryption(text,key)
ciphertext = "ZICVTWQNGKZEIIGASXSTSLVVWLA"
key = "DECEPTIVEWEAREDISCOVEREDSAV"
print("Plaintext :",decryption(ciphertext, key))
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
#from avaliacao import views
from . import views
urlpatterns = patterns('',
    url(r'^$', views.index, name='home'),
)
|
from django.db import models
class Payment(models.Model):
IDR = 'IDR'
USD = 'USD'
CURRENCY_CHOICES = (
(USD, 'USD'),
(IDR, 'IDR')
)
amount = models.DecimalField(decimal_places=2, max_digits=10)
currency = models.CharField(max_length=10, choices=CURRENCY_CHOICES)
|
$NetBSD: patch-myhdl___always__seq.py,v 1.1 2018/12/27 16:01:54 joerg Exp $
--- myhdl/_always_seq.py.orig 2018-12-25 21:39:40.951802739 +0000
+++ myhdl/_always_seq.py
@@ -45,7 +45,7 @@ _error.EmbeddedFunction = "embedded func
class ResetSignal(_Signal):
- def __init__(self, val, active, async):
+ def __init__(self, val, active, is_async):
""" Construct a ResetSignal.
This is to be used in conjunction with the always_seq decorator,
@@ -53,7 +53,7 @@ class ResetSignal(_Signal):
"""
_Signal.__init__(self, bool(val))
self.active = bool(active)
- self.async = async
+ self.is_async = is_async
def always_seq(edge, reset):
@@ -91,8 +91,8 @@ class _AlwaysSeq(_Always):
if reset is not None:
self.genfunc = self.genfunc_reset
active = self.reset.active
- async = self.reset.async
- if async:
+ is_async = self.reset.is_async
+ if is_async:
if active:
senslist.append(reset.posedge)
else:
|
import random
from typing import List
import hydra.utils
import torch
from torch import nn
import utils
from losses import SupConLoss
from nets import LinearClassifier
class Encoder(nn.Module):
def __init__(self, cfg):
super(Encoder, self).__init__()
self.cfg = cfg
self._encoders = nn.ModuleList(
[
hydra.utils.instantiate(self.cfg.encoder, in_channel=depth) for depth in
self.cfg.colorspace.view_depths
]
)
self._optimizer = torch.optim.Adam(self._encoders.parameters(), lr=self.cfg.lr, betas=(self.cfg.beta_1, self.cfg.beta_2))
self._criterion = SupConLoss(
contrast_mode='all' if self.cfg.full_graph else 'one',
temperature=self.cfg.temperature,
base_temperature=self.cfg.temperature
)
def output_dim(self, layer):
return self._encoders[0].output_dim(layer) * len(self._encoders)
def forward(self, x, layer):
return self.encode(x, layer, concat=True)
def encode(self, x: torch.Tensor, layer=None, concat=False):
views_list = list(torch.split(x, list(self.cfg.colorspace.view_depths), dim=1))
vectors_list = [self._encoders[i](views, layer) for i, views in enumerate(views_list)]
if concat:
vectors_list = torch.cat(vectors_list, dim=1)
return vectors_list
def update(self, x: torch.Tensor):
self._optimizer.zero_grad()
vectors_list = self.encode(x)
loss = self._criterion(torch.stack(vectors_list, dim=1))
# loss = self.full_graph_loss(vectors_list) if self.cfg.full_graph else self.core_view_loss(vectors_list)
loss.backward()
self._optimizer.step()
return loss.item()
def _contrast_loss(self, vectors_1, vectors_2):
i = random.randint(0, vectors_1.shape[0] - 1)
z1 = vectors_1[i].repeat((vectors_1.shape[0], 1))
z2 = vectors_2
similarities = torch.cosine_similarity(z1, z2, eps=0)
critic = torch.log_softmax(similarities * self.cfg.temperature, dim=0)[i]
return - critic
def _two_view_loss(self, vectors_1, vectors_2):
return self._contrast_loss(vectors_1, vectors_2) + self._contrast_loss(vectors_2, vectors_1)
def core_view_loss(self, vectors_list):
loss = torch.tensor(0, dtype=torch.float, device=utils.device())
for i in range(1, len(vectors_list)):
loss += self._two_view_loss(vectors_list[0], vectors_list[i])
return loss
def full_graph_loss(self, vectors_list):
loss = torch.tensor(0, dtype=torch.float, device=utils.device())
for i in range(len(vectors_list)-1):
for j in range(i+1, len(vectors_list)):
loss += self._two_view_loss(vectors_list[i], vectors_list[j])
return loss
class Classifier(nn.Module):
def __init__(self, cfg, feature_dim):
super(Classifier, self).__init__()
self.cfg = cfg
self._classifier = LinearClassifier(feature_dim, 10)
self._optimizer = torch.optim.Adam(
self._classifier.parameters(),
lr=self.cfg.lr,
betas=(self.cfg.beta_1, self.cfg.beta_2),
)
self._criterion = nn.CrossEntropyLoss()
def update(self, x: torch.Tensor, y: torch.Tensor):
self._optimizer.zero_grad()
output = self._classifier(x)
loss = self._criterion(output, y)
loss.backward()
self._optimizer.step()
loss = loss.item()
score = ((output.argmax(dim=1) == y) * 1.).mean().item()
return loss, score
def evaluate(self, x: torch.Tensor, y: torch.Tensor):
output = self._classifier(x)
loss = self._criterion(output, y)
loss = loss.item()
score = ((output.argmax(dim=1) == y) * 1.).mean().item()
return loss, score
|
from django.urls import path
from partners import views
app_name = 'partners'
urlpatterns = [
path('', views.partners_list, name='partners_list'),
path('contractor/detail/<int:id>', views.contractor_detail, name='contractor_detail'),
path('contractor/add/', views.update_contractor, name='add_contractor'),
path('contractor/update/<int:id>', views.update_contractor, name='update_contractor'),
path('contractor/delete/<int:id>', views.delete_contractor, name='delete_contractor'),
path('supplier/detail/<int:id>', views.supplier_detail, name='supplier_detail'),
path('supplier/add/', views.update_supplier, name='add_supplier'),
path('supplier/update/<int:id>', views.update_supplier, name='update_supplier'),
path('supplier/delete/<int:id>', views.delete_supplier, name='delete_supplier'),
path('company/comments/add/', views.leave_comment, name='leave_comment'),
path('company/comments/delete/<int:id>', views.delete_comment, name='delete_comment'),
]
|
# Manel Ferré
# Searches for vulnerabilities in the operating system
# as of today's date, based on vuls-control
# by version, it checks whether the installed version is vulnerable
#from operator import truediv
import sys
#from sys import exit
from os import remove
from os import path
import argparse
# Load the functions for the current operating system: win32/linux/darwin
sistemaOperativo = sys.platform
if(sistemaOperativo == "win32"):
import fwindows
elif(sistemaOperativo == "linux"):
import flinux
elif(sistemaOperativo == "darwin"):
import fmac
# Vulnerability lookup function against CVE Details
import fvulns
# Configuration constants
import config
# Global variables
listTranslate = []  # List of synonyms
listForzar = []     # List of applications that must always be monitored
listDemo = []       # List of applications for the DEMO (known vulnerable)
# Reads the translate file to map the name reported by the OS to the name registered in CVE Details
def leeSinonimos():
source = open(config.ConstFileSinonimos, 'r')
for line in source:
if not line.startswith('#') and line.strip():
listTranslate.append(line)
source.close()
# Applications to check even if they are not present on the system
def leeAppFozar():
source = open(config.ConstFileAppForzar, 'r')
for line in source:
if not line.startswith('#') and line.strip():
listForzar.append(line)
source.close()
# For demos: known-vulnerable applications
def leeAppDemoVulnerables():
source = open(config.ConstDemoVulnerables, 'r')
for line in source:
if not line.startswith('#') and line.strip():
listDemo.append(line)
source.close()
# Passes the lists to the OS-specific module
def ini():
    if(sistemaOperativo == "win32"):
        fwindows.listTranslate = listTranslate
    elif(sistemaOperativo == "linux"):
        flinux.listTranslate = listTranslate
    elif(sistemaOperativo == "darwin"):
        fmac.listTranslate = listTranslate
# Creates the installed-applications files
def createFile():
    if(sistemaOperativo == "win32"):
        fwindows.createFile()
    elif(sistemaOperativo == "linux"):
        flinux.createFile()
    elif(sistemaOperativo == "darwin"):
        fmac.createFile()
# Checks the demo applications
def vulnDemo():
if len(listDemo)>0:
for x in listDemo:
nametemp = x.split(";")
fvulns.busca_cve(nametemp[0], nametemp[1])
# Searches by installed version
def vulnVersion():
    if(sistemaOperativo == "win32"):
        fwindows.vulnVersion()
    elif(sistemaOperativo == "linux"):
        flinux.vulnVersion()
    elif(sistemaOperativo == "darwin"):
        fmac.vulnVersion()
def gestionargumentos():
appdesc = "HorusEye Buscador de vuberabilidades CVEs de programas instalados."
parser = argparse.ArgumentParser(description=appdesc)
    # arguments
    parser.add_argument('-c',
                        dest='create_file',
                        action='store_true',
                        default=False,
                        required=False,
                        help='Create the temporary file.')
    parser.add_argument('-m',
                        dest='monitorizar_apps',
                        action='store_true',
                        default=False,
                        required=False,
                        help='Monitor the system.')
    parser.add_argument('-s',
                        dest='search_vulns',
                        action='store_true',
                        default=False,
                        required=False,
                        help='Search for vulnerabilities by version.')
    parser.add_argument('-v',
                        dest='verbose',
                        action='store_true',
                        default=False,
                        required=False,
                        help='Print information to the screen.')
    parser.add_argument('-d',
                        dest='demo',
                        action='store_true',
                        default=False,
                        required=False,
                        help='Run the demo.')
return parser.parse_args()
def print_log(_verb, Text):
if (_verb == True):
print(Text)
def main():
namespace = gestionargumentos()
_verbose = namespace.verbose
if(sistemaOperativo == "win32"):
print_log (_verbose, "Sistema operativo Windows")
elif(sistemaOperativo == "Linux"):
print_log (_verbose, "Sistema operativo Linux")
elif(sistemaOperativo == "Darwin"):
print_log (_verbose, "Sistema operativo Mac")
print_log (_verbose, "Inicio de proceso")
    # Delete the application and error files
if (namespace.create_file is True):
if path.exists(config.ConstFileAppInstaladas):
remove(config.ConstFileAppInstaladas)
if path.exists(config.ConstFileAppNoEncontradas):
remove(config.ConstFileAppNoEncontradas)
    # Read the synonyms list
    if path.exists(config.ConstFileSinonimos):
        print_log(_verbose, "Reading synonyms")
        leeSinonimos()
    # Read the list of applications that must always be checked
    if path.exists(config.ConstFileAppForzar):
        print_log(_verbose, "Reading forced applications")
        leeAppFozar()
    # Read the list of known-vulnerable applications for the demo
    if path.exists(config.ConstDemoVulnerables):
        print_log(_verbose, "Reading vulnerable applications (DEMO)")
        leeAppDemoVulnerables()
    # Pass the lists to the OS-specific module
    ini()
    # Create the server's application files
    if (namespace.create_file is True):
        print_log(_verbose, "Creating temporary files")
        createFile()
    # Search for vulnerabilities as of today's date,
    # based on the generated files and the forced-applications file
    if (namespace.monitorizar_apps is True):
        print_log(_verbose, "Monitoring applications")
        fvulns.monitorizarapps()
    # Search for vulnerabilities according to the installed version
    if (namespace.search_vulns is True):
        print_log(_verbose, "Searching for vulnerabilities in the installed software, by installed version")
        vulnVersion()
    if (namespace.demo is True):
        print_log(_verbose, "Checking the DEMO applications that are known to be vulnerable")
        vulnDemo()
if __name__ == '__main__':
main()
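# Hypothetical command-line usage (the script file name is an assumption):
#
#   python horuseye.py -c -m -v    # rebuild the app files, then monitor, with verbose output
#   python horuseye.py -s          # check installed software versions against known CVEs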
|
import sys, itertools
lines = [int(x) for x in sys.stdin.readlines()]
print(sum(lines))
seen = {0}
freq = 0
for l in itertools.cycle(lines):
freq += l
if freq in seen:
break
seen.add(freq)
print(freq)
|
import math
channel_bw = input("Enter the 5G Channel Bandwidth (MHz): ")
scs = input("Enter the Subcarrier Spacing (KHz): ")
mimo_layers = input("Enter the number of MIMO layers: ")
class prbs:
def __init__(self, channel_bw, scs) -> None:
self.channel_bw = channel_bw
self.scs = scs
|
# -*- coding: utf-8 -*-
import redis
from core import config
from core.datasources.movielens_source import MovieLensSource
from core.warehouse import FileWarehouse
""" Various global objects that can be loaded on demand"""
redis_conn = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)
_source = MovieLensSource(name='movielens',
ratings_file='external_data/ml-1m/ratings.dat',
products_file='external_data/ml-1m/movies.dat',
encoding='ISO-8859-1')
# needed for the server component
warehouse = FileWarehouse(partition=_source.name)
|
import requests
from celery_app.utils.utils import insert_vuln_db
from celery_app.config.config import web_port_short
# Detects an exposed WEB-INF/web.xml file
plugin_id=63
default_port_list=web_port_short
def check(host, port=80):
scheme = 'https' if '443' in str(port) else 'http'
target = '{}://{}:{}'.format(scheme, host, port)
uris = ['/WEB-INF/web.xml']
hits = ['</web-app>', '</servlet-mapping>']
try:
targets = ['{}{}'.format(target, uri) for uri in uris]
requests.packages.urllib3.disable_warnings()
with requests.Session() as session:
for target in targets:
response = session.get(target, timeout=7, verify=False)
for hit in hits:
if hit in response.text and response.status_code in [200]:
output = response.text
insert_vuln_db(host, target, output, plugin_id)
return True, host, target, output
except Exception as error:
return False
return False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Create a mesh file with the number of vertical levels as nodal value
"""
from schimpy.schism_mesh import read_mesh, write_mesh
import numpy as np
def create_arg_parser():
""" Create an argument parser
"""
import argparse
description = ("Create a mesh file with the number of vertical levels",
" as nodal values ")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--hgrid', default='hgrid.gr3',
help="Horizontal grid name")
parser.add_argument('--vgrid', default='vgrid.in',
help="Vertical grid file name")
parser.add_argument('--output', default='n_levels.gr3',
help="Output file name")
return parser
def main():
""" Just a main function
"""
parser = create_arg_parser()
args = parser.parse_args()
mesh = read_mesh(args.hgrid, args.vgrid)
print(mesh.vmesh.n_vert_levels())
n_levels = mesh.vmesh.n_vert_levels() - np.array(mesh.vmesh.kbps)
write_mesh(mesh, args.output, node_attr=n_levels)
if __name__ == '__main__':
main()
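# Example invocation (file names are the script defaults; the script name itself is an assumption):
#
#   python create_nlevels.py --hgrid hgrid.gr3 --vgrid vgrid.in --output n_levels.gr3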
|