repo_name | path | text |
|---|---|---|
biobai/LiBis | LiBis/test/final.py | import multiprocessing
from Comb_fastq import combine
from utils import *
import sys
import gzip
import os
def cut(step,length_bin,link,i):
start = step*i
end = length_bin + start
if end > len(link):
end = len(link)
if start>=end or end-start<=length_bin-step:
return False,''
return True,link[start:end]
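# A minimal usage sketch (hypothetical 40 bp read) of the sliding window:
# cut(5, 30, 'A'*40, 0) -> (True, positions 0-30)
# cut(5, 30, 'A'*40, 2) -> (True, positions 10-40)
# cut(5, 30, 'A'*40, 3) -> (False, '') since only 25 bp (<= length_bin-step) remain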
def writefiles(UnmappedReads,step,length_bin,max_length,outputname):
Part_Fastq_Filename = []
for i in range(max_length):
filecontent = []
for readsname in UnmappedReads:
link = UnmappedReads[readsname]
mark,cutreads = cut(step,length_bin,link[0],i)
if not mark: continue
_,cutquality = cut(step,length_bin,link[1],i)
filecontent.append(readsname+'\n'+cutreads+'\n+\n'+cutquality+'\n')
if len(filecontent)==0: break
name = outputname+'.part'+str(i+1)+'.fastq'
Part_Fastq_Filename.append(name)
with open(name,'a') as f:
f.writelines(filecontent)
print(Part_Fastq_Filename)
return Part_Fastq_Filename
def do_process(temp,param):
#print(l+'in')
#temp = l.strip().split()
length = len(temp)
if length<=0 or length>2:
print("Parameter error in "+' '.join(temp))
sys.exit()
refpath = param['ref']
#refpath='/data/dsun/ref/mouseigenome/mm10.fa'
#refpath = '/data/dsun/ref/humanigenome/hg19.fa'
#tempname=temp[0].lower()
#if tempname.endswith(".gz"):
# tempname = tempname[:-3]
#if tempname.endswith(".fq"):
# tempname = tempname[:-3]
#if tempname.endswith(".fastq"):
# tempname = tempname[:-6]
outputname = RemoveFastqExtension(temp[0])
#print(outputname)
originallogname = outputname+'_originallog.record'
phred=33
if length==2 :
command='bsmap -a '+temp[0]+' -b '+temp[1]+' -z '+str(phred)+' -d '+refpath+' -o '+outputname+'.bam -n 1 -r 0 1>>log 2>>'+originallogname
else:
command='bsmap -a '+temp[0]+' -z '+str(phred)+' -d '+refpath+' -o '+outputname+'.bam -n 1 -r 0 1>>log 2>>'+originallogname
First_try = Pshell(command)
First_try.process()
#Test1 done
inputfileinfo = temp #fastq file name(s) for this sample
command = 'samtools view '+outputname+'.bam > '+outputname+'.sam'
BamFileReader = Pshell(command)
BamFileReader.process()
with open(outputname+".sam") as sam:
#second column in sam file: 64, mate 1; 128, mate 2;
samlines = sam.readlines()
set_sam = {}
for line in samlines:
temp = line.strip().split()
m1 = (int(temp[1]) & 64)
m2 = (int(temp[1]) & 128)
# print(temp[1],m1,m2)
if m1>0: mate = 1
elif m2>0: mate = 2
else: mate = 0
if temp[0] in set_sam: set_sam[temp[0]]=3
else: set_sam[temp[0]]=mate
# print(mate)
# for k in set_sam:
# print(k)
# break
del samlines
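# SAM flag sketch: bit 64 marks the first mate and bit 128 the second, e.g.
# flag 99 gives 99 & 64 == 64 (mate 1) and flag 147 gives 147 & 128 == 128 (mate 2);
# a read name that appears twice in the bam is marked 3 above and skipped later.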
UnmappedReads = {}
o=0
#step = 5
#length_bin = 30#30
step = param['step']
length_bin = param['window']
max_length = 24 #upper bound on sliding-window parts per read (previously 50)
Part_Fastq_Filename=[]
for filename in inputfileinfo:
o+=1
gzmark=False
if filename.endswith('.gz'):
f = gzip.open(filename)
gzmark=True
else:
f = open(filename)
if f:
while 1:
if gzmark:
line1 = f.readline().decode()
else:
line1 = f.readline()
if not line1:
break
if gzmark:
line2 = f.readline().decode().strip()
line3 = f.readline().decode()
line4 = f.readline().decode().strip()
else:
line2 = f.readline().strip()
line3 = f.readline()
line4 = f.readline().strip()
line1 = line1.strip().split()
# print(line1[0][1:])
if (line1[0][1:] in set_sam):
string_mark = o
if len(line1)>1 and '1'<=line1[1][0]<='2':
string_mark = int(line1[1][0])
if set_sam[line1[0][1:]]==0 or set_sam[line1[0][1:]]==3 : continue
if set_sam[line1[0][1:]]==string_mark : continue
temp = line1[0]
if length>1 and len(line1)>1: temp+='_'+line1[1][0]
#The mate-detection logic may be buggy, because different sequencing machines produce differently structured read names.
#fastqlines[i] = line1[0]+'_'+line1[1][0]+' '+line1[1]
UnmappedReads[temp]=[line2,line4]
if len(UnmappedReads)>10000000:
pfn = writefiles(UnmappedReads,step,length_bin,max_length,outputname)
UnmappedReads={}
if len(pfn)>len(Part_Fastq_Filename):
Part_Fastq_Filename=pfn
#We now have a dictionary UnmappedReads = {readsname: [sequence, quality]}
#cut(step, length_bin, string, fileorder) returns (available (T/F), read_fraction)
if len(UnmappedReads)>0:
pfn=writefiles(UnmappedReads,step,length_bin,max_length,outputname)
if len(pfn)>len(Part_Fastq_Filename):
Part_Fastq_Filename=pfn
print('finish')
f.close()
del UnmappedReads
#The split fastq files are ready; their names are stored in Part_Fastq_Filename
# p = multiprocessing.Pool(processes=7)
for i in range(len(Part_Fastq_Filename)):
command = 'bsmap -a '+Part_Fastq_Filename[i]+' -z '+str(phred)+' -d '+refpath+' -o '+Part_Fastq_Filename[i]+'.bam -n 1 -r 0 -R'
Batch_try = Pshell(command)
Batch_try.process()
#run bsmap and get bam files named as Part_Fastq_Filename[i].bam
#import combine to generate the finalfastq
combine(outputname,Part_Fastq_Filename,step,length_bin)
splitlogname = outputname+'_split_log.record'
command = 'bsmap -a '+outputname+'_finalfastq.fastq -d '+refpath+' -z '+str(phred)+' -o '+outputname+'_split.bam -n 1 -r 0 1>>log 2>> '+splitlogname
Bam = Pshell(command)
Bam.process()
splitfilename = outputname+'_split.bam'
header = outputname+'.header'
command='samtools view -H '+splitfilename+' > '+header
filter = Pshell(command)
filter.process()
split_length=40
command='samtools view '+splitfilename+"| awk '{if (length($10)>"+str(split_length)+") print}' > "+splitfilename+'.sam'
filter.change(command)
filter.process()
command='cat '+header+' '+splitfilename+'.sam | samtools view -bS - > '+splitfilename+'.bam'
filter.change(command)
filter.process()
command='samtools sort -@ 4 '+splitfilename+'.bam'+' -o '+splitfilename+'.sorted.bam'
filter.change(command)
filter.process()
command='samtools sort -@ 4 '+outputname+'.bam'+' -o '+outputname+'.sort.bam'
filter.change(command)
filter.process()
command='mv '+outputname+'.sort.bam '+outputname+'.bam'
filter.change(command)
filter.process()
command='mv '+splitfilename+'.sorted.bam '+splitfilename
filter.change(command)
filter.process()
command='rm '+splitfilename+'.bam '+splitfilename+'.sam'
filter.change(command)
filter.process()
m=Pshell('samtools merge '+outputname+'_combine.bam '+outputname+'.bam '+outputname+'_split.bam')
m.process()
print("Merge done!\nCreated final bam file called "+outputname+'_combine.bam')
return outputname+'_combine.bam',originallogname,splitlogname
def clipmode(name,param):
'''
When we get the mapping result, we should report
mapping ratio, mapped reads number, length distribution,
original mapping ratio, original mapped reads number,
new generated splitted reads number, new generated splitted reads length
'''
newname=[]
log=[]
for n in name:
newn,originallog,splitlog=do_process(n,param)
newname.append(newn)
log.append([originallog,splitlog])
if True:# param['cleanmode'] Set a clean mode and full mode for clipping mode
cleanupmess(name,newname)
return newname,log
def cleanupmess(inputname,outputname):
name = RemoveFastqExtension(inputname[0])
#TODO: remove the intermediate files produced for 'name'; currently a stub.
if __name__=="__main__":
with open("config.txt") as f:
lines = f.readlines()
#Example parameters; the reference path here is an assumption and must point at a real genome.
param = {'ref': 'reference.fa', 'step': 5, 'window': 30}
for l in lines:
do_process(l.strip().split(), param) #pass file name(s) and parameters to do_process
|
biobai/LiBis | LiBis/test/bsseq_sim.py | import numpy as np
import math
import random
import sys
#==============================Variable setting===================================
readsname = sys.argv[1]
readsnum= int(sys.argv[2])
conversion_ratio=1
methylation_ratio=0.75
read_length=100
'''
@Variables:
chrom_set: dictionary, {chromosome_name: chromosome_sequence}
chrom_len: int[], chromosome lengths ordered by chromosome name in the fasta file
chrom_name: string[], chromosome names
genome_len: int, length of the whole genome
methy_dict: dictionary, {'chr1_10469': 0.1 (methylation ratio)}
'''
#================================================================================
def bed_reader(filename):
'''
read bed file as fixed methylation ratio
'''
dict = {}
with open(filename) as f:
for line in f:
line_content = line.strip().split()
dict[line_content[0]+'_'+line_content[1]] = float(line_content[3])
return dict
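# Sketch: a 4-column BED line "chr1 10468 10469 0.82" is stored as
# {'chr1_10468': 0.82}, keyed by chromosome and start position.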
def genome_loader(filename):
chr = ''
chrom_set = {}
seq = ''
genome_len = 0
chrom_len = []
chrom_name = []
with open(filename) as f:
for line in f:
line_content = line.strip()
if line_content.startswith('>'):
if chr!='':
chrom_set[chr] = seq
chrom_len.append(len(seq))
genome_len += chrom_len[-1]
seq=''
chr = line_content[1:]
chrom_name.append(chr)
continue
seq += line_content
chrom_set[chr] = seq
chrom_len.append(len(seq))
genome_len += chrom_len[-1]
return chrom_set, chrom_len, chrom_name, genome_len
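# Sketch: a FASTA containing ">chr1\nACGT\n>chr2\nGG" loads as
# chrom_set={'chr1': 'ACGT', 'chr2': 'GG'}, chrom_len=[4, 2],
# chrom_name=['chr1', 'chr2'], genome_len=6.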
def fake_read(length):
base = ['A','T','C','G']
seq = ''
for i in range(length):
pos = random.randint(0,3)
seq += base[pos]
return seq
def reverse(read):
'''
Return the reverse complement; applying it twice restores the read:
CG......
......CG
CG......
'''
dic={'A':'T','T':'A','C':'G','G':'C','N':'N'}
r=''
for rr in read:
r=dic[rr.upper()]+r
return r
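# e.g. reverse('AACG') -> 'CGTT', the reverse complement.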
def bisulfite(chrom, start, read):
'''
Return the bisulfite-converted sequence.
Returns '' when a CpG position is missing from methy_dict; callers should discard such reads.
'''
r=''
l = len(read)
for i in range(1,l-1): #skip the first and last base, wasting 2 bp per read
base=read[i].upper()
if base=='C':
c = random.random()
if c<conversion_ratio:
if read[i+1].upper()=='G':
pos = chrom+'_'+str(start+i)
if pos not in methy_dict:
return ''
m = random.random()
if m>methy_dict[pos]:
base='T'
else:
base='T'
r=r+base
return r
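# Conversion sketch: with conversion_ratio=1, every non-CpG C becomes T;
# a CpG C stays C with probability methy_dict['chrN_pos'] and becomes T
# otherwise, so simulated methylation tracks the input BED ratios.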
'''
@Variables:
chrom_set: dictionary, {chromosome_name: chromosome_sequence}
chrom_len: int[], chromosome lengths ordered by chromosome name in the fasta file
chrom_name: string[], chromosome names
genome_len: int, length of the whole genome
methy_dict: dictionary, {'chr1_10469': 0.1 (methylation ratio)}
'''
def random_head_tail():
finalbed=[]
fake_length = 20
real_read_length = read_length - fake_length
reads_count = 0
while True:
pos = random.randint(0,genome_len-1) #randint is inclusive; avoid running past the last chromosome
chr=0
while pos>=chrom_len[chr]:
pos-=chrom_len[chr]
chr+=1
if pos==0:
pos+=1
start=pos-1
end=pos+real_read_length+1
if end>chrom_len[chr]: continue
if pos<1:continue
# if pos>chrom_len[chr]:continue
read=chrom_set[chrom_name[chr]][start:end]
if 'N' in read:
continue
r=read
a1=random.random()
a2=random.random()
if a1>0.5:
r=reverse(read)#Get reads from +/- strand
r = bisulfite(chrom_name[chr],start,r)
if r=='': continue
if a2>0.5:
r=reverse(r)#PCR +/-
fake_marker=''
head_fake_length = 0
tail_fake_length = 0
if fake_length>0:
head_fake_length = random.randint(0,fake_length)
tail_fake_length = fake_length - head_fake_length
head = fake_read(head_fake_length)
tail = fake_read(tail_fake_length)
r = head + r + tail
fake_marker = '_'+str(head_fake_length)+'_'+str(tail_fake_length)
quality = 'E'*read_length
print('@'+str(reads_count)+'_'+readsname)
print(r)
print('+')
print(quality)
finalbed.append(chrom_name[chr]+'\t'+str(start+1)+'\t'+str(end-1)+'\t'+str(reads_count)+'_'+readsname+'\t'+str(head_fake_length)+'\t'+str(tail_fake_length)+'\t'+str(a1)+'\t'+str(a2)+'\n')
reads_count += 1
if reads_count == readsnum:
break
with open(readsname+'_simulation.bed','w') as f:
f.writelines(finalbed)
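# Simulation sketch: each read is drawn uniformly from the genome, optionally
# reverse-complemented (sequencing strand, a1), bisulfite-converted at CpGs per
# methy_dict, optionally reverse-complemented again (PCR strand, a2), padded
# with random fake bases at head/tail, and its true coordinates are written to
# <readsname>_simulation.bed.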
chrom_set, chrom_len, chrom_name, genome_len = genome_loader('/data/dsun/ref/humanigenome/hg19.fa')
methy_dict = bed_reader('./hESC.bed')
if __name__=="__main__":
'''
print(chrom_set['chr1'][10468:10498])
print(genome_len)
print(chrom_len)
print(chrom_name)
print(methy_dict['chr1_10468'])
'''
random_head_tail()
|
biobai/LiBis | LiBis/fastqc.py | '''
Tested
'''
from .utils import *
import os
class FASTQC():
def check(self,nocheck=False):
#return True,''
if not toolcheck('fastqc --help'):
return False,'Fastqc command not found'
if os.path.exists('Fastqc'):
if nocheck:
print('Fastqc file or dir exists! But --nocheck is enabled, so continuing')
else:
return False,'Fastqc file or dir exists'
else:
os.mkdir('Fastqc')
return True,''
def setpath(self,path):
self.path = path+'Fastqc'
def run(self,filename):
pshell=Pshell('fastqc -o '+self.path+' '+filename)
pshell.process()
Fastqc = FASTQC()
if __name__=="__main__":
a = FASTQC()
#print(a.check())
a.setpath('./')
a.run('../trimtest/SRR1248444_1.1.1.1.fastq')
|
biobai/LiBis | LiBis/test/clipped_extractor.py | from utils import *
import os
def reads_map(partfilelist,args):
mapfilenum = args['mapfilenumber']
step = args['step']
file_order=0
mapreduce_file=[]
rootfile = partfilelist[0][:partfilelist[0].find('.')]
dic={}
for i in range(mapfilenum):
mapreduce_file.append(rootfile+'_'+str(i)+'.mapreduce')
if os.path.exists(mapreduce_file[-1]) and (not 'finish' in args):
os.remove(mapreduce_file[-1])
print('Delete '+mapreduce_file[-1])
dic[i]=[]
if 'finish' in args:
return mapreduce_file
for file in partfilelist:
print(file)
count=0
with open(file+'.sam') as f:
for line in f:
s = line.strip().split('\t')
mismatch = int(s[11][s[11].rfind(':')+1:])
if mismatch>1: continue
read_length = len(s[9])
tail_length = ((read_length-1)%step)+1
refseq = s[12][-2-tail_length:-2]
readsseq = s[9][-tail_length:]
strand = s[13][-2:]
mis=0
for base in range(tail_length):
if (refseq[base]!=readsseq[base]):
if strand[0]=='+':
if (refseq[base]=='C' and readsseq[base]=='T'): continue
else:
if (refseq[base]=='G' and readsseq[base]=='A'): continue
mis+=1
tail_mismatch = mis
hashnum = abs(hash(s[0])) % mapfilenum
dic[hashnum].append([s[0],s[2][3:],s[3],str(file_order),str(mismatch),str(tail_mismatch),str(read_length)])
count+=1
if count>5000000:
for i in range(mapfilenum):
arr = dic[i]
arr = list(map(lambda x:'\t'.join(x)+'\n',arr))
with open(mapreduce_file[i],'a') as ff:
ff.writelines(arr)
dic[i]=[]
count=0
file_order+=1
if count>0:
for i in range(mapfilenum):
arr = dic[i]
arr = list(map(lambda x:'\t'.join(x)+'\n',arr))
with open(mapreduce_file[i],'a') as ff:
ff.writelines(arr)
dic[i]=[]
count=0
return mapreduce_file
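# Partitioning sketch: alignments are bucketed by abs(hash(read_name)) % mapfilenumber,
# so every fragment of a read lands in the same .mapreduce file and
# reads_combine() can join fragments without a global sort.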
def reads_combine(filename,args):
step = args['step']
length_bin = args['binsize']
filter = args['filter']
outputname = args['outputname']
originalfile = args['originalfile']
result={}
with open(filename) as f:
for line in f:
arr = line.strip().split()
name,chr,startpos,fileorder,mismatch,tail_mismatch,read_length = arr
startpos = int(startpos)
fileorder = int(fileorder)
mismatch = int(mismatch)
tail_mismatch = int(tail_mismatch)
read_length = int(read_length)
if (not name in result) or (len(result[name])==0):
result[name]=[[chr,startpos,fileorder,mismatch,0]]
else:
temp = [chr,startpos,fileorder,mismatch,0]
#COPIED CODE; NEEDS TO BE MODIFIED
#reads from the clipped, mapped bam
join_or_not=False
for reads in result[name]:
if reads[3]+tail_mismatch<=1 and readsjoin(reads,temp,step,read_length,length_bin):
reads[3]+=tail_mismatch
reads[4]=temp[2]-reads[2]
join_or_not=True
break
frac_list=result[name]
if not join_or_not:
frac_list.append(temp)
#print(len(result))
#Delete short fragments
print(len(result))
del_name=[]
for name in result:
nonjoin_num=0
reads_list=result[name]
for i in range(len(reads_list)-1,-1,-1):
if reads_list[i][4]<=1:# and reads_list[i][3]>1:
reads_list.pop(i)
if len(reads_list)==0:
del_name.append(name)
for name in del_name:
del result[name]
print(len(result))
for name in result:
reads_list = result[name]
num = len(reads_list)
del_mark = [0 for i in range(num)]
for i in range(num):
for j in range(i+1,num):
if overlap(result[name][i],result[name][j],step,length_bin):
sss = result[name][i][4]-result[name][j][4]
if sss>0: del_mark[j]=1
elif sss<0: del_mark[i]=1
else:
mis = result[name][i][3]-result[name][j][3]
if mis>0: del_mark[i]=1
else: del_mark[j]=1
#Only keep the best read which has the most extends and the least mismatches.
for i in range(num-1,-1,-1):
if del_mark[i]==1:
reads_list.pop(i)
return result
#GetFastqList(result,step,length_bin,filter,outputname,originalfile)
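# Dedup sketch (mirrors the del_mark pass above): overlapping joined fragments
# of one read are compared pairwise; the fragment with more extensions
# (field 4) is kept, and ties go to the fragment with fewer mismatches (field 3).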
def reads_reduce(mapreduce_file,args):
step = args['step']
length_bin = args['binsize']
filter = args['filter']
outputname = args['outputname']
originalfile = args['originalfile']
mapfilenum = args['mapfilenumber']
totalresult={}
for i in range(mapfilenum):
print(str(i)+' start!')
result=reads_combine(mapreduce_file[i],args)
GetFastqList(result,step,length_bin,filter,outputname,originalfile)
#GetFastqList(result,step,length_bin,filter,outputname,originalfile)
#totalresult.update(result)
print(str(i)+' finished! length='+str(len(totalresult)))
#GetFastqList(totalresult,step,length_bin,filter,outputname,originalfile)
def GetFastqList(joined_reads,step,length_bin,filter,outputname,originalfile):
#print(joined_reads)
nameset={}
#Generate a dictionary which contains readsname, start file order and extend fraction number
for name in joined_reads:
reads_list = joined_reads[name]
if len(reads_list)==0: continue
n = name
nameset[n]=[[read[2],read[4]] for read in reads_list]
#contentset[n]=[['',''] for i in range(len(nameset[n]))]#read_content,read_quality
print(len(nameset))
pos_mark=[{},{}]
for name in nameset:
readinfo = nameset[name]
pos=0
if name[-2:]=='_2':
pos=1
if name[-2:]=='_1' or name[-2:]=='_2':
name = name[:-2]
for order,sum in readinfo:
start = (order)*step
end = start + step*sum + length_bin
if end-start<filter: continue
if name in pos_mark[pos]:
pos_mark[pos][name].append([start,end])
else:
pos_mark[pos][name]=[[start,end]]
print(len(pos_mark[0]),len(pos_mark[1]))
del nameset
#num=0
#for n in pos_mark[0]:
# print(n)
# if n in pos_mark[0]:
# print(pos_mark[0][n])
# num+=1
# if num>10: break
fileorder=0
#print(pos_mark)
result_start=[]
result_end=[]
for file in originalfile:
with open(file) as f:
while True:
name = f.readline()
if not name:
break
reads = f.readline().strip()
_ = f.readline()
quality = f.readline().strip()
fqname = name.strip().split()[0][1:]
if not fqname in pos_mark[fileorder]: continue
for i in range(len(pos_mark[fileorder][fqname])):
start,end = pos_mark[fileorder][fqname][i]
s_name = fqname
if len(pos_mark[fileorder][fqname])>1: #suffix only when a read has multiple clipped fragments
s_name += '_'+str(i)
s_read = reads[:start]
#0-start
s_qua = quality[:start]
readlen = str(len(reads))
s_final = '@'+s_name+'_'+str(fileorder)+':'+readlen+'\n'+s_read+'\n'+'+\n'+s_qua+'\n'
result_start.append(s_final)
s_read = reads[end:]
s_qua = quality[end:]
s_final = '@'+s_name+'_'+str(fileorder)+':'+readlen+'\n'+s_read+'\n'+'+\n'+s_qua+'\n'
result_end.append(s_final)
if len(result_start)>5000000:
with open(outputname+'_clipped_start.fastq','a') as ff:
ff.writelines(result_start)
result_start=[]
with open(outputname+'_clipped_end.fastq','a') as ff:
ff.writelines(result_end)
result_end=[]
fileorder+=1
if len(result_start)>0:
with open(outputname+'_clipped_start.fastq','a') as ff:
ff.writelines(result_start)
with open(outputname+'_clipped_end.fastq','a') as ff:
ff.writelines(result_end)
if __name__=='__main__':
args={'step':5,
'binsize':30,
'filter':40,
'outputname':'6P',
'originalfile':['6P_R1_val_1.fq','6P_R2_val_2.fq'],
'mapfilenumber':10,
'finish':1
}
name_6g = ['6G.part{}.fastq'.format(i) for i in range(1, 25)]
name_6p = ['6P.part{}.fastq'.format(i) for i in range(1, 25)]
name_m6g = ['M6G.part{}.fastq'.format(i) for i in range(1, 25)]
names = reads_map(name_6p,args)
print(names)
reads_reduce(names,args)
# step = args['step']
## length_bin = args['binsize']
# filter = args['filter']
# outputname = args['outputname']
# originalfile = args['originalfile']
|
biobai/LiBis | LiBis/test/setup.py | from setuptools import setup, find_packages
setup(name = 'pl',version = '0.1',packages = find_packages(),)
|
biobai/LiBis | LiBis/test/GenerateResult.py | import os
from utils import *
def GenerateResult(tablefile, fastqcfile, fig):
os.makedirs('RESULT/qc', exist_ok=True)
os.system('cp Fastqc/*.html RESULT/qc')
|
biobai/LiBis | LiBis/bedtools.py | '''
Tested
'''
import os
from .utils import *
class BEDTOOLS:
def check(self,nocheck=False):
if not toolcheck('bedtools --version'):
return False,'Bedtools not found!'
return True,''
def setparam(self,param):
self.genome = param['genome']
if 'bin' in param and param['bin']!=None:
self.bin = param['bin']
else:
self.bin=1000000
def makewindow(self):
path=os.path.abspath(__file__)
path = path[:path.rfind('/')+1]
filename = path+'chromsize/'+self.genome + '.chrom.sizes'
#print(filename)
outputname = self.genome+'_'+str(self.bin)+'.bed'
os.system('bedtools makewindows -g '+filename+' -w '+str(self.bin)+' > '+outputname)
self.binfile = outputname
def intersect(self,names):
sample=0
result=[]
for name in names:
temp = str(sample)+'.intersect'
output = str(sample)+'.bed'
os.system('bedtools intersect -loj -a '+self.binfile+' -b '+ name +" | awk -v OFS='\t' '{if ($7>=0 && $7<=1) print $1,$2,$3,$7}' > BED_FILE/"+temp)
os.system('bedtools groupby -i BED_FILE/'+temp+' -g 1,2,3 -c 4 -o mean > '+'BED_FILE/'+output)
result.append('BED_FILE/'+output)
sample=sample+1
return result
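# Pipeline sketch: makewindow() tiles the genome into fixed-size bins from the
# bundled chrom.sizes file; intersect() then left-joins each sample's BED onto
# the bins, keeps methylation ratios in [0,1] via awk, and averages them per
# bin with bedtools groupby, yielding one mean value per window per sample.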
'''
Two plots from segmented genome average: TSNE/PCA and heatmap
'''
Bedtools=BEDTOOLS()
if __name__=="__main__":
b = BEDTOOLS()
param={'genome':'hg19'}
b.setparam(param)
print(b.check())
b.makewindow()
name = ['FWAC.bed']
print(b.intersect(name))
print(b.binfile)
|
biobai/LiBis | setup.py | #!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('Python 3.4 or greater is required.')
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
with open('RELEASE') as f:
lines = f.readlines()
version = lines[0]
version = version.strip().split()[-1]
VERSION = version
LICENSE = "MIT"
setup(
name='LiBis',
version=VERSION,
description=(
'Low input Bisulfite sequencing alignment'
),
long_description='',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license=LICENSE,
packages=find_packages(),
platforms=["all"],
url='https://github.com/Dangertrip/LiBis',
install_requires=[
"matplotlib",
"numpy",
"pandas",
"scikit-learn",
"scipy",
"seaborn",
"pysam"
],
scripts=[
"bin/LiBis",
],
include_package_data=True,
)
|
biobai/LiBis | LiBis/test/pos_clipped_length.py | def comb(bam_file,fq_start,fq_end):
fq={}
with open(fq_start) as f:
lines = f.readlines()
for i in range(1,len(lines),4):
l = len(lines[i].strip())
name = lines[i-1].strip()[1:]
fq[name]=[str(l)]
with open(fq_end) as f:
lines = f.readlines()
for i in range(1,len(lines),4):
l = len(lines[i].strip())
name = lines[i-1].strip()[1:]
fq[name].append(str(l))
with open(bam_file) as f:
lines = f.readlines()
result=[]
for line in lines:
temp = line.strip().split()
if temp[0] in fq:
result.append(temp[0]+'\t'+temp[1]+'\t'+temp[2]+'\t'+fq[temp[0]][0]+'\t'+fq[temp[0]][1]+'\n')
with open(bam_file+'.comb','w') as f:
f.writelines(result)
if __name__=="__main__":
comb("M6G_split.bam.pos","M6G_clipped_start.fastq","M6G_clipped_end.fastq")
|
biobai/LiBis | LiBis/bsplot.py | '''
Tested
'''
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from copy import deepcopy
import matplotlib.cm as cm
def point_cluster(data,outputname,method='PCA'):
#data: DataFrame
#contains labelname column, samplename column and data
d = deepcopy(data)
#print(di)
d = d.sort_values(['chrom','start']) #sort_values returns a copy; assign it back
windowdata = d.values[:,3:].T
position = d.values[:,:3]
label = d.columns[3:]
#Every sample in a row
#print(windowdata)
dim=2
colors = cm.rainbow(np.linspace(0,1,len(label)))
if method == 'PCA':
pca = PCA(n_components=dim)
x_tr = pca.fit_transform(windowdata)
if method == 'TSNE':
tsne = TSNE(n_components=dim)
x_tr = tsne.fit_transform(windowdata)
fig,ax = plt.subplots(figsize=(7,7))
xmin = np.min(x_tr[:,0])
xmax = np.max(x_tr[:,0])
ymin = np.min(x_tr[:,1])
ymax = np.max(x_tr[:,1])
sample_size = x_tr.shape[0]
plt.xlim(xmin-0.2*np.abs(xmin),xmax+0.2*np.abs(xmax))
plt.ylim(ymin-0.2*np.abs(ymin),ymax+0.2*np.abs(ymax))
markers=['o', '^','v','<','>','1','2', '3','4','8','s','P','p', '*','H','h','x','X','D']
label_u = np.unique(label)
for i in range(len(label_u)):
cc = colors[i]
l = label_u[i]
pos = np.where(label==l)
ma = markers[i]
plt.scatter(x_tr[pos,0],x_tr[pos,1],c=cc,alpha=0.8,s=50,marker=ma,label=l)
if method=='PCA':
plt.xlabel('PC1',fontsize=13)
plt.ylabel('PC2',fontsize=13)
if method=='TSNE':
plt.xlabel('TSNE1',fontsize=13)
plt.ylabel('TSNE2',fontsize=13)
plt.legend(loc='best',fontsize=13)
plt.savefig(outputname)
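# Usage sketch (assumed layout: 'chrom', 'start', 'end' columns followed by one
# numeric column per sample):
# point_cluster(df, 'samples_pca.pdf', method='PCA')
# point_cluster(df, 'samples_tsne.pdf', method='TSNE')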
def heatmap(data,outputname):
from seaborn import clustermap
d = deepcopy(data)
#print(d)
d=d.drop(['chrom','start','end'],axis=1)
sns_plot=clustermap(d)
sns_plot.ax_row_dendrogram.set_visible(False)
sns_plot.savefig(outputname)
if __name__=='__main__':
with open('BED_FILE/head_combine.bam.G.bed.short.bed') as f:
lines = f.readlines()
d=[]
for line in lines:
temp = line.strip().split()
temp[-1]=float(temp[-1])
d.append(temp)
import random
for i in range(5):
for dd in d:
dd.append(random.random())
import pandas as pd
d = pd.DataFrame(d,columns=['chrom','start','end','L1','L2','L3','L4','L4','L4'])
heatmap(d,'a.pdf')
|
jhugestar/detectron2 | projects/DensePose/run_3dpw.py | from apply_net import denseposeRunner
import sys
import glob
import os
# mocapRootDir = '/run/media/hjoo/disk/data/Penn_Action/labels'
g_bIsDevfair = False
if os.path.exists('/private/home/hjoo'):
g_bIsDevfair = True
if g_bIsDevfair:
inputDir_root = '/private/home/hjoo/data/3dpw/imageFiles'
img_outputDir_root = '/private/home/hjoo/data/3dpw/densepose_img'
json_outputDir_root = '/private/home/hjoo/data/3dpw/densepose'
else:
inputDir_root = '/run/media/hjoo/disk/data/3dpw/imageFiles'
img_outputDir_root = '/run/media/hjoo/disk/data/3dpw/densepose_img'
json_outputDir_root = '/run/media/hjoo/disk/data/3dpw/densepose'
if not os.path.exists(img_outputDir_root):
os.mkdir(img_outputDir_root)
if not os.path.exists(json_outputDir_root):
os.mkdir(json_outputDir_root)
# inputFolder=$1
# outputFolder=$2
# #./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images $outputFolder --write_images_format jpg
# echo ./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images ${outputFolder}_img --write_images_format jpg --write_json $outputFolder
seqList = sorted(glob.glob('{0}/*'.format(inputDir_root)) )
for i, inputPath in enumerate(seqList):
seqName = os.path.basename(inputPath)
print(seqName)
# if not ("outdoors_fencing_01" in seqName or "downtown_walking_00" in seqName or "outdoors_fencing_01" in seqName):
# continue
outputFolder_img = os.path.join(img_outputDir_root,seqName)
outputFolder_pkl = os.path.join(json_outputDir_root,seqName+'.pkl')
if not os.path.exists(outputFolder_pkl):
params = ['dump','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'--output',outputFolder_pkl,'-v']
denseposeRunner(params)
if not os.path.exists(outputFolder_img):
os.mkdir(outputFolder_img)
params = ['show','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'dp_contour,bbox','--output','{}/output.jpg'.format(outputFolder_img),'-v']
denseposeRunner(params)
break # debug leftover: only the first sequence is processed; remove to run all
# cmd_str = "cd /home/hjoo/codes/openpose; ./build/examples/openpose/openpose.bin --image_dir {0} --write_images {1} --write_images_format jpg --write_json {2}".format(inputPath,
# outputFolder_img, outputFolder_json)
# cmd_str = "python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml model_final_5f3d7f.pkl \"{}/*.jpg\" dp_contour,bbox -v --output {}".format(inputPath, outputFolder_img)
# print(cmd_str)
# os.system(cmd_str)
#./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images ${outputFolder}_img --write_images_format jpg --write_json $outputFolder
|
jhugestar/detectron2 | projects/DensePose/run_penn.py | from apply_net import denseposeRunner
import sys
import glob
import os
def runPennAction(startIdx, endIdx):
# mocapRootDir = '/run/media/hjoo/disk/data/Penn_Action/labels'
g_bIsDevfair = False
if os.path.exists('/private/home/hjoo'):
g_bIsDevfair = True
if g_bIsDevfair:
inputDir_root = '/private/home/hjoo/data/pennaction/frames'
img_outputDir_root = '/private/home/hjoo/data/pennaction/densepose_img'
json_outputDir_root = '/private/home/hjoo/data/pennaction/densepose'
else:
assert False
if not os.path.exists(img_outputDir_root):
os.mkdir(img_outputDir_root)
if not os.path.exists(json_outputDir_root):
os.mkdir(json_outputDir_root)
# inputFolder=$1
# outputFolder=$2
# #./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images $outputFolder --write_images_format jpg
# echo ./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images ${outputFolder}_img --write_images_format jpg --write_json $outputFolder
# seqList = sorted(glob.glob('{0}/*'.format(inputDir_root)) )
for seqIdx in range(startIdx, endIdx):
seqName = '{:04d}'.format(seqIdx)
print(seqName)
inputPath = os.path.join(inputDir_root,seqName)
# if not ("outdoors_fencing_01" in seqName or "downtown_walking_00" in seqName or "outdoors_fencing_01" in seqName):
# continue
outputFolder_img = os.path.join(img_outputDir_root,seqName)
outputFolder_pkl = os.path.join(json_outputDir_root,seqName+'.pkl')
if not os.path.exists(outputFolder_pkl):
print(">>> Running:{}".format(outputFolder_img))
params = ['dump','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'--output',outputFolder_pkl,'-v']
denseposeRunner(params)
else:
print(">>> Already exists:{}".format(outputFolder_img))
# if not os.path.exists(outputFolder_img):
# os.mkdir(outputFolder_img)
# print(">>> Running:{}".format(outputFolder_img))
# params = ['show','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'dp_contour,bbox','--output','{}/output.jpg'.format(outputFolder_img),'-v']
# denseposeRunner(params)
# else:
# print(">>> Already exists:{}".format(outputFolder_img))
def runPennAction_img(startIdx, endIdx):
# mocapRootDir = '/run/media/hjoo/disk/data/Penn_Action/labels'
g_bIsDevfair = False
if os.path.exists('/private/home/hjoo'):
g_bIsDevfair = True
if g_bIsDevfair:
inputDir_root = '/private/home/hjoo/data/pennaction/frames'
img_outputDir_root = '/private/home/hjoo/data/pennaction/densepose_img'
json_outputDir_root = '/private/home/hjoo/data/pennaction/densepose'
else:
assert False
if not os.path.exists(img_outputDir_root):
os.mkdir(img_outputDir_root)
if not os.path.exists(json_outputDir_root):
os.mkdir(json_outputDir_root)
# inputFolder=$1
# outputFolder=$2
# #./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images $outputFolder --write_images_format jpg
# echo ./build/examples/openpose/openpose.bin --image_dir $inputFolder --write_images ${outputFolder}_img --write_images_format jpg --write_json $outputFolder
# seqList = sorted(glob.glob('{0}/*'.format(inputDir_root)) )
for seqIdx in range(startIdx, endIdx):
seqName = '{:04d}'.format(seqIdx)
print(seqName)
inputPath = os.path.join(inputDir_root,seqName)
# if not ("outdoors_fencing_01" in seqName or "downtown_walking_00" in seqName or "outdoors_fencing_01" in seqName):
# continue
outputFolder_img = os.path.join(img_outputDir_root,seqName)
outputFolder_pkl = os.path.join(json_outputDir_root,seqName+'.pkl')
# if not os.path.exists(outputFolder_pkl):
# print(">>> Running:{}".format(outputFolder_img))
# params = ['dump','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'--output',outputFolder_pkl,'-v']
# denseposeRunner(params)
# else:
# print(">>> Already exists:{}".format(outputFolder_img))
if not os.path.exists(outputFolder_img):
os.mkdir(outputFolder_img)
print(">>> Running:{}".format(outputFolder_img))
params = ['show','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'dp_contour,bbox','--output','{}/output.jpg'.format(outputFolder_img),'-v']
denseposeRunner(params)
else:
print(">>> Already exists:{}".format(outputFolder_img))
if __name__ == "__main__":
interval = 20
for i in range(0,2250,interval):
print('runPennAction({},{})'.format(i, i+ interval))
# runPennAction(2,10)
|
jhugestar/detectron2 | projects/DensePose/submit_pennaction.py | import submitit
executor = submitit.AutoExecutor(folder="pennImg")
executor.update_parameters(timeout_min=4320, gpus_per_node=1, cpus_per_task=8, partition="learnfair", comment= 'CVPR 11/15', name='pennImg') # timeout in min
from run_penn import runPennAction, runPennAction_img
interval = 100
for i in range(0,2200,interval):
print('>> runPennAction({},{})'.format(i, i+ interval))
# job = executor.submit(runPennAction,i,i+interval)
job = executor.submit(runPennAction_img,i,i+interval)
# runPennAction(i,+ interval)
# runPennAction(2,10)
|
jhugestar/detectron2 | projects/DensePose/submit_3dpw.py | import glob
import os
import submitit
from apply_net import denseposeRunner
executor = submitit.AutoExecutor(folder="3dpwImg2")
executor.update_parameters(timeout_min=4320, gpus_per_node=1, cpus_per_task=8, partition="learnfair", comment= 'CVPR 11/15', name='3dpwImg2') # timeout in min
# mocapRootDir = '/run/media/hjoo/disk/data/Penn_Action/labels'
g_bIsDevfair = False
if os.path.exists('/private/home/hjoo'):
g_bIsDevfair = True
if g_bIsDevfair:
inputDir_root = '/private/home/hjoo/data/3dpw/imageFiles'
img_outputDir_root = '/private/home/hjoo/data/3dpw/densepose_img'
json_outputDir_root = '/private/home/hjoo/data/3dpw/densepose'
else:
inputDir_root = '/run/media/hjoo/disk/data/3dpw/imageFiles'
img_outputDir_root = '/run/media/hjoo/disk/data/3dpw/densepose_img'
json_outputDir_root = '/run/media/hjoo/disk/data/3dpw/densepose'
if not os.path.exists(img_outputDir_root):
os.mkdir(img_outputDir_root)
if not os.path.exists(json_outputDir_root):
os.mkdir(json_outputDir_root)
seqList = sorted(glob.glob('{0}/*'.format(inputDir_root)) )
for i, inputPath in enumerate(seqList):
seqName = os.path.basename(inputPath)
outputFolder_img = os.path.join(img_outputDir_root,seqName)
outputFolder_pkl = os.path.join(json_outputDir_root,seqName+'.pkl')
# if not os.path.exists(outputFolder_pkl):
# params = ['dump','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'--output',outputFolder_pkl,'-v']
# print(">>> Submitting:{}".format(outputFolder_pkl))
# # denseposeRunner(params)
# job = executor.submit(denseposeRunner,params)
# else:
# print(">>> Already exists:{}".format(outputFolder_pkl))
if not os.path.exists(outputFolder_img):
params = ['show','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'dp_contour,bbox','--output','{}/output.jpg'.format(outputFolder_img),'-v']
print(">>> Submitting:{}".format(outputFolder_img))
# denseposeRunner(params)
job = executor.submit(denseposeRunner,params)
else:
print(">>> Already exists:{}".format(outputFolder_img))
# if not os.path.exists(outputFolder_img):
# os.mkdir(outputFolder_img)
# # denseposeRunner(params)
# job = executor.submit(denseposeRunner,params)
# params = ['show','configs/densepose_rcnn_R_50_FPN_s1x.yaml','model_final_5f3d7f.pkl','{}/*.jpg'.format(inputPath),'dp_contour,bbox','--output','{}/output.jpg'.format(outputFolder_img),'-v']
# caller(params)
# job = executor.submit(trainerWrapper,['--bRandOcc', '--skelType','coco_noeyeear','--w_angleLoss','1e4','--w_3dJ_smpl_Loss','0.1', '--w_3dJ_coco_Loss','0.1', '--bPredAnkle','--data_dir','dataset/data_amass_fbbox_noShape/', '--train_batch','20000','--test_batch','2048','--job','3','--train_db','All', '--load', '/private/home/hjoo/dropbox_checkpoint/10-17-44257-bMini_0-WShp_0.0-WAng_10000.0-W3JSM_0.1-W3JCO_0.1-db_All-rCrop_0-ocT_all-skeT_coco_noeyeear-ranOc_1-pAkl_1-bLo_0_best_epoch153/ckpt_last.pth.tar'])
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/tables.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
_SNAKE_TO_CAMEL_CASE_TABLE = {
"b64_std": "b64Std",
"b64_url": "b64Url",
"byte_length": "byteLength",
"min_lower": "minLower",
"min_numeric": "minNumeric",
"min_special": "minSpecial",
"min_upper": "minUpper",
"override_special": "overrideSpecial",
"result_count": "resultCount",
}
_CAMEL_TO_SNAKE_CASE_TABLE = {
"b64Std": "b64_std",
"b64Url": "b64_url",
"byteLength": "byte_length",
"minLower": "min_lower",
"minNumeric": "min_numeric",
"minSpecial": "min_special",
"minUpper": "min_upper",
"overrideSpecial": "override_special",
"resultCount": "result_count",
}
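# Lookup sketch: the resource classes translate property names through these
# tables, e.g. _SNAKE_TO_CAMEL_CASE_TABLE.get('byte_length') -> 'byteLength';
# unmapped names fall through unchanged.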
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_pet.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomPet(pulumi.CustomResource):
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
"""
length: pulumi.Output[float]
"""
The length (in words) of the pet name.
"""
prefix: pulumi.Output[str]
"""
A string to prefix the name with.
"""
separator: pulumi.Output[str]
"""
The character to separate words in the pet name.
"""
def __init__(__self__, resource_name, opts=None, keepers=None, length=None, prefix=None, separator=None, __props__=None, __name__=None, __opts__=None):
"""
The resource `.RandomPet` generates random pet names that are intended to be
used as unique identifiers for other resources.
This resource can be used in conjunction with resources that have
the `create_before_destroy` lifecycle flag set, to avoid conflicts with
unique names during the brief period where both the old and new resources
exist concurrently.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] length: The length (in words) of the pet name.
:param pulumi.Input[str] prefix: A string to prefix the name with.
:param pulumi.Input[str] separator: The character to separate words in the pet name.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['keepers'] = keepers
__props__['length'] = length
__props__['prefix'] = prefix
__props__['separator'] = separator
super(RandomPet, __self__).__init__(
'random:index/randomPet:RandomPet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, keepers=None, length=None, prefix=None, separator=None):
"""
Get an existing RandomPet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] length: The length (in words) of the pet name.
:param pulumi.Input[str] prefix: A string to prefix the name with.
:param pulumi.Input[str] separator: The character to separate words in the pet name.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["keepers"] = keepers
__props__["length"] = length
__props__["prefix"] = prefix
__props__["separator"] = separator
return RandomPet(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
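# Usage sketch (assumed Pulumi program context):
# import pulumi_random as random
# pet = random.RandomPet('server-name', length=3, separator='-')
# the generated name is then available as pet.id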
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_string.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomString(pulumi.CustomResource):
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
"""
length: pulumi.Output[float]
"""
The length of the string desired
"""
lower: pulumi.Output[bool]
"""
(default true) Include lowercase alphabet characters
in random string.
"""
min_lower: pulumi.Output[float]
"""
(default 0) Minimum number of lowercase alphabet
characters in random string.
"""
min_numeric: pulumi.Output[float]
"""
(default 0) Minimum number of numeric characters
in random string.
"""
min_special: pulumi.Output[float]
"""
(default 0) Minimum number of special characters
in random string.
"""
min_upper: pulumi.Output[float]
"""
(default 0) Minimum number of uppercase alphabet
characters in random string.
"""
number: pulumi.Output[bool]
"""
(default true) Include numeric characters in random
string.
"""
override_special: pulumi.Output[str]
"""
Supply your own list of special characters to
use for string generation. This overrides the default character list in the special
argument. The special argument must still be set to true for any overwritten
characters to be used in generation.
"""
result: pulumi.Output[str]
"""
Random string generated.
"""
special: pulumi.Output[bool]
"""
(default true) Include special characters in random
string. These are `!@#$%&*()-_=+[]{}<>:?`
"""
upper: pulumi.Output[bool]
"""
(default true) Include uppercase alphabet characters
in random string.
"""
def __init__(__self__, resource_name, opts=None, keepers=None, length=None, lower=None, min_lower=None, min_numeric=None, min_special=None, min_upper=None, number=None, override_special=None, special=None, upper=None, __props__=None, __name__=None, __opts__=None):
"""
The resource `.RandomString` generates a random permutation of alphanumeric
characters and optionally special characters.
This resource *does* use a cryptographic random number generator.
Historically this resource's intended usage has been ambiguous as the original example
used it in a password. For backwards compatibility it will
continue to exist. For unique ids please use random_id, for sensitive
random values please use random_password.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] length: The length of the string desired
:param pulumi.Input[bool] lower: (default true) Include lowercase alphabet characters
in random string.
:param pulumi.Input[float] min_lower: (default 0) Minimum number of lowercase alphabet
characters in random string.
:param pulumi.Input[float] min_numeric: (default 0) Minimum number of numeric characters
in random string.
:param pulumi.Input[float] min_special: (default 0) Minimum number of special characters
in random string.
:param pulumi.Input[float] min_upper: (default 0) Minimum number of uppercase alphabet
characters in random string.
:param pulumi.Input[bool] number: (default true) Include numeric characters in random
string.
:param pulumi.Input[str] override_special: Supply your own list of special characters to
use for string generation. This overrides the default character list in the special
argument. The special argument must still be set to true for any overwritten
characters to be used in generation.
:param pulumi.Input[bool] special: (default true) Include special characters in random
string. These are `!@#$%&*()-_=+[]{}<>:?`
:param pulumi.Input[bool] upper: (default true) Include uppercase alphabet characters
in random string.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['keepers'] = keepers
if length is None:
raise TypeError("Missing required property 'length'")
__props__['length'] = length
__props__['lower'] = lower
__props__['min_lower'] = min_lower
__props__['min_numeric'] = min_numeric
__props__['min_special'] = min_special
__props__['min_upper'] = min_upper
__props__['number'] = number
__props__['override_special'] = override_special
__props__['special'] = special
__props__['upper'] = upper
__props__['result'] = None
super(RandomString, __self__).__init__(
'random:index/randomString:RandomString',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, keepers=None, length=None, lower=None, min_lower=None, min_numeric=None, min_special=None, min_upper=None, number=None, override_special=None, result=None, special=None, upper=None):
"""
Get an existing RandomString resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] length: The length of the string desired
:param pulumi.Input[bool] lower: (default true) Include lowercase alphabet characters
in random string.
:param pulumi.Input[float] min_lower: (default 0) Minimum number of lowercase alphabet
characters in random string.
:param pulumi.Input[float] min_numeric: (default 0) Minimum number of numeric characters
in random string.
:param pulumi.Input[float] min_special: (default 0) Minimum number of special characters
in random string.
:param pulumi.Input[float] min_upper: (default 0) Minimum number of uppercase alphabet
characters in random string.
:param pulumi.Input[bool] number: (default true) Include numeric characters in random
string.
:param pulumi.Input[str] override_special: Supply your own list of special characters to
use for string generation. This overrides the default character list in the special
argument. The special argument must still be set to true for any overwritten
characters to be used in generation.
:param pulumi.Input[str] result: Random string generated.
:param pulumi.Input[bool] special: (default true) Include special characters in random
string. These are `!@#$%&*()-_=+[]{}<>:?`
:param pulumi.Input[bool] upper: (default true) Include uppercase alphabet characters
in random string.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["keepers"] = keepers
__props__["length"] = length
__props__["lower"] = lower
__props__["min_lower"] = min_lower
__props__["min_numeric"] = min_numeric
__props__["min_special"] = min_special
__props__["min_upper"] = min_upper
__props__["number"] = number
__props__["override_special"] = override_special
__props__["result"] = result
__props__["special"] = special
__props__["upper"] = upper
return RandomString(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
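# Usage sketch (assumed Pulumi program context):
# import pulumi_random as random
# password = random.RandomString('db-password', length=16, special=True,
#                                override_special='_%@')
# the generated value is then available as password.result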
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_integer.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomInteger(pulumi.CustomResource):
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
"""
max: pulumi.Output[float]
"""
The maximum inclusive value of the range.
"""
min: pulumi.Output[float]
"""
The minimum inclusive value of the range.
"""
result: pulumi.Output[float]
"""
(int) The random Integer result.
"""
seed: pulumi.Output[str]
"""
A custom seed to always produce the same value.
"""
def __init__(__self__, resource_name, opts=None, keepers=None, max=None, min=None, seed=None, __props__=None, __name__=None, __opts__=None):
"""
The resource `.RandomInteger` generates random values from a given range, described by the `min` and `max` attributes of a given resource.
This resource can be used in conjunction with resources that have
the `create_before_destroy` lifecycle flag set, to avoid conflicts with
unique names during the brief period where both the old and new resources
exist concurrently.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] max: The maximum inclusive value of the range.
:param pulumi.Input[float] min: The minimum inclusive value of the range.
:param pulumi.Input[str] seed: A custom seed to always produce the same value.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['keepers'] = keepers
if max is None:
raise TypeError("Missing required property 'max'")
__props__['max'] = max
if min is None:
raise TypeError("Missing required property 'min'")
__props__['min'] = min
__props__['seed'] = seed
__props__['result'] = None
super(RandomInteger, __self__).__init__(
'random:index/randomInteger:RandomInteger',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, keepers=None, max=None, min=None, result=None, seed=None):
"""
Get an existing RandomInteger resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] max: The maximum inclusive value of the range.
:param pulumi.Input[float] min: The minimum inclusive value of the range.
:param pulumi.Input[float] result: (int) The random Integer result.
:param pulumi.Input[str] seed: A custom seed to always produce the same value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["keepers"] = keepers
__props__["max"] = max
__props__["min"] = min
__props__["result"] = result
__props__["seed"] = seed
return RandomInteger(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
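# Usage sketch (assumed Pulumi program context): pick a port in a range;
# the same seed always yields the same result.
# import pulumi_random as random
# port = random.RandomInteger('port', min=1024, max=65535, seed='my-seed')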
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_id.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomId(pulumi.CustomResource):
b64: pulumi.Output[str]
b64_std: pulumi.Output[str]
"""
The generated id presented in base64 without additional transformations.
"""
b64_url: pulumi.Output[str]
"""
The generated id presented in base64, using the URL-friendly character set: case-sensitive letters, digits and the characters `_` and `-`.
"""
byte_length: pulumi.Output[float]
"""
The number of random bytes to produce. The
minimum value is 1, which produces eight bits of randomness.
"""
dec: pulumi.Output[str]
"""
The generated id presented in non-padded decimal digits.
"""
hex: pulumi.Output[str]
"""
The generated id presented in padded hexadecimal digits. This result will always be twice as long as the requested byte length.
"""
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
"""
prefix: pulumi.Output[str]
"""
Arbitrary string to prefix the output value with. This
string is supplied as-is, meaning it is not guaranteed to be URL-safe or
base64 encoded.
"""
def __init__(__self__, resource_name, opts=None, byte_length=None, keepers=None, prefix=None, __props__=None, __name__=None, __opts__=None):
"""
The resource `.RandomId` generates random numbers that are intended to be
used as unique identifiers for other resources.
This resource *does* use a cryptographic random number generator in order
to minimize the chance of collisions, making the results of this resource
when a 16-byte identifier is requested of equivalent uniqueness to a
type-4 UUID.
This resource can be used in conjunction with resources that have
the `create_before_destroy` lifecycle flag set to avoid conflicts with
unique names during the brief period where both the old and new resources
exist concurrently.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] byte_length: The number of random bytes to produce. The
minimum value is 1, which produces eight bits of randomness.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[str] prefix: Arbitrary string to prefix the output value with. This
string is supplied as-is, meaning it is not guaranteed to be URL-safe or
base64 encoded.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if byte_length is None:
raise TypeError("Missing required property 'byte_length'")
__props__['byte_length'] = byte_length
__props__['keepers'] = keepers
__props__['prefix'] = prefix
__props__['b64'] = None
__props__['b64_std'] = None
__props__['b64_url'] = None
__props__['dec'] = None
__props__['hex'] = None
super(RandomId, __self__).__init__(
'random:index/randomId:RandomId',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, b64=None, b64_std=None, b64_url=None, byte_length=None, dec=None, hex=None, keepers=None, prefix=None):
"""
Get an existing RandomId resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] b64_std: The generated id presented in base64 without additional transformations.
:param pulumi.Input[str] b64_url: The generated id presented in base64, using the URL-friendly character set: case-sensitive letters, digits and the characters `_` and `-`.
:param pulumi.Input[float] byte_length: The number of random bytes to produce. The
minimum value is 1, which produces eight bits of randomness.
:param pulumi.Input[str] dec: The generated id presented in non-padded decimal digits.
:param pulumi.Input[str] hex: The generated id presented in padded hexadecimal digits. This result will always be twice as long as the requested byte length.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[str] prefix: Arbitrary string to prefix the output value with. This
string is supplied as-is, meaning it is not guaranteed to be URL-safe or
base64 encoded.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["b64"] = b64
__props__["b64_std"] = b64_std
__props__["b64_url"] = b64_url
__props__["byte_length"] = byte_length
__props__["dec"] = dec
__props__["hex"] = hex
__props__["keepers"] = keepers
__props__["prefix"] = prefix
return RandomId(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
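# A minimal usage sketch (not part of the generated file; the resource name,
# byte length, and keeper values are illustrative assumptions):
#
#   import pulumi
#   import pulumi_random as random
#
#   server_id = random.RandomId("server-id", byte_length=8,
#                               keepers={"ami_id": "ami-12345678"})
#   pulumi.export("hex_id", server_id.hex)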
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/__init__.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .provider import *
from .random_id import *
from .random_integer import *
from .random_password import *
from .random_pet import *
from .random_shuffle import *
from .random_string import *
from .random_uuid import *
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_shuffle.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomShuffle(pulumi.CustomResource):
inputs: pulumi.Output[list]
"""
The list of strings to shuffle.
"""
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
"""
result_count: pulumi.Output[float]
"""
The number of results to return. Defaults to
the number of items in the `input` list. If fewer items are requested,
some elements will be excluded from the result. If more items are requested,
items will be repeated in the result but not more frequently than the number
of items in the input list.
"""
results: pulumi.Output[list]
"""
Random permutation of the list of strings given in `input`.
"""
seed: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, inputs=None, keepers=None, result_count=None, seed=None, __props__=None, __name__=None, __opts__=None):
"""
The resource `.RandomShuffle` generates a random permutation of a list
of strings given as an argument.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] inputs: The list of strings to shuffle.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] result_count: The number of results to return. Defaults to
the number of items in the `input` list. If fewer items are requested,
some elements will be excluded from the result. If more items are requested,
items will be repeated in the result but not more frequently than the number
of items in the input list.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if inputs is None:
raise TypeError("Missing required property 'inputs'")
__props__['inputs'] = inputs
__props__['keepers'] = keepers
__props__['result_count'] = result_count
__props__['seed'] = seed
__props__['results'] = None
super(RandomShuffle, __self__).__init__(
'random:index/randomShuffle:RandomShuffle',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, inputs=None, keepers=None, result_count=None, results=None, seed=None):
"""
Get an existing RandomShuffle resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] inputs: The list of strings to shuffle.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new id to be generated. See
the main provider documentation for more information.
:param pulumi.Input[float] result_count: The number of results to return. Defaults to
the number of items in the `input` list. If fewer items are requested,
some elements will be excluded from the result. If more items are requested,
items will be repeated in the result but not more frequently than the number
of items in the input list.
:param pulumi.Input[list] results: Random permutation of the list of strings given in `input`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["inputs"] = inputs
__props__["keepers"] = keepers
__props__["result_count"] = result_count
__props__["results"] = results
__props__["seed"] = seed
return RandomShuffle(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
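# A minimal usage sketch (not part of the generated file; the input list and
# result count are illustrative assumptions):
#
#   import pulumi
#   import pulumi_random as random
#
#   az_order = random.RandomShuffle("az-order",
#                                   inputs=["us-west-1a", "us-west-1c", "us-west-1d"],
#                                   result_count=2)
#   pulumi.export("chosen_azs", az_order.results)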
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_uuid.py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomUuid(pulumi.CustomResource):
keepers: pulumi.Output[dict]
"""
Arbitrary map of values that, when changed, will
trigger a new uuid to be generated. See
the main provider documentation for more information.
"""
result: pulumi.Output[str]
"""
The generated uuid presented in string format.
"""
def __init__(__self__, resource_name, opts=None, keepers=None, __props__=None, __name__=None, __opts__=None):
"""
        The resource `.RandomUuid` generates a random UUID string that is intended to be
        used as a unique identifier for other resources.
        This resource uses the `hashicorp/go-uuid` library to generate a UUID-formatted
        string for use with services needing a unique string identifier.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new uuid to be generated. See
the main provider documentation for more information.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['keepers'] = keepers
__props__['result'] = None
super(RandomUuid, __self__).__init__(
'random:index/randomUuid:RandomUuid',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, keepers=None, result=None):
"""
Get an existing RandomUuid resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] keepers: Arbitrary map of values that, when changed, will
trigger a new uuid to be generated. See
the main provider documentation for more information.
:param pulumi.Input[str] result: The generated uuid presented in string format.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["keepers"] = keepers
__props__["result"] = result
return RandomUuid(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
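# A minimal usage sketch (not part of the generated file; the keeper value is
# an illustrative assumption):
#
#   import pulumi
#   import pulumi_random as random
#
#   test_id = random.RandomUuid("test-id", keepers={"environment": "staging"})
#   pulumi.export("uuid", test_id.result)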
|
pulumi-bot/pulumi-random | sdk/python/pulumi_random/random_password.py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class RandomPassword(pulumi.CustomResource):
keepers: pulumi.Output[dict]
length: pulumi.Output[float]
lower: pulumi.Output[bool]
min_lower: pulumi.Output[float]
min_numeric: pulumi.Output[float]
min_special: pulumi.Output[float]
min_upper: pulumi.Output[float]
number: pulumi.Output[bool]
override_special: pulumi.Output[str]
result: pulumi.Output[str]
special: pulumi.Output[bool]
upper: pulumi.Output[bool]
def __init__(__self__, resource_name, opts=None, keepers=None, length=None, lower=None, min_lower=None, min_numeric=None, min_special=None, min_upper=None, number=None, override_special=None, special=None, upper=None, __props__=None, __name__=None, __opts__=None):
"""
> **Note:** Requires random provider version >= 2.2.0
        Identical to `.RandomString` with the exception that the
result is treated as sensitive and, thus, _not_ displayed in console output.
> **Note:** All attributes including the generated password will be stored in
the raw state as plain-text. [Read more about sensitive data in
state](https://www.terraform.io/docs/state/sensitive-data.html).
This resource *does* use a cryptographic random number generator.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['keepers'] = keepers
if length is None:
raise TypeError("Missing required property 'length'")
__props__['length'] = length
__props__['lower'] = lower
__props__['min_lower'] = min_lower
__props__['min_numeric'] = min_numeric
__props__['min_special'] = min_special
__props__['min_upper'] = min_upper
__props__['number'] = number
__props__['override_special'] = override_special
__props__['special'] = special
__props__['upper'] = upper
__props__['result'] = None
super(RandomPassword, __self__).__init__(
'random:index/randomPassword:RandomPassword',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, keepers=None, length=None, lower=None, min_lower=None, min_numeric=None, min_special=None, min_upper=None, number=None, override_special=None, result=None, special=None, upper=None):
"""
Get an existing RandomPassword resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["keepers"] = keepers
__props__["length"] = length
__props__["lower"] = lower
__props__["min_lower"] = min_lower
__props__["min_numeric"] = min_numeric
__props__["min_special"] = min_special
__props__["min_upper"] = min_upper
__props__["number"] = number
__props__["override_special"] = override_special
__props__["result"] = result
__props__["special"] = special
__props__["upper"] = upper
return RandomPassword(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
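# A minimal usage sketch (not part of the generated file; the length and
# character options are illustrative assumptions):
#
#   import pulumi_random as random
#
#   db_password = random.RandomPassword("db-password", length=16, special=True,
#                                       override_special="_%@")
#   # `result` is treated as sensitive, so avoid exporting it in plain text.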
|
mfrashad/blockchain | register_nodes.py | import os
import requests
import json
stream = os.popen('sudo docker node ps $(sudo docker node ls -q) --filter desired-state=Running | uniq | grep blockchain_app | cut -d " " -f1')
processes = stream.read().splitlines()
print("Processes : ", processes)
overlay_addresses = []
port = "5000"
for process in processes:
stream = os.popen(f'sudo docker inspect {process} | grep "10.0." | grep ":" -v | cut -d / -f1 | sed \'s/[ "]//g\'')
ip = stream.read().strip()
print(f'{process} : {ip}')
overlay_addresses.append(f'http://{ip}:{port}')
print("\nOverlay_addresses : ", overlay_addresses, "\n")
stream = os.popen('sudo docker ps | grep blockchain_app | cut -d " " -f1')
containers = stream.read().splitlines()
print("Containers in current swarm node : ", containers)
addresses = []
for container in containers:
stream = os.popen(f'sudo docker exec -ti {container} ifconfig eth2 | grep inet | cut -d : -f2 | cut -d " " -f1')
ip = stream.read().strip()
print(f'{container} : {ip}')
addresses.append(f'http://{ip}:{port}')
print("\naddresses : ", addresses, "\n")
headers = {"Content-Type":"application/json"}
for address in addresses:
print("Registering node at ", address)
payload = {"nodes":overlay_addresses}
print(json.dumps(payload))
    r = requests.post(f'{address}/nodes/register', headers=headers, data=json.dumps(payload))
print(r.text)
|
MasterCash/Creer | creer/__init__.py |
import os
import creer.data
import creer.prototype
import creer.template
import creer.writer
import creer.input
GAMES_DIR = '../Games/'
def run(games, inputs, output, merge=False, tagless=False, no_write=False):
if len(games) == 0:
raise Exception('No game(s) provided to run Creer against')
if len(games) == 1 and games[0].lower() == 'all':
# then games is actually the list of all the game names, by dir names
games = [
name for name in sorted(os.listdir(GAMES_DIR))
if os.path.isdir(os.path.join(GAMES_DIR, name))
]
all_generated_files = []
for game in games:
print('~~~~~~ {} ~~~~~~'.format(game))
datas = creer.data.parse(game)
proto = creer.prototype.build(datas)
inputs = creer.input.validate(inputs)
all_generated_files.append(
creer.template.build_all(proto, inputs, output, merge, tagless)
)
if not no_write:
for generated_files in all_generated_files:
creer.writer.write(generated_files)
else:
print("Creer Success! Not writing any files.")
|
MasterCash/Creer | creer/prototype.py | from creer.utilities import extend, copy_dict, sort_dict_keys
import creer.default as default
import creer.validate
import hashlib
import json
def _copy_from(obj, keys):
d = {}
for key in keys:
d[key] = obj[key]
return d
def _clean_functions(obj):
cleaned = {}
if 'functions' in obj:
for func_name, func_data in obj['functions'].items():
cleaned[func_name] = {
'arguments': [],
'returns': None,
}
for attr in func_data['arguments']:
cleaned[func_name]['arguments'].append(_copy_from(attr, ['name', 'optional', 'type']))
if func_data['returns']:
cleaned[func_name]['returns'] = _copy_from(func_data['returns'], ['type'])
return cleaned
def _clean_attributes(obj):
cleaned = {}
if 'attributes' in obj:
for attr_name, attr_data in obj['attributes'].items():
cleaned[attr_name] = _copy_from(attr_data, ['type'])
return cleaned
def _proto_clean(proto):
cleaned = {
'AI': { 'functions': _clean_functions(proto['ai']) },
'Game': {'attributes': _clean_attributes(proto['game']) },
}
for game_obj_name, game_obj in proto['game_objects'].items():
cleaned[game_obj_name] = {
'attributes': _clean_attributes(game_obj),
'functions': _clean_functions(game_obj),
}
return cleaned
def _inherit_into(obj, parent_class, game_objects):
parent = game_objects[parent_class]
for parm_type in ["attributes", "functions"]:
for parm_key, parm_parms in parent[parm_type].items():
obj['inherited' + parm_type.capitalize()][parm_key] = copy_dict(parm_parms, {
'inheritedFrom': parent_class
})
for parent_parent_class in parent['parentClasses']:
_inherit_into(obj, parent_parent_class, game_objects)
def build(datas):
parent_keys = ['main']
parent_datas = []
parent_data_names = []
while len(parent_keys) > 0:
parent_key = parent_keys.pop()
parent_data = datas[parent_key]
if parent_key != 'main':
parent_data_names.append(parent_key)
parent_datas.append(parent_data)
# now look if that data had parent data to continue investigating
if not '_parentDatas' in parent_data:
parent_data['_parentDatas'] = []
for new_parent_key in parent_data['_parentDatas']:
parent_keys.append(new_parent_key)
parent_datas.append(datas['base']) # all games get the base data
# merge all the prototypes inherited into one prototype
prototype = {}
for parent_data in reversed(parent_datas):
extend(prototype, parent_data)
# extend won't do this correctly. multiple data may pre-define parent classes and will get overwritten via extend. this appends each additional class name
for proto_key, proto in prototype.items():
if proto_key[0] == "_":
continue
newServerParentClasses = []
if 'serverParentClasses' in proto:
for parent_data in reversed(parent_datas):
if proto_key in parent_data and 'serverParentClasses' in parent_data[proto_key]:
for parent_class_name in parent_data[proto_key]['serverParentClasses']:
newServerParentClasses.append(parent_class_name)
proto['serverParentClasses'] = newServerParentClasses
game_objects = {}
game = prototype['Game']
if not 'name' in game:
raise Exception("Error: no name given for the main game data. Name your Game!!!")
default.game_obj(game, "Game")
ai = prototype['AI']
del prototype['AI']
default.functions_for(ai, "AI")
if len(game['serverParentClasses']) == 0:
game['serverParentClasses'].append("BaseGame")
for obj_key, obj in prototype.items():
if obj_key == "Game" or obj_key[0] == "_":
continue
if obj_key == "GameObject" and len(obj['serverParentClasses']) == 0:
obj['serverParentClasses'] = [ 'BaseGameObject' ]
default.game_obj(obj, obj_key)
if obj_key != "GameObject" and len(obj['parentClasses']) == 0:
obj['parentClasses'].append("GameObject")
game_objects[obj_key] = obj
for obj_key, obj in (copy_dict(game_objects, {'Game': game}).items()):
obj['inheritedAttributes'] = {}
obj['inheritedFunctions'] = {}
for parent_class in obj['parentClasses']:
_inherit_into(obj, parent_class, game_objects)
# now all the prototypes should be built, so sort the attribute/function keys
for proto_key, proto in prototype.items():
if proto_key[0] == '_':
continue
proto['function_names'] = sort_dict_keys(proto['functions'])
proto['attribute_names'] = sort_dict_keys(proto['attributes'])
proto['inheritedFunction_names'] = sort_dict_keys(proto['inheritedFunctions'])
proto['inheritedAttribute_names'] = sort_dict_keys(proto['inheritedAttributes'])
ai['function_names'] = sort_dict_keys(ai['functions'])
creer.validate.validate(prototype)
proto = {
'game_objects': game_objects,
'game': game,
'ai': ai
}
min_game_data = _proto_clean(proto)
as_string = json.dumps(min_game_data, sort_keys=True)
as_bytes = bytes(as_string, 'utf8')
sha = hashlib.sha256()
sha.update(as_bytes)
proto['parent_data_names'] = parent_data_names
proto['game_version'] = sha.hexdigest()
return proto
|
MasterCash/Creer | creer/githash.py | import subprocess
def get():
try:
return (subprocess.check_output(['git', 'rev-parse', 'HEAD'])).decode("utf-8").rstrip()
    except Exception:
return "Error: git probably not installed"
|
MasterCash/Creer | creer/validate.py |
# this validates a prototype to ensure none of the data/types/setup will screw with an output template
# basically, this validates Creer input data after it has been parsed
import re
_primitives = [
'string',
'boolean',
'int',
'float',
'list',
'dictionary'
]
_dangerous_names = [
'true',
'false',
'if',
'else',
'continue',
'for',
'end',
'function',
'pass',
'assert',
'eval',
'break',
'import',
'from',
'catch',
'finally',
'null',
'while',
'double',
'float',
'goto',
'return'
]
_valid_types = []
_game_classes = []
def _check(obj, location, key, expected_type):
if type(obj) != dict:
        raise Exception(location + " is not a dict, so cannot check for key " + key)
if not key in obj:
raise Exception("No '{}' in {}".format(key, location))
if type(obj[key]) != expected_type:
raise Exception("{}[{}] is not the expected type '{}'".format(location, key, expected_type))
def _validate_type(obj, location, type_key="type"):
_check(obj, location, type_key, dict)
type_obj = obj[type_key]
_check(type_obj, location + "'s type", "name", str)
name = type_obj['name']
if name == "list" or name == "dictionary":
_validate_type(type_obj, "{}.{}[valueType]".format(location, name), "valueType")
if name == "dictionary":
if not 'keyType' in type_obj:
raise Exception("No 'keyType' for type '{}' at '{}'".format(name, location))
_validate_type(type_obj, "{}.{}[keyType]".format(location, name), "keyType")
if not name in _valid_types:
raise Exception("Type named '{}' is not a primitive or custom class in {}.".format(name, location))
def _validate_description(obj, location):
_check(obj, location, "description", str)
desc = obj["description"]
for c in ['"', "\n", "\t", "\r"]:
if c in desc:
            # repr() keeps whitespace characters visible in the error message
            raise Exception("{} description contains illegal character {}".format(location, repr(c)))
if desc[0].upper() != desc[0]:
raise Exception("Capitalize your doc string in " + location + "'s description")
if desc[-1] != ".":
raise Exception("End your doc strings as sentences with periods in " + location + "'s description")
_required = {
'type': _validate_type,
'description': _validate_description
}
def _check_required(obj, location, additional_reqs=None):
for key, call in _required.items():
call(obj, location)
if additional_reqs:
for key, expected_type in additional_reqs.items():
_check(obj, location, key, expected_type)
def _validate_name(key, obj, pascal=False):
base_err = '"{}" is not a valid name for {}. '.format(key, obj)
search_re = '([A-Z][a-z]+)+' if pascal else '([a-z]+([A-Za-z])?)+'
casing = 'PascalCase' if pascal else 'camelCase'
match = re.search(search_re, key)
if not match or match[0] != key:
raise Exception(base_err + 'Name must be in {}.'.format(casing))
if key.lower() in _primitives:
raise Exception(base_err + 'Too similar to primitive type.')
if key.lower() in _dangerous_names:
raise Exception(base_err + 'Name too similar to popular programming keywords for some clients.')
###############################################################################
## Public Function To Call ##
###############################################################################
def validate(prototype):
for primitive in _primitives:
_valid_types.append(primitive)
for key, value in prototype.items():
if key[0] != "_" and key != "Game" and key != "AI":
_validate_name(key, "custom Game Object", pascal=True)
_game_classes.append(key)
_valid_types.append(key)
for key, value in prototype.items():
if key.startswith("_"):
continue
if key is not "AI":
_validate_description(value, key)
_check(value, key, 'attributes', dict)
for attr_key, attr in value['attributes'].items():
_check_required(attr, key + "." + attr_key)
if key is not "Game" and key is not "GameObject":
if not "parentClasses" in value:
raise Exception(key + " expected to be game object sub class, but has no parent class(es)")
for parent_class in value['parentClasses']:
if not parent_class in _game_classes:
raise Exception("{} has invalid parentClass '{}'".format(key, parent_class))
for attr_name, attr in value['attributes'].items():
_validate_name(attr_name, 'an attribute in ' + key)
_check(value, key, 'functions', dict)
for funct_key, funct in value['functions'].items():
loc = key + "." + funct_key
_check(funct, loc, "description", str)
if "arguments" in funct:
_check(funct, loc, "arguments", list)
optional = None
for i, arg in enumerate(funct['arguments']):
arg_loc = "{}.arguments[{}]".format(loc, i)
_check_required(arg, arg_loc, {'name': str })
_validate_name(arg['name'], arg_loc)
arg_loc += " (" + arg['name'] + ")"
if 'default' in arg and arg['default'] != None:
default = arg['default']
optional = i
def_type = arg['type']['name']
type_of_default = type(default)
if def_type == "string":
if type_of_default != str:
raise Exception("{} default value should be a string, not a {}".format(arg_loc, type_of_default))
elif def_type == "int":
if type_of_default != int:
raise Exception("{} default value should be an integer, not a {}".format(arg_loc, type_of_default))
elif def_type == "float":
if type_of_default != int and type_of_default != float:
raise Exception("{} default value should be a float, not a {}".format(arg_loc, type_of_default))
elif def_type == "boolean":
if type_of_default != bool:
raise Exception("{} default value should be a bool, not a {}".format(arg_loc, type_of_default))
else: # dict, list, or GameObject
if type_of_default != type(None):
raise Exception("{} default value must be null for dictionaries/lists/GameObjects, not a {}".format(arg_loc, type_of_default))
                    if optional is not None and 'default' not in arg:
                        raise Exception("{} has no default to make it optional, but prior index {} was optional. Optional args must all be at the end.".format(arg_loc, optional))
if 'returns' in funct and funct['returns'] != None:
_check_required(funct['returns'], loc + ".returns")
if 'invalidValue' not in funct['returns']:
raise Exception("{} requires an invalidValue for the return".format(loc))
type_of_invalidValue = type(funct['returns']['invalidValue'])
expected_type_name_of_invalidValue = funct['returns']['type']['name']
if expected_type_name_of_invalidValue == 'string' and type_of_invalidValue != str:
raise Exception("{}.invalidValue is not of expected string type (was {})".format(loc, type_of_invalidValue))
if expected_type_name_of_invalidValue == 'boolean' and type_of_invalidValue != bool:
raise Exception("{}.invalidValue is not of expected boolean type (was {})".format(loc, type_of_invalidValue))
if expected_type_name_of_invalidValue == 'int' and type_of_invalidValue != int:
raise Exception("{}.invalidValue is not of expected int type (was {})".format(loc, type_of_invalidValue))
                if expected_type_name_of_invalidValue == 'float' and type_of_invalidValue != int and type_of_invalidValue != float:
                    raise Exception("{}.invalidValue is not of expected float type (was {})".format(loc, type_of_invalidValue))
|
MasterCash/Creer | creer/writer.py |
import os
from shutil import copyfile
def write(generated_files):
for generated_file in generated_files:
if 'copy-from' in generated_file:
# we just need to copy the file from to dest
copyfile(generated_file['copy-from'], generated_file['copy-dest'])
else:
# we have templated contents to write
path = generated_file['path']
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
contents = generated_file['contents']
with open(path, 'wb') as temp_file:
temp_file.write(bytes(contents, 'UTF-8'))
|
MasterCash/Creer | creer/utilities.py | import re
import os
import collections.abc
import operator
def extend(d, u):
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
r = extend(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
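# Illustrative deep merge:
#   extend({"a": {"x": 1}}, {"a": {"y": 2}}) -> {"a": {"x": 1, "y": 2}}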
def list_dirs(path):
folders = []
while path != "" and path != None:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path!="":
folders.append(path)
break
folders.reverse()
return folders
def uncapitalize(s):
return s[:1].lower() + s[1:] if s else ''
def extract_str(raw_string, start_marker, end_marker):
start = raw_string.index(start_marker) + len(start_marker)
end = raw_string.index(end_marker, start)
return raw_string[start:end]
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_case_to_underscore(name):
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def camel_case_to_hyphenate(name):
s1 = first_cap_re.sub(r'\1-\2', name)
return all_cap_re.sub(r'\1-\2', s1).lower()
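# Illustrative conversions (both regexes split on lower-to-upper transitions):
#   camel_case_to_underscore("GameObject") -> "game_object"
#   camel_case_to_hyphenate("GameObject")  -> "game-object"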
def copy_dict(source_dict, diffs):
result=dict(source_dict) # Shallow copy
result.update(diffs)
return result
def sort_dict_keys(d):
return sorted(d)
def sort_dict_values(d):
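    # despite the name, returns (key, value) pairs sorted by key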
return sorted(d.items(), key=operator.itemgetter(0))
def upcase_first(s):
return s[0].upper() + s[1:]
def lowercase_first(s):
return s[0].lower() + s[1:]
def human_string_list(strs, conjunction='or'):
n = len(strs)
if n == 0:
return ''
if n == 1:
return str(strs[0])
if n == 2:
return '{} {} {}'.format(strs[0], conjunction, strs[1])
# else list of >= 3
strs_safe = list(strs)
strs_safe[-1] = '{} {}'.format(conjunction, strs_safe[-1])
return ', '.join(strs_safe)
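# Illustrative output: human_string_list(["int", "float", "string"]) returns
# "int, float, or string"; with conjunction="and" it returns "int, float, and string".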
def is_primitive_type(type_obj):
return (type_obj['name'] in ['null', 'boolean', 'int', 'float', 'string', 'list', 'dictionary'])
|
MasterCash/Creer | creer/input.py |
import glob
from os import path
from creer.template import TEMPLATES_DIR
def validate(inputs):
validated_inputs = []
for input_dir in inputs:
dirs = glob.glob(input_dir)
if not dirs:
raise Exception("No directories matching {}".format(input_dir))
if not glob.glob(path.join(input_dir, TEMPLATES_DIR)):
raise Exception("Cannot template a directory with no Creer templates!\nNo template directory '{}' in {}".format(TEMPLATES_DIR, input_dir))
validated_inputs.extend(dirs)
for validated_input in validated_inputs:
print(">> Input Directory:", validated_input)
return validated_inputs
|
MasterCash/Creer | main.py |
import argparse
import creer
parser = argparse.ArgumentParser(description='Runs the Creer game generator with a main data file against input templates to generate an output skeleton game framework')
parser.add_argument('games', nargs='*', action='store', help='the file(s) or game names that should be treated as the main data file/folder for game generation. Must be json or yaml')
parser.add_argument('-o', '--output', action='store', dest='output', help='the path to the folder to put generated folders and files into. If omitted it will output and overwrite the input files')
parser.add_argument('-i', '--input', action='store', dest='inputs', nargs='+', help='the path(s) to look for templates in "_templates/" to build output from. can be a list of inputs separated via spaces. defaults to all the sibling directories with creer templates.')
parser.add_argument('--merge', action='store_true', dest='merge', default=False, help='if the output files should be merged with existing files')
parser.add_argument('--tagless', action='store_true', dest='tagless', default=False, help='if the Creer-Merge tags should be omitted (a merge is still possible if the input sources have tags).')
parser.add_argument('--test', action='store_true', dest='no_write', default=False, help='If you do not want files to be output (basically validates the generation)')
args = parser.parse_args()
creer.run(**vars(args))
|
MasterCash/Creer | creer/merge.py | from creer.utilities import extract_str
MERGE_KEYWORD_START_PRE = "<<-- Creer-Merge: "
MERGE_KEYWORD_START_POST = " -->>"
MERGE_KEYWORD_END_PRE = "<<-- /Creer-Merge: "
MERGE_KEYWORD_END_POST = " -->>"
def with_data(data, pre_comment, key, alt, add_tags=True, optional=False, help=True):
merged = []
# begin merge comment tag
if add_tags:
help = " - Code you add between this comment and the end comment will be preserved between Creer re-runs." if help else ""
merged.extend([pre_comment, MERGE_KEYWORD_START_PRE, key, MERGE_KEYWORD_START_POST,help + "\n"])
# merged content
if key in data:
print(" + merging", key)
merged.append(data[key])
else:
        if alt[-1] != "\n" and add_tags:
alt = alt + "\n"
merged.append(alt)
if not add_tags and optional and (merged[-1] == alt or merged[-1] == alt + "\n"):
# then don't bother with this merge tag
return ""
# end merge comment tag
if add_tags:
merged.extend([pre_comment, MERGE_KEYWORD_END_PRE, key, MERGE_KEYWORD_END_POST])
return "".join(merged)
def generate_data(file_contents):
data = {}
recording = None
for line in file_contents:
if MERGE_KEYWORD_END_PRE in line:
recording = None
elif MERGE_KEYWORD_START_PRE in line:
recording = extract_str(line, MERGE_KEYWORD_START_PRE, MERGE_KEYWORD_START_POST)
data[recording] = []
elif recording:
data[recording].append(line)
merge_data = {}
for key, lines in data.items():
merge_data[key] = "".join(lines)
    return merge_data
|
vincestorm/Docker-on-Amazon-Web-Services | ch17/todobackend/src/todobackend/settings_release.py |
from .settings import *
import os
# Disable debug
DEBUG = False
# Looks up secret in following order:
# 1. /run/secret/<key>
# 2. Environment variable named <key>
# 3. Value of default or None if no default supplied
def secret(key, default=None):
root = os.environ.get('SECRETS_ROOT','/run/secrets')
path = os.path.join(root,key)
if os.path.isfile(path):
with open(path) as f:
return f.read().rstrip()
else:
return os.environ.get(key,default)
# Set secret key
SECRET_KEY = secret('SECRET_KEY', SECRET_KEY)
# Must be explicitly specified when Debug is disabled
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(',')
# Database settings
DATABASES = {
    'default': {
        'ENGINE': 'mysql.connector.django',
        'NAME': os.environ.get('MYSQL_DATABASE','todobackend'),
        'USER': os.environ.get('MYSQL_USER','todo'),
        'PASSWORD': secret('MYSQL_PASSWORD','password'),
        'HOST': os.environ.get('MYSQL_HOST','localhost'),
        'PORT': os.environ.get('MYSQL_PORT','3306'),
        'OPTIONS': {
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
        }
    }
}
STATIC_ROOT = os.environ.get('STATIC_ROOT', '/public/static')
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', '/public/media')
MIDDLEWARE.insert(0, 'aws_xray_sdk.ext.django.middleware.XRayMiddleware')
|
rabbit-of-caerbannog/yahsd | yahsd.py | import os
import sys
import time
import argparse
import itertools
import collections
import html.parser
import urllib.parse
import urllib.request
class HorribleSubsShow:
BASE_URL = "https://horriblesubs.info/api.php"
HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0",
}
def __init__(self, showid: int):
self.showid = showid
def get(self, page: int = 0):
timestamp = int(time.time() * 1000)
params = [
("method", "getshows"),
("type", "show"),
("showid", self.showid),
("_", timestamp),
]
if page != 0:
params.append(("nextid", page),)
query_string = urllib.parse.urlencode(params)
url = self.BASE_URL + "?" + query_string
req = urllib.request.Request(url, headers=self.HEADERS)
with urllib.request.urlopen(req) as response:
html = response.read().decode()
return html
def get_first(self):
yield self.get(page=0)
def get_all(self):
for page in itertools.count():
html = self.get(page=page)
if html == "DONE":
break
yield html
class EpisodeListParser(html.parser.HTMLParser):
def __init__(self):
self.episodes = {}
self.show_name = None
self.current_episode = None
self.resolution = None
self.data_count = 0
super().__init__()
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
classes = attrs.get("class", "").split()
if tag == "div" and "rls-info-container" in classes:
self.current_episode = attrs["id"]
self.episodes[self.current_episode] = {}
return
if tag == "div" and "rls-link" in classes:
assert attrs["id"].startswith(self.current_episode)
self.resolution = attrs["id"].split("-")[1]
self.episodes[self.current_episode][self.resolution] = {}
return
if tag == "a" and attrs.get("title") == "Magnet Link":
self.episodes[self.current_episode][self.resolution]["magnet"] = attrs[
"href"
]
return
if tag == "a" and attrs.get("title") == "Torrent Link":
self.episodes[self.current_episode][self.resolution]["torrent"] = attrs[
"href"
]
return
def handle_endtag(self, tag):
...
def handle_data(self, data):
if self.data_count == 1:
self.show_name = data.strip()
self.data_count += 1
class ArgParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
        super().__init__(*args, description="Fetch torrent and magnet links for HorribleSubs shows.", **kwargs)
self.add_argument(
"show_ids",
metavar="ShowID(s)",
type=int,
nargs="+",
help="HorribleSubs show id",
)
self.add_argument(
"--all",
dest="get",
action="store_const",
const=lambda show: show.get_all(),
default=lambda show: show.get_first(),
help="sum the integers (default: find the max)",
)
class YahsDownloader:
@classmethod
def run(cls):
args = ArgParser().parse_args()
shows = collections.defaultdict(dict)
for showid in args.show_ids:
show = HorribleSubsShow(showid=showid)
for body in args.get(show):
parser = EpisodeListParser()
parser.feed(body)
shows[parser.show_name].update(parser.episodes)
cls.output(shows)
@classmethod
def output(cls, shows: dict):
for show in shows:
for episode in shows[show]:
for resolution in shows[show][episode]:
for medium, url in shows[show][episode][resolution].items():
sys.stdout.write(
cls.fmt(show, episode, resolution, medium, url)
)
@staticmethod
def fmt(show_name, episode, resolution, medium, url):
def bold(string):
start_bold = "\033[1m"
end = "\033[0m"
return f"{start_bold}{string}{end}"
if sys.stdout.isatty() and os.getenv("NO_COLOR") is None:
show_name = bold(show_name)
episode = bold(episode)
resolution = bold(resolution)
return "\t".join([show_name, episode, medium, resolution, url]) + "\n"
if __name__ == "__main__":
YahsDownloader.run()
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/tests/DeckBuilderTests.py |
import json
import os
import sys
sys.path.insert(1, os.path.abspath('DeckBuilder'))
import tools
def test_sort_1():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context2 = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
tools.sort_deck(context)
for key in context2:
assert context2[key] == context[key]
assert len(context) == len(context2)
def test_sort_2():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane.png"
context["slot3"] = "black/lich_spawner.png"
context2 = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context2["slot1"] = "black/bane.png"
context2["slot12"] = "black/lich_spawner.png"
tools.sort_deck(context)
for key in context2:
assert context2[key] == context[key]
assert len(context) == len(context2)
def test_sort_3():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane.png"
context["slot3"] = "black/lich_spawner.png"
context["slot12"] = "blue/conductor.png"
context["slot15"] = "black/bane.png"
context2 = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context2["slot1"] = "black/bane.png"
context2["slot12"] = "black/lich_spawner.png"
context2["slot2"] = "blue/conductor.png"
context2["slot15"] = "black/bane.png"
tools.sort_deck(context)
for key in context2:
assert context2[key] == context[key]
assert len(context) == len(context2)
def test_sort_4():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane.png"
context["slot3"] = "black/lich_spawner.png"
context["slot12"] = "blue/conductor.png"
context["slot15"] = "black/bane.png"
context2 = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context2["slot1"] = "black/bane.png"
context2["slot3"] = "black/lich_spawner.png"
context2["slot12"] = "blue/conductor.png"
context2["slot13"] = "black/bane.png"
context["deck_switch"] = 1
context2["deck_switch"] = 1
tools.sort_deck(context)
for key in context2:
assert context2[key] == context[key]
assert len(context) == len(context2)
def test_process_unit_1():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
button_name = ["unit", "black/bane"]
colors = [{"crystal": 3,
"black": 1,
"blue": 2},
3]
tools.process_unit_button(button_name, context, colors)
assert context["slot1"] == "black/bane.png"
def test_process_unit_2():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
button_name = ["unit", "black/bane"]
colors = [{"crystal": 3,
"green": 1,
"blue": 2,
"black": 0,
"white": 0}, 2]
tools.process_unit_button(button_name, context, colors)
assert context["slot1"] == "empty.jpg"
def test_process_slot_1():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
button_name = ["chosenslot", "1"]
tools.process_slot_button(button_name, context)
assert context["slot1"] == "empty.jpg"
def test_process_slot_2():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane"
button_name = ["chosenslot", "1"]
tools.process_slot_button(button_name, context)
assert context["slot1"] == "empty.jpg"
def test_process_slot_3():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane"
context["slot2"] = "black/bane_spawner"
button_name = ["chosenslot", "1"]
tools.process_slot_button(button_name, context)
assert context["slot1"] == "empty.jpg"
assert context["slot2"] == "black/bane_spawner"
def test_process_clean_1():
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
context["slot1"] = "black/bane"
context["slot20"] = "black/bane_spawner"
context["slot10"] = "black/bane_spawner"
context["slot18"] = "black/bane_spawner"
tools.process_clean_button(context)
for i in range(1, 25):
assert context["slot" + str(i)] == "empty.jpg"
def test_count_colors_1():
colors = {"crystal": 3,
"green": 1,
"blue": 2,
"black": 0,
"white": 0}
assert tools.count_nonzero_colors(colors) == 3
def test_count_colors_2():
colors = {"crystal": 0,
"green": 0,
"blue": 0,
"black": 0,
"white": 0}
assert tools.count_nonzero_colors(colors) == 0
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/DeckBuilder/migrations/0001_initial.py | # Generated by Django 3.2.5 on 2022-04-17 17:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DeckStructure',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slot1', models.CharField(max_length=30, verbose_name='Slot1_name')),
('slot2', models.CharField(max_length=30, verbose_name='Slot2_name')),
('slot3', models.CharField(max_length=30, verbose_name='Slot3_name')),
('slot4', models.CharField(max_length=30, verbose_name='Slot4_name')),
('slot5', models.CharField(max_length=30, verbose_name='Slot5_name')),
('slot6', models.CharField(max_length=30, verbose_name='Slot6_name')),
('slot7', models.CharField(max_length=30, verbose_name='Slot7_name')),
('slot8', models.CharField(max_length=30, verbose_name='Slot8_name')),
('slot9', models.CharField(max_length=30, verbose_name='Slot9_name')),
('slot10', models.CharField(max_length=30, verbose_name='Slot10_name')),
('slot11', models.CharField(max_length=30, verbose_name='Slot11_name')),
('slot12', models.CharField(max_length=30, verbose_name='Slot12_name')),
('slot13', models.CharField(max_length=30, verbose_name='Slot13_name')),
('slot14', models.CharField(max_length=30, verbose_name='Slot14_name')),
('slot15', models.CharField(max_length=30, verbose_name='Slot15_name')),
('slot16', models.CharField(max_length=30, verbose_name='Slot16_name')),
('slot17', models.CharField(max_length=30, verbose_name='Slot17_name')),
('slot18', models.CharField(max_length=30, verbose_name='Slot18_name')),
('slot19', models.CharField(max_length=30, verbose_name='Slot19_name')),
('slot20', models.CharField(max_length=30, verbose_name='Slot20_name')),
('slot21', models.CharField(max_length=30, verbose_name='Slot21_name')),
('slot22', models.CharField(max_length=30, verbose_name='Slot22_name')),
('slot23', models.CharField(max_length=30, verbose_name='Slot23_name')),
('slot24', models.CharField(max_length=30, verbose_name='Slot24_name')),
('maket_name', models.CharField(max_length=30, verbose_name='Maket_name')),
],
),
]
|
vlad9i22/DeckBuilderDjangoWebsiteCC | dodo.py | from doit.tools import run_once
DOIT_CONFIG = {'default_tasks':
['docs', 'babel', 'private_settings', 'migrate', 'tests']}
def task_docs():
"""Creates documentation in html."""
return {
'actions': ['make -C ./docs html']
}
def task_babel():
"""Creates generative files for babel (Translation)"""
return {
        'actions': ['''cd CCwebsite/DeckBuilder/translation &&
                    pybabel compile -D tools -d ./ -l ru &&
                    pybabel compile -D tools -d ./ -l en''']
}
def task_tests():
"""Run tests"""
return {
'actions': ['''cd CCwebsite && pytest ./tests/DeckBuilderTests.py''']
}
def task_private_settings():
"""Generates default private_setting.json file."""
return {
'actions': ['''cd CCwebsite/CCwebsite && python3 generate_default_private_settings.py'''],
'targets': ['./CCwebsite/CCwebsite/private_settings.json'],
'uptodate': [run_once]
}
def task_wheel():
"""Generates wheel distribution"""
return {
'actions': ['''python -m build -w'''],
'task_dep': ["babel"]
}
def task_migrate():
"""Create django databases"""
return {
'actions': ['''cd CCwebsite && python3 manage.py migrate''']
}
def task_flake8():
"""Check for flake8"""
return {
'actions': ['flake8']
}
|
vlad9i22/DeckBuilderDjangoWebsiteCC | tools/img_tools.py | from PIL import Image
from glob import glob
from os import path
from shutil import copytree, rmtree
def get_all_file_names(dir_name: str) -> list:
"""
Recursively gets all file names from given directory
Args:
dir_name (str): Directory name
Returns:
list: Names of all files in directory
"""
    # Glob everything so subdirectories are visited; globbing "*.png" alone
    # would never match a directory and the recursion below would be dead code.
    all_names = sorted(glob(path.join(dir_name, "*")))
    collected_names = []
    for name in all_names:
        if path.isfile(name) and name.endswith(".png"):
            collected_names.append(name)
        elif path.isdir(name):
            collected_names += get_all_file_names(name)
    return collected_names
def process_images(new_size: tuple) -> None:
"""
Transforms raw image data to processed unit icons. REMOVES ./data directory
Args:
new_size (tuple): Size of cleaned images
"""
if path.exists("./data"):
rmtree("./data")
copytree("./raw_data", "./data")
file_names = get_all_file_names("./data/cards")
for image_name in file_names:
Image.open(image_name).resize(new_size).save(image_name)
if __name__ == "__main__":
process_images((136, 136))
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/DeckBuilder/migrations/0002_simplemodel.py | # Generated by Django 3.2.5 on 2022-04-18 14:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DeckBuilder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SimpleModel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=30)),
],
),
]
|
vlad9i22/DeckBuilderDjangoWebsiteCC | tools/img_filename_taker.py | from img_tools import get_all_file_names
import json
import os
if __name__ == "__main__":
filenames = get_all_file_names("../raw_data/cards")
dump_dict = {}
for i, fname in enumerate(filenames):
splitted_path = fname.split("/")
dump_dict[os.path.join(splitted_path[-2], splitted_path[-1])] = i
json.dump(dump_dict, open("names.json", "w"), indent=1)
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/DeckBuilder/urls.py |
from django.urls import path
from . import views
urlpatterns = [
# Main page
path('', views.index),
# Deck build page
path('deckbuild/', views.deck_builder)
]
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/CCwebsite/generate_default_private_settings.py |
import json
if __name__ == '__main__':
SITE_ID = 3
SECRET_KEY = "<KEY>"
private_settings = {
"SITE_ID": SITE_ID,
"SECRET_KEY": SECRET_KEY
}
with open('private_settings.json', 'w') as f:
json.dump(private_settings, f)
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/DeckBuilder/views.py | from django.shortcuts import render
from django.http import HttpResponse
from DeckBuilder.tools import process_deckbuilder_request
# Create your views here.
def index(request):
return render(request, './base.html')
def deck_builder(request):
context = process_deckbuilder_request(request)
if request.method == "GET":
pass
else:
return HttpResponse("POSHELWON")
return render(request, './DeckBuilderPage.html', context=context)
def request_page(request):
print("hi")
return HttpResponse("buttonClick")
|
vlad9i22/DeckBuilderDjangoWebsiteCC | CCwebsite/DeckBuilder/tools.py |
import json
import os
import gettext
def sort_deck(context: dict) -> None:
'''
Sorts deck according to the rules of the game
'''
unit_order = json.load(open("templates/static/jsons/sort_order.json", "r"))
deck_slots = []
if context["deck_switch"]:
lb, rb = 13, 25
else:
lb, rb = 1, 13
for i in range(lb, rb):
is_spawner = context["slot" + str(i)].count("spawner")
uorder = unit_order[context["slot" + str(i)]]
deck_slots.append([is_spawner, uorder, context["slot" + str(i)]])
deck_slots.sort()
for i in range(lb, rb):
context["slot" + str(i)] = deck_slots[i - lb][-1]
def add_color(count_colors: dict, unit_name: str) -> None:
'''
Check if color needs to be counted for CC rules
'''
unit = unit_name.split('/')
if len(unit) == 2 and unit[0] in count_colors:
count_colors[unit[0]] += 1
def count_nonzero_colors(count_colors: dict) -> int:
'''
Counts number of units in deck for each color.
'''
cnt = 0
for val in count_colors.values():
if val > 0:
cnt += 1
return cnt
def is_proper_slot_idx(key: str, deck_switch: int) -> bool:
'''
Determines if proper slot of deck is chosen. Depends on deck_switch value and slot_id
'''
key_id = int(key.split("slot")[-1])
return (deck_switch == 1 and key_id >= 13) or (deck_switch == 0 and key_id < 13)
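# Illustrative checks: is_proper_slot_idx("slot14", 1) -> True (second deck,
# slots 13-24), while is_proper_slot_idx("slot3", 1) -> False (first deck).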
def copy_session_information(context: dict, request) -> list:
'''
Moves session information to context dictionary and collects unit color information for future processing
Return value is the list of 2 elements: 1) dict -> number of each color in deck
2) int -> number of distinct colors in deck
'''
count_colors = {"black": 0, "blue": 0, "green": 0, "white": 0}
for key in request.session.keys():
context[key] = request.session[key]
if "slot" in key and is_proper_slot_idx(key, request.session["deck_switch"]):
add_color(count_colors, context[key])
ncolors_in_deck = count_nonzero_colors(count_colors)
count_colors["crystal"] = -1
return [count_colors, ncolors_in_deck]
def copy_context_information(context: dict, request) -> None:
'''
Moves session update to session
'''
for key in context:
request.session[key] = context[key]
def get_clickedbutton_name(request_dict: dict) -> str:
'''
Gets the name of the button which was pressed by user
'''
key_ids = [key for key in request_dict.keys() if key != 'csrfmiddlewaretoken']
if len(key_ids) == 0:
return None
return key_ids[0].split(".")[0].split(";")
def process_unit_button(button_name: list, context: dict, color_info: list) -> None:
'''
Processes click on any unit button (button name contains "unit")
'''
unit_type, unit_name = button_name[1].split("/")
color_dict, ncolors = color_info
if ncolors >= 2 and color_dict[unit_type] == 0: # Max available number of colors already
return
if context["deck_switch"]:
lb, rb = 13, 25
else:
lb, rb = 1, 13
for i in range(lb, rb):
if context["slot" + str(i)] == "empty.jpg":
context["slot" + str(i)] = os.path.join(unit_type, unit_name) + ".png"
break
def process_slot_button(button_name: list, context: dict) -> None:
'''
Processes click on any slot button (button name contains "slot")
'''
# if is_proper_slot_idx("slot" + button_name[1], request.session["deck_switch"]):
context["slot" + button_name[1]] = "empty.jpg"
def process_button_button(button_name: list, context: dict) -> None:
'''
Processes click on any button button (button name contains "button")
'''
color = button_name[1]
color_matching = json.load(open("templates/static/jsons/color_matching.json", "r"))
context["maket_name"] = color_matching[color][0]
tree_layout = json.load(open(f"templates/static/jsons/{color_matching[color][1]}", "r"))
context["tree_layout"] = tree_layout["tree_layout"]
def process_switcher_button(button_name: list, context: dict) -> None:
'''
Processes click on switcher button (button name contains "switcher")
'''
if int(button_name[1]) == 1:
switch_val = 0
else:
switch_val = 1
context["deck_switch"] = switch_val
def process_clean_button(context: dict) -> None:
'''
Processes click on clear button (button name contains "switcher"). Delete all units from slots
'''
for i in range(1, 25):
context["slot" + str(i)] = "empty.jpg"
def process_flag_button(button_name: list, context: dict) -> None:
'''
Processes site localization button
'''
context["flag"] = button_name[1]
def process_deckbuilder_request(request):
'''
Processes and parses deckbuild webpage request
'''
context = json.load(open("templates/static/jsons/deckbuilder_state_default.json", "r"))
if "deck_switch" not in request.session.keys():
request.session["deck_switch"] = 0
color_info = copy_session_information(context, request)
button_name = get_clickedbutton_name(request.GET.dict())
if button_name is None: # No changes provided
return context
if button_name[0] == "unit":
process_unit_button(button_name, context, color_info)
sort_deck(context)
elif button_name[0] == "chosenslot":
process_slot_button(button_name, context)
sort_deck(context)
elif button_name[0] == "button":
process_button_button(button_name, context)
elif button_name[0] == "switcher":
process_switcher_button(button_name, context)
elif button_name[0] == "clear":
process_clean_button(context)
elif button_name[0] == "flag":
process_flag_button(button_name, context)
translation = gettext.translation('tools', 'DeckBuilder/translation', [context["flag"]])
_ = translation.gettext
context["title"] = _("hello")
copy_context_information(context, request)
return context
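# Minimal wiring sketch (the view and template names are assumptions, not part
# of this module): a Django view would typically delegate to the parser above:
#   from django.shortcuts import render
#   def deckbuilder(request):
#       return render(request, "deckbuilder.html", process_deckbuilder_request(request))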
|
INN/maine-legislature | helpers.py | <reponame>INN/maine-legislature
# _*_ coding:utf-8 _*_
# Helper functions for the Maine Legislature project
import app_config
import collections
import copytext
import re
import json
from unicodedata import normalize
CACHE = {}
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def get_legislators():
copy = get_copy()
return copy['senators']._sheet + copy['house_reps']._sheet
def get_legislator_slugs():
legislators = get_legislators()
slugs = []
for legislator in legislators:
slugs.append(slugify(legislator['name']))
return slugs
def get_legislator_by_slug(slug):
legislators = get_legislators()
leg = None
for legislator in legislators:
if slugify(legislator['name']) == slug:
leg = legislator
break
return leg
def get_legislator_id_by_slug(slug):
leg = get_legislator_by_slug(slug)
return leg['id']
# I apologize for the length of this function.
def get_legislator_income_by_slug(slug):
copy = get_copy()
income = collections.OrderedDict()
leg_id = get_legislator_id_by_slug(slug)
for row in copy['income_employment']:
if row['sh_number'] == leg_id:
try:
income['income_employment']
except KeyError:
income['income_employment'] = []
if row['Name_Employer'] != u'':
income['income_employment'].append(
row['Position'] + ', '
+ row['Name_Employer'] + ', '
+ row['Employer_City']
# + format_zip(row['Employer_Zip'])
)
for row in copy['income_self']:
if row['sh_number'] == leg_id:
try:
income['income_self']
except KeyError:
income['income_self'] = []
if row['Name_of_Self_Employment_Business'] != u'':
income['income_self'].append(
row['Name_of_Self_Employment_Business'] + ', '
+ row['City_of_Self_Employment_Business']
# + format_zip(row['Zip_of_Self_Employment'])
)
for row in copy['income_law']:
if row['sh_number'] == leg_id:
try:
income['income_law']
except KeyError:
income['income_law'] = []
if row['Name_of_Practice'] != u'':
income['income_law'].append(
row['Position_in_Practice'] + ', '
+ row['Name_of_Practice'] + ', '
+ row['City_of_Practice']
# + format_zip(row['Zip_of_Practice'])
)
for row in copy['income_other']:
if row['sh_number'] == leg_id:
try:
income['income_other']
except KeyError:
income['income_other'] = []
if row['Name_of_Source'] != u'':
line = u''
if row['Name_of_Source'] != u'':
line += row['Name_of_Source']
if row['City_of_Source'] != u'':
line += ', ' + row['City_of_Source']
# line += ', ' + format_zip(row['Zip_of_Source'])
if row['Description_of_income_type'] != u'':
line += " (%s)" % row['Description_of_income_type']
income['income_other'].append(line)
for row in copy['honoraria']:
if row['sh_number'] == leg_id:
try:
income['honoraria']
except KeyError:
income['honoraria'] = []
if row['Source_of_Honoraria'] != u'':
income['honoraria'].append(row['Source_of_Honoraria'] + ' (honorarium)')
for row in copy['loans']:
if row['sh_number'] == leg_id:
try:
income['loans']
except KeyError:
income['loans'] = []
if row['Name_of_Lender'] != u'' and row['City_of_Lender'] != u'' and row['Zip_of_Lender'] != u'':
income['loans'].append(
row['Name_of_Lender'] + ', '
+ row['City_of_Lender'] + ' (loan)'
# + ', ' + format_zip(row['Zip_of_Lender']) + ' (Loan)'
)
for row in copy['gifts']:
if row['sh_number'] == leg_id:
try:
income['zgifts']
except KeyError:
income['zgifts'] = []
if row['Source_of_Gift'] != u'':
income['zgifts'].append(row['Source_of_Gift'] + ' (gift)')
return income
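# Note on the try/except KeyError pattern above: dict.setdefault is an
# equivalent, shorter idiom, e.g.
#   income.setdefault('income_employment', []).append(...)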
def get_legislator_business_by_slug(slug):
"""
Break this out from get_legislator_income_by slug in response to https://github.com/INN/maine-legislature/issues/82
"""
copy = get_copy()
businesses = collections.OrderedDict()
leg_id = get_legislator_id_by_slug(slug)
for row in copy['income_business']:
if row['sh_number'] == leg_id:
try:
businesses['income_business']
except KeyError:
businesses['income_business'] = []
if row['Name_of_Business'] != u'':
temporary = row['Name_of_Business']
if row['City_of_Business']:
temporary += ', ' + row['City_of_Business']
businesses['income_business'].append(temporary)
return businesses
def get_legislator_positions_by_slug(slug):
"""
positions for nonprofits and suchlike
"""
copy = get_copy()
positions = {}
leg_id = get_legislator_id_by_slug(slug)
for row in copy['position_org']:
if row['sh_number'] == leg_id:
try:
positions['position_org']
except KeyError:
positions['position_org'] = []
# this checks row['Relationship_to_Legislator'] to make sure it's self
# otherwise, this goes in family member positions
if unicode(row['Relationship_to_Legislator']).lower() == u'self':
line = row['Title_in_Organization'] + ', '
line += row['Organization']
if unicode(row['City_of_Organization']) != u'':
line += ', ' + row['City_of_Organization']
# line += format_zip(row['Zip_of_Organization'])
if unicode(row['Compensated']).lower() == u'yes':
line += ' (paid position)'
positions['position_org'].append(line)
return positions
def get_legislator_political_positions_by_slug(slug):
"""
Get just this legislator's political positions
https://github.com/INN/maine-legislature/issues/82
"""
copy = get_copy()
political_positions = {}
leg_id = get_legislator_id_by_slug(slug)
for row in copy['position_political']:
if row['sh_number'] == leg_id:
try:
political_positions['position_political']
except KeyError:
political_positions['position_political'] = []
if row['Name_of_Committee'] != u'':
if row['Name_of_Official'] == row['sh_name']:
# the official is the legislator,
# per https://github.com/INN/maine-legislature/issues/68
political_positions['position_political'].append(
row['Title_in_Committee'] + ', ' +
row['Name_of_Committee']
)
return political_positions
def get_legislator_family_by_slug(slug):
copy = get_copy()
family = {}
leg_id = get_legislator_id_by_slug(slug)
for row in copy['position_org']:
if row['sh_number'] == leg_id:
try:
family['position_org']
except KeyError:
family['position_org'] = []
            # this checks row['Relationship_to_Legislator'] to make sure it's a family member;
            # otherwise, this goes in the legislator's own positions.
            # The values used here are spouse and self; u'' is treated as self.
if unicode(row['Relationship_to_Legislator']).lower() == u'spouse':
line = row['Name_of_Position_Holder']
line += " (%s)" % unicode(row['Relationship_to_Legislator']).lower()
if row['Title_in_Organization']:
line += ', ' + row['Title_in_Organization']
if row['Organization']:
line += ', ' + row['Organization']
if row['City_of_Organization']:
line += ', ' + row['City_of_Organization']
# Not doing zips this app
# line += ', ' + format_zip(row['Zip_of_Organization'])
if unicode(row['Compensated']).lower() == u'yes':
line += ' (paid position)'
family['position_org'].append(line)
for row in copy['family_income_compensation']:
if row['sh_number'] == leg_id:
try:
family['family_income_compensation']
except KeyError:
family['family_income_compensation'] = []
if unicode(row['Name_of_family_member']).lower() != u'':
line = row['Name_of_family_member']
if unicode(row['Position_of_family_member']) != u'':
line += ', ' + row['Position_of_family_member']
if unicode(row['Family_Member_Employers_Name']) != u'':
line += ', ' + row['Family_Member_Employers_Name']
if unicode(row['Employers_City']) != u'':
line += ', ' + row['Employers_City']
# if unicode(row['Employers_Zip']) != u'':
# line += ', ' + format_zip(row['Employers_Zip'])
family['family_income_compensation'].append(line)
for row in copy['family_other_income']:
if row['sh_number'] == leg_id:
try:
family['family_other_income']
except KeyError:
family['family_other_income'] = []
# This column is also used for other family members
if unicode(row['Name_of_spouse']) != u'':
line = row['Name_of_spouse']
if unicode(row['Source_of_family_member_income']) != u'':
line += ', ' + row['Source_of_family_member_income']
if unicode(row['City_of_other_source']) != u'':
line += ', ' + row['City_of_other_source']
# if unicode(row['Zip_of_other_source']) != u'':
# line += ', ' + format_zip(row['Zip_of_other_source'])
if unicode(row['Type_of_Income']) != u'':
line += ' (%s)' % row['Type_of_Income']
family['family_other_income'].append(line)
for row in copy['position_political']:
if row['sh_number'] == leg_id:
try:
family['position_political']
except KeyError:
family['position_political'] = []
if row['Name_of_Committee'] != u'':
if row['Name_of_Official'] != u'' and row['Name_of_Official'] != row['sh_name']:
                    # the official is a family member, not the legislator,
                    # per https://github.com/INN/maine-legislature/issues/68
family['position_political'].append(
row['Name_of_Official'] + ', ' +
row['Title_in_Committee'] + ', ' +
row['Name_of_Committee']
)
return family
def rep_sen(id):
if id.startswith('s'):
return u"Sen."
elif id.startswith('h'):
return u"Rep."
else:
return u''
def format_district(district):
try:
float(district)
return u"District " + district
except ValueError:
return district
# Not actually used anymore, since we removed the zip codes from display.
# Strings such as "01234-4567" pass through unchanged (so "001234-4567" is
# never produced); only numeric zips coming out of the spreadsheet as floats
# get their trailing ".0" stripped and a leading zero restored.
def format_zip(zip):
    if isinstance(zip, basestring):
        return zip
    try:
        zip = str(zip)
        # strip only a trailing ".0" left over from float conversion
        if zip.endswith('.0'):
            zip = zip[:-2]
        return u"0" + zip
    except ValueError:
        return zip
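# Examples (inputs assumed to mimic spreadsheet output):
#   format_zip(u"01234-4567") -> u"01234-4567"  (strings pass through)
#   format_zip(4101.0)        -> u"04101"       (float -> "4101.0" -> "04101")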
# Other helpers
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
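# Example: slugify(u'Émile Zola') -> u'emile-zola' (accents stripped via NFKD,
# punctuation and whitespace collapsed into the delimiter).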
def is_really_iterable(var):
    if not hasattr(var, '__iter__'):
        return False
    count = 0
    for k in var:
        if hasattr(var[k], '__iter__'):
            for j in var[k]:
                count += 1
    return count >= 1
def leg_bills_count(leg_id):
counter = 0
copy = get_copy()
for bill in copy['bills']:
if bill['sh_number'] == leg_id:
if bill['ld_num'] != u'':
counter = counter + 1
return counter
def get_copy():
if not CACHE.get('copy', None):
CACHE['copy'] = copytext.Copy(app_config.COPY_PATH)
return CACHE['copy']
def legislators_json():
legislators = get_legislators()
json_data = []
for legislator in legislators:
json_data.append({
'id': legislator['id'],
'name': legislator['name'],
'district': format_district(legislator['district_number']),
'party': legislator['party'],
'town': legislator['home_city'],
'slug': slugify(legislator['name']),
'rep_sen': rep_sen(legislator['id'])
})
with open('www/assets/data/legislators.json', 'w+') as f:
print "Writing www/assets/data/legislators.json"
f.write(json.dumps(json_data))
|
INN/maine-legislature | app.py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
Example application views.
Note that `render_template` is wrapped with `make_response` in all application
routes. While not necessary for most Flask apps, it is required in the
App Template for static publishing.
"""
import app_config
import oauth
import static
from flask import Flask, make_response, render_template, request
from render_utils import make_context, smarty_filter, urlencode_filter
from werkzeug.debug import DebuggedApplication
from helpers import slugify, rep_sen, format_district, format_zip, \
is_really_iterable, get_legislator_slugs, leg_bills_count, \
get_legislator_by_slug, get_legislator_income_by_slug, \
get_legislator_positions_by_slug, get_legislator_family_by_slug, \
get_legislator_business_by_slug, get_legislator_political_positions_by_slug
app = Flask(__name__)
app.debug = app_config.DEBUG
app.add_template_filter(smarty_filter, name='smarty')
app.add_template_filter(urlencode_filter, name='urlencode')
app.jinja_env.filters['slugify'] = slugify
app.jinja_env.filters['rep_sen'] = rep_sen
app.jinja_env.filters['format_district'] = format_district
app.jinja_env.filters['format_zip'] = format_zip
app.jinja_env.filters['is_really_iterable'] = is_really_iterable
app.jinja_env.filters['leg_bills_count'] = leg_bills_count
@app.route('/')
def index():
context = make_context()
return make_response(render_template('index.html', **context))
legislator_slugs = get_legislator_slugs()
for slug in legislator_slugs:
@app.route('/legislator/%s/' % slug)
def legislator():
context = make_context()
slug = request.path.split('/')[2]
context['legislator'] = get_legislator_by_slug(slug)
context['income'] = get_legislator_income_by_slug(slug)
context['business'] = get_legislator_business_by_slug(slug)
context['positions'] = get_legislator_positions_by_slug(slug)
context['political_positions'] = get_legislator_political_positions_by_slug(slug)
context['family'] = get_legislator_family_by_slug(slug)
return make_response(render_template('legislator.html', **context))
app.register_blueprint(static.static)
app.register_blueprint(oauth.oauth)
# Enable Werkzeug debug pages
if app_config.DEBUG:
wsgi_app = DebuggedApplication(app, evalex=False)
else:
wsgi_app = app
# Catch attempts to run the app directly
if __name__ == '__main__':
print 'This command has been removed! Please run "fab app" instead!'
|
INN/maine-legislature | tests/test_app.py | <reponame>INN/maine-legislature
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import unittest
import app
import app_config
class IndexTestCase(unittest.TestCase):
"""
Test the index page.
"""
def setUp(self):
app.app.config['TESTING'] = True
self.client = app.app.test_client()
def test_index_exists(self):
response = self.client.get('/')
assert app_config.PROJECT_SLUG in response.data
class AppConfigTestCase(unittest.TestCase):
"""
Testing dynamic conversion of Python app_config into Javascript.
"""
def setUp(self):
app.app.config['TESTING'] = True
self.client = app.app.test_client()
def parse_data(self, response):
"""
Trim leading variable declaration and load JSON data.
"""
return json.loads(response.data[20:])
def test_app_config_staging(self):
response = self.client.get('/js/app_config.js')
data = self.parse_data(response)
assert data['DEBUG'] == True
def test_app_config_production(self):
app_config.configure_targets('production')
response = self.client.get('/js/app_config.js')
data = self.parse_data(response)
assert data['DEBUG'] == False
app_config.configure_targets('staging')
if __name__ == '__main__':
unittest.main()
|
wvandertoorn/nanoRMS | visualization_per_read/per_read_mean.py | import sys
import pandas as pd
infile = sys.argv[1]
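# Usage sketch (the file name is an assumption): this script expects a
# nanopolish eventalign TSV as its single argument, e.g.
#   python per_read_mean.py eventalign.tsv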
inp=pd.read_csv(infile,sep='\t') #this step will take ages if the file is huge!
inp = inp [inp['model_kmer'] != 'NNNNN'] #Remove NNNNN values
grouped_multiple_mean_inp = inp.groupby(['contig', 'position','reference_kmer', "read_index"]).agg({'event_level_mean':['mean']}) #Collapse multiple observations from the same read
grouped_multiple_mean_inp = grouped_multiple_mean_inp.reset_index()
grouped_multiple_mean_inp.columns = grouped_multiple_mean_inp.columns.droplevel(-1)
grouped_multiple_mean_inp.to_csv('{}_processed_perpos_mean.csv'.format(infile), sep='\t', index = False) #Export the file |
wvandertoorn/nanoRMS | per_read/fast5_to_fastq.py | <reponame>wvandertoorn/nanoRMS
#!/usr/bin/env python3
desc="""Report FastQ from basecalled Fast5 file(s).
Originally from https://github.com/lpryszcz/Pszczyna
Dependencies: ont_fast5_api
"""
epilog="""Author: <EMAIL>
Barcelona, 30/00/2020
"""
import os, sys
from datetime import datetime
from ont_fast5_api.fast5_interface import get_fast5_file
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='0.10a')
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="verbose")
parser.add_argument("-i", "--fast5", nargs="+", help="input Fast5 file(s)")
parser.add_argument("-o", "--out", default=sys.stdout, type=argparse.FileType("w"), help="output stream [stdout]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
    for fn in o.fast5:
        f5file = get_fast5_file(fn, mode="r")
        for read_id in f5file.get_read_ids():
            read = f5file.get_read(read_id)
            bcgrp = read.get_latest_analysis("Basecall_1D") #Basecall_1D_000
            fastq = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Fastq")
            o.out.write(fastq)
        f5file.close()  # release the file handle before moving to the next Fast5
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
#except IOError as e:
# sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
#sys.stderr.write("#Time elapsed: %s\n"%dt)
|
wvandertoorn/nanoRMS | per_read/get_features.py | #!/usr/bin/env python3
desc="""Requiggle basecalled FastQ files and features in BAM file.
For all reference bases we store (as BAM comments):
- normalised signal intensity mean [tag si:B,f]
- reference base probability [tag tr:B:C] retrieved from guppy (trace scaled 0-255)
- dwell time [tag dt:B:C] in signal step capped at 255
All features are matched versus padded reference sequence blocks
ie excluding introns and large (padded) deletions from reference.
Those blocks (2-D array of start & ends) are stored as flattened 1-D array [tag bs:B:i]
ie. exons [(8114, 8244), (8645, 8797)] will be stored as array('I', [8114, 8244, 8645, 8797]).
--rna will automatically enable spliced alignments.
"""
epilog="""Author: <EMAIL>
Cologne/Barcelona/Mizerów, 17/06/2020
"""
import itertools, json, os, resource, scipy, subprocess, sys, numpy as np, pysam, tempfile
from tombo import tombo_stats, resquiggle, tombo_helper
from tombo._default_parameters import OUTLIER_THRESH, SHIFT_CHANGE_THRESH, SCALE_CHANGE_THRESH, RNA_SAMP_TYPE, DNA_SAMP_TYPE, COLLAPSE_RNA_STALLS, COLLAPSE_DNA_STALLS, STALL_PARAMS#, FM_OFFSET_DEFAULT
from ont_fast5_api.fast5_interface import get_fast5_file
from datetime import datetime
from multiprocessing import Pool
from array import array
from copy import deepcopy
# add PATH - needed by fast5_to_fastq.py
os.environ["PATH"] = "%s:%s"%(':'.join(sys.path), os.environ["PATH"])
VERSION = '0.11b'
DEFAULT_STALL_PARAMS = tombo_helper.stallParams(**STALL_PARAMS)
USE_START_CLIP_BASES = resquiggle.USE_START_CLIP_BASES
# only DNA bases, as in SAM U is always written as T
bases = "ACGT"
base2idx = {b: i for i, b in enumerate(bases)}
base2complement = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N"}
# add lower-case for get_aligned_pairs as it reports substitutions as lower-case
for b, i in list(base2idx.items()): base2idx[b.lower()] = i
for b, c in list(base2complement.items()): base2complement[b.lower()] = c
def minimap2_proc(ref, fast5, threads=1, spliced=0, sensitive=1):
"""Run minimap2 and return its stdout"""
mode = ["-axmap-ont", ]
if spliced:
mode = ["-axsplice", "-uf"]
args1 = ["minimap2", "--MD", "-Y", "-t%s"%threads] + mode
if sensitive:
args1 += ["-k7", "-w5", "-m20", "-A6", "-B4"]
args1 += [ref, "-"]
# fast5_to_fastq
args0 = ["fast5_to_fastq.py", "-i%s"%fast5]
proc0 = subprocess.Popen(args0, stdout=subprocess.PIPE)
# minimap2
proc1 = subprocess.Popen(args1, stdin=proc0.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return proc1
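# The two processes above are equivalent to a shell pipeline along the lines
# of (file names are placeholders; sensitive=1 appends -k7 -w5 -m20 -A6 -B4):
#   fast5_to_fastq.py -i reads.fast5 | minimap2 --MD -Y -t1 -axmap-ont ref.fa -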
def adjust_map_res(map_res, seq_samp_type, rsqgl_params, TRIM_RNA_ADAPTER=False):
if seq_samp_type.name == RNA_SAMP_TYPE:
if TRIM_RNA_ADAPTER:
# trim DNA adapter off of RNA signal
adapter_end = tombo_stats.trim_rna(map_res.raw_signal, rsqgl_params)
# trim off adapter
map_res = map_res._replace(raw_signal=map_res.raw_signal[adapter_end:])
# flip raw signal for re-squiggling
map_res = map_res._replace(raw_signal=map_res.raw_signal[::-1])
elif seq_samp_type.name == DNA_SAMP_TYPE and USE_START_CLIP_BASES:
# flip raw signal, genome and start clip seqs for re-squiggling
map_res = map_res._replace(
raw_signal=map_res.raw_signal[::-1],
genome_seq=map_res.genome_seq[::-1])
if ((COLLAPSE_RNA_STALLS and seq_samp_type.name == RNA_SAMP_TYPE) or
(COLLAPSE_DNA_STALLS and seq_samp_type.name == DNA_SAMP_TYPE)):
map_res = map_res._replace(stall_ints=tombo_stats.identify_stalls(map_res.raw_signal, DEFAULT_STALL_PARAMS))
return map_res
def adjust_rsqgl_res(rsqgl_res, all_raw_signal, seq_samp_type, USE_START_CLIP_BASES=False):
if seq_samp_type.name == DNA_SAMP_TYPE and USE_START_CLIP_BASES:
# flip raw signal and events back for storage in genome direction
rev_rsrtr = (all_raw_signal.shape[0] -
rsqgl_res.read_start_rel_to_raw -
rsqgl_res.segs[-1])
rev_segs = -1 * (rsqgl_res.segs[::-1] - rsqgl_res.segs[-1])
rsqgl_res = rsqgl_res._replace(
read_start_rel_to_raw=rev_rsrtr, segs=rev_segs,
genome_seq=rsqgl_res.genome_seq[::-1],
raw_signal=rsqgl_res.raw_signal[::-1])
return rsqgl_res
def map_read(a, faidx, seq_samp_type, std_ref, ref2len):
"""Get resquiggle result with read alignement info"""
seq_data = tombo_helper.sequenceData(seq=a.seq, id=a.qname, mean_q_score=np.mean(a.query_qualities))
# get chrom, start and end
chrm, ref_start, ref_end = a.reference_name, a.reference_start, a.reference_end
# store strand & number of clipped bases relative to read sequence
if a.is_reverse:
strand = "-"
num_start_clipped_bases = len(seq_data.seq) - a.qend
num_end_clipped_bases = a.qstart
else:
strand = "+"
num_start_clipped_bases = a.qstart
num_end_clipped_bases = len(seq_data.seq) - a.qend
# 'ID', 'Subgroup', 'ClipStart', 'ClipEnd', 'Insertions', 'Deletions', 'Matches', 'Mismatches'
align_info = tombo_helper.alignInfo(seq_data.id, "", num_start_clipped_bases, num_end_clipped_bases,
0, 0, a.alen, 0) # this isn't used anywhere, so just don't bother computing it!
# extract genome sequence from mappy aligner
# expand sequence to get model levels for all sites (need to handle new
# sequence coordinates downstream)
start_skip = 0
# get exonic blocks
blocks = get_exonic_blocks(a)
align_info.blocks = deepcopy(blocks)
dnstrm_bases = std_ref.kmer_width - std_ref.central_pos - 1
if ((seq_samp_type.name == RNA_SAMP_TYPE and strand == '+') or
(seq_samp_type.name == DNA_SAMP_TYPE and strand == '-' and USE_START_CLIP_BASES) or
(seq_samp_type.name == DNA_SAMP_TYPE and strand == '+' and not USE_START_CLIP_BASES)):
if ref_start < std_ref.central_pos:
start_skip = std_ref.central_pos-ref_start
ref_start = std_ref.central_pos
ref_seq_start = ref_start - std_ref.central_pos
ref_seq_end = ref_end + dnstrm_bases
else:
if ref_start < dnstrm_bases:
start_skip = dnstrm_bases-ref_start
ref_start = dnstrm_bases
ref_seq_start = ref_start - dnstrm_bases
ref_seq_end = ref_end + std_ref.central_pos
# update blocks start & end with kmer specific shifts - this sequence won't be saved!
blocks[0][0] = ref_seq_start
blocks[-1][1] = ref_seq_end
# get exonic sequence
genome_seq = "".join([faidx.fetch(chrm, s, e) for s, e in blocks])
# get missing bases in the end
end_skip = 0 if blocks[-1][1]<=ref2len[chrm] else blocks[-1][1]-ref2len[chrm]
    # pad genome seq at the ends with placeholder bases - "A"s for now
if start_skip or end_skip:
genome_seq = "A"*start_skip + genome_seq + "A"*end_skip
if strand == '-':
genome_seq = tombo_helper.rev_comp(genome_seq)
# store enlarged genome for P-value calculation, so no trimming needed later :)
genome_seq = genome_seq.upper() #.upper() is important to correctly process soft-masked sequences
align_info.refseq = genome_seq.upper() # res.genome_seq is altered during find_adaptive_assignment
genome_loc = tombo_helper.genomeLocation(ref_start, strand, chrm)
return tombo_helper.resquiggleResults(align_info, genome_loc, genome_seq, seq_data.mean_q_score)
def get_exonic_blocks(a):
"""Return exonic blocks this is start-end reference-based
for consecutive exons covered by given read.
Note, those are not necesarily exact exons, just exons infered from read alignment.
"""
blocks = []
s = e = a.pos
# iter read blocks
for code, bases in a.cigar:
# count blocks that alter reference positions (ignore ie insertions [1])
if code in (0, 2, 7, 8): e += bases
# exclude introns - those are reported as reference-padded alignment part
elif code == 3:
blocks.append([s, e])
s = e + bases
e = s
# store exon after last intron (or entire transcript if no introns)
blocks.append([s, e])
return blocks
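# Worked example: a read starting at pos 100 with CIGAR 50M200N30M
# (a.cigar == [(0, 50), (3, 200), (0, 30)]) yields [[100, 150], [350, 380]].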
def resquiggle_reads(multifast5_fn, aligner, ref, seq_samp_type, std_ref, rsqgl_params,
outlier_thresh=OUTLIER_THRESH, max_scaling_iters=3, max_per_ref=0,
valid_bases=set(list('ACGT'))):
ref2c = {}
# process reads from multi fast5
faidx = pysam.FastaFile(ref)
ref2len = {r: l for r, l in zip(faidx.references, faidx.lengths)}#; ref2len
f5file = get_fast5_file(multifast5_fn, mode="r")
for a in aligner:
# process only given number of reads per reference
if max_per_ref:
contig = a.reference_name #map_results.genome_loc.Chrom
if contig in ref2c:
if ref2c[contig]>=max_per_ref: continue
else: ref2c[contig] = 0
# skip reads without alignment or secondary/qcfails
if a.is_unmapped or a.is_secondary or a.is_qcfail:
yield None, "No alignment" if a.is_unmapped else "Secondary alignment"
continue
# get alignment data
map_results = map_read(a, faidx, seq_samp_type, std_ref, ref2len)
# make sure only ACGT chars in reference!
if set(map_results.genome_seq).difference(valid_bases):
yield None, "Non-ACGT sequence" # instead maybe just replace by random char?
continue
# extract data from FAST5
read = f5file.get_read(a.qname) #read_id)
all_raw_signal = read.get_raw_data(scale=False)
map_results = map_results._replace(raw_signal=all_raw_signal)
try:
# this causes sometimes TomboError: Read event to sequence alignment extends beyond bandwidth
map_results = adjust_map_res(map_results, seq_samp_type, rsqgl_params)
rsqgl_res = resquiggle.resquiggle_read(map_results, std_ref, rsqgl_params, outlier_thresh)
n_iters = 1
while n_iters < max_scaling_iters and rsqgl_res.norm_params_changed:
rsqgl_res = resquiggle.resquiggle_read(map_results._replace(scale_values=rsqgl_res.scale_values),
std_ref, rsqgl_params, outlier_thresh)
n_iters += 1
except Exception as inst:
yield None, str(inst)
continue
rsqgl_res = adjust_rsqgl_res(rsqgl_res, all_raw_signal, seq_samp_type)
# add alignment and read as those are needed later
rsqgl_res.a, rsqgl_res.read = a, read
# update ref counter
if ref2c: ref2c[contig] += 1
yield rsqgl_res, ""
def get_norm_mean(raw, segs):
"""Return raw signal means for given segments."""
return np.array([raw[segs[i]:segs[i+1]].mean() for i in range(len(segs)-1)])
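# Worked example: get_norm_mean(np.array([1, 2, 3, 4]), segs=[0, 2, 4])
# -> array([1.5, 3.5]) (means of raw[0:2] and raw[2:4]).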
def get_trace_for_reference_bases(a, read, rna, func=np.mean):
"""Return reference-aligned trace for tr (ref base), tA, tC, tG, tT"""
def get_bidx_fwd(b): return base2idx[b]
def get_bidx_rev(b): return base2idx[base2complement[b]]
# trace for reference bases
tr = np.zeros(a.reference_length, dtype="uint8")
# trace and move data from read
bcgrp = read.get_latest_analysis("Basecall_1D")
trace = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Trace")
if trace is None:
logger("[ERROR] Trace table is missing in Fast5 file! Basecall Fast5 files again using --fast5_out option. ")
return tr
move = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Move")
move_pos = np.append(np.argwhere(move==1).flatten(), len(trace)) # add end of trace
# combine flip & flop probabilities
## here we get sum of flip & flop. maybe get just one? but flop is usually lower...
trace[:, :len(bases)] += trace[:, len(bases):]
trace = trace[:, :len(bases)]
# here we need to remember that DNA 5'>3', but RNA 3'>5'
# plus the strand matters
if a.is_reverse: # for REV alg
get_bidx = get_bidx_rev # take complement base
if not rna: move_pos = move_pos[::-1] # reverse move_pos for DNA
else: # for FWD alg
get_bidx = get_bidx_fwd # take base
if rna: move_pos = move_pos[::-1] # reverse move_pos for RNA
# process aligned bases - that's quite elegant, right? :P
## with_seq require MD tags: in minimap2 use --MD and -Y (soft-clip supplementary)
for qi, ri, b in a.get_aligned_pairs(with_seq=True, matches_only=True):
# get start & end in trace-space
s, e = move_pos[qi:qi+2]
if s>e: s, e = e, s # fix s, e for reversed move_pos
tr[ri-a.reference_start] = func(trace[s:e, get_bidx(b)], axis=0)
return tr
def get_trace_for_all_bases(a, read, rna, func=np.mean):
"""Return reference-aligned trace for tr (ref base), tA, tC, tG, tT"""
def get_bidx_fwd(b): return base2idx[b]
def get_bidx_rev(b): return base2idx[base2complement[b]]
# trace for reference bases
tr = np.zeros((a.reference_length,5), dtype="uint8") # one column per base + canonical col
# trace and move data from read
bcgrp = read.get_latest_analysis("Basecall_1D")
trace = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Trace")
if trace is None:
logger("[ERROR] Trace table is missing in Fast5 file! Basecall Fast5 files again using --fast5_out option. ")
return tr
move = read.get_analysis_dataset(bcgrp, "BaseCalled_template/Move")
move_pos = np.append(np.argwhere(move==1).flatten(), len(trace)) # add end of trace
# combine flip & flop probabilities
## here we get sum of flip & flop. maybe get just one? but flop is usually lower...
trace[:, :len(bases)] += trace[:, len(bases):]
trace = trace[:, :len(bases)]
# here we need to remember that DNA 5'>3', but RNA 3'>5'
# plus the strand matters
if a.is_reverse: # for REV alg
get_bidx = get_bidx_rev # take complement base
if not rna: move_pos = move_pos[::-1] # reverse move_pos for DNA
else: # for FWD alg
get_bidx = get_bidx_fwd # take base
if rna: move_pos = move_pos[::-1] # reverse move_pos for RNA
# process aligned bases - that's quite elegant, right? :P
## with_seq require MD tags: in minimap2 use --MD and -Y (soft-clip supplementary)
for qi, ri, b in a.get_aligned_pairs(with_seq=True, matches_only=True):
# get start & end in trace-space
s, e = move_pos[qi:qi+2]
if s>e: s, e = e, s # fix s, e for reversed move_pos
tr[ri-a.reference_start,0] = func(trace[s:e, 0], axis=0)
tr[ri-a.reference_start,1] = func(trace[s:e, 1], axis=0)
tr[ri-a.reference_start,2] = func(trace[s:e, 2], axis=0)
tr[ri-a.reference_start,3] = func(trace[s:e, 3], axis=0)
tr[ri-a.reference_start,4] = func(trace[s:e, get_bidx(b)], axis=0)
return tr
def process_fast5(fast5, ref, rna=True, sensitive=False):
"""Process individual Fast5 files"""
outfn = "%s.bam"%fast5 #.d2r
# uncomment if you don't wish to recompute previously computed bam files
# if os.path.isfile(outfn): return outfn
faidx = pysam.FastaFile(ref)
ref2len = {r: l for r, l in zip(faidx.references, faidx.lengths)}
# load model & its parameters
if rna:
seq_samp_type = tombo_helper.seqSampleType('RNA', True)
rsqgl_params = tombo_stats.load_resquiggle_parameters(seq_samp_type)
std_ref = tombo_stats.TomboModel(seq_samp_type=seq_samp_type)
spliced = True
else:
seq_samp_type = tombo_helper.seqSampleType('DNA', False)
rsqgl_params = tombo_stats.load_resquiggle_parameters(seq_samp_type)
spliced = False
std_ref = tombo_stats.TomboModel(seq_samp_type=seq_samp_type)
# get resquiggle parameters
i, errors = 0, {}
# prep aligner, signal model and parameters
aligner = minimap2_proc(ref, fast5, sensitive=sensitive, spliced=spliced)
sam = pysam.AlignmentFile(aligner.stdout)
# open unsorted bam for saving alignements with features
tmp = tempfile.NamedTemporaryFile(delete=False); tmp.close()
bam_unsorted = pysam.AlignmentFile(tmp.name, "wb", header=sam.header)
for i, (res, err) in enumerate(resquiggle_reads(fast5, sam, ref, seq_samp_type, std_ref, rsqgl_params), 1):
#if i>200: break
if not i%100: sys.stderr.write(" %s - %s reads skipped: %s \r"%(i, sum(errors.values()), str(errors)))
if not res:
if err not in errors: errors[err] = 1
else: errors[err] += 1
continue
# get pysam alignment object & exonic blocks
a, blocks = res.a, res.align_info.blocks
# get signal intensity means
si = get_norm_mean(res.raw_signal, res.segs)
# catch problems - here exonic seq will have different length
if len(si)!=sum([e-s for s, e in blocks]): #a.reference_length:
region = "%s:%s-%s"%(a.reference_name, a.reference_start, a.reference_end)
print(a.qname, region, sam.lengths[a.reference_id], a.reference_length, len(si), blocks)
# get dwell times capped at 255 to fit uint8 (1 byte per base)
dt = res.segs[1:]-res.segs[:-1]
dt[dt>255] = 255
# get reference-aligned base probabilities: tr (ref base)
tr = get_trace_for_all_bases(a, res.read, rna) # trA, trC, trG, trT, (canonical) tr
if a.is_reverse: si, dt = si[::-1], dt[::-1]
# and finally set tags matching refseq
## but if alignment reaches seq end the end signal/probs will be wrong!
## same at exon-intron boundaries
a.set_tag("bs", array("i", np.array(blocks).flatten()))
a.set_tag("si", array("f", si))
a.set_tag("dt", array("B", dt))
# tr correspond to reference base
# get exonic tr
exonic_pos = np.concatenate([np.arange(s, e) for s, e in blocks])
tr = tr[exonic_pos-a.pos]
a.set_tag("tA", array("B", tr[:,0]))
a.set_tag("tC", array("B", tr[:,1]))
a.set_tag("tG", array("B", tr[:,2]))
a.set_tag("tT", array("B", tr[:,3]))
a.set_tag("tr", array("B", tr[:,4]))
# add quality scores
a.set_tag("QQ", array("B", a.query_qualities))
# read id
a.set_tag('ID', a.qname)
# store read alignment with additional info
bam_unsorted.write(a)
# close tmp, sort, index & clean-up
bam_unsorted.close()
pysam.sort("-o", outfn, tmp.name)
pysam.index(outfn)
os.unlink(tmp.name)
# write error report
with open('%s.json'%outfn, 'w') as f:
errors["Alignements"] = i # store number of alignements
f.write(json.dumps(errors)) #
return outfn
def mod_encode(fnames, fasta, threads=1, rna=True, sensitive=False, mem=1):
"""Process multiple directories from Fast5 files"""
# no need to have more threads than input directories ;)
if threads > len(fnames):
threads = len(fnames)
# use pool if more than 1 thread, otherwise just itertools
if threads>1: p = Pool(threads, maxtasksperchild=1)
else: p = itertools
# get arguments for func
args = [(fn, fasta, rna, sensitive) for fn in fnames]# if not os.path.isfile("%s.bam"%fn)]
# return list of outputs
return list(p.starmap(process_fast5, args))
def memory_usage(childrenmem=True, div=1024.):
"""Return memory usage in MB including children processes"""
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / div
if childrenmem:
mem += resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss / div
return mem
def logger(info, add_timestamp=1, add_memory=1, out=sys.stderr):
"""Report nicely formatted stream to stderr"""
info = info.rstrip('\n')
memory = timestamp = ""
if add_timestamp:
timestamp = "[%s]"%str(datetime.now()).split(".")[0]
if add_memory:
memory = " [mem: %5.0f MB]"%memory_usage()
out.write("%s %s%s\n"%(timestamp, info, memory))
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument("-v", "--verbose", action="store_true", help="verbose")
parser.add_argument("-i", "--input", nargs="+", help="input Fast5 file(s)")
parser.add_argument("--rna", action='store_true', help="project is RNA sequencing [DNA]")
parser.add_argument("-f", "--fasta", required=1, help="reference FASTA file")
parser.add_argument("-t", "--threads", default=1, type=int, help="number of cores to use [%(default)s]")
parser.add_argument("-s", "--sensitive", action='store_true', help="use sensitive alignment")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
# encode tombo output into BAM files
logger("Processing %s file(s)..."%len(o.input))
bamfiles = mod_encode(o.input, o.fasta, o.threads, o.rna, o.sensitive)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
#except IOError as e:
# sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s \n"%dt)
|
wvandertoorn/nanoRMS | epinano_RMS/epinano_rms.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os,re,io,gzip,bz2,pysam
import shutil, fileinput
import glob, itertools
import subprocess
import argparse
import multiprocessing as mp
from multiprocessing import Process, Manager
from functools import partial
from sys import __stdout__
import dask
import dask.dataframe as dd
import pandas as pd
from collections import defaultdict
from collections import OrderedDict
import numpy as np
#~~~~~~~~~~~~~~~~~~~~ private function ~~~~~~~~
# func1 subprocess call linux commands
def touch(fname):
if os.path.exists(fname):
os.utime(fname, None)
else:
open(fname, 'a').close()
def openfile(f):
if f.endswith ('.gz'):
fh = gzip.open (f,'rt')
elif f.endswith ('bz') or f.endswith ('bz2'):
fh = bz2.open(f,'rt')
else:
fh = open(f,'rt')
return fh
def spot_empty_tsv (tsv):
ary = []
cnt = 0
with open (tsv,'r') as fh:
for l in fh:
if cnt <2:
ary.append (l)
else:
break
cnt += 1
return True if len (ary)>1 else False
def split_tsv_for_per_site_var_freq(tsv,folder, q, number_threads, num_reads_per_chunk=4000):
    '''
    Split the sam2tsv stream into chunks of num_reads_per_chunk reads,
    writing each chunk to a file in folder and queueing (idx, filename) for the workers.
    '''
head = next(tsv)
firstline = next (tsv)
current_rd = firstline.split()[0]
rd_cnt = 1
idx = 0
out_fn = "{}/CHUNK_{}.txt".format(folder, idx)
out_fh = open (out_fn, 'w')
#chunk_out = [] # open ("CHUNK_{}.txt".format(idx),'w')
#chunk_out.append(firstline)
print (firstline.rstrip(), file=out_fh)
try:
for line in tsv:
rd = line.split()[0]
if current_rd != rd:
rd_cnt += 1
current_rd = rd
if ((rd_cnt-1) % num_reads_per_chunk == 0 and rd_cnt >= num_reads_per_chunk):
q.put ((idx, out_fn)) #.close()
idx += 1
out_fn = "{}/CHUNK_{}.txt".format(folder,idx)
out_fh = open (out_fn, 'w')
print (line.rstrip(), file=out_fh)
out_fh.close()
q.put((idx, out_fn))
    except:
        sys.stderr.write("split tsv file on reads failed\n")
        raise
finally:
for _ in range(number_threads):
q.put(None)
def proc_small_freq (small_freq_fn):
df = pd.read_csv (small_freq_fn)
df['pos'] = df['pos'].astype(str)
df['index'] = df[['#Ref','pos','base','strand']].apply (lambda x: "-EPIJN-".join(x),axis=1)
df.drop(['#Ref','pos','base','strand'], axis=1, inplace=True)
df.set_index(['index'], inplace=True)
df['qual'] = df['qual'].replace(r':{2,}',':',regex=True)
df['qual'] = df['qual'].replace(r':$','',regex=True)
df['bases'] = df['bases'].replace(r':{2,}',':', regex=True)
df['bases'] = df['bases'].replace(r':$','', regex=True)
df[['_A_', '_C_', '_G_', '_T_']] = df['bases'].str.split(pat=':', expand=True)
df.drop (['bases'],axis=1, inplace=True)
return df
def file_exist (file):
return os.path.exists (file)
def _rm (file):
os.remove (file)
def stdin_stdout_gen (stdin_stdout):
'''
generator for subprocess popen stdout
'''
for l in stdin_stdout:
if isinstance (l,bytes):
yield (l.decode('utf-8'))
else:
yield l
def print_from_stdout (stdout_lst, outputfh):
for i, o in enumerate (stdout_lst):
for l in o:
if l.decode().startswith ('#'):
if i >1 :
continue
outputfh.write(l.decode())
#~~~~~~~
def java_bam_to_tsv (bam_file, reference_file, sam2tsv):
    '''
    Build the shell commands that convert a BAM file to sam2tsv output,
    one command per strand, with a strand column appended.
    '''
awk_forward_strand = """ awk '{if (/^#/) print $0"\tSTARAND"; else print $0"\t+"}' """
awk_reverse_strand = """ awk '{if (/^#/) print $0"\tSTARAND"; else print $0"\t-"}' """
cmds = []
cmd1 = (f"samtools view -h -F 3860 {bam_file} | java -jar {sam2tsv} -r {reference_file} "
f"| {awk_forward_strand} ")
cmd2 = (f"samtools view -h -f 16 -F 3844 {bam_file} | java -jar {sam2tsv} -r {reference_file} "
f" | {awk_reverse_strand}")
cmds = [cmd1,cmd2]
return cmds
# data frame
def tsv_to_freq_multiprocessing_with_manager (tsv_reads_chunk_q, out_dir):
'''
    multiprocessing worker; input is sam2tsv.jar output with strand information added:
read read-flags reference read-pos read-base read-qual ref-pos ref-base cigar-op strand
a3194184-d809-42dc-9fa1-dfb497d2ed6a 0 cc6m_2244_T7_ecorv 0 C # 438 G S +
'''
for idx, tsv_small_chunk_fn in iter (tsv_reads_chunk_q.get, None):
filename = "{}/small_{}.freq".format(out_dir, idx)
outh = open (filename,'w')
mis = defaultdict(int) # mismatches
mat = defaultdict (int) #matches
ins = defaultdict(int) # insertions
dele = defaultdict(int) # deletions
cov = OrderedDict () # coverage
ins_q = defaultdict(list)
aln_mem = [] #read, ref, refpos; only store last entry not matching insertion
pos = defaultdict(list) # reference positions
base = {} # ref base
qual = defaultdict(list)
read_bases = defaultdict (dict)
#READ_NAME FLAG CHROM READ_POS BASE QUAL REF_POS REF OP STRAND
#read read-flags reference read-pos read-base read-qual ref-pos ref-base cigar-op strand
tsv_small_chunk = open (tsv_small_chunk_fn,'r')
for line in tsv_small_chunk:
if line.startswith ('#'):
continue
ary = line.rstrip().split()
if ary[-2] in ['M','m']:
k = (ary[2], int (ary[-4]), ary[-1]) #
cov[k] = cov.get(k,0) + 1
aln_mem = []
aln_mem.append((ary[0],ary[2],int(ary[-4]), ary[-1]))
qual[k].append (ord(ary[-5])-33)
base[k] = ary[-3].upper()
read_bases[k][ary[4]] = read_bases[k].get(ary[4], 0) + 1
if (ary[-3] != ary[4]):
mis[k] += 1
else:
mat[k] += 1
if ary[-2] == 'D':
k = (ary[2], int(ary[-4]), ary[-1])
cov[k] = cov.get(k,0) + 1
aln_mem = []
aln_mem.append((ary[0],ary[2],int(ary[-4]), ary[-1]))
base[k] = ary[-3].upper()
dele[k] = dele.get(k,0) + 1
if ary[-2] == 'I':
last_k = aln_mem[-1][1],aln_mem[-1][2],aln_mem[-1][3] # last alignment with match/mismatch/del
next_k = (ary[2], last_k[1] + 1,last_k[2])
if last_k[0] != ary[2]:
pass
ins_k_up = (ary[0], ary[2], last_k[1],last_k[2])
ins_k_down = (ary[0], ary[2], last_k[1] + 1,last_k[2])
if (ins_k_down) not in ins_q:
ins[next_k] = ins.get(next_k,0) + 1
ins_q[ins_k_down].append(ord(ary[-5])-33)
if (ins_k_up) not in ins_q:
ins[last_k] = ins.get(last_k,0) + 1
ins_q[ins_k_up].append(ord(ary[-5])-33)
header = '#Ref,pos,base,cov,mat,mis,ins,del,qual,strand,bases\n'
outh.write(header)
os.remove(tsv_small_chunk_fn)
for k in cov.keys():
depth = cov.get (k,0)
Mis = mis.get (k,0)
Mat = mat.get (k,0)
Del = dele.get (k,0)
q_lst = qual.get (k,[0])
try:
q_lst = ':'.join (map (str, q_lst))+':' # dataframe sum
num_ins = ins.get (k,0)
bases_counts = "0:0:0:0:"
if k in read_bases:
bases_counts = ":".join ([str(read_bases[k].get(l,0)) for l in 'ACGT'])
inf = "{},{},{},{},{},{},{},{},{},{},{}:\n".format (k[0], k[1], base[k], depth, Mat, Mis, num_ins, Del, q_lst, k[2], bases_counts)
outh.write (inf)
except:
sys.stderr.write ("file {} {} does not work\n".format (tsv,k))
def df_is_not_empty(df):
'''
input df is a df filtred on reference id
if is is empty: next (df.iterrows()) does not work
otherwise it returns a row of df
'''
try:
next (df.iterrows())
return True
except:
return False
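# Note: for a row-filtered frame, `not df.empty` is the idiomatic pandas
# equivalent of the try/next probe above.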
def _tsv_gen_ (bam_fn, ref_fn, sam2tsv_jar):
cmds = java_bam_to_tsv (bam_fn, ref_fn, sam2tsv_jar) #, args.type)
cmd1 = subprocess.Popen ((cmds[0]), stdout=subprocess.PIPE, stderr = subprocess.PIPE,shell=True)
cmd2 = subprocess.Popen ((cmds[1]), stdout=subprocess.PIPE, stderr = subprocess.PIPE,shell=True)
    # poll() returns None while a command is still running, so this check
    # only catches commands that failed immediately on startup
    returncode1 = cmd1.poll()
    returncode2 = cmd2.poll()
    if any ([returncode1, returncode2]):
res1 = cmd1.communicate()
res2 = cmd2.communicate()
print (res1[1], res2[1], file=sys.stderr)
exit()
return itertools.chain (stdin_stdout_gen (cmd1.stdout), stdin_stdout_gen (cmd2.stdout))
#~~~~~~~~~~~~~~~~~~~~~~~ main () ~~~~~~~~~~~~~~~~~~~~~~~
def main ():
parser = argparse.ArgumentParser()
parser.add_argument ('-R','--reference', type=str, required=True, help='samtools faidx indexed reference file')
    parser.add_argument ('-b', '--bam', type=str, required=True, help='bam file; if given, no need to offer a reads file and mapping will be skipped')
parser.add_argument ('-s', '--sam2tsv',type=str, required=True, default='',help='/path/to/sam2tsv.jar; needed unless a sam2tsv.jar produced file is already given')
parser.add_argument ('-n', '--number_cpus', type=int, default=4, help='number of CPUs')
parser.add_argument ('-d', '--delete', action='store_true', help = 'delete intermediate files')
args=parser.parse_args()
#~~~~~~~~~~~~~~~~~~~~~~~ prepare for analysis ~~~~~~~~~~~~~~
prefix = ''
if args.reference:
if not file_exist (args.reference):
            sys.stderr.write (args.reference + ' does not exist\n')
exit()
dict_fn = args.reference + '.dict'
if not file_exist (dict_fn):
            sys.stderr.write (dict_fn + ' needs to be created using picard.jar CreateSequenceDictionary\n')
exit()
ref_faidx = args.reference +'.fai'
if not file_exist (ref_faidx):
            sys.stderr.write (ref_faidx + ' needs to be created with samtools faidx\n')
exit()
if args.bam:
bam_file = args.bam
if not file_exist (bam_file):
sys.stderr.write (bam_file+' does not exist; please double check!\n')
exit()
else:
if not file_exist (args.sam2tsv):
sys.stderr.write ("Please offer correctly path to sam2tsv.jar\n".format(args.sam2tsv))
exit()
if not os.path.exists (bam_file+'.bai'):
print (bam_file)
sys.stderr.write ('bam file not indexed!\nstarting indexing it ...')
pysam.index (bam_file)
if not args.reference :
sys.stderr.write('requires reference file that was used for reads mapping\n')
prefix = re.sub (r'.bam$', '', bam_file) # bam_file.replace('.bam','')
#~~~~~~~~~~~~~~~~ SAM2TSV ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ################# function run commands ###########################
#~~~~~~~~~~~~~~~~ split tsv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tsv_gen = _tsv_gen_(args.bam, args.reference, args.sam2tsv)
tmp_dir = prefix + '.tmp_splitted_base_freq'
progress_fn = ".{}.done_splitting".format(tmp_dir)
if not os.path.exists(progress_fn):
        if os.path.exists(tmp_dir):
            sys.stderr.write ("{} already exists, will overwrite it\n".format(tmp_dir))
            shutil.rmtree (tmp_dir)
os.mkdir (tmp_dir)
number_threads = args.number_cpus
manager = Manager()
q = manager.Queue(args.number_cpus)
    #~~~~~~~~~~~~~~~~ compute per site variant frequencies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #1 calculate variant frequency for each small split file
processes = []
ps = Process (target = split_tsv_for_per_site_var_freq, args = (tsv_gen, tmp_dir, q, number_threads, 2500))
processes.append (ps)
for _ in range(number_threads):
ps = Process (target= tsv_to_freq_multiprocessing_with_manager, args = (q, tmp_dir))
processes.append (ps)
for ps in processes:
ps.daemon = True
ps.start()
for ps in processes:
ps.join()
touch (".{}.done_splitting".format(tmp_dir))
    #2 combine small files and produce variant frequencies per ref-position
small_freq_fns = [os.path.join (tmp_dir, f) for f in os.listdir(tmp_dir) if f.startswith('small_')]
out = open (prefix + '.per.site.baseFreq.csv', 'w')
print ('#Ref,pos,base,strand,cov,mean_q,median_q,std_q,mis,ins,del,ACGT', file=out)
ddf_lst = []
for f in small_freq_fns:
df = proc_small_freq (f)
ddf = dd.from_pandas(df, npartitions=2)
ddf_lst.append(ddf)
ddf_cat = dd.concat (ddf_lst, axis=1)
for r in ddf_cat.iterrows ():
index, var = r[0],r[1]
        var_df = pd.DataFrame (np.split(np.array(var), len(r[1])//10)) #10: cov, mat, mis, ins, del, qual, _A_, _C_, _G_, _T_
var_df.columns = ['cov', 'mat', 'mis', 'ins', 'del', 'qual', '_A_', '_C_','_G_', '_T_']
cov=var_df['cov'].sum()
mat="{:.6f}".format(var_df['mat'].sum()/cov)
mis="{:.6f}".format(var_df['mis'].sum()/cov)
ins="{:.6f}".format(var_df['ins'].sum()/cov)
dele = "{:.6f}".format(var_df['del'].sum()/cov)
qual = var_df['qual'].dropna().sum() #apply(str).replace(np.nan,'',regex=True).sum()
qual = np.array(qual.split(':')).astype(int)
qmn, qme, qst = "{:.6f}".format(np.mean(qual)), "{:.6f}".format(np.median(qual)), "{:.6f}".format(np.std(qual))
ACGTs = [var_df['_A_'].dropna().astype(int).sum(), var_df['_C_'].dropna().astype(int).sum() ,
var_df['_G_'].dropna().astype(int).sum() , var_df['_T_'].dropna().astype(int).sum()]
index = index.replace('-EPIJN-',',')
out.write ("{},{},{},{},{},{},{},{},{}\n".format(index,cov,qmn,qme,qst,mis,ins,dele,":".join(map (str,ACGTs))))
# ~~~~~~~~~~~~~~~~~ delete intermediate files
if args.delete:
pool = mp.Pool(args.number_cpus)
tmp_files = glob.glob("{}/small*".format(tmp_dir))
pool.map(_rm, tmp_files)
shutil.rmtree(tmp_dir)
if __name__ == "__main__":
main()
|
wvandertoorn/nanoRMS | per_read/common_functions.py | <filename>per_read/common_functions.py
"""
Here we store all functions that are used across Jupyter notebooks
"""
import csv, gzip, os, matplotlib.pyplot as plt, numpy as np, pandas as pd, pysam, sys
import seaborn as sns#; sns.set()
import eif_new as iso_new
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from collections import Counter
from multiprocessing import Pool
# it's only DNA as in SAM U is always written as T
base2complement = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N"}
# nanopolish parser
def mer_generator(handle, k=15):
"""Report consecutive k-mers from nanopolish output"""
# data handle
pcontig, pread_name, mer_data = 0, 0, []
rd = csv.reader(handle, delimiter="\t", quotechar='"')
header = rd.__next__() #; print(header)
for i, r in enumerate(rd):
#if i>100000: break
if not i%10000:
sys.stderr.write(" %s \r"%i)
contig, position, reference_kmer, read_name, strand, event_index, event_level_mean, event_stdv, event_length, model_kmer, model_mean, model_stdv, standardized_level, start_idx, end_idx = r[:15]
# skip undetermined model_kmers
#if "N" in model_kmer: continue
# get int and float
position, event_level_mean, event_length = int(position), float(event_level_mean), float(event_length)
# start over
if pcontig!=contig or pread_name!=read_name:
#if len(mer_data)==k: yield pcontig, ppos, mer_data[:-1]
pread_name, pcontig, ppos, mer_data = read_name, contig, position, [[]]
# define data to store
data = (event_level_mean, event_length)
# add to previous mer
if position == ppos: mer_data[-1].append(data)
# add new mer position
elif position == ppos+1:
mer_data.append([data, ])
ppos = position
# start new mer
else:
if len(mer_data)==k: yield pcontig, ppos, mer_data[:-1]
ppos, mer_data = position, [[data, ]]
# report middle position only if full mer
if len(mer_data)==k+1: # skip last pos, since it's still not complete
yield pcontig, ppos-1, mer_data[:-1]
mer_data = mer_data[1:]
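# Usage sketch (the file name is an assumption): stream consecutive k-mers
# from gzipped nanopolish eventalign output:
#   with gzip.open("eventalign.tsv.gz", "rt") as handle:
#       for contig, pos, mer_data in mer_generator(handle, k=15):
#           pass  # mer_data holds per-position lists of (event_level_mean, event_length)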
def nanopolish2regions(fn, regions, nn=1, maxReads=2000):
"""Create dictionary of nanopolish regions"""
k = 2*nn+1
pos2data = {(ref, pos): [] for ref, pos, mt in regions}
for ref, pos, data in mer_generator(gzip.open(fn, "rt"), k):
if (ref, pos-nn) not in pos2data: continue
        # get weighted average of events at every position
si = [np.average([e[0] for e in d], weights=[e[1] for e in d]) for d in data]
pos2data[(ref, pos-nn)].append(si)
return pos2data
# get coverage in reads per each reference position
def pass_filters(a, mapq=10):
if a.mapq<mapq or a.is_secondary or a.is_supplementary or a.is_qcfail or a.is_duplicate: return False
return True
def get_coverage(regions1, fnames1, sample2nanopolish1):
"""Return coverage from Nanopolish"""
pos2count1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn[:-10]).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in fnames1]
for ref, pos, mt in regions1}
# get number of resquiggled reads from tombo
tombo1 = ["guppy3.0.3.hac/%s/workspace/batch0.fast5.bam"%fn.split("/")[-2] for fn in fnames1]
tombo_p2c1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in tombo1]
for ref, pos, mt in regions1}
# combine
names1 = ["%s %s"%(n, fn.split("/")[-2]) for n in ("coverage", "nanopolish", "tombo") for fn in fnames1]
df4c1 = pd.DataFrame([[ref, pos, *cov, *[len(sample2nanopolish1[i][(ref, pos)]) for i, fn in enumerate(fnames1)], *tombo_p2c1[(ref, pos)]]
for (ref, pos), cov in pos2count1.items()], columns=["chrom", "pos", *names1])
return df4c1
def get_coverage2(regions1, fnames1, sample2nanopolish1):
pos2count1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn[:-10]).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in fnames1]
for ref, pos, mt in regions1}
# get number of resquiggled reads from tombo
tombo1 = ["guppy3.0.3.hac/%s/workspace/batch0.fast5.bam"%fn.split("/")[-2] for fn in fnames1]
tombo_p2c1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in tombo1]
for ref, pos, mt in regions1}
nanopolish_p2c = {(ref, pos): [len(sample2nanopolish1[i][(ref, pos)]) for i, fn in enumerate(fnames1)] for ref, pos, mt in regions1}
# combine
dframes = []
names = [fn.split("/")[-2].split("_")[-1] for fn in fnames1]
for n, d in zip(("minimap2", "nanopolish", "tombo"),
(pos2count1, nanopolish_p2c, tombo_p2c1)):
df = pd.DataFrame([[ref, pos, n, *d[(ref, pos)]] for ref, pos, mt in regions1], columns=["chrom", "pos", "name", *names])
dframes.append(df)
df4c1 = pd.concat(dframes).reset_index()
return df4c1
def get_coverage3(regions1, fnames1, sample2nanopolish1, mod="pU"):
pos2count1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn[:-10]).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in fnames1]
for ref, pos, mt in regions1}
# get number of resquiggled reads from tombo
tombo1 = ["guppy3.0.3.hac/%s/workspace/batch0.fast5.bam"%fn.split("/")[-2] for fn in fnames1]
tombo_p2c1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in tombo1]
for ref, pos, mt in regions1}
nanopolish_p2c = {(ref, pos): [len(sample2nanopolish1[i][(ref, pos)]) for i, fn in enumerate(fnames1)] for ref, pos, mt in regions1}
# combine
dframes = []
names = [fn.split("/")[-2].split("_")[-1] for fn in fnames1]
strain2idx = {n: i for i, n in enumerate(names)}
for n, d in zip(("nanopolish", "tombo"), (nanopolish_p2c, tombo_p2c1)):
df = pd.DataFrame([[ref, pos, n, m, d[(ref, pos)][strain2idx[s]]/1000] for ref, pos, mt in regions1
for s, m in zip(("wt", mt), (mod, "unmod"))],
columns=["chrom", "pos", "name", "base", "resquiggled"])
dframes.append(df)
df = pd.concat(dframes).reset_index()
return df
# FastA/BAM parsers
def get_revcomp(bases):
"""Return reverse comlement"""
return "".join(base2complement[b] for b in bases[::-1])
def fasta2bases(fastafn, ref, start, end, strands="+-", n=3):
"""Generator of individual bases from FastA file.
The output consists of:
- position in reference (1-based)
- strand integer (0 for plus, 1 for minus)
- strand as +/-
- base (complement for -)
- 7-mer (+/- n bases surrounding given position)
"""
fasta = pysam.FastaFile(fastafn)
ref2len = {r: l for r, l in zip(fasta.references, fasta.lengths)}
    if ref not in ref2len: #fasta.references:
        return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
for pos, refbase in enumerate(fasta.fetch(ref, start, end), start+1):
refbase = refbase.upper()
        # pad the window with leading/trailing Ns where it runs off the reference ends
mer = "N"*(n-pos+1) + "".join(fasta.fetch(ref, pos-n-1 if pos>n+1 else 0, pos+n)) + "N"*(pos-ref2len[ref]+n)
mer = mer.upper() # need to be upper case
for si, strand in enumerate(strands):
if si:
refbase = base2complement[refbase]
mer = get_revcomp(mer)
yield pos, si, strand, refbase, mer
def moving_average(a, n=5):
"""Calculate moving average including first n-1 objects"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
ret[n-1:] *= 1 / n
ret[:n-1] *= 1 / np.arange(1, n)
return ret
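# Worked example: moving_average(np.array([1, 2, 3, 4, 5]), n=3)
# -> array([1. , 1.5, 2. , 3. , 4. ]) (partial windows at the start).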
def bam2data(bam, ref, start, end, rna=True, nn=1, features=["si", "tr"],
maxDepth=100000, mapq=20, dtype="float16", verbose=1, logger=sys.stderr.write):
"""Generator of data for consecutive positions from ref:start-end region"""
sam = pysam.AlignmentFile(bam)#; print(ref, start, end)
# get dt_shift
f2idx = {f: i for i, f in enumerate(features)}
dt_shift_keys = list(filter(lambda k: k.startswith("dt") and k!="dt0", f2idx.keys()))
dt_shift = 0 if not len(dt_shift_keys) else int(dt_shift_keys[0][2:]) # dt10 > 10
# update end position with shift
end += dt_shift # here for DNA it's a bit complicated as we'd need to do start-=dt_shift
# this is needed later
requested_tags = list(filter(lambda f: not f.startswith("dt"), features))
if dt_shift or "dt0" in features: requested_tags.append("dt")
# here only SI & MP # here dt_shift should be read from the feature
id_tags = np.empty(maxDepth, dtype=object) # store id per read
id_tags[:] = ""
data = np.empty((len(features), maxDepth, end-start), dtype=dtype)
# solve missing trace at deletions in the read
data[:] = -1 # store -1 instead of 0 (trace with -1 will be skipped)
strands = np.zeros((maxDepth, end-start), dtype="int8") # 1: +/FOR; -1: -/REV; 0: no alignment/read
readi = 0
for a in sam.fetch(ref, start, end):
# filter by mapq
if a.mapq<mapq: continue
# make sure first position of read always corresponds to first pos in data
while a.pos>start: # consider skipping first/last 5-15 bases
# report data for reads from + & - strand separately
for strand in (1, -1):
flt = strands[:readi, nn] == strand
yield (id_tags[:readi].tolist(), data[:, :readi][:, flt, :2*nn+1])
# strip position 0 from arrays
data = data[:, :, 1:]
strands = strands[:, 1:]
start += 1
# define read start & end for current data view
s, e = start-a.pos, a.aend-a.pos if a.aend<=end else end-a.pos
# and region end
re = e-s
# get data from tags
tags = {k: v for k, v in a.tags}
# turn exonic blocks back into genomic coordinates
if "bs" in tags and len(tags["bs"])>2:
# get blocks as 2D array (start-end) with exonic intervals of the read
blocks = np.array(tags["bs"]).reshape(-1, 2)-tags["bs"][0]
# take care only about requested features
_tags = {}
for f in requested_tags:
#storing 1s is important as dt is log2 obs/exp, thus it can't be 0s
_tags[f] = np.ones(a.reference_length, dtype=dtype)
# mark exonic block in strands
read_strands = np.zeros(a.reference_length, dtype="int8")
# store block info
pe = 0
for bs, be in blocks:
# mark exonic block in read_strands
read_strands[bs:be] = -1 if a.is_reverse else 1
# store exon block into new tags
blen = be-bs
for f in requested_tags:
#print(f, bs, be, be-bs, pe, be-pe)
available = tags[f][pe:pe+blen]
_tags[f][bs:bs+len(available)] = available
pe += blen
# replace tags & udpate exonic strands
tags = _tags
strands[readi, :re] = read_strands[s:e]
else:
# mark entire read as stand
strands[readi, :re] = -1 if a.is_reverse else 1
# here we need to add special treatment for dt!
if "dt0" in f2idx or dt_shift:
# normalise dwell time by moving average and store as log2
dt = np.array(tags["dt"])
dt = np.log2(dt / moving_average(dt)) #dt.mean())
# store
for j, k in enumerate(features, 0): #for k, j in f2idx.items(): #
# dwell-time for position 0
if k=="dt0": data[j, readi, :re] = dt[s:e]
# shifted dwell time
elif k.startswith("dt"):
if rna and not a.is_reverse or not rna and a.is_reverse:
if e>s+dt_shift: # make sure enough alignment here # and len(dt[s+dt_shift:e]):
data[j, readi, :re-dt_shift] = dt[s+dt_shift:e]
elif e-dt_shift>s: # and here as well len(dt[s:e-dt_shift]):
data[j, readi, :re-dt_shift] = dt[s:e-dt_shift]
# normalise trace - this isn't needed cause we'll do min-max anyway
elif k.startswith("t"): data[j, readi, :re] = np.array(tags[k][s:e], dtype=dtype)/255
# and remaining features
else:
data[j, readi, :re] = tags[k][s:e]
id_tags[readi] = tags["ID"] if 'ID' in tags.keys() else ''
readi += 1
# clean-up only if maxDepth reached
if readi>=maxDepth:
if verbose: logger("[INFO] maxDepth reached for %s:%s-%s @ %s\n"%(ref, start, end, bam))
# get algs that still carry data
## reads still spanning the window have a non-zero strand at position 0 (introns included)
ne = strands[:, 0]!=0 # np.all(strands!=0, axis=0)#?
readi = ne.sum() # update readi
if readi>=maxDepth: # if all reads are still aligned, remove random 25% of algs
ne[np.random.randint(0, len(ne), int(0.25*maxDepth))] = False
readi = ne.sum() # update readi
# update strands & data
_strands, _data, _id_tags = np.zeros_like(strands), np.zeros_like(data), np.zeros_like(id_tags)
_strands[:readi] = strands[ne]
_data[:, :readi] = data[:, ne]
_id_tags[:readi] = id_tags[ne]
strands, data, id_tags = _strands, _data, _id_tags
# report last bit from region
for pos in range(strands.shape[1]-nn):
# report data for reads from + & - strand separately
for strand in (1, -1):
flt = strands[:readi, pos+nn] == strand
yield (id_tags[:readi].tolist(), data[:, :readi][:, flt, pos:pos+2*nn+1])
# functions we'll need to load the data
def load_data(fasta, bams, regions, features, max_reads=1000, strands="+-", nn=1):
"""Return features for positions of interest"""
# get storage
k = 2*nn+1
fi = 0
sam = pysam.AlignmentFile(bams[0])
region2data = {}
for ri, (ref, pos, _) in enumerate(regions, 1):
sys.stderr.write(" %s / %s %s:%s \r"%(ri, len(regions), ref, pos))
start, end = pos-1, pos
# extend start/end by nn and end by dt_shift
##this is for RNA; for DNA, start needs to be shifted by -dt_shift
parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True,
nn, features, max_reads) for bam in bams]
refparser = fasta2bases(fasta, ref, start, end, strands)
for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):
if strand=="+":
region2data[(ref, pos)] = (mer, [np.hstack(c) for c in calls])
return region2data
def load_data_reps(fasta, bams, regions, features, strains, strains_unique, maxReads=100000, nn=1):
"""Return features for positions of interest"""
# get storage
k = 2*nn+1
fi = 0
strain2idx = {s: idx for idx, s in enumerate(strains_unique)}
region2data = {}
for ri, (ref, pos, strand) in enumerate(regions, 1):
if type(strand)==float: strand="+" # sometimes strand is missing, assume +
start, end = pos-1, pos
sys.stderr.write(" %s / %s %s:%s-%s \r"%(ri, len(regions), ref, start, end))
# extend start/end by nn and end by dt_shift
##this is for RNA; for DNA, start needs to be shifted by -dt_shift
parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True,
nn, features, maxReads) for bam in bams]
refparser = fasta2bases(fasta, ref, start, end, n=nn)
for ((pos, _, _strand, refbase, mer), *calls) in zip(refparser, *parsers):
if _strand==strand:
sdata = [[], []] #np.hstack(c) for c in calls]
sid = [[], []]
for c, s in zip(calls, strains):
sdata[strain2idx[s]].append(np.hstack(c[1])) # feature data
sid[strain2idx[s]].extend(c[0]) # read ids
# merge replicates
region2data[(pos, strand)] = (mer, sid, [np.vstack(sd) for sd in sdata])
return region2data
def get_data_mix(unmod, mod, frac, max_reads):
"""Return sample containing mod[:frac*max_reads] and unmod[:(1-frac)*max_reads]"""
mod_n = int(round(frac*max_reads))
unmod_n = max_reads-mod_n #int(round((1-frac)*max_reads))
return np.vstack([unmod[:unmod_n], mod[:mod_n]])
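# Example (hypothetical arrays): get_data_mix(unmod, mod, frac=0.25, max_reads=100)
# stacks unmod[:75] on top of mod[:25], i.e. a sample with 25% modified reads.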
def load_data_stoichometry(fasta, bams, regions, features, samples, fracs,
maxReads=1000, strands="+-", nn=1):
"""Return features for positions of interest"""
# get storage
k = 2*nn+1
fi = 0
sam = pysam.AlignmentFile(bams[0])
region2data = {}
sample2idx = {s: i for i, s in enumerate(samples)}; print(sample2idx)
for ri, (ref, pos, mt) in enumerate(regions, 1):
sys.stderr.write(" %s / %s %s:%s \r"%(ri, len(regions), ref, pos))
start, end = pos-1, pos
# extend start/end by nn and end by dt_shift
##this is for RNA; for DNA, start needs to be shifted by -dt_shift
parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True,
nn, features, maxReads) for bam in bams]
refparser = fasta2bases(fasta, ref, start, end, strands)
for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):
if strand=="+":
sample2data = [np.hstack(c) for c in calls]
# get min number of reads
max_reads = int(min(map(len, sample2data))/3)#; print(ref, pos, mt, max_reads, [s.shape for s in sample2data])
# first get 2 fully unmodified and 1 fully modified sample - those reads won't be used later on
data_frac = [sample2data[sample2idx[mt]][max_reads:2*max_reads], # this will be used as 0 sample
sample2data[sample2idx[mt]][-max_reads:], sample2data[sample2idx["wt"]][-max_reads:], # those two will be training set
]
# the get samples with given fractions of modified reads
data_frac += [get_data_mix(sample2data[sample2idx[mt]],
sample2data[sample2idx["wt"]], frac, max_reads)
for frac in fracs]
region2data[(ref, pos)] = (mer, data_frac)
return region2data
def load_data_train_test_val(fasta, bams, regions, features, samples, maxReads=1000, strands="+-", nn=1):
"""Return features for positions of interest"""
# get storage
k = 2*nn+1
fi = 0
sam = pysam.AlignmentFile(bams[0])
region2data = {}
sample2idx = {s: i for i, s in enumerate(samples)}; print(sample2idx)
for ri, (ref, pos, mt) in enumerate(regions, 1):
sys.stderr.write(" %s / %s %s:%s \r"%(ri, len(regions), ref, pos))
start, end = pos-1, pos
# extend start/end by nn and end by dt_shift
##this is for RNA; for DNA, start needs to be shifted by -dt_shift
parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True,
nn, features, maxReads) for bam in bams]
refparser = fasta2bases(fasta, ref, start, end, strands)
for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):
if strand=="+":
sample2data = [np.hstack(c) for c in calls]
# get min number of reads
max_reads = int(min(map(len, sample2data))/3)#; print(ref, pos, mt, max_reads, [s.shape for s in sample2data])
# first get 2 fully unmodified and 1 fully modified sample - those reads won't be used later on
data_frac = [sample2data[sample2idx[mt]][max_reads:2*max_reads], # this will be used as 0 sample
sample2data[sample2idx[mt]][-max_reads:], sample2data[sample2idx["wt"]][-max_reads:], # those two will be training set
]
# get a bit of every sample
data_frac += [sd[:max_reads] for sd in sample2data]
region2data[(ref, pos)] = (mer, data_frac)
return region2data
# functions we'll need to plot
def get_modfreq_from_quantiles_many_samples(scores_per_sample, q=0.1):
"""Return modification frequency calculated using quantiles method"""
freqs = np.zeros(len(scores_per_sample))
minc = min(map(len, scores_per_sample))
q1, q2 = np.quantile(np.concatenate([s[:minc] for s in scores_per_sample]), [q, 1-q])
for i, _scores in enumerate(scores_per_sample):
confs = [(_scores<q1).sum(), (_scores>q2).sum()]
if not sum(confs): continue
mod_freq = confs[1]/sum(confs)
freqs[i] = mod_freq
return freqs
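# Illustration (hypothetical numbers): with q=0.1, reads scoring below the 10th
# percentile of the pooled scores count as unmodified and those above the 90th
# as modified; a sample whose confident reads split 30 low / 70 high is
# reported as mod_freq = 70/(30+70) = 0.7.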
def get_mod_freq_clf(df, cols, chr_pos, strains, clf, method="GMM"):
"""Predict modification frequency using single classifier"""
results = []
for cp in chr_pos:
# min-max normalisation
_df = df.loc[(df["chr_pos"]==cp)&(df.Strain.isin(strains)), cols+["Strain"]]
_X = min_max_norm(_df[cols].to_numpy().astype("float"))
# get fit and clusters
clusters = clf.fit_predict(_X)
# for outlier method, store outliers (-1) as cluster_1 and normal (1) as cluster_0
if max(clusters)>1:
clusters[clusters!=0] = 1
elif -1 in clusters and 1 in clusters: # outlier method
clusters[clusters==1] = 0
clusters[clusters<0] = 1
# get modification frequency - simply the fraction of 1s for each sample
freqs = [clusters[_df["Strain"]==s].mean() for s in strains]
results.append((cp, method, *freqs, ", ".join(map(str, strains[1:]))))
return results
def min_max_norm(X):
"""Return (X-min(X))/(max(X)-min(X))"""
#return X # no min_max_norm ;)
Xmax, Xmin = X.max(axis=0), X.min(axis=0)
sel = Xmin!=Xmax
if sel.sum():
X[:, sel] = (X[:, sel] - Xmin[sel])/(Xmax[sel] - Xmin[sel]) # columns with min==max are skipped to avoid division by zero
return X
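# Example (a sketch): min_max_norm(np.array([[0., 5.], [10., 5.]])) scales the
# first column to [0, 1] and leaves the constant second column as-is, since
# min==max there would make the denominator zero.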
def get_mod_freq_two_step(df, cols, chr_pos, strains, method="GMM+eIF", clf_name="GMM",
clf=GaussianMixture(n_components=4, random_state=0),
clf2_name="eIF", clf2=iso_new.iForest(random_state=0),
OFFSET=None):
"""Predict modification frequency using """
results = []
for cp in chr_pos:
_df = df.loc[(df["chr_pos"]==cp)&(df.Strain.isin(strains)), cols+["Strain"]]
_X = min_max_norm(_df[cols].to_numpy().astype("float"))
# get clusters from GMM using only SIGNAL INTENSITY
clusters = clf.fit_predict(_X) #[:,:3]
c2i = Counter(clusters)#; print(c2i)
# get outliers using every cluster as training set
mod_freqs = np.zeros((len(c2i), len(strains)))
mod_freqs1 = np.zeros_like(mod_freqs)
for cl in list(c2i.keys())[:3]:
Xtrain = _X[clusters==cl]
if len(Xtrain)<3: continue # this is arbitrary value
scores = clf2.fit(Xtrain).score_samples(_X)
offset = (max(scores)-min(scores))/2 if not OFFSET else OFFSET
y_pred = scores>offset
# get mod_freq from outlier score cut-off
mod_freqs1[cl] = [y_pred[_df["Strain"]==s].mean() for s in strains]
# and using quantile method
mod_freqs[cl] = get_modfreq_from_quantiles_many_samples([scores[_df["Strain"]==s] for s in strains])
# pick cluster that gave the largest difference in mod_freq between any two samples
extremes = np.vstack([np.nanmin(mod_freqs, axis=1), np.nanmax(mod_freqs, axis=1)])
mod_freq_idx = np.abs(np.diff(extremes, axis=0)).argmax()#; print(mod_freq_idx)
# and report
#results.append((cp, "%s+%s_c"%(clf_name, clf2_name), *mod_freqs1[mod_freq_idx],
# ", ".join(map(str, strains[1:]))))
results.append((cp, method, *mod_freqs[mod_freq_idx], ", ".join(map(str, strains[1:]))))
return results
def get_mod_freq_clf_train_test(df, cols, chr_pos, strains, train_samples,
clf=KNeighborsClassifier(), method="KNN"):
"""Predict modification frequency using single classifier"""
results = []
for cp in chr_pos:
# train the classifier using the training samples: unmod and mod
_df = df.loc[(df["chr_pos"]==cp)&(df.Strain.isin(train_samples)), cols+["Strain"]]
X_train = min_max_norm(_df[cols].to_numpy().astype("float"))
y_train = _df.Strain==train_samples[-1]
clf.fit(X_train, y_train)
# min-max normalisation
_df = df.loc[(df["chr_pos"]==cp)&(df.Strain.isin(strains)), cols+["Strain"]]
_X = min_max_norm(_df[cols].to_numpy().astype("float"))
# get fit and clusters
clusters = clf.predict(_X) # this will return 0 (unmodified) and 1 (modified)
# get modification frequency - simply the fraction of 1s for each sample
freqs = [clusters[_df["Strain"]==s].mean() for s in strains]
results.append((cp, method, *freqs, ", ".join(map(str, strains[1:]))))
return results
def generate_figures_and_xls(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):
"""Generate figures and tables"""
all_freqs = []
# concatenate all pos and samples into one dataframe
dframes = []
for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)
mer, calls = region2data[(ref, pos)]
for c, s in zip(calls, samples):
df = pd.DataFrame(c, columns=feature_names)
df["Strain"] = s
df["chr_pos"] = "%s:%s"%(ref, pos)
dframes.append(df)
# read all tsv files
df = pd.concat(dframes).dropna().reset_index()
chr_pos, strains = df["chr_pos"].unique(), df["Strain"].unique()
# compare individual methods
for clf, method in (
(iso_new.iForest(ntrees=100, random_state=0), "GMM+eIF"),
(GaussianMixture(random_state=0, n_components=2), "GMM"),
(AgglomerativeClustering(n_clusters=2), "AggClust"),
(KMeans(n_clusters=2), "KMeans"),
(OneClassSVM(), "OCSVM"),
(IsolationForest(random_state=0), "IF"),
(iso_new.iForest(ntrees=100, random_state=0), "eIF"),
(KNeighborsClassifier(), "KNN"),
(RandomForestClassifier(), "RF"),
):
fname = method
print(fname)
outfn = os.path.join(outdir, "%s.%s"%(fname, ext))
results = []
for i, cols_start in enumerate(cols_starts, 1):
# narrow down the features to only signal intensity & trace
cols = list(filter(lambda n: n.startswith(cols_start), feature_names))
# compare all samples to 0%
s0 = samples[0]
for s in samples[3:]:
with np.errstate(under='ignore'):
if "+" in method:
clf2_name = method.split("+")[-1]
results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], "_".join(cols_start),
OFFSET=0.5, clf2_name=clf2_name, clf2=clf)
elif method in ("KNN", "RF"):
results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, "_".join(cols_start))
else:
results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, "_".join(cols_start))
# and store mod_freq predicted by various methods
freqs = pd.DataFrame(results, columns=["chr_pos", "features", "mod_freq wt", "mod_freq strain", "strain"])
freqs["diff"] = freqs.max(axis=1)-freqs.min(axis=1); freqs
for name, pos in group2pos.items(): #(("negative", negatives), ("pU", pU_pos), ("Nm", Nm_pos)):
freqs.loc[freqs["chr_pos"].isin(pos), "group"] = name
#freqs.to_csv(outfn, sep="\t"); freqs.head()
freqs.to_excel(xls, fname, index=False)
# plot differences between methods
for group, pos in group2pos.items():
freqs.loc[freqs["chr_pos"].isin(pos), "modification"] = group
#g = sns.catplot(x="strain", y="diff", hue="features", col="modification", data=freqs, kind="box")#, palette="Blues")
g = sns.catplot(x="strain", y="diff", hue="features", col="modification", data=freqs, kind="point", ci=None)#, palette="Blues")
fig = g.fig
fig.suptitle(method)
for ax in fig.axes:
ax.set_xlabel("Expected mod_freq")
ax.set_ylabel("Observed mod_freq [absolute difference between wt & mt]")
ax.set_ylim(0, 1)
fig.savefig(outfn)
plt.close() # clear axis
freqs["name"] = fname
all_freqs.append(freqs)
return all_freqs
def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):
"""Generate figures and tables"""
all_freqs = []
# concatenate all pos and samples into one dataframe
dframes = []
for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)
mer, calls = region2data[(ref, pos)]
for c, s in zip(calls, samples):
df = pd.DataFrame(c, columns=feature_names)
df["Strain"] = s
df["chr_pos"] = "%s:%s"%(ref, pos)
dframes.append(df)
# read all tsv files
df = pd.concat(dframes).dropna().reset_index()
chr_pos, strains = df["chr_pos"].unique(), df["Strain"].unique()
# compare individual methods
for clf, method in (
(KMeans(n_clusters=2), "KMeans"),
(KNeighborsClassifier(), "KNN"),
#(iso_new.iForest(ntrees=100, random_state=0), "GMM+eIF"),
(GaussianMixture(random_state=0, n_components=2), "GMM"),
(AgglomerativeClustering(n_clusters=2), "AggClust"),
#(OneClassSVM(), "OCSVM"),
(IsolationForest(random_state=0), "IF"),
#(iso_new.iForest(ntrees=100, random_state=0), "eIF"),
(RandomForestClassifier(), "RF"),
):
fname = method
for i, cols_start in enumerate(cols_starts, 1):
results = []
feat_name = "_".join(cols_start)
fname = "%s.%s"%(method, feat_name); print(fname)
outfn = os.path.join(outdir, "%s.%s"%(fname, ext))
# narrow down the features to only signal intensity & trace
cols = list(filter(lambda n: n.startswith(cols_start), feature_names))
# compare all samples to 0%
s0 = samples[0]
for s in samples[3:]:
with np.errstate(under='ignore'):
if "+" in method:
clf2_name = method.split("+")[-1]
results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name,
OFFSET=0.5, clf2_name=clf2_name, clf2=clf)
elif method in ("KNN", "RF"):
results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)
else:
results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)
# and store mod_freq predicted by various methods
freqs = pd.DataFrame(results, columns=["chr_pos", "features", "mod_freq wt", "mod_freq strain", "strain"])
freqs["diff"] = freqs.max(axis=1)-freqs.min(axis=1); freqs
for name, pos in group2pos.items(): #(("negative", negatives), ("pU", pU_pos), ("Nm", Nm_pos)):
freqs.loc[freqs["chr_pos"].isin(pos), "group"] = name
#freqs.to_csv(outfn, sep="\t"); freqs.head()
freqs.to_excel(xls, fname, index=False)
# plot differences between methods
for group, pos in group2pos.items():
freqs.loc[freqs["chr_pos"].isin(pos), "modification"] = group
#return freqs
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey="all")
sns.barplot(x="chr_pos", y="mod_freq strain", hue="strain", edgecolor="white", palette=["#f8786fff", "#7aae02ff", "#00bfc2ff", "#c67afeff"],
data=freqs[(freqs["features"]==feat_name)&(freqs["group"]=="pU")], ax=ax1)
sns.barplot(x="chr_pos", y="mod_freq strain", hue="strain", edgecolor="white", palette=["#ed823aff", "#1c6ca9ff", "#35d1bbff", "#c978fdff"],
data=freqs[(freqs["features"]==feat_name)&(freqs["group"]=="Nm")], ax=ax2)
ax1.set_ylabel("Per-site stoichiometry"); ax2.set_ylabel("")
ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])
ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)
ax1.set_title("pU modifications"); ax2.set_title("Nm modifications")
fig.suptitle(fname)
fig.savefig(outfn)
plt.close() # clear axis
freqs["name"] = fname
all_freqs.append(freqs)
return all_freqs
def plot_figures(outdir, df, mt, strains_unique, hue=[], ext="pdf"):
# join with predictions
fnames = df["features"].unique()
fig, axes = plt.subplots(len(fnames), 1, figsize=(12, 5*len(fnames)))
fig.suptitle(mt)
#df_predicted = df[df["Prediction"]=="Predicted"]
#sns.boxplot(x="method", y="diff", hue="features", data=df_predicted, ax=ax) #
# plot boxplot with stripplot
for ai, (ax, fname) in enumerate(zip(axes, fnames)):
sns.boxplot(x="method", y="diff", hue="group", data=df[df["features"]==fname], ax=ax, color=".8", showfliers=False)#, width=0.8)
sns.stripplot(x="method", y="diff", hue="group", data=df[df["features"]==fname], ax=ax, dodge=True)
ax.set_ylabel("Absolute difference between %s & WT"%mt)
ax.set_title(fname); ax.set_xlabel("")
if not ai: ax.legend(bbox_to_anchor=(0, 1.1, 1, 0), loc="lower left", mode="expand", ncol=2) #bbox_to_anchor=(1.01, 1), borderaxespad=0)
else: ax.get_legend().remove() # get rid of legend for subsequent plots
fig.savefig(os.path.join(outdir, "%s.boxplot.%s"%(mt, ext)))
# plot scatterplot
methods = df.method.unique() #fnames = df.features.unique()
groups = df.group.unique()
colors = sns.color_palette(n_colors=len(groups))#"flare"
markers = [".", "1", "2", "o", "o"]
f = "SI_TR"
fig, axes = plt.subplots(1, len(methods), figsize=(5*len(methods), 5), sharex="all", sharey="all")
for ai, (ax, m) in enumerate(zip(axes, methods)):
#g = sns.scatterplot(*strains_unique[::-1], hue="group", data=df[(df["features"]==f)&(df["method"]==m)], ax=ax); ax.get_legend().remove()
for c, g, r in zip(colors, groups, markers):
ax.scatter(*strains_unique[::-1], color=c, alpha=0.75, marker=r, label=g,
data=df[(df["features"]==f)&(df["method"]==m)&(df["group"]==g)])
ax.set_title(m)
ax.set_xlabel(strains_unique[1]); ax.set_ylabel(strains_unique[0])
ax.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), "grey")
lgd = ax.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
#ax.legend(bbox_to_anchor=(-2.5, 1.1, 2.5, 0), loc="lower left", ncol=3)
ax.set_xlim(0, 1); ax.set_ylim(0, 1)
fig.suptitle("{} {}".format(mt, f))
fig.savefig(os.path.join(outdir, "%s.scatter.%s"%(mt, ext)), bbox_extra_artists=(lgd,), bbox_inches='tight')
def plot_boxplot(outdir, df, mt, method, ext="pdf"):
fnames = df["features"].unique()
fig, axes = plt.subplots(len(fnames), 1, figsize=(7, 5*len(fnames)))
fig.suptitle(mt)
df = df.sort_values("New_Status")
# plot boxplot with stripplot
for ai, (ax, fname) in enumerate(zip(axes, fnames)):
sns.boxplot(x="New_Status", y="diff", hue="Prediction", data=df[df["features"]==fname], ax=ax, color=".8", showfliers=False)#, width=0.8)
sns.stripplot(x="New_Status", y="diff", hue="Prediction", data=df[df["features"]==fname], ax=ax, dodge=True)
ax.set_ylabel("Absolute difference between %s & WT"%mt)
ax.set_title(fname); ax.set_xlabel("")
if not ai: ax.legend(bbox_to_anchor=(0, 1.1, 1, 0), loc="lower left", mode="expand", ncol=2) #bbox_to_anchor=(1.01, 1), borderaxespad=0)
else: ax.get_legend().remove() # get rid of legend for subsequent plots
fig.savefig(os.path.join(outdir, "%s.boxplot.%s.%s"%(mt, method, ext)))
def plot_density(outdir, sdata, mt, group, ref, pos, strand, mer, feature_names, colors, ext="pdf"):
"""Plot and save density plot for given position"""
fig, axes = plt.subplots(1, len(feature_names), figsize=(4*len(feature_names), 4))
fig.suptitle("{} {} {}:{}{} {}".format(mt, group, ref, pos, strand, mer))
for fi, (ax, f) in enumerate(zip(axes, feature_names)):
for si, (s, c) in enumerate(zip((mt, "wt"), colors)):
sns.kdeplot(sdata[si][:, fi], color=c, linewidth=2, shade=True, alpha=.5, legend=False, ax=ax)
ax.set_xlabel(f); ax.set_ylabel("")
axes[0].set_ylabel("Density")
fig.savefig(os.path.join(outdir, "{}:{}{}.{}".format(ref, pos, strand, ext)))
plt.close()
# classifiers and mod_freq estimators
def get_freq(y_pred, cov):
    """Split concatenated per-read predictions by per-sample coverage and return each sample's mean"""
    freq = []
ps = 0
for c in cov:
freq.append(y_pred[ps:ps+c].mean())
ps+=c
return freq
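# Example (a sketch): get_freq(np.array([0, 1, 1, 0]), cov=[2, 2]) -> [0.5, 0.5]
# - the concatenated prediction vector is split into consecutive per-sample
# chunks of length cov[i], and each chunk's mean is that sample's mod frequency.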
def get_freq_clf(region2data, strains_unique, cols_starts, feature_names, clf=KNeighborsClassifier(), clf_name="KNN"):
"""Return data frame"""
rows = []
for cols_start in cols_starts:
features = "_".join(cols_start)
cidx = [i for i, n in enumerate(feature_names) if n.startswith(cols_start)]
sys.stderr.write(" %s \r"%(features, ))
for (ref, pos, strand), (mer, data) in region2data.items():
pos_info = "{}:{}{}".format(ref, pos, strand)
cov = list(map(len, data))
X = np.vstack(data)[:, cidx] # get only columns corresponding to features of interests
#X = min_max_norm(X) # minmax_norm
y = np.zeros(len(X)) # KO
y[len(data[0]):] = 1 # WT - here many may be unmodified
clf.fit(X, y) # here we train and predict on the same dataset
y_pred = clf.predict(X)
freq = get_freq(y_pred, cov)
#print(ref, pos, strand, cov, freq)
rows.append((pos_info, clf_name, features, *cov, *freq))
# get df with all predictions
df = pd.DataFrame(rows, columns=["chr_pos", "method", "features", *["%s cov"%s for s in strains_unique], *strains_unique])
df["diff"] = abs(df[strains_unique[1]]-df[strains_unique[0]])
return df
|
QCoDeS/MQML-scripts | mqml/instrument/conductresist.py | """ Definition of an instrument to calculate differential conductance and resistance for 2- and 4-probe
measurements using inputs of two lock-in amplifiers"""
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.base import Instrument
import numpy as np
import warnings
G_0 = 7.7480917310e-5 # conductance quantum 2e^2/h, in siemens
class ConductResist(Instrument):
"""
This class holds conductance and resistance parameters, which are calculated using voltage and amplitude
parameters generated by two lock-in amplifiers. Current and voltage amplifications and/or divisions are
also set in the class.
Args:
name: the name of a created instrument
lockin1_volt: X parameter of the first lock-in, e.g., Lockin1.X
lockin1_amp: amplitude parameter of the first lock-in, e.g., Lockin1.amplitude
lockin2_volt: X parameter of the second lock-in, e.g., Lockin2.X
"""
def __init__(self, name: str, *, lockin1_volt: Parameter, lockin1_amp: Parameter, lockin2_volt: Parameter) -> None:
super().__init__(name)
self._lockin1_volt = lockin1_volt
self._lockin2_volt = lockin2_volt
self._lockin1_amp = lockin1_amp
self.add_parameter("GIamp",
label="Current Amplification",
get_cmd=None,
set_cmd=None
)
self.add_parameter("GVamp",
label="Voltage Amplification",
get_cmd=None,
set_cmd=None
)
self.add_parameter("ACdiv",
label="AC Division",
get_cmd=None,
set_cmd=None,
initial_value=1e-4
)
self.add_parameter("DCdiv",
label="DC Division",
get_cmd=None,
set_cmd=None,
initial_value=1e-2
)
self.add_parameter("diff_conductance_fpm",
label="dI/dV",
unit='2e^2/h',
get_cmd=self._desoverh_fpm
)
self.add_parameter("conductance_tpm",
label="I/V",
unit='2e^2/h',
get_cmd=self._desoverh_tpm
)
self.add_parameter("resistance_fpm",
label="R",
unit='Ohm',
get_cmd=self._ohms_law
)
def _desoverh_fpm(self) -> float:
try:
return (self._lockin1_volt()/self.GIamp())/(self._lockin2_volt()/self.GVamp())/G_0
except ZeroDivisionError:
warnings.warn('The denominator is zero, returning NaN')
return np.nan
except TypeError:
raise TypeError('Amplification and/or voltage divisions are not set. Set them and try again.')
def _desoverh_tpm(self) -> float:
try:
return (self._lockin1_volt()/self.GIamp())/(self._lockin1_amp()*self.ACdiv())/G_0
except ZeroDivisionError:
warnings.warn('The denominator is zero, returning NaN')
return np.nan
except TypeError:
raise TypeError('Amplification and/or voltage divisions are not set. Set them and try again.')
def _ohms_law(self) -> float:
try:
return (self._lockin2_volt()/self.GVamp())/(self._lockin1_volt()/self.GIamp())
except ZeroDivisionError:
warnings.warn('The denominator is zero, returning NaN')
return np.nan
except TypeError:
raise TypeError('Amplification and/or voltage divisions are not set. Set them and try again.')
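# A minimal usage sketch (the lock-in objects li1/li2 are hypothetical; only
# the ConductResist API above is taken from this module):
# cr = ConductResist('cr', lockin1_volt=li1.X, lockin1_amp=li1.amplitude,
#                    lockin2_volt=li2.X)
# cr.GIamp(1e7)    # set current amplification
# cr.GVamp(100.)   # set voltage amplification
# cr.diff_conductance_fpm()  # four-probe dI/dV in units of 2e^2/h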
|
QCoDeS/MQML-scripts | setup.py | """
Installs the mqml package
"""
from setuptools import setup, find_packages
from pathlib import Path
import versioneer
readme_file_path = Path(__file__).absolute().parent / "README.md"
required_packages = [
'opencensus-ext-azure',
'qcodes'
]
package_data = {"mqml": ["conf/telemetry.ini"] }
setup(
name="mqml",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
python_requires=">=3.7",
install_requires=required_packages,
author= "<NAME>",
author_email="<EMAIL>",
description="Package required to easily run measurements and analysis for the Microsoft Quantum Materials Lyngby lab. The source codes do not include Microsoft IP and are open source, so these packages could be generally useable.",
long_description=readme_file_path.open().read(),
long_description_content_type="text/markdown",
license="MIT",
package_data=package_data,
packages=find_packages(exclude=["*.tests", "*.tests.*"]),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3.7",
],
)
|
QCoDeS/MQML-scripts | mqml/tests/test_conductresist.py | """The test file for conductresist.py"""
import pytest
from mqml.instrument.conductresist import ConductResist
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import Parameter
import numpy as np
import warnings
@pytest.fixture(autouse=True)
def close_all_instruments():
"""Makes sure that after startup and teardown, all instruments are closed"""
Instrument.close_all()
yield
Instrument.close_all()
def test_get_initial_values():
"This tests the initial values of non-calculated parameters in the class"
volt1 = Parameter('volt1', set_cmd=None)
volt2 = Parameter('volt2', set_cmd=None)
amp = Parameter('amp', set_cmd=None)
test_inst = ConductResist('test_inst', lockin1_volt=volt1, lockin2_volt=volt2, lockin1_amp=amp)
assert test_inst.GIamp() is None
assert test_inst.GVamp() is None
assert test_inst.ACdiv() == 1e-4
assert test_inst.DCdiv() == 1e-2
def test_errors():
"This tests the erros"
volt1 = Parameter('volt1', set_cmd=None)
volt2 = Parameter('volt2', set_cmd=None)
amp = Parameter('amp', set_cmd=None)
test_inst = ConductResist('test_inst', lockin1_volt=volt1, lockin2_volt=volt2, lockin1_amp=amp)
with pytest.raises(TypeError, match="(\'Amplification and/or voltage divisions are not set. Set "\
"them and try again.\', \'getting test_inst_diff_conductance_fpm\')"):
test_inst.diff_conductance_fpm()
with pytest.raises(TypeError, match="(\'Amplification and/or voltage divisions are not set. Set "\
"them and try again.\', \'getting test_inst_conductance_tpm\')"):
test_inst.conductance_tpm()
with pytest.raises(TypeError, match="(\'Amplification and/or voltage divisions are not set. Set "\
"them and try again.\', \'getting test_inst_resistance_fpm\')"):
test_inst.resistance_fpm()
def test_warnings():
"This tests warnings if zero divisions occur"
volt1 = Parameter('volt1', set_cmd=None, initial_value=1.)
volt2 = Parameter('volt2', set_cmd=None, initial_value=0.)
amp = Parameter('amp', set_cmd=None, initial_value=0.)
test_inst = ConductResist('test_inst', lockin1_volt=volt1, lockin2_volt=volt2, lockin1_amp=amp)
test_inst.GIamp(1e7)
test_inst.GVamp(100.)
with pytest.warns(UserWarning, match='The denominator is zero, returning NaN'):
assert test_inst.diff_conductance_fpm() is np.nan
assert test_inst.conductance_tpm() is np.nan
volt1 = Parameter('volt1', set_cmd=None, initial_value=0.)
volt2 = Parameter('volt2', set_cmd=None, initial_value=1.)
amp = Parameter('amp', set_cmd=None)
test_inst2 = ConductResist('test_inst2', lockin1_volt=volt1, lockin2_volt=volt2, lockin1_amp=amp)
test_inst2.GIamp(1e7)
test_inst2.GVamp(100.)
with pytest.warns(UserWarning, match='The denominator is zero, returning NaN'):
assert test_inst2.resistance_fpm() is np.nan
def test_returning_correct_values():
"This tests the returned values of the class for calculated parameters"
volt1 = Parameter('volt1', set_cmd=None, initial_value=1.)
volt2 = Parameter('volt2', set_cmd=None, initial_value=2.)
amp = Parameter('amp', set_cmd=None, initial_value=3.)
test_inst = ConductResist('test_inst', lockin1_volt=volt1, lockin2_volt=volt2, lockin1_amp=amp)
test_inst.GIamp(1e7)
test_inst.GVamp(100.)
# ACDiv value is its initial value.
assert test_inst.diff_conductance_fpm() == 0.06453201863879687
assert test_inst.conductance_tpm() == 4.3021345759197915
assert test_inst.resistance_fpm() == 200000.0
|
RacingTadpole/twenty-questions | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file based on https://github.com/kennethreitz/setup.py/blob/master/setup.py
# From https://packaging.python.org/discussions/install-requires-vs-requirements/#requirements-files :
#
# Whereas install_requires defines the dependencies for a single project,
# requirements files are often used to define the requirements for a complete Python environment.
# Whereas install_requires requirements are minimal,
# requirements files often contain an exhaustive listing of pinned versions for the purpose of achieving
# repeatable installations of a complete environment.
#
import os
from setuptools import setup, find_packages
# Package meta-data.
NAME = 'twenty_questions'
DESCRIPTION = 'A fun project to teach python'
# URL = 'https://github.com/me/myproject'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
# What packages are required for this module to be executed?
REQUIRED = []
# You can install using eg. `pip install twenty-questions[dev]==1.0.1`.
EXTRAS = {
'dev': ['pytest-cov', 'pytest', 'mypy', 'radon', 'pycodestyle'],
}
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
# with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about: dict = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
# long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
# url=URL,
packages=find_packages(exclude=('scripts', 'test_utilities')),
# packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
package_data={'twenty-questions': ['LICENSE.txt',]},
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: Other/Proprietary License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython' # Haven't tried others.
],
# $ setup.py publish support.
# cmdclass={
# 'upload': UploadCommand,
# },
)
|
RacingTadpole/twenty-questions | twenty_questions/009-learning.py | from dataclasses import dataclass
from typing import Union
@dataclass
class Answer:
name: str
@dataclass
class Question:
text: str
yes: Union['Question', Answer]
no: Union['Question', Answer]
q = Question(
'Does your animal fly?',
yes=Question(
'Is your flying animal a bird?',
yes=Question(
'Is your bird native to Australia?',
yes=Answer('kookaburra'),
no=Answer('blue jay'),
),
no=Answer('fruit bat'),
),
no=Question(
'Does your animal live underwater?',
yes=Question('Is your animal a mammal?',
yes=Answer('blue whale'),
no=Answer('gold fish'),
),
no=Answer('wombat'),
)
)
while True:
current = q
while isinstance(current, Question):
x = input(current.text + ' ')
previous = current
if x == 'y':
current = current.yes
if x == 'n':
current = current.no
z = input('Is it a ' + current.name + '? ')
if z == 'y':
print('Wow, I guessed it!')
if z == 'n':
print('You beat me! 😡')
new_animal = input('So what was your animal? ')
new_answer = Answer(new_animal)
new_question_text = input('What is a question (with answer yes for your animal) that distinguishes a ' + new_animal + ' from a ' + current.name + '? ')
new_question = Question(text=new_question_text, yes=new_answer, no=current)
if x == 'y':
previous.yes = new_question
if x == 'n':
previous.no = new_question
print()
print("Let's play again!")
print()
|
RacingTadpole/twenty-questions | twenty_questions/005-while.py | x = 'ok'
while x != 'stop':
x = input('Type anything, or "stop" to stop: ')
print('You typed: ' + x)
print('Finally!')
|
RacingTadpole/twenty-questions | twenty_questions/006-choose-your-own.py | data = [
[0, 'Does your animal fly?', 1, 2],
[1, 'Is your flying animal a bird?', 3, 4],
[2, 'Does your animal live underwater?', 7, 8],
[3, 'Is your bird native to Australia?', 5, 6],
[4, 'Is it a fruit bat?'],
[5, 'Is it a kookaburra?'],
[6, 'Is it a blue jay?'],
[7, 'Is your animal a mammal?', 9, 10],
[8, 'Is it a wombat?'],
[9, 'Is it a blue whale?'],
[10, 'Is it a goldfish?'],
]
i = 0
while True:
question = data[i][1]
x = input(question + ' ')
if len(data[i]) == 2:
break
if x == 'y':
i = data[i][2]
if x == 'n':
i = data[i][3]
print('Thanks for playing')
|
RacingTadpole/twenty-questions | twenty_questions/010-dataclasses-json.py | from dataclasses import dataclass
from dataclasses_serialization.json import JSONSerializer
import json
@dataclass
class Person:
eye_color: str
hair_color: str
hair_count: int
name: str
life_span: int
poobear = Person('blue', 'red', 400000, 'PooBear', 89)
print(poobear.life_span / 2)
with open('person.json', 'w') as file:
file.write(json.dumps(JSONSerializer.serialize(poobear)))
|
RacingTadpole/twenty-questions | twenty_questions/008-data-classes.py | from dataclasses import dataclass
from typing import Union
@dataclass
class Answer:
text: str
@dataclass
class Question:
text: str
yes: Union['Question', Answer]
no: Union['Question', Answer]
q = Question(
'Does your animal fly?',
yes=Question(
'Is your flying animal a bird?',
yes=Question(
'Is your bird native to Australia?',
yes=Answer('kookaburra'),
no=Answer('blue jay'),
),
no=Answer('fruit bat'),
),
no=Question(
'Does your animal live underwater?',
yes=Question('Is your animal a mammal?',
yes=Answer('blue whale'),
no=Answer('gold fish'),
),
no=Answer('wombat'),
)
)
current = q
while isinstance(current, Question):
x = input(current.text + ' ')
if x == 'y':
current = current.yes
if x == 'n':
current = current.no
z = input('Is it a ' + current.text + '? ')
if z == 'y':
print('Wow, I guessed it!')
if z == 'n':
print('You beat me!')
|
RacingTadpole/twenty-questions | twenty_questions/010-saving.py | from dataclasses import dataclass
from typing import Union
from dataclasses_serialization.json import JSONSerializer
import json
@dataclass
class Answer:
name: str
@dataclass
class Question:
text: str
yes: Union['Question', Answer]
no: Union['Question', Answer]
try:
with open('game.json') as file:
questions_as_dict = json.load(file)
q = JSONSerializer.deserialize(Question, questions_as_dict)
except FileNotFoundError:
q = Answer('wombat')
print()
print()
print('Welcome to Twenty Questions')
print('Please think of an animal or plant, and I will try to guess what it is by asking questions.')
print('Please answer questions with "y" or "n"')
print()
current = q
while isinstance(current, Question):
x = input(current.text + ' ')
previous = current
if x == 'y':
current = current.yes
if x == 'n':
current = current.no
z = input('Is it a ' + current.name + '? ')
if z == 'y':
print('Well that was easy! 🥱')
if z == 'n':
print('You beat me! 😡')
new_animal = input('So what was your animal? ')
new_answer = Answer(new_animal)
new_question_text = input('What is a question (with answer yes for your animal) that distinguishes a ' + new_animal + ' from a ' + current.name + '? ')
new_question = Question(text=new_question_text, yes=new_answer, no=current)
if x == 'y':
previous.yes = new_question
if x == 'n':
previous.no = new_question
serialized_questions = JSONSerializer.serialize(q)
with open('game.json', 'w') as file:
json.dump(serialized_questions, file, indent=2)
print()
print('Thank you! Please play again 😃') |
RacingTadpole/twenty-questions | twenty_questions/007-simpler-dict.py | ages = {'Jack': 13, 'Olivia': 15, 'Robert': 48, 'Jess': 47}
print('Jack is', ages['Jack'])
print()
for name in ages:
print(name, 'is', ages[name])
print()
name = input('Enter a name: ')
print(name, 'is', ages[name])
|
RacingTadpole/twenty-questions | twenty_questions/question.py | from dataclasses import dataclass
@dataclass
class Question:
number: int
text: str
yes_number: int
no_number: int
@dataclass
class Answer:
number: int
text: str
data = [
Question(0, 'Does your animal fly?', 1, 2),
Question(1, 'Is your flying animal a bird?', 3, 4),
Question(2, 'Does your animal live underwater?', 7, 8),
Question(3, 'Is your bird native to Australia?', 5, 6),
Answer(4, 'fruit bat'),
Answer(5, 'kookaburra'),
Answer(6, 'blue jay'),
Question(7, 'Is your animal a mammal?', 9, 10),
Answer(8, 'wombat'),
Answer(9, 'blue whale'),
Answer(10, 'goldfish'),
] |
RacingTadpole/twenty-questions | twenty_questions/007-dictionaries.py | data = [
{'number': 0, 'question': 'Does your animal fly?', 'yes': 1, 'no': 2},
{'number': 1, 'question': 'Is your flying animal a bird?', 'yes': 3, 'no': 4},
{'number': 2, 'question': 'Does your animal live underwater?', 'yes': 7, 'no': 8},
{'number': 3, 'question': 'Is your bird native to Australia?', 'yes': 5, 'no': 6},
{'number': 4, 'answer': 'fruit bat'},
{'number': 5, 'answer': 'kookaburra'},
{'number': 6, 'answer': 'blue jay'},
{'number': 7, 'question': 'Is your animal a mammal?', 'yes': 9, 'no': 10},
{'number': 8, 'answer': 'wombat'},
{'number': 9, 'answer': 'blue whale'},
{'number': 10, 'answer': 'goldfish'},
]
i = 0
while True:
info = data[i]
if 'question' in info:
question = info['question']
x = input(question + ' ')
if x == 'y':
i = info['yes']
if x == 'n':
i = info['no']
else:
answer = info['answer']
x = input('Is it a ' + answer + '? ')
if x == 'y':
print('Wow, I guessed it!')
if x == 'n':
print('You beat me!')
break
|
RacingTadpole/twenty-questions | twenty_questions/002-if.py | print('Welcome to Guess the Animal')
print('Please think of an animal, and I will try to guess what it is by asking questions.')
print('Please answer questions with "y" or "n"')
print()
x = input('Does your animal fly? ')
if x == 'yes':
xy = input('Is your flying animal a bird? ')
if xy == 'yes':
print('I think your animal is a pelican.')
if xy == 'no':
print('I think your animal is a fruit bat.')
if x == 'no':
print('I think your animal is a wombat.')
|
RacingTadpole/twenty-questions | twenty_questions/main.py | from twenty_questions.question import Question
print('Welcome to Twenty Questions')
print('Please think of an animal or plant, and I will try to guess what it is by asking questions.')
print('Please answer questions with "y" or "n"')
print()
question2 = Question(text='Does it live underwater?', yes_answer='sea cucumber', no_answer='wombat')
question1 = Question(text='Is it an animal?', yes_question=question2, no_answer='cactus')
question = question1
guess = None
while question:
yes_or_no = input(question.text + ' ')
if yes_or_no == 'y':
guess = question.yes_answer
question = question.yes_question
elif yes_or_no == 'n':
guess = question.no_answer
question = question.no_question
else:
print('Please only respond y or n.')
print('It is a ' + guess + '!')
|
RacingTadpole/twenty-questions | twenty_questions/008-person.py | from dataclasses import dataclass
@dataclass
class Person:
eye_color: str
hair_color: str
hair_count: int
name: str
life_span: int
poobear = Person('blue', 'red', 400000, 'PooBear', 89)
print(poobear.life_span / 2)
|
ovidner/openmdao-bridge-excel | tests/conftest.py | from hypothesis import settings
settings.register_profile("fast", max_examples=5, derandomize=True)
settings.load_profile("fast")
|
ovidner/openmdao-bridge-excel | src/openmdao_bridge_excel/timeout_utils.py | import dataclasses
import threading
from contextlib import contextmanager
import psutil
@dataclasses.dataclass(eq=False)
class TimeoutState:
timer = None
reached = False
@contextmanager
def timeout(seconds, timeout_reached_fn):
state = TimeoutState()
def _timeout_reached_fn():
state.reached = True
timeout_reached_fn()
timer = threading.Timer(seconds, _timeout_reached_fn)
state.timer = timer
timer.start()
try:
yield state
finally:
timer.cancel()
class TimeoutComponentMixin:
def _declare_options(self):
super()._declare_options()
self.options.declare("timeout", types=(int, float), default=(60 * 60))
def _apply_nonlinear(self):
with timeout(self.options["timeout"], self._handle_timeout) as timeout_state:
self.timeout_state = timeout_state
super()._apply_nonlinear()
self.timeout_state = None
def _solve_nonlinear(self):
with timeout(self.options["timeout"], self._handle_timeout) as timeout_state:
self.timeout_state = timeout_state
super()._solve_nonlinear()
self.timeout_state = None
def _handle_timeout(self):
# TODO: logging
self.handle_timeout()
def handle_timeout(self):
raise NotImplementedError()
def kill_pid(pid):
try:
proc = psutil.Process(pid)
proc.kill()
except psutil.NoSuchProcess:
pass
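# A minimal usage sketch of the timeout context manager (on_timeout is a
# hypothetical callback, not part of this module):
# with timeout(5, on_timeout) as state:
#     do_long_running_work()
# if state.reached:
#     ...  # the 5 s budget elapsed and on_timeout was already called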
|
ovidner/openmdao-bridge-excel | src/openmdao_bridge_excel/macro_execution.py | import dataclasses
import hashlib
import logging
import openmdao.api as om
logger = logging.getLogger(__package__)
MACRO_WRAPPER_BASE = """Option Private Module
Option Explicit"""
MACRO_WRAPPER_INSTANCE = """Function {wrapped_macro_name}()
On Error Resume Next
{macro_name}
{wrapped_macro_name} = Array(Err.Number, Err.Source, Err.Description, Err.HelpFile, Err.HelpContext, Err.LastDllError)
End Function"""
@dataclasses.dataclass
class MacroError:
number: int
source: str
description: str
help_file: str
help_context: str
last_dll_error: int
@dataclasses.dataclass
class MacroResult:
error: MacroError
@property
def success(self):
return self.error.number == 0
def wrapper_macro_name(macro):
macro_hash = hashlib.md5(macro.encode("utf-8")).hexdigest()
return f"wrapped_{macro_hash}"
def wrap_macros(book, macros):
vbe = book.app.api.VBE
vb_project = vbe.ActiveVBProject
wrapped_macros_comp = vb_project.VBComponents.Add(1)
wrapped_macros_comp.Name = "ombe_wrapped_macros"
wrapped_macros_code = wrapped_macros_comp.CodeModule
wrapped_macros_code.AddFromString(MACRO_WRAPPER_BASE)
for macro in macros:
wrapped_macros_code.AddFromString(
MACRO_WRAPPER_INSTANCE.format(
macro_name=macro, wrapped_macro_name=wrapper_macro_name(macro)
)
)
def run_wrapped_macro(book, macro_name):
error = book.macro(wrapper_macro_name(macro_name)).run()
return MacroResult(error=MacroError(*error))
def run_and_raise_macro(book, macro, stage):
logger.info(f"Running macro {macro} at {stage} stage...")
result = run_wrapped_macro(book, macro)
logger.info(
f"Finished running macro {macro} at {stage} stage with result: {result}"
)
if not result.success:
raise om.AnalysisError(
f'Excel macro "{macro}" executed in "{stage}" stage failed: {result.error}'
)
|
ovidner/openmdao-bridge-excel | tests/test_integration.py | import dataclasses
import time
import hypothesis.strategies as st
import numpy as np
import openmdao.api as om
import pytest
from hypothesis import given, settings
from openmdao_bridge_excel import ExcelComponent, ExcelVar
TEST_FILE_PATH = "tests/data/test.xlsm"
@dataclasses.dataclass
class ExecutionTime:
start: float = dataclasses.field(init=False)
end: float = dataclasses.field(init=False)
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
@property
def duration(self):
return (self.end - self.start) if (self.start and self.end) else None
@settings(deadline=5000)
@given(st.floats(allow_nan=False, allow_infinity=False))
@pytest.mark.parametrize("mode", ["formula", "macro"])
def test_continuous_finite_scalar(mode, value):
prob = om.Problem()
model = prob.model
if mode == "formula":
comp = ExcelComponent(
file_path=TEST_FILE_PATH,
inputs=[ExcelVar("in", "FormulaA")],
outputs=[ExcelVar("out", "FormulaB")],
)
elif mode == "macro":
comp = ExcelComponent(
file_path=TEST_FILE_PATH,
inputs=[ExcelVar("in", "MacroA")],
outputs=[ExcelVar("out", "MacroB")],
pre_macros=["NameA", "NameB"],
main_macros=["CopyAToB"],
post_macros=["EnsureBEqualsA"],
)
else:
raise ValueError(mode)
model.add_subsystem(
"excel", comp,
)
try:
prob.setup()
prob.set_val("excel.in", value)
prob.run_model()
finally:
prob.cleanup()
# Using a normal == comparison will not consider NaNs as equal.
assert np.allclose(prob["excel.out"], value, atol=0.0, rtol=0.0, equal_nan=True)
@pytest.mark.parametrize("stage", ["pre", "main", "post"])
def test_macro_errors(stage):
fudge_up_macros = ["FudgeUp"]
prob = om.Problem()
model = prob.model
model.add_subsystem(
"excel",
ExcelComponent(
file_path=TEST_FILE_PATH,
inputs=[],
outputs=[],
pre_macros=fudge_up_macros if stage == "pre" else [],
main_macros=fudge_up_macros if stage == "main" else [],
post_macros=fudge_up_macros if stage == "post" else [],
),
)
try:
prob.setup()
with pytest.raises(
om.AnalysisError,
match=f'Excel macro "FudgeUp" executed in "{stage}" stage failed',
):
prob.run_model()
finally:
prob.cleanup()
@pytest.mark.parametrize("stage", ["pre", "main", "post"])
@pytest.mark.parametrize("timeout", [1, 10])
@pytest.mark.parametrize("slow_macros", [["SleepBreakable"], ["SleepNonbreakable"]])
def test_timeout(stage, timeout, slow_macros):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"excel",
ExcelComponent(
file_path=TEST_FILE_PATH,
inputs=[],
outputs=[ExcelVar("out", "A1")],
pre_macros=slow_macros if stage == "pre" else [],
main_macros=slow_macros if stage == "main" else [],
post_macros=slow_macros if stage == "post" else [],
timeout=timeout,
),
)
try:
prob.setup()
with pytest.raises(om.AnalysisError, match="Timeout reached!"):
with ExecutionTime() as execution_time:
prob.run_model()
finally:
prob.cleanup()
# Should be finished within the timeout limit plus some overhead, but not too early
assert timeout <= execution_time.duration <= (timeout + 3)
@pytest.mark.parametrize("stage", ["main", "post"])
@pytest.mark.parametrize("slow_macros", [["SleepBreakable"], ["SleepNonbreakable"]])
@pytest.mark.parametrize("value", [1, 3])
def test_timeout_recovery(stage, slow_macros, value):
prob = om.Problem()
model = prob.model
comp = model.add_subsystem(
"excel",
ExcelComponent(
file_path=TEST_FILE_PATH,
inputs=[
ExcelVar("in", "FormulaA"),
ExcelVar("sleep_duration", "SleepDuration"),
],
outputs=[ExcelVar("out", "FormulaB")],
# We can't adjust the sleep duration of the pre stage, so we let it be.
pre_macros=[],
main_macros=slow_macros if stage == "main" else [],
post_macros=slow_macros if stage == "post" else [],
timeout=5,
),
)
try:
prob.setup()
prob.set_val("excel.in", value)
prob.set_val("excel.sleep_duration", 60)
with pytest.raises(om.AnalysisError, match="Timeout reached!"):
prob.run_model()
prob.set_val("excel.in", value)
prob.set_val("excel.sleep_duration", 0)
prob.run_model()
assert prob.get_val("excel.out") == value
finally:
prob.cleanup()
|
ovidner/openmdao-bridge-excel | setup.py | from setuptools import find_packages, setup
setup(
name="openmdao-bridge-excel",
use_scm_version=True,
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(where="src"),
package_dir={"": "src"},
python_requires=">=3.6, <4",
install_requires=["openmdao", "psutil", "xlwings"],
setup_requires=["setuptools_scm"],
)
|
ovidner/openmdao-bridge-excel | src/openmdao_bridge_excel/__init__.py | import dataclasses
import itertools
import logging
import os.path
import numpy as np
import openmdao.api as om
import xlwings
from pywintypes import com_error
from .macro_execution import run_and_raise_macro, wrap_macros
from .timeout_utils import TimeoutComponentMixin, kill_pid
logger = logging.getLogger(__package__)
def nans(shape):
return np.ones(shape) * np.nan
@dataclasses.dataclass(frozen=True)
class ExcelVar:
name: str
range: str
shape = (1,)
class ExcelComponent(TimeoutComponentMixin, om.ExplicitComponent):
def initialize(self):
self.options.declare("file_path", types=str)
self.options.declare("inputs", types=list)
self.options.declare("outputs", types=list)
self.options.declare("pre_macros", types=list, default=[])
self.options.declare("main_macros", types=list, default=[])
self.options.declare("post_macros", types=list, default=[])
self.app = None
self.app_pid = None
def setup(self):
for var in self.options["inputs"]:
self.add_input(name=var.name, val=nans(var.shape))
for var in self.options["outputs"]:
self.add_output(name=var.name, val=nans(var.shape))
self.ensure_app()
def ensure_app(self):
if not self.app_pid:
logger.debug("Starting Excel...")
self.app = xlwings.App(visible=False, add_book=False)
self.app_pid = self.app.pid
logger.info(f"Excel started, PID {self.app_pid}.")
self.app.display_alerts = False
self.app.screen_updating = False
def open_and_run(self, inputs, outputs, discrete_inputs, discrete_outputs):
self.ensure_app()
file_path = self.options["file_path"]
logger.debug(f"Opening {file_path}...")
book = self.app.books.open(file_path)
book.api.EnableAutoRecover = False
all_macros = set(
itertools.chain(
self.options["pre_macros"],
self.options["main_macros"],
self.options["post_macros"],
)
)
logger.debug("Wrapping macros...")
if len(all_macros):
wrap_macros(book, all_macros)
for macro in self.options["pre_macros"]:
run_and_raise_macro(book, macro, "pre")
self.app.calculation = "manual"
for var in self.options["inputs"]:
self.app.range(var.range).options(convert=np.array).value = inputs[var.name]
logger.debug(f"Input variable {var.name} set to range {var.range}.")
self.app.calculation = "automatic"
self.app.calculate()
logger.debug("Workbook re-calculated.")
for macro in self.options["main_macros"]:
run_and_raise_macro(book, macro, "main")
for var in self.options["outputs"]:
outputs[var.name] = (
self.app.range(var.range).options(convert=np.array).value
)
logger.debug(f"Output variable {var.name} set from range {var.range}.")
for macro in self.options["post_macros"]:
run_and_raise_macro(book, macro, "post")
# Closes without saving
book.close()
logger.debug(f"Closed {file_path}.")
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
try:
self.open_and_run(
inputs, outputs, discrete_inputs or {}, discrete_outputs or {}
)
except Exception as exc:
if self.timeout_state.reached:
raise om.AnalysisError("Timeout reached!")
else:
raise exc
def handle_timeout(self):
logger.info(f"Excel component timed out. Killing PID {self.app_pid}.")
kill_pid(self.app_pid)
self.app_pid = None
def cleanup(self):
if self.app_pid:
try:
self.app.quit()
except com_error as exc:
pass
kill_pid(self.app_pid)
super().cleanup()
|
ArielAlvarezCortez/proyecto_SemTec | convolucion.py | import numpy as np
import cv2
# Ioriginal = original matrix
def convolucion(Ioriginal,Kernel):
'''Performs a convolution on an image.
Input:
Ioriginal - original image as a matrix
Kernel - kernel to sweep across the image
Output:
Resultado - resulting image'''
# fr - rows, cr - columns
fr=len(Ioriginal)-(len(Kernel)-1)
cr=len(Ioriginal[0])-(len(Kernel[0])-1)
Resultado=np.zeros((fr,cr),np.uint8)
# rows, result matrix
for i in range(len(Resultado)):
# columns, result matrix
for j in range(len(Resultado[0])):
suma=0
# rows, kernel
for m in range(len(Kernel)):
# columns, kernel
for n in range(len(Kernel[0])):
suma+=Kernel[m][n]*Ioriginal[m+i][n+j]
if suma<=255:
Resultado[i][j]=round(suma)
else:
Resultado[i][j]=255
return Resultado
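# Worked size example (a sketch): a 5x5 image convolved with a 3x3 kernel
# yields a 3x3 result, since fr = 5-(3-1) = 3 and cr = 5-(3-1) = 3; each
# output value is the kernel-weighted sum, clipped to 255.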
# images
K=[[-1,0,1],[-1,0,1],[-1,0,1]]
I=[[2,0,1,1,1],[3,0,0,0,2],[1,1,1,1,1],[3,1,1,1,2],[1,1,1,1,1]]
# images to numpy arrays
In=np.array(I)
Kn=np.array(K)
IRGB=cv2.imread('004.jpg')
IGS=cv2.cvtColor(IRGB,cv2.COLOR_BGR2GRAY)
print(IGS.shape)
# convolution function
R=convolucion(IGS,Kn)
print(R)
print(R.shape)
cv2.imwrite('004C.jpg',R)
|
cmput401-fall2018/web-app-ci-cd-with-travis-ci-manweile | selenium_test.py | import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class AssignFourTestCase(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_home(self):
self.driver.get('http://192.168.127.12:8000')
#these are the elements specified to test for in the assignment specs
elements = ["name", "about", "education", "skills", "work", "contact"]
for id in elements:
assert self.driver.find_element_by_id(id) != None
def tearDown(self):
self.addCleanup(self.driver.quit)
if __name__ == '__main__':
unittest.main(verbosity=2) |
cmput401-fall2018/web-app-ci-cd-with-travis-ci-manweile | test_service.py | import unittest
from unittest import mock
from unittest.mock import patch, mock_open
from unittest import TestCase
from service import Service
'''
The Selenium test should run on your development (local)
machine. It does not (and should not) run on your
Cybera instance.
The method bad_random in service.py DOES NOT work. The
assignment cannot be completed without mocking bad_random
completely.
For the test of bad_random, a mock of bad_random that
always returns a value is sufficient (e.g. make it always return
10, and check that it does so).
'''
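# A minimal sketch of the mocking pattern described above (uses the same
# Service class; illustrative only):
#   service = Service()
#   service.bad_random = mock.Mock(return_value=10)
#   assert service.bad_random() == 10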
class Assign4TestService(TestCase):
def test_bad_random(self):
mockService = Service()
#test case good data
mockData = "1\n2\n3\n4\n5\n6\n7\n8\n9\n10"
with patch('service.open', mock_open(read_data = mockData)):
mockService.bad_random = mock.Mock(return_value = 5)
            badNumber = mockService.bad_random()
fileLines = mockData.count('\n') + 1
self.assertTrue(0 <= badNumber <= fileLines)
#test case file not found
self.assertRaises(FileNotFoundError, Service.bad_random)
#test case empty file
mockData = ""
with patch('service.open', mock_open(read_data = mockData)):
fileLines = mockData.count('\n') + 1
self.assertTrue(fileLines == 1)
self.assertRaises(FileNotFoundError, Service.bad_random)
#test case not a number
mockData = "A\nB\nC\nD\nE"
with patch('service.open', mock_open(read_data = mockData)):
fileLines = mockData.count('\n') + 1
self.assertTrue(fileLines == 5)
self.assertRaises(FileNotFoundError, Service.bad_random)
def test_divide(self):
mockService = Service()
#test case divisor is zero
mockService.bad_random = mock.Mock(return_value = 4)
self.assertRaises(ZeroDivisionError, mockService.divide, 0)
#test case dividend is zero
mockService.bad_random = mock.Mock(return_value = 0)
quotient = mockService.divide(4)
self.assertTrue(quotient == 0)
#test case dividend and divisor both same non zero value
mockService.bad_random = mock.Mock(return_value = 7)
quotient = mockService.divide(7)
self.assertTrue(quotient == 1)
#test case non zero dividend and divisor different non zero value
mockService.bad_random = mock.Mock(return_value = 6)
quotient = mockService.divide(3)
self.assertTrue(quotient == 2)
#test case non zero dividend and divisor not a number
mockService.bad_random = mock.Mock(return_value = 9)
self.assertRaises(TypeError, mockService.divide, 'string')
def test_abs_plus(self):
mockService = Service()
#test case very large negative integer
self.assertTrue(mockService.abs_plus(-2147483648) == 2147483649)
#test case integer just less than zero
self.assertTrue(mockService.abs_plus(-1) == 2)
#test case zero
self.assertTrue(mockService.abs_plus(0) == 1)
#test case integer just larger than zero
self.assertTrue(mockService.abs_plus(1) == 2)
#test case very large positive integer
self.assertTrue(mockService.abs_plus(2147483647) == 2147483648)
#test case not a number
self.assertRaises(TypeError, mockService.abs_plus, 'string')
'''
divide and bad_random are already tested,
therefore complicated_function only needs to test the modulus division
'''
def test_complicated_function(self):
mockService = Service()
#Test case negative odd integer dividend
mockService.divide = mock.Mock(return_value = 5)
mockService.bad_random = mock.Mock(return_value = -5)
modulus = mockService.complicated_function(1)
self.assertTrue(modulus == (5, 1))
#Test case negative even integer dividend
mockService.divide = mock.Mock(return_value = 6)
mockService.bad_random = mock.Mock(return_value = -6)
modulus = mockService.complicated_function(1)
self.assertTrue(modulus == (6, 0))
#Test case zero dividend
mockService.divide = mock.Mock(return_value = 4)
mockService.bad_random = mock.Mock(return_value = 0)
modulus = mockService.complicated_function(1)
self.assertTrue(modulus == (4, 0))
#Test case positive odd integer dividend
mockService.divide = mock.Mock(return_value = 5)
mockService.bad_random = mock.Mock(return_value = 5)
modulus = mockService.complicated_function(1)
self.assertTrue(modulus == (5, 1))
#Test case positive even integer dividend
mockService.divide = mock.Mock(return_value = 6)
mockService.bad_random = mock.Mock(return_value = 6)
modulus = mockService.complicated_function(1)
self.assertTrue(modulus == (6, 0))
#test case dividend not a number
mockService.divide = mock.Mock(return_value = 7)
mockService.bad_random = mock.Mock(return_value = "A")
self.assertRaises(TypeError, mockService.complicated_function, 'string')
if __name__ == '__main__':
unittest.main(verbosity=2) |
gaybro8777/klio | cli/tests/commands/job/test_run.py |
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from klio_core import config
from klio_cli import __version__ as klio_cli_version
from klio_cli import cli
from klio_cli.commands.job import run as run_job
@pytest.fixture
def mock_os_environ(mocker):
return mocker.patch.dict(
run_job.base.os.environ, {"USER": "cookiemonster"}
)
@pytest.fixture
def klio_config():
conf = {
"job_name": "test-job",
"version": 1,
"pipeline_options": {
"worker_harness_container_image": "test-image",
"region": "some-region",
"project": "test-project",
},
"job_config": {
"inputs": [
{
"topic": "foo-topic",
"subscription": "foo-sub",
"data_location": "foo-input-location",
}
],
"outputs": [
{
"topic": "foo-topic-output",
"data_location": "foo-output-location",
}
],
},
}
return config.KlioConfig(conf)
@pytest.fixture
def docker_runtime_config():
return cli.DockerRuntimeConfig(
image_tag="foo-123",
force_build=False,
config_file_override="klio-job2.yaml",
)
@pytest.fixture
def run_job_config():
return cli.RunJobConfig(
direct_runner=False, update=False, git_sha="12345678"
)
@pytest.fixture
def mock_docker_client(mocker):
mock_client = mocker.Mock()
mock_container = mocker.Mock()
mock_container.wait.return_value = {"StatusCode": 0}
mock_container.logs.return_value = [b"a log line\n", b"another log line\n"]
mock_client.containers.run.return_value = mock_container
return mock_client
@pytest.fixture
def run_pipeline(
klio_config,
docker_runtime_config,
run_job_config,
mock_docker_client,
mock_os_environ,
monkeypatch,
):
job_dir = "/test/dir/jobs/test_run_job"
pipeline = run_job.RunPipeline(
job_dir=job_dir,
klio_config=klio_config,
docker_runtime_config=docker_runtime_config,
run_job_config=run_job_config,
)
monkeypatch.setattr(pipeline, "_docker_client", mock_docker_client)
return pipeline
@pytest.mark.parametrize(
"direct_runner,db_url",
((True, None), (False, "https://foo"), (False, None)),
)
def test_run_docker_container(
direct_runner,
db_url,
run_pipeline,
run_job_config,
caplog,
mocker,
monkeypatch,
):
run_job_config = run_job_config._replace(direct_runner=direct_runner)
monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)
mock_sd_utils = mocker.Mock()
mock_sd_utils.get_stackdriver_group_url.return_value = db_url
monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)
runflags = {"a": "flag"}
run_pipeline._run_docker_container(runflags)
run_pipeline._docker_client.containers.run.assert_called_once_with(
**runflags
)
ret_container = run_pipeline._docker_client.containers.run.return_value
ret_container.logs.assert_called_once_with(stream=True)
if not direct_runner:
mock_sd_utils.get_stackdriver_group_url.assert_called_once_with(
"test-project", "test-job", "some-region"
)
assert 1 == len(caplog.records)
else:
mock_sd_utils.get_stackdriver_group_url.assert_not_called()
assert not len(caplog.records)
def test_failure_in_docker_container_returns_nonzero(
run_pipeline, run_job_config, caplog, mocker, monkeypatch,
):
mock_sd_utils = mocker.Mock()
monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)
container_run = run_pipeline._docker_client.containers.run
container_run.return_value.wait.return_value = {"StatusCode": 1}
runflags = {"a": "flag"}
assert run_pipeline._run_docker_container(runflags) == 1
container_run.assert_called_once_with(**runflags)
ret_container = run_pipeline._docker_client.containers.run.return_value
ret_container.logs.assert_called_once_with(stream=True)
mock_sd_utils.get_stackdriver_group_url.assert_not_called()
def test_run_docker_container_dashboard_raises(
run_pipeline, caplog, mocker, monkeypatch
):
mock_sd_utils = mocker.Mock()
mock_sd_utils.get_stackdriver_group_url.side_effect = Exception("fuu")
monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)
runflags = {"a": "flag"}
run_pipeline._run_docker_container(runflags)
run_pipeline._docker_client.containers.run.assert_called_once_with(
**runflags
)
ret_container = run_pipeline._docker_client.containers.run.return_value
ret_container.logs.assert_called_once_with(stream=True)
mock_sd_utils.get_stackdriver_group_url.assert_called_once_with(
"test-project", "test-job", "some-region"
)
assert 1 == len(caplog.records)
def test_get_environment(run_pipeline):
gcreds = "/usr/gcloud/application_default_credentials.json"
exp_envs = {
"PYTHONPATH": "/usr/src/app",
"GOOGLE_APPLICATION_CREDENTIALS": gcreds,
"USER": "cookiemonster",
"GOOGLE_CLOUD_PROJECT": "test-project",
"COMMIT_SHA": "12345678",
"KLIO_CLI_VERSION": klio_cli_version,
}
assert exp_envs == run_pipeline._get_environment()
@pytest.mark.parametrize(
"config_file", (None, "klio-job2.yaml"),
)
@pytest.mark.parametrize(
"image_tag,exp_image_flags",
((None, []), ("foo-123", ["--image-tag", "foo-123"])),
)
@pytest.mark.parametrize(
"update,exp_update_flag",
((True, ["--update"]), (False, ["--no-update"]), (None, [])),
)
@pytest.mark.parametrize(
"direct_runner,exp_runner_flag", ((False, []), (True, ["--direct-runner"]))
)
def test_get_command(
direct_runner,
exp_runner_flag,
update,
exp_update_flag,
image_tag,
exp_image_flags,
config_file,
run_pipeline,
monkeypatch,
):
run_job_config = run_pipeline.run_job_config._replace(
direct_runner=direct_runner, update=update
)
monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)
runtime_config = run_pipeline.docker_runtime_config._replace(
image_tag=image_tag, config_file_override=config_file
)
monkeypatch.setattr(run_pipeline, "docker_runtime_config", runtime_config)
exp_command = ["run"]
exp_command.extend(exp_update_flag)
exp_command.extend(exp_runner_flag)
exp_command.extend(exp_image_flags)
assert sorted(exp_command) == sorted(run_pipeline._get_command())
@pytest.mark.parametrize("direct_runner", (True, False))
def test_setup_docker_image(
direct_runner, run_pipeline, mock_docker_client, mocker, monkeypatch
):
run_job_config = run_pipeline.run_job_config._replace(
direct_runner=direct_runner
)
monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)
mock_super = mocker.Mock()
monkeypatch.setattr(
run_job.base.BaseDockerizedPipeline, "_setup_docker_image", mock_super
)
mock_docker_utils = mocker.Mock()
monkeypatch.setattr(run_job, "docker_utils", mock_docker_utils)
run_pipeline._setup_docker_image()
mock_super.assert_called_once_with()
if not direct_runner:
mock_docker_utils.push_image_to_gcr.assert_called_once_with(
"test-image:foo-123", "foo-123", mock_docker_client,
)
else:
mock_docker_utils.push_image_to_gcr.assert_not_called()
|
gaybro8777/klio | cli/src/klio_cli/utils/config_utils.py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import warnings
SUPPORTED_CONFIG_VERSIONS = (2,)
DEPRECATED_CONFIG_VERSIONS = (1,)
ALL_CONFIG_VERSIONS = SUPPORTED_CONFIG_VERSIONS + DEPRECATED_CONFIG_VERSIONS
# TODO: integrate this into KlioConfig as a converter
def set_config_version(config):
msg_version = config.version
if msg_version is None:
logging.info(
"No value set for 'version' in `klio-job.yaml`. Defaulting to "
"version 1."
)
msg_version = 1
try:
msg_version = int(msg_version)
except ValueError:
logging.error(
"Invalid `version` value in `klio-job.yaml`. Expected `int`, "
"got `{}`".format(type(msg_version))
)
raise # reraises ValueError
if msg_version not in ALL_CONFIG_VERSIONS:
logging.error(
"Unsupported configuration `version` '{}'. Supported versions: "
"{}".format(msg_version, ALL_CONFIG_VERSIONS)
)
if msg_version in DEPRECATED_CONFIG_VERSIONS:
msg = (
"Config version {} is deprecated and will be removed in a future "
"release of klio. Please migrate to a supported "
"version: {}".format(msg_version, SUPPORTED_CONFIG_VERSIONS)
)
logging.warning(msg)
warnings.warn(msg, DeprecationWarning)
config.version = msg_version
return config
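# Hedged usage sketch (the config object's shape is assumed: anything with a
# mutable `version` attribute works):
#   from types import SimpleNamespace
#   cfg = set_config_version(SimpleNamespace(version="1"))
#   assert cfg.version == 1  # coerced to int; a DeprecationWarning is emitted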
|
gaybro8777/klio | exec/src/klio_exec/commands/utils/plugin_utils.py |
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import attr
import pkg_resources
from py import io
@attr.s
class KlioPlugin(object):
plugin_name = attr.ib(type=str)
description = attr.ib(type=str)
package_name = attr.ib(type=str)
package_version = attr.ib(type=str)
module_path = attr.ib(type=str)
# TODO: in the future, add functionality to toggle & configure audit
# steps in a job's klio-job.yaml file (@lynn)
def load_plugins_by_namespace(namespace):
"""Loads audit steps defined in `setup.py` under a given namespace.
Args:
namespace (str): namespace under which to look for plugins.
Returns:
Loaded plugin objects (list).
"""
return [e.load() for e in pkg_resources.iter_entry_points(namespace)]
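# Hedged example of how an audit step might be registered under a namespace
# in a package's setup.py (the namespace and names below are illustrative,
# not taken from this repo):
#   setup(
#       ...,
#       entry_points={
#           "klio.plugins.audit": ["my_step = my_pkg.audit_steps.my_step:_init"],
#       },
#   )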
def _get_plugins_by_namespace(namespace):
entrypoints = pkg_resources.iter_entry_points(namespace)
for ep in entrypoints:
# Need to actually load the plugin in order to get its location,
# as well as class attributes, like name & description
loaded = ep.load()
desc = loaded.get_description() or loaded.__doc__
if desc is None:
desc = "No description."
yield KlioPlugin(
plugin_name=loaded.AUDIT_STEP_NAME,
description=desc,
package_name=ep.dist.project_name,
package_version=ep.dist.parsed_version,
module_path=inspect.getfile(loaded),
)
def print_plugins(namespace, tw=None):
plugin_meta = (
" -- via {package_name} (v{package_version}) -- {module_path}\n"
)
plugin_desc = "\t{desc}\n\n"
if not tw:
tw = io.TerminalWriter()
loaded_plugins = _get_plugins_by_namespace(namespace)
for plugin in loaded_plugins:
meta = plugin_meta.format(
package_name=plugin.package_name,
package_version=plugin.package_version,
module_path=plugin.module_path,
)
tw.write(plugin.plugin_name, blue=True, bold=True)
tw.write(meta, green=True)
tw.write(plugin_desc.format(desc=plugin.description))
|
gaybro8777/klio | exec/tests/unit/commands/audit_steps/test_multithreaded_tf.py |
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from klio_exec.commands.audit_steps import multithreaded_tf
@pytest.mark.parametrize("tf_loaded", (True, False))
@pytest.mark.parametrize("worker_threads", (0, 1, 2))
def test_multithreaded_tf_usage(
tf_loaded, worker_threads, klio_config, mock_emit_warning, mocker
):
if worker_threads:
klio_config.pipeline_options.experiments = [
"worker_threads={}".format(worker_threads)
]
if tf_loaded:
mocker.patch.dict("sys.modules", {"tensorflow": ""})
mt_tf_usage = multithreaded_tf.MultithreadedTFUsage(
"job/dir", klio_config, "term_writer"
)
mt_tf_usage.after_tests()
if worker_threads != 1 and tf_loaded:
# don't care about the actual message
assert 1 == mock_emit_warning.call_count
else:
mock_emit_warning.assert_not_called()
|
gaybro8777/klio | cli/src/klio_cli/commands/job/test.py |
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from klio_cli.commands import base
class TestPipeline(base.BaseDockerizedPipeline):
DOCKER_LOGGER_NAME = "klio.job.test"
def __init__(self, job_dir, klio_config, docker_runtime_config):
super().__init__(job_dir, klio_config, docker_runtime_config)
self.requires_config_file = False
def _get_environment(self):
envs = super()._get_environment()
envs["KLIO_TEST_MODE"] = "true"
return envs
def _get_command(self, pytest_args):
return ["test"] + pytest_args
|
gaybro8777/klio | examples/catvdog/transforms.py |
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import apache_beam as beam
import numpy as np
import tensorflow as tf
from apache_beam.io.gcp import gcsio
from keras.models import load_model
from keras.preprocessing import image as kimage
from klio.transforms import decorators
class CatVDog(beam.DoFn):
"""Classify cat vs dog based off github.com/gsurma/image_classifier"""
IMAGE_WIDTH = 200
IMAGE_HEIGHT = 200
CLASSES = {0: "cat", 1: "dog"}
@decorators.set_klio_context
def __init__(self):
self.input_loc = self._klio.config.job_config.data.inputs[0].location
self.output_loc = self._klio.config.job_config.data.outputs[0].location
self.model_file = self._klio.config.job_config.as_dict()["model_file"]
self.gcs_client = None
self.model = None
def setup(self):
"""Setup instance variables that are not pickle-able"""
self.gcs_client = gcsio.GcsIO()
self.model = tf.keras.models.load_model(self.model_file)
@decorators.set_klio_context
def download_image(self, filename):
"""Download a given image from GCS.
Args:
filename (str): filename to download from configured GCS bucket.
Returns:
(tempfile.NamedTemporaryFile) Temporary file object of the
downloaded image.
"""
remote_file = os.path.join(self.input_loc, filename)
local_tmp_file = tempfile.NamedTemporaryFile(suffix=".jpg")
with self.gcs_client.open(remote_file, "rb") as source:
with open(local_tmp_file.name, "wb") as dest:
dest.write(source.read())
self._klio.logger.info("Downloaded file to %s" % local_tmp_file.name)
return local_tmp_file
def load_image(self, image_file):
"""Load a given image for classification.
Args:
image_file (tempfile.NamedTemporaryFile): Temporary image
file object with which to load.
Returns:
(numpy.ndarray) loaded image tensor.
"""
# Adapted from https://stackoverflow.com/a/47341572/1579977
img = kimage.load_img(
image_file,
target_size=(CatVDog.IMAGE_WIDTH, CatVDog.IMAGE_HEIGHT),
)
img_tensor = kimage.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.0
return img_tensor
@decorators.set_klio_context
def upload_image(self, local_file, classification, filename):
"""Upload a given image to GCS.
Args:
local_file (tempfile.NamedTemporaryFile): Temporary image
file object with which to load.
classification (str): which classification subfolder to
upload local_file to.
filename (str): name for the uploaded file.
"""
remote_dir = os.path.join(self.output_loc, classification, filename)
with self.gcs_client.open(remote_dir, "wb") as dest:
with open(local_file.name, "rb") as source:
dest.write(source.read())
self._klio.logger.info("Uploaded file to %s" % remote_dir)
@decorators.handle_klio
def process(self, data):
"""Predict whether a given image ID is a cat or a dog.
This is the main entry point for a Beam/Klio transform.
        Download the image file, make a prediction, then upload the image
        to its classified folder in a GCS bucket.
Args:
data (KlioMessage.data): data attribute of a KlioMessage including
fields ``element`` and ``payload``.
Returns:
data (KlioMessage.data): data attribute of the incoming KlioMessage
as there is no need to pass state to the downstream transform
within the pipeline.
"""
image_id = data.element.decode("utf-8")
self._klio.logger.info("Received {} from PubSub".format(image_id))
filename = "{}.jpg".format(image_id)
# download image
input_file = self.download_image(filename)
# load & predict image
loaded_image = self.load_image(input_file.name)
prediction = self.model.predict_classes(loaded_image)
prediction = CatVDog.CLASSES[prediction[0][0]]
self._klio.logger.info(
"Predicted {} for {}".format(prediction, image_id)
)
# save image to particular output directory
self.upload_image(input_file, prediction, filename)
# return original data for any downstream processing
yield data
class CatVDogOutputCheck(beam.DoFn):
"""Custom output data existence check to handle two output directories"""
def setup(self):
"""Setup instance variables that are not pickle-able"""
self.gcs_client = gcsio.GcsIO()
@decorators.handle_klio
def process(self, data):
"""Detect if data for an element exists in one of two dirs in a bucket.
Args:
data (KlioMessage.data): data attribute of a KlioMessage including
fields ``element`` and ``payload``.
Returns:
apache_beam.pvalue.TaggedOutput: data tagged with either
``found`` or ``not_found``.
"""
element = data.element.decode("utf-8")
oc = self._klio.config.job_config.data.outputs[0]
subdirs = ("cat", "dog")
outputs_exist = []
for subdir in subdirs:
path = f"{oc.location}/{subdir}/{element}{oc.file_suffix}"
self._klio.logger.info(f"Checking output in {path}")
exists = self.gcs_client.exists(path)
outputs_exist.append(exists)
if exists:
self._klio.logger.info(f"Output exists at {path}")
else:
self._klio.logger.info(
f"Output does not exist for {element} in {subdir}"
)
if any(outputs_exist):
yield beam.pvalue.TaggedOutput("found", data)
else:
yield beam.pvalue.TaggedOutput("not_found", data)
|
gaybro8777/klio | exec/src/klio_exec/commands/utils/wrappers.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import inspect
def _get_transform_error_msg(txf=None, entity_id=None, err_msg=None):
# This error message is printed instead of logged since user may not
# run with logs turned on
return (
"WARN: Error caught while profiling {txf}.process for "
"entity ID {entity_id}: {err_msg}".format(
txf=txf, entity_id=entity_id, err_msg=err_msg
)
)
def _print_user_exceptions_generator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
transform_name = args[0].__class__.__name__
entity_id = args[1]
result = None
try:
result = yield from func(*args, **kwargs)
except Exception as e:
msg = _get_transform_error_msg(
txf=transform_name, entity_id=entity_id, err_msg=e
)
print(msg)
return result
return wrapper
def _print_user_exceptions_func(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
transform_name = args[0].__class__.__name__
entity_id = args[1]
result = None
try:
result = func(*args, **kwargs)
except Exception as e:
msg = _get_transform_error_msg(
txf=transform_name, entity_id=entity_id, err_msg=e
)
print(msg)
return result
return wrapper
def print_user_exceptions(transforms):
# Don't crap out if the process method errors; just continue profiling
for txf in transforms:
process_method = getattr(txf, "process")
if inspect.isgeneratorfunction(process_method):
process_method = _print_user_exceptions_generator(process_method)
else:
process_method = _print_user_exceptions_func(process_method)
setattr(txf, "process", process_method)
yield txf
# adapted from line_profiler; memory_profiler doesn't handle generator
# functions for some reason.
class KLineProfilerMixin(object):
"""Mixin for CPU & Memory line profilers."""
def __call__(self, func):
# Overwrite to handle generators in the same fashion as funcs
self.add_function(func)
if inspect.isgeneratorfunction(func):
return self.wrap_generator(func)
return self.wrap_function(func)
def wrap_function(self, func):
"""Wrap a function to profile it."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self.enable_by_count()
try:
return func(*args, **kwargs)
finally:
self.disable_by_count()
return wrapper
def wrap_generator(self, func):
"""Wrap a generator to profile it."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self.enable_by_count()
try:
yield from func(*args, **kwargs)
finally:
self.disable_by_count()
return wrapper
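# Hedged usage sketch: line_profiler's LineProfiler (an assumed dependency
# here) provides add_function/enable_by_count/disable_by_count, so the mixin
# can make it generator-aware:
#   import line_profiler
#   class KLineProfiler(KLineProfilerMixin, line_profiler.LineProfiler):
#       pass
#   profile = KLineProfiler()
#   wrapped_process = profile(MyDoFn().process)  # handles generators too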
|
gaybro8777/klio | lib/src/klio/message/serializer.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from apache_beam import pvalue
from klio_core.proto import klio_pb2
from klio.message import exceptions
def _handle_msg_compat(parsed_message):
if parsed_message.version is klio_pb2.Version.V1:
if parsed_message.data.entity_id and not parsed_message.data.element:
# make v1 messages compatible with v2
parsed_message.data.element = bytes(
parsed_message.data.entity_id, "utf-8"
)
return parsed_message
if parsed_message.version is klio_pb2.Version.V2:
# is it safe to assume if a message is already labeled as v2, it should
# have an element or payload? i.e. not just entity_id?
return parsed_message
if parsed_message.data.entity_id and not parsed_message.data.element:
# assume v1 message
parsed_message.version = klio_pb2.Version.V1
# make v1 messages compatible with v2
parsed_message.data.element = bytes(
parsed_message.data.entity_id, "utf-8"
)
elif not parsed_message.data.entity_id and not parsed_message.data.element:
# assume v1 message
parsed_message.version = klio_pb2.Version.V1
elif parsed_message.data.element and not parsed_message.data.entity_id:
# assume v2 message
parsed_message.version = klio_pb2.Version.V2
return parsed_message
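# Illustrative example of the compat handling above: a message carrying only
# an entity_id is assumed to be v1, and its element is backfilled:
#   msg = klio_pb2.KlioMessage()
#   msg.data.entity_id = "1234"
#   msg = _handle_msg_compat(msg)
#   # msg.version == klio_pb2.Version.V1 and msg.data.element == b"1234"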
# [batch dev] attemping to make this a little generic so it can (eventually)
# be used with transforms other than DoFns
def to_klio_message(incoming_message, kconfig=None, logger=None):
"""Serialize ``bytes`` to a :ref:`KlioMessage <klio-message>`.
.. tip::
Set ``job_config.allow_non_klio_messages`` to ``True`` in
``klio-job.yaml`` in order to process non-``KlioMessages`` as
regular ``bytes``. This function will create a new ``KlioMessage``
and set the incoming ``bytes`` to ``KlioMessage.data.element``.
Args:
incoming_message (bytes): Incoming bytes to parse into a \
``KlioMessage``.
kconfig (klio_core.config.KlioConfig): the current job's
configuration.
logger (logging.Logger): the logger associated with the Klio
job.
Returns:
klio_core.proto.klio_pb2.KlioMessage: a ``KlioMessage``.
Raises:
klio_core.proto.klio_pb2._message.DecodeError: incoming message
can not be parsed into a ``KlioMessage`` and
``job_config.allow_non_klio_messages`` in ``klio-job.yaml``
is set to ``False``.
"""
# TODO: when making a generic de/ser func, be sure to assert
# kconfig and logger exists
parsed_message = klio_pb2.KlioMessage()
try:
parsed_message.ParseFromString(incoming_message)
except klio_pb2._message.DecodeError as e:
if kconfig.job_config.allow_non_klio_messages:
# We are assuming that we have been given "raw" data that is not in
# the form of a serialized KlioMessage.
parsed_message.data.element = incoming_message
# default to set recipients to anyone - can't know who the
# appropriate recipient is when it's not a real klio msg
parsed_message.metadata.intended_recipients.anyone.SetInParent()
parsed_message.version = klio_pb2.Version.V2
else:
logger.error(
"Can not parse incoming message. To support non-Klio "
"messages, add `job_config.allow_non_klio_messages = true` "
"in the job's `klio-job.yaml` file."
)
raise e
parsed_message = _handle_msg_compat(parsed_message)
return parsed_message
def _handle_v2_payload(klio_message, payload):
if payload:
# if the user just returned exactly what they received in the
# process method; let's avoid recursive payloads
if payload == klio_message.data:
payload = b""
if not payload:
# be sure to clear out old payload if there's no new payload
payload = b""
else:
if not isinstance(payload, bytes):
try:
payload = bytes(payload, "utf-8")
except TypeError:
msg = (
"Returned payload could not be coerced to `bytes`.\n"
"Erroring payload: {}\nErroring KlioMessage: {}".format(
payload, klio_message
)
)
raise exceptions.KlioMessagePayloadException(msg)
return payload
def from_klio_message(klio_message, payload=None):
"""Deserialize a given :ref:`KlioMessage <klio-message>` to ``bytes``.
Args:
klio_message (klio_core.proto.klio_pb2.KlioMessage): the
``KlioMessage`` in which to deserialize into ``bytes``
payload (bytes or str): Optional ``bytes`` or ``str`` to update
the value of ``KlioMessage.data.payload`` with before
deserializing into bytes. Default: ``None``.
Returns:
bytes: a ``KlioMessage`` as ``bytes``.
Raises:
exceptions.KlioMessagePayloadException: the provided payload
value cannot be coerced into ``bytes``.
"""
tagged, tag = False, None
if isinstance(payload, pvalue.TaggedOutput):
tagged = True
tag = payload.tag
payload = payload.value
# only update payload if it's a v2 message.
if klio_message.version == klio_pb2.Version.V2:
payload = _handle_v2_payload(klio_message, payload)
# [batch dev] TODO: figure out how/where to clear out this payload
# when publishing to pubsub (and potentially other output transforms)
klio_message.data.payload = payload
if tagged:
return pvalue.TaggedOutput(tag, klio_message.SerializeToString())
return klio_message.SerializeToString()
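# Hedged round-trip sketch using the two functions above (the config and
# logger objects are assumed):
#   msg = to_klio_message(raw_bytes, kconfig=config, logger=logger)
#   out_bytes = from_klio_message(msg, payload=b"new-payload")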
|
gaybro8777/klio | lib/tests/unit/metrics/test_client.py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from klio.metrics import client
from klio.metrics import dispatcher
@pytest.fixture
def metric_params():
return {
"name": "my-metric",
"value": 0,
"kwargs": {"tags": {"key-tag": "value-tag"}},
}
@pytest.fixture
def relay_client(mocker):
return mocker.Mock()
@pytest.fixture
def metrics_registry(relay_client):
return client.MetricsRegistry(
relay_clients=[relay_client], transform_name="HelloKlio"
)
@pytest.fixture
def metric_data(metric_params):
metric_params["type"] = "counter"
return metric_params
@pytest.fixture
def counter_metric(metric_params, relay_client):
return dispatcher.CounterDispatcher(
relay_clients=[relay_client], **metric_params
)
@pytest.mark.parametrize(
"method,cls",
(
("counter", dispatcher.CounterDispatcher),
("gauge", dispatcher.GaugeDispatcher),
("timer", dispatcher.TimerDispatcher),
),
)
def test_get_metric_inst(method, cls, metrics_registry, metric_params):
assert {} == metrics_registry._registry # sanity check
method_to_call = getattr(metrics_registry, method)
ret_metric = method_to_call(**metric_params)
assert isinstance(ret_metric, cls)
exp_key = "{method}_{name}_HelloKlio".format(
method=method, **metric_params
)
assert exp_key in metrics_registry._registry
assert ret_metric == metrics_registry._registry[exp_key]
# assert same metric is returned rather than creating a new instance
ret_metric_again = method_to_call(**metric_params)
assert ret_metric is ret_metric_again
@pytest.mark.parametrize(
"metric_type,cls",
(
("counter", dispatcher.CounterDispatcher),
("gauge", dispatcher.GaugeDispatcher),
("timer", dispatcher.TimerDispatcher),
("unknown", dispatcher.GaugeDispatcher),
),
)
def test_marshal_unmarshal(
metrics_registry, metric_type, cls, metric_params, relay_client
):
metric_inst = cls(
relay_clients=[relay_client], transform="HelloKlio", **metric_params
)
metric_data = metric_params.copy()
metric_data["type"] = metric_type
ret_metric_data = metrics_registry.marshal(metric_inst)
exp_metric_data = metric_data.copy()
if metric_type == "unknown":
exp_metric_data["type"] = "gauge"
if metric_type == "timer":
exp_metric_data["timer_unit"] = "ns"
assert exp_metric_data == ret_metric_data
ret_metric = metrics_registry.unmarshal(metric_data)
# can't simply compare exp_metric to ret_metric since they are
# two different instances
# FIXME: implement __eq__ and __ne__ for dispatch objects (@lynn)
assert metric_inst.METRIC_TYPE == ret_metric.METRIC_TYPE
assert metric_inst.name == ret_metric.name
assert metric_inst.value == ret_metric.value
assert metric_inst.transform == ret_metric.transform
assert metric_inst.kwargs == ret_metric.kwargs
|
gaybro8777/klio | exec/src/klio_exec/commands/stop.py |
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import time
from googleapiclient import discovery
#####
# TODO: this is nearly identical to klio_cli/commands/stop_job.py. This is
# copy-pasta'ed to avoid depending on `klio-cli` and having any weird
# dependency version conflicts for now. Ideally, the copied code should
# be extracted out into a light-weight shared lib. @lynn
#####
JOB_STATE_MAP = {"cancel": "JOB_STATE_CANCELLED", "drain": "JOB_STATE_DRAINED"}
_client = None
def _set_dataflow_client(api_version=None):
global _client
if not api_version:
api_version = "v1b3"
_client = discovery.build("dataflow", api_version)
def _check_job_running(config):
request = (
_client.projects()
.locations()
.jobs()
.list(
projectId=config.pipeline_options.project,
location=config.pipeline_options.region,
filter="ACTIVE",
)
)
try:
response = request.execute()
except Exception as e:
logging.warning(
"Could not find running job '{}' in project '{}': {}".format(
config.job_name, config.pipeline_options.project, e
)
)
logging.warning(
"Continuing to attempt deploying '{}'".format(config.job_name)
)
return
job_results = response.get("jobs", [])
if job_results:
for result in job_results:
if result["name"] == config.job_name:
return result
def _update_job_state(job, req_state=None, retries=None):
if retries is None:
retries = 0
_req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP["cancel"])
    if job.get("requestedState") != _req_state:
job["requestedState"] = _req_state
request = (
_client.projects()
.locations()
.jobs()
.update(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
body=job,
)
)
try:
request.execute()
except Exception as e:
# generic catch if 4xx error - probably shouldn't retry
if getattr(e, "resp", None):
if e.resp.status < 500:
msg = "Failed to {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
if retries > 2:
msg = "Max retries reached: could not {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
logging.info(
"Failed to {} job '{}'. Trying again after 30s...".format(
req_state, job["name"]
)
)
retries += 1
time.sleep(30)
_update_job_state(job, req_state, retries)
def _watch_job_state(job, timeout=600):
timeout_end = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
request = (
_client.projects()
.locations()
.jobs()
.get(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
)
)
while datetime.datetime.now() < timeout_end:
try:
resp = request.execute()
except Exception as e:
msg = (
"Failed to get current status for job '{}'. Error: {}.\n"
"Trying again after 5s...".format(job["name"], e)
)
logging.info(msg)
time.sleep(5)
continue
if resp["currentState"] in JOB_STATE_MAP.values():
return
else:
msg = "Waiting for job '{}' to reach a terminal state...".format(
job["name"]
)
logging.info(msg)
time.sleep(5)
msg = "Job '{}' did not reach a terminal state after '{}' seconds.".format(
job["name"], timeout
)
logging.error(msg)
raise SystemExit(1)
def stop(config, strategy):
_set_dataflow_client()
current_running_job = _check_job_running(config)
if not current_running_job:
logging.info("Found no currently running job to stop.")
return
_update_job_state(current_running_job, req_state=strategy)
_watch_job_state(current_running_job)
verb = "cancelled" if strategy == "cancel" else "drained"
logging.info("Successfully {} job '{}'".format(verb, config.job_name))
|
gaybro8777/klio | integration/read-file/integration_test.py |
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To be run after `klio job run --direct-runner` (not within job container)
import os
import unittest
HERE = os.path.abspath(os.path.join(os.path.abspath(__file__), os.path.pardir))
EXPECTED_LOGS = os.path.join(HERE, "expected_job_output.txt")
ACTUAL_LOGS = os.path.join(HERE, "job_output.log")
class TestExpectedOutput(unittest.TestCase):
@classmethod
def setUpClass(self):
with open(EXPECTED_LOGS, "r") as f:
self.expected_logs = f.readlines()
if not os.path.exists(ACTUAL_LOGS):
            # tox deletes the file after the test is done so that tests
            # don't pass accidentally from a previously successful run's
            # cached results
raise Exception(
"The job's output does not exist. Rerun the job to produce "
"the required output."
)
with open(ACTUAL_LOGS, "r") as f:
self.actual_logs = f.readlines()
def test_expected_logs(self):
# sort them since the order of some parts of the pipeline are not
# deterministic
self.assertEqual(sorted(self.expected_logs), sorted(self.actual_logs))
if __name__ == '__main__':
unittest.main()
|
gaybro8777/klio | exec/src/klio_exec/commands/audit_steps/tempfile_usage.py |
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import traceback
from klio_exec.commands.audit_steps import base
class TempFileUsage(base.BaseKlioAuditStep):
"""Avoid leaky file descriptors from `tempfile.TemporaryFile`."""
AUDIT_STEP_NAME = "tempfile"
PACKAGES_TO_IGNORE = ("_pytest",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tempfile_used = False
self._tempfile_tracebacks = []
def _mock_tempfile(self):
"""Override tempfile.TemporaryFile in the user's code
with a MockTemporaryFile that marks the class attribute
`TempfileUsage.AuditStep.mock_temporary_file_was used`
as True before returning an actual tempfile.TemporaryFile.
Ignores any use of tempfile.TemporaryFile by
pytest.
"""
RealTempFile = tempfile.TemporaryFile
def MockTemporaryFile(*args, **kwargs):
stack = traceback.extract_stack()[:-1]
caller_frame = stack[-1]
should_ignore = any(
[
("/%s/" % ignored) in caller_frame.filename
for ignored in TempFileUsage.PACKAGES_TO_IGNORE
]
)
if not should_ignore:
self._tempfile_used = True
self._tempfile_tracebacks.append(stack)
return RealTempFile(*args, **kwargs)
tempfile.TemporaryFile = MockTemporaryFile
def before_tests(self):
self._mock_tempfile()
def after_tests(self):
if self._tempfile_used:
self.emit_error(
"`tempfile.TemporaryFile` was used! Please use `tempfile."
"NamedTemporaryFile` instead to avoid leaking files. "
"Traceback:",
self._tempfile_tracebacks[0],
)
# shortcut for registering plugins in setup.py
_init = TempFileUsage
|
gaybro8777/klio | exec/tests/unit/commands/audit_steps/test_base.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from klio_exec.commands.audit_steps import base
class DummyAuditStep(base.BaseKlioAuditStep):
AUDIT_STEP_NAME = "dummy_step"
@staticmethod
def get_description():
return "A description of a dummy step!"
def after_tests(self):
pass
class IncompleteAuditStep(base.BaseKlioAuditStep):
AUDIT_STEP_NAME = "incomplete_dummy_step"
class NotAnAuditStep(object):
def after_tests(self):
pass
TB_PYTEST = [
'File "/usr/local/lib/python3.6/site-packages/pluggy/callers.py", line 187, in _multicall\n res = hook_impl.function(*args)', # NOQA: E501
'File "/usr/local/lib/python3.6/site-packages/_pytest/python.py", line 178, in pytest_pyfunc_call\n testfunction(**testargs)', # NOQA: E501
'File "/usr/src/app/test_transform.py", line 13, in test_process\n assert expected == list(output)[0]', # NOQA: E501
'File "/usr/src/app/transforms.py", line 21, in process\n with tempfile.TemporaryFile() as t:', # NOQA: E501
]
TB_NO_PYTEST = [
'File "/usr/src/app/transforms.py", line 21, in process\n with tempfile.TemporaryFile() as t:' # NOQA: E501
]
@pytest.mark.parametrize("tb,exp_len", ((TB_PYTEST, 2), (TB_NO_PYTEST, 1)))
def test_remove_all_frames_until_after_pytest(tb, exp_len):
act_ret = base._get_relevant_frames(tb)
assert exp_len == len(act_ret)
def test_base_klio_audit_step(mock_terminal_writer):
assert issubclass(DummyAuditStep, base.BaseKlioAuditStep)
assert issubclass(IncompleteAuditStep, base.BaseKlioAuditStep)
assert not issubclass(NotAnAuditStep, base.BaseKlioAuditStep)
dummy_inst = DummyAuditStep("job/dir", "config", mock_terminal_writer)
assert dummy_inst.before_tests() is None
# just making sure this doesn't raise
dummy_inst.after_tests()
assert dummy_inst.get_description() is not None
inc_inst = IncompleteAuditStep("job/dir", "config", mock_terminal_writer)
assert inc_inst.before_tests() is None
assert inc_inst.get_description() is None
with pytest.raises(NotImplementedError):
inc_inst.after_tests()
@pytest.mark.parametrize("tb", (None, ["a traceback"]))
def test_emit(tb, mock_terminal_writer, mocker, monkeypatch):
mock_format_list = mocker.Mock()
mock_format_list.return_value = tb
monkeypatch.setattr(base.traceback, "format_list", mock_format_list)
dummy_inst = DummyAuditStep("job/dir", "config", mock_terminal_writer)
msg = "Emit this message"
exp_msg = "[dummy_step]: Emit this message\n"
if tb:
exp_msg = "{}{}\n".format(exp_msg, tb[0])
kwargs = {"foo": "bar"}
assert dummy_inst.warned is False # sanity check
dummy_inst.emit_warning(msg, tb=tb, **kwargs)
if tb:
mock_format_list.assert_called_once_with(tb)
else:
mock_format_list.assert_not_called()
exp_kwargs = {"foo": "bar", "yellow": True}
mock_terminal_writer.write.assert_called_once_with(exp_msg, **exp_kwargs)
assert dummy_inst.warned is True
mock_format_list.reset_mock()
mock_terminal_writer.reset_mock()
assert dummy_inst.errored is False # sanity check
dummy_inst.emit_error(msg, tb=tb, **kwargs)
if tb:
mock_format_list.assert_called_once_with(tb)
else:
mock_format_list.assert_not_called()
exp_kwargs = {"foo": "bar", "red": True}
mock_terminal_writer.write.assert_called_once_with(exp_msg, **exp_kwargs)
assert dummy_inst.errored is True
|
gaybro8777/klio | cli/src/klio_cli/commands/job/__init__.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from klio_cli.commands.job import audit
from klio_cli.commands.job import configuration
from klio_cli.commands.job import create
from klio_cli.commands.job import delete
from klio_cli.commands.job import profile
from klio_cli.commands.job import run
from klio_cli.commands.job import stop
from klio_cli.commands.job import test
from klio_cli.commands.job import verify
__all__ = (
audit,
configuration,
create,
delete,
profile,
run,
stop,
test,
verify,
)
|
gaybro8777/klio | core/tests/config/test_converters.py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from klio_core import config
from klio_core.config import _converters as converters
from klio_core.config import _utils as utils
@utils.config_object(key_prefix="foo.bar")
class ConfigTestClass(object):
f1 = utils.field(type=str, default=None)
f2 = utils.field(type=str)
def test_config_decorator_direct_instantiation():
a = ConfigTestClass(f1="f1value", f2=None)
assert "f1value" == a.f1
assert a.f2 is None
b = ConfigTestClass(f2="value")
assert b.f1 is None
assert "value" == b.f2
with pytest.raises(Exception):
ConfigTestClass(f1="value")
@pytest.mark.parametrize(
"config_dict, expected",
(
(
{"f1": "f1value", "f2": None},
ConfigTestClass(f1="f1value", f2=None),
),
({"f1": "f1value"}, None),
({"f2": "value"}, ConfigTestClass(f1=None, f2="value")),
({}, None),
),
)
def test_config_decorator_no_value(config_dict, expected):
if expected:
assert expected == ConfigTestClass(config_dict)
else:
with pytest.raises(Exception):
ConfigTestClass(config_dict)
@pytest.mark.parametrize(
"value, expected", ((5, "5"), ("foo", "foo"), (None, None), (True, "True"))
)
def test_str_converter(value, expected):
assert expected == converters.StringConverter("foo").validate(value)
@pytest.mark.parametrize(
"bad_value", (converters.UNSET_REQUIRED_VALUE, {}, [])
)
def test_str_converter_raises(bad_value):
with pytest.raises(Exception):
converters.StringConverter("foo").validate(bad_value)
@pytest.mark.parametrize(
"value, expected",
(
(5, True),
(True, True),
(None, None),
(0, False),
(False, False),
("true", True),
("false", True), # hmmmmm
),
)
def test_bool_converter(value, expected):
assert expected == converters.BoolConverter("foo").validate(value)
@pytest.mark.parametrize(
"bad_value", (converters.UNSET_REQUIRED_VALUE, {}, [])
)
def test_bool_converter_raises(bad_value):
with pytest.raises(Exception):
converters.BoolConverter("foo").validate(bad_value)
@pytest.mark.parametrize(
"value, expected", ((5, 5), ("5", 5), (None, None), (True, 1))
)
def test_int_converter(value, expected):
assert expected == converters.IntConverter("foo").validate(value)
@pytest.mark.parametrize(
"bad_value", ("3.14", converters.UNSET_REQUIRED_VALUE, "something", {}, [])
)
def test_int_converter_raises(bad_value):
with pytest.raises(Exception):
config.IntConverter("foo").validate(bad_value)
|
gaybro8777/klio | lib/src/klio/transforms/_utils.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For internal use only; no backwards-compatibility guarantees."""
import enum
import functools
import warnings
class AnnotatedStates(enum.Enum):
DEPRECATED = "deprecated"
EXPERIMENTAL = "experimental"
# adapted from https://github.com/apache/beam/blob/9c3941fc/
# sdks/python/apache_beam/utils/annotations.py
class KlioDeprecationWarning(DeprecationWarning):
"""Klio-specific deprecation warnings."""
class KlioFutureWarning(FutureWarning):
"""Klio-specific deprecation warnings."""
# Don't ignore klio deprecation warnings! (future warnings ok)
warnings.simplefilter("once", KlioDeprecationWarning)
def is_original_process_func(clsdict, bases, base_class=None):
"""Only wrap the original `process` function.
Without these (minimal) checks, the `process` function would be
wrapped at least twice (the original `process` function from the
user's DoFn, and our wrapped/decorated one), essentially causing
any call to `process` (and the decorator) to be called at least
twice.
Args:
clsdict (dict): dictionary of items for the class being
instantiated.
bases (tuple(class)): base class(es) of the class being
instantiated.
Returns:
(bool) whether or not to wrap the `process` method of the class
being instantiated.
"""
if "process" not in clsdict:
return False
# ignore classes that don't inherit from our base class
base_cls_names = [b.__name__ for b in bases]
if base_class and base_class not in base_cls_names:
return False
# if the value of clsdict["process"] is not a meth/func
if not callable(clsdict["process"]):
return False
# if the value of clsdict["process"] is already "new_process"
if getattr(clsdict["process"], "__name__") != "process":
return False
return True
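# Hedged sketch of how a metaclass might apply the check above (names are
# illustrative, not from this module):
#   class _KlioMeta(type):
#       def __new__(mcs, name, bases, clsdict):
#           if is_original_process_func(clsdict, bases, base_class="KlioBaseDoFn"):
#               clsdict["process"] = _wrap_process(clsdict["process"])  # hypothetical
#           return super().__new__(mcs, name, bases, clsdict)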
# adapted from https://github.com/apache/beam/blob/9c3941fc/
# sdks/python/apache_beam/utils/annotations.py
def annotate(state, since=None, current=None, message=None):
"""Decorates an API with a `deprecated` or `experimental` annotation.
When a user uses a objected decorated with this annotation, they
will see a `KlioFutureWarning` or `KlioDeprecationWarning` during
runtime.
Args:
state (AnnotatedStates): the kind of annotation (AnnotatedStates.
DEPRECATED or AnnotatedStates.EXPERIMENTAL).
since: the version that causes the annotation (used for
AnnotatedStates.DEPRECATED when no `message` is given;
ignored for AnnotatedStates.EXPERIMENTAL).
current: the suggested replacement function.
        message: if the default message does not suffice, the message
            can be changed using this argument.
Returns:
The decorator for the API.
"""
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
warning_type = KlioFutureWarning
if state == AnnotatedStates.DEPRECATED:
warning_type = KlioDeprecationWarning
warn_message = message
if message is None:
addl_ctx = (
" and is subject to incompatible changes, or removal "
"in a future release of Klio."
)
if state == AnnotatedStates.DEPRECATED:
_since = " since {}".format(since) if since else ""
_current = (
". Use {} instead".format(current) if current else ""
)
addl_ctx = "{}{}.".format(_since, _current)
msg_kwargs = {
"obj": func.__name__,
"annotation": state.value,
"addl_ctx": addl_ctx,
}
warn_message = "'{obj}' is {annotation}{addl_ctx}".format(
**msg_kwargs
)
warnings.warn(warn_message, warning_type, stacklevel=2)
return func(*args, **kwargs)
return inner
return wrapper
# partials for ease of use
deprecated = functools.partial(annotate, state=AnnotatedStates.DEPRECATED)
experimental = functools.partial(
annotate, state=AnnotatedStates.EXPERIMENTAL, since=None
)
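# Hedged usage sketch for the partials above:
#   @deprecated(since="0.2.0", current="new_func")
#   def old_func():
#       ...
#   old_func()  # emits a KlioDeprecationWarning at call time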
|
gaybro8777/klio | integration/read-bq-write-bq/transforms.py |
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Klio DoFn for basic integration test.
"""
import apache_beam as beam
import json
from klio.transforms import decorators
class LogKlioMessage(beam.DoFn):
@decorators.handle_klio
def process(self, item):
self._klio.logger.info("Hello, Klio!")
self._klio.logger.info("Received element {}".format(item.element))
self._klio.logger.info("Received payload {}".format(item.payload))
element_str = item.element.decode("utf-8")
row = {"entity_id": element_str, "value": element_str}
yield json.dumps(row)
|
gaybro8777/klio | lib/tests/unit/message/test_serializer.py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from apache_beam import pvalue
from google.protobuf import message as gproto_message
from klio_core.proto.v1beta1 import klio_pb2
from klio.message import exceptions
from klio.message import serializer
def _get_klio_job():
job = klio_pb2.KlioJob()
job.job_name = "klio-job"
job.gcp_project = "test-project"
return job
def _get_klio_message():
parent_klio_job = _get_klio_job()
msg = klio_pb2.KlioMessage()
msg.metadata.visited.extend([parent_klio_job])
msg.metadata.force = True
msg.metadata.ping = True
msg.data.element = b"1234567890"
msg.version = klio_pb2.Version.V2
return msg
@pytest.fixture
def klio_message():
return _get_klio_message()
@pytest.fixture
def klio_message_str(klio_message):
return klio_message.SerializeToString()
@pytest.fixture
def logger(mocker):
return mocker.Mock()
@pytest.mark.parametrize(
"version",
(klio_pb2.Version.UNKNOWN, klio_pb2.Version.V1, klio_pb2.Version.V2),
)
@pytest.mark.parametrize(
"element,entity_id,payload",
(
(b"an-element", None, None),
(None, "an-entity-id", None),
(None, "an-entity-id", b"some-payload"),
(b"an-element", None, b"some-payload"),
(None, None, b"some-payload"),
),
)
def test_handle_msg_compat(version, element, entity_id, payload):
msg = klio_pb2.KlioMessage()
msg.version = version
if element:
msg.data.element = element
if payload:
msg.data.payload = payload
if entity_id:
msg.data.entity_id = entity_id
actual_msg = serializer._handle_msg_compat(msg)
    assert actual_msg.version != klio_pb2.Version.UNKNOWN
    # The function's logic assumes v2 messages are already parsed correctly,
    # so only check the entity_id -> element migration for older versions.
    if entity_id and version != klio_pb2.Version.V2:
        assert entity_id == actual_msg.data.element.decode("utf-8")
def test_to_klio_message(klio_message, klio_message_str, klio_config, logger):
actual_message = serializer.to_klio_message(
klio_message_str, klio_config, logger
)
assert klio_message == actual_message
logger.error.assert_not_called()
def test_to_klio_message_allow_non_kmsg(klio_config, logger, monkeypatch):
monkeypatch.setattr(
klio_config.job_config, "allow_non_klio_messages", True
)
incoming = b"Not a klio message"
expected = klio_pb2.KlioMessage()
expected.data.element = incoming
expected.version = klio_pb2.Version.V2
expected.metadata.intended_recipients.anyone.SetInParent()
actual_message = serializer.to_klio_message(incoming, klio_config, logger)
assert expected == actual_message
logger.error.assert_not_called()
def test_to_klio_message_raises(klio_config, logger, monkeypatch):
incoming = b"Not a klio message"
with pytest.raises(gproto_message.DecodeError):
serializer.to_klio_message(incoming, klio_config, logger)
# Just asserting it's called - not testing the error string itself
# to avoid making brittle tests
assert 1 == logger.error.call_count
@pytest.mark.parametrize(
"payload,exp_payload",
(
(None, None),
(b"some payload", b"some payload"),
(_get_klio_message().data, None),
("string payload", b"string payload"),
),
)
def test_from_klio_message(klio_message, payload, exp_payload):
expected = _get_klio_message()
if exp_payload:
expected.data.payload = exp_payload
expected_str = expected.SerializeToString()
actual_message = serializer.from_klio_message(klio_message, payload)
assert expected_str == actual_message
def test_from_klio_message_v1():
payload = b"some-payload"
msg = klio_pb2.KlioMessage()
msg.version = klio_pb2.Version.V1
msg.data.payload = payload
expected_str = msg.SerializeToString()
actual_message = serializer.from_klio_message(msg, payload)
assert expected_str == actual_message
def test_from_klio_message_tagged_output(klio_message):
payload = b"some payload"
expected_msg = _get_klio_message()
expected_msg.data.payload = payload
expected = pvalue.TaggedOutput("a-tag", expected_msg.SerializeToString())
tagged_payload = pvalue.TaggedOutput("a-tag", payload)
actual_message = serializer.from_klio_message(klio_message, tagged_payload)
# can't compare expected vs actual directly since pvalue.TaggedOutput
# hasn't implemented the comparison operators
assert expected.tag == actual_message.tag
assert expected.value == actual_message.value
def test_from_klio_message_raises(klio_message):
payload = {"no": "bytes casting"}
with pytest.raises(
exceptions.KlioMessagePayloadException, match="Returned payload"
):
serializer.from_klio_message(klio_message, payload)
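# A minimal round-trip sketch tying the two entry points above together
# (assumes the same `klio_config` and `logger` fixtures used by the tests
# above):
def test_to_from_klio_message_round_trip(klio_message_str, klio_config, logger):
    parsed = serializer.to_klio_message(klio_message_str, klio_config, logger)
    actual = serializer.from_klio_message(parsed, b"new-payload")
    expected = _get_klio_message()
    expected.data.payload = b"new-payload"
    assert expected.SerializeToString() == actual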
|
gaybro8777/klio | integration/read-bq-write-bq/integration_test.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To be run after `klio job run --direct-runner` (not within job container)
import os
import unittest
import apache_beam as beam
from apache_beam.options import pipeline_options
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.testing import util as test_util
from apache_beam.testing import test_pipeline
from it import common
class TestExpectedOutput(unittest.TestCase):
def test_is_equal(self):
"""The contents of the event input table are fed into the event output table"""
klio_config = common.get_config()
output_table_cfg = klio_config.job_config.events.outputs[0]
output_table_spec = bigquery.TableReference(
projectId=output_table_cfg.project,
datasetId=output_table_cfg.dataset,
tableId=output_table_cfg.table
)
options = {
'project': output_table_cfg.project,
            'runner': 'DirectRunner'
}
options = pipeline_options.PipelineOptions(flags=[], **options)
with test_pipeline.TestPipeline(options=options) as p:
actual_pcoll = p | "Actual" >> beam.io.Read(beam.io.BigQuerySource(output_table_spec))
expected = [{"entity_id": v, "value": v} for v in common.entity_ids]
test_util.assert_that(actual_pcoll, test_util.equal_to(expected))
if __name__ == '__main__':
unittest.main()
|
gaybro8777/klio | cli/tests/conftest.py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import pytest
@pytest.fixture
def caplog(caplog):
"""Set global test logging levels."""
caplog.set_level(logging.DEBUG)
return caplog
@pytest.fixture
def pipeline_config_dict():
return {
"project": "test-project",
"staging_location": "gs://some/stage",
"temp_location": "gs://some/temp",
"worker_harness_container_image": "gcr.io/sigint/foo",
"streaming": True,
"update": False,
"experiments": ["beam_fn_api"],
"region": "us-central1",
"num_workers": 3,
"max_num_workers": 5,
"disk_size_gb": 50,
"worker_machine_type": "n1-standard-4",
}
@pytest.fixture
def patch_os_getcwd(monkeypatch, tmpdir):
test_dir = str(tmpdir.mkdir("testing"))
monkeypatch.setattr(os, "getcwd", lambda: test_dir)
return test_dir
|
gaybro8777/klio | lib/tests/unit/transforms/test_io.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
import os
import tempfile
import apache_beam as beam
import pytest
from apache_beam.testing import test_pipeline
from klio_core.proto import klio_pb2
from klio.transforms import io as io_transforms
HERE = os.path.abspath(os.path.join(os.path.abspath(__file__), os.path.pardir))
FIXTURE_PATH = os.path.join(HERE, os.path.pardir, "fixtures")
def assert_expected_klio_msg_from_file(element):
message = klio_pb2.KlioMessage()
message.ParseFromString(element)
assert message.data.element is not None
assert isinstance(message.data.element, bytes)
def test_read_from_file():
file_path = os.path.join(FIXTURE_PATH, "elements_text_file.txt")
transform = io_transforms.KlioReadFromText(file_path)
with test_pipeline.TestPipeline() as p:
(
p
| "Read" >> transform
| beam.Map(assert_expected_klio_msg_from_file)
)
assert transform._REQUIRES_IO_READ_WRAP is False
def test_write_to_file():
file_path_read = os.path.join(FIXTURE_PATH, "elements_text_file.txt")
with tempfile.TemporaryDirectory() as tmp_path:
with test_pipeline.TestPipeline() as p:
(
p
| io_transforms.KlioReadFromText(file_path_read)
| io_transforms.KlioWriteToText(tmp_path)
)
# WriteToText will shard files so we iterate through each
# file in the directory
write_results = []
for file_name in glob.glob(tmp_path + "*"):
if os.path.isfile(os.path.join(tmp_path, file_name)):
with open(file_name, "rb") as f:
write_results.extend(f.readlines())
with open(file_path_read, "rb") as fr:
read_results = fr.readlines()
assert write_results == read_results
def _expected_avro_kmsgs():
expected_records = [
{
"username": "miguno",
"tweet": "Rock: Nerf paper, scissors is fine.",
"timestamp": 1366150681,
},
{
"username": "BlizzardCS",
"tweet": "Works as intended. Terran is IMBA.",
"timestamp": 1366154481,
},
]
expected_kmsgs = []
for record in expected_records:
message = klio_pb2.KlioMessage()
message.version = klio_pb2.Version.V2
message.metadata.intended_recipients.anyone.SetInParent()
message.data.element = bytes(json.dumps(record).encode("utf-8"))
expected_kmsgs.append(message)
return expected_kmsgs
def assert_expected_klio_msg_from_avro(element):
expected_kmsgs = _expected_avro_kmsgs()
message = klio_pb2.KlioMessage()
message.ParseFromString(element)
assert message in expected_kmsgs
def test_read_from_avro():
file_pattern = os.path.join(FIXTURE_PATH, "twitter.avro")
with test_pipeline.TestPipeline() as p:
(
p
| io_transforms.KlioReadFromAvro(file_pattern=file_pattern)
| beam.Map(assert_expected_klio_msg_from_avro)
)
assert io_transforms.KlioReadFromAvro._REQUIRES_IO_READ_WRAP is True
def assert_expected_klio_msg_from_avro_write(element):
file_path_read = os.path.join(FIXTURE_PATH, "elements_text_file.txt")
with open(file_path_read, "rb") as fr:
expected_elements = fr.read().splitlines()
message = klio_pb2.KlioMessage()
message.ParseFromString(element)
assert message.data.element in expected_elements
def test_write_to_avro():
file_path_read = os.path.join(FIXTURE_PATH, "elements_text_file.txt")
with tempfile.TemporaryDirectory() as tmp_path:
with test_pipeline.TestPipeline() as p:
p | io_transforms.KlioReadFromText(
file_path_read
) | io_transforms.KlioWriteToAvro(file_path_prefix=tmp_path)
files = glob.glob(tmp_path + "*")
assert len(files) > 0
        # use all() so the generator is actually evaluated; a bare
        # `assert <generator>` is always truthy
        assert all(
            os.path.isfile(os.path.join(tmp_path, file_name))
            for file_name in files
        )
with test_pipeline.TestPipeline() as p2:
p2 | io_transforms.KlioReadFromAvro(
file_pattern=(tmp_path + "*")
) | beam.Map(assert_expected_klio_msg_from_avro_write)
def test_avro_io_immutability():
initial_data_path = os.path.join(FIXTURE_PATH, "twitter.avro")
with tempfile.TemporaryDirectory() as tmp_path:
with test_pipeline.TestPipeline() as p:
p | io_transforms.KlioReadFromAvro(
initial_data_path
) | io_transforms.KlioWriteToAvro(
file_path_prefix=tmp_path, num_shards=0
)
with test_pipeline.TestPipeline() as p2:
p2 | io_transforms.KlioReadFromAvro(
file_pattern=tmp_path + "*"
) | beam.Map(assert_expected_klio_msg_from_avro)
def test_bigquery_mapper_generate_klio_message():
mapper = io_transforms._KlioReadFromBigQueryMapper()
message = mapper._generate_klio_message()
assert message.version == klio_pb2.Version.V2
assert (
message.metadata.intended_recipients.WhichOneof("recipients")
== "anyone"
)
@pytest.mark.parametrize(
"klio_message_columns,row,expected",
(
(["one_column"], {"a": "A", "b": "B", "one_column": "value"}, "value"),
(
["a", "b"],
{"a": "A", "b": "B", "c": "C"},
json.dumps({"a": "A", "b": "B"}),
),
(None, {"a": "A", "b": "B"}, json.dumps({"a": "A", "b": "B"})),
),
)
def test_bigquery_mapper_map_row_element(klio_message_columns, row, expected):
mapper = io_transforms._KlioReadFromBigQueryMapper(
klio_message_columns=klio_message_columns
)
actual = mapper._map_row_element(row)
assert actual == expected
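# For example, restating one parametrized case above as a sketch: with
# klio_message_columns=["a", "b"], the row {"a": "A", "b": "B", "c": "C"}
# maps to the JSON string '{"a": "A", "b": "B"}', which is presumably used
# as the element of the generated KlioMessage.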
|
gaybro8777/klio | exec/src/klio_exec/commands/profile.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import contextlib
import functools
import logging
import os
import subprocess
import sys
import tempfile
import time
import apache_beam as beam
try:
import memory_profiler
except ImportError: # pragma: no cover
logging.error(
"Failed to import profiling dependencies. Did you install "
"`klio-exec[debug]` in your job's Docker image?"
)
raise SystemExit(1)
from klio.transforms import decorators
from klio_core.proto import klio_pb2
from klio_exec.commands.utils import cpu_utils
from klio_exec.commands.utils import memory_utils
from klio_exec.commands.utils import profile_utils
@contextlib.contextmanager
def smart_open(filename=None, fmode=None):
"""Handle both stdout and files in the same manner."""
if filename and filename != "-":
fh = open(filename, fmode)
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
class StubIOSubMapper(object):
def __init__(self, input_pcol):
def fake_constructor(*args, **kwargs):
return input_pcol
        # Normally this is a map of io-name -> transform class. Instead, we
        # have every possible name return our fake constructor, which just
        # returns the pre-constructed transform.
self.input = collections.defaultdict(lambda: fake_constructor)
self.output = {} # no outputs
class StubIOMapper(object):
def __init__(self, input_pcol, iterations):
repeated_input = input_pcol | beam.FlatMap(lambda x: [x] * iterations)
self.batch = StubIOSubMapper(repeated_input)
self.streaming = StubIOSubMapper(repeated_input)
@staticmethod
def from_input_file(file_path, iterations):
transform = beam.io.ReadFromText(file_path)
return StubIOMapper(transform, iterations)
@staticmethod
def from_entity_ids(id_list, iterations):
transform = beam.Create(id_list)
return StubIOMapper(transform, iterations)
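# A sketch of how the stubs above get consumed (hypothetical call site; the
# real run pipeline looks input transforms up by their configured io name):
#   io_mapper = StubIOMapper.from_entity_ids(serialized_msgs, iterations=10)
#   input_pcol = pipeline | io_mapper.batch.input["any-name"]()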
class KlioPipeline(object):
DEFAULT_FILE_PREFIX = "klio_profile_{what}_{ts}"
TRANSFORMS_PATH = "./transforms.py"
def __init__(
self, klio_config, input_file=None, output_file=None, entity_ids=None
):
self.input_file = input_file
self.output_file = output_file
self.entity_ids = entity_ids
self._stream = None
self._now_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
self.klio_config = klio_config
def _get_output_png_file(self, what, temp_output):
output_file_base = self.output_file
prefix = KlioPipeline.DEFAULT_FILE_PREFIX.format(
what=what, ts=self._now_str
)
if temp_output:
output_file_base = prefix
elif "." in self.output_file:
# reuse a user's output file name, just replace existing extension
output_file_base = os.path.splitext(self.output_file)[0]
return "{}.png".format(output_file_base)
@contextlib.contextmanager
def _smart_temp_create(self, what, plot_graph):
        # Plotting a graph requires an output file for the collected data,
        # but the user shouldn't have to provide one if they don't want to.
        # This creates a tempfile to write the data to when generating the
        # plot. A context manager is needed so the temp file can be cleaned
        # up afterwards.
temp_output = False
prefix = KlioPipeline.DEFAULT_FILE_PREFIX.format(
what=what, ts=self._now_str
)
if plot_graph and not self.output_file:
temp_output_file = tempfile.NamedTemporaryFile(
dir=".", prefix=prefix
)
self.output_file = temp_output_file.name
temp_output = True
yield temp_output
def _get_subproc(self, **kwargs):
cmd = ["klioexec", "profile", "run-pipeline"]
if kwargs.get("show_logs"):
cmd.append("--show-logs")
if self.input_file:
cmd.extend(["--input-file", self.input_file])
else:
cmd.extend(self.entity_ids)
return subprocess.Popen(cmd)
def _get_cpu_line_profiler(self):
return cpu_utils.KLineProfiler()
def _profile_wall_time_per_line(self, iterations, **_):
profiler = self._get_cpu_line_profiler()
decorators.ACTIVE_PROFILER = profiler
self._run_pipeline(iterations=iterations)
        # output_unit = 1 second, meaning the numbers in the "Time" and
        # "Per Hit" columns are in seconds
        if self.output_file:
            return profiler.print_stats(self.output_file, output_unit=1)
        profiler.print_stats(output_unit=1)
def _get_memory_line_profiler(self):
return memory_utils.KMemoryLineProfiler(backend="psutil")
def _get_memory_line_wrapper(self, profiler, get_maximum):
wrapper = memory_utils.KMemoryLineProfiler.wrap_per_element
if get_maximum:
wrapper = functools.partial(
memory_utils.KMemoryLineProfiler.wrap_maximum, profiler
)
return wrapper
def _profile_memory_per_line(self, get_maximum=False):
profiler = self._get_memory_line_profiler()
decorators.ACTIVE_PROFILER = self._get_memory_line_wrapper(
profiler, get_maximum
)
# "a"ppend if output per element; "w"rite (once) for maximum.
# append will append a file with potentially already-existing data
# (i.e. from a previous run), which may be confusing; but with how
# memory_profiler treats streams, there's no simple way to prevent
# appending data for per-element without re-implementing parts of
# memory_profiler (maybe someday?) @lynn
fmode = "w" if get_maximum else "a"
with smart_open(self.output_file, fmode=fmode) as f:
self._stream = f
self._run_pipeline()
if get_maximum:
memory_profiler.show_results(profiler, stream=self._stream)
def _profile_memory(self, **kwargs):
# Profile the memory while the pipeline runs in another process
p = self._get_subproc(**kwargs)
plot_graph = kwargs.get("plot_graph")
with self._smart_temp_create("memory", plot_graph) as temp_output:
with smart_open(self.output_file, fmode="w") as f:
memory_profiler.memory_usage(
proc=p,
interval=kwargs.get("interval"),
timestamps=True,
include_children=kwargs.get("include_children"),
multiprocess=kwargs.get("multiprocess"),
stream=f,
)
if not plot_graph:
return
output_png = self._get_output_png_file("memory", temp_output)
profile_utils.plot(
input_file=self.output_file,
output_file=output_png,
x_label="Time (in seconds)",
y_label="Memory used (in MiB)",
title="Memory Used While Running Klio-based Transforms",
)
return output_png
def _profile_cpu(self, **kwargs):
# Profile the CPU while the pipeline runs in another process
p = self._get_subproc(**kwargs)
plot_graph = kwargs.get("plot_graph")
with self._smart_temp_create("cpu", plot_graph) as temp_output:
with smart_open(self.output_file, fmode="w") as f:
cpu_utils.get_cpu_usage(
proc=p, interval=kwargs.get("interval"), stream=f,
)
if not plot_graph:
return
output_png = self._get_output_png_file("cpu", temp_output)
profile_utils.plot(
input_file=self.output_file,
output_file=output_png,
x_label="Time (in seconds)",
y_label="CPU%",
title="CPU Usage of All Klio-based Transforms",
)
return output_png
def _get_user_pipeline(self, config, io_mapper):
runtime_config = collections.namedtuple(
"RuntimeConfig",
["image_tag", "direct_runner", "update", "blocking"],
)(None, True, False, True)
from klio_exec.commands.run import KlioPipeline as KP
return KP("profile_job", config, runtime_config, io_mapper)
def _get_user_config(self):
self.klio_config.pipeline_options.runner = "direct"
self.klio_config.job_config.events.outputs = {}
return self.klio_config
@staticmethod
def _entity_id_to_message(entity_id):
message = klio_pb2.KlioMessage()
message.data.element = bytes(entity_id, "UTF-8")
message.metadata.intended_recipients.anyone.SetInParent()
message.version = klio_pb2.Version.V2
return message
def _get_io_mapper(self, iterations):
if self.input_file:
return StubIOMapper.from_input_file(self.input_file, iterations)
else:
messages = []
for entity_id in self.entity_ids:
message = self._entity_id_to_message(entity_id)
messages.append(message.SerializeToString())
return StubIOMapper.from_entity_ids(messages, iterations)
def _run_pipeline(self, iterations=None, **_):
if not iterations:
iterations = 1
io_mapper = self._get_io_mapper(iterations)
config = self._get_user_config()
pipeline = self._get_user_pipeline(config, io_mapper)
pipeline.run()
def profile(self, what, **kwargs):
if what == "run":
return self._run_pipeline(**kwargs)
elif what == "cpu":
return self._profile_cpu(**kwargs)
elif what == "memory":
return self._profile_memory(**kwargs)
elif what == "memory_per_line":
return self._profile_memory_per_line(**kwargs)
elif what == "timeit":
return self._profile_wall_time_per_line(**kwargs)
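# A minimal usage sketch (hypothetical values; in practice `klioexec profile`
# constructs this object from the job's parsed configuration):
#   kp = KlioPipeline(klio_config, entity_ids=["entity-1"])
#   kp.profile("memory", plot_graph=True, interval=0.1)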
|