hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
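The rows below follow this schema, one source file per row. As a minimal sketch of how such a table can be read (assuming a Hugging Face-style `datasets` dump; the dataset identifier used here is a hypothetical placeholder, not taken from this file):

```python
# Minimal sketch: inspect a few columns of a dataset with the schema above.
# "some-org/python-code-dump" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-dump", split="train")
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["score_documentation"])  # documentation-density score in [0, 1]
print(row["content"][:200])        # the raw source file lives in the "content" column
```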
6208cb2a2c64c71c60c631414ac312652b301b34 | 21,926 | py | Python | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | ["MIT"] | 1 | 2022-03-11T07:45:02.000Z | 2022-03-11T07:45:02.000Z | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | ["MIT"] | null | null | null | kirbyClass.py | mattuff/KirbyCalculus | f2a2d2839cd5a658e5e82430619f43dfa6a65502 | ["MIT"] | null | null | null |
from crossingClass import *
from joinClass import *
from strandClass import *
from componentClass import *
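#A Kirby diagram stored as a planar-diagram data structure: crossings (4 strands each) and
#joins (2 strands each) whose strands are grouped into components (1- and 2-handles, with a
#framing stored on each 2-handle)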
class Kirby:
def __init__(self,crossings,joins):
self.crossings=crossings
self.joins=joins
strands=[] #makes list of strands
for c in crossings:
for i in range(4):
if(c[i] not in strands):
strands.append(c[i])
for j in joins:
for i in range(2):
if(j[i] not in strands):
strands.append(j[i])
self.strands=strands
self.components=self.comp_list() #makes list of components
for c in self.components:
c.kirby=self
def __str__(self): #prints planar diagram
l=[]
c=[]
for x in self.strands:
if(x.component not in c):
l.append(x)
c.append(x.component)
s="Components:\n"
n=0
for x in l:
n+=1
s+=" - ["+str(n)
x.name=n
y=x.succ
while(y!=x):
n+=1
s+=","+str(n)
y.name=n
y=y.succ
s+="] ("
if(x.component.handle==2):
s+="2-handle;f="+str(x.component.framing)+")\n"
else:
s+=str(x.component.handle)+"-handle)\n"
s+="Crossings:\n"
if(len(self.crossings)==0):
s+="None"
if(len(self.crossings)>=2):
for i in range(len(self.crossings)-1):
s+=str(self.crossings[i])+","
if(len(self.crossings)>=1):
s+=str(self.crossings[-1])
s+="\nJoins:\n"
if(len(self.joins)==0):
s+="None"
if(len(self.joins)>=2):
for i in range(len(self.joins)-1):
s+=str(self.joins[i])+","
if(len(self.joins)>=1):
s+=str(self.joins[-1])
return(s)
def __getitem__(self,key):
self.rename_all()
for s in self.strands:
if(s.name==key):
return(s)
def reverse(self,c):
s=c[0]
s.pred,s.succ = s.succ,s.pred
t=s.pred
while(s!=t):
t.pred,t.succ = t.succ,t.pred
t=t.pred
for x in self.crossings:
if(x[0].component==c):
x.strands=[x[2],x[3],x[0],x[1]]
self.set_cons(x)
for x in self.joins:
if(x[0].component==c):
x.strands=[x[1],x[0]]
#self.set_cons(x)
self.set_all_cons()
def disjoint_union(self,k):
self.crossings+=k.crossings
self.joins+=k.joins
self=Kirby(self.crossings,self.joins)
def comp_list(self): #returns list of components
l=[]
for x in self.strands:
if(x.component not in l):
l.append(x.component)
return(l)
def strand_list(self, s): #returns an ordered list of strands in a component given a starting strand
l=[s]
t=s.succ
while(t!=s):
l.append(t)
t=t.succ
return l
def switch(self,c): #switches overcrossing strand given a component
if(c[1]==c[3].pred):
f=lambda x:c.strands[(x+1)%4]
else:
f=lambda x:c.strands[(x-1)%4]
c.strands = list(map(f,range(4)))
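    #sets pred_con/succ_con on every strand of c (c is a crossing if c.len==4, otherwise a join)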
def set_cons(self, c):
if (c.len==4):
c[0].set_succ_con(c)
c[2].set_pred_con(c)
if(c[1].succ == c[3]):
c[1].set_succ_con(c)
c[3].set_pred_con(c)
else:
c[1].set_pred_con(c)
c[3].set_succ_con(c)
else:
c[0].set_succ_con(c)
c[1].set_pred_con(c)
def set_all_cons(self):
for x in self.crossings:
self.set_cons(x)
for y in self.joins:
self.set_cons(y)
def rename(self,s,n): #s is named n, strand's name is predecessor's +1
s.name=n
t=s.succ
while(t!=s):
n+=1
t.name=n
t=t.succ
return(n)
def rename_all(self): #renames every strand
l=[]
n=1
for s in self.strands:
if(s.component not in l):
n=self.rename(s,n)+1
l.append(s.component)
def comp_crossings(self,h1): #given a component, returns crossings fully contained within component
l=[]
for c in self.crossings:
if (c[0].component==c[1].component==h1):
l.append(c)
return l
def comp_joins(self,h1): #given a component, returns joins fully contained within component
l=[]
for j in self.joins:
if (j[0].component==h1):
l.append(j)
return l
def comp_intersections(self,h1): #given a component, returns crossings between that component and another
l=[]
for c in self.crossings:
if (c[0].component==h1 and c[1].component!=h1):
l.append(c)
elif (c[1].component==h1 and c[0].component!=h1):
l.append(c)
return l
def connect_sum(self,s0,s1): #connect sums two components given a strand from each
s=[s0,s1]
j=[]
for i in range(2):
self.add_join(s[i])
for i in range(2):
j=join(s[i],s[not i].succ)
self.joins.remove(s[i].succ_con)
self.joins.append(j)
s[i].succ_con=j
s[not i].succ.pred_con=j
for i in range(2):
s[i].succ=s[not i].succ
s[i].succ.pred=s[i]
def writhe(self,c): #given a component, returns writhe
return(sum(list(map(lambda x:(-1)**(x[1]==x[3].pred),self.comp_crossings(c)))))
def linking_number(self,h1,h2): #given two components, returns linking number
l=0
for c in list(set(self.comp_intersections(h1) and self.comp_intersections(h2))):
if (c[0].component==h1):
if (c[1].succ==c[3]):
l+=(-1)
else:
l+=1
return l
def add_join(self, s0): #s0 is strand to be split, s0 will be the predecessor of the new s1
c0=s0.pred_con
c1=s0.succ_con
s1=strand(s0.component,s0,s0.succ,None,s0.succ_con)
self.strands.append(s1)
s0.set_succ(s1)
s1.succ.set_pred(s1)
j=join(s0,s1)
s0.set_succ_con(j)
s1.set_pred_con(j)
if(c1==c0):
if(c0.len-2): #if c0 is a crossing rather than a join
if((c0[2]==s0) & (c0[3]==s0)): #this is the only case in which the first instance of s0 should not be replaced with s1
c0.strands[3]=s1
else:
c0.strands[c0.strands.index(s0)]=s1 #index method returns index of first instance of s0 in list
else:
c0.strands[0]=s1
else:
for i in range(c1.len):
if(c1[i]==s0):
c1.strands[i]=s1
self.joins.append(j)
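    #replaces the two strands linked by join j with a single strand and removes j from the diagram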
def remove_join(self,j):
s=strand(j[0].component,j[0].pred,j[1].succ,j[0].pred_con,j[1].succ_con)
for i in range(s.pred_con.len):
if(s.pred_con[i]==j[0]):
s.pred_con.strands[i]=s
for i in range(s.succ_con.len):
if(s.succ_con[i]==j[1]):
s.succ_con.strands[i]=s
s.pred.succ=s
s.succ.pred=s
self.strands.append(s)
self.strands.remove(j[0])
self.strands.remove(j[1])
self.joins.remove(j)
def remove_joins(self): #removes all joins except for joins in unknots
j=self.joins.copy()
for x in j:
if(x[0]!=x[1]):
self.remove_join(x)
def add_r1(self, x, o, i): # x=strand to twist, o determines orientation of twist (T=ccw,F=cw), i =T if incoming strand is over, =F if under
x.component.framing+=(-1)**(o==i) #changes framing
self.add_join(x)
self.add_join(x)
for j in [x.succ_con,x.succ.succ_con]:
self.joins.remove(j)
s=[x,x.succ,x.succ.succ]
if(o): # computes crossing
if(i):
c=crossing(s[1],s[0],s[2],s[1])
else:
c=crossing(s[0],s[2],s[1],s[1])
else:
if(i):
c=crossing(s[1],s[1],s[2],s[0])
else:
c=crossing(s[0],s[1],s[1],s[2])
self.crossings.append(c) # adds crossing to crossing list
for i in range(2): # changes succ and pred crossings of strands involved
s[i].succ_con=c
s[i+1].pred_con=c
def remove_r1(self, x): #x is the looped strand
j1 = join(x.pred, x)
j2 = join(x, x.succ)
self.crossings.remove(x.succ_con)
c=x.succ_con
x.pred.set_succ_con(j1)
x.set_succ_con(j2)
x.set_pred_con(j1)
x.succ.set_pred_con(j2)
self.joins+=[j1,j2]
if (((c[0]==c[1]) and (c[0]==x.succ)) or ((c[2]==c[3]) and (c[2]==x.succ))):
x.component.framing+=(-1)
else:
x.component.framing+=(1)
def add_r2(self,s1,s2,o,d): #s1 is pulled over s2, o=True iff s2 is on the right of s1, d=True iff s1 and s2 face the same direction
if(s1==s2):
self.add_join(s1)
j=s1.succ_con
self.add_r2(s1,s1.succ,o,d)
self.remove_join(j)
return
l=[]
for s in [s1,s1,s2,s2]:
self.add_join(s)
l.append(s.succ_con)
l1=[s1,s1.succ,s1.succ.succ]
l2=[s2,s2.succ,s2.succ.succ]
c=[crossing(l2[0],l1[1+o-d],l2[1],l1[2-o-d]),crossing(l2[1],l1[1-o+d],l2[2],l1[o+d])]
self.crossings+=c
for s in l:
self.joins.remove(s)
for x in c:
self.set_cons(x)
def remove_r2(self,s0,s1):
s=[s0,s1]
c=[s0.pred_con,s0.succ_con]
j=[join(s0.pred,s0),join(s1.pred,s1),join(s0,s0.succ),join(s1,s1.succ)]
for i in range(2):
s[i].pred_con=j[i]
s[i].pred.succ_con=j[i]
s[i].succ_con=j[i+2]
s[i].succ.pred_con=j[i+2]
self.joins+=j
for x in c:
self.crossings.remove(x)
def add_r3(self, strandUnder, strandMiddle, strandOver):
# strandUnder is the strand that goes under strandMiddle and strandOver, which we are going to move
# strandMiddle goes over strandUnder and under strandOver
# strandOver goes over strandUnder and strandMiddle
# strands refer to triangle
c1 = strandUnder.pred_con
c2 = strandUnder.succ_con
if(strandMiddle.pred_con != c1 and strandMiddle.pred_con != c2): c3 = strandMiddle.pred_con
else: c3 = strandMiddle.succ_con
strandOrient = lambda s: s.pred if (s.pred in c3) else s.succ
#add unofficial joins to list
fixJoin = lambda s: join(s, s.succ) if s.succ not in c3 else join(s.pred, s)
j1 = fixJoin(strandMiddle)
j2 = fixJoin(strandOver)
self.joins.append(j1)
self.joins.append(j2)
#add real joins which will turn into crossings
self.add_join(strandOrient(strandMiddle))
self.add_join(strandOrient(strandOver))
        oldC1 = crossing(c1[0],c1[1],c1[2],c1[3]) #keep a copy, since plain old c1 is changed below instead of remaining constant
        #separate 'for' statements are needed: removing items from self.joins while iterating over it in one loop skips entries and gives wrong results
for j in self.joins:
if (strandOrient(strandOver) in j): self.joins.remove(j)
for j in self.joins:
if(strandOrient(strandMiddle) in j): self.joins.remove(j)
crTest = lambda strand1, strand2, cross2: ((strand1 in c3) and (strand2 in cross2))
crossSet = lambda strand1, strand2, b: c1.set_strands(strandUnder.pred, strand1, strandUnder, strand2) if (b==1) \
else c2.set_strands(strandUnder, strand1, strandUnder.succ, strand2)
#redefine c1
if (crTest(strandMiddle.pred, strandOver, c1)): crossSet(strandMiddle.pred,strandMiddle.pred.pred, 1)
elif (crTest(strandOver.pred, strandOver, c2)): crossSet(strandOver.pred.pred, strandOver.pred, 1)
elif (crTest(strandMiddle.succ, strandOver, c1)): crossSet(strandMiddle.succ, strandMiddle.succ.succ, 1)
elif (crTest(strandOver.succ, strandOver, c2)): crossSet(strandOver.succ.succ, strandOver.succ, 1)
#redefine c2 - use oldC1 since c1 gets redefined above
if (crTest(strandMiddle.pred, strandOver, c2)): crossSet(strandMiddle.pred.pred, strandMiddle.pred, 2)
elif (crTest(strandOver.pred, strandOver, oldC1)): crossSet(strandOver.pred, strandOver.pred.pred, 2)
elif (crTest(strandMiddle.succ, strandOver, c2)): crossSet(strandMiddle.succ.succ, strandMiddle.succ, 2)
elif (crTest(strandOver.succ, strandOver, oldC1)): crossSet(strandOver.succ, strandOver.succ.succ, 2)
#setting succ_pred/succ_con
for i in [c1,c2,c3]: self.set_cons(i)
for i in self.joins:
if((i[0] == j1[0]) and (i[1] == j1[1])): self.remove_join(i)
for i in self.joins:
if((i[0] == j2[0]) and (i[1] == j2[1])): self.remove_join(i)
def handle_annihilation(self,h1,h2=None): #h1,h2 strands
self.remove_joins()
        #checks to make sure each handle only has 2 strands (all joins must be removed)
if (h2!=None):
if (len(self.strand_list(h1))==2 and len(self.strand_list(h2))==2):
if ((h1.pred_con==h2.pred_con) or (h1.pred_con==h2.succ_con)):
for i in [h1,h1.succ,h2,h2.succ]:
self.strands.remove(i)
self.crossings.remove(h1.succ_con)
self.crossings.remove(h1.pred_con)
self.components.remove(h1.component)
self.components.remove(h2.component)
#cancels out an unknot w framing=0
else:
if (len(self.strand_list(h1))==1):
if (h1.component.framing==0):
self.joins.remove(h1.succ_con)
self.strands.remove(h1)
self.components.remove(h1.component)
def handle_creation(self, f=None): #f=framing for 2-handle to have
if (f!=None):
h1=component(1)
h2=component(2,f)
h1.kirby=self
h2.kirby=self
a=strand(h1)
b=strand(h1, a,a)
a.set_pred(b)
a.set_succ(b)
c=strand(h2)
d=strand(h2,c,c)
c.set_pred(d)
c.set_succ(d)
c1=crossing(a,c,b,d)
c2=crossing(c,a,d,b)
for i in [c1,c2]:
self.set_cons(i)
self.crossings+=[c1,c2]
self.strands+=[a,b,c,d]
self.components+=[h1,h2]
else:
h1=component(2,0)
h1.kirby=self
a=strand(h1)
a.set_pred(a)
a.set_succ(a)
j=join(a,a)
self.set_cons(j)
self.joins.append(j)
self.strands.append(a)
self.components.append(h1)
def handle_slide(self, h1, h2, sign): #h2 is being slid over h1; sign=True if same orientation
s=self.strand_list(h1) #list of strands in h1, in succ order
l=[]
h3=component(2)
comp_crossings=self.comp_crossings(h1.component) #crossings with only strands in h1
        comp_intersections=self.comp_intersections(h1.component) #crossings with 2 strands in h1 and 2 in another component
sf1=self.writhe(h1.component)-h1.component.framing #seifert framing of first handle
sf2=self.writhe(h2.component)-h2.component.framing #seifert framing of second handle
lk=self.linking_number(h1.component,h2.component) #linking number of two handles
for k in range (len(s)): #sets up parallel copy of h1
st=strand(h3)
l.append(st)
for i in range (len(l)-1): #sets up preds and succs
## if (sign):
l[i].set_pred(l[i-1])
if (i < (len(l)-2)):
l[i].set_succ(l[i+1])
l[-1].set_succ(l[0])
## else:
## l[i].set_succ(l[i-1])
## if (i < (len(l)-1)):
## l[i].set_pred(l[i+1])
## else:
## l[i].set_pred(l[0])
self.strands+=l
for j in self.comp_joins(h1.component): #duplicates joins
a=s.index(j[0])
## if (sign):
jn=join(l[a],l[a+1])
## else:
## jn=join(l[a+1],l[a])
self.set_cons(jn)
self.joins.append(jn)
for cx in comp_crossings: #takes crossing, makes into four
a=cx[0]
b=cx[1]
c=cx[2]
d=cx[3]
aa=l[s.index(a)]
bb=l[s.index(b)]
cc=l[s.index(c)]
dd=l[s.index(d)]
var=(b.succ==d)
e=strand(h1.component, a,c)
a.set_succ(e)
c.set_pred(e)
s.insert(s.index(a)+1, e)
## if (sign):
ee=strand(h2.component, aa, cc)
aa.set_succ(ee)
cc.set_pred(ee)
## else:
## ee=strand(h2.component, cc, aa)
## cc.set_succ(ee)
## aa.set_pred(ee)
l.insert(l.index(aa)+1, ee)
if (var):
f=strand(h1.component, b, d)
s.insert(s.index(b)+1, f)
b.set_succ(f)
d.set_pred(f)
## if (sign):
ff=strand(h2.component, bb, dd)
bb.set_succ(ff)
dd.set_pred(ff)
## else:
## ff=strand(h2.component, dd, bb)
## dd.set_succ(ff)
## bb.set_pred(ff)
## l.insert(l.index(bb)+1, ff)
else:
f=strand(h1.component, d, b)
d.set_succ(f)
b.set_pred(f)
## s.insert(s.index(d)+1, f)
## if (sign):
ff=strand(h2.component, dd, bb)
dd.set_succ(ff)
bb.set_pred(ff)
## else:
## ff=strand(h2.component, bb, dd)
## bb.set_succ(ff)
## dd.set_pred(ff)
l.insert(l.index(dd)+1, ff)
self.strands+=[e,ee,f,ff]
self.crossings.remove(cx)
if (var):
c1=crossing(a,f,e,d)
c4=crossing(e,ff,c,dd)
if (sign):
c2=crossing(aa,b,ee,f)
c3=crossing(ee,bb,cc,ff)
else:
c2=crossing(ee,f,aa,b)
c3=crossing(cc,ff,ee,bb)
else:
c1=crossing(a,ff,e,dd)
c4=crossing(e,f,c,d)
## if (sign):
c2=crossing(aa,bb,ee,ff)
c3=crossing(ee,b,cc,f)
## else:
## c2=crossing(ee,ff,aa,bb)
## c3=crossing(cc,f,ee,b)
self.crossings+=[c1,c2,c3,c4]
self.set_cons(c1)
self.set_cons(c2)
self.set_cons(c3)
self.set_cons(c4)
for cx in comp_intersections: #turns crossings between h1 and another comp into 2 crossings
if (cx[0].component==h1.component):
a=cx[0]
b=cx[1]
c=cx[2]
d=cx[3]
aa=l[s.index(a)]
cc=l[s.index(c)]
f=strand(b.component)
self.strands+=[f]
c1=crossing(a,f,c,d)
if (b.succ==d):
f.set_pred(b)
b.set_succ(f)
f.set_succ(d)
d.set_pred(f)
f1 = lambda x : b if x else f
f2 = lambda x : aa if x else cc
c2=crossing(f2(sign),f1(sign),f2(not sign),f1(not sign))
else:
f.set_pred(d)
d.set_succ(f)
f.set_succ(b)
b.set_pred(f)
f1 = lambda x : aa if x else cc
f2 = lambda x : b if x else f
c2=crossing(f1(sign),f2(sign),f1(not sign),f2(not sign))
else:
a=cx[0]
b=cx[1]
c=cx[2]
d=cx[3]
bb=l[s.index(b)]
dd=l[s.index(d)]
e=strand(a.component,a,c)
a.set_succ(e)
c.set_pred(e)
self.strands+=[e]
if (b.succ==d):
c1=crossing(a,b,e,d)
c2=crossing(e,bb,c,dd)
else:
c1=crossing(a,bb,e,dd)
c2=crossing(e,b,c,d)
for i in [c1,c2]:
self.set_cons(i)
self.crossings.remove(cx)
self.crossings+=[c1,c2]
#adding extra twists for framing
pos=(h1.component.framing>0)
## if (sign):
for i in range(abs(h1.component.framing)):
l1=l[-1]
s1=s[-1]
self.add_join(l1)
self.add_join(l1)
l2=l1.succ
l3=l1.succ.succ
self.add_join(s1)
self.add_join(s1)
s2=s1.succ
s3=s1.succ
joinlist=[l1.succ_con, l2.succ_con, s1.succ_con, s2.succ_con]
l+=[l2,l3]
s+=[s2,s3]
if (pos): #clockwise twists
c1=crossing(l1,s2,l2,s1)
c2=crossing(s2,l3,s3,l2)
else: #counterclockwise twists
c1=crossing(s1,l1,s2,l2)
c2=crossing(s2,l3,s3,l2)
self.crossings+=[c1,c2]
for j in joinlist:
self.joins.remove(j)
for i in [c1,c2]:
self.set_cons(i)
## else:
## for i in range(abs(h1.component.framing)):
## l1=l[-1]
## s1=s[-1]
## self.add_join(l1)
## self.add_join(l1)
## l2=l1.succ
## l3=l2.succ
## self.add_join(s1)
## self.add_join(s1)
## s2=s1.succ
## s3=s2.succ
## joinlist=[l1.succ_con, l2.succ_con, s1.succ_con, s2.succ_con]
## s+=[s2,s3]
## l.remove(l1)
## l+=[l3,l2,l1]
## if (pos): #counterclockwise twists
## c1=crossing(l2,s1,l3,s2)
## c2=crossing(s2,l1,s3,l2)
## else: #clockwise twists
## c1=crossing(s1,l3,s2,l2)
## c2=crossing(s2,l1,s3,l2)
## self.crossings+=[c1,c2]
## for j in joinlist:
## self.joins.remove(j)
## for i in [c1,c2]:
## self.set_cons(i)
if (not sign):
self.reverse(h3)
for i in l:
i.set_component(h2.component)
self.connect_sum(h2,l[0])
self.remove_joins()
fr=sf1+sf2+((-1)**(not sign))*lk #sets new seifert framing
        h2.component.framing=int(self.writhe(h2.component)+fr) #taking back to blackboard framing
| 32.434911 | 144 | 0.528505 | 21,817 | 0.995029 | 0 | 0 | 0 | 0 | 0 | 0 | 4,554 | 0.207699 |
62098ed13ce2805c2274aa650c177f0c748ff79f | 401 | py | Python | projects/migrations/0017_project_status_isvalidated.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | ["MIT"] | 10 | 2018-05-13T18:01:57.000Z | 2018-12-23T17:11:14.000Z | projects/migrations/0017_project_status_isvalidated.py | moileretour/joatu | 9d18cb58b4280235688e269be6fd2d34b77ccead | ["MIT"] | 88 | 2018-05-04T15:33:46.000Z | 2022-03-08T21:09:21.000Z | projects/migrations/0017_project_status_isvalidated.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | ["MIT"] | 7 | 2018-05-08T16:05:06.000Z | 2018-09-13T05:49:05.000Z |
# Generated by Django 2.0.3 on 2018-03-26 01:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0016_auto_20180325_2116'),
]
operations = [
migrations.AddField(
model_name='project_status',
name='isValidated',
field=models.BooleanField(default=False),
),
]
| 21.105263 | 53 | 0.613466 | 308 | 0.76808 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.276808 |
6209f7fa8b911f0682b7e6cecc7dc9fe96d9e302 | 1,177 | py | Python | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | ["MIT"] | null | null | null | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | ["MIT"] | 1 | 2017-02-06T02:46:08.000Z | 2017-02-06T02:46:08.000Z | ps1/ps1_3.py | collin-li/mitx-6.00.1x | 7fb111586c6b82e205e86fadfb4d91d09de46808 | ["MIT"] | null | null | null |
# PROBLEM
#
# Assume s is a string of lower case characters.
#
# Write a program that prints the longest substring of s in which the letters
# occur in alphabetical order. For example, if s = 'azcbobobegghakl', then your
# program should print:
#
# 'Longest substring in alphabetical order is: beggh'
#
# In case of ties, print the first substring. For example, if s = 'abcbcd',
# then your program should print:
#
# 'Longest substring in alphabetical order is: abc'
# For test purposes
s = 'azcbobobegghakl'
# SOLUTION
if len(s) > 1:
substring = s[0]
length = 1
# Store initial solution
bestsubstring = substring
bestlength = length
for num in range(len(s)-1): # Last letter is checked by 2nd-last letter
if s[num] <= s[num+1]:
substring = substring + s[num+1]
length += 1
if length > bestlength:
bestsubstring = substring
bestlength = length
else: # Reset substring and length
substring = s[num+1]
length = 1
else:
bestsubstring = s
print ('Longest substring in alphabetical order is: ' + bestsubstring)
| 26.155556 | 80 | 0.626168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.546304 |
620afd69fe7804f73854cad5c0dd48effc58af61 | 992 | py | Python | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | ["MIT"] | null | null | null | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | ["MIT"] | null | null | null | bib2web/mandatory_fields.py | Juvawa/bib2web | 8d6c2244e46eefee1a519f8b3b656a143aa8bd9e | ["MIT"] | null | null | null |
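# Mandatory fields for each BibTeX entry type; every entry also carries its ENTRYTYPE and ID keys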
mandatory = \
{
'article' : ['ENTRYTYPE', 'ID', 'author', 'title', 'journal', 'year', 'volume'],
'book' : ['ENTRYTYPE', 'ID', 'title', 'publisher', 'year'],
'booklet' : ['ENTRYTYPE', 'ID', 'title', 'year'],
'conference' : ['ENTRYTYPE', 'ID', 'author', 'title', 'booktitle', 'publisher', 'year'],
'inbook' : ['ENTRYTYPE', 'ID', 'title', 'publisher', 'year'],
'incollection' : ['ENTRYTYPE', 'ID', 'author', 'title', 'booktitle', 'publisher', 'year'],
'inproceedings' : ['ENTRYTYPE', 'ID', 'author', 'title', 'booktitle', 'year'],
'manual' : ['ENTRYTYPE', 'ID', 'title', 'year'],
'mastersthesis' : ['ENTRYTYPE', 'ID', 'author', 'title', 'school', 'year'],
'misc' : ['ENTRYTYPE', 'ID', 'title', 'year'],
'phdthesis' : ['ENTRYTYPE', 'ID', 'author', 'title', 'school', 'year'],
'proceedings' : ['ENTRYTYPE', 'ID', 'title', 'year'],
'techreport' : ['ENTRYTYPE', 'ID', 'author', 'title', 'institution', 'year'],
'unpublished' : ['ENTRYTYPE', 'ID', 'author', 'title', 'note']
}
| 58.352941 | 91 | 0.563508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 730 | 0.735887 |
620c76cdf8b6c6cb6855109a069ebc57b866672e | 6,809 | py | Python | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | 5 | 2020-01-21T21:11:49.000Z | 2022-02-06T19:55:28.000Z | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | null | null | null | trainAndTest/processOneFold_sm.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | 3 | 2018-05-25T14:57:36.000Z | 2022-01-27T12:53:41.000Z |
from __future__ import print_function
import itertools
import sys, os
import inspect
import numpy as np
from joblib import load as joblib_load
from .resultsManager import ResultsManager
#from .classifiers.randomForest import trainMethod, predictMethod
from .classifiers.xgBoost import trainMethod, predictMethod
def getDataForTestFromPrefix( testPrefix, testPath ):
'''
  Load a data file whose name starts with testPrefix and that is contained in testPath.
Returns a tuple with all data needed to perform predictions and testing
  :param testPrefix: str. The prefix of the filename to be loaded. E.g. "1A2K"
  :param testPath: str. The path where data files are contained
:return (data_d, data_t, ppiComplex.getLabels(), ppiComplex.getIds())
data_d: np.array (n,m). A np.array that can be feed to the classifier. Each row represents
a pair of amino acids in direct form (first ligand aa second receptor aa)
data_l: np.array (n,m). A np.array that can be feed to the classifier. Each row represents
a pair of amino acids in transpose form (first receptor aa second ligand aa)
ppiComplex.getLabels(): np.array which contains the labels (-1, 1 ) of each row (pair of amino acids)
ppiComplex.getIds(): pandas.DataFrame whose columns are:
chainIdL resIdL resNameL chainIdR resIdR resNameR categ
'''
for fname in sorted(os.listdir(testPath)):
if fname.startswith(testPrefix):
ppiComplex= joblib_load(os.path.join(testPath, fname) )
data_d,data_t= ppiComplex.getData()
return (data_d, data_t, ppiComplex.getLabels(), ppiComplex.getIds())
def getDataForClassifierFromComplexes(listOfComplexes):
'''
Extracts the needed information to train a classifier from a list of codified complexes
(codifyComplexes.ComplexCodified.ComplexCodified).
  :param listOfComplexes: [codifyComplexes.ComplexCodified.ComplexCodified]. The codified complexes that will be used
                          for training
:return (dataDir,dataTrans, labels)
dataDir: np.array (n,m). A np.array that can be feed to the classifier. Each row represents
a pair of amino acids in direct form (first ligand aa second receptor aa)
dataTrans: np.array (n,m). A np.array that can be feed to the classifier. Each row represents
a pair of amino acids in transpose form (first receptor aa second ligand aa)
labels: np.array which contains the labels (-1, 1 ) of each row (pair of amino acids)
'''
dataDir= []
dataTrans= []
labels= []
prefixes= []
complexesNumId=[]
if not isinstance(listOfComplexes, list) and not isinstance(listOfComplexes, tuple):
listOfComplexes= [listOfComplexes]
for complexNum, ppiComplex in enumerate(listOfComplexes):
if not inspect.isgenerator(ppiComplex):
ppiComplex= [ppiComplex]
for dataBatch in ppiComplex: #In case ppiComplex is an iterator of chunks
data_d,data_t= dataBatch.getData()
dataDir.append( data_d)
dataTrans.append( data_t)
labels.append( dataBatch.getLabels())
prefixes.append(dataBatch.getPrefix())
complexesNumId+= [complexNum]* data_d.shape[0]
# print(dataBatch.prefix, np.max(data_d),np.max(data_t))
dataDir= np.concatenate(dataDir)
dataTrans= np.concatenate(dataTrans)
labels= np.concatenate(labels)
return dataDir,dataTrans, labels, complexesNumId
def trainAndTestOneFold(trainData, testPrefixes, testPath, outputPath, verbose=False, ncpu= 1):
'''
Trains and tests one fold
  :param trainData: np.array used for training; the first column holds the labels and the remaining columns the features
:param testPrefixes: str[]. A list that contains prefixes for all complexes to be tested
:param testPath: str. Path to a dir where testing data files are stored
:param outputPath: str. Path to a dir where predictions will be stored
  :param verbose: boolean. Whether or not to print progress info to stdout
  :param ncpu: int. Number of CPUs to use in parallel
'''
resultsForEvaluation_list= []
testPrefixesNotEvaluated=[]
finalResults=[]
for testPrefix in testPrefixes:
if outputPath is not None:
outName= os.path.join( outputPath, testPrefix+".res.tab")
if verbose and os.path.isfile(outName):
print("Complex already computed: %s"%(outName))
resultsForEvaluation_list.append( (testPrefix, ResultsManager.loadExistingResults(outName) ) )
else:
testPrefixesNotEvaluated.append( testPrefix )
else:
testPrefixesNotEvaluated.append( testPrefix )
modelo=None
if len(testPrefixesNotEvaluated)> 0 or len(testPrefixes)==0:
if verbose:
print("Testing:", testPrefixesNotEvaluated)
print("Training classifier")
verboseLevel=1
else:
verboseLevel=0
# dataDir,dataTrans, labels, __ = getDataForClassifierFromComplexes(trainComplexes)
# trainData= np.concatenate([dataDir,dataTrans])
# trainLabels= np.concatenate([labels,labels])
# dataDir,dataTrans, labels = (None, None, None)
# trainLabels, trainData= trainData[:, 0]
modelo= trainMethod(trainData[:, 1:], trainData[:, 0], verboseLevel= verboseLevel, ncpu= ncpu)
if verbose==True: print ("Classifier fitted.")
for testPrefix in testPrefixesNotEvaluated:
prob_predictionsDir_list= []
prob_predictionsTrans_list=[]
testlabels_list=[]
testPairsIds_list=[]
if verbose==True: print("Computing predictions for %s"%(testPrefix))
testDataDirect, testDataTrans, testlabels, testPairsIds= getDataForTestFromPrefix( testPrefix, testPath )
prob_predictionsDir= predictMethod(modelo, testDataDirect)
prob_predictionsTrans= predictMethod(modelo,testDataTrans)
resultEval= ResultsManager(testPrefix, prob_predictionsDir, prob_predictionsTrans, testPairsIds)
if verbose==True: print("Evaluating predictions of %s"%(testPrefix))
resultEval.getFullEvaluation()
if verbose==True: print(resultEval)
## raw_input("press enter")
finalResults.append( resultEval )
if not outputPath is None:
outName= os.path.join(outputPath, testPrefix+".res.tab")
if not os.path.isfile(outName):
if verbose==True: print("Saving results at %s"%(outName))
resultEval.writeResults(outName)
for testPrefix, resultEval in resultsForEvaluation_list:
if verbose==True: print("Evaluating predictions for %s"%(testPrefix))
resultEval.getFullEvaluation()
if verbose==True: print(resultEval)
finalResults.append( resultEval )
return finalResults, modelo
| 46.636986 | 122 | 0.697606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,225 | 0.473638 |
620d0e354faec7f287cbe008e0fab6e397c53f56 | 7,289 | py | Python | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | ["Apache-2.0"] | 9 | 2017-10-31T10:36:34.000Z | 2020-10-07T01:31:38.000Z | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | ["Apache-2.0"] | null | null | null | manila_tempest_tests/tests/api/admin/test_snapshot_export_locations_negative.py | openstack/manila-tempest-plugin | 9c4a97b150e15b814acd4feb4da858a0eeff881e | ["Apache-2.0"] | 4 | 2018-07-19T13:55:51.000Z | 2021-11-05T17:50:27.000Z |
# Copyright (c) 2017 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
class SnapshotExportLocationsNegativeTest(base.BaseSharesMixedTest):
@classmethod
def skip_checks(cls):
super(SnapshotExportLocationsNegativeTest, cls).skip_checks()
if not CONF.share.run_snapshot_tests:
raise cls.skipException('Snapshot tests are disabled.')
if not CONF.share.run_mount_snapshot_tests:
raise cls.skipException('Mountable snapshots tests are disabled.')
utils.check_skip_if_microversion_not_supported("2.32")
@classmethod
def setup_clients(cls):
super(SnapshotExportLocationsNegativeTest, cls).setup_clients()
cls.admin_client = cls.admin_shares_v2_client
cls.different_project_client = cls.alt_shares_v2_client
@classmethod
def resource_setup(cls):
super(SnapshotExportLocationsNegativeTest, cls).resource_setup()
# create share type
extra_specs = {
'snapshot_support': True,
'mount_snapshot_support': True,
}
cls.share_type = cls.create_share_type(extra_specs=extra_specs)
cls.share_type_id = cls.share_type['id']
# create share
cls.share = cls.create_share(share_type_id=cls.share_type_id,
client=cls.admin_client)
cls.snapshot = cls.create_snapshot_wait_for_active(
cls.share['id'], client=cls.admin_client)
cls.snapshot = cls.admin_client.get_snapshot(
cls.snapshot['id'])['snapshot']
cls.snapshot_instances = cls.admin_client.list_snapshot_instances(
snapshot_id=cls.snapshot['id'])['snapshot_instances']
@decorators.idempotent_id('53f0f184-7398-4e7a-ac21-fa432570db7f')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_get_inexistent_snapshot_export_location(self):
self.assertRaises(
lib_exc.NotFound,
self.admin_client.get_snapshot_export_location,
self.snapshot['id'],
"fake-inexistent-snapshot-export-location-id",
)
@decorators.idempotent_id('43229517-bf93-4be7-9f89-a69034d2f03c')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_list_snapshot_export_locations_by_different_project_user(self):
self.assertRaises(
lib_exc.NotFound,
self.different_project_client.list_snapshot_export_locations,
self.snapshot['id']
)
@decorators.idempotent_id('66839514-796a-4ee9-a8ed-7614521d01d5')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_get_snapshot_export_location_by_different_project_user(self):
export_locations = (
self.admin_client.list_snapshot_export_locations(
self.snapshot['id'])['share_snapshot_export_locations'])
for export_location in export_locations:
if export_location['is_admin_only']:
continue
self.assertRaises(
lib_exc.NotFound,
self.different_project_client.get_snapshot_export_location,
self.snapshot['id'],
export_location['id'])
@decorators.idempotent_id('52e0b807-7b29-4795-960a-518bcadc1503')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_get_inexistent_snapshot_instance_export_location(self):
for snapshot_instance in self.snapshot_instances:
self.assertRaises(
lib_exc.NotFound,
self.admin_client.get_snapshot_instance_export_location,
snapshot_instance['id'],
"fake-inexistent-snapshot-export-location-id",
)
@decorators.idempotent_id('3f4e2a0e-1522-47fb-b770-9d7a0651dde2')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_get_snapshot_instance_export_location_by_member(self):
for snapshot_instance in self.snapshot_instances:
export_locations = (
self.admin_client.list_snapshot_instance_export_locations(
snapshot_instance['id'])['share_snapshot_export_locations']
)
for el in export_locations:
self.assertRaises(
lib_exc.Forbidden,
(self.different_project_client.
get_snapshot_instance_export_location),
snapshot_instance['id'], el['id'],
)
class SnapshotExportLocationsAPIOnlyNegativeTest(base.BaseSharesMixedTest):
@classmethod
def skip_checks(cls):
super(SnapshotExportLocationsAPIOnlyNegativeTest, cls).skip_checks()
if not CONF.share.run_snapshot_tests:
raise cls.skipException('Snapshot tests are disabled.')
if not CONF.share.run_mount_snapshot_tests:
raise cls.skipException('Mountable snapshots tests are disabled.')
utils.check_skip_if_microversion_not_supported('2.32')
@classmethod
def setup_clients(cls):
super(SnapshotExportLocationsAPIOnlyNegativeTest, cls).setup_clients()
cls.admin_client = cls.admin_shares_v2_client
# admin_member_client is a regular user in admin's project
cls.admin_member_client = (
cls.admin_project_member_client.shares_v2_client)
@decorators.idempotent_id('37901216-b574-4786-9b1d-9b1ccdf123d2')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_export_locations_by_nonexistent_snapshot(self):
self.assertRaises(
lib_exc.NotFound,
self.admin_client.list_snapshot_export_locations,
"fake-inexistent-snapshot-id",
)
@decorators.idempotent_id('c2aa3770-c061-4b49-83ac-ab29773c2e0c')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_export_locations_by_nonexistent_snapshot_instance(self):
self.assertRaises(
lib_exc.NotFound,
self.admin_client.list_snapshot_instance_export_locations,
"fake-inexistent-snapshot-instance-id",
)
@decorators.idempotent_id('74d5d46d-8161-4e17-acbc-812248d6d694')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_inexistent_snapshot_instance_export_locations_by_member(
self):
self.assertRaises(
lib_exc.Forbidden,
self.admin_member_client.list_snapshot_instance_export_locations,
"fake-inexistent-snapshot-instance-id"
)
| 41.651429 | 79 | 0.69255 | 6,384 | 0.87584 | 0 | 0 | 6,163 | 0.845521 | 0 | 0 | 1,580 | 0.216765 |
620d308ec14780c98f6cbb15fbaefde43dfb9edb | 6,614 | py | Python | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | ["MIT"] | 60 | 2018-01-12T07:33:15.000Z | 2021-12-28T23:06:28.000Z | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | ["MIT"] | 13 | 2018-01-23T00:14:35.000Z | 2020-04-23T00:03:31.000Z | gateway/spvtable.py | trinity-project/trinity | 081eba1d4294a3bed33ba18c3f7b862b8803ee22 | ["MIT"] | 13 | 2018-01-05T07:27:29.000Z | 2021-01-06T16:45:05.000Z |
"""Author: Trinity Core Team
MIT License
Copyright (c) 2018 Trinity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import json
import copy
from treelib import Node, Tree
from treelib.exceptions import DuplicatedNodeIdError
import re
def parse_uri(uri):
# fixed url format: publicKey@IP:PORT
if isinstance(uri, str):
return re.split('[@:]', uri)
return None
class RouteTree(Tree):
"""
# Node(tag, nid, data)
    # tag: readable node name for humans
    # nid: unique id within the scope of the tree
"""
def __init__(self):
super().__init__()
# record the route path
self.route_path = []
def create(self,tag, identifier, data):
self.create_node(tag=tag, identifier=identifier, data=data)
self.root = identifier
def find_router(self, identifier, policy=None):
"""
:param identifier: use the url as the identifier
:param policy: not used currently
:return:
"""
self.route_path = [nid for nid in self.rsearch(identifier)][::-1]
return self.route_path
@property
def next_jump(self):
try:
return self.route_path[self.route_path.index(self.root)+1]
except Exception:
return None
@classmethod
def to_tree(cls, tr_json):
tree = cls()
for item in json.loads(tr_json):
tree.expand_branch(tr_json = tr_json)
return tree
def expand_branch(self, tr_json, father= None):
tr = json.loads(tr_json)
tag = list(tr.keys())[0]
nid = tr[tag]["data"]["Ip"]
try:
self.create_node(tag=tag, identifier=nid, parent=father, data=tr[tag]["data"])
except DuplicatedNodeIdError:
pass
# print(tr.values())
child = list(tr.values())[0].get("children")
# print(child)
if child:
for item in child:
self.expand_branch(json.dumps(item), father=nid)
else:
pass
def sync_tree(self, peer_tree):
"""
        get all peer node ids\n
        traverse all peers\n
        deep copy the current tree to get the new_tree\n
        make the child the new_tree root\n
:param peer_tree:
:return:
"""
copy_peer_tree = copy.deepcopy(peer_tree)
        # remove nodes that both trees already contain before grafting the peer tree onto this one
for self_nid in self.nodes.keys():
if copy_peer_tree.contains(self_nid) and self_nid != peer_tree.root:
copy_peer_tree.remove_node(self_nid)
if self.contains(peer_tree.root):
self.remove_node(peer_tree.root)
# print(peer_tree.to_dict(with_data=True))
self.paste(self.root, copy_peer_tree)
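# Plain record of a wallet's connection info (address, ip, port, public key, deposit, fee)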
class WalletSet(object):
def __init__(self, **kwargs):
self.address = None
self.ip = None
self.port = None
self.public_key = None
self.deposit = None
self.fee = 0
self.__dict__.update(kwargs)
class SPVHashTable(object):
"""
    Description: a dictionary that maps each wallet node's public key to the list of SPV public keys attached to it
"""
hash_instance = None
def __init__(self):
self.__maps = {}
pass
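    # singleton: __new__ always hands back the one shared instance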
def __new__(cls, *args, **kwargs):
if not cls.hash_instance:
cls.hash_instance = object.__new__(cls, *args, **kwargs)
return cls.hash_instance
@property
def maps(self):
return self.__maps
def find_keys(self, spv_key):
"""
:param spv_key: The public key string of the spv\n
:return: list type. [wallet-1-public-key , wallet-2-public-key, ...]
"""
keys = []
for key in self.maps:
if spv_key in self.find(key):
keys.append(key)
return keys
def find(self, key):
"""
:param key: The public key string of the wallet\n
:return: list type. [spv-1-public-key , spv-2-public-key, ...]
"""
return self.maps.get(key)
def add(self, key, value):
"""
:param key: The public key string of the wallet
:param value: the public key of the spv
:return:
"""
if key not in self.maps.keys():
self.maps.update({key:[value]})
else:
self.maps[key].append(value)
# elif value not in self.maps.get(key):
# self.maps[key].append(value)
def remove(self, key, value):
"""
:param key: The public key string of the wallet
:param value: the public key of the spv
:return:
"""
if key in self.maps.keys():
spv_list = self.maps[key]
if value in spv_list:
spv_list.remove(value)
def sync_table(self, hash_table):
"""
:param hash_table: json or dict type
:return:
"""
if isinstance(hash_table, str):
# decoder
hash_table = self.to_dict(hash_table)
if not hash_table:
return
for key in hash_table:
if key in self.maps:
self.maps[key].extend(hash_table[key])
self.maps[key] = list(set(self.maps[key]))
else:
self.maps[key] = hash_table[key]
def to_json(self):
return json.dumps(self.maps)
@staticmethod
def to_dict(s):
return json.loads(s)
| 29.659193 | 91 | 0.580738 | 5,169 | 0.781524 | 0 | 0 | 477 | 0.07212 | 0 | 0 | 2,662 | 0.40248 |
620eed4cbd2619972703ee779c16c8a7ab6c7ba9 | 54 | py | Python | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | ["MIT"] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | ["MIT"] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/apps/startposes/models/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | ["MIT"] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z |
from .startpos import StartPos, StartPosCumWeightOnly
| 27 | 53 | 0.87037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
620fe82f37b4b5c4aa7773e3305715688885bc0e | 8,342 | py | Python | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | joan726/BigDL | 2432f420418c8ccf02325f8677c94f291e112053 | ["Apache-2.0"] | 3 | 2021-07-14T01:28:47.000Z | 2022-03-02T01:16:32.000Z | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | liangs6212/BigDL | 3c89ff7e8bbdc713110536c18099506811cd2b3a | ["Apache-2.0"] | null | null | null | python/chronos/test/bigdl/chronos/autots/test_tspipeline.py | liangs6212/BigDL | 3c89ff7e8bbdc713110536c18099506811cd2b3a | ["Apache-2.0"] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from unittest import TestCase
import pytest
import torch
from torch.utils.data import TensorDataset, DataLoader
from bigdl.chronos.autots import AutoTSEstimator, TSPipeline
from bigdl.orca.common import init_orca_context, stop_orca_context
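# The two creators below build DataLoaders of random (input, target) tensor pairs whose shapes are read from the config dict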
def train_data_creator(config):
return DataLoader(TensorDataset(torch.randn(1000,
config.get('past_seq_len', 10),
config.get('input_feature_num', 2)),
torch.randn(1000,
config.get('future_seq_len', 2),
config.get('output_feature_num', 2))),
batch_size=config.get('batch_size', 32), shuffle=True)
def valid_data_creator(config):
return DataLoader(TensorDataset(torch.randn(1000,
config.get('past_seq_len', 10),
config.get('input_feature_num', 2)),
torch.randn(1000,
config.get('future_seq_len', 2),
config.get('output_feature_num', 2))),
batch_size=config.get('batch_size', 32), shuffle=False)
class TestTSPipeline(TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_seq2seq_tsppl_support_dataloader(self):
tmp_seq2seq_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="seq2seq",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10,
future_seq_len=2)
tsppl_seq2seq = autots.fit(data=train_data_creator({}),
validation_data=valid_data_creator({}),
epochs=2,
batch_size=32)
tsppl_seq2seq.save(tmp_seq2seq_dir.name)
del tsppl_seq2seq
stop_orca_context()
# load
tsppl_seq2seq = TSPipeline.load(tmp_seq2seq_dir.name)
tsppl_seq2seq.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_seq2seq._best_config['batch_size'] == 128
config = tsppl_seq2seq._best_config
# predict
yhat = tsppl_seq2seq.predict(valid_data_creator, batch_size=16)
assert yhat.shape == (1000,
config['future_seq_len'],
config['input_feature_num'])
assert tsppl_seq2seq._best_config['batch_size'] == 16
yhat = tsppl_seq2seq.predict_with_onnx(valid_data_creator, batch_size=64)
assert yhat.shape == (1000,
config['future_seq_len'],
config['input_feature_num'])
assert tsppl_seq2seq._best_config['batch_size'] == 64
# evaluate
_, smape = tsppl_seq2seq.evaluate(valid_data_creator,
metrics=['mse', 'smape'],
batch_size=16)
assert tsppl_seq2seq._best_config['batch_size'] == 16
assert smape < 2.0
_, smape = tsppl_seq2seq.evaluate_with_onnx(valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_seq2seq._best_config['batch_size'] == 64
assert smape < 2.0
with pytest.raises(RuntimeError):
tsppl_seq2seq.predict(torch.randn(1000,
config['past_seq_len'],
config['input_feature_num']))
with pytest.raises(RuntimeError):
tsppl_seq2seq.evaluate(torch.randn(1000,
config['past_seq_len'],
config['input_feature_num']))
def test_tcn_tsppl_support_dataloader(self):
tmp_tcn_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="tcn",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10,
future_seq_len=2)
tsppl_tcn = autots.fit(data=train_data_creator({}),
validation_data=valid_data_creator({}),
epochs=2,
batch_size=32)
tsppl_tcn.save(tmp_tcn_dir.name)
del tsppl_tcn
stop_orca_context()
# load
tsppl_tcn = TSPipeline.load(tmp_tcn_dir.name)
tsppl_tcn.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_tcn._best_config['batch_size'] == 128
config = tsppl_tcn._best_config
yhat = tsppl_tcn.predict(data=valid_data_creator, batch_size=16)
assert tsppl_tcn._best_config['batch_size'] == 16
assert yhat.shape == (1000,
config['future_seq_len'],
config['output_feature_num'])
_, smape = tsppl_tcn.evaluate(data=valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_tcn._best_config['batch_size'] == 64
assert smape < 2.0
def test_lstm_tsppl_support_dataloader(self):
tmp_lstm_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="lstm",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10)
tsppl_lstm = autots.fit(data=train_data_creator({'future_seq_len': 1}),
validation_data=valid_data_creator({'future_seq_len': 1}),
epochs=2,
batch_size=32)
tsppl_lstm.save(tmp_lstm_dir.name)
del tsppl_lstm
stop_orca_context()
# load
tsppl_lstm = TSPipeline.load(tmp_lstm_dir.name)
tsppl_lstm.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_lstm._best_config['batch_size'] == 128
config = tsppl_lstm._best_config
yhat = tsppl_lstm.predict(data=valid_data_creator, batch_size=16)
assert tsppl_lstm._best_config['batch_size'] == 16
assert yhat.shape == (1000,
config['future_seq_len'],
config['output_feature_num'])
_, smape = tsppl_lstm.evaluate(data=valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_lstm._best_config['batch_size'] == 64
assert smape < 2.0
if __name__ == "__main__":
pytest.main([__file__])
| 44.849462 | 90 | 0.532846 | 6,335 | 0.75941 | 0 | 0 | 0 | 0 | 0 | 0 | 1,259 | 0.150923 |
62112ee54eed681ca29c3d8ae3b4bec88531086a | 42,076 | py | Python | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | ["MIT"] | 6 | 2020-06-18T18:42:24.000Z | 2022-01-26T06:21:13.000Z | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | ["MIT"] | 1 | 2021-12-23T11:12:17.000Z | 2021-12-23T11:12:17.000Z | src/app/QKeithleySweep.py | mwchalmers/QKeithleyControl | 94e85cd8bc42d54f2cef4d0cfdb3ee4b62bcba41 | ["MIT"] | 3 | 2019-12-24T20:43:23.000Z | 2021-08-29T13:48:17.000Z |
# ---------------------------------------------------------------------------------
# QKeithleySweep -> QVisaApplication
# Copyright (C) 2019 Michael Winters
# github: https://github.com/mesoic
# email: mesoic@protonmail.com
# ---------------------------------------------------------------------------------
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#!/usr/bin/env python
import os
import sys
import time
import threading
# Import numpy
import numpy as np
# Import QVisaApplication
from PyQtVisa import QVisaApplication
# Import PyQtVisa widgets
from PyQtVisa.widgets import QVisaUnitSelector
from PyQtVisa.widgets import QVisaDynamicPlot
# Import QT backends
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QMessageBox, QComboBox, QSpinBox, QDoubleSpinBox, QPushButton, QCheckBox, QLabel, QLineEdit, QStackedWidget, QSizePolicy
from PyQt5.QtCore import Qt, QStateMachine, QState, QObject
from PyQt5.QtCore import Qt, QStateMachine, QState, QObject
from PyQt5.QtGui import QIcon
# Container class to construct sweep measurement widget
class QKeithleySweep(QVisaApplication.QVisaApplication):
def __init__(self, _config):
# Inherits QVisaApplication -> QWidget
super(QKeithleySweep, self).__init__(_config)
# Generate Main Layout
self.gen_main_layout()
#####################################
# APPLICATION HELPER METHODS
#
# Wrapper method to get keitley write handle
# Returns the pyVisaDevice object
def keithley(self, __widget__):
return self.get_device_by_name( __widget__.currentText() )
# Method to refresh the widget
def refresh(self):
		# If any instruments have been initialized
if self.get_devices() is not None:
			# Reset the widget and add instruments
self.sweep_inst.refresh( self )
self.step_inst.refresh( self )
# Plot control widgets
self.plot_x_inst.refresh( self )
self.plot_y_inst.refresh( self )
# Update sweep parameters and enable output button
self.meas_button.setEnabled(True)
self.update_meas_params()
else:
# Disable output button
self.meas_button.setEnabled(False)
# Method to set sweep parameters
def set_sweep_params(self, start, stop, npts):
# No hysteresis
if self.sweep_hist.currentText() == "None":
sp = np.linspace(float(start), float(stop), int(npts) )
self._set_app_metadata("__sweep__", sp)
# Prepare reverse sweep
if self.sweep_hist.currentText() == "Reverse-sweep":
# Sweep centered hysteresis
sp = np.linspace(float(start), float(stop), int(npts) )
sp = np.concatenate( (sp, sp[-2::-1]) )
self._set_app_metadata("__sweep__", sp)
# Prepare a zero centered hysteresis
if self.sweep_hist.currentText() == "Zero-centered":
# Create a linspace
sp = np.linspace(float(start), float(stop), int(npts) )
# Extract positive slice
pos = np.where(sp > 0, sp, np.nan)
pos = pos[~np.isnan(pos)]
# Extract negative slice
neg = np.where(sp < 0, sp, np.nan)
neg = neg[~np.isnan(neg)]
# Create the zero centered hysteresis re-insert zeros
# Forward sweep, zero crossing
if (start < 0.) and (stop > 0.) and (start < stop):
sp = np.concatenate( ([0.0], pos, pos[-2::-1], [0.0], neg[::-1], neg[1::], [0.0]) )
# Reverse sweep, zero crossing
elif (start > 0.) and (stop < 0.) and (start > stop):
sp = np.concatenate( ([0.0], neg, neg[-2::-1], [0.0], pos[::-1], pos[1::], [0.0]) )
print(sp)
# If not zero crossing, default to "Reverse-sweep" case
else:
sp = np.concatenate( (sp, sp[-2::-1]) )
# Set meta field
self._set_app_metadata( "__sweep__", sp)
# Method to set step parameters
def set_step_params(self, start, stop, npts):
# No hysteresis
sp = np.linspace(float(start), float(stop), int(npts) )
self._set_app_metadata("__step__", sp)
#####################################
# MAIN LAYOUT
#
def gen_main_layout(self):
# Create Icon for QMessageBox
self._set_icon( QIcon(os.path.join(os.path.dirname(os.path.realpath(__file__)), "python.ico")))
# Create layout objects and set layout
self.layout = QHBoxLayout()
self.layout.addWidget(self.gen_main_ctrl(), 1)
self.layout.addWidget(self.gen_main_plot(), 3)
self.setLayout(self.layout)
#####################################
# MAIN LAYOUT
#
# Main controls:
# a) Measure button and state machine
# b) V-Step mode on/off state machine
# c) IV-sweep and V-step configure pages
# d) Save button
def gen_main_ctrl(self):
# Main control widget
self.meas_ctrl = QWidget()
self.meas_ctrl_layout = QVBoxLayout()
#####################################
# MEASURE STATE MACHINE AND BUTTON
#
# Measurement Button. This will be a state machine which
# alternates between 'measure' and 'abort' states
self.meas_state = QStateMachine()
self.meas_button = QPushButton()
self.meas_button.setStyleSheet(
"background-color: #dddddd; border-style: solid; border-width: 1px; border-color: #aaaaaa; padding: 7px;" )
# Create measurement states
self.meas_run = QState()
self.meas_stop = QState()
# Assign state properties and transitions
self.meas_run.assignProperty(self.meas_button, 'text', 'Abort Sweep')
self.meas_run.addTransition(self.meas_button.clicked, self.meas_stop)
self.meas_run.entered.connect(self.exec_meas_run)
self.meas_stop.assignProperty(self.meas_button, 'text', 'Measure Sweep')
self.meas_stop.addTransition(self.meas_button.clicked, self.meas_run)
self.meas_stop.entered.connect(self.exec_meas_stop)
# Add states, set initial state, and state machine
self.meas_state.addState(self.meas_run)
self.meas_state.addState(self.meas_stop)
self.meas_state.setInitialState(self.meas_stop)
self.meas_state.start()
# Meas pages
self.meas_pages = QStackedWidget()
self.meas_pages.addWidget(self.gen_sweep_ctrl())
self.meas_pages.addWidget(self.gen_step_ctrl())
self.meas_pages.addWidget(self.gen_plot_ctrl())
# Meta widget for trace description
self.meta_widget_label = QLabel("<b>Trace Description</b>")
self.meta_widget = self._gen_meta_widget()
self.meta_widget.set_meta_subkey("__desc__")
# Save widget
self.save_widget = self._gen_save_widget()
# Pack widgets into layout
self.meas_ctrl_layout.addWidget(self.meas_button)
self.meas_ctrl_layout.addWidget(self.gen_config_ctrl())
self.meas_ctrl_layout.addWidget(self.meas_pages)
# Add save widget
self.meas_ctrl_layout.addStretch(1)
self.meas_ctrl_layout.addWidget(self.meta_widget_label)
self.meas_ctrl_layout.addWidget(self.meta_widget)
self.meas_ctrl_layout.addWidget(self.save_widget)
# Set layout and return widget reference
self.meas_ctrl.setLayout(self.meas_ctrl_layout)
return self.meas_ctrl
#####################################
# CONFIGURE WIDGET
#
def gen_config_ctrl(self):
self.meas_config = QWidget()
self.meas_config_layout = QVBoxLayout()
		# Configuration page selector (IV-sweep / IV-step / IV-plot)
self.meas_config_page_label = QLabel("<b>Configure Parameters</b>")
self.meas_config_page = QComboBox()
self.meas_config_page.setFixedWidth(200)
self.meas_config_page.addItems(["IV-sweep", "IV-step", "IV-plot"])
self.meas_config_page.currentTextChanged.connect(self.update_config_page)
# Add some space for layout clarity
self.meas_config_layout.setContentsMargins(0,10,0,10)
self.meas_config_layout.addWidget(self._gen_vbox_widget([self.meas_config_page_label, self.meas_config_page]))
# Pack config layout and return reference
self.meas_config.setLayout(self.meas_config_layout)
return self.meas_config
# Sweep control layout
def gen_sweep_ctrl(self):
self.sweep_ctrl = QWidget()
self.sweep_ctrl_layout = QVBoxLayout()
# Main control label
self.sweep_ctrl_label = QLabel("<b>IV-sweep Parameters</b>")
#####################################
# SWEEP INST SELECT
#
		# Instrument selector
self.sweep_inst_label = QLabel("Select Device")
self.sweep_inst = self._gen_device_select()
self.sweep_inst.setFixedWidth(200)
#####################################
# SWEEP MEASUREMENT CONFIGURATION
#
# Current/Voltage Sweep Mode
self.sweep_src_label = QLabel("Sweep Type")
self.sweep_src = QComboBox()
self.sweep_src.setFixedWidth(200)
self.sweep_src.addItems(["Voltage", "Current"])
self.sweep_src.currentTextChanged.connect(self.update_sweep_ctrl)
# Generate voltage and current source widgets
self.gen_voltage_sweep() # self.voltage_sweep
self.gen_current_sweep() # self.current_sweep
# Add to stacked widget
self.sweep_pages = QStackedWidget()
self.sweep_pages.addWidget(self.voltage_sweep)
self.sweep_pages.addWidget(self.current_sweep)
self.sweep_pages.setCurrentIndex(0)
# Hysteresis mode
self.sweep_hist_label = QLabel("Hysteresis Mode")
self.sweep_hist = QComboBox()
self.sweep_hist.setFixedWidth(200)
self.sweep_hist.addItems(["None", "Reverse-sweep", "Zero-centered"])
#####################################
# ADD CONTROLS
#
# Sweep configuration controls
self.sweep_ctrl_layout.addWidget(self.sweep_ctrl_label)
self.sweep_ctrl_layout.addWidget(self._gen_hbox_widget([self.sweep_inst,self.sweep_inst_label]))
self.sweep_ctrl_layout.addWidget(self._gen_hbox_widget([self.sweep_src, self.sweep_src_label]))
self.sweep_ctrl_layout.addWidget(self._gen_hbox_widget([self.sweep_hist, self.sweep_hist_label]))
self.sweep_ctrl_layout.addWidget(self.sweep_pages)
# Positioning
self.sweep_ctrl.setLayout(self.sweep_ctrl_layout)
return self.sweep_ctrl
# Step control layout
def gen_step_ctrl(self):
self.step_ctrl = QWidget()
self.step_ctrl_layout = QVBoxLayout()
# Step control label
self.step_ctrl_label = QLabel("<b>V-step Parameters</b>")
		# Voltage step instrument selector
self.step_inst_label = QLabel("Select Device")
self.step_inst = self._gen_device_select()
self.step_inst.setFixedWidth(200)
# Step control mode selector
self.step_src_label = QLabel("Step Type")
self.step_src = QComboBox()
self.step_src.setFixedWidth(200)
self.step_src.addItems(["Voltage", "Current"])
self.step_src.currentTextChanged.connect(self.update_step_ctrl)
# Generate voltage and current source widgets
self.gen_voltage_step() # self.voltage_step
self.gen_current_step() # self.current_step
# Add step modes to step_pages widget
self.step_pages = QStackedWidget()
self.step_pages.addWidget(self.voltage_step)
self.step_pages.addWidget(self.current_step)
self.step_pages.setCurrentIndex(0)
# Step control state machine
self.step_state = QStateMachine()
self.step_button = QPushButton()
self.step_button.setStyleSheet(
"background-color: #dddddd; border-style: solid; border-width: 1px; border-color: #aaaaaa; padding: 7px;" )
# Create measurement states
self.step_on = QState()
self.step_off = QState()
# Assign state properties and transitions
self.step_on.assignProperty(self.step_button, 'text', 'Step Bias ON')
self.step_on.addTransition(self.step_button.clicked, self.step_off)
self.step_on.entered.connect(self.exec_step_on)
self.step_off.assignProperty(self.step_button, 'text', 'Step Bias OFF')
self.step_off.addTransition(self.step_button.clicked, self.step_on)
self.step_off.entered.connect(self.exec_step_off)
# Add states, set initial state, and state machine
self.step_state.addState(self.step_on)
self.step_state.addState(self.step_off)
self.step_state.setInitialState(self.step_off)
self.step_state.start()
# Pack widgets
self.step_ctrl_layout.addWidget(self.step_ctrl_label)
self.step_ctrl_layout.addWidget(self._gen_hbox_widget([self.step_inst,self.step_inst_label]))
self.step_ctrl_layout.addWidget(self._gen_hbox_widget([self.step_src, self.step_src_label]))
self.step_ctrl_layout.addWidget(self.step_pages)
self.step_ctrl_layout.addWidget(self.step_button)
self.step_ctrl_layout.addStretch(1)
# Set layout and return reference
self.step_ctrl.setLayout(self.step_ctrl_layout)
return self.step_ctrl
# Plot control layout
def gen_plot_ctrl(self):
self.plot_ctrl = QWidget()
self.plot_ctrl_layout = QVBoxLayout()
		# Plot x-axis instrument selector
self.plot_x_inst_label = QLabel("<b>Configure x-axis</b>")
self.plot_x_inst = self._gen_device_select()
self.plot_x_inst.setFixedWidth(200)
self.plot_x_inst.set_callback("update_plot_ctrl")
self.plot_x_data = QComboBox()
self.plot_x_data.setFixedWidth(100)
self.plot_x_data.addItems(["Voltage", "Current"])
self.plot_x_data.currentTextChanged.connect( self.update_plot_ctrl )
		# Plot y-axis instrument selector
self.plot_y_inst_label = QLabel("<b>Configure y-axis</b>")
self.plot_y_inst = self._gen_device_select()
self.plot_y_inst.setFixedWidth(200)
self.plot_y_inst.set_callback("update_plot_ctrl")
self.plot_y_data = QComboBox()
self.plot_y_data.setFixedWidth(100)
self.plot_y_data.addItems(["Voltage", "Current"])
self.plot_y_data.setCurrentIndex(1)
self.plot_y_data.currentTextChanged.connect( self.update_plot_ctrl )
# Add widgets
self.plot_ctrl_layout.addWidget( self.plot_x_inst_label )
self.plot_ctrl_layout.addWidget( self._gen_hbox_widget( [self.plot_x_inst,self.plot_x_data]) )
self.plot_ctrl_layout.addWidget( self.plot_y_inst_label )
self.plot_ctrl_layout.addWidget( self._gen_hbox_widget( [self.plot_y_inst,self.plot_y_data]) )
self.plot_ctrl_layout.addStretch(1)
# Set layout and return reference
self.plot_ctrl.setLayout(self.plot_ctrl_layout)
return self.plot_ctrl
# Generate voltage sweep widget
def gen_voltage_sweep(self):
# New QWidget
self.voltage_sweep = QWidget()
self.voltage_sweep_layout = QVBoxLayout()
# Sweep Start
self.voltage_sweep_start_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Sweep Start (V)",
"signed" : True,
"limit" : [20.0, ""],
"default" : [0.00, ""]
}
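		# Note: each unit-selector config below appears to follow the same convention:
		# "limit" and "default" are [value, SI-prefix] pairs (e.g. [150, "m"] ~ 150m) and
		# "min"/"max" bound the selectable prefix range; the exact semantics are defined
		# by QVisaUnitSelector and are only assumed here.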
self.voltage_sweep_start = QVisaUnitSelector.QVisaUnitSelector(self.voltage_sweep_start_config)
# Sweep Stop
self.voltage_sweep_stop_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Sweep Stop (V)",
"signed" : True,
"limit" : [20.0, ""],
"default" : [1.00, ""]
}
self.voltage_sweep_stop = QVisaUnitSelector.QVisaUnitSelector(self.voltage_sweep_stop_config)
# Compliance Spinbox
self.voltage_sweep_cmpl_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Compliance (A)",
"signed" : False,
"limit" : [1.0, "" ],
"default" : [150, "m"]
}
self.voltage_sweep_cmpl = QVisaUnitSelector.QVisaUnitSelector(self.voltage_sweep_cmpl_config)
# Number of points
self.voltage_sweep_npts_config={
"unit" : "__INT__",
"label" : "Number of Points",
"signed" : False,
"limit" : [512],
"default" : [51]
}
self.voltage_sweep_npts = QVisaUnitSelector.QVisaUnitSelector(self.voltage_sweep_npts_config)
# Measurement Delay
self.voltage_sweep_delay_config={
"unit" : "__DOUBLE__",
"label" : "Measurement Interval (s)",
"signed" : False,
"limit" : [60.0],
"default" : [0.10]
}
self.voltage_sweep_delay = QVisaUnitSelector.QVisaUnitSelector(self.voltage_sweep_delay_config)
# Pack selectors into layout
self.voltage_sweep_layout.addWidget(self.voltage_sweep_start)
self.voltage_sweep_layout.addWidget(self.voltage_sweep_stop)
self.voltage_sweep_layout.addWidget(self.voltage_sweep_cmpl)
self.voltage_sweep_layout.addWidget(self.voltage_sweep_npts)
self.voltage_sweep_layout.addWidget(self.voltage_sweep_delay)
self.voltage_sweep_layout.setContentsMargins(0,0,0,0)
# Set layout
self.voltage_sweep.setLayout(self.voltage_sweep_layout)
# Generate current sweep widget
def gen_current_sweep(self):
# New QWidget
self.current_sweep = QWidget()
self.current_sweep_layout = QVBoxLayout()
# Sweep Start
self.current_sweep_start_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Sweep Start (A)",
"signed" : True,
"limit" : [1.0, "" ],
"default" : [0.0, "m"]
}
self.current_sweep_start = QVisaUnitSelector.QVisaUnitSelector(self.current_sweep_start_config)
# Sweep Stop
self.current_sweep_stop_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Sweep Stop (A)",
"signed" : True,
"limit" : [1.0, "" ],
"default" : [100, "m"]
}
self.current_sweep_stop = QVisaUnitSelector.QVisaUnitSelector(self.current_sweep_stop_config)
# Compliance Spinbox
self.current_sweep_cmpl_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Compliance (V)",
"signed" : False,
"limit" : [20., ""],
"default" : [1.0, ""]
}
self.current_sweep_cmpl = QVisaUnitSelector.QVisaUnitSelector(self.current_sweep_cmpl_config)
# Number of points
self.current_sweep_npts_config={
"unit" : "__INT__",
"label" : "Number of Points",
"signed" : False,
"limit" : [256],
"default" : [11]
}
self.current_sweep_npts = QVisaUnitSelector.QVisaUnitSelector(self.current_sweep_npts_config)
# Measurement Delay
self.current_sweep_delay_config={
"unit" : "__DOUBLE__",
"label" : "Measurement Interval (s)",
"signed" : False,
"limit" : [60.0],
"default" : [0.1]
}
self.current_sweep_delay = QVisaUnitSelector.QVisaUnitSelector(self.current_sweep_delay_config)
# Pack selectors into layout
self.current_sweep_layout.addWidget(self.current_sweep_start)
self.current_sweep_layout.addWidget(self.current_sweep_stop)
self.current_sweep_layout.addWidget(self.current_sweep_cmpl)
self.current_sweep_layout.addWidget(self.current_sweep_npts)
self.current_sweep_layout.addWidget(self.current_sweep_delay)
self.current_sweep_layout.setContentsMargins(0,0,0,0)
# Set layout
self.current_sweep.setLayout(self.current_sweep_layout)
# Generate voltage step widget
def gen_voltage_step(self):
# New QWidget
self.voltage_step= QWidget()
self.voltage_step_layout = QVBoxLayout()
# Step Start
self.voltage_step_start_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Step Start (V)",
"signed" : True,
"limit" : [20.0, ""],
"default" : [0.00, ""]
}
self.voltage_step_start = QVisaUnitSelector.QVisaUnitSelector(self.voltage_step_start_config)
# Step Stop
self.voltage_step_stop_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Step Stop (V)",
"signed" : True,
"limit" : [20.0, ""],
"default" : [1.00, ""]
}
self.voltage_step_stop = QVisaUnitSelector.QVisaUnitSelector(self.voltage_step_stop_config)
# Step Compliance Spinbox
self.voltage_step_cmpl_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Compliance (A)",
"signed" : False,
"limit" : [1.0, "" ],
"default" : [150, "m"]
}
self.voltage_step_cmpl = QVisaUnitSelector.QVisaUnitSelector(self.voltage_step_cmpl_config)
# Step Number of points
self.voltage_step_npts_config={
"unit" : "__INT__",
"label" : "Number of Points",
"signed" : False,
"limit" : [256],
"default" : [5]
}
self.voltage_step_npts = QVisaUnitSelector.QVisaUnitSelector(self.voltage_step_npts_config)
# Pack selectors into layout
self.voltage_step_layout.addWidget(self.voltage_step_start)
self.voltage_step_layout.addWidget(self.voltage_step_stop)
self.voltage_step_layout.addWidget(self.voltage_step_cmpl)
self.voltage_step_layout.addWidget(self.voltage_step_npts)
self.voltage_step_layout.setContentsMargins(0,0,0,0)
# Set layout
self.voltage_step.setLayout(self.voltage_step_layout)
# Generate current step widget
def gen_current_step(self):
# New QWidget
self.current_step = QWidget()
self.current_step_layout = QVBoxLayout()
# Step Start
self.current_step_start_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Step Start (A)",
"signed" : True,
"limit" : [1.0, "" ],
"default" : [0.0, "m"]
}
self.current_step_start = QVisaUnitSelector.QVisaUnitSelector(self.current_step_start_config)
# Step Stop
self.current_step_stop_config={
"unit" : "A",
"min" : "u",
"max" : "",
"label" : "Step Stop (A)",
"signed" : True,
"limit" : [1.0, "" ],
"default" : [1.0, "m"]
}
self.current_step_stop = QVisaUnitSelector.QVisaUnitSelector(self.current_step_stop_config)
# Step Compliance Spinbox
self.current_step_cmpl_config={
"unit" : "V",
"min" : "u",
"max" : "",
"label" : "Compliance (V)",
"signed" : False,
"limit" : [20.0, ""],
"default" : [1.00, ""]
}
self.current_step_cmpl = QVisaUnitSelector.QVisaUnitSelector(self.current_step_cmpl_config)
# Step Number of points
self.current_step_npts_config={
"unit" : "__INT__",
"label" : "Number of Points",
"signed" : False,
"limit" : [256],
"default" : [5]
}
self.current_step_npts = QVisaUnitSelector.QVisaUnitSelector(self.current_step_npts_config)
# Pack selectors into layout
self.current_step_layout.addWidget(self.current_step_start)
self.current_step_layout.addWidget(self.current_step_stop)
self.current_step_layout.addWidget(self.current_step_cmpl)
self.current_step_layout.addWidget(self.current_step_npts)
self.current_step_layout.addStretch(1)
self.current_step_layout.setContentsMargins(0,0,0,0)
# Set layout
self.current_step.setLayout(self.current_step_layout)
	# Add dynamic plot
def gen_main_plot(self):
# Create QVisaDynamicPlot object (inherits QWidget)
self.plot = QVisaDynamicPlot.QVisaDynamicPlot(self)
self.plot.add_subplot(111)
self.plot.add_origin_lines("111", "both")
self.plot.set_axes_labels("111", "Voltage (V)", "Current (A)")
# Refresh canvas
self.plot.refresh_canvas(supress_warning=True)
# Sync plot clear data button with application data
self.plot.sync_application_data(True)
# Sync meta widget when clearing data
self.plot.set_mpl_refresh_callback("_sync_meta_widget_to_data_object")
# Return the plot
return self.plot
# Sync meta widget
def _sync_meta_widget_to_data_object(self):
# Application keys
_data_keys = self._get_data_object().keys()
_widget_keys = self.meta_widget.get_meta_keys()
# Check if widget keys are not in data keys
for _key in _widget_keys:
# If not then delete the key from meta_widget
if _key not in _data_keys:
self.meta_widget.del_meta_key(_key)
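		# This method is registered as the plot's mpl refresh callback (see gen_main_plot),
		# so stale trace keys are pruned from the meta widget whenever the plot data is cleared.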
#####################################
# UPDATE CONFIG PAGE
#
def update_config_page(self):
if self.meas_config_page.currentText() == "IV-sweep":
self.meas_pages.setCurrentIndex(0)
if self.meas_config_page.currentText() == "IV-step":
self.meas_pages.setCurrentIndex(1)
if self.meas_config_page.currentText() == "IV-plot":
self.meas_pages.setCurrentIndex(2)
#####################################
# SWEEP CONTROL UPDATE METHODS
#
# Sweep control dynamic update
def update_sweep_ctrl(self):
# Switch to voltage sweep page
if self.sweep_src.currentText() == "Voltage":
self.sweep_pages.setCurrentIndex(0)
self.update_meas_params()
# Switch to current sweep page
if self.sweep_src.currentText() == "Current":
self.sweep_pages.setCurrentIndex(1)
self.update_meas_params()
	# Step control dynamic update
def update_step_ctrl(self):
		# Switch to voltage step page
if self.step_src.currentText() == "Voltage":
self.step_pages.setCurrentIndex(0)
self.update_meas_params()
		# Switch to current step page
if self.step_src.currentText() == "Current":
self.step_pages.setCurrentIndex(1)
self.update_meas_params()
# Update plot axes when we change configuration
def update_plot_ctrl(self):
# Extract correct unit labels
x_unit = "(V)" if self.plot_x_data.currentText() == "Voltage" else "(A)"
y_unit = "(V)" if self.plot_y_data.currentText() == "Voltage" else "(A)"
# Update axes
self.plot.set_axes_labels("111",
"%s %s : %s"%(self.plot_x_data.currentText(), x_unit ,self.plot_x_inst.currentText()),
"%s %s : %s"%(self.plot_y_data.currentText(), y_unit ,self.plot_y_inst.currentText())
)
self.plot.update_canvas()
	# Update measurement parameters
def update_meas_params(self):
# Set up v-source(i-compliance) on keithley
if self.sweep_src.currentText() == "Voltage":
			# Set sweep parameters
self.set_sweep_params(
self.voltage_sweep_start.value(),
self.voltage_sweep_stop.value(),
self.voltage_sweep_npts.value())
# Set keithley as voltage source
if self.keithley(self.sweep_inst) is not None:
self.keithley(self.sweep_inst).voltage_src()
self.keithley(self.sweep_inst).set_voltage(0.0)
self.keithley(self.sweep_inst).current_cmp(self.voltage_sweep_cmpl.value())
# Set up i-source(v-compliance) on keithley
if self.sweep_src.currentText() == "Current":
			# Set sweep parameters
self.set_sweep_params(
self.current_sweep_start.value(),
self.current_sweep_stop.value(),
self.current_sweep_npts.value())
			# Set keithley as current source
if self.keithley(self.sweep_inst) is not None:
self.keithley(self.sweep_inst).current_src()
self.keithley(self.sweep_inst).set_current(0.0)
self.keithley(self.sweep_inst).voltage_cmp(self.current_sweep_cmpl.value())
		# Set up the step keithley source. Also ensure that we are not initializing
		# the sweep keithley with step params if the same device is doubly selected.
if ( ( self.keithley(self.step_inst) is not None) and
(self.keithley(self.step_inst) != self.keithley(self.sweep_inst) ) ):
# Set up v-source(i-compliance) on keithley
if self.step_src.currentText() == "Voltage":
				# Set step parameters
self.set_step_params(
self.voltage_step_start.value(),
self.voltage_step_stop.value(),
self.voltage_step_npts.value())
# Set keithley as voltage source
if self.keithley(self.step_inst) is not None:
self.keithley(self.step_inst).voltage_src()
self.keithley(self.step_inst).set_voltage(0.0)
self.keithley(self.step_inst).current_cmp(self.voltage_step_cmpl.value())
# Set up i-source(v-compliance) on keithley
if self.step_src.currentText() == "Current":
				# Set step parameters
self.set_step_params(
self.current_step_start.value(),
self.current_step_stop.value(),
self.current_step_npts.value())
				# Set keithley as current source
if self.keithley(self.step_inst) is not None:
self.keithley(self.step_inst).current_src()
self.keithley(self.step_inst).set_current(0.0)
self.keithley(self.step_inst).voltage_cmp(self.current_step_cmpl.value())
#####################################
# MEASUREMENT EXECUTION THREADS
#
# Function we run when we enter run state
def exec_step_on(self):
# Update UI button to abort
self.step_button.setStyleSheet(
"background-color: #cce6ff; border-style: solid; border-width: 1px; border-color: #1a75ff; padding: 7px;")
		# Check if no instruments are initialized
if self.sweep_inst.currentText() == "" and self.step_inst.currentText() == "":
# Message box to warn the user
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("No devices initialized")
msg.setWindowTitle("QKeithleySweep")
msg.setWindowIcon(self._icon)
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
# Set app meta and revert state
self._set_app_metadata("__exec_step__", False)
self.step_button.click()
		# Check if the same instrument is selected for sweep and step
elif self.sweep_inst.currentText() == self.step_inst.currentText():
# Message box to warn the user
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Same device %s selected for sweep and step parameters. Proceed?"%self.step_inst.currentText())
msg.setWindowTitle("QKeithleySweep")
msg.setWindowIcon(self._icon)
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
self.msg_clear = msg.exec_()
# Expose this for testing
if self.msg_clear == QMessageBox.Yes:
self._set_app_metadata("__exec_step__", True)
else:
self._set_app_metadata("__exec_step__", False)
self.step_button.click()
else:
self._set_app_metadata("__exec_step__", True)
# Function we run when we enter run state
def exec_step_off(self):
# Update UI button to abort
self.step_button.setStyleSheet(
"background-color: #dddddd; border-style: solid; border-width: 1px; border-color: #aaaaaa; padding: 7px;" )
self._set_app_metadata("__exec_step__", False)
# Execute Sweep-Step Measurement
def exec_sweep_step_thread(self):
# Generate function pointer for sweep voltage/current mode
if self.sweep_src.currentText() == "Voltage":
__sweep_func__ = self.keithley(self.sweep_inst).set_voltage
__sweep_delay__ = self.voltage_sweep_delay.value()
if self.sweep_src.currentText() == "Current":
__sweep_func__ = self.keithley(self.sweep_inst).set_current
__sweep_delay__ = self.current_sweep_delay.value()
		# Record the measurement start time
start = time.time()
# Use generator function so all traces have same color
_c = self.plot.gen_next_color()
_handle_index = 0
# Get data object
data = self._get_data_object()
# Master key
_root = data.add_hash_key("iv-sweep-v-step")
# Set metatata for root
if self.step_src.currentText() == "Voltage":
data.set_metadata(_root, "__type__", "iv-sweep-v-step")
if self.step_src.currentText() == "Current":
data.set_metadata(_root, "__type__", "iv-sweep-v-step")
# Add key to meta widget
self.meta_widget.add_meta_key(_root)
# Create internal data structure for buffers
buffers = {
"__sweep__" : {"inst" : self.sweep_inst, "data" : None},
"__step__" : {"inst" : self.step_inst , "data" : None},
"__plotx__" : None,
"__ploty__" : None
}
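		# Buffer routing: the sweep and step instruments are measured once per point; if a
		# plot axis maps to one of them its buffer simply aliases that reading, otherwise
		# the plot-axis instrument is measured separately in the loop below.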
		# Plot-axis instruments
for plot_key, plot_inst in zip(["__plotx__", "__ploty__" ], [ self.plot_x_inst, self.plot_y_inst] ):
if self.sweep_inst.currentText() == plot_inst.currentText():
buffers[ plot_key ] = {"inst" : "__sweep__", "data" : None }
elif self.step_inst.currentText() == plot_inst.currentText():
buffers[ plot_key ] = {"inst" : "__step__", "data" : None }
else:
buffers[ plot_key ] = {"inst" : plot_inst, "data" : None}
		# Loop through all instruments and enable outputs
for _key, _buffer in buffers.items():
if _buffer["inst"] not in ["__sweep__", "__step__"]:
self.keithley( _buffer["inst"] ).output_on()
# Loop through step variables and generate subkeys
for _step in self._get_app_metadata("__step__"):
# If thread is running
if self.thread_running:
# A hash is generated for each voltage/current step for ease of data processing
# Generate function pointer for step voltage/current mode
if self.step_src.currentText() == "Voltage":
__step_func__ = self.keithley(self.step_inst).set_voltage
# Generate data key and set metadata
data = self._get_data_object()
key = data.add_hash_key("iv-sweep-v-step%s"%_step)
# Add keys and metadata to data object
data.set_metadata(key, "__root__", _root)
data.set_metadata(key, "__step__", _step)
data.set_subkeys(key, ["t", "V0", "I0", "P0", "V1", "I1", "P1"])
# Current Mode
if self.step_src.currentText() == "Current":
__step_func__ = self.keithley(self.step_inst).set_current
key = data.add_hash_key("iv-sweep-i-step%s"%_step)
# Add keys and metadata to data object
data.set_metadata(key, "__root__", _root)
data.set_metadata(key, "__step__", _step)
data.set_subkeys(key, ["t", "V0", "I0", "P0", "V1", "I1", "P1"])
# Set step voltage/current
__step_func__(_step)
# Add axes handle to root
self.plot.add_axes_handle("111", _root, _color=_c)
# Bias settle
if __sweep_delay__ != 0:
time.sleep(__sweep_delay__)
# Loop through sweep variables
for _bias in self._get_app_metadata("__sweep__"):
# If thread is running
if self.thread_running:
# Set voltage/current bias
__sweep_func__(_bias)
# Get data from buffer
# Populate buffers
buffers["__sweep__"]["data"] = self.keithley( buffers["__sweep__"]["inst"] ).meas().split(",")
buffers["__step__"]["data"] = self.keithley( buffers["__step__"]["inst"] ).meas().split(",")
						# Plot instruments will copy sweep/step data or call meas() if needed
for plot_buffer in ["__plotx__", "__ploty__"]:
if buffers[plot_buffer]["inst"] == "__sweep__":
buffers[plot_buffer]["data"] = buffers["__sweep__"]["data"]
elif buffers[plot_buffer]["inst"] == "__step__":
buffers[plot_buffer]["data"] = buffers["__step__"]["data"]
else:
buffers[plot_buffer]["data"] = self.keithley( buffers[plot_buffer]["inst"] ).meas().split(",")
# Apply delay
if __sweep_delay__ != 0:
time.sleep(__sweep_delay__)
# Extract data from buffer
_now = float(time.time() - start)
# Append measured values to data arrays
data.append_subkey_data(key,"t", _now )
data.append_subkey_data(key,"V0", float( buffers["__sweep__"]["data"][0]) )
data.append_subkey_data(key,"I0", float( buffers["__sweep__"]["data"][1]) )
data.append_subkey_data(key,"P0", float( buffers["__sweep__"]["data"][0]) * float(buffers["__sweep__"]["data"][1]) )
data.append_subkey_data(key,"V1", float( buffers["__step__"]["data"][0]) )
data.append_subkey_data(key,"I1", float( buffers["__step__"]["data"][1]) )
data.append_subkey_data(key,"P1", float( buffers["__step__"]["data"][0]) * float(buffers["__step__"]["data"][1]) )
# Sync x-axis data
if self.plot_x_data.currentText() == "Voltage":
p0 = buffers["__plotx__"]["data"][0]
if self.plot_x_data.currentText() == "Current":
p0 = buffers["__plotx__"]["data"][1]
# Sync y-axis data
if self.plot_y_data.currentText() == "Voltage":
p1 = buffers["__ploty__"]["data"][0]
if self.plot_y_data.currentText() == "Current":
p1 = buffers["__ploty__"]["data"][1]
# Update the data
self.plot.append_handle_data("111", _root, float(p0), float(p1), _handle_index)
self.plot.update_canvas()
else:
break
# Increment handle index
_handle_index += 1
else:
break
# Reset active keithleys
__sweep_func__(0.0)
__step_func__(0.0)
		# Loop through all instruments and disable outputs
for _key, _buffer in buffers.items():
if _buffer["inst"] not in ["__sweep__", "__step__"]:
self.keithley( _buffer["inst"] ).output_off()
# Reset sweep control and update measurement state to stop.
# Post a button click event to the QStateMachine to trigger
# a state transition if thread is still running (not aborted)
if self.thread_running:
self.meas_button.click()
# Execute Sweep Measurement
def exec_sweep_thread(self):
# Generate data key
data = self._get_data_object()
key = data.add_hash_key("iv-sweep")
# Add data fields to key
data.set_subkeys(key, ["t", "V", "I", "P"])
data.set_metadata(key, "__type__", "iv-sweep")
# Add key to meta widget
self.meta_widget.add_meta_key(key)
# Generate function pointer for voltage/current mode
if self.sweep_src.currentText() == "Voltage":
__sweep_func__ = self.keithley(self.sweep_inst).set_voltage
__sweep_delay__ = self.voltage_sweep_delay.value()
if self.sweep_src.currentText() == "Current":
__sweep_func__ = self.keithley(self.sweep_inst).set_current
__sweep_delay__ = self.current_sweep_delay.value()
		# Add plot handle and record the measurement start time
handle = self.plot.add_axes_handle("111", key)
start = time.time()
# Create internal data structure for buffers
buffers = {
"__sweep__" : {"inst" : self.sweep_inst, "data" : None},
"__plotx__" : None,
"__ploty__" : None
}
		# Plot-axis instruments
for plot_key, plot_inst in zip( ["__plotx__", "__ploty__" ], [self.plot_x_inst, self.plot_y_inst] ):
if self.sweep_inst.currentText() == plot_inst.currentText():
buffers[plot_key] = {"inst" : "__sweep__", "data" : None }
else:
buffers[plot_key] = {"inst" : plot_inst, "data" : None}
		# Loop through all instruments and enable outputs
for _key, _buffer in buffers.items():
if _buffer["inst"] not in ["__sweep__"]:
self.keithley( _buffer["inst"] ).output_on()
# Loop through sweep variables
for _bias in self._get_app_metadata("__sweep__"):
# If thread is running
if self.thread_running:
# Set voltage/current bias
__sweep_func__(_bias)
# Populate buffers
buffers["__sweep__"]["data"] = self.keithley( buffers["__sweep__"]["inst"] ).meas().split(",")
				# Plot instruments will copy sweep data or call meas() if needed
for plot_buffer in ["__plotx__", "__ploty__"]:
if buffers[plot_buffer]["inst"] == "__sweep__":
buffers[plot_buffer]["data"] = buffers["__sweep__"]["data"]
else:
buffers[plot_buffer]["data"] = self.keithley( buffers[plot_buffer]["inst"] ).meas().split(",")
if __sweep_delay__ != 0:
time.sleep(__sweep_delay__)
# Extract data from buffer
_now = float(time.time() - start)
# Append measured values to data arrays
data.append_subkey_data(key,"t", _now )
data.append_subkey_data(key,"V", float( buffers["__sweep__"]["data"][0]) )
data.append_subkey_data(key,"I", float( buffers["__sweep__"]["data"][1]) )
data.append_subkey_data(key,"P", float( buffers["__sweep__"]["data"][0]) * float(buffers["__sweep__"]["data"][1]) )
# Sync x-axis data
if self.plot_x_data.currentText() == "Voltage":
p0 = buffers["__plotx__"]["data"][0]
if self.plot_x_data.currentText() == "Current":
p0 = buffers["__plotx__"]["data"][1]
# Sync y-axis data
if self.plot_y_data.currentText() == "Voltage":
p1 = buffers["__ploty__"]["data"][0]
if self.plot_y_data.currentText() == "Current":
p1 = buffers["__ploty__"]["data"][1]
# Update the data
self.plot.append_handle_data("111", key, float(p0), float(p1))
self.plot.update_canvas()
# Reset Keithley
__sweep_func__(0.0)
		# Loop through all instruments and disable outputs
for _key, _buffer in buffers.items():
if _buffer["inst"] not in ["__sweep__"]:
self.keithley( _buffer["inst"] ).output_off()
# Reset sweep control and update measurement state to stop.
# Post a button click event to the QStateMachine to trigger
# a state transition if thread is still running (not aborted)
if self.thread_running:
self.meas_button.click()
# Function we run when we enter run state
def exec_meas_run(self):
# Update sweep and step params
self.update_meas_params()
# For startup protection
if self.keithley(self.sweep_inst) is not None:
# Update UI button to abort
self.meas_button.setStyleSheet(
"background-color: #ffcccc; border-style: solid; border-width: 1px; border-color: #800000; padding: 7px;")
# Disable controls (sweep)
self.sweep_src.setEnabled(False)
self.sweep_inst.setEnabled(False)
# Disable controls (step)
self.step_src.setEnabled(False)
self.step_inst.setEnabled(False)
self.step_button.setEnabled(False)
# Disable controls (save)
self.save_widget.setEnabled(False)
			# Disable controls (plot)
self.plot.mpl_refresh_setEnabled(False)
self.plot_x_inst.setEnabled(False)
self.plot_x_data.setEnabled(False)
self.plot_y_inst.setEnabled(False)
self.plot_y_data.setEnabled(False)
			# Check app meta and run sweep or sweep-step thread
if self._get_app_metadata("__exec_step__") == True:
self.thread = threading.Thread(target=self.exec_sweep_step_thread, args=())
else:
self.thread = threading.Thread(target=self.exec_sweep_thread, args=())
self.thread.daemon = True # Daemonize thread
self.thread.start() # Start the execution
self.thread_running = True
# Function we run when we enter abort state
def exec_meas_stop(self):
# For startup protection
if self.keithley(self.sweep_inst) is not None:
# Update UI button to start state
self.meas_button.setStyleSheet(
"background-color: #dddddd; border-style: solid; border-width: 1px; border-color: #aaaaaa; padding: 7px;" )
# Enable controls (sweep)
self.sweep_src.setEnabled(True)
self.sweep_inst.setEnabled(True)
# Enable controls (step)
self.step_src.setEnabled(True)
self.step_inst.setEnabled(True)
self.step_button.setEnabled(True)
# Enable controls (save)
self.save_widget.setEnabled(True)
			# Enable controls (plot)
self.plot.mpl_refresh_setEnabled(True)
self.plot_x_inst.setEnabled(True)
self.plot_x_data.setEnabled(True)
self.plot_y_inst.setEnabled(True)
self.plot_y_data.setEnabled(True)
# Kill measurement thread
self.thread_running = False
self.thread.join() # Waits for thread to complete
| 30.824908
| 197
| 0.696811
| 39,964
| 0.949783
| 0
| 0
| 0
| 0
| 0
| 0
| 13,823
| 0.328517
|
62130b375ece64a9c0e907cb577ca9c4c8cd327e
| 1,620
|
py
|
Python
|
tests/fields/test_render.py
|
jpsca/pforms
|
77c9da93e5224e79bb147aa873f28951e972bb21
|
[
"MIT"
] | 2
|
2020-09-30T22:41:00.000Z
|
2020-12-04T16:47:17.000Z
|
tests/fields/test_render.py
|
jpsca/hyperform
|
d5c450ad8684a853fed26f8c2606877151125a9e
|
[
"MIT"
] | 2
|
2021-11-18T18:01:28.000Z
|
2021-11-18T18:03:29.000Z
|
tests/fields/test_render.py
|
jpsca/hyperform
|
d5c450ad8684a853fed26f8c2606877151125a9e
|
[
"MIT"
] | null | null | null |
import proper_forms.fields as f
def test_render_attrs():
field = f.Text()
attrs = {
"id": "text1",
"classes": "myclass",
"data_id": 1,
"checked": True,
"ignore": False,
}
assert (
str(field.render_attrs(**attrs))
== 'class="myclass" data-id="1" id="text1" checked'
)
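    # As exercised above, render_attrs maps `classes` to the class attribute, turns
    # underscores into dashes (data_id -> data-id), renders truthy booleans as bare
    # attributes (checked) and drops falsy ones (ignore).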
def test_render_attrs_empty():
field = f.Text()
assert str(field.render_attrs()) == ""
def test_render_attrs_bad():
field = f.Text()
assert (
str(field.render_attrs(myattr="a'b\"><script>bad();</script>"))
== 'myattr="a\'b"><script>bad();</script>"'
)
def test_object_value():
field = f.Text(prepare=lambda x: [str(x * 2)])
field.object_value = 2
assert field.values == ["4"]
assert field.value == "4"
def test_input_values():
field = f.Text()
field.input_values = ["hello"]
assert field.values == ["hello"]
assert field.value == "hello"
def test_input_value_over_object_value():
field = f.Text()
field.input_values = ["foo"]
field.object_value = "bar"
assert field.values == ["foo"]
assert field.value == "foo"
def test_render_error():
field = f.Text(required=True)
assert str(field.render_error()) == ""
field.validate()
error = "This field is required."
assert str(field.render_error()) == f'<div class="error">{error}</div>'
assert str(field.render_error("p")) == f'<p class="error">{error}</p>'
assert (
str(field.render_error(classes="errorMessage"))
== f'<div class="errorMessage">{error}</div>'
)
| 24.179104
| 75
| 0.588889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 395
| 0.243827
|
621340935801ad4caf2565122ba09adde8da7eaf
| 30,832
|
py
|
Python
|
gerber.py
|
BetaPollux/gerbex
|
aeb013da642135d28d809ddb07febc129219d297
|
[
"MIT"
] | null | null | null |
gerber.py
|
BetaPollux/gerbex
|
aeb013da642135d28d809ddb07febc129219d297
|
[
"MIT"
] | null | null | null |
gerber.py
|
BetaPollux/gerbex
|
aeb013da642135d28d809ddb07febc129219d297
|
[
"MIT"
] | 1
|
2021-12-05T14:44:26.000Z
|
2021-12-05T14:44:26.000Z
|
#!/usr/bin/python3
# RS-274X per standard Revision 2021.02
import re
import copy
import numpy as np
import vertices
# TODO replace all vertices with outline class
# Meant for extracting substrings only
# Cast to int or float will catch invalid strings
RE_INT = r'[+-]?[0-9]+'
RE_DEC = r'[+-]?[0-9\.]+?'
EXPOSURE_ON = 1
EXPOSURE_OFF = 0
class Gerber():
def __init__(self):
self.format_num_int = None
self.format_num_dec = None
self.unit = None
self.current_point = None
self.current_aperture = None
# Interpolation should be None, but not all files have G01
self.interpolation = 'linear'
self.region = None
self.transform = ApertureTransform()
self.apertures = {}
self.templates = {
'C': Circle(1.0),
'R': Rectangle(1.0, 1.0),
'O': Obround(1.0, 1.0),
'P': Polygon(1.0, 3, 0.0)
}
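        # Standard aperture templates (C, R, O, P); the placeholder dimensions above are
        # replaced via derive_from() when an AD command instantiates an aperture.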
self.objects = []
self.objects_list_stack = [self.objects]
self.reached_eof = False
def add_object(self, new_obj):
self.objects_list_stack[-1].append(new_obj)
def comment(self, statement: str):
pass
def ignore(self, statement: str):
pass
def not_implemented(self, statement: str):
raise NotImplementedError('Command not implemented: ' + statement)
def begin_region(self, statement: str):
# TODO is self.region required?
self.region = Region(self.transform)
self.objects_list_stack.append(self.region)
def end_region(self, statement: str):
self.region.end_contour()
self.objects_list_stack.pop()
self.add_object(self.region)
self.region = None
def get_command_function(self, statement: str):
commands = {
'G04': self.comment,
'MO': self.set_mode,
'FS': self.set_format,
'AD': self.aperture_define,
'AM': self.aperture_macro,
'Dnn': self.set_current_aperture,
'D01': self.interpolate_operation,
'D02': self.move_operation,
'D03': self.flash_operation,
'G01': self.set_interpolation,
'G02': self.set_interpolation,
'G03': self.set_interpolation,
'G74': self.not_implemented,
'G75': self.ignore,
'LP': self.load_polarity,
'LM': self.load_mirroring,
'LR': self.load_rotation,
'LS': self.load_scaling,
'G36': self.begin_region,
'G37': self.end_region,
'AB': self.aperture_block,
'SR': self.step_and_repeat,
'TF': self.ignore,
'TA': self.ignore,
'TO': self.ignore,
'TD': self.ignore,
'M02': self.set_eof
}
# Extended commands
# e.g.
# %MOMM*%
# %AMDonut*
# 1,1,$1,$2,$3*
# $4=$1x0.75*
# 1,0,$4,$2,$3*
# %
# %ADD11Donut,0.30X0X0*%
code = None
if statement.startswith('%'):
code = statement[1:3]
else:
# Word commands
# e.g.
# G04 comment *
# D10*
# X0Y0D02*
match = re.search(r'[GDM](\d\d)', statement)
if match:
code = match.group()
if code[0] == 'D' and int(match.group(1)) >= 10:
code = 'Dnn'
try:
return commands[code]
except KeyError:
raise KeyError(f'Unrecognized statement: {statement}')
def set_eof(self, statement: str):
if statement != 'M02*':
raise ValueError('Invalid EOF statement')
self.reached_eof = True
def parse(self, filename: str):
with open(filename, 'r') as f:
delimiter = False
for line_num, line in enumerate(f):
if line.isspace():
continue
if line.startswith('%'):
delimiter = True
statement = ''
if delimiter:
statement += line
else:
statement = line
if line.endswith('%\n'):
delimiter = False
if not delimiter:
statement = statement.strip()
try:
command = self.get_command_function(statement)
command(statement)
except (ValueError, KeyError) as ex:
raise ValueError(f'Error line {line_num + 1}: {ex}')
if not self.reached_eof:
raise ValueError('File did not contain EOF marker')
def set_mode(self, statement: str):
# Set unit of measurement to metric or imperial
if statement == '%MOMM*%':
self.unit = 'mm'
elif statement == '%MOIN*%':
self.unit = 'in'
else:
raise ValueError(f'Unrecognized mode statement: {statement}')
def set_format(self, statement: str):
# Set coordinates and distances in operations
# %FSLAX36Y36*% sets 3 integer digits, 6 decimal digits
# 6 decimal digits implies 10^-6 is increment
if self.format_num_dec is not None or self.format_num_int is not None:
raise ValueError('Format must be set exactly once')
match = re.search(r'%FSLAX([1-6])([3-6])Y([1-6])([3-6])\*%', statement)
if match is None:
raise ValueError(f'Unrecognized format statement: {statement}')
if match.group(1) != match.group(3):
raise ValueError(f'Mismatched format X, Y integer digits: {statement}')
if match.group(2) != match.group(4):
raise ValueError(f'Mismatched format X, Y decimal digits: {statement}')
self.format_num_int = int(match.group(1))
self.format_num_dec = int(match.group(2))
self.format_scale = 10**(-self.format_num_dec)
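        # Example: '%FSLAX36Y36*%' gives 3 integer / 6 decimal digits, so format_scale
        # is 1e-6 and a coordinate such as X2152000 denotes 2.152 units.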
def set_interpolation(self, statement: str):
# Set interpolation state to linear or circular
if statement == 'G01*':
self.interpolation = 'linear'
elif statement == 'G02*':
self.interpolation = 'cw_circular'
elif statement == 'G03*':
self.interpolation = 'ccw_circular'
else:
raise ValueError(f'Unrecognized interpolation statement: {statement}')
def create_aperture(self):
if self.current_aperture is not None:
return self.apertures[self.current_aperture].clone()
else:
return None
def get_new_point(self, x: str, y: str):
# Parse strings, e.g. X2152000 and Y1215000
if x and y:
return (int(x[1:]), int(y[1:]))
elif x:
return (int(x[1:]), self.current_point[1])
elif y:
return (self.current_point[0], int(y[1:]))
else:
raise ValueError('Invalid x and y')
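        # Coordinates are modal: an omitted X or Y in a D01/D02/D03 operation keeps the
        # value from the current point, e.g. 'Y1215000D01*' reuses the previous X.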
def interpolate_operation(self, statement: str):
# D01 linear/circular line segment
match = re.search(rf'(X{RE_INT})?(Y{RE_INT})?(I{RE_INT})?(J{RE_INT})?D01\*', statement)
if match is not None:
x = match.group(1)
y = match.group(2)
i = match.group(3)
j = match.group(4)
new_point = self.get_new_point(x, y)
if self.interpolation == 'linear':
self.add_object(Draw(self.create_aperture(), self.transform,
self.current_point, new_point))
elif self.interpolation in ('cw_circular', 'ccw_circular'):
if i and j:
offset = (int(i[1:]), int(j[1:]))
else:
raise ValueError(f'Missing offset: I {i}, J {j}')
is_cw = (self.interpolation == 'cw_circular')
self.add_object(Arc(self.create_aperture(), self.transform,
self.current_point, new_point,
offset, is_cw))
else:
raise ValueError(f'Invalid interpolation: {self.interpolation}')
self.current_point = new_point
else:
raise ValueError(f'Unrecognized interpolate operation: {statement}')
def move_operation(self, statement: str):
# D02 move operation
match = re.search(rf'(X{RE_INT})?(Y{RE_INT})?D02\*', statement)
if match is not None:
x = match.group(1)
y = match.group(2)
self.current_point = self.get_new_point(x, y)
else:
raise ValueError(f'Unrecognized move operation: {statement}')
def flash_operation(self, statement: str):
# D03 create flash object
match = re.search(rf'(X{RE_INT})?(Y{RE_INT})?D03\*', statement)
if match is not None:
x = match.group(1)
y = match.group(2)
new_point = self.get_new_point(x, y)
aperture = self.create_aperture()
self.add_object(Flash(aperture, self.transform, new_point))
self.current_point = new_point
else:
raise ValueError(f'Unrecognized flash operation: {statement}')
def load_polarity(self, statement: str):
# Polarity is either clear or dark
if statement == '%LPC*%':
self.transform.polarity = 'clear'
elif statement == '%LPD*%':
self.transform.polarity = 'dark'
else:
raise ValueError(f'Unrecognized polarity statement: {statement}')
def load_mirroring(self, statement: str):
# Mirror either N, X, Y or XY
match = re.search(r'%LM(N|X|Y|XY)\*%', statement)
if match is not None:
self.transform.mirroring = match.group(1)
else:
raise ValueError(f'Unrecognized mirroring statement: {statement}')
def load_rotation(self, statement: str):
# Rotation in degrees counterclockwise
match = re.search(r'%LR(\S+)\*%', statement)
if match is not None:
self.transform.rotation = float(match.group(1))
else:
raise ValueError(f'Unrecognized rotation statement: {statement}')
def load_scaling(self, statement: str):
# Scaling where 1.0 is no scaling
match = re.search(r'%LS(\S+)\*%', statement)
if match is not None:
self.transform.scaling = float(match.group(1))
else:
raise ValueError(f'Unrecognized scaling statement: {statement}')
def aperture_define(self, statement: str):
# Parse e.g. %ADD100C,1.5*%
# AD, D100, C, 1.5
# cmd, ident, template
match = re.search(r'%AD(D[0-9]{2,})([\w\.\$]+)(,\S*)?\*%', statement)
if match is not None:
ident = match.group(1)
template_name = match.group(2)
parameters = match.group(3)
if parameters:
parameters = parameters.lstrip(',')
if ident in self.apertures:
raise ValueError(f'Aperture {ident} already defined')
if template_name in self.templates:
self.apertures[ident] = self.templates[template_name].derive_from(parameters)
else:
raise KeyError(f'Aperture template {template_name} not defined')
else:
raise ValueError(f'Unrecognized aperture define statement: {statement}')
def aperture_macro(self, statement: str):
# %AMCIRC*\n1,1,1.5,0,0,0*%
match = re.search(r'%AM([\w\.\$]+)', statement)
if match is not None:
ident = match.group(1)
if ident in self.templates:
raise ValueError(f'Aperture {ident} template already defined')
self.templates[ident] = Macro.parse(statement)
else:
raise ValueError(f'Unrecognized aperture macro statement: {statement}')
def aperture_block(self, statement: str):
# %ABD12*%
# %ADD11C,0.5*%
# D10*
# G01*
# X-2500000Y-1000000D03*
# Y1000000D03*
# %LPC*%
# ...
# G01*
# %AB*%
match = re.search(r'%AB(D[0-9]{2,})?\*%', statement)
if match is not None:
ident = match.group(1)
if ident is None: # Close Block
self.objects_list_stack.pop()
else: # Open new Block
if ident in self.apertures:
raise ValueError(f'Aperture {ident} already defined')
self.apertures[ident] = BlockAperture()
self.objects_list_stack.append(self.apertures[ident])
else:
raise ValueError(f'Unrecognized aperture block statement: {statement}')
def set_current_aperture(self, statement: str):
# D10*
# select aperture D10
match = re.search(r'(D[0-9]{2,})\*', statement)
if match is not None:
ident = match.group(1)
if ident in self.apertures:
self.current_aperture = ident
else:
raise KeyError(f'Aperture {ident} is not defined')
else:
raise ValueError(f'Unrecognized set current aperture statement: {statement}')
def step_and_repeat(self, statement: str):
# %SRX3Y2I5.0J4.0*%
# ...
# %SR*%
# Step and repeat all enclosed statements
if statement == '%SR*%':
self.objects_list_stack.pop()
else:
match = re.search(rf'%SRX(\d+)Y(\d+)I({RE_DEC})J({RE_DEC})\*%', statement)
if match is not None:
x = int(match.group(1))
y = int(match.group(2))
i = float(match.group(3))
j = float(match.group(4))
sr = StepAndRepeat(x, y, i, j)
self.add_object(sr)
self.objects_list_stack.append(sr)
else:
raise ValueError(f'Unrecognized step and repeat statement: {statement}')
class ApertureTransform():
def __init__(self,
polarity: str = 'dark', mirroring: str = 'N',
rotation: float = 0.0, scaling: float = 1.0):
self.polarity = polarity
self.mirroring = mirroring
self.rotation = rotation
self.scaling = scaling
class Aperture():
def __init__(self):
pass
def derive_from(self, statement: str):
if statement is None:
raise ValueError('Missing parameters statement')
tokens = statement.split('X')
return type(self)(*[float(token) for token in tokens])
def clone(self):
new = copy.copy(self)
return new
def get_hole_vertices(self, dest: list = None):
hole_pts = None
if self.hole_diameter:
hole_pts = vertices.circle(self.hole_diameter)
hole_pts = np.flip(hole_pts, 0)
if dest is not None:
dest.append(hole_pts)
return hole_pts
def get_outline(self, dest: list = None):
raise NotImplementedError('get_outline not implemented')
class Circle(Aperture):
def __init__(self, diameter: float, hole_diameter: float = None):
super().__init__()
self.diameter = diameter
self.hole_diameter = hole_diameter
def get_outline(self, dest: list = None):
pts = vertices.circle(self.diameter)
holes = []
self.get_hole_vertices(holes)
outline = vertices.OutlineVertices(pts, holes)
if dest is not None:
dest.append(outline)
return outline
class Rectangle(Aperture):
def __init__(self, x_size: float, y_size: float,
hole_diameter: float = None):
super().__init__()
self.x_size = x_size
self.y_size = y_size
self.hole_diameter = hole_diameter
def get_outline(self, dest: list = None):
pts = vertices.rectangle(self.x_size, self.y_size)
holes = []
self.get_hole_vertices(holes)
outline = vertices.OutlineVertices(pts, holes)
if dest is not None:
dest.append(outline)
return outline
class Obround(Aperture):
def __init__(self, x_size: float, y_size: float,
hole_diameter: float = None):
super().__init__()
self.x_size = x_size
self.y_size = y_size
self.hole_diameter = hole_diameter
def get_outline(self, dest: list = None):
w = min(self.x_size, self.y_size)
z = 0.5 * (max(self.x_size, self.y_size) - w)
if self.x_size > self.y_size:
x1, x2 = -z, z
y1, y2 = 0, 0
else:
x1, x2 = 0, 0
y1, y2 = -z, z
pts = vertices.rounded_line(w, x1, y1, x2, y2)
holes = []
self.get_hole_vertices(holes)
outline = vertices.OutlineVertices(pts, holes)
if dest is not None:
dest.append(outline)
return outline
class Polygon(Aperture):
def __init__(self, outer_diameter: float, vertices: int,
rotation: float = 0.0, hole_diameter: float = None):
super().__init__()
self.outer_diameter = outer_diameter
self.vertices = int(vertices)
self.rotation = rotation
self.hole_diameter = hole_diameter
if self.vertices not in range(3, 13):
raise ValueError('Polygon vertices must be from 3 to 12')
def get_outline(self, dest: list = None):
pts = vertices.regular_poly(self.outer_diameter, self.vertices)
vertices.rotate(pts, self.rotation)
holes = []
self.get_hole_vertices(holes)
outline = vertices.OutlineVertices(pts, holes)
if dest is not None:
dest.append(outline)
return outline
class Macro(Aperture):
def __init__(self, template_str: str, primitives: list):
super().__init__()
self.template_str = template_str
self.primitives = primitives
def get_outline(self, dest: list = None):
outlines = []
for prim in self.primitives:
outlines.append(prim.get_outline(dest))
return outlines
def derive_from(self, statement: str):
# Collect parameter values from creation statement
params = {}
if statement is not None:
for i, token in enumerate(statement.split('X')):
params[i + 1] = float(token)
# Create primitives by parsing template string
primitives = []
blocks = self.template_str.replace('\n', '').split('*')
for block in blocks:
# Ignore open/close block or comment
if block.startswith('%') or block.startswith('0'):
continue
# Resolve new variables
if block.startswith('$'):
expr = re.search(r'\$(\d+)\s*=([^*]+)*', block)
expr_p = expr.group(1)
expr_e = expr.group(2)
for p, v in params.items():
expr_e = expr_e.replace(f'${p}', str(v))
params[expr_p] = Macro.eval_expression(expr_e)
# Attempt to create a primitive
else:
code = block.split(',')[0]
for p, v in params.items():
block = block.replace(f'${p}', str(v))
missing = re.search(r'\$\d+', block)
if missing:
raise KeyError('Unfulfilled macro parameter ' +
missing.group())
try:
primitives.append(Macro.primtypes(code).parse(block))
except KeyError:
raise KeyError('Unrecognized macro code ' + str(code))
return type(self)(self.template_str, primitives)
@staticmethod
def primtypes(code):
prims = {
'1': MacroCircle,
'20': MacroVectorLine,
'21': MacroCenterLine,
'4': MacroOutline,
'5': MacroPolygon,
'6': MacroMoire,
'7': MacroThermal
}
return prims[code]
@classmethod
def parse(cls, statement: str):
if not statement.startswith('%AM'):
raise ValueError('Invalid define macro statement')
# TODO validate template
return cls(statement, [])
@staticmethod
def eval_expression(expr: str):
legal = set('0123456789()-+/x.')
chars = set(expr)
illegal = chars.difference(legal)
if len(illegal) > 0:
raise ValueError('Illegal characters in expression: ' + expr)
expr = expr.replace('x', '*') # Multiplication
return eval(expr)
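        # Example: with $1 = 0.30 (from '%ADD11Donut,0.30X0X0*%' above), the macro body
        # '$4=$1x0.75*' yields the expression '0.30*0.75', since 'x' denotes
        # multiplication; eval() is only reached after the whitelist check above.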
class MacroPrimitive():
def __init__(self, exposure, x, y, rotation):
if exposure not in (EXPOSURE_OFF, EXPOSURE_ON):
raise ValueError('Invalid exposure value')
self.exposure = exposure
self.x = x
self.y = y
self.rotation = rotation
def get_outline(self, dest: list = None):
        raise NotImplementedError('get_outline not implemented')
@classmethod
def parse(cls, statement: str):
if statement is None:
raise ValueError('Missing parameters statement')
tokens = statement.split(',')[1:] # Discard first token (shape code)
return cls(*[Macro.eval_expression(token) for token in tokens])
class MacroCircle(MacroPrimitive):
def __init__(self, exposure, diameter, x, y, rotation=0.0):
super().__init__(exposure, x, y, rotation)
self.diameter = diameter
def get_outline(self, dest: list = None):
pts = vertices.circle(self.diameter)
outline = vertices.OutlineVertices(pts)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroVectorLine(MacroPrimitive):
def __init__(self, exposure, width, x1, y1, x2, y2, rotation=0.0):
super().__init__(exposure, 0, 0, rotation)
self.width = width
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def get_outline(self, dest: list = None):
pts = vertices.thick_line(self.width,
self.x1, self.y1,
self.x2, self.y2)
outline = vertices.OutlineVertices(pts)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroCenterLine(MacroPrimitive):
def __init__(self, exposure, width, height, x, y, rotation=0.0):
super().__init__(exposure, x, y, rotation)
self.width = width
self.height = height
def get_outline(self, dest: list = None):
pts = vertices.rectangle(self.width, self.height)
outline = vertices.OutlineVertices(pts)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroPolygon(MacroPrimitive):
def __init__(self, exposure, vertices, x, y, diameter, rotation=0.0):
super().__init__(exposure, x, y, rotation)
self.vertices = vertices
self.diameter = diameter
def get_outline(self, dest: list = None):
pts = vertices.regular_poly(self.diameter, self.vertices)
outline = vertices.OutlineVertices(pts)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroThermal(MacroPrimitive):
def __init__(self, x, y, outer_diameter, inner_diameter,
gap, rotation=0.0):
super().__init__(EXPOSURE_ON, x, y, rotation)
self.outer_diameter = outer_diameter
self.inner_diameter = inner_diameter
self.gap = gap
def get_outline(self, dest: list = None):
pts = vertices.circle(self.outer_diameter)
hole_pts = vertices.circle(self.inner_diameter)
holes = [np.flip(hole_pts, 0)]
# TODO add gaps
outline = vertices.OutlineVertices(pts, holes)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroMoire(MacroPrimitive):
def __init__(self, x, y, outer_diameter, ring_thickness,
gap, num_rings, crosshair_thickness, crosshair_length,
rotation=0.0):
super().__init__(EXPOSURE_ON, x, y, rotation)
self.outer_diameter = outer_diameter
self.ring_thickness = ring_thickness
self.gap = gap
self.num_rings = num_rings
self.crosshair_thickness = crosshair_thickness
self.crosshair_length = crosshair_length
def get_outline(self, dest: list = None):
pts = vertices.circle(self.outer_diameter)
        # MacroMoire has no inner_diameter attribute; approximate the hole with the
        # inner edge of the outermost ring for now
        holes = [np.flip(vertices.circle(self.outer_diameter - 2.0 * self.ring_thickness), 0)]
# TODO implement properly
outline = vertices.OutlineVertices(pts, holes)
outline.positive = self.exposure == 1
outline.translate(self.x, self.y)
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class MacroOutline(MacroPrimitive):
def __init__(self, exposure, vertices, x, y, *args):
N = 2 * vertices + 1
if len(args) == N:
super().__init__(exposure, x, y, rotation=float(args[-1]))
self.vertices = vertices
self.coordinates = [*args[:-1]]
else:
raise ValueError(f'Expected {N} parameters but received {len(args)}')
def get_outline(self, dest: list = None):
N = int(len(self.coordinates) / 2)
pts = np.array(self.coordinates)
pts.resize((N, 2))
outline = vertices.OutlineVertices(pts)
outline.positive = self.exposure == 1
outline.rotate(self.rotation)
if dest is not None:
dest.append(outline)
return outline
class BlockAperture(Aperture):
def __init__(self):
super().__init__()
self.objects = []
def append(self, object):
self.objects.append(object)
class GraphicalObject():
def __init__(self, aperture, transform, origin: tuple):
self.aperture = aperture
self.transform = copy.copy(transform)
self.origin = origin
def translate(self, translation):
dx, dy = translation
x0, y0 = self.origin
self.origin = (x0 + dx, y0 + dy)
def get_outline(self, dest: list = None, scale: float = 1e-6):
raise NotImplementedError('get_outline not implemented')
class Draw(GraphicalObject):
def __init__(self, aperture, transform, origin: tuple, endpoint: tuple):
super().__init__(aperture, transform, origin)
self.endpoint = endpoint
def translate(self, translation):
dx, dy = translation
x0, y0 = self.origin
x1, y1 = self.endpoint
self.origin = (x0 + dx, y0 + dy)
self.endpoint = (x1 + dx, y1 + dy)
def get_outline(self, dest: list = None, scale: float = 1e-6):
x0, y0 = scale * np.array(self.origin)
x1, y1 = scale * np.array(self.endpoint)
pts = vertices.rounded_line(self.aperture.diameter, x0, y0, x1, y1)
outline = vertices.OutlineVertices(pts)
# TODO apply transform
if dest is not None:
dest.append(outline)
return outline
# TODO Arc needs quadrant mode
class Arc(GraphicalObject):
def __init__(self, aperture, transform, origin: tuple, endpoint: tuple,
offset: tuple, is_cw: bool = True):
super().__init__(aperture, transform, origin)
self.endpoint = endpoint
self.offset = offset
self.is_cw = is_cw
def translate(self, translation):
dx, dy = translation
x0, y0 = self.origin
x1, y1 = self.endpoint
self.origin = (x0 + dx, y0 + dy)
self.endpoint = (x1 + dx, y1 + dy)
def get_outline(self, dest: list = None, scale: float = 1e-6):
dx, dy = scale * np.array(self.offset)
x1, y1 = scale * np.array(self.origin)
x2, y2 = scale * np.array(self.endpoint)
x0, y0 = x1 + dx, y1 + dy
pts = vertices.rounded_arc(self.aperture.diameter, x0, y0, x1, y1, x2, y2)
vertices.translate(pts, x0, y0)
outline = vertices.OutlineVertices(pts)
# TODO apply transform
if dest is not None:
dest.append(outline)
return outline
class Flash(GraphicalObject):
def __init__(self, aperture, transform, origin: tuple):
super().__init__(aperture, transform, origin)
def get_outline(self, dest: list = None, scale: float = 1e-6):
outlines = self.aperture.get_outline(dest)
if type(outlines) != list:
outlines = [outlines]
x0, y0 = scale * np.array(self.origin)
# TODO replace with apply transform function
for outline in outlines:
outline.positive = self.transform.polarity == 'dark'
outline.rotate(self.transform.rotation)
outline.translate(x0, y0)
return outlines
class Region(GraphicalObject):
def __init__(self, transform):
super().__init__(None, transform, None)
self.objects = []
self.contours = []
def end_contour(self):
if len(self.contours) > 0:
prev_start, prev_len = self.contours[-1]
new_start = prev_start + prev_len
self.contours.append((new_start, len(self.objects) - new_start))
else:
self.contours.append((0, len(self.objects)))
def append(self, object):
if not isinstance(object, (Draw, Arc)):
raise TypeError('Region only supports Draw and Arc objects')
if len(self.objects) > 0 and object.origin != self.objects[-1].endpoint:
self.end_contour()
self.objects.append(object)
class StepAndRepeat():
def __init__(self, nx: int, ny: int, step_x: float, step_y: float):
if nx < 1 or ny < 1:
raise ValueError('Repeat must be 1 or greater')
if step_x < 0.0 or step_y < 0.0:
raise ValueError('Step size must be positive')
self.nx = nx
self.ny = ny
self.step_x = step_x
self.step_y = step_y
self.objects = []
def append(self, object):
self.objects.append(object)
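# A minimal usage sketch added for illustration (assumptions: `circle` is any
# aperture object exposing a `diameter` attribute and `state.transform` is the
# graphics-state transform built elsewhere in this module; both names are
# hypothetical here):
# seg = Draw(circle, state.transform, origin=(0, 0), endpoint=(5000, 0))
# seg.translate((0, 2500))               # shifts both origin and endpoint
# outline = seg.get_outline(scale=1e-6)  # scaled rounded-line OutlineVertices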
| 35.196347
| 95
| 0.570025
| 30,390
| 0.985664
| 0
| 0
| 1,168
| 0.037883
| 0
| 0
| 4,384
| 0.14219
|
621341c710939a44f425e8019b3137c8dfb8ad3f
| 3,558
|
py
|
Python
|
angrmanagement/ui/menus/disasm_insn_context_menu.py
|
yuzeming/angr-management
|
173d3ffa02146956e5f0c9c8862da56988fa67b2
|
[
"BSD-2-Clause"
] | 474
|
2015-08-10T17:47:15.000Z
|
2022-03-31T21:10:55.000Z
|
angrmanagement/ui/menus/disasm_insn_context_menu.py
|
yuzeming/angr-management
|
173d3ffa02146956e5f0c9c8862da56988fa67b2
|
[
"BSD-2-Clause"
] | 355
|
2015-08-17T09:35:53.000Z
|
2022-03-31T21:29:52.000Z
|
angrmanagement/ui/menus/disasm_insn_context_menu.py
|
yuzeming/angr-management
|
173d3ffa02146956e5f0c9c8862da56988fa67b2
|
[
"BSD-2-Clause"
] | 95
|
2015-08-11T14:36:12.000Z
|
2022-03-31T23:01:01.000Z
|
from functools import partial
from typing import Callable
from typing import TYPE_CHECKING
from ...config import Conf
from .menu import Menu, MenuEntry, MenuSeparator
if TYPE_CHECKING:
from ...ui.views.disassembly_view import DisassemblyView
class DisasmInsnContextMenu(Menu):
"""
Disassembly Instruction's Context Menu Items and callback functions.
It provides a context menu for disassembly instructions in the Disassembly View.
For adding items in plugins, use `Workspace.add_disasm_insn_ctx_menu_entry`
and `Workspace.remove_disasm_insn_ctx_menu_entry`.
"""
def __init__(self, disasm_view: 'DisassemblyView'):
super().__init__("", parent=disasm_view)
self.insn_addr = None
self.entries.extend([
MenuEntry('T&oggle selection', self._toggle_instruction_selection),
MenuSeparator(),
MenuEntry('&XRefs...', self._popup_xrefs),
MenuSeparator(),
])
if Conf.has_operation_mango:
self.entries.extend([
MenuEntry("&Depends on...", self._popup_dependson_dialog),
MenuSeparator(),
])
self.entries.extend([
MenuEntry('E&xecute symbolically...', self._popup_newstate_dialog),
MenuEntry('&Avoid in execution...', self._avoid_in_execution),
MenuEntry('&Find in execution...', self._find_in_execution),
MenuEntry('Add &hook...', self._add_hook),
MenuEntry('View function &documentation...', self._view_docs)
])
@property
def _disasm_view(self) -> 'DisassemblyView':
return self.parent
def _popup_newstate_dialog(self):
self._disasm_view.popup_newstate_dialog(async_=True)
def _popup_dependson_dialog(self):
self._disasm_view.popup_dependson_dialog(use_operand=True)
def _toggle_instruction_selection(self):
self._disasm_view.infodock.toggle_instruction_selection(self.insn_addr)
def _avoid_in_execution(self):
self._disasm_view.avoid_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _find_in_execution(self):
self._disasm_view.find_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _add_hook(self):
self._disasm_view.popup_hook_dialog(async_=True)
def _view_docs(self):
if self._disasm_view is None:
return
addr = self._disasm_view._address_in_selection()
if addr is not None:
self._disasm_view.popup_func_doc_dialog(addr)
def _popup_xrefs(self):
if self._disasm_view is None or self._disasm_view._flow_graph is None:
return
r = self._disasm_view._flow_graph.get_selected_operand_info()
if r is not None:
_, ins_addr, operand = r
self._disasm_view.parse_operand_and_popup_xref_dialog(ins_addr, operand, async_=True)
#
# Public Methods
#
def add_menu_entry(self, text, callback: Callable[['DisasmInsnContextMenu'], None], add_separator_first=True):
if add_separator_first:
self.entries.append(MenuSeparator())
self.entries.append(MenuEntry(text, partial(callback, self)))
def remove_menu_entry(self, text, remove_preceding_separator=True):
for idx, m in enumerate(self.entries):
if not isinstance(m, MenuEntry):
continue
if m.caption == text:
self.entries.remove(m)
if remove_preceding_separator:
self.entries.pop(idx-1)
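# Example plugin usage added for illustration (assumptions: `workspace` is the live
# Workspace instance and `my_callback(menu)` is a plugin-defined callable; the
# Workspace helpers are the ones named in the class docstring above):
# workspace.add_disasm_insn_ctx_menu_entry('&My analysis...', my_callback)
# ... later, on plugin teardown ...
# workspace.remove_disasm_insn_ctx_menu_entry('&My analysis...')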
| 36.306122
| 114
| 0.666948
| 3,308
| 0.929736
| 0
| 0
| 85
| 0.02389
| 0
| 0
| 537
| 0.150927
|
621428e35c36b4c6fb3b8e653cb6dee70e33f859
| 2,787
|
py
|
Python
|
src/panoptes/pocs/base.py
|
sarumanplaysguitar/POCS
|
b6c50cb70b8f3fc2147e975e5cd3cd953956da8d
|
[
"MIT"
] | null | null | null |
src/panoptes/pocs/base.py
|
sarumanplaysguitar/POCS
|
b6c50cb70b8f3fc2147e975e5cd3cd953956da8d
|
[
"MIT"
] | null | null | null |
src/panoptes/pocs/base.py
|
sarumanplaysguitar/POCS
|
b6c50cb70b8f3fc2147e975e5cd3cd953956da8d
|
[
"MIT"
] | null | null | null |
from requests.exceptions import ConnectionError
from panoptes.pocs import __version__
from panoptes.utils.database import PanDB
from panoptes.utils.config import client
from panoptes.pocs.utils.logger import get_logger
from panoptes.pocs import hardware
# Global database.
PAN_DB_OBJ = None
class PanBase(object):
""" Base class for other classes within the PANOPTES ecosystem
Defines common properties for each class (e.g. logger, config, db).
"""
def __init__(self, config_port='6563', *args, **kwargs):
self.__version__ = __version__
self._config_port = config_port
self.logger = get_logger()
        # Prefer a db_type/db_name passed via kwargs, otherwise fall back to the runtime config
db_type = kwargs.get('db_type', self.get_config('db.type', default='file'))
db_name = kwargs.get('db_name', self.get_config('db.name', default='panoptes'))
global PAN_DB_OBJ
if PAN_DB_OBJ is None:
PAN_DB_OBJ = PanDB(db_type=db_type, db_name=db_name)
self.db = PAN_DB_OBJ
def get_config(self, *args, **kwargs):
"""Thin-wrapper around client based get_config that sets default port.
See `panoptes.utils.config.client.get_config` for more information.
Args:
*args: Passed to get_config
**kwargs: Passed to get_config
"""
config_value = None
try:
config_value = client.get_config(port=self._config_port, *args, **kwargs)
except ConnectionError as e: # pragma: no cover
self.logger.critical(f'Cannot connect to config_server from {self.__class__}: {e!r}')
return config_value
def set_config(self, key, new_value, *args, **kwargs):
"""Thin-wrapper around client based set_config that sets default port.
See `panoptes.utils.config.client.set_config` for more information.
Args:
key (str): The key name to use, can be namespaced with dots.
new_value (any): The value to store.
*args: Passed to set_config
**kwargs: Passed to set_config
"""
config_value = None
if key == 'simulator' and new_value == 'all':
# Don't use hardware.get_simulator_names because it checks config.
new_value = hardware.ALL_NAMES
try:
self.logger.trace(f'Setting config {key=} {new_value=}')
config_value = client.set_config(key, new_value, port=self._config_port, *args,
**kwargs)
self.logger.trace(f'Config set {config_value=}')
except ConnectionError as e: # pragma: no cover
self.logger.critical(f'Cannot connect to config_server from {self.__class__}: {e!r}')
return config_value
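# A minimal usage sketch added for illustration (assumption: a config server is
# already listening on the default port; PanBase is normally used as a base class
# rather than instantiated directly):
# base = PanBase()
# base.set_config('simulator', 'all')   # 'all' is expanded to hardware.ALL_NAMES above
# simulators = base.get_config('simulator')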
| 35.278481
| 97
| 0.642985
| 2,491
| 0.893793
| 0
| 0
| 0
| 0
| 0
| 0
| 1,224
| 0.439182
|
62148220d3b68cf5b490d8e272125fd66f2e326e
| 12,455
|
py
|
Python
|
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | null | null | null |
src/metarl/envs/multi_env_wrapper.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
"""A wrapper env that handles multiple tasks from different envs.
Useful while training multi-task reinforcement learning algorithms.
It provides observations augmented with one-hot representation of tasks.
"""
import random
import akro
import gym
import numpy as np
def round_robin_strategy(num_tasks, last_task=None):
"""A function for sampling tasks in round robin fashion.
Args:
num_tasks (int): Total number of tasks.
last_task (int): Previously sampled task.
Returns:
int: task id.
"""
if last_task is None:
return 0
return (last_task + 1) % num_tasks
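# Doctest-style illustration added for clarity (not used by the library code):
# >>> round_robin_strategy(3)                # no previous task -> start at 0
# 0
# >>> round_robin_strategy(3, last_task=0)
# 1
# >>> round_robin_strategy(3, last_task=2)   # wraps around
# 0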
def uniform_random_strategy(num_tasks, _):
"""A function for sampling tasks uniformly at random.
Args:
num_tasks (int): Total number of tasks.
_ (object): Ignored by this sampling strategy.
Returns:
int: task id.
"""
return random.randint(0, num_tasks - 1)
class MultiEnvWrapper(gym.Wrapper):
"""A wrapper class to handle multiple gym environments.
Args:
envs (list(gym.Env)):
A list of objects implementing gym.Env.
        sample_strategy (function(int, int)):
            Sample strategy to be used when sampling a new task.
        task_name (dict or None):
            Optional mapping from task index to a human-readable task name,
            returned in ``info['task_name']`` by ``step``.
    """
def __init__(self, envs, task_name=None, sample_strategy=uniform_random_strategy):
self._sample_strategy = sample_strategy
self._num_tasks = len(envs)
self._active_task_index = None
self._observation_space = None
self._envs_names_list = task_name or dict()
max_flat_dim = np.prod(envs[0].observation_space.shape)
for i, env in enumerate(envs):
assert len(env.observation_space.shape) == 1
if np.prod(env.observation_space.shape) >= max_flat_dim:
self.max_observation_space_index = i
max_flat_dim = np.prod(env.observation_space.shape)
self._max_plain_dim = max_flat_dim
super().__init__(envs[self.max_observation_space_index])
self._task_envs = []
for env in envs:
if env.action_space.shape != self.env.action_space.shape:
raise ValueError('Action space of all envs should be same.')
self._task_envs.append(env)
self.spec.observation_space = self.observation_space
@property
def num_tasks(self):
"""Total number of tasks.
Returns:
int: number of tasks.
"""
return len(self._task_envs)
@property
def task_space(self):
"""Task Space.
Returns:
akro.Box: Task space.
"""
one_hot_ub = np.ones(self.num_tasks)
one_hot_lb = np.zeros(self.num_tasks)
return akro.Box(one_hot_lb, one_hot_ub)
@property
def active_task_index(self):
"""Index of active task env.
Returns:
int: Index of active task.
"""
return self._active_task_index
@property
def observation_space(self):
"""Observation space.
Returns:
akro.Box: Observation space.
"""
task_lb, task_ub = self.task_space.bounds
env_lb, env_ub = self._observation_space.bounds
return akro.Box(np.concatenate([task_lb, env_lb]),
np.concatenate([task_ub, env_ub]))
@observation_space.setter
def observation_space(self, observation_space):
"""Observation space setter.
Args:
observation_space (akro.Box): Observation space.
"""
self._observation_space = observation_space
@property
def active_task_one_hot(self):
"""One-hot representation of active task.
Returns:
numpy.ndarray: one-hot representation of active task
"""
one_hot = np.zeros(self.task_space.shape)
index = self.active_task_index or 0
one_hot[index] = self.task_space.high[index]
return one_hot
def reset(self, **kwargs):
"""Sample new task and call reset on new task env.
Args:
kwargs (dict): Keyword arguments to be passed to gym.Env.reset
Returns:
numpy.ndarray: active task one-hot representation + observation
"""
self._active_task_index = self._sample_strategy(
self._num_tasks, self._active_task_index)
self.env = self._task_envs[self._active_task_index]
obs = self.env.reset(**kwargs)
obs = self._augment_observation(obs)
oh_obs = self._obs_with_one_hot(obs)
return oh_obs
def _augment_observation(self, obs):
# optionally zero-pad observation
if np.prod(obs.shape) < self._max_plain_dim:
zeros = np.zeros(
shape=(self._max_plain_dim - np.prod(obs.shape),)
)
obs = np.concatenate([obs, zeros])
return obs
def step(self, action):
"""gym.Env step for the active task env.
Args:
action (object): object to be passed in gym.Env.reset(action)
Returns:
object: agent's observation of the current environment
float: amount of reward returned after previous action
bool: whether the episode has ended
dict: contains auxiliary diagnostic information
"""
obs, reward, done, info = self.env.step(action)
obs = self._augment_observation(obs)
oh_obs = self._obs_with_one_hot(obs)
info['task_id'] = self._active_task_index
info['task_name'] = self._envs_names_list[self._active_task_index]
return oh_obs, reward, done, info
def close(self):
"""Close all task envs."""
for env in self._task_envs:
env.close()
def _obs_with_one_hot(self, obs):
"""Concatenate active task one-hot representation with observation.
Args:
obs (numpy.ndarray): observation
Returns:
numpy.ndarray: active task one-hot + observation
"""
oh_obs = np.concatenate([self.active_task_one_hot, obs])
return oh_obs
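# A usage sketch added for illustration (assumptions: `env_a` and `env_b` are
# gym.Env instances with 1-D observation spaces and identical action spaces;
# the names and task labels are hypothetical):
# wrapper = MultiEnvWrapper([env_a, env_b], task_name={0: 'reach', 1: 'push'})
# obs = wrapper.reset()                       # one-hot task id prepended to the (padded) observation
# obs, reward, done, info = wrapper.step(wrapper.action_space.sample())
# info['task_id'], info['task_name']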
# """A wrapper env that handles multiple tasks from different envs.
# Useful while training multi-task reinforcement learning algorithms.
# It provides observations augmented with one-hot representation of tasks.
# """
# import random
# import akro
# import gym
# import numpy as np
# def round_robin_strategy(num_tasks, last_task=None):
# """A function for sampling tasks in round robin fashion.
# Args:
# num_tasks (int): Total number of tasks.
# last_task (int): Previously sampled task.
# Returns:
# int: task id.
# """
# if last_task is None:
# return 0
# return (last_task + 1) % num_tasks
# def uniform_random_strategy(num_tasks, _):
# """A function for sampling tasks uniformly at random.
# Args:
# num_tasks (int): Total number of tasks.
# _ (object): Ignored by this sampling strategy.
# Returns:
# int: task id.
# """
# return random.randint(0, num_tasks - 1)
# class MultiEnvWrapper(gym.Wrapper):
# """A wrapper class to handle multiple gym environments.
# Args:
# envs (list(gym.Env)):
# A list of objects implementing gym.Env.
# sample_strategy (function(int, int)):
# Sample strategy to be used when sampling a new task.
# """
# def __init__(self, envs, sample_strategy=uniform_random_strategy):
# self._sample_strategy = sample_strategy
# self._num_tasks = len(envs)
# self._active_task_index = None
# self._observation_space = None
# max_flat_dim = np.prod(envs[0].observation_space.shape)
# max_observation_space_index = 0
# for i, env in enumerate(envs):
# assert len(env.observation_space.shape) == 1
# if np.prod(env.observation_space.shape) >= max_flat_dim:
# self.max_observation_space_index = i
# max_flat_dim = np.prod(env.observation_space.shape)
# self._max_plain_dim = max_flat_dim
# super().__init__(envs[self.max_observation_space_index])
# self._task_envs = []
# for i, env in enumerate(envs):
# if env.action_space.shape != self.env.action_space.shape:
# raise ValueError('Action space of all envs should be same.')
# self._task_envs.append(env)
# self.env.spec.observation_space = self._task_envs[self.max_observation_space_index].observation_space
# @property
# def num_tasks(self):
# """Total number of tasks.
# Returns:
# int: number of tasks.
# """
# return len(self._task_envs)
# @property
# def task_space(self):
# """Task Space.
# Returns:
# akro.Box: Task space.
# """
# one_hot_ub = np.ones(self.num_tasks)
# one_hot_lb = np.zeros(self.num_tasks)
# return akro.Box(one_hot_lb, one_hot_ub)
# @property
# def active_task_index(self):
# """Index of active task env.
# Returns:
# int: Index of active task.
# """
# return self._active_task_index
# @property
# def observation_space(self):
# """Observation space.
# Returns:
# akro.Box: Observation space.
# """
# task_lb, task_ub = self.task_space.bounds
# env_lb, env_ub = self._observation_space.bounds
# return akro.Box(np.concatenate([task_lb, env_lb]),
# np.concatenate([task_ub, env_ub]))
# @observation_space.setter
# def observation_space(self, observation_space):
# """Observation space setter.
# Args:
# observation_space (akro.Box): Observation space.
# """
# self._observation_space = observation_space
# @property
# def active_task_one_hot(self):
# """One-hot representation of active task.
# Returns:
# numpy.ndarray: one-hot representation of active task
# """
# one_hot = np.zeros(self.task_space.shape)
# index = self.active_task_index or 0
# one_hot[index] = self.task_space.high[index]
# return one_hot
# def reset(self, **kwargs):
# """Sample new task and call reset on new task env.
# Args:
# kwargs (dict): Keyword arguments to be passed to gym.Env.reset
# Returns:
# numpy.ndarray: active task one-hot representation + observation
# """
# self._active_task_index = self._sample_strategy(
# self._num_tasks, self._active_task_index)
# self.env = self._task_envs[self._active_task_index]
# obs = self.env.reset(**kwargs)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# return oh_obs
# def step(self, action):
# """gym.Env step for the active task env.
# Args:
# action (object): object to be passed in gym.Env.reset(action)
# Returns:
# object: agent's observation of the current environment
# float: amount of reward returned after previous action
# bool: whether the episode has ended
# dict: contains auxiliary diagnostic information
# """
# obs, reward, done, info = self.env.step(action)
# obs = self._augment_observation(obs)
# oh_obs = self._obs_with_one_hot(obs)
# info['task_id'] = self._active_task_index
# return oh_obs, reward, done, info
# def _augment_observation(self, obs):
# # optionally zero-pad observation
# if np.prod(obs.shape) < self._max_plain_dim:
# zeros = np.zeros(
# shape=(self._max_plain_dim - np.prod(obs.shape),)
# )
# obs = np.concatenate([obs, zeros])
# return obs
# def close(self):
# """Close all task envs."""
# for env in self._task_envs:
# env.close()
# def _obs_with_one_hot(self, obs):
# """Concatenate active task one-hot representation with observation.
# Args:
# obs (numpy.ndarray): observation
# Returns:
# numpy.ndarray: active task one-hot + observation
# """
# oh_obs = np.concatenate([self.active_task_one_hot, obs])
# return oh_obs
| 29.305882
| 111
| 0.607226
| 5,159
| 0.414211
| 0
| 0
| 1,602
| 0.128623
| 0
| 0
| 8,656
| 0.694982
|
6214cfd0d71589122131f56a39aa2ef13d007862
| 773
|
py
|
Python
|
MyNewHandTracking.py
|
Hai-Hoang-88/HandTracking
|
c35cf442c4305a48ac1182570c266df4d3b877dd
|
[
"Unlicense"
] | null | null | null |
MyNewHandTracking.py
|
Hai-Hoang-88/HandTracking
|
c35cf442c4305a48ac1182570c266df4d3b877dd
|
[
"Unlicense"
] | null | null | null |
MyNewHandTracking.py
|
Hai-Hoang-88/HandTracking
|
c35cf442c4305a48ac1182570c266df4d3b877dd
|
[
"Unlicense"
] | null | null | null |
import cv2
import mediapipe as mp
import time
import HandTracking_module as htm
# initialise the fps timing variables
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0) # 0 is internal camera, while 1 is external camera
detector = htm.handDetector()
while True:
success, img = cap.read()
    img = detector.findHands(img)  # detect hands and draw the landmarks on the frame
    lmList = detector.findPosition(img, draw=False)  # list of landmark positions; draw=False skips the circles
if len(lmList) != 0:
print(lmList[4])
    # compute the frame rate (fps)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    # draw the fps value on the image
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_ITALIC, 3,
                (255, 0, 255), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
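# A possible exit-and-cleanup sketch (assumption: would replace the bare
# cv2.waitKey(1) above; all calls are standard OpenCV APIs):
# if cv2.waitKey(1) & 0xFF == ord('q'):
#     break
# cap.release()
# cv2.destroyAllWindows()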
| 27.607143
| 91
| 0.619664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.241915
|
62170b104a1052dcd0eae68ee028a14da9c51172
| 2,113
|
py
|
Python
|
exercises/en/solution_07_23.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 1
|
2020-09-12T15:40:11.000Z
|
2020-09-12T15:40:11.000Z
|
exercises/en/solution_07_23.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 13
|
2020-10-02T16:48:24.000Z
|
2020-12-09T18:58:21.000Z
|
exercises/en/solution_07_23.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 2
|
2020-10-28T19:43:42.000Z
|
2021-03-30T22:57:47.000Z
|
import numpy as np
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.svm import SVC
# Loading in the data
pk_df = pd.read_csv('data/pokemon.csv')
train_df, test_df = train_test_split(pk_df, test_size=0.2, random_state=1)
X_train = train_df.drop(columns=['legendary'])
y_train = train_df['legendary']
X_test = test_df.drop(columns=['legendary'])
y_test = test_df['legendary']
numeric_features = ["deck_no",
"attack",
"defense" ,
"sp_attack",
"sp_defense",
"speed",
"capture_rt",
"total_bs"]
categorical_features = ["type"]
numeric_transformer = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
categorical_transformer = make_pipeline(
SimpleImputer(strategy="most_frequent"),
OneHotEncoder(handle_unknown="ignore"))
preprocessor = make_column_transformer(
(numeric_transformer, numeric_features),
(categorical_transformer, categorical_features))
# Build a pipeline containing the column transformer and an SVC model
# Use the parameter class_weight="balanced"
# Name this pipeline main_pipe
main_pipe = make_pipeline(preprocessor, SVC(class_weight="balanced"))
# Perform cross validation on the training split using the scoring measures accuracy, precision and recall
# Save the results in a dataframe named multi_scores
multi_scores = pd.DataFrame(cross_validate(main_pipe,
X_train,
y_train,
return_train_score=True,
scoring = ['accuracy', 'precision', 'recall']))
multi_scores
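# A possible follow-up, shown only for illustration (not part of the exercise):
# multi_scores[['test_accuracy', 'test_precision', 'test_recall']].mean()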
| 37.732143
| 107
| 0.681496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 546
| 0.2584
|
6217c3865432b1a663db3913c183c3b2bdd9e8cf
| 53
|
py
|
Python
|
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
src/algorithm/__init__.py
|
ShogoAkiyama/metaworld.pytorch
|
6b08163d2c0d73b6d1d9b4b513d18f0a308e92c4
|
[
"MIT"
] | null | null | null |
from .sac import SAC
from .eval import EvalAlgorithm
| 17.666667
| 31
| 0.811321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6218792313b28bf05b712a8e421f24aaaa0f9100
| 8,110
|
py
|
Python
|
Parallel_POD/online_svd_parallel.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | 8
|
2020-09-02T14:46:07.000Z
|
2021-11-29T15:27:05.000Z
|
Parallel_POD/online_svd_parallel.py
|
omersan/Practice
|
77eecdc2a202e6b333123cfd92e7db6dc0eea021
|
[
"MIT"
] | 18
|
2020-11-13T18:49:33.000Z
|
2022-03-12T00:54:43.000Z
|
Parallel_POD/online_svd_parallel.py
|
omersan/Practice
|
77eecdc2a202e6b333123cfd92e7db6dc0eea021
|
[
"MIT"
] | 5
|
2019-09-25T23:57:00.000Z
|
2021-04-18T08:15:34.000Z
|
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from mpi4py import MPI
# For shared memory deployment: `export OPENBLAS_NUM_THREADS=1`
# Method of snapshots
def generate_right_vectors(A):
'''
A - Snapshot matrix - shape: NxS
    returns V - truncated right singular vectors, and the corresponding singular values
'''
new_mat = np.matmul(np.transpose(A),A)
w, v = np.linalg.eig(new_mat)
svals = np.sqrt(np.abs(w))
rval = np.argmax(svals<0.0001) # eps0
return v[:,:rval], np.sqrt(np.abs(w[:rval])) # Covariance eigenvectors, singular values
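# Note added for readers (no behaviour change): np.argmax(svals < 0.0001) returns 0
# when no singular value is below the threshold, in which case the slices above are
# empty; the code assumes the snapshot matrix always yields at least one near-zero
# singular value.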
# Randomized SVD to accelerate
def low_rank_svd(A,K):
M = A.shape[0]
N = A.shape[1]
omega = np.random.normal(size=(N,2*K))
omega_pm = np.matmul(A,np.transpose(A))
Y = np.matmul(omega_pm,np.matmul(A,omega))
Qred, Rred = np.linalg.qr(Y)
B = np.matmul(np.transpose(Qred),A)
ustar, snew, _ = np.linalg.svd(B)
unew = np.matmul(Qred,ustar)
unew = unew[:,:K]
snew = snew[:K]
return unew, snew
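# Doctest-style shape check added for clarity (illustrative only):
# >>> A = np.random.rand(200, 50)
# >>> U, S = low_rank_svd(A, K=5)
# >>> U.shape, S.shape
# ((200, 5), (5,))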
# Check orthogonality
def check_ortho(modes,num_modes):
for m1 in range(num_modes):
for m2 in range(num_modes):
if m1 == m2:
s_ = np.sum(modes[:,m1]*modes[:,m2])
if not np.isclose(s_,1.0):
print('Orthogonality check failed')
break
else:
s_ = np.sum(modes[:,m1]*modes[:,m2])
if not np.isclose(s_,0.0):
print('Orthogonality check failed')
break
print('Orthogonality check passed successfully')
class online_svd_calculator(object):
"""
docstring for online_svd_calculator:
K : Number of modes to truncate
ff : Forget factor
"""
def __init__(self, K, ff, low_rank=False):
super(online_svd_calculator, self).__init__()
self.K = K
self.ff = ff
# Initialize MPI
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.nprocs = self.comm.Get_size()
self.iteration = 0
self.low_rank = low_rank
# Initialize
def initialize(self, A):
self.ulocal, self.svalue = self.parallel_svd(A)
def parallel_qr(self,A):
# Perform the local QR
q, r = np.linalg.qr(A)
rlocal_shape_0 = r.shape[0]
rlocal_shape_1 = r.shape[1]
# Gather data at rank 0:
r_global = self.comm.gather(r,root=0)
# perform SVD at rank 0:
if self.rank == 0:
temp = r_global[0]
for i in range(self.nprocs-1):
temp = np.concatenate((temp,r_global[i+1]),axis=0)
r_global = temp
qglobal, rfinal = np.linalg.qr(r_global)
qglobal = -qglobal # Trick for consistency
rfinal = -rfinal
# For this rank
qlocal = np.matmul(q,qglobal[:rlocal_shape_0])
# send to other ranks
for rank in range(1,self.nprocs):
self.comm.send(qglobal[rank*rlocal_shape_0:(rank+1)*rlocal_shape_0], dest=rank, tag=rank+10)
# Step b of Levy-Lindenbaum - small operation
if self.low_rank:
# Low rank SVD
unew, snew = low_rank_svd(rfinal,self.K)
else:
unew, snew, _ = np.linalg.svd(rfinal)
else:
# Receive qglobal slices from other ranks
qglobal = self.comm.recv(source=0, tag=self.rank+10)
# For this rank
qlocal = np.matmul(q,qglobal)
# To receive new singular vectors
unew = None
snew = None
unew = self.comm.bcast(unew,root=0)
snew = self.comm.bcast(snew,root=0)
return qlocal, unew, snew
def parallel_svd(self,A):
vlocal, slocal = generate_right_vectors(A)
# Find Wr
wlocal = np.matmul(vlocal,np.diag(slocal).T)
# Gather data at rank 0:
wglobal = self.comm.gather(wlocal,root=0)
# perform SVD at rank 0:
if self.rank == 0:
temp = wglobal[0]
for i in range(self.nprocs-1):
temp = np.concatenate((temp,wglobal[i+1]),axis=-1)
wglobal = temp
if self.low_rank:
x, s = low_rank_svd(wglobal,self.K)
else:
x, s, y = np.linalg.svd(wglobal)
else:
x = None
s = None
x = self.comm.bcast(x,root=0)
s = self.comm.bcast(s,root=0)
# # Find truncation threshold
# s_ratio = np.cumsum(s)/np.sum(s)
# rval = np.argmax(1.0-s_ratio<0.0001) # eps1
# perform APMOS at each local rank
phi_local = []
for mode in range(self.K):
phi_temp = 1.0/s[mode]*np.matmul(A,x[:,mode:mode+1])
phi_local.append(phi_temp)
temp = phi_local[0]
for i in range(self.K-1):
temp = np.concatenate((temp,phi_local[i+1]),axis=-1)
return temp, s[:self.K] #
def incorporate_data(self,A):
self.iteration+=1
ll = self.ff*np.matmul(self.ulocal,np.diag(self.svalue))
ll = np.concatenate((ll,A),axis=-1)
qlocal, utemp, self.svalue = self.parallel_qr(ll)
self.ulocal = np.matmul(qlocal,utemp)
def gather_modes(self):
# Gather modes at rank 0
# This is automatically in order
phi_global = self.comm.gather(self.ulocal,root=0)
if self.rank == 0:
phi = phi_global[0]
for i in range(self.nprocs-1):
phi = np.concatenate((phi,phi_global[i+1]),axis=0)
np.save('Online_Parallel_POD.npy',phi)
np.save('Online_Parallel_SingularValues.npy',self.svalue)
# Validate
serial = np.load('Serial_Modes_MOS.npy')
parallel_online = np.load('Online_Parallel_POD.npy')
serial_online = np.load('Online_Serial_POD.npy')
plt.figure()
plt.plot(serial[:,0],label='serial one-shot')
plt.plot(parallel_online[:,0],label='parallel_online')
plt.plot(serial_online[:,0],label='serial_online')
plt.title('U comparison - column 0')
plt.xlabel('Domain')
plt.ylabel('U magnitude')
plt.legend()
plt.figure()
plt.plot(serial[:,2],label='serial one-shot')
plt.plot(parallel_online[:,2],label='parallel_online')
plt.plot(serial_online[:,2],label='serial_online')
plt.title('U comparison - column 2')
plt.xlabel('Domain')
plt.ylabel('U magnitude')
plt.legend()
serial_svs = np.load('Serial_SingularValues.npy')
serial_online_svs = np.load('Online_Serial_SingularValues.npy')
parallel_online_svs = np.load('Online_Parallel_SingularValues.npy')
plt.figure()
plt.plot(serial_svs[:self.K],label='serial one-shot')
plt.plot(parallel_online_svs[:self.K],label='parallel_online')
plt.plot(serial_online_svs[:self.K],label='serial_online')
plt.title('Singular values')
plt.xlabel('Index')
plt.ylabel('Magnitude')
plt.legend()
plt.show()
# Check orthogonality - should all be successful
check_ortho(serial,self.K)
check_ortho(serial_online,self.K)
check_ortho(parallel_online,self.K)
if __name__ == '__main__':
from time import time
# Initialize timer
start_time = time()
test_class = online_svd_calculator(10,1.0,low_rank=True)
iteration = 0
data = np.load('points_rank_'+str(test_class.rank)+'_batch_'+str(iteration)+'.npy')
test_class.initialize(data)
for iteration in range(1,4):
data = np.load('points_rank_'+str(test_class.rank)+'_batch_'+str(iteration)+'.npy')
test_class.incorporate_data(data)
end_time = time()
print('Time required for parallel streaming SVD (each rank):', end_time-start_time)
test_class.gather_modes()
| 30.954198
| 108
| 0.569667
| 5,904
| 0.72799
| 0
| 0
| 0
| 0
| 0
| 0
| 1,750
| 0.215783
|
62190035d82be78029e09978dd1bef9d1d34feb6
| 12,887
|
py
|
Python
|
joboffers/models.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | 1
|
2022-01-14T18:38:25.000Z
|
2022-01-14T18:38:25.000Z
|
joboffers/models.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | 37
|
2022-01-17T14:41:51.000Z
|
2022-02-16T13:50:05.000Z
|
joboffers/models.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | null | null | null |
import html
import json
import re
from datetime import date
from autoslug import AutoSlugField
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinLengthValidator
from django.db.models.aggregates import Count
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from django.utils.timezone import now
from django.utils.translation import gettext as _
from easyaudit.models import CRUDEvent
from taggit_autosuggest.managers import TaggableManager
from pycompanies.models import UserCompanyProfile
from .constants import STATE_LABEL_CLASSES
class EventType(models.IntegerChoices):
"""
Types of event visualization
"""
LISTING_VIEW = (0, _('Visualización en Listado'))
DETAIL_VIEW = (1, _('Visualización de la oferta completa'))
CONTACT_INFO_VIEW = (2, _('Apertura de la información de contacto'))
class Experience(models.TextChoices):
"""
Choices for JobOffer Experience.
"""
ZERO = '0', _('0')
ONE_PLUS = '1+', _('1+')
TWO_PLUS = '2+', _('2+')
THREE_PLUS = '3+', _('3+')
FIVE_PLUS = '5+', _('5+')
TEN_PLUS = '10+', _('10+')
class Remoteness(models.TextChoices):
"""
Choices for Remoteness.
"""
REMOTE = 'REMOTE', _('Remoto')
OFFICE = 'IN_OFFICE', _('Presencial')
HYBRID = 'MIXED', _('Mixto')
class HiringType(models.TextChoices):
"""
Choices for HiringType.
"""
EMPLOYEE = 'EMPLOYEE', _('Relación de dependencia')
MONOTRIBUTISTA = 'MONOTRIBUTO', _('Monotributista')
CONTRACTOR_SHORT = 'CONTRACTOR_SHORT', _('Contractor short term')
CONTRACTOR_LONG = 'CONTRACTOR_LONG', _('Contractor long term')
COOPERATIVE = 'COOPERATIVE', _('Cooperativa de trabajo')
GOVERNMENT = 'GOVERNMENT', _('Estado')
OTHER = 'OTHER', _('Otra')
class OfferState(models.TextChoices):
"""
Choices for JobOfferStates.
"""
NEW = 'NEW', _('Nuevo') # Used only for actions
DEACTIVATED = 'DEACTIVATED', _('Desactivada')
MODERATION = 'MODERATION', _('En moderación')
ACTIVE = 'ACTIVE', _('Activa')
REJECTED = 'REJECTED', _('Rechazada')
EXPIRED = 'EXPIRED', _('Caducada')
class JobOffer(models.Model):
"""A PyAr Job Offer."""
title = models.CharField(
max_length=255, verbose_name=_('Título'), validators=[MinLengthValidator(20)], unique=True
)
company = models.ForeignKey(
'pycompanies.Company',
verbose_name=_('Empresa'),
on_delete=models.CASCADE,
)
location = models.CharField(max_length=100, blank=True, null=True, verbose_name=_('Lugar'))
contact_mail = models.EmailField(
max_length=255, blank=True, null=True, verbose_name=_('E-mail')
)
contact_phone = models.CharField(
max_length=255, null=True, blank=True, verbose_name=_('Teléfono')
)
contact_url = models.CharField(
max_length=255, null=True, blank=True, verbose_name=_('URL Contacto')
)
experience = models.CharField(
max_length=3, choices=Experience.choices, verbose_name=_('Experiencia')
)
remoteness = models.CharField(
max_length=32, choices=Remoteness.choices, verbose_name=_('Modalidad de trabajo')
)
tags = TaggableManager(verbose_name=_('Etiquetas'), blank=True)
hiring_type = models.CharField(
max_length=32, choices=HiringType.choices, verbose_name=_('Tipo de contratación')
)
salary = models.CharField(
max_length=255, null=True, verbose_name=_('Rango salarial')
)
description = models.TextField(verbose_name=_('Descripción'))
short_description = models.TextField(
max_length=512,
verbose_name=_('Descripción corta')
)
created_at = models.DateTimeField(
auto_now_add=True, verbose_name=_('Hora de creación')
)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name=_('Creado por'),
related_name='created_offers',
)
modified_at = models.DateTimeField(auto_now=True, verbose_name=_('Hora de Modificación'))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name=_('Modificado por'),
related_name='modified_offers',
)
state = models.CharField(
max_length=32, choices=OfferState.choices, default=OfferState.DEACTIVATED,
verbose_name=_('Estado de la oferta')
)
slug = AutoSlugField(populate_from='title', unique=True)
def get_absolute_url(self):
url = reverse('joboffers:view', kwargs={'slug': self.slug})
absolute_url = "".join((settings.BASE_URL, url))
return absolute_url
def __str__(self):
return self.title
@property
def last_comment(self):
"""
Return the last rejection JobOfferComment
"""
return self.joboffercomment_set.last()
@classmethod
def get_short_description(cls, description):
"""
Deduce the short_description from a given html description string
"""
description_stripped_tags = re.sub(r'<[^>]*>', ' ', description)
description_without_spaces = re.sub(r'\s+', ' ', description_stripped_tags).strip()
description_unescaped = html.unescape(description_without_spaces)
return description_unescaped[:512]
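    # Example added for illustration (hypothetical input):
    # JobOffer.get_short_description('<p>Python &amp; Django dev</p>')
    # -> 'Python & Django dev'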
def track_visualization(self, session, event_type: EventType):
"""
Either get or create the matching JobOfferAccessLog instance for the joboffer.
"""
today = date.today()
month_year = today.year * 100 + today.month
if session.session_key is None:
session.save()
return JobOfferAccessLog.objects.get_or_create(
month_and_year=month_year,
event_type=event_type,
session=session.session_key,
joboffer=self
)
def get_publisher_mail_addresses(self):
"""
Return a list of the email addresses of the publishers of this offer.
It filters users with empty mail field
"""
profiles = UserCompanyProfile.objects.filter(company=self.company)
addresses = set()
for profile in profiles:
if profile.user.email:
addresses.add(profile.user.email)
return addresses
def get_visualizations_count(self):
"""
Get a dict with visualizations count for every kind of event
"""
items = JobOfferAccessLog.objects \
.filter(joboffer=self) \
.values_list('event_type') \
.annotate(total=Count('event_type')) \
.order_by()
return dict(items)
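        # Example of the returned mapping (values are illustrative):
        # {0: 120, 1: 35, 2: 4}   # keys are EventType values: listing / detail / contact info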
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
if not self.short_description:
self.short_description = self.get_short_description(self.description)
super().save(*args, **kwargs)
@classmethod
def get_options(cls):
"""
        Public _meta API accessor: https://docs.djangoproject.com/en/4.0/ref/models/meta/
"""
return cls._meta
class Meta:
constraints = [
models.CheckConstraint(
name='%(app_label)s_%(class)s_not_all_contact_info_null',
check=(
models.Q(
contact_mail__isnull=False,
)
| models.Q(
contact_phone__isnull=False,
)
| models.Q(
contact_url__isnull=False,
)
),
),
models.CheckConstraint(
name='%(app_label)s_%(class)s_location_not_null_when_not_remote',
check=(
(
models.Q(remoteness__in=(Remoteness.HYBRID, Remoteness.OFFICE))
& models.Q(location__isnull=False)
)
| models.Q(remoteness=Remoteness.REMOTE)
),
),
]
class CommentType(models.TextChoices):
"""
Choices for Types of JobOfferComments.
"""
MODERATION = 'MODERATION', _('Moderación')
EDITION = 'EDITION', _('Edición')
SPAM = 'SPAM', _('Spam')
INSUFICIENT = 'INSUFICIENT', _('Información insuficiente')
NOT_RELATED = 'NOT_PYTHON', _('Oferta no relacionada con Python')
class JobOfferComment(models.Model):
"""
A comment on a JobOffer.
"""
text = models.TextField(verbose_name=_('Texto'))
comment_type = models.CharField(
max_length=32, choices=CommentType.choices, verbose_name=_('Tipo'))
created_at = models.DateTimeField(
auto_now_add=True, verbose_name=_('Rango salarial')
)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name=_('Creado por'),
related_name='created_joboffer_comments',
)
joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)
@classmethod
def get_options(cls):
"""
        Public _meta API accessor: https://docs.djangoproject.com/en/4.0/ref/models/meta/
"""
return cls._meta
def __str__(self):
return f"{self.joboffer.title}: {self.get_comment_type_display()}"
class JobOfferHistoryManager(models.Manager):
def for_offer(self, joboffer):
"""
        Get all the history objects for a given joboffer. These can be JobOffer or JobOfferComment events.
"""
qs = super().get_queryset()
offer_ctype = ContentType.objects.get(app_label='joboffers', model='joboffer')
offer_comment_ctype = ContentType.objects.get(
app_label='joboffers', model='joboffercomment'
)
offer_q = models.Q(event_type__lt=4, object_id=joboffer.id, content_type=offer_ctype)
offer_comment_ids = [
offer_comment.id for offer_comment in joboffer.joboffercomment_set.all()
]
offer_comment_q = models.Q(
object_id__in=offer_comment_ids, content_type=offer_comment_ctype
)
qs = qs.filter(offer_q | offer_comment_q)
return qs
class JobOfferHistory(CRUDEvent):
"""
    This is a proxy model used to simplify the code by taking the logic out of the controller
"""
objects = JobOfferHistoryManager()
@property
def fields(self):
"""
Return the representation of the joboffer after this particular change is applied.
        It returns a python dict that can contain different fields than the current model.
"""
obj_repr = json.loads(self.object_json_repr)
fields = obj_repr[0]['fields']
return fields
@property
def joboffer_comment(self):
"""
Return the JobOfferComment instance for the matching JobOfferHistory
"""
if self.content_type.model != 'joboffercomment':
raise ValueError("Unexpected model. Expected a JobOfferComment instance.")
return JobOfferComment.objects.get(id=self.object_id)
@property
def changes(self):
"""
Get a dict with the changes made to the object.
"""
if self.changed_fields:
return json.loads(self.changed_fields)
else:
return None
@property
def state_label(self):
"""
Get the state of the joboffer at the time of the change
"""
if self.content_type.model != 'joboffer':
raise ValueError("Unexpected model. Expected a JobOffer instance.")
fields = self.fields
joboffer = JobOffer(state=fields['state'])
return joboffer.get_state_display()
@property
def state_label_class(self):
"""
        Get the bootstrap label class for the matching joboffer state. Returns a default if the
        'state' field is not present, for example because of a field rename in the model.
"""
if self.content_type.model != 'joboffer':
raise ValueError("Unexpected model. Expected a JobOffer instance.")
state = self.fields['state']
return STATE_LABEL_CLASSES[state]
class Meta:
proxy = True
class JobOfferAccessLog(models.Model):
"""
Model to track visualization of joboffers
"""
created_at = models.DateTimeField(default=now)
month_and_year = models.PositiveIntegerField()
event_type = models.PositiveSmallIntegerField(
choices=EventType.choices, verbose_name=_('Tipo de Evento')
)
session = models.CharField(max_length=40, verbose_name=_('Identificador de Sesión'))
joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)
class Meta:
ordering = ['created_at']
| 32.298246
| 96
| 0.632575
| 12,204
| 0.945827
| 0
| 0
| 2,714
| 0.210339
| 0
| 0
| 3,583
| 0.277687
|
621946fa869b479764d5f279c948e790f062b5f0
| 32,670
|
py
|
Python
|
lib/networks/ResNet101_HICO.py
|
zhihou7/VCL
|
1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48
|
[
"MIT"
] | 29
|
2020-07-28T03:11:21.000Z
|
2022-03-09T04:37:47.000Z
|
lib/networks/ResNet101_HICO.py
|
zhihou7/VCL
|
1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48
|
[
"MIT"
] | 8
|
2020-08-19T06:40:42.000Z
|
2022-03-07T03:48:57.000Z
|
lib/networks/ResNet101_HICO.py
|
zhihou7/VCL
|
1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48
|
[
"MIT"
] | 7
|
2020-07-20T09:05:17.000Z
|
2021-11-26T13:04:25.000Z
|
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou, based on code from Transferable-Interactiveness-Network, Chen Gao, Zheqi he and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import ops
from ult.tools import get_convert_matrix
from ult.config import cfg
from ult.visualization import draw_bounding_boxes_HOI
import numpy as np
def resnet_arg_scope(is_training=True,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': ops.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer = slim.variance_scaling_initializer(),
biases_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
biases_initializer = tf.constant_initializer(0.0),
trainable = is_training,
activation_fn = tf.nn.relu,
normalizer_fn = slim.batch_norm,
normalizer_params = batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
class ResNet101():
def __init__(self, model_name):
self.model_name = model_name
self.visualize = {}
self.test_visualize = {}
self.intermediate = {}
self.predictions = {}
self.score_summaries = {}
self.event_summaries = {}
self.train_summaries = []
self.losses = {}
self.image = tf.placeholder(tf.float32, shape=[1, None, None, 3], name = 'image')
self.spatial = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name = 'sp')
self.H_boxes = tf.placeholder(tf.float32, shape=[None, 5], name = 'H_boxes')
self.O_boxes = tf.placeholder(tf.float32, shape=[None, 5], name = 'O_boxes')
self.gt_class_HO = tf.placeholder(tf.float32, shape=[None, 600], name = 'gt_class_HO')
self.H_num = tf.placeholder(tf.int32) # positive nums
self.image_id = tf.placeholder(tf.int32)
self.num_classes = 600
self.compose_num_classes = 600
self.num_fc = 1024
self.verb_num_classes = 117
self.obj_num_classes = 80
self.scope = 'resnet_v1_101'
self.stride = [16, ]
self.lr = tf.placeholder(tf.float32)
if tf.__version__ == '1.1.0':
raise Exception('wrong tensorflow version 1.1.0')
else:
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
self.blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=1),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
resnet_v1_block('block5', base_depth=512, num_units=3, stride=1)]
if self.model_name.__contains__('unique_weights') or self.model_name.__contains__('_pa3')\
or self.model_name.__contains__('_pa4'):
print("add block6 unique_weights2")
self.blocks.append(resnet_v1_block('block6', base_depth=512, num_units=3, stride=1))
"""We copy from TIN. calculated by log(1/(n_c/sum(n_c)) c is the category and n_c is
the number of positive samples"""
self.HO_weight = np.array([
9.192927, 9.778443, 10.338059, 9.164914, 9.075144, 10.045923, 8.714437, 8.59822, 12.977117, 6.2745423,
11.227917, 6.765012, 9.436157, 9.56762, 11.0675745, 11.530198, 9.609821, 9.897503, 6.664475, 6.811699,
6.644726, 9.170454, 13.670264, 3.903943, 10.556748, 8.814335, 9.519224, 12.753973, 11.590822, 8.278912,
5.5245695, 9.7286825, 8.997436, 10.699849, 9.601237, 11.965516, 9.192927, 10.220277, 6.056692, 7.734048,
8.42324, 6.586457, 6.969533, 10.579222, 13.670264, 4.4531965, 9.326459, 9.288238, 8.071842, 10.431585,
12.417501, 11.530198, 11.227917, 4.0678477, 8.854023, 12.571651, 8.225684, 10.996116, 11.0675745,
10.100731,
7.0376034, 7.463688, 12.571651, 14.363411, 5.4902234, 11.0675745, 14.363411, 8.45805, 10.269067,
9.820116,
14.363411, 11.272368, 11.105314, 7.981595, 9.198626, 3.3284247, 14.363411, 12.977117, 9.300817,
10.032678,
12.571651, 10.114916, 10.471591, 13.264799, 14.363411, 8.01953, 10.412168, 9.644913, 9.981384,
7.2197933,
14.363411, 3.1178555, 11.031207, 8.934066, 7.546675, 6.386472, 12.060826, 8.862153, 9.799063, 12.753973,
12.753973, 10.412168, 10.8976755, 10.471591, 12.571651, 9.519224, 6.207762, 12.753973, 6.60636,
6.2896967,
4.5198326, 9.7887, 13.670264, 11.878505, 11.965516, 8.576513, 11.105314, 9.192927, 11.47304, 11.367679,
9.275815, 11.367679, 9.944571, 11.590822, 10.451388, 9.511381, 11.144535, 13.264799, 5.888291,
11.227917,
10.779892, 7.643191, 11.105314, 9.414651, 11.965516, 14.363411, 12.28397, 9.909063, 8.94731, 7.0330057,
8.129001, 7.2817025, 9.874775, 9.758241, 11.105314, 5.0690055, 7.4768796, 10.129305, 9.54313, 13.264799,
9.699972, 11.878505, 8.260853, 7.1437693, 6.9321113, 6.990665, 8.8104515, 11.655361, 13.264799,
4.515912,
9.897503, 11.418972, 8.113436, 8.795067, 10.236277, 12.753973, 14.363411, 9.352776, 12.417501,
0.6271591,
12.060826, 12.060826, 12.166186, 5.2946343, 11.318889, 9.8308115, 8.016022, 9.198626, 10.8976755,
13.670264,
11.105314, 14.363411, 9.653881, 9.503599, 12.753973, 5.80546, 9.653881, 9.592727, 12.977117, 13.670264,
7.995224, 8.639826, 12.28397, 6.586876, 10.929424, 13.264799, 8.94731, 6.1026597, 12.417501, 11.47304,
10.451388, 8.95624, 10.996116, 11.144535, 11.031207, 13.670264, 13.670264, 6.397866, 7.513285, 9.981384,
11.367679, 11.590822, 7.4348736, 4.415428, 12.166186, 8.573451, 12.977117, 9.609821, 8.601359, 9.055143,
11.965516, 11.105314, 13.264799, 5.8201604, 10.451388, 9.944571, 7.7855496, 14.363411, 8.5463,
13.670264,
7.9288645, 5.7561946, 9.075144, 9.0701065, 5.6871653, 11.318889, 10.252538, 9.758241, 9.407584,
13.670264,
8.570397, 9.326459, 7.488179, 11.798462, 9.897503, 6.7530537, 4.7828183, 9.519224, 7.6492405, 8.031909,
7.8180614, 4.451856, 10.045923, 10.83705, 13.264799, 13.670264, 4.5245686, 14.363411, 10.556748,
10.556748,
14.363411, 13.670264, 14.363411, 8.037262, 8.59197, 9.738439, 8.652985, 10.045923, 9.400566, 10.9622135,
11.965516, 10.032678, 5.9017305, 9.738439, 12.977117, 11.105314, 10.725825, 9.080208, 11.272368,
14.363411,
14.363411, 13.264799, 6.9279733, 9.153925, 8.075553, 9.126969, 14.363411, 8.903826, 9.488214, 5.4571533,
10.129305, 10.579222, 12.571651, 11.965516, 6.237189, 9.428937, 9.618479, 8.620408, 11.590822,
11.655361,
9.968962, 10.8080635, 10.431585, 14.363411, 3.796231, 12.060826, 10.302968, 9.551227, 8.75394,
10.579222,
9.944571, 14.363411, 6.272396, 10.625742, 9.690582, 13.670264, 11.798462, 13.670264, 11.724354,
9.993963,
8.230013, 9.100721, 10.374427, 7.865129, 6.514087, 14.363411, 11.031207, 11.655361, 12.166186, 7.419324,
9.421769, 9.653881, 10.996116, 12.571651, 13.670264, 5.912144, 9.7887, 8.585759, 8.272101, 11.530198,
8.886948,
5.9870906, 9.269661, 11.878505, 11.227917, 13.670264, 8.339964, 7.6763024, 10.471591, 10.451388,
13.670264,
11.185357, 10.032678, 9.313555, 12.571651, 3.993144, 9.379805, 9.609821, 14.363411, 9.709451, 8.965248,
10.451388, 7.0609145, 10.579222, 13.264799, 10.49221, 8.978916, 7.124196, 10.602211, 8.9743395, 7.77862,
8.073695, 9.644913, 9.339531, 8.272101, 4.794418, 9.016304, 8.012526, 10.674532, 14.363411, 7.995224,
12.753973, 5.5157638, 8.934066, 10.779892, 7.930471, 11.724354, 8.85808, 5.9025764, 14.363411,
12.753973,
12.417501, 8.59197, 10.513264, 10.338059, 14.363411, 7.7079706, 14.363411, 13.264799, 13.264799,
10.752493,
14.363411, 14.363411, 13.264799, 12.417501, 13.670264, 6.5661197, 12.977117, 11.798462, 9.968962,
12.753973,
11.47304, 11.227917, 7.6763024, 10.779892, 11.185357, 14.363411, 7.369478, 14.363411, 9.944571,
10.779892,
10.471591, 9.54313, 9.148476, 10.285873, 10.412168, 12.753973, 14.363411, 6.0308623, 13.670264,
10.725825,
12.977117, 11.272368, 7.663911, 9.137665, 10.236277, 13.264799, 6.715625, 10.9622135, 14.363411,
13.264799,
9.575919, 9.080208, 11.878505, 7.1863923, 9.366199, 8.854023, 9.874775, 8.2857685, 13.670264, 11.878505,
12.166186, 7.616999, 9.44343, 8.288065, 8.8104515, 8.347254, 7.4738197, 10.302968, 6.936267, 11.272368,
7.058223, 5.0138307, 12.753973, 10.173757, 9.863602, 11.318889, 9.54313, 10.996116, 12.753973,
7.8339925,
7.569945, 7.4427395, 5.560738, 12.753973, 10.725825, 10.252538, 9.307165, 8.491293, 7.9161053,
7.8849015,
7.782772, 6.3088884, 8.866243, 9.8308115, 14.363411, 10.8976755, 5.908519, 10.269067, 9.176025,
9.852551,
9.488214, 8.90809, 8.537411, 9.653881, 8.662968, 11.965516, 10.143904, 14.363411, 14.363411, 9.407584,
5.281472, 11.272368, 12.060826, 14.363411, 7.4135547, 8.920994, 9.618479, 8.891141, 14.363411,
12.060826,
11.965516, 10.9622135, 10.9622135, 14.363411, 5.658909, 8.934066, 12.571651, 8.614018, 11.655361,
13.264799,
10.996116, 13.670264, 8.965248, 9.326459, 11.144535, 14.363411, 6.0517673, 10.513264, 8.7430105,
10.338059,
13.264799, 6.878481, 9.065094, 8.87035, 14.363411, 9.92076, 6.5872955, 10.32036, 14.363411, 9.944571,
11.798462, 10.9622135, 11.031207, 7.652888, 4.334878, 13.670264, 13.670264, 14.363411, 10.725825,
12.417501,
14.363411, 13.264799, 11.655361, 10.338059, 13.264799, 12.753973, 8.206432, 8.916674, 8.59509,
14.363411,
7.376845, 11.798462, 11.530198, 11.318889, 11.185357, 5.0664344, 11.185357, 9.372978, 10.471591,
9.6629305,
11.367679, 8.73579, 9.080208, 11.724354, 5.04781, 7.3777695, 7.065643, 12.571651, 11.724354, 12.166186,
12.166186, 7.215852, 4.374113, 11.655361, 11.530198, 14.363411, 6.4993753, 11.031207, 8.344818,
10.513264,
10.032678, 14.363411, 14.363411, 4.5873594, 12.28397, 13.670264, 12.977117, 10.032678, 9.609821
], dtype='float32').reshape(1, 600)
num_inst_path = cfg.ROOT_DIR + '/Data/num_inst.npy'
num_inst = np.load(num_inst_path)
self.num_inst = num_inst
verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix(self.verb_num_classes, self.obj_num_classes)
self.obj_to_HO_matrix = tf.constant(obj_to_HO_matrix, tf.float32)
self.verb_to_HO_matrix = tf.constant(verb_to_HO_matrix, tf.float32)
self.gt_obj_class = tf.cast(tf.matmul(self.gt_class_HO, self.obj_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
self.gt_verb_class = tf.cast(tf.matmul(self.gt_class_HO, self.verb_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
def init_table(self):
pass
def set_ph(self, image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp):
if image is not None: self.image = image
if image_id is not None: self.image_id = image_id
if sp is not None: self.spatial = sp
if Human_augmented is not None: self.H_boxes = Human_augmented
if Object_augmented is not None: self.O_boxes = Object_augmented
if action_HO is not None: self.gt_class_HO = action_HO
self.H_num = num_pos
self.reset_classes()
def reset_classes(self):
from ult.tools import get_convert_matrix
verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix(self.verb_num_classes, self.obj_num_classes)
self.obj_to_HO_matrix = tf.constant(obj_to_HO_matrix, tf.float32)
self.verb_to_HO_matrix = tf.constant(verb_to_HO_matrix, tf.float32)
self.gt_obj_class = tf.cast(tf.matmul(self.gt_class_HO, self.obj_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
self.gt_verb_class = tf.cast(tf.matmul(self.gt_class_HO, self.verb_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
def build_base(self):
with tf.variable_scope(self.scope, self.scope, reuse=tf.AUTO_REUSE,):
net = resnet_utils.conv2d_same(self.image, 64, 7, stride=2, scope='conv1')
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
return net
def image_to_head(self, is_training):
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net = self.build_base()
net, _ = resnet_v1.resnet_v1(net,
self.blocks[0:cfg.RESNET.FIXED_BLOCKS],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
if self.model_name.__contains__('unique_weights'):
print("unique_weights3")
stop = -3
else:
stop = -2
head, _ = resnet_v1.resnet_v1(net,
self.blocks[cfg.RESNET.FIXED_BLOCKS:stop],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
return head
def sp_to_head(self):
with tf.variable_scope(self.scope, self.scope, reuse=tf.AUTO_REUSE,):
ends = 2
if self.model_name.__contains__('_spose'):
ends = 3
conv1_sp = slim.conv2d(self.spatial[:,:,:,0:ends], 64, [5, 5], padding='VALID', scope='conv1_sp')
pool1_sp = slim.max_pool2d(conv1_sp, [2, 2], scope='pool1_sp')
conv2_sp = slim.conv2d(pool1_sp, 32, [5, 5], padding='VALID', scope='conv2_sp')
pool2_sp = slim.max_pool2d(conv2_sp, [2, 2], scope='pool2_sp')
pool2_flat_sp = slim.flatten(pool2_sp)
return pool2_flat_sp
def res5(self, pool5_H, pool5_O, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
if pool5_H is None:
fc7_H = None
else:
fc7_H, _ = resnet_v1.resnet_v1(pool5_H,
self.blocks[-2:-1],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
# fc7_H = tf.reduce_mean(fc7_H, axis=[1, 2])
if pool5_O is None:
fc7_O = None
else:
fc7_O, _ = resnet_v1.resnet_v1(pool5_O,
self.blocks[-1:],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
# fc7_O = tf.reduce_mean(fc7_O, axis=[1, 2])
return fc7_H, fc7_O
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])
Concat_SH = tf.concat([fc7_H, fc7_SH], 1)
fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH', reuse=tf.AUTO_REUSE)
fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training, scope='dropout8_SH')
fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH', reuse=tf.AUTO_REUSE)
fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training, scope='dropout9_SH')
Concat_SO = tf.concat([fc7_O, fc7_SO], 1)
fc8_SO = slim.fully_connected(Concat_SO, self.num_fc, scope='fc8_SO', reuse=tf.AUTO_REUSE)
fc8_SO = slim.dropout(fc8_SO, keep_prob=0.5, is_training=is_training, scope='dropout8_SO')
fc9_SO = slim.fully_connected(fc8_SO, self.num_fc, scope='fc9_SO', reuse=tf.AUTO_REUSE)
fc9_SO = slim.dropout(fc9_SO, keep_prob=0.5, is_training=is_training, scope='dropout9_SO')
Concat_SHsp = tf.concat([fc7_H, sp], 1)
Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='Concat_SHsp', reuse=tf.AUTO_REUSE)
Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout6_SHsp')
fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='fc7_SHsp', reuse=tf.AUTO_REUSE)
fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout7_SHsp')
return fc9_SH, fc9_SO, fc7_SHsp
def crop_pool_layer(self, bottom, rois, name):
with tf.variable_scope(name) as scope:
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
bboxes = self.trans_boxes_by_feats(bottom, rois)
if cfg.RESNET.MAX_POOL:
pre_pool_size = cfg.POOLING_SIZE * 2
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
crops = slim.max_pool2d(crops, [2, 2], padding='SAME')
else:
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [cfg.POOLING_SIZE, cfg.POOLING_SIZE], name="crops")
return crops
def trans_boxes_by_feats(self, bottom, rois):
bottom_shape = tf.shape(bottom)
height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self.stride[0])
width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self.stride[0])
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
return bboxes
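# Worked example of the normalization above (all numbers are assumptions for
# illustration): with stride 16 and a 38 x 50 feature map,
#   height = (38 - 1) * 16 = 592,  width = (50 - 1) * 16 = 784,
# so a pixel-space ROI corner x1 = 100 maps to 100 / 784 ~ 0.128. Boxes are
# thus rescaled to the [0, 1] range expected by tf.image.crop_and_resize, and
# tf.stop_gradient keeps gradients from flowing back through them.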
def attention_pool_layer_H(self, bottom, fc7_H, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_H, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_H(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att)
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
def attention_pool_layer_O(self, bottom, fc7_O, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_O, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_O(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att)
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
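# A minimal self-contained sketch (NumPy, for illustration only) of what the
# two attention_norm_* methods compute: a softmax over the spatial positions
# of a single-channel attention map, so the weights sum to 1 over H*W.
#
# import numpy as np
# att = np.random.rand(1, 7, 7, 1)             # N, H, W, 1
# flat = att.reshape(1, -1)                    # flatten the spatial grid
# flat = np.exp(flat - flat.max())
# flat = flat / flat.sum()
# att_norm = flat.reshape(1, 7, 7, 1)          # att_norm.sum() == 1.0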
def region_classification(self, fc7_H, fc7_O, fc7_SHsp, is_training, initializer, name):
with tf.variable_scope(name) as scope:
cls_score_H = slim.fully_connected(fc7_H, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_H')
cls_prob_H = tf.nn.sigmoid(cls_score_H, name='cls_prob_H')
cls_prob_H = tf.reshape(cls_prob_H, [-1, self.num_classes])
cls_score_O = slim.fully_connected(fc7_O, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_O')
cls_prob_O = tf.nn.sigmoid(cls_score_O, name='cls_prob_O')
cls_prob_O = tf.reshape(cls_prob_O, [-1, self.num_classes])
cls_score_sp = slim.fully_connected(fc7_SHsp, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_sp')
cls_prob_sp = tf.nn.sigmoid(cls_score_sp, name='cls_prob_sp')
cls_prob_sp = tf.reshape(cls_prob_sp, [-1, self.num_classes])
self.predictions["cls_score_H"] = cls_score_H
self.predictions["cls_prob_H"] = cls_prob_H
self.predictions["cls_score_O"] = cls_score_O
self.predictions["cls_prob_O"] = cls_prob_O
self.predictions["cls_score_sp"] = cls_score_sp
self.predictions["cls_prob_sp"] = cls_prob_sp
self.predictions["cls_prob_HO"] = cls_prob_sp * (cls_prob_O + cls_prob_H)
return cls_prob_H, cls_prob_O, cls_prob_sp
def bottleneck(self, bottom, is_training, name, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
head_bottleneck = slim.conv2d(bottom, 1024, [1, 1], scope=name)
return head_bottleneck
def build_network(self, is_training):
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
# ResNet Backbone
head = self.image_to_head(is_training)
sp = self.sp_to_head()
pool5_H = self.crop_pool_layer(head, self.H_boxes, 'Crop_H')
pool5_O = self.crop_pool_layer(head, self.O_boxes[:self.H_num,:], 'Crop_O')
fc7_H, fc7_O = self.res5(pool5_H, pool5_O, sp, is_training, 'res5')
fc7_H = tf.reduce_mean(fc7_H, axis=[1, 2])
fc7_O = tf.reduce_mean(fc7_O, axis=[1, 2])
# Phi
head_phi = slim.conv2d(head, 512, [1, 1], scope='head_phi')
# g
head_g = slim.conv2d(head, 512, [1, 1], scope='head_g')
Att_H = self.attention_pool_layer_H(head_phi, fc7_H, is_training, 'Att_H')
Att_H = self.attention_norm_H(Att_H, 'Norm_Att_H')
att_head_H = tf.multiply(head_g, Att_H)
Att_O = self.attention_pool_layer_O(head_phi, fc7_O, is_training, 'Att_O')
Att_O = self.attention_norm_O(Att_O, 'Norm_Att_O')
att_head_O = tf.multiply(head_g, Att_O)
pool5_SH = self.bottleneck(att_head_H, is_training, 'bottleneck', False)
pool5_SO = self.bottleneck(att_head_O, is_training, 'bottleneck', True)
# fc7_O = tf.Print(fc7_O, [tf.shape(fc7_O), tf.shape(fc7_H)], message='check fc7_O:')
fc7_SH, fc7_SO, fc7_SHsp = self.head_to_tail(fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, 'fc_HO')
# fc7_SO = tf.Print(fc7_SO, [tf.shape(fc7_SO), tf.shape(fc7_SH), tf.shape(fc7_SHsp)], message='check fc7_SHsp:')
cls_prob_H, cls_prob_O, cls_prob_sp = self.region_classification(fc7_SH, fc7_SO, fc7_SHsp, is_training, initializer, 'classification')
self.score_summaries.update(self.predictions)
self.visualize["attention_map_H"] = (Att_H - tf.reduce_min(Att_H[0,:,:,:])) / tf.reduce_max((Att_H[0,:,:,:] - tf.reduce_min(Att_H[0,:,:,:])))
self.visualize["attention_map_O"] = (Att_O - tf.reduce_min(Att_O[0,:,:,:])) / tf.reduce_max((Att_O[0,:,:,:] - tf.reduce_min(Att_O[0,:,:,:])))
return cls_prob_H, cls_prob_O, cls_prob_sp
def create_architecture(self, is_training):
self.build_network(is_training)
# for var in tf.trainable_variables():
# self.train_summaries.append(var)
if is_training: self.add_loss()
layers_to_output = {}
layers_to_output.update(self.losses)
val_summaries = []
if is_training:
with tf.device("/cpu:0"):
# val_summaries.append(self.add_gt_image_summary_H())
# val_summaries.append(self.add_gt_image_summary_HO())
# tf.summary.image('ATTENTION_MAP_H', self.visualize["attention_map_H"], max_outputs=1)
# tf.summary.image('ATTENTION_MAP_O', self.visualize["attention_map_O"], max_outputs=1)
for key, var in self.visualize.items():
tf.summary.image(key, var, max_outputs=1)
for key, var in self.event_summaries.items():
val_summaries.append(tf.summary.scalar(key, var))
# val_summaries.append(tf.summary.scalar('lr', self.lr))
self.summary_op = tf.summary.merge_all()
self.summary_op_val = tf.summary.merge(val_summaries)
return layers_to_output
def add_loss(self):
with tf.variable_scope('LOSS') as scope:
cls_score_H = self.predictions["cls_score_H"]
cls_score_O = self.predictions["cls_score_O"]
cls_score_sp = self.predictions["cls_score_sp"]
label_HO = self.gt_class_HO
H_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO[:self.H_num,:], logits = cls_score_H[:self.H_num,:]))
O_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO[:self.H_num,:], logits = cls_score_O[:self.H_num,:]))
sp_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO, logits = cls_score_sp))
self.losses['H_cross_entropy'] = H_cross_entropy
self.losses['O_cross_entropy'] = O_cross_entropy
self.losses['sp_cross_entropy'] = sp_cross_entropy
loss = H_cross_entropy + O_cross_entropy + sp_cross_entropy
self.losses['total_loss'] = loss
self.event_summaries.update(self.losses)
return loss
def add_gt_image_summary_H(self):
image = tf.py_func(draw_bounding_boxes_HOI,
[tf.reverse(self.image+cfg.PIXEL_MEANS, axis=[-1]), self.H_boxes, self.gt_class_HO],
tf.float32, name="gt_boxes_H")
return tf.summary.image('GROUND_TRUTH_H', image)
def add_gt_image_summary_HO(self):
image = tf.py_func(draw_bounding_boxes_HOI,
[tf.reverse(self.image+cfg.PIXEL_MEANS, axis=[-1]), self.O_boxes, self.gt_class_HO],
tf.float32, name="gt_boxes_HO")
return tf.summary.image('GROUND_TRUTH_HO', image)
def add_score_summary(self, key, tensor):
if tensor is not None and tensor.op is not None:
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
def get_feed_dict(self, blobs):
feed_dict = {self.image: blobs['image'], self.H_boxes: blobs['H_boxes'],
self.O_boxes: blobs['O_boxes'], self.gt_class_HO: blobs['gt_class_HO'],
self.spatial: blobs['sp'],
# self.lr: lr,
self.H_num: blobs['H_num']}
return feed_dict
def train_step(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, _ = sess.run([self.losses['total_loss'],
train_op],
feed_dict=feed_dict)
return loss
def train_step_with_summary(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, summary, _ = sess.run([self.losses['total_loss'],
self.summary_op,
train_op],
feed_dict=feed_dict)
return loss, summary
def train_step_tfr(self, sess, blobs, lr, train_op):
loss, image_id, _ = sess.run([self.losses['total_loss'], self.image_id,
train_op])
return loss, image_id
def train_step_tfr_with_summary(self, sess, blobs, lr, train_op):
loss, summary, image_id, _ = sess.run([self.losses['total_loss'],
self.summary_op, self.image_id,
train_op])
return loss, image_id, summary
def test_image_HO(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'], self.spatial: blobs['sp'], self.H_num: blobs['H_num']}
cls_prob_HO = sess.run([self.predictions["cls_prob_HO"]], feed_dict=feed_dict)
return cls_prob_HO
def obtain_all_preds(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'], self.H_num: blobs['H_num']}
cls_prob_HO, pH, pO, pSp = sess.run([self.predictions["cls_prob_HO"], self.predictions["cls_prob_H"],
self.predictions["cls_prob_O"], self.predictions["cls_prob_sp"]], feed_dict=feed_dict)
return cls_prob_HO, pH, pO, pSp, pSp
def obtain_all_preds_tfr(self, sess):
cls_prob_HO, pH, pO, pSp = sess.run([self.predictions["cls_prob_HO"], self.predictions["cls_prob_H"],
self.predictions["cls_prob_O"], self.predictions["cls_prob_sp"]])
return cls_prob_HO, pH, pO, pSp, pSp
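# --- Usage sketch (not part of the original file) ---
# A hedged illustration of how the train_step/test helpers above are usually
# driven. The names `HOINetwork`, `data_loader` and the optimizer settings are
# assumptions for illustration; the real training driver lives elsewhere in
# the project.
#
# net = HOINetwork()                                        # hypothetical class name
# net.create_architecture(is_training=True)
# train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(net.losses['total_loss'])
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for blobs in data_loader:                             # data_loader is assumed
#         loss = net.train_step(sess, blobs, lr=0.001, train_op=train_op)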
| 52.949757
| 158
| 0.589654
| 30,682
| 0.939149
| 0
| 0
| 0
| 0
| 0
| 0
| 2,619
| 0.080165
|
621b663ff688adbcc08ffd8203aafeded181974f
| 7,738
|
py
|
Python
|
nova_powervm/tests/virt/powervm/test_mgmt.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 24
|
2015-10-18T02:55:20.000Z
|
2021-11-17T11:43:51.000Z
|
nova_powervm/tests/virt/powervm/test_mgmt.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | null | null | null |
nova_powervm/tests/virt/powervm/test_mgmt.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 12
|
2015-10-26T17:38:05.000Z
|
2021-07-21T12:45:19.000Z
|
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm import mgmt
LPAR_HTTPRESP_FILE = "lpar.txt"
class TestMgmt(test.NoDBTestCase):
def setUp(self):
super(TestMgmt, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
self.assertNotEqual(lpar_http, None,
"Could not load %s " % LPAR_HTTPRESP_FILE)
self.resp = lpar_http.response
@mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
def test_mgmt_uuid(self, mock_get_partition):
mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
adpt = mock.Mock()
# First run should call the partition only once
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_called_once_with(adpt)
# But a subsequent call should effectively no-op
mock_get_partition.reset_mock()
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
self.assertEqual(0, mock_get_partition.call_count)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
def test_discover_vscsi_disk(self, mock_realpath, mock_dacw, mock_glob):
scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# Realistically, first glob would return e.g. .../host0/.../host0/...
# but it doesn't matter for test purposes.
mock_glob.side_effect = [[scanpath], [devlink]]
mgmt.discover_vscsi_disk(mapping)
mock_glob.assert_has_calls(
[mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
mock_dacw.assert_called_with(scanpath, 'a', '- - -')
mock_realpath.assert_called_with(devlink)
@mock.patch('retrying.retry', autospec=True)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_discover_vscsi_disk_not_one_result(self, mock_write, mock_glob,
mock_retry):
"""Zero or more than one disk is found by discover_vscsi_disk."""
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(300000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_passthrough(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return _poll_for_dev
return wrapped
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return raiser
return wrapped
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# No disks found
mock_retry.side_effect = retry_timeout
mock_glob.side_effect = lambda path: []
self.assertRaises(npvmex.NoDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
# Multiple disks found
mock_retry.side_effect = retry_passthrough
mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
self.assertRaises(npvmex.UniqueDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
@mock.patch('time.sleep', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
@mock.patch('os.stat', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_remove_block_dev(self, mock_dacw, mock_stat, mock_realpath,
mock_sleep):
link = '/dev/link/foo'
realpath = '/dev/sde'
delpath = '/sys/block/sde/device/delete'
mock_realpath.return_value = realpath
# Good path
mock_stat.side_effect = (None, None, OSError())
mgmt.remove_block_dev(link)
mock_realpath.assert_called_with(link)
mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
mock.call(realpath)])
mock_dacw.assert_called_with(delpath, 'a', '1')
self.assertEqual(0, mock_sleep.call_count)
# Device param not found
mock_dacw.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (OSError(), None, None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called once; privsep write was not called
self.assertEqual(1, mock_stat.call_count)
mock_dacw.assert_not_called()
# Delete special file not found
mock_stat.reset_mock()
mock_stat.side_effect = (None, OSError(), None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called twice; privsep write was not called
self.assertEqual(2, mock_stat.call_count)
mock_dacw.assert_not_called()
@mock.patch('retrying.retry')
@mock.patch('os.path.realpath')
@mock.patch('os.stat')
@mock.patch('nova.privsep.path.writefile')
def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
mock_realpath, mock_retry):
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(10000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_del):
return raiser
return wrapped
# Deletion was attempted, but device is still there
link = '/dev/link/foo'
delpath = '/sys/block/sde/device/delete'
realpath = '/dev/sde'
mock_realpath.return_value = realpath
mock_stat.side_effect = lambda path: 1
mock_retry.side_effect = retry_timeout
self.assertRaises(
npvmex.DeviceDeletionException, mgmt.remove_block_dev, link)
mock_realpath.assert_called_once_with(link)
mock_dacw.assert_called_with(delpath, 'a', '1')
| 40.093264
| 79
| 0.652106
| 6,790
| 0.877488
| 0
| 0
| 6,370
| 0.82321
| 0
| 0
| 2,016
| 0.260532
|
621e1832f00c1f8797826395cfb9b871267408f4
| 960
|
py
|
Python
|
tools.py
|
fairuzap97/psgan
|
18d49d3d9e6dcb66e019764141fc855d04e4b25b
|
[
"MIT"
] | 29
|
2018-02-16T09:56:08.000Z
|
2022-01-27T16:22:40.000Z
|
tools.py
|
fairuzap97/psgan
|
18d49d3d9e6dcb66e019764141fc855d04e4b25b
|
[
"MIT"
] | null | null | null |
tools.py
|
fairuzap97/psgan
|
18d49d3d9e6dcb66e019764141fc855d04e4b25b
|
[
"MIT"
] | 16
|
2018-09-24T21:16:25.000Z
|
2021-09-26T09:22:07.000Z
|
# -*- coding: utf-8 -*-
import sys, os
from time import time
def create_dir(folder):
'''
creates a folder, if necessary
'''
if not os.path.exists(folder):
os.makedirs(folder)
class TimePrint(object):
'''
Simple convenience class to print how long it takes between successive calls to its __init__ function.
Usage example:
TimePrint("some text") -- simply prints "some text"
<do some stuff here>
TimePrint("some other text ") -- prints "some other text (took ?s)", where ? is the time passed since TimePrint("some text") was called
'''
t_last = None
def __init__(self, text):
TimePrint.p(text)
@classmethod
def p(cls, text):
t = time()
print(text)
if cls.t_last is not None:
print(" (took ", t-cls.t_last, "s)")
cls.t_last = t
sys.stdout.flush()
if __name__=="__main__":
print("this is just a library.")
| 24.615385
| 145
| 0.591667
| 692
| 0.720833
| 0
| 0
| 201
| 0.209375
| 0
| 0
| 498
| 0.51875
|
621e26224a5b7df57e76176ccf102f633408ef39
| 290
|
py
|
Python
|
models/catch_event.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
models/catch_event.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
models/catch_event.py
|
THM-MA/XSDATA-waypoint
|
dd94442f9d6677c525bf3ebb03c15fec52fa1079
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from .t_catch_event import TCatchEvent
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class CatchEvent(TCatchEvent):
class Meta:
name = "catchEvent"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
| 24.166667
| 65
| 0.731034
| 140
| 0.482759
| 0
| 0
| 151
| 0.52069
| 0
| 0
| 102
| 0.351724
|
621f31f3e4ecd411a063519956fdcb002c6f41f7
| 1,305
|
py
|
Python
|
jarviscli/plugins/advice_giver.py
|
hugofpaiva/Jarvis
|
8c7bec950fa2850cba635e2dfcb45e3e8107fbf2
|
[
"MIT"
] | null | null | null |
jarviscli/plugins/advice_giver.py
|
hugofpaiva/Jarvis
|
8c7bec950fa2850cba635e2dfcb45e3e8107fbf2
|
[
"MIT"
] | null | null | null |
jarviscli/plugins/advice_giver.py
|
hugofpaiva/Jarvis
|
8c7bec950fa2850cba635e2dfcb45e3e8107fbf2
|
[
"MIT"
] | null | null | null |
import random
from plugin import plugin
ANSWERS = [
"No",
"Yes",
"You Can Do It!",
"I Cant Help You",
"Sorry To hear That, But You Must Forget :(",
"Keep It Up!",
"Nice",
"Dont Do It Ever Again",
"I Like It, Good Job",
"I Am Not Certain",
"Too Bad For You, Try To Find Something Else To Do And Enjoy",
"Time Will Pass And You Will Forget",
"Dont Do It",
"Do It",
"Never Ask Me About That Again",
"I Cant Give Advice Now I Am Sleepy",
"Sorry I Cant Hear This Language",
"Sorry But Your Question Does Not Make Sense"
]
@plugin("give me advice")
def advice(jarvis, s):
while True:
question = input("Ask Me A Question : ").strip()
if len(question) > 0 and question[-1] == '?':
break
else:
print("Questions should end with a question mark: ?")
while True:
random_idx = random.randint(0, len(ANSWERS) - 1)  # randint is inclusive at both ends, so stay inside the list
print(ANSWERS[random_idx])
while True:
desire = input("Was This In Context? (Y/N) : ")
if desire.strip().lower() == 'n':
print("Its A Pitty :( I'll Try Again!")
break
elif desire.strip().lower() == 'y':
print("Good To hear! Happy To Advice You!")
print("Good Bye!")
return
| 27.765957
| 65
| 0.556322
| 0
| 0
| 0
| 0
| 749
| 0.573946
| 0
| 0
| 631
| 0.483525
|
621f442528eb038457c4f4d99ef47c676a11ad6e
| 3,204
|
py
|
Python
|
PaddleRec/text_matching_on_quora/models/sse.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 5
|
2021-09-28T13:28:01.000Z
|
2021-12-21T07:25:44.000Z
|
PaddleRec/text_matching_on_quora/models/sse.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
PaddleRec/text_matching_on_quora/models/sse.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 3
|
2019-10-31T07:18:49.000Z
|
2020-01-13T03:18:39.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from .my_layers import bi_lstm_layer
from .match_layers import ElementwiseMatching
class SSENet():
"""
SSE net: Shortcut-Stacked Sentence Encoders for Multi-Domain Inference
https://arxiv.org/abs/1708.02312
"""
def __init__(self, config):
self._config = config
def __call__(self, seq1, seq2, label):
return self.body(seq1, seq2, label, self._config)
def body(self, seq1, seq2, label, config):
"""Body function"""
def stacked_bi_rnn_model(seq):
embed = fluid.layers.embedding(
input=seq,
size=[self._config.dict_dim, self._config.emb_dim],
param_attr='emb.w')
stacked_lstm_out = [embed]
for i in range(len(self._config.rnn_hid_dim)):
if i == 0:
feature = embed
else:
feature = fluid.layers.concat(
input=stacked_lstm_out, axis=1)
bi_lstm_h = bi_lstm_layer(
feature,
rnn_hid_dim=self._config.rnn_hid_dim[i],
name="lstm_" + str(i))
# add dropout except for the last stacked lstm layer
if i != len(self._config.rnn_hid_dim) - 1:
bi_lstm_h = fluid.layers.dropout(
bi_lstm_h, dropout_prob=self._config.droprate_lstm)
stacked_lstm_out.append(bi_lstm_h)
pool = fluid.layers.sequence_pool(input=bi_lstm_h, pool_type='max')
return pool
def MLP(vec):
for i in range(len(self._config.fc_dim)):
vec = fluid.layers.fc(vec,
size=self._config.fc_dim[i],
act='relu')
# add dropout after every layer of MLP
vec = fluid.layers.dropout(
vec, dropout_prob=self._config.droprate_fc)
return vec
seq1_rnn = stacked_bi_rnn_model(seq1)
seq2_rnn = stacked_bi_rnn_model(seq2)
seq_match = ElementwiseMatching(seq1_rnn, seq2_rnn)
mlp_res = MLP(seq_match)
prediction = fluid.layers.fc(mlp_res,
size=self._config.class_dim,
act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=loss)
acc = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, acc, prediction
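# --- Usage sketch (illustrative; `config` and the input layers are assumptions) ---
# The network is wired in through __call__, so building the training graph
# typically looks roughly like this, with seq1/seq2/label coming from fluid
# data layers defined elsewhere:
#
# net = SSENet(config)                                 # config supplies dict_dim, emb_dim, ...
# avg_cost, acc, prediction = net(seq1, seq2, label)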
| 39.073171
| 79
| 0.5902
| 2,479
| 0.77372
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.269039
|
622058c389457c7cd876a630d6f12b0ece1a7dc1
| 1,342
|
py
|
Python
|
pelicanconf.py
|
ChrisAD/attack-website
|
222c03f17ea13375753b7323cc7327430974890b
|
[
"Apache-2.0"
] | 9
|
2020-05-05T22:23:53.000Z
|
2021-10-15T18:13:17.000Z
|
pelicanconf.py
|
ChrisAD/attack-website
|
222c03f17ea13375753b7323cc7327430974890b
|
[
"Apache-2.0"
] | null | null | null |
pelicanconf.py
|
ChrisAD/attack-website
|
222c03f17ea13375753b7323cc7327430974890b
|
[
"Apache-2.0"
] | 2
|
2020-05-19T05:38:02.000Z
|
2021-01-27T12:12:34.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import json
import uuid
import sys
# import plugins
PLUGIN_PATHS = ['plugins']
PLUGINS = ['assets']
AUTHOR = 'MITRE'
SITENAME = 'ATT&CK'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
THEME = 'attack-theme'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DEFAULT_PAGINATION = False
STATIC_PATHS = ['docs']
ARTICLE_PATHS = ['pages/updates']
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = False
# custom jinja filters
# remove index.html from end of a path, add / if not at beginning
def clean_path(path):
path = path.split("index.html")[0]
if not path.startswith("/"): path = "/" + path
if not path.endswith("/"): path += "/"
return path
# get a flattened tree of the "paths" of all children of a tree of objects.
# used in sidenav
def flatten_tree(root):
ret = []
if root["path"]: ret.append(root["path"])
for child in root["children"]:
ret = ret + flatten_tree(child)
return ret
JINJA_FILTERS = {
'from_json':json.loads,
'flatten_tree': flatten_tree,
'clean_path': clean_path
}
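# --- Illustration of the custom filters above (kept as comments so importing
# this settings module still has no side effects) ---
# clean_path("pages/updates/index.html") -> "/pages/updates/"
# clean_path("docs") -> "/docs/"
# flatten_tree({"path": "/a/", "children": [
#     {"path": "/a/b/", "children": []},
#     {"path": None, "children": []},   # nodes without a path are skipped
# ]}) -> ["/a/", "/a/b/"]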
| 23.137931
| 77
| 0.698957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 557
| 0.415052
|
62219b03355dbbadf9063de4f0e77f3db6e7d6b9
| 1,810
|
py
|
Python
|
playbooks/roles/configure-vlan-for-ucsm-baremetal/configure_vlan_on_ucsm_bm.py
|
CiscoSystems/project-config-third-party
|
4f9ca3048d8701db673eaf13714f2b7f529a1831
|
[
"Apache-2.0"
] | 2
|
2017-09-19T15:52:22.000Z
|
2017-10-30T11:19:05.000Z
|
playbooks/roles/configure-vlan-for-ucsm-baremetal/configure_vlan_on_ucsm_bm.py
|
CiscoSystems/project-config-third-party
|
4f9ca3048d8701db673eaf13714f2b7f529a1831
|
[
"Apache-2.0"
] | 24
|
2017-10-31T11:36:04.000Z
|
2018-11-30T17:19:50.000Z
|
playbooks/roles/configure-vlan-for-ucsm-baremetal/configure_vlan_on_ucsm_bm.py
|
CiscoSystems/project-config-third-party
|
4f9ca3048d8701db673eaf13714f2b7f529a1831
|
[
"Apache-2.0"
] | 4
|
2017-09-18T16:02:34.000Z
|
2018-05-24T14:58:16.000Z
|
import argparse
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.mometa.vnic.VnicEtherIf import VnicEtherIf
from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan
parser = argparse.ArgumentParser()
parser.add_argument('ucsm_ip')
parser.add_argument('username')
parser.add_argument('password')
parser.add_argument('sp_name')
parser.add_argument('vlan')
parser.add_argument('--remove', action='store_true',
help=("Remove the service profile with name"))
def connect_to_ucsm(args):
handle = UcsHandle(args.ucsm_ip, args.username, args.password)
handle.login()
return handle
def assign_vlan_to_sp_vnic(handle, args):
# Remove any existing ironic-<vlan> vifs from this UCSM server
existing_ironic_vifs = handle.query_classid(
'VnicEtherIf',
filter_str=(
'(name, ".*ironic-.*") and (dn, ".*{0}.*")'.format(args.sp_name))
)
for vif in existing_ironic_vifs:
handle.remove_mo(vif)
handle.commit()
# Add the vlan to UCSM globally if it doesn't already exist
vlan = handle.query_dn('fabric/lan/net-ironic-{0}'.format(args.vlan))
if not vlan:
vp1 = handle.query_dn("fabric/lan")
handle.add_mo(FabricVlan(vp1, name="ironic-{0}".format(args.vlan),
id=args.vlan))
handle.commit()
# Add the VLAN as the default network for the first NIC on the server
eth0 = handle.query_classid(
'VnicEther', filter_str='(dn, ".*{0}.*")'.format(args.sp_name))[0]
VnicEtherIf(parent_mo_or_dn=eth0, default_net="yes",
name="ironic-{0}".format(args.vlan))
handle.set_mo(eth0)
handle.commit()
if __name__ == '__main__':
args = parser.parse_args()
handle = connect_to_ucsm(args)
assign_vlan_to_sp_vnic(handle, args)
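# --- Example invocation (illustrative; the host, credentials and VLAN id
# below are made up) ---
# python configure_vlan_on_ucsm_bm.py 10.0.0.5 admin secret SP-ironic-01 120
# The optional --remove flag is accepted by the parser above, although the
# code shown here only performs the connect-and-assign path.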
| 33.518519
| 77
| 0.674033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 460
| 0.254144
|
6221a46e082c35a5b882386742c5234fe505e8f6
| 9,529
|
py
|
Python
|
test/propagation_warn_only_test.py
|
lechat/jenkinsflow
|
87396069dda4f0681829e5d4e264e4f09ae34131
|
[
"BSD-3-Clause"
] | null | null | null |
test/propagation_warn_only_test.py
|
lechat/jenkinsflow
|
87396069dda4f0681829e5d4e264e4f09ae34131
|
[
"BSD-3-Clause"
] | null | null | null |
test/propagation_warn_only_test.py
|
lechat/jenkinsflow
|
87396069dda4f0681829e5d4e264e4f09ae34131
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from pytest import raises
from jenkinsflow.flow import serial, parallel, FailedChildJobException, FailedChildJobsException, Propagation, BuildResult
from .framework import api_select
from .framework.utils import pre_existing_fake_cli
def test_propagation_warn_only_serial(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j12_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=2, serial=True)
api.job('j13', exec_time=0.01, max_fails=0, expect_invocations=0, expect_order=None)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3, propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl1:
ctrl1.invoke('j11')
ctrl1.invoke('j12_fail')
ctrl1.invoke('j13')
assert ctrl1.result == BuildResult.UNSTABLE
# Note: the fact that no error was raised also implies that the failure didn't propagate as failure
def test_propagation_warn_only_parallel(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j1_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1)
api.job('j2', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3, propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl1:
ctrl1.invoke('j1_fail')
ctrl1.invoke('j2')
def test_propagation_warn_only_nested_serial_parallel(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2, serial=True)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=2)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.parallel(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl2:
ctrl2.invoke('j21')
ctrl2.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_parallel_serial(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1, serial=True)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=0, expect_order=None)
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl2:
ctrl2.invoke('j21')
ctrl2.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_serial_serial(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=2)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=0, expect_order=None)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl2:
ctrl2.invoke('j21')
ctrl2.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_parallel_parallel(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.parallel(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl2:
ctrl2.invoke('j21')
ctrl2.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_serial_serial_continue(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=3)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=4)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial() as ctrl2:
ctrl2.invoke('j21')
with ctrl2.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl3:
ctrl3.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_parallel_serial_continue(api_type, fake_java):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1)
api.job('j23', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial() as ctrl2:
ctrl2.invoke('j21')
with ctrl2.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl3:
ctrl3.invoke('j22_fail')
ctrl2.invoke('j23')
def test_propagation_warn_only_nested_serial_serial_continue_fail(api_type):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=3)
api.job('j23_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=4)
with raises(FailedChildJobException):
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial() as ctrl2:
ctrl2.invoke('j21')
with ctrl2.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl3:
ctrl3.invoke('j22_fail')
ctrl2.invoke('j23_fail')
def test_propagation_warn_only_nested_parallel_serial_continue_fail(api_type):
with api_select.api(__file__, api_type, login=True) as api:
pre_existing_fake_cli(api_type)
api.flow_job()
api.job('j11', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j21', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('j22_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1)
api.job('j23_fail', exec_time=0.01, max_fails=1, expect_invocations=1, expect_order=1)
with raises(FailedChildJobsException):
with parallel(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl1:
ctrl1.invoke('j11')
with ctrl1.serial() as ctrl2:
ctrl2.invoke('j21')
with ctrl2.serial(propagation=Propagation.FAILURE_TO_UNSTABLE) as ctrl3:
ctrl3.invoke('j22_fail')
ctrl2.invoke('j23_fail')
| 50.68617
| 149
| 0.683702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 720
| 0.075559
|
6221e9086b65f59966870eca97102d109aabb9a1
| 3,458
|
py
|
Python
|
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/interface/dhcpv4discoveredinfo/dhcpv4discoveredinfo.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/interface/dhcpv4discoveredinfo/dhcpv4discoveredinfo.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/interface/dhcpv4discoveredinfo/dhcpv4discoveredinfo.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class DhcpV4DiscoveredInfo(Base):
"""The DhcpV4DiscoveredInfo class encapsulates a required dhcpV4DiscoveredInfo node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the DhcpV4DiscoveredInfo property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'dhcpV4DiscoveredInfo'
def __init__(self, parent):
super(DhcpV4DiscoveredInfo, self).__init__(parent)
@property
def Gateway(self):
"""(Read only) A learned/allocated IPv4 Gateway address for this interface on the router that connects to the network segment on which the source host is located.
Returns:
str
"""
return self._get_attribute('gateway')
@property
def Ipv4Address(self):
"""(Read only) A learned/allocated IPv4 address for this interface,
Returns:
str
"""
return self._get_attribute('ipv4Address')
@property
def Ipv4Mask(self):
"""(Read only) A 32-bit address mask used in IP to indicate the bits of an IP address that are being used for the subnet address.
Returns:
number
"""
return self._get_attribute('ipv4Mask')
@property
def IsDhcpV4LearnedInfoRefreshed(self):
"""(Read Only) When true, the DHCPv4 discovered information is refreshed automatically.
Returns:
bool
"""
return self._get_attribute('isDhcpV4LearnedInfoRefreshed')
@property
def LeaseDuration(self):
"""(Read Only) The user-specified value and the lease timer (from the DHCP Server) are compared. The lowest value is used as the release/renew timer. After this time period has elapsed, the address will be renewed.
Returns:
number
"""
return self._get_attribute('leaseDuration')
@property
def ProtocolInterface(self):
"""(Read only) An Ixia protocol interface that is negotiating with the DHCP Server.
Returns:
str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)
"""
return self._get_attribute('protocolInterface')
@property
def Tlv(self):
"""(Read only) Type Length Value for DHCPv4.
Returns:
list(dict(arg1:number,arg2:str))
"""
return self._get_attribute('tlv')
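# --- Usage sketch (hedged; the exact session/vport wiring is an assumption) ---
# Per the class docstring, an instance is read from a parent interface rather
# than constructed directly, along the lines of:
#
# intf = ixnetwork.Vport.find()[0].Interface.find()[0]    # assumed hierarchy
# info = intf.DhcpV4DiscoveredInfo
# print(info.Ipv4Address, info.Ipv4Mask, info.LeaseDuration)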
| 34.58
| 217
| 0.738577
| 2,245
| 0.649219
| 0
| 0
| 1,687
| 0.487854
| 0
| 0
| 2,669
| 0.771833
|
62232ec4709e08c7148a5e26f3dac3505151c613
| 17,678
|
py
|
Python
|
ThreeBotPackages/radicaleserver/radicale/config.py
|
jimbertools/jumpscaleX_threebot
|
9909aa270a1f5d04350c440ad787d755b905c456
|
[
"Apache-2.0"
] | null | null | null |
ThreeBotPackages/radicaleserver/radicale/config.py
|
jimbertools/jumpscaleX_threebot
|
9909aa270a1f5d04350c440ad787d755b905c456
|
[
"Apache-2.0"
] | null | null | null |
ThreeBotPackages/radicaleserver/radicale/config.py
|
jimbertools/jumpscaleX_threebot
|
9909aa270a1f5d04350c440ad787d755b905c456
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
"""
Radicale configuration module.
Give a configparser-like interface to read and write configuration.
"""
import math
import os
from collections import OrderedDict
from configparser import RawConfigParser
from radicale import auth, rights, storage, web
from radicale.log import logger
from Jumpscale import j
DEFAULT_CONFIG_PATH = os.pathsep.join(["?/etc/radicale/config", "?~/.config/radicale/config"])
def positive_int(value):
value = int(value)
if value < 0:
raise j.exceptions.Value("value is negative: %d" % value)
return value
def positive_float(value):
value = float(value)
if not math.isfinite(value):
raise j.exceptions.Value("value is infinite")
if math.isnan(value):
raise j.exceptions.Value("value is not a number")
if value < 0:
raise j.exceptions.Value("value is negative: %f" % value)
return value
def logging_level(value):
if value not in ("debug", "info", "warning", "error", "critical"):
raise j.exceptions.Value("unsupported level: %r" % value)
return value
def filepath(value):
if not value:
return ""
value = os.path.expanduser(value)
if os.name == "nt":
value = os.path.expandvars(value)
return os.path.abspath(value)
def list_of_ip_address(value):
def ip_address(value):
try:
address, port = value.strip().rsplit(":", 1)
return address.strip("[] "), int(port)
except ValueError:
raise j.exceptions.Value("malformed IP address: %r" % value)
return [ip_address(s.strip()) for s in value.split(",")]
def _convert_to_bool(value):
if value.lower() not in RawConfigParser.BOOLEAN_STATES:
raise j.exceptions.Value("Not a boolean: %r" % value)
return RawConfigParser.BOOLEAN_STATES[value.lower()]
# Default configuration
DEFAULT_CONFIG_SCHEMA = OrderedDict(
[
(
"server",
OrderedDict(
[
(
"hosts",
{
"value": "127.0.0.1:5232",
"help": "set server hostnames including ports",
"aliases": ["-H", "--hosts"],
"type": list_of_ip_address,
},
),
(
"max_connections",
{"value": "8", "help": "maximum number of parallel connections", "type": positive_int},
),
(
"max_content_length",
{"value": "100000000", "help": "maximum size of request body in bytes", "type": positive_int},
),
("timeout", {"value": "30", "help": "socket timeout", "type": positive_int}),
(
"ssl",
{
"value": "False",
"help": "use SSL connection",
"aliases": ["-s", "--ssl"],
"opposite": ["-S", "--no-ssl"],
"type": bool,
},
),
(
"certificate",
{
"value": "/sandbox/cfg/ssl/radicale.cert.pem",
"help": "set certificate file",
"aliases": ["-c", "--certificate"],
"type": filepath,
},
),
(
"key",
{
"value": "/sandbox/cfg/ssl/radicale.key.pem",
"help": "set private key file",
"aliases": ["-k", "--key"],
"type": filepath,
},
),
(
"certificate_authority",
{
"value": "",
"help": "set CA certificate for validating clients",
"aliases": ["--certificate-authority"],
"type": filepath,
},
),
("protocol", {"value": "PROTOCOL_TLSv1_2", "help": "SSL protocol used", "type": str}),
("ciphers", {"value": "", "help": "available ciphers", "type": str}),
(
"dns_lookup",
{"value": "True", "help": "use reverse DNS to resolve client address in logs", "type": bool},
),
]
),
),
(
"encoding",
OrderedDict(
[
("request", {"value": "utf-8", "help": "encoding for responding requests", "type": str}),
("stock", {"value": "utf-8", "help": "encoding for storing local collections", "type": str}),
]
),
),
(
"auth",
OrderedDict(
[
(
"type",
{
"value": "none",
"help": "authentication method",
"type": str,
"internal": auth.INTERNAL_TYPES,
},
),
(
"htpasswd_filename",
{"value": "/etc/radicale/users", "help": "htpasswd filename", "type": filepath},
),
("htpasswd_encryption", {"value": "bcrypt", "help": "htpasswd encryption method", "type": str}),
(
"realm",
{
"value": "Radicale - Password Required",
"help": "message displayed when a password is needed",
"type": str,
},
),
("delay", {"value": "1", "help": "incorrect authentication delay", "type": positive_float}),
]
),
),
(
"rights",
OrderedDict(
[
(
"type",
{
"value": "owner_only",
"help": "rights backend",
"type": str,
"internal": rights.INTERNAL_TYPES,
},
),
(
"file",
{
"value": "/etc/radicale/rights",
"help": "file for rights management from_file",
"type": filepath,
},
),
]
),
),
(
"storage",
OrderedDict(
[
(
"type",
{
"value": "multifilesystem",
"help": "storage backend",
"type": str,
"internal": storage.INTERNAL_TYPES,
},
),
(
"filesystem_folder",
{
"value": "/var/lib/radicale/collections",
"help": "path where collections are stored",
"type": filepath,
},
),
(
"max_sync_token_age",
{
"value": "2592000", # 30 days
"help": "delete sync token that are older",
"type": positive_int,
},
),
("hook", {"value": "", "help": "command that is run after changes to storage", "type": str}),
]
),
),
(
"web",
OrderedDict(
[
(
"type",
{
"value": "internal",
"help": "web interface backend",
"type": str,
"internal": web.INTERNAL_TYPES,
},
)
]
),
),
(
"logging",
OrderedDict(
[
("level", {"value": "warning", "help": "threshold for the logger", "type": logging_level}),
("mask_passwords", {"value": "True", "help": "mask passwords in logs", "type": bool}),
]
),
),
("headers", OrderedDict([("_allow_extra", True)])),
(
"internal",
OrderedDict(
[
("_internal", True),
(
"filesystem_fsync",
{"value": "True", "help": "sync all changes to filesystem during requests", "type": bool},
),
("internal_server", {"value": "False", "help": "the internal server is used", "type": bool}),
]
),
),
]
)
def parse_compound_paths(*compound_paths):
"""Parse a compound path and return the individual paths.
Paths in a compound path are joined by ``os.pathsep``. If a path starts
with ``?`` the return value ``IGNORE_IF_MISSING`` is set.
When multiple ``compound_paths`` are passed, the last argument that is
not ``None`` is used.
Returns a dict of the format ``[(PATH, IGNORE_IF_MISSING), ...]``
"""
compound_path = ""
for p in compound_paths:
if p is not None:
compound_path = p
paths = []
for path in compound_path.split(os.pathsep):
ignore_if_missing = path.startswith("?")
if ignore_if_missing:
path = path[1:]
path = filepath(path)
if path:
paths.append((path, ignore_if_missing))
return paths
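# Illustrative example (POSIX, where os.pathsep is ":"; the home directory is
# an assumption):
# parse_compound_paths("?/etc/radicale/config:~/.config/radicale/config")
# -> [("/etc/radicale/config", True),
#     ("/home/<user>/.config/radicale/config", False)]
# The leading "?" marks an entry that may be silently missing.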
def load(paths=()):
"""Load configuration from files.
``paths`` a list of the format ``[(PATH, IGNORE_IF_MISSING), ...]``.
"""
configuration = Configuration(DEFAULT_CONFIG_SCHEMA)
for path, ignore_if_missing in paths:
parser = RawConfigParser()
config_source = "config file %r" % path
try:
if not parser.read(path):
config = Configuration.SOURCE_MISSING
if not ignore_if_missing:
raise j.exceptions.Base("No such file: %r" % path)
else:
config = {s: {o: parser[s][o] for o in parser.options(s)} for s in parser.sections()}
except Exception as e:
raise j.exceptions.Base("Failed to load %s: %s" % (config_source, e)) from e
configuration.update(config, config_source, internal=False)
return configuration
class Configuration:
SOURCE_MISSING = {}
def __init__(self, schema):
"""Initialize configuration.
``schema`` a dict that describes the configuration format.
See ``DEFAULT_CONFIG_SCHEMA``.
"""
self._schema = schema
self._values = {}
self._configs = []
values = {}
for section in schema:
values[section] = {}
for option in schema[section]:
if option.startswith("_"):
continue
values[section][option] = schema[section][option]["value"]
self.update(values, "default config")
def update(self, config, source, internal=True):
"""Update the configuration.
``config`` a dict of the format {SECTION: {OPTION: VALUE, ...}, ...}.
Set to ``Configuration.SOURCE_MISSING`` to indicate a missing
configuration source for inspection.
``source`` a description of the configuration source
``internal`` allows updating "_internal" sections and skips the source
during inspection.
"""
new_values = {}
for section in config:
if section not in self._schema or not internal and self._schema[section].get("_internal", False):
raise j.exceptions.Base("Invalid section %r in %s" % (section, source))
new_values[section] = {}
if "_allow_extra" in self._schema[section]:
allow_extra_options = self._schema[section]["_allow_extra"]
elif "type" in self._schema[section]:
if "type" in config[section]:
plugin_type = config[section]["type"]
else:
plugin_type = self.get(section, "type")
allow_extra_options = plugin_type not in self._schema[section]["type"].get("internal", [])
else:
allow_extra_options = False
for option in config[section]:
if option in self._schema[section]:
type_ = self._schema[section][option]["type"]
elif allow_extra_options:
type_ = str
else:
raise j.exceptions.Base("Invalid option %r in section %r in " "%s" % (option, section, source))
raw_value = config[section][option]
try:
if type_ == bool:
raw_value = _convert_to_bool(raw_value)
new_values[section][option] = type_(raw_value)
except Exception as e:
raise j.exceptions.Base(
"Invalid %s value for option %r in section %r in %s: "
"%r" % (type_.__name__, option, section, source, raw_value)
) from e
self._configs.append((config, source, internal))
for section in new_values:
if section not in self._values:
self._values[section] = {}
for option in new_values[section]:
self._values[section][option] = new_values[section][option]
def get(self, section, option):
"""Get the value of ``option`` in ``section``."""
return self._values[section][option]
def get_raw(self, section, option):
"""Get the raw value of ``option`` in ``section``."""
fconfig = self._configs[0]
for config, _, _ in reversed(self._configs):
if section in config and option in config[section]:
fconfig = config
break
return fconfig[section][option]
def sections(self):
"""List all sections."""
return self._values.keys()
def options(self, section):
"""List all options in ``section``"""
return self._values[section].keys()
def copy(self, plugin_schema=None):
"""Create a copy of the configuration
``plugin_schema`` is a optional dict that contains additional options
for usage with a plugin. See ``DEFAULT_CONFIG_SCHEMA``.
"""
if plugin_schema is None:
schema = self._schema
skip = 1 # skip default config
else:
skip = 0
schema = self._schema.copy()
for section, options in plugin_schema.items():
if section not in schema or "type" not in schema[section] or "internal" not in schema[section]["type"]:
raise j.exceptions.Value("not a plugin section: %r" % section)
schema[section] = schema[section].copy()
schema[section]["type"] = schema[section]["type"].copy()
schema[section]["type"]["internal"] = [self.get(section, "type")]
for option, value in options.items():
if option in schema[section]:
raise j.exceptions.Value("option already exists in %r: %r" % (section, option))
schema[section][option] = value
copy = self.__class__(schema)
for config, source, allow_internal in self._configs[skip:]:
copy.update(config, source, allow_internal)
return copy
def inspect(self):
"""Inspect all external config sources and write problems to logger."""
for config, source, internal in self._configs:
if internal:
continue
if config is self.SOURCE_MISSING:
logger.info("Skipped missing %s", source)
else:
logger.info("Parsed %s", source)
| 37.060797
| 119
| 0.461195
| 5,557
| 0.314274
| 0
| 0
| 0
| 0
| 0
| 0
| 5,448
| 0.30811
|
6224e1d3f02d7b9dda37a271e14789ceeccd2dd5
| 574
|
py
|
Python
|
code_hashers/attendant.py
|
ksajan/iis-ms-del
|
6339f639d674fedb88454b43dcd64493be2a4558
|
[
"MIT"
] | 2
|
2019-12-24T13:32:22.000Z
|
2019-12-26T11:26:08.000Z
|
code_hashers/attendant.py
|
ksajan/iis-ms-del
|
6339f639d674fedb88454b43dcd64493be2a4558
|
[
"MIT"
] | 1
|
2019-12-26T07:53:34.000Z
|
2019-12-26T07:53:34.000Z
|
code_hashers/attendant.py
|
ksajan/iis-ms-del
|
6339f639d674fedb88454b43dcd64493be2a4558
|
[
"MIT"
] | 35
|
2019-12-22T05:05:43.000Z
|
2019-12-22T07:16:56.000Z
|
class ParkingLot:
def __init__(self, username, latitude, longitude, totalSpace, costHour):
self.username = username
self.latitude = latitude
self.longitude = longitude
self.totalSpace = totalSpace
self.availableSpace = totalSpace
self.costHour = costHour
def getSpace(self):
return self.availableSpace
def setBook(self):
self.availableSpace -= 1
class signUp:
def __init__(self, username, password):
self.username = username
self.password = password
# def getDetails():
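# --- Usage sketch (illustrative; the sample values below are made up) ---
if __name__ == "__main__":
    lot = ParkingLot("north_gate", 12.97, 77.59, totalSpace=50, costHour=30)
    lot.setBook()                        # one car takes a space
    print(lot.getSpace())                # -> 49
    user = signUp("alice", "s3cret")     # hypothetical credentials
    print(user.username)                 # -> "alice"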
| 24.956522
| 76
| 0.651568
| 546
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.038328
|
6225f21abac38a02faf447c54384fc07ed6ded92
| 10,105
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/AnalyticalModelStick.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/AnalyticalModelStick.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/AnalyticalModelStick.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class AnalyticalModelStick(AnalyticalModel,IDisposable):
"""
An element that represents a stick in the structural analytical model.
Could be one of beam,brace or column type.
"""
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def GetAlignmentMethod(self,selector):
"""
GetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> AnalyticalAlignmentMethod
Gets the alignment method for a given selector.
selector: End of the analytical model.
Returns: The alignment method at a given end.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetLocalCoordinateSystem(self,*__args):
"""
GetLocalCoordinateSystem(self: AnalyticalModelStick,point: XYZ) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified point.
point: The point on the analytical model stick element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
GetLocalCoordinateSystem(self: AnalyticalModelStick,parameter: float) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified parameter value along a curve.
parameter: The parameter value along a curve that should be in the range [0,1],where 0
represents start and 1 represents end of the element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
"""
pass
def GetMemberForces(self):
"""
GetMemberForces(self: AnalyticalModelStick) -> IList[MemberForces]
Gets the member forces associated with this element.
Returns: Returns a collection of Member Forces associated with this element. Empty
collection will be returned if element doesn't have any Member Forces.
To
find out with which end member forces are associated use
Autodesk::Revit::DB::Structure::MemberForces::Position
property to obtain a
position of Member Forces on element.
"""
pass
def GetProjectionPlaneY(self,selector):
"""
GetProjectionPlaneY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""
pass
def GetProjectionPlaneZ(self,selector):
"""
GetProjectionPlaneZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""
pass
def GetProjectionY(self,selector):
"""
GetProjectionY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionY
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetProjectionZ(self,selector):
"""
GetProjectionZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionZ
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
GetReleases(self: AnalyticalModelStick,start: bool) -> (bool,bool,bool,bool,bool,bool)
Gets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def GetReleaseType(self,start):
"""
GetReleaseType(self: AnalyticalModelStick,start: bool) -> ReleaseType
Gets the release type.
start: The position on analytical model stick element. True for start,false for end.
Returns: The type of release.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def RemoveAllMemberForces(self):
"""
RemoveAllMemberForces(self: AnalyticalModelStick) -> bool
Removes all member forces associated with element.
Returns: True if any member forces were removed,false otherwise.
"""
pass
def RemoveMemberForces(self,start):
"""
RemoveMemberForces(self: AnalyticalModelStick,start: bool) -> bool
Removes member forces defined for given position.
start: Member Forces position on analytical model stick element. True for start,false
for end.
Returns: True if member forces for provided position were removed,false otherwise.
"""
pass
def SetAlignmentMethod(self,selector,method):
"""
SetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector,method: AnalyticalAlignmentMethod)
Sets the alignment method for a given selector.
selector: End of the analytical model.
method: The alignment method at a given end.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetMemberForces(self,*__args):
"""
SetMemberForces(self: AnalyticalModelStick,start: bool,force: XYZ,moment: XYZ)
Adds Member Forces to element.
start: Member Forces position on analytical model stick element. True for start,false
for end.
force: The translational forces at specified position of the element.
The x value
of XYZ object represents force along x-axis of the analytical model coordinate
system,y along y-axis,z along z-axis respectively.
moment: The rotational forces at specified position of the element.
The x value of
XYZ object represents moment about x-axis of the analytical model coordinate
system,y about y-axis,z about z-axis respectively.
SetMemberForces(self: AnalyticalModelStick,memberForces: MemberForces)
Sets Member Forces to element.
memberForces: End to which member forces will be added is defined by setting
Autodesk::Revit::DB::Structure::MemberForces::Position
property in provided
Member Forces object.
"""
pass
def SetProjection(self,selector,*__args):
"""
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
"""
pass
def SetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
SetReleases(self: AnalyticalModelStick,start: bool,fx: bool,fy: bool,fz: bool,mx: bool,my: bool,mz: bool)
Sets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def SetReleaseType(self,start,releaseType):
"""
SetReleaseType(self: AnalyticalModelStick,start: bool,releaseType: ReleaseType)
Sets the release type.
start: The position on analytical model stick element. True for start,false for end.
releaseType: The type of release.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
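# ---- illustrative only, not part of the generated stub ----
# A minimal sketch of how these members might be called from IronPython inside
# Revit, assuming `stick` is an AnalyticalModelStick obtained from the document:
#
# fx, fy, fz, mx, my, mz = stick.GetReleases(True)   # release flags at the start end
# forces = stick.GetMemberForces()                   # IList[MemberForces]; may be empty
# lcs = stick.GetLocalCoordinateSystem(0.5)          # Transform at the curve mid-parameter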
| 25.012376
| 215
| 0.705492
| 10,103
| 0.999802
| 0
| 0
| 0
| 0
| 0
| 0
| 8,895
| 0.880257
|
622711071eb4006f1628d4e6d3019ab6f40c0b83
| 1,773
|
py
|
Python
|
projects/nano_det/net/header.py
|
yunshangyue71/mycodes
|
54b876004c32d38d9c0363fd292d745fee8dff3c
|
[
"Apache-2.0"
] | null | null | null |
projects/nano_det/net/header.py
|
yunshangyue71/mycodes
|
54b876004c32d38d9c0363fd292d745fee8dff3c
|
[
"Apache-2.0"
] | null | null | null |
projects/nano_det/net/header.py
|
yunshangyue71/mycodes
|
54b876004c32d38d9c0363fd292d745fee8dff3c
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from net.init_net import xavier_init
from net.basic_cnn import DWConvBnReluPool
"""
DW-DW-PW
"""
class Head(nn.Module):
    def __init__(self,reg_max = 8, # default = 8; number of bins used for the box distance distribution (General Focal Loss format)
inChannels = 96, #
clsOutChannels = 7):
super(Head, self).__init__()
self.reg_max = reg_max
self.inChannels = inChannels
self.clsOutChannels = clsOutChannels
self.makeLayers()
def makeLayers(self):
self.head= nn.ModuleList()
for i in range(2):
conv = DWConvBnReluPool(self.inChannels,self.inChannels, kernelSize = 3, stride = 1,
bias = True, bn = True, relu = True, maxp2 = False)
self.head.append(conv)
conv = nn.Conv2d(self.inChannels,
self.clsOutChannels + 4 * (self.reg_max),
1)
self.head.append(conv)
def init_weight(self):
for conv in self.modules():
if isinstance(conv, nn.Conv2d):
xavier_init(conv, distribution='uniform')
def forward(self, x):
for conv in self.head:
x = conv(x)
return x
if __name__ == '__main__':
from torchsummary import summary
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = Head().to(device)
summary(net, (96, 320, 320))
# net = nanodet_PAN(cfg)
# import netron
# import os
#
# x = torch.rand(2,58,320,320)
# net(x)
# name = os.path.basename(__file__)
# name = name.split('.')[0]
# onnx_path = '/media/q/deep/me/model/pytorch_script_use/'+name+'.onnx'
# torch.onnx.export(net, x, onnx_path)
# netron.start(onnx_path)
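    # ---- illustrative only, not part of the original file ----
    # A minimal sketch of splitting the raw head output into its classification
    # and box-distribution parts, assuming the clsOutChannels + 4 * reg_max
    # channel layout built in makeLayers() above:
    out = net(torch.rand(1, 96, 320, 320).to(device))           # (1, 7 + 4*8, H, W)
    cls_pred, reg_pred = out.split([7, 4 * 8], dim=1)           # class scores / distance distributions
    reg_pred = reg_pred.reshape(1, 4, 8, *reg_pred.shape[2:])   # 4 box sides x reg_max bins each
    print(cls_pred.shape, reg_pred.shape)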
| 30.050847
| 96
| 0.5815
| 1,106
| 0.620303
| 0
| 0
| 0
| 0
| 0
| 0
| 390
| 0.218732
|
622719ea6c5735ec54aa9dbdf7b5a6d8d0c52ce7
| 1,115
|
py
|
Python
|
hard-gists/1191457/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/1191457/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/1191457/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
#!/usr/bin/env python
import urllib
import sys
import json
from mwlib import parser
from mwlib.refine import compat
if __name__ == "__main__":
params = urllib.urlencode({
"format": "json",
"action": "query",
"prop": "revisions",
"rvprop": "content",
"titles": "ISO_3166-1",
"rvsection": "4",
})
wc = urllib.urlopen("http://en.wikipedia.org/w/api.php?%s" % params)
if wc.getcode() != 200:
print "Fail!"
sys.exit(2)
raw = wc.read()
rdata = json.loads(raw)
wc.close()
page = rdata['query']['pages'].itervalues().next()
if not page:
print "NO page found"
sys.exit(3)
revision = page['revisions'][0]
if not revision:
print "NO revision found"
sys.exit(4)
content = revision[str(revision.keys()[0])]
parsed = compat.parse_txt(content)
table = parsed.find(parser.Table)[0]
if not table:
print "Table not found"
sys.exit(5)
for row in table.children:
cells = row.find(parser.Cell)
print cells[0].asText().replace("}}", "").replace("{{", "").strip() + \
" || " + cells[1].asText().strip() + " || " + cells[2].asText().strip() \
+ " || " + cells[3].asText().strip()
| 22.755102
| 75
| 0.625112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 279
| 0.250224
|
6228f1664df5b9ec6866831755970b61d71b6d58
| 3,058
|
py
|
Python
|
ECC_main/platform/slack.py
|
dongh9508/ECC-main
|
904110b70ba3e459d92c6d21a5ad1693b4ee726a
|
[
"MIT"
] | 2
|
2019-01-23T00:04:18.000Z
|
2019-02-01T10:09:15.000Z
|
ECC_main/platform/slack.py
|
dongh9508/ECC-main
|
904110b70ba3e459d92c6d21a5ad1693b4ee726a
|
[
"MIT"
] | 26
|
2018-07-11T07:59:46.000Z
|
2021-02-08T20:21:46.000Z
|
ECC_main/platform/slack.py
|
dongh9508/ECC-main
|
904110b70ba3e459d92c6d21a5ad1693b4ee726a
|
[
"MIT"
] | 2
|
2018-08-31T14:08:19.000Z
|
2018-08-31T15:14:29.000Z
|
from .platformBase import PlatformBase
from django.http import HttpResponse, JsonResponse
from ECC_main.baseRequest import BaseRequest
import ECC_main.settings
import threading
import requests
class Slack(PlatformBase):
def slash_command(request, func):
token = request.POST['token']
if ECC_main.settings.SLACK_VERIFICATION_TOKEN == token:
print("authenticated!")
json_body = Slack._get_json_list(request)
slash_response = Slack._func_start(json_body, func)
if slash_response.lazy_slash_response is not None:
Slack.lazy_slash_command(json_body, slash_response)
if slash_response.response_type is None:
slash_response['response_type'] = 'ephemeral'
if slash_response.status != 200 or slash_response.text == "":
json_response = JsonResponse(slash_response, status=slash_response.status)
else:
json_response = JsonResponse(slash_response)
return json_response
else:
print("unauthenticated")
return HttpResponse(status=403)
def lazy_slash_command(json_body, slash_response):
func, args, kwargs, request_result_func = slash_response.lazy_slash_response.get_lazy()
def async_func(*_args, **_kwargs):
print('lazy send func start')
slash_response = func(*_args, **_kwargs)
chat_id = Slack._get_chat_id(json_body)
response_url = Slack._get_response_url(json_body)
if slash_response.response_type is None:
slash_response['response_type'] = 'in_channel'
response = Slack._send_message(slash_response, response_url)
if request_result_func is not None:
request_result_func(response)
threading.Thread(target=async_func, args=args, kwargs=kwargs).start()
def platform():
return 'slack'
def _get_chat_id(json_body):# return channel_id
return json_body['channel_id']
def _get_user_id(json_body):
return json_body['user_id']
def _get_user_name(json_body):
return json_body['user_name']
def _get_json_list(request_body):
return request_body.POST
def _get_response_url(json_body):
return json_body['response_url']
def _func_start(json_body, func):
platform = Slack.platform()
text = Slack._get_text(json_body)
user_name = Slack._get_user_name(json_body)
user_id = Slack._get_user_id(json_body)
baseRequest = BaseRequest(platform, text, user_name, user_id)
return func(baseRequest)
def _get_text(json_body):
return json_body['text']
def _send_message(slash_response, response_url):
return requests.post(response_url, json=slash_response)
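# ---- illustrative only, not part of the original module ----
# A minimal sketch of exposing slash_command through a Django view, assuming
# `my_handler` takes the BaseRequest built in _func_start and returns the
# project's slash-response object:
#
# from django.views.decorators.csrf import csrf_exempt
#
# @csrf_exempt
# def slack_command_view(request):
#     return Slack.slash_command(request, my_handler)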
| 35.149425
| 95
| 0.624591
| 2,863
| 0.936233
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.063767
|
62291b009a583ae54f27aedb9899f9e284646d88
| 598
|
py
|
Python
|
Classes/ex17.py
|
oDallas/PythomBR
|
7d3b3bcefe05ce483f6aa664bbc4962a1e0fd285
|
[
"MIT"
] | 1
|
2019-06-02T18:59:18.000Z
|
2019-06-02T18:59:18.000Z
|
Classes/ex17.py
|
oDallas/PythonBR
|
7d3b3bcefe05ce483f6aa664bbc4962a1e0fd285
|
[
"MIT"
] | null | null | null |
Classes/ex17.py
|
oDallas/PythonBR
|
7d3b3bcefe05ce483f6aa664bbc4962a1e0fd285
|
[
"MIT"
] | null | null | null |
""""
Crie uma Fazenda de Bichinhos instanciando vários objetos bichinho e mantendo o controle deles através de uma lista.
Imite o funcionamento do programa básico, mas ao invés de exigis que o usuário tome conta de um único bichinho,
exija que ele tome conta da fazenda inteira. Cada opção do menu deveria permitir que o usuário executasse
uma ação para todos os bichinhos (alimentar todos os bichinhos, brincar com todos os bichinhos, ou ouvir a todos os
bichinhos). Para tornar o programa mais interessante, dê para cada bichinho um nivel inicial aleatório de fome e tédio.
"""
# todo: finish
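# ---- illustrative only: a minimal sketch of the exercise, assuming a Critter
# class along the lines of the earlier exercises ----
#
# import random
#
# class Critter:
#     def __init__(self, name):
#         self.name = name
#         self.hunger = random.randint(0, 5)
#         self.boredom = random.randint(0, 5)
#     def feed(self):
#         self.hunger = max(0, self.hunger - 1)
#     def play(self):
#         self.boredom = max(0, self.boredom - 1)
#     def listen(self):
#         print("{} - hunger: {}, boredom: {}".format(self.name, self.hunger, self.boredom))
#
# farm = [Critter(name) for name in ("Rex", "Bidu", "Mel")]
# for critter in farm:       # each menu option loops over the whole farm
#     critter.feed()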
| 59.8
| 119
| 0.797659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.995098
|
6229642233706b071d8517f87c02f6fac096a7c6
| 10,468
|
py
|
Python
|
train.py
|
Thanh-Hoo/Custom_train_PanNet
|
aa50df0e32991d35112f3de6627baea963f0827a
|
[
"MIT"
] | null | null | null |
train.py
|
Thanh-Hoo/Custom_train_PanNet
|
aa50df0e32991d35112f3de6627baea963f0827a
|
[
"MIT"
] | null | null | null |
train.py
|
Thanh-Hoo/Custom_train_PanNet
|
aa50df0e32991d35112f3de6627baea963f0827a
|
[
"MIT"
] | null | null | null |
'''
THis is the main training code.
'''
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # set GPU id at the very begining
import argparse
import random
import math
import numpy as np
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
from torch.multiprocessing import freeze_support
import json
import sys
import time
import pdb
# internal package
from dataset import ctw1500, totaltext, synthtext, msra, ic15, custom
from models.pan import PAN
from loss.loss import loss
from utils.helper import adjust_learning_rate, upsample
from utils.average_meter import AverageMeter
torch.set_num_threads(2)
# main function:
if __name__ == '__main__':
freeze_support()
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch', type=int, default=16, help='input batch size')
parser.add_argument(
'--worker', type=int, default=4, help='number of data loading workers')
parser.add_argument(
'--epoch', type=int, default=601, help='number of epochs')
parser.add_argument('--output', type=str, default='outputs', help='output folder name')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset_type', type=str, default='ctw', help="dataset type - ctw | tt | synthtext | msra | ic15 | custom")
parser.add_argument('--gpu', type=bool, default=False, help="GPU being used or not")
opt = parser.parse_args()
print(opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed:", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
# turn on GPU for models:
if opt.gpu == False:
device = torch.device("cpu")
print("CPU being used!")
else:
if torch.cuda.is_available() == True and opt.gpu == True:
device = torch.device("cuda")
print("GPU being used!")
else:
device = torch.device("cpu")
print("CPU being used!")
# set training parameters
batch_size = opt.batch
neck_channel = (64, 128, 256, 512)
pa_in_channels = 512
hidden_dim = 128
num_classes = 6
loss_text_weight = 1.0
loss_kernel_weight = 0.5
loss_emb_weight = 0.25
opt.optimizer = 'Adam'
opt.lr = 1e-3
opt.schedule = 'polylr'
epochs = opt.epoch
worker = opt.worker
dataset_type = opt.dataset_type
output_path = opt.output
trained_model_path = opt.model
# create dataset
print("Create dataset......")
if dataset_type == 'ctw': # ctw dataset
train_dataset = ctw1500.PAN_CTW(split='train',
is_transform=True,
img_size=640,
short_size=640,
kernel_scale=0.7,
report_speed=False)
elif dataset_type == 'tt': # totaltext dataset
train_dataset = totaltext.PAN_TT(split='train',
is_transform=True,
img_size=640,
short_size=640,
kernel_scale=0.7,
with_rec=False,
report_speed=False)
elif dataset_type == 'synthtext': # synthtext dataset
train_dataset = synthtext.PAN_Synth(is_transform=True,
img_size=640,
short_size=640,
kernel_scale=0.5,
with_rec=False)
elif dataset_type == 'msra': # msra dataset
train_dataset = msra.PAN_MSRA(split='train',
is_transform=True,
img_size=736,
short_size=736,
kernel_scale=0.7,
report_speed=False)
    elif dataset_type == 'ic15': # ic15 dataset
train_dataset = ic15.PAN_IC15(split='train',
is_transform=True,
img_size=736,
short_size=736,
kernel_scale=0.5,
with_rec=False)
    elif dataset_type == 'custom': # custom dataset
train_dataset = custom.PAN_CTW(split='train',
is_transform=True,
img_size=640,
short_size=640,
kernel_scale=0.7,
report_speed=False)
else:
print("Not supported yet!")
exit(1)
# make dataloader
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=int(worker),
drop_last=True,
pin_memory=True)
print("Length of train dataset is:", len(train_dataset))
# make model output folder
try:
os.makedirs(output_path)
except OSError:
pass
# create model
print("Create model......")
model = PAN(pretrained=False, neck_channel=neck_channel, pa_in_channels=pa_in_channels, hidden_dim=hidden_dim, num_classes=num_classes)
if trained_model_path != '':
if torch.cuda.is_available() == True and opt.gpu == True:
model.load_state_dict(torch.load(trained_model_path, map_location=lambda storage, loc: storage), strict=False)
model = torch.nn.DataParallel(model).to(device)
else:
model.load_state_dict(torch.load(trained_model_path, map_location=lambda storage, loc: storage), strict=False)
else:
if torch.cuda.is_available() == True and opt.gpu == True:
model = torch.nn.DataParallel(model).to(device)
else:
model = model.to(device)
if opt.optimizer == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.99, weight_decay=5e-4)
elif opt.optimizer == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
else:
print("Error: Please specify correct optimizer!")
exit(1)
# train, evaluate, and save model
print("Training starts......")
start_epoch = 0
for epoch in range(start_epoch, epochs):
print('Epoch: [%d | %d]' % (epoch + 1, epochs))
model.train()
# meters
losses = AverageMeter()
losses_text = AverageMeter()
losses_kernels = AverageMeter()
losses_emb = AverageMeter()
losses_rec = AverageMeter()
ious_text = AverageMeter()
ious_kernel = AverageMeter()
for iter, data in enumerate(train_dataloader):
# adjust learning rate
adjust_learning_rate(optimizer, train_dataloader, epoch, iter, opt.schedule, opt.lr, epochs)
outputs = dict()
# forward for detection output
det_out = model(data['imgs'].to(device))
det_out = upsample(det_out, data['imgs'].size())
            # retrieve ground truth labels
gt_texts = data['gt_texts'].to(device)
gt_kernels = data['gt_kernels'].to(device)
training_masks = data['training_masks'].to(device)
gt_instances = data['gt_instances'].to(device)
gt_bboxes = data['gt_bboxes'].to(device)
# calculate total loss
det_loss = loss(det_out, gt_texts, gt_kernels, training_masks, gt_instances, gt_bboxes, loss_text_weight, loss_kernel_weight, loss_emb_weight)
outputs.update(det_loss)
# detection loss
loss_text = torch.mean(outputs['loss_text'])
losses_text.update(loss_text.item())
loss_kernels = torch.mean(outputs['loss_kernels'])
losses_kernels.update(loss_kernels.item())
loss_emb = torch.mean(outputs['loss_emb'])
losses_emb.update(loss_emb.item())
loss_total = loss_text + loss_kernels + loss_emb
iou_text = torch.mean(outputs['iou_text'])
ious_text.update(iou_text.item())
iou_kernel = torch.mean(outputs['iou_kernel'])
ious_kernel.update(iou_kernel.item())
losses.update(loss_total.item())
# backward
optimizer.zero_grad()
loss_total.backward()
optimizer.step()
# print log
#print("batch: {} / total batch: {}".format(iter+1, len(train_dataloader)))
if iter % 20 == 0:
output_log = '({batch}/{size}) LR: {lr:.6f} | ' \
'Loss: {loss:.3f} | ' \
'Loss (text/kernel/emb): {loss_text:.3f}/{loss_kernel:.3f}/{loss_emb:.3f} ' \
'| IoU (text/kernel): {iou_text:.3f}/{iou_kernel:.3f}'.format(
batch=iter + 1,
size=len(train_dataloader),
lr=optimizer.param_groups[0]['lr'],
loss_text=losses_text.avg,
loss_kernel=losses_kernels.avg,
loss_emb=losses_emb.avg,
loss=losses.avg,
iou_text=ious_text.avg,
iou_kernel=ious_kernel.avg,
)
print(output_log)
sys.stdout.flush()
with open(os.path.join(output_path,'statistics.txt'), 'a') as f:
f.write("{} {} {} {} {} {}\n".format(losses_text.avg, losses_kernels.avg, losses_emb.avg, losses.avg, ious_text.avg, ious_kernel.avg))
if epoch % 20 == 0:
print("Save model......")
if torch.cuda.is_available() == True and opt.gpu == True:
torch.save(model.module.state_dict(), '%s/model_epoch_%s.pth' % (output_path, str(epoch)))
else:
torch.save(model.state_dict(), '%s/model_epoch_%s.pth' % (output_path, str(epoch)))
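# ---- illustrative only, not part of the original script ----
# Example invocations under the argument parser defined above:
#
#   python train.py --dataset_type ctw --batch 16 --worker 4 --epoch 601 --output outputs
#   python train.py --dataset_type custom --gpu True --model outputs/model_epoch_100.pth
#
# Because --gpu is declared with type=bool, any non-empty string (including
# "False") parses as True; pass an empty string to keep it False.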
| 40.261538
| 154
| 0.545472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,674
| 0.159916
|
6229671a08873684c79e48db6345c98847757965
| 3,070
|
py
|
Python
|
bioprocs/scripts/chipseq/pPeakToRegPotential.py
|
pwwang/biopipen
|
d53b78aa192fd56a5da457463b099b2aa833b284
|
[
"MIT"
] | 2
|
2021-09-10T00:17:52.000Z
|
2021-10-10T09:53:09.000Z
|
bioprocs/scripts/chipseq/pPeakToRegPotential.py
|
pwwang/biopipen
|
d53b78aa192fd56a5da457463b099b2aa833b284
|
[
"MIT"
] | 1
|
2021-12-02T07:54:09.000Z
|
2021-12-02T07:54:09.000Z
|
bioprocs/scripts/chipseq/pPeakToRegPotential.py
|
pwwang/biopipen
|
d53b78aa192fd56a5da457463b099b2aa833b284
|
[
"MIT"
] | 2
|
2021-09-10T00:17:54.000Z
|
2021-10-10T09:56:40.000Z
|
import math, gzip
peakfile = "{{peakfile}}"
genefile = "{{genefile}}"
arg_inst = {{args.signal | repr}}
arg_gf = "{{args.genefmt}}"
arg_pf = "{{args.peakfmt}}"
arg_wd = int({{args.window | repr}})
d0 = arg_wd / 2
assert (isinstance(arg_inst, bool))
assert (arg_gf in ['ucsc', 'bed', 'ucsc+gz', 'bed+gz'])
assert (arg_pf in ['peak', 'bed', 'peak+gz', 'bed+gz'])
open_gf = open_pf = open
if arg_gf.endswith ('+gz'):
arg_gf = arg_gf[:-3]
open_gf = gzip.open
if arg_pf.endswith ('+gz'):
arg_pf = arg_pf[:-3]
open_pf = gzip.open
# read genes
genes = {}
if arg_gf == 'bed':
with open_gf (genefile) as f:
for line in f:
line = line.strip()
if not line or line.startswith('track') or line.startswith('#'): continue
items = line.split("\t")
chr = items[0]
start = int(items[1])
end = int(items[2])
gene = items[3]
strand = '-' if len(items)>5 and items[5] == '-' else '+'
tss = start if strand == '+' else end
rstart = tss - d0
rend = tss + d0
genes[gene] = [chr, start, end, tss, rstart, rend]
else:
with open_gf (genefile) as f:
for line in f:
line = line.strip()
if not line or line.startswith('track') or line.startswith('#'): continue
items = line.split("\t")
chr = items[2]
start = int(items[4])
end = int(items[5])
gene = items[12]
strand = items[3]
tss = start if strand == '+' else end
rstart = tss - d0
rend = tss + d0
genes[gene] = [chr, start, end, tss, rstart, rend]
# read peaks
peaks = {}
if arg_pf == 'peak':
with open_pf (peakfile) as f:
for line in f:
line = line.strip()
			if not line or line.startswith('track') or line.startswith('#'): continue
items = line.split("\t")
chr = items[0]
start = int(items[1])
end = int(items[2])
signal = float(items[6])
if peaks.has_key(chr):
peaks[chr].append ([start, end, (start+end) / 2, signal])
else:
peaks[chr] = [[start, end, (start+end) / 2, signal]]
else:
with open_pf (peakfile) as f:
for line in f:
line = line.strip()
			if not line or line.startswith('track') or line.startswith('#'): continue
items = line.split("\t")
chr = items[0]
start = int(items[1])
end = int(items[2])
signal = float(items[4])
if peaks.has_key(chr):
peaks[chr].append ([start, end, (start+end) / 2, signal])
else:
peaks[chr] = [[start, end, (start+end) / 2, signal]]
for key, val in peaks.iteritems():
peaks[key] = sorted (val, cmp = lambda x, y: x[0] - y[0])
rp = {}
for gene, ginfo in genes.iteritems():
(gchr, gstart, gend, gtss, grstart, grend) = ginfo
rp[gene] = 0
if not peaks.has_key(gchr): continue
for pinfo in peaks[gchr]:
(pstart, pend, pcenter, psignal) = pinfo
if pcenter < grstart: continue
if pcenter > grend: break
score = psignal if arg_inst else 1
		score *= math.exp (-(.5 + 4*abs(pcenter - gtss)/d0))
rp[gene] += score
with open ("{{outfile}}", 'w') as f:
for key in sorted (rp, key=rp.get, reverse = True):
f.write ("%s\t%.3f\n" % (key, rp[key]))
| 29.238095
| 76
| 0.587948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.085993
|
622a87127bb1d17eed572fa385184b37ffbcf8bc
| 1,549
|
py
|
Python
|
parsifal/reviews/migrations/0014_auto_20150710_1445.py
|
michelav/parsifal
|
6633699ad64fd354ddef27f8802a76b7ec7c4ef8
|
[
"MIT"
] | 1
|
2020-11-12T08:36:41.000Z
|
2020-11-12T08:36:41.000Z
|
parsifal/reviews/migrations/0014_auto_20150710_1445.py
|
michelav/parsifal
|
6633699ad64fd354ddef27f8802a76b7ec7c4ef8
|
[
"MIT"
] | 7
|
2019-11-06T12:44:12.000Z
|
2022-01-13T01:48:22.000Z
|
parsifal/reviews/migrations/0014_auto_20150710_1445.py
|
michelav/parsifal
|
6633699ad64fd354ddef27f8802a76b7ec7c4ef8
|
[
"MIT"
] | 3
|
2019-10-05T04:16:59.000Z
|
2021-04-20T05:00:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('library', '0011_auto_20150706_0957'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reviews', '0013_auto_20150708_1511'),
]
operations = [
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unclassified'), ('R', 'Rejected'), ('A', 'Accepted'), ('D', 'Duplicated')])),
('updated_at', models.DateTimeField(auto_now=True)),
('document', models.ForeignKey(to='library.Document')),
],
),
migrations.CreateModel(
name='StudySelection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('has_finished', models.BooleanField(default=False)),
('review', models.ForeignKey(to='reviews.Review')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AddField(
model_name='study',
name='study_selection',
field=models.ForeignKey(to='reviews.StudySelection'),
),
]
| 37.780488
| 164
| 0.586185
| 1,407
| 0.908328
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.213686
|
622afea05f222949c88d139d2a220b387b5d925a
| 58,968
|
py
|
Python
|
lua_protobuf/generator.py
|
JoJo2nd/lua-protobuf
|
f3fc8d451d4b43152e28a9a1eaa98aa744dcd0f5
|
[
"Apache-2.0"
] | null | null | null |
lua_protobuf/generator.py
|
JoJo2nd/lua-protobuf
|
f3fc8d451d4b43152e28a9a1eaa98aa744dcd0f5
|
[
"Apache-2.0"
] | null | null | null |
lua_protobuf/generator.py
|
JoJo2nd/lua-protobuf
|
f3fc8d451d4b43152e28a9a1eaa98aa744dcd0f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 Gregory Szorc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by James Moran 2014
# Updated to work with Lua 5.2. Fixing windows platform issues.
# Added wrapper classes for CodedInput/OutputStream(s)
from google.protobuf.descriptor import FieldDescriptor
import re
RE_BARE_BEGIN_BRACKET = re.compile(r'^\s*{\s*$')
RE_BEGIN_BRACKET = re.compile(r'{\s*$')
RE_END_BRACKET = re.compile(r'^\s*};?\s*$')
FIELD_LABEL_MAP = {
FieldDescriptor.LABEL_OPTIONAL: 'optional',
FieldDescriptor.LABEL_REQUIRED: 'required',
FieldDescriptor.LABEL_REPEATED: 'repeated'
}
FIELD_TYPE_MAP = {
FieldDescriptor.TYPE_DOUBLE: 'double',
FieldDescriptor.TYPE_FLOAT: 'float',
FieldDescriptor.TYPE_INT64: 'int64',
FieldDescriptor.TYPE_UINT64: 'uint64',
FieldDescriptor.TYPE_INT32: 'int32',
FieldDescriptor.TYPE_FIXED64: 'fixed64',
FieldDescriptor.TYPE_FIXED32: 'fixed32',
FieldDescriptor.TYPE_BOOL: 'bool',
FieldDescriptor.TYPE_STRING: 'string',
FieldDescriptor.TYPE_GROUP: 'group',
FieldDescriptor.TYPE_MESSAGE: 'message',
FieldDescriptor.TYPE_BYTES: 'bytes',
FieldDescriptor.TYPE_UINT32: 'uint32',
FieldDescriptor.TYPE_ENUM: 'enum',
FieldDescriptor.TYPE_SFIXED32: 'sfixed32',
FieldDescriptor.TYPE_SFIXED64: 'sfixed64',
FieldDescriptor.TYPE_SINT32: 'sint32',
FieldDescriptor.TYPE_SINT64: 'sint64',
}
def lua_protobuf_header():
'''Returns common header included by all produced files'''
return '''
#ifndef LUA_PROTOBUF_H
#define LUA_PROTOBUF_H
#include <google/protobuf/message.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <lua.h>
#ifdef WINDOWS
#define LUA_PROTOBUF_EXPORT __declspec(dllexport)
#else
#define LUA_PROTOBUF_EXPORT
#endif
// type for callback function that is executed before Lua performs garbage
// collection on a message instance.
// if called function returns 1, Lua will free the memory backing the object
// if returns 0, Lua will not free the memory
typedef int (*lua_protobuf_gc_callback)(::google::protobuf::MessageLite *msg, void *userdata);
// __index and __newindex functions for enum tables
LUA_PROTOBUF_EXPORT int lua_protobuf_enum_index(lua_State *L);
LUA_PROTOBUF_EXPORT int lua_protobuf_enum_newindex(lua_State *L);
// GC callback function that always returns true
LUA_PROTOBUF_EXPORT int lua_protobuf_gc_always_free(::google::protobuf::MessageLite *msg, void *userdata);
// A minimal Lua interface for coded input/output protobuf streams
int lua_protobuf_coded_streams_open(lua_State* L);
#ifdef __cplusplus
}
#endif
#endif
'''
def lua_protobuf_source():
'''Returns source for common code'''
return '''
#include "lua-protobuf.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <lauxlib.h>
#ifdef __cplusplus
}
#endif
int lua_protobuf_enum_index(lua_State *L)
{
return luaL_error(L, "attempting to access undefined enumeration value: %s", lua_tostring(L, 2));
}
int lua_protobuf_enum_newindex(lua_State *L)
{
return luaL_error(L, "cannot modify enumeration tables");
}
int lua_protobuf_gc_always_free(::google::protobuf::MessageLite *msg, void *ud)
{
return 1;
}
#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include <fcntl.h>
#include <sys/stat.h>
#if defined (_MSC_VER)
# include <io.h> // for open
#else
# include <sys/types.h>
# define O_BINARY (0)
#endif
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int lua_protobuf_coded_input_stream_new(lua_State* L) {
const char* filepath = luaL_checkstring(L, 1);
int fd = open(filepath, O_RDONLY | O_BINARY, S_IREAD);
if (fd == -1) {
return luaL_error(L, "Failed to open file %s", filepath);
}
char* udataptr = (char*)lua_newuserdata(L, sizeof(::google::protobuf::io::CodedInputStream)+sizeof(::google::protobuf::io::FileInputStream));
    auto instream = new (udataptr+sizeof(::google::protobuf::io::CodedInputStream)) ::google::protobuf::io::FileInputStream(fd);
instream->SetCloseOnDelete(true);
auto codestream = new (udataptr) ::google::protobuf::io::CodedInputStream(instream);
luaL_setmetatable(L, "protobuf_.CodedInputStream");
return 1;
}
int lua_protobuf_coded_input_stream_gc(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::io::FileInputStream* filestream = (::google::protobuf::io::FileInputStream*)(codestream+1);
codestream->~CodedInputStream();
filestream->~FileInputStream();
return 0;
}
int lua_protobuf_coded_input_stream_skip(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int count = luaL_checkint(L, 2);
codestream->Skip(count);
return 0;
}
int lua_protobuf_coded_input_stream_push_limit(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int limit = luaL_checkint(L, 2);
limit = codestream->PushLimit(limit);
lua_pushinteger(L, limit);
return 1;
}
int lua_protobuf_coded_input_stream_pop_limit(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int limit = luaL_checkint(L, 2);
codestream->PopLimit(limit);
return 0;
}
int lua_protobuf_coded_input_stream_current_position(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
lua_pushinteger(L, codestream->CurrentPosition());
return 1;
}
int lua_protobuf_coded_input_stream_read_raw(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int count = luaL_checkint(L, 2);
char* buf = new char[count];
bool success = codestream->ReadRaw(buf, count);
if (success) {
lua_pushlstring(L, buf, count);
} else {
lua_pushnil(L);
}
    delete [] buf;
return 1;
}
int lua_protobuf_coded_input_stream_read_varint_32(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint32 val;
bool success = codestream->ReadVarint32(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
return 1;
}
int lua_protobuf_coded_input_stream_read_varint_64(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint64 val;
bool success = codestream->ReadVarint64(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
return 1;
}
int lua_protobuf_coded_input_stream_read_little_endian_32(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint32 val;
bool success = codestream->ReadLittleEndian32(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
return 1;
}
int lua_protobuf_coded_input_stream_read_little_endian_64(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint64 val;
bool success = codestream->ReadLittleEndian64(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
return 1;
}
static const struct luaL_Reg CodedInputStream_functions [] = {
{"new", lua_protobuf_coded_input_stream_new},
{NULL, NULL}
};
static const struct luaL_Reg CodedInputStream_methods [] = {
{"__gc", lua_protobuf_coded_input_stream_gc},
{"Skip", lua_protobuf_coded_input_stream_skip},
{"PushLimit", lua_protobuf_coded_input_stream_push_limit},
{"PopLimit", lua_protobuf_coded_input_stream_pop_limit},
{"CurrentPosition", lua_protobuf_coded_input_stream_current_position},
{"ReadRaw", lua_protobuf_coded_input_stream_read_raw},
{"ReadVarint32", lua_protobuf_coded_input_stream_read_varint_32},
{"ReadVarint64", lua_protobuf_coded_input_stream_read_varint_64},
{"ReadLittleEndian32", lua_protobuf_coded_input_stream_read_little_endian_32},
{"ReadLittleEndian64", lua_protobuf_coded_input_stream_read_little_endian_64},
{NULL, NULL},
};
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int lua_protobuf_coded_output_stream_new(lua_State* L) {
const char* filepath = luaL_checkstring(L, 1);
int fd = open(filepath, O_WRONLY | O_TRUNC | O_CREAT | O_BINARY, S_IREAD | S_IWRITE);
if (fd == -1) {
return luaL_error(L, "Failed to open file %s", filepath);
}
char* udataptr = (char*)lua_newuserdata(L, sizeof(::google::protobuf::io::CodedOutputStream)+sizeof(::google::protobuf::io::FileOutputStream));
auto outstream = new(udataptr+sizeof(::google::protobuf::io::CodedOutputStream)) ::google::protobuf::io::FileOutputStream(fd);
outstream->SetCloseOnDelete(true);
auto codestream = new (udataptr) ::google::protobuf::io::CodedOutputStream(outstream);
luaL_setmetatable(L, "protobuf_.CodedOutputStream");
return 1;
}
int lua_protobuf_coded_output_stream_gc(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::io::FileOutputStream* filestream = (::google::protobuf::io::FileOutputStream*)(codestream+1);
codestream->~CodedOutputStream();
filestream->~FileOutputStream();
return 0;
}
int lua_protobuf_coded_output_stream_skip(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
int count = luaL_checkint(L, 2);
codestream->Skip(count);
return 0;
}
int lua_protobuf_coded_output_stream_byte_count(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
lua_pushinteger(L, codestream->ByteCount());
return 1;
}
int lua_protobuf_coded_output_stream_write_raw(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
size_t count;
const char* buf = luaL_checklstring(L, 2, &count);
codestream->WriteRaw(buf, (int)count);
return 0;
}
int lua_protobuf_coded_output_stream_write_varint_32(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint32 val = luaL_checkunsigned(L, 2);
codestream->WriteVarint32(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_varint_64(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint64 val = luaL_checkunsigned(L, 2);
codestream->WriteVarint64(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_little_endian_32(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint32 val = luaL_checkunsigned(L, 2);
codestream->WriteLittleEndian32(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_little_endian_64(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint64 val = luaL_checkunsigned(L, 2);
codestream->WriteLittleEndian64(val);
return 0;
}
static const struct luaL_Reg CodedOutputStream_functions [] = {
{"new", lua_protobuf_coded_output_stream_new},
{NULL, NULL}
};
static const struct luaL_Reg CodedOutputStream_methods [] = {
{"__gc", lua_protobuf_coded_output_stream_gc},
{"Skip", lua_protobuf_coded_output_stream_skip},
{"ByteCount", lua_protobuf_coded_output_stream_byte_count},
{"WriteRaw", lua_protobuf_coded_output_stream_write_raw},
{"WriteVarint32", lua_protobuf_coded_output_stream_write_varint_32},
{"WriteVarint64", lua_protobuf_coded_output_stream_write_varint_64},
{"WriteLittleEndian32", lua_protobuf_coded_output_stream_write_little_endian_32},
{"WriteLittleEndian64", lua_protobuf_coded_output_stream_write_little_endian_64},
{NULL, NULL},
};
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static const struct luaL_Reg CodedInputStream_lib_functions [] = {
{NULL, NULL}
};
int lua_protobuf_coded_streams_open(lua_State* L) {
luaL_checktype(L, -1, LUA_TTABLE);
luaL_newmetatable(L, "protobuf_.CodedInputStream");
lua_pushvalue(L, -1);
lua_setfield(L, -2, "__index");
luaL_setfuncs(L, CodedInputStream_methods, 0);
lua_pop(L, 1);//pop the metatable
luaL_newmetatable(L, "protobuf_.CodedOutputStream");
lua_pushvalue(L, -1);
lua_setfield(L, -2, "__index");
luaL_setfuncs(L, CodedOutputStream_methods, 0);
lua_pop(L, 1);//pop the metatable
// add create funcs and tables
luaL_newlib(L, CodedInputStream_functions);
lua_setfield(L, -2, "CodedInputStream");
luaL_newlib(L, CodedOutputStream_functions);
lua_setfield(L, -2, "CodedOutputStream");
return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
const char *luaEXT_findtable (lua_State *L, const char *fname, int idx, int szhint) {
const char *e;
if (idx) lua_pushvalue(L, idx);
do {
e = strchr(fname, '.');
if (e == NULL) e = fname + strlen(fname);
lua_pushlstring(L, fname, e - fname);
lua_rawget(L, -2);
if (lua_isnil(L, -1)) { /* no such field? */
lua_pop(L, 1); /* remove this nil */
lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
lua_pushlstring(L, fname, e - fname);
lua_pushvalue(L, -2);
lua_settable(L, -4); /* set new table into field */
}
else if (!lua_istable(L, -1)) { /* field has a non-table value? */
lua_pop(L, 2); /* remove table and value */
return fname; /* return problematic part of the name */
}
lua_remove(L, -2); /* remove previous table */
fname = e + 1;
} while (*e == '.');
return NULL;
}
#ifdef __cplusplus
}
#endif
'''
def c_header_header(filename, package):
return [
'// Generated by the lua-protobuf compiler.',
'// You shouldn\'t be editing this file manually',
'//',
'// source proto file: %s' % filename,
'',
'#ifndef LUA_PROTOBUF_%s_%s_H' % (package.replace('.', '_'), filename.replace('.proto', '')),
'#define LUA_PROTOBUF_%s_%s_H' % (package.replace('.', '_'), filename.replace('.proto', '')),
'',
'#include "lua-protobuf.h"',
'#include <%s.pb.h>' % filename.replace('.proto', ''),#package.replace('.', '_'),
'',
'#ifdef __cplusplus',
'extern "C" {',
'#endif',
'',
'#include <lua.h>',
'',
'const char* luaEXT_findtable (lua_State*, const char*, int, int);',
'',
## We do this function based on file name to avoid name collisions
'// register all messages in this package to a Lua state',
'LUA_PROTOBUF_EXPORT int %sopen(lua_State *L);' % proto_function_open_name(filename),
'',
]
def source_header(filename, package, file_descriptor):
'''Returns lines that begin a source file'''
lines = []
lines.extend( [
'// Generated by the lua-protobuf compiler',
'// You shouldn\'t edit this file manually',
'//',
'// source proto file: %s' % filename,
'',
])
lines.append('#include "%s.pb.lua.h"' % filename.replace('.proto', ''))
for type in file_descriptor.dependency:
lines.append('#include "%s.pb.lua.h"' % type.replace('.proto', ''))
lines.extend( ['',
'#ifdef __cplusplus',
'extern "C" { // make sure functions treated with C naming',
'#endif',
'',
'#include <lauxlib.h>',
'',
'#ifdef __cplusplus',
'}',
'#endif',
'',
'#include <string>',
'',
'// this represents Lua udata for a protocol buffer message',
'// we record where a message came from so we can GC it properly',
'typedef struct msg_udata { // confuse over-simplified pretty-printer',
' ::google::protobuf::MessageLite * msg;',
' bool lua_owns;',
' lua_protobuf_gc_callback gc_callback;',
' void * callback_data;',
'} msg_udata;',
'',])
return lines
def proto_function_open_name(filename):
return 'lua_protobuf_%s_' % filename.replace('.proto', '')
def package_function_prefix(package):
return 'lua_protobuf_%s_' % package.replace('.', '_')
def message_function_prefix(package, message):
return '%s%s_' % (package_function_prefix(package), message)
def message_open_function_name(package, message):
'''Returns function name that registers the Lua library for a message type'''
return '%sopen' % message_function_prefix(package, message)
def cpp_class(package, message = None):
'''Returns the fully qualified class name for a message type'''
if not message:
return package.replace('.', '::')
return '::%s::%s' % ( package.replace('.', '::'), message )
def field_function_name(package, message, prefix, field):
'''Obtain the function name of a field accessor/mutator function'''
return '%s%s_%s' % ( message_function_prefix(package, message), prefix, field )
def field_function_start(package, message, prefix, field):
'''Obtain the start of function for a field accessor function'''
return [
'int %s(lua_State *L)' % field_function_name(package, message, prefix, field.lower()),
'{',
]
def lua_libname(package, message):
'''Returns the Lua library name for a specific message'''
return 'protobuf.%s.%s' % (package, message)
def metatable(package, message):
'''Returns Lua metatable for protocol buffer message type'''
return 'protobuf_.%s.%s' % (package, message)
def obtain_message_from_udata(package, message=None, index=1, varname='m'):
'''Statement that obtains a message from userdata'''
c = cpp_class(package, message)
return [
'msg_udata * %sud = (msg_udata *)%s;' % ( varname, check_udata(package, message, index) ),
'%s *%s = (%s *)%sud->msg;' % ( c, varname, c, varname ),
]
def check_udata(package, message, index=1):
'''Validates a udata is instance of protocol buffer message
By default, it validates udata at top of the stack
'''
return 'luaL_checkudata(L, %d, "%s")' % ( index, metatable(package, message) )
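# ---- illustrative only, not part of the original generator ----
# What the naming helpers above produce for a hypothetical package "tutorial"
# and message "Person":
#
#   message_function_prefix('tutorial', 'Person')    -> 'lua_protobuf_tutorial_Person_'
#   message_open_function_name('tutorial', 'Person') -> 'lua_protobuf_tutorial_Person_open'
#   cpp_class('tutorial', 'Person')                  -> '::tutorial::Person'
#   lua_libname('tutorial', 'Person')                -> 'protobuf.tutorial.Person'
#   metatable('tutorial', 'Person')                  -> 'protobuf_.tutorial.Person'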
def has_body(package, message, field):
'''Returns the function body for a has_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('lua_pushboolean(L, m->has_%s());' % field.lower())
lines.append('return 1;')
return lines
def clear_body(package, message, field):
'''Returns the function body for a clear_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('m->clear_%s();' % field.lower())
lines.append('return 0;')
return lines
def size_body(package, message, field):
'''Returns the function body for a size_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('int size = m->%s_size();' % field.lower())
lines.append('lua_pushinteger(L, size);')
lines.append('return 1;')
return lines
def add_body(package, message, field, type_name):
'''Returns the function body for the add_<field> function for repeated embedded messages'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.extend([
'%s *msg_new = m->add_%s();' % ( cpp_class(type_name), field.lower() ),
# since the message is allocated out of the containing message, Lua
# does not need to do GC
'lua_protobuf%s_pushreference(L, msg_new, NULL, NULL);' % type_name.replace('.', '_'),
'return 1;',
])
return lines
def field_get(package, message, field_descriptor):
'''Returns function definition for a get_<field> function'''
name = field_descriptor.name
type = field_descriptor.type
type_name = field_descriptor.type_name
label = field_descriptor.label
repeated = label == FieldDescriptor.LABEL_REPEATED
lines = []
lines.extend(field_function_start(package, message, 'get', name))
lines.extend(obtain_message_from_udata(package, message))
# the logic is significantly different depending on if the field is
# singular or repeated.
# for repeated, we have an argument which points to the numeric index to
# retrieve. in true Lua convention, we index starting from 1, which is
# different from protocol buffers, which indexes from 0
if repeated:
lines.extend([
'if (lua_gettop(L) != 2) {',
'return luaL_error(L, "missing required numeric argument");',
'}',
'lua_Integer index = luaL_checkinteger(L, 2);',
'if (index < 1 || index > m->%s_size()) {' % name.lower(),
# TODO is returning nil the more Lua way?
'return luaL_error(L, "index must be between 1 and current size: %%d", m->%s_size());' % name.lower(),
'}',
])
# TODO float and double types are not equivalent. don't treat them as such
# TODO figure out how to support 64 bit integers properly
if repeated:
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'string s = m->%s(index - 1);' % name.lower(),
'lua_pushlstring(L, s.c_str(), s.size());',
])
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('lua_pushboolean(L, m->%s(index-1));' % name.lower())
elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32,
FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]:
lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower())
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE:
lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.extend([
'%s * got_msg = m->mutable_%s(index-1);' % ( type_name.replace('.', '::'), name.lower() ),
'lua_protobuf%s_pushreference(L, got_msg, NULL, NULL);' % type_name.replace('.', '_'),
])
else:
lines.append('return luaL_error(L, "lua-protobuf does not support this field type");')
else:
# for scalar fields, we push nil if the value is not defined
# this is the Lua way
if type == FieldDescriptor.TYPE_STRING or type == FieldDescriptor.TYPE_BYTES:
lines.append('string s = m->%s();' % name.lower())
lines.append('if (m->has_%s()) lua_pushlstring(L, s.c_str(), s.size()); else lua_pushnil(L);' % name.lower())
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('if (m->has_%s()) lua_pushboolean(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32,
FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE:
lines.append('if (m->has_%s()) lua_pushnumber(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.extend([
'if (!m->has_%s()) {' % name.lower(),
'lua_pushnil(L);',
'}',
# we push the message as userdata
# since the message is allocated out of the parent message, we
# don't need to do garbage collection
'%s * got_msg = m->mutable_%s();' % ( type_name.replace('.', '::'), name.lower() ),
'lua_protobuf%s_pushreference(L, got_msg, NULL, NULL);' % type_name.replace('.', '_'),
])
else:
# not supported yet :(
lines.append('return luaL_error(L, "lua-protobuf does not support this field type");')
lines.append('return 1;')
lines.append('}\n')
return lines
def field_set_assignment(field, args):
return [
'if (index == current_size + 1) {',
'm->add_%s(%s);' % ( field.lower(), args ),
'}',
'else {',
'm->set_%s(index-1, %s);' % ( field.lower(), args ),
'}',
]
def field_set(package, message, field_descriptor):
'''Returns function definition for a set_<field> function'''
name = field_descriptor.name
type = field_descriptor.type
type_name = field_descriptor.type_name
label = field_descriptor.label
repeated = label == FieldDescriptor.LABEL_REPEATED
lines = []
lines.extend(field_function_start(package, message, 'set', name.lower()))
lines.extend(obtain_message_from_udata(package, message, 1))
# we do things differently depending on if this is a singular or repeated field
# for singular fields, the new value is the first argument
# for repeated fields, the index is arg1 and the value is arg2
if repeated:
lines.extend([
'if (lua_gettop(L) != 3) {',
' return luaL_error(L, "required 2 arguments not passed to function");',
'}',
'lua_Integer index = luaL_checkinteger(L, 2);',
'int current_size = m->%s_size();' % name.lower(),
'if (index < 1 || index > current_size + 1) {',
'return luaL_error(L, "index must be between 1 and %d", current_size + 1);',
'}',
# we don't support the automagic nil clears value... yet
'if (lua_isnil(L, 3)) {',
'return luaL_error(L, "cannot assign nil to repeated fields (yet)");',
'}',
])
# TODO proper 64 bit handling
# now move on to the assignment
if repeated:
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'size_t length = 0;',
'const char *s = luaL_checklstring(L, 3, &length);',
])
lines.extend(field_set_assignment(name, 's, length'))
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('bool b = !!lua_toboolean(L, 3);')
lines.extend(field_set_assignment(name, 'b'))
elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]:
lines.append('double d = lua_tonumber(L, 3);')
lines.extend(field_set_assignment(name, 'd'))
elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32,
FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, 'i'))
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, 'i'))
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, '(%s)i' % type_name.replace('.', '::')))
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.append('return luaL_error(L, "to manipulate embedded messages, fetch the embedded message and modify it");')
else:
lines.append('return luaL_error(L, "field type not yet supported");')
lines.append('return 0;')
else:
# if they call set() with nil, we interpret as a clear
# this is the Lua way, after all
lines.extend([
'if (lua_isnil(L, 2)) {',
'm->clear_%s();' % name.lower(),
'return 0;',
'}',
'',
])
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'if (!lua_isstring(L, 2)) return luaL_error(L, "passed value is not a string");',
'size_t len;',
'const char *s = lua_tolstring(L, 2, &len);',
'if (!s) {',
'luaL_error(L, "could not obtain string on stack. weird");',
'}',
'm->set_%s(s, len);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]:
lines.extend([
'if (!lua_isnumber(L, 2)) return luaL_error(L, "passed value cannot be converted to a number");',
'lua_Number n = lua_tonumber(L, 2);',
'm->set_%s(n);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32,
FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]:
lines.extend([
'lua_Integer v = luaL_checkinteger(L, 2);',
'm->set_%s(v);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s(i);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_BOOL:
lines.extend([
'bool b = !!lua_toboolean(L, 2);',
'm->set_%s(b);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_ENUM:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s((%s)i);' % ( name.lower(), type_name.replace('.', '::') ),
'return 0;',
])
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.append('return luaL_error(L, "to manipulate embedded messages, obtain the embedded message and manipulate it");')
else:
lines.append('return luaL_error(L, "field type is not yet supported");')
lines.append('}\n')
return lines
def new_message(package, message):
'''Returns function definition for creating a new protocol buffer message'''
lines = []
lines.append('int %snew(lua_State *L)' % message_function_prefix(package, message))
lines.append('{')
c = cpp_class(package, message)
lines.append('msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));')
lines.append('ud->lua_owns = true;')
lines.append('ud->msg = new %s();' % c)
lines.append('ud->gc_callback = NULL;')
lines.append('ud->callback_data = NULL;')
lines.append('luaL_getmetatable(L, "%s");' % metatable(package, message))
lines.append('lua_setmetatable(L, -2);')
lines.append('return 1;')
lines.append('}\n')
return lines
def message_pushcopy_function(package, message):
'''Returns function definition for pushing a copy of a message to the stack'''
return [
'bool %spushcopy(lua_State *L, const %s &from)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = new %s(from);' % cpp_class(package, message),
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def message_getcopy_function(package, message):
'''Returns function definition for getting a copy of a message from the stack'''
return [
'void %sgetcopy(lua_State *L, int index, %s &to)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)luaL_checkudata(L, index, "%s");' % ( metatable(package, message) ),
'to.CopyFrom(*ud->msg);',
'}',
]
def message_pushreference_function(package, message):
'''Returns function definition for pushing a reference of a message on the stack'''
return [
'bool %spushreference(lua_State *L, %s *msg, lua_protobuf_gc_callback f, void *data)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = false;',
'ud->msg = msg;',
'ud->gc_callback = f;',
'ud->callback_data = data;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def parsefromstring_message_function(package, message):
'''Returns function definition for parsing a message from a serialized string'''
lines = []
lines.append('int %sparsefromstring(lua_State *L)' % message_function_prefix(package, message))
c = cpp_class(package, message)
lines.extend([
'{',
'if (lua_gettop(L) != 1) {',
'return luaL_error(L, "parsefromstring() requires a string argument. none given");',
'}',
'size_t len;',
'const char *s = luaL_checklstring(L, -1, &len);',
'%s * msg = new %s();' % ( c, c ),
'if (!msg->ParseFromArray((const void *)s, len)) {',
'return luaL_error(L, "error deserializing message");',
'}',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = msg;',
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return 1;',
'}',
])
return lines
def label_to_string(label_value):
if label_value == FieldDescriptor.LABEL_OPTIONAL:
return "optional"
if label_value == FieldDescriptor.LABEL_REPEATED:
return "repeated"
if label_value == FieldDescriptor.LABEL_REQUIRED:
return "required"
def type_to_string(type_value):
if type_value == FieldDescriptor.TYPE_BOOL:# = 8
return "bool"
if type_value == FieldDescriptor.TYPE_BYTES:# = 12
return "bytes"
if type_value == FieldDescriptor.TYPE_DOUBLE:# = 1
return "double"
if type_value == FieldDescriptor.TYPE_ENUM:# = 14
return "enum"
if type_value == FieldDescriptor.TYPE_FIXED32:# = 7
return "fixed32"
if type_value == FieldDescriptor.TYPE_FIXED64:# = 6
return "fixed64"
if type_value == FieldDescriptor.TYPE_FLOAT:# = 2
return "float"
if type_value == FieldDescriptor.TYPE_GROUP:# = 10
return "group"
if type_value == FieldDescriptor.TYPE_INT32:# = 5
return "int32"
if type_value == FieldDescriptor.TYPE_INT64:# = 3
return "int64"
if type_value == FieldDescriptor.TYPE_MESSAGE:# = 11
return "message"
if type_value == FieldDescriptor.TYPE_SFIXED32:# = 15
return "sfixed32"
if type_value == FieldDescriptor.TYPE_SFIXED64:# = 16
return "sfixed64"
if type_value == FieldDescriptor.TYPE_SINT32:# = 17
return "sint32"
if type_value == FieldDescriptor.TYPE_SINT64:# = 18
return "sint64"
if type_value == FieldDescriptor.TYPE_STRING:# = 9
return "string"
if type_value == FieldDescriptor.TYPE_UINT32:# = 13
return "uint32"
if type_value == FieldDescriptor.TYPE_UINT64:# = 4
return "uint64"
def descriptor_message_function(package, message, descriptor):
'''Returns a function definition that builds a table describing the message and returns that table to Lua for inspection'''
lines = []
lines.extend([
'int %sdescriptor(lua_State* L)' % message_function_prefix(package, message),
'{',
' lua_newtable(L);',
' ',
]);
for fields_descriptor in descriptor.field:
lines.extend([
' // Field: default_value = %s' % fields_descriptor.default_value,
' lua_newtable(L);',
' lua_pushstring(L, "%s");' % fields_descriptor.name,
' lua_setfield(L, -2, "name");',
' lua_pushstring(L, "%s");' % label_to_string(fields_descriptor.label),
' lua_setfield(L, -2, "label");',
' lua_pushnumber(L, %s);' % fields_descriptor.number,
' lua_setfield(L, -2, "number");',
' lua_pushstring(L, "%s");' % type_to_string(fields_descriptor.type),
' lua_setfield(L, -2, "type");',
' lua_pushstring(L, "%s");' % (fields_descriptor.type_name) if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "type_name");' if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "%s");' % fields_descriptor.name,
]);
lines.extend([
'',
' return 1;',
'}',
])
return lines
def gc_message_function(package, message):
'''Returns function definition for garbage collecting a message'''
lines = [
'int %sgc(lua_State *L)' % message_function_prefix(package, message),
'{',
]
lines.extend(obtain_message_from_udata(package, message, 1))
# if Lua "owns" the message, we delete it
# else, we delete only if a callback exists and it says it is OK
lines.extend([
'if (mud->lua_owns) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'if (mud->gc_callback && mud->gc_callback(m, mud->callback_data)) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'return 0;',
'}',
])
return lines
def clear_message_function(package, message):
'''Returns the function definition for clearing a message'''
lines = [
'int %sclear(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'm->Clear();',
'return 0;',
'}',
])
return lines
def serialized_message_function(package, message):
'''Returns the function definition for serializing a message and its length'''
lines = [
'int %sserialized(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'string s;',
'if (!m->SerializeToString(&s)) {',
'return luaL_error(L, "error serializing message");',
'}',
'lua_pushlstring(L, s.c_str(), s.length());',
'lua_pushnumber(L, s.length());',
'return 2;',
'}',
])
return lines
def message_function_array(package, message):
'''Defines functions for Lua object type
These are defined on the Lua metatable for the message type.
These are basically constructors and static methods in Lua land.
'''
return [
'static const struct luaL_Reg %s_functions [] = {' % message,
'{"new", %snew},' % message_function_prefix(package, message),
'{"parsefromstring", %sparsefromstring},' % message_function_prefix(package, message),
'{"descriptor", %sdescriptor},' % message_function_prefix(package, message),
'{NULL, NULL}',
'};\n',
]
def message_method_array(package, descriptor):
'''Defines functions for Lua object instances
These are functions available to each instance of a message.
They take the object userdata as the first parameter.
'''
message = descriptor.name
fp = message_function_prefix(package, message)
lines = []
lines.append('static const struct luaL_Reg %s_methods [] = {' % message)
lines.append('{"serialized", %sserialized},' % fp)
lines.append('{"clear", %sclear},' % fp)
lines.append('{"__gc", %sgc},' % message_function_prefix(package, message))
for fd in descriptor.field:
name = fd.name
label = fd.label
type = fd.type
lines.append('{"clear_%s", %s},' % ( name.lower(), field_function_name(package, message, 'clear', name.lower()) ))
lines.append('{"get_%s", %s},' % ( name.lower(), field_function_name(package, message, 'get', name.lower()) ))
lines.append('{"set_%s", %s},' % ( name.lower(), field_function_name(package, message, 'set', name.lower()) ))
if label in [ FieldDescriptor.LABEL_REQUIRED, FieldDescriptor.LABEL_OPTIONAL ]:
lines.append('{"has_%s", %s},' % ( name.lower(), field_function_name(package, message, 'has', name.lower()) ))
if label == FieldDescriptor.LABEL_REPEATED:
lines.append('{"size_%s", %s},' % ( name.lower(), field_function_name(package, message, 'size', name.lower()) ))
if type == FieldDescriptor.TYPE_MESSAGE:
lines.append('{"add_%s", %s},' % ( name.lower(), field_function_name(package, message, 'add', name.lower()) ))
lines.append('{NULL, NULL},')
lines.append('};\n')
return lines
def message_open_function(package, descriptor):
'''Function definition for opening/registering a message type'''
message = descriptor.name
lines = [
'int %s(lua_State *L)' % message_open_function_name(package, message),
'{',
'luaL_checktype(L, -1, LUA_TTABLE);', #
'luaL_newmetatable(L, "%s");' % metatable(package, message),
'lua_pushvalue(L, -1);',
'lua_setfield(L, -2, "__index");',
'luaL_setfuncs(L, %s_methods, 0);' % message, ##'luaL_register(L, NULL, %s_methods);' % message,
'lua_pop(L, 1); // remove the metatable', #
'if (luaEXT_findtable(L, "%s", -1, 1)) { ' % package, #
' return luaL_error(L, "Error finding correct table");',
'}',
'luaL_newlib(L, %s_functions);' % message, ##'luaL_register(L, "%s", %s_functions);' % (lua_libname(package, message), message),
'lua_setfield(L, -2, "%s");' % message, #
'lua_pop(L, 1); //remove the returned table from findtable' #
]
for enum_descriptor in descriptor.enum_type:
lines.extend(enum_source(enum_descriptor))
lines.extend([
# this is wrong if we are calling through normal Lua module load means
#'lua_pop(L, 1);',
'return 0;',#'return 1;',
'}',
'\n',
])
return lines
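# In short, the generated open function: creates and populates the message metatable
# (instance methods plus __index), pops it, locates or creates the Lua table for the
# package via luaEXT_findtable, registers the static functions there under the message
# name, and finally emits any nested enum tables (see enum_source below).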
def message_header(package, message_descriptor):
'''Returns the lines for a header definition of a message'''
message_name = message_descriptor.name
lines = []
lines.append('// Message %s' % message_name)
function_prefix = 'lua_protobuf_' + package.replace('.', '_') + '_'
c = cpp_class(package, message_name)
lines.extend([
'// registers the message type with Lua',
'LUA_PROTOBUF_EXPORT int %s(lua_State *L);\n' % message_open_function_name(package, message_name),
'',
'// push a copy of the message to the Lua stack',
'// caller is free to use original message however she wants, but changes will not',
'// be reflected in Lua and vice versa',
'LUA_PROTOBUF_EXPORT bool %s%s_pushcopy(lua_State *L, const %s &msg);' % ( function_prefix, message_name, c),
'',
'// push a reference of the message to the Lua stack',
'// the 3rd and 4th arguments define a callback that can be invoked just before Lua',
'// garbage collects the message. If the 3rd argument is NULL, Lua will *NOT* free',
'// memory. If the second argument points to a function, that function is called when',
'// Lua garbage collects the object. The function is sent a pointer to the message being',
'// collected and the 4th argument to this function. If the function returns true,',
'// Lua will free the memory. If false (0), Lua will not free the memory.',
'LUA_PROTOBUF_EXPORT bool %s%s_pushreference(lua_State *L, %s *msg, lua_protobuf_gc_callback callback, void *data);' % ( function_prefix, message_name, c ),
'',
'// get a copy of the message from the Lua stack',
'// caller is free to use the new message however she wants, but changes will not',
'// be reflected in Lua and vice versa',
'LUA_PROTOBUF_EXPORT bool %s%s_getcopy(lua_State *L, int index, %s &msg);' % ( function_prefix, message_name, c),
'',
'',
'// The following functions are called by Lua. Many people will not need them,',
'// but they are exported for those that do.',
'',
'',
'// constructor called from Lua',
'LUA_PROTOBUF_EXPORT int %s%s_new(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain instance from a serialized string',
'LUA_PROTOBUF_EXPORT int %s%s_parsefromstring(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain table of fields in this message',
'LUA_PROTOBUF_EXPORT int %s%s_descriptor(lua_State* L);' % ( function_prefix, message_name),
'',
'// garbage collects message instance in Lua',
'LUA_PROTOBUF_EXPORT int %s%s_gc(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain serialized representation of instance',
'LUA_PROTOBUF_EXPORT int %s%s_serialized(lua_State *L);' % ( function_prefix, message_name ),
'',
'// clear all fields in the message',
'LUA_PROTOBUF_EXPORT int %s%s_clear(lua_State *L);' % ( function_prefix, message_name ),
'',
])
# each field defined in the message
for field_descriptor in message_descriptor.field:
field_name = field_descriptor.name
field_number = field_descriptor.number
field_label = field_descriptor.label
field_type = field_descriptor.type
field_default = field_descriptor.default_value
if field_label not in FIELD_LABEL_MAP.keys():
raise Exception('unknown field label constant: %s' % field_label)
field_label_s = FIELD_LABEL_MAP[field_label]
if field_type not in FIELD_TYPE_MAP.keys():
raise Exception('unknown field type: %s' % field_type)
field_type_s = FIELD_TYPE_MAP[field_type]
lines.append('// %s %s %s = %d' % (field_label_s, field_type_s, field_name, field_number))
lines.append('LUA_PROTOBUF_EXPORT int %s%s_clear_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
lines.append('LUA_PROTOBUF_EXPORT int %s%s_get_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
# TODO I think we can get rid of this for message types
lines.append('LUA_PROTOBUF_EXPORT int %s%s_set_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_label in [ FieldDescriptor.LABEL_REQUIRED, FieldDescriptor.LABEL_OPTIONAL ]:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_has_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_label == FieldDescriptor.LABEL_REPEATED:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_size_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_type == FieldDescriptor.TYPE_MESSAGE:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_add_%s(lua_State *L);' % ( function_prefix, message_name, field_name.lower()))
lines.append('')
lines.append('// end of message %s\n' % message_name)
return lines
def message_source(package, message_descriptor):
'''Returns lines of source code for an individual message type'''
lines = []
message = message_descriptor.name
lines.extend(message_function_array(package, message))
lines.extend(message_method_array(package, message_descriptor))
lines.extend(message_open_function(package, message_descriptor))
lines.extend(message_pushcopy_function(package, message))
lines.extend(message_pushreference_function(package, message))
lines.extend(message_getcopy_function(package, message))
lines.extend(new_message(package, message))
lines.extend(parsefromstring_message_function(package, message))
lines.extend(descriptor_message_function(package, message, message_descriptor))
lines.extend(gc_message_function(package, message))
lines.extend(clear_message_function(package, message))
lines.extend(serialized_message_function(package, message))
for descriptor in message_descriptor.field:
name = descriptor.name
# clear() is in all label types
lines.extend(field_function_start(package, message, 'clear', name))
lines.extend(clear_body(package, message, name))
lines.append('}\n')
lines.extend(field_get(package, message, descriptor))
lines.extend(field_set(package, message, descriptor))
if descriptor.label in [FieldDescriptor.LABEL_OPTIONAL, FieldDescriptor.LABEL_REQUIRED]:
# has_<field>()
lines.extend(field_function_start(package, message, 'has', name))
lines.extend(has_body(package, message, name))
lines.append('}\n')
if descriptor.label == FieldDescriptor.LABEL_REPEATED:
# size_<field>()
lines.extend(field_function_start(package, message, 'size', name))
lines.extend(size_body(package, message, name))
lines.append('}\n')
if descriptor.type == FieldDescriptor.TYPE_MESSAGE:
lines.extend(field_function_start(package, message, 'add', name))
lines.extend(add_body(package, message, name, descriptor.type_name))
lines.append('}\n')
return lines
def enum_source(descriptor):
'''Returns source code defining an enumeration type'''
# this function assumes the module/table the enum should be assigned to
# is at the top of the stack when it is called
name = descriptor.name
# enums are a little funky
# at the core, there is a table whose keys are the enum string names and
# values corresponding to the respective integer values. this table also
# has a metatable with __index to throw errors when unknown enumerations
# are accessed
#
# this table is then wrapped in a proxy table. the proxy table is empty
# but has a metatable with __index and __newindex set. __index is the
# table that actually contains the values. __newindex is a function that
# always throws an error.
#
# we need the proxy table so we can intercept all requests for writes.
# __newindex is only called for new keys, so we need an empty table so
# all writes are sent to __newindex
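# Roughly, the stack manipulation emitted below behaves like this Lua sketch
# (illustrative only; "Color" and its members are hypothetical names):
#   local values = setmetatable({ RED = 0, GREEN = 1 }, { __index = lua_protobuf_enum_index })
#   Color = setmetatable({}, { __index = values, __newindex = lua_protobuf_enum_newindex })
#   print(Color.RED)    --> 0
#   Color.BLUE = 2      --> error raised by the __newindex handler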
lines = [
'// %s enum' % name,
'lua_newtable(L); // proxy table',
'lua_newtable(L); // main table',
]
# assign enumerations to the table
for value in descriptor.value:
k = value.name
v = value.number
lines.extend([
'lua_pushnumber(L, %d);' % v,
'lua_setfield(L, -2, "%s");' % k
])
# assign the metatable
lines.extend([
'// define metatable on main table',
'lua_newtable(L);',
'lua_pushcfunction(L, lua_protobuf_enum_index);',
'lua_setfield(L, -2, "__index");',
'lua_setmetatable(L, -2);',
'',
'// define metatable on proxy table',
'lua_newtable(L);',
# proxy meta: -1; main: -2; proxy: -3
'lua_pushvalue(L, -2);',
'lua_setfield(L, -2, "__index");',
'lua_pushcfunction(L, lua_protobuf_enum_newindex);',
'lua_setfield(L, -2, "__newindex");',
'lua_remove(L, -2);',
'lua_setmetatable(L, -2);',
# proxy at top of stack now
# assign to appropriate module
'lua_setfield(L, -2, "%s");' % name,
'// end %s enum' % name
])
return lines
def file_header(file_descriptor):
filename = file_descriptor.name
package = file_descriptor.package
lines = []
lines.extend(c_header_header(filename, package))
for descriptor in file_descriptor.message_type:
lines.extend(message_header(package, descriptor))
lines.append('#ifdef __cplusplus')
lines.append('}')
lines.append('#endif')
lines.append('')
lines.append('#endif')
return '\n'.join(lines)
def file_source(file_descriptor):
'''Obtains the source code for a FileDescriptor instance'''
filename = file_descriptor.name
package = file_descriptor.package
lines = []
lines.extend(source_header(filename, package, file_descriptor))
lines.append('using ::std::string;\n')
lines.extend([
'int %sopen(lua_State *L)' % proto_function_open_name(filename),
'{',
])
# we populate enumerations as tables inside the protobuf global
# variable/module
# this is a little tricky, because we need to ensure all the parent tables
# are present
# i.e. protobuf.package.foo.enum => protobuf['package']['foo']['enum']
# we iterate over all the tables and create missing ones, as necessary
# we cheat here and use the undocumented/internal luaL_findtable function
# we probably shouldn't rely on an "internal" API, so
# TODO don't use internal API call
lines.extend([
'luaL_checktype(L, -1, LUA_TTABLE);',
'const char *table = luaEXT_findtable(L, "%s", -1, 1);' % package,
'if (table) {',
'return luaL_error(L, "could not create parent Lua tables");',
'}',
'if (!lua_istable(L, -1)) {',
'return luaL_error(L, "could not create parent Lua tables");',
'}',
])
for descriptor in file_descriptor.enum_type:
lines.extend(enum_source(descriptor))
lines.extend([
# don't need main table on stack any more
'lua_pop(L, 1);',
# and we register this package as a module, complete with enumerations
#'luaL_Reg funcs [] = { { NULL, NULL } };',
#'luaL_register(L, "protobuf.%s", funcs);' % package,
])
for descriptor in file_descriptor.message_type:
lines.append('%s(L);' % message_open_function_name(package, descriptor.name))
lines.append('return 0;')
lines.append('}')
lines.append('\n')
for descriptor in file_descriptor.message_type:
lines.extend(message_source(package, descriptor))
# perform some hacky pretty-printing
formatted = []
indent = 0
for line in lines:
if RE_BARE_BEGIN_BRACKET.search(line):
formatted.append((' ' * indent) + line)
indent += 4
elif RE_BEGIN_BRACKET.search(line):
formatted.append((' ' * indent) + line)
indent += 4
elif RE_END_BRACKET.search(line):
if indent >= 4:
indent -= 4
formatted.append((' ' * indent) + line)
else:
formatted.append((' ' * indent) + line)
return '\n'.join(formatted)
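# For example (assuming RE_BEGIN_BRACKET and RE_END_BRACKET match lines that open and
# close braces, as their names suggest), the emitter above turns
#   ['int f(lua_State *L)', '{', 'return 0;', '}']
# into
#   int f(lua_State *L)
#   {
#       return 0;
#   }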
| 38.692913
| 171
| 0.633734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33,205
| 0.563102
|
622c20a0f1ed0a0cb3f5825e401509f63208a68b
| 6,303
|
py
|
Python
|
autoimpute/AutoImpute.py
|
milescsmith/AutoImpute
|
b327283f6fe4efc9528052218ad7dbf094c8962c
|
[
"MIT"
] | null | null | null |
autoimpute/AutoImpute.py
|
milescsmith/AutoImpute
|
b327283f6fe4efc9528052218ad7dbf094c8962c
|
[
"MIT"
] | null | null | null |
autoimpute/AutoImpute.py
|
milescsmith/AutoImpute
|
b327283f6fe4efc9528052218ad7dbf094c8962c
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import scipy.io
import tensorflow as tf
from sklearn.metrics import mean_absolute_error, mean_squared_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def autoimpute(data: str, debug: bool = True, debug_display_step: int = 1,
hidden_units: int = 2000, lambda_val: int = 1, initial_learning_rate: float = 0.0001, iterations: int = 7000,
threshold: float = 0.0001,
masked_matrix_test: bool = False,
masking_percentage: float = 10,
log_file: str = 'log.txt',
load_saved: bool = False,
load_model_location: str = 'checkpoints/model0.ckpt'):
"""
# Print debug statements
debug: type = bool, default=True, Want debug statements
debug_display_step: type=int, default=1, Display the loss every debug_display_step iterations
# Hyper-parameters
hidden_units: type=int, default=2000, Size of hidden layer or latent space dimensions
lambda_val: type=int, default=1, Regularization coefficient, controlling the contribution of the regularization term in the cost function
initial_learning_rate: type=float, default=0.0001, Initial value of learning rate
iterations: type=int, default=7000, Number of iterations to train the model for
threshold: type=float, default=0.0001, Stop gradient descent once the change in the loss between consecutive iterations is less than the threshold, implying convergence
# Data
data: type = str, default='blakeley.csv', Dataset to run the script on. In the paper we choose from: ['blakeley.csv', 'jurkat-293T.mat', 'kolodziejczyk.csv', 'PBMC.csv', 'preimplantation.mat', 'quake.csv', 'usoskin.csv', 'zeisel.csv']
# Run the masked matrix recovery test
masked_matrix_test: type = bool, default=False, Run the masked matrix recovery test?
masking_percentage: type = float, default=10, Percentage of masking required, like 10, 20, 12.5 etc.
# Model save and restore options
save_model_location: type=str, default='checkpoints/model1.ckpt', Location to save the learnt model
load_model_location: type=str, default='checkpoints/model0.ckpt', Load the saved model from.
log_file: type=str, default='log.txt', text file to save training logs
load_saved: type=bool, default=False, flag to indicate if a saved model will be loaded
# masked and imputed matrix save location / name
imputed_save: type=str, default='imputed_matrix', save the imputed matrix as
masked_save: type=str, default='masked_matrix', save the masked matrix as
"""
# reading dataset
if(type(data) == np.ndarray):
processed_count_matrix = data
elif(type(data) != np.ndarray):
if(type(data) == str and data.endswith("csv")):
processed_count_matrix = np.loadtxt(data, delimiter=',')
elif(type(data) == str and data.endswith("mtx")):
processed_count_matrix = scipy.io.mmread(data)
processed_count_matrix = processed_count_matrix.toarray()
processed_count_matrix = np.array(processed_count_matrix)
if(masked_matrix_test):
masking_percentage = masking_percentage/100.0
idxi, idxj = np.nonzero(processed_count_matrix)
ix = np.random.choice(len(idxi), int(np.floor(masking_percentage * len(idxi))), replace = False)
store_for_future = processed_count_matrix[idxi[ix], idxj[ix]]
indices = idxi[ix], idxj[ix]
processed_count_matrix[idxi[ix], idxj[ix]] = 0 # making masks 0
matrix_mask = processed_count_matrix.copy()
matrix_mask[matrix_mask.nonzero()] = 1
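# In other words: masking_percentage of the *nonzero* entries are chosen at random,
# their original values and (row, col) indices are remembered in store_for_future and
# indices, and those entries are zeroed so the recovery error can be measured later.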
mae = []
rmse = []
nmse = []
# finding number of genes and cells.
genes = processed_count_matrix.shape[1]
cells = processed_count_matrix.shape[0]
print(f"[info] Genes : {genes}, Cells : {cells}")
# placeholder definitions
X = tf.placeholder("float32", [None, genes])
mask = tf.placeholder("float32", [None, genes])
matrix_mask = processed_count_matrix.copy()
matrix_mask[matrix_mask.nonzero()] = 1
print(f"[info] Hyper-parameters"
f"\n\t Hidden Units : {hidden_units}"
f"\n\t Lambda : {lambda_val}"
f"\n\t Threshold : {threshold}"
f"\n\t Iterations : {iterations}"
f"\n\t Initial learning rate : {initial_learning_rate}")
# model definition
weights = {
'encoder_h': tf.Variable(tf.random_normal([genes, hidden_units])),
'decoder_h': tf.Variable(tf.random_normal([hidden_units, genes])),
}
biases = {
'encoder_b': tf.Variable(tf.random_normal([hidden_units])),
'decoder_b': tf.Variable(tf.random_normal([genes])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h']), biases['encoder_b']))
return layer_1
def decoder(x):
layer_1 = tf.add(tf.matmul(x, weights['decoder_h']), biases['decoder_b'])
return layer_1
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
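# Shape summary: X is (cells, genes); the encoder maps it to (cells, hidden_units)
# through a sigmoid layer, and the decoder maps back to (cells, genes) with a linear
# layer, so y_pred below has the same shape as the input count matrix.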
# loss definition
y_pred = decoder_op
y_true = X
rmse_loss = tf.pow(tf.norm(y_true - y_pred * mask), 2)
regularization = tf.multiply(tf.constant(lambda_val/2.0, dtype="float32"), tf.add(tf.pow(tf.norm(weights['decoder_h']), 2), tf.pow(tf.norm(weights['encoder_h']), 2)))
loss = tf.add(tf.reduce_mean(rmse_loss), regularization)
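# The objective being minimized is therefore
#   || X - mask * decoder(encoder(X)) ||_F^2  +  (lambda_val / 2) * (||W_enc||_F^2 + ||W_dec||_F^2)
# i.e. a squared reconstruction error restricted (via the mask) to observed entries,
# plus Frobenius-norm regularization of the encoder and decoder weights.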
optimizer = tf.train.RMSPropOptimizer(initial_learning_rate).minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if(load_saved):
saver.restore(sess, load_model_location)
print("[info] model restored.")
else:
sess.run(init)
prev_loss = 0
for k in range(1, iterations+1):
_, loss = sess.run([optimizer, rmse_loss], feed_dict={X: processed_count_matrix, mask: matrix_mask})
lpentry = loss/cells
change = abs(prev_loss - lpentry)
if ( change <= threshold ):
print("Reached the threshold value.")
break
prev_loss = lpentry
if(debug):
if (k - 1) % debug_display_step == 0:
print(f'Step {k} : Total loss: {loss}, Loss per Cell : {lpentry}, Change : {change}')
with open(log_file, 'a') as log:
log.write('{0}\t{1}\t{2}\t{3}\n'.format(
k,
loss,
lpentry,
change
))
# if((k-1) % 5 == 0):
# save_path = saver.save(sess, save_model_location)
imputed_count_matrix = sess.run([y_pred], feed_dict={X: processed_count_matrix, mask: matrix_mask})
if(masked_matrix_test):
predictions = []
for idx, value in enumerate(store_for_future):
prediction = imputed_count_matrix[0][indices[0][idx], indices[1][idx]]
predictions.append(prediction)
else:
predictions = None
return imputed_count_matrix, predictions
| 38.668712
| 245
| 0.712201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,671
| 0.423766
|
622e6af5c7a24d6c89c506a8003134f044a4178c
| 6,358
|
py
|
Python
|
began/train.py
|
imironhead/ml_gan
|
f6c3bbb8de9d487cbf8ff821117768ffed04278e
|
[
"MIT"
] | 8
|
2017-06-11T05:03:30.000Z
|
2019-02-13T14:16:47.000Z
|
began/train.py
|
imironhead/ml_gan
|
f6c3bbb8de9d487cbf8ff821117768ffed04278e
|
[
"MIT"
] | null | null | null |
began/train.py
|
imironhead/ml_gan
|
f6c3bbb8de9d487cbf8ff821117768ffed04278e
|
[
"MIT"
] | null | null | null |
"""
"""
import began
import glob
import os
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def sanity_check():
"""
"""
if not os.path.isdir(FLAGS.portraits_dir_path):
raise Exception('invalid portraits directory')
def build_dataset_reader():
"""
"""
paths_png_wildcards = os.path.join(FLAGS.portraits_dir_path, '*.png')
paths_png = glob.glob(paths_png_wildcards)
file_name_queue = tf.train.string_input_producer(paths_png)
reader = tf.WholeFileReader()
reader_key, reader_val = reader.read(file_name_queue)
image = tf.image.decode_png(reader_val, channels=3, dtype=tf.uint8)
# assume the size of input images are either 128x128x3 or 64x64x3.
if FLAGS.crop_image:
image = tf.image.crop_to_bounding_box(
image,
FLAGS.crop_image_offset_y,
FLAGS.crop_image_offset_x,
FLAGS.crop_image_size_m,
FLAGS.crop_image_size_m)
image = tf.random_crop(
image, size=[FLAGS.crop_image_size_n, FLAGS.crop_image_size_n, 3])
image = tf.image.resize_images(image, [FLAGS.image_size, FLAGS.image_size])
image = tf.image.random_flip_left_right(image)
image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0
return tf.train.batch(
tensors=[image],
batch_size=FLAGS.batch_size,
capacity=FLAGS.batch_size)
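# Pipeline recap (added for clarity): each PNG is decoded, optionally cropped to the
# configured bounding box, randomly cropped to crop_image_size_n, resized to
# image_size x image_size, randomly flipped, scaled from [0, 255] to [-1, 1], and then
# grouped into batches of batch_size.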
def reshape_batch_images(batch_images):
"""
"""
batch_size = FLAGS.batch_size
image_size = FLAGS.image_size
# build summary for generated fake images.
grid = \
tf.reshape(batch_images, [1, batch_size * image_size, image_size, 3])
grid = tf.split(grid, FLAGS.summary_row_size, axis=1)
grid = tf.concat(grid, axis=2)
grid = tf.saturate_cast(grid * 127.5 + 127.5, tf.uint8)
return grid
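# Worked shape example (assuming the default batch-size of 16, summary-row-size of 4,
# and an image_size of 64, which this file does not define itself): (16, 64, 64, 3)
# -> reshape to (1, 1024, 64, 3) -> split into 4 along axis 1 -> concat along axis 2
# -> (1, 256, 256, 3), i.e. a 4x4 grid of images in a single summary image.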
def build_summaries(gan):
"""
"""
summaries = {}
# build generator summary
summaries['generator'] = \
tf.summary.scalar('generator loss', gan['generator_loss'])
# build discriminator summaries
d_summaries = []
scalar_table = [
('convergence_measure', 'convergence measure'),
('discriminator_loss', 'discriminator loss'),
('learning_rate', 'learning rate'),
]
for scalar in scalar_table:
d_summaries.append(tf.summary.scalar(scalar[1], gan[scalar[0]]))
summaries['discriminator_part'] = tf.summary.merge(d_summaries)
# build image summaries
image_table = [
('real', 'real image'),
('fake', 'generated image'),
('ae_output_real', 'autoencoder real'),
('ae_output_fake', 'autoencoder fake')
]
for table in image_table:
grid = reshape_batch_images(gan[table[0]])
d_summaries.append(tf.summary.image(table[1], grid, max_outputs=4))
summaries['discriminator_plus'] = tf.summary.merge(d_summaries)
return summaries
def train():
"""
"""
# tensorflow
checkpoint_source_path = tf.train.latest_checkpoint(
FLAGS.checkpoints_dir_path)
checkpoint_target_path = os.path.join(
FLAGS.checkpoints_dir_path, 'model.ckpt')
# the input batch (uniform z) for the generator.
seed = tf.random_uniform(
shape=[FLAGS.batch_size, FLAGS.seed_size], minval=-1.0, maxval=1.0)
# the input batch (real data) for the discriminator.
real = build_dataset_reader()
gan_graph = began.build_began(seed, real)
summaries = build_summaries(gan_graph)
reporter = tf.summary.FileWriter(FLAGS.logs_dir_path)
with tf.Session() as session:
if checkpoint_source_path is None:
session.run(tf.global_variables_initializer())
else:
tf.train.Saver().restore(session, checkpoint_source_path)
# discard any stale summary data that overlaps with the restored global step
global_step = session.run(gan_graph['global_step'])
reporter.add_session_log(
tf.SessionLog(status=tf.SessionLog.START),
global_step=global_step)
# make dataset reader work
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
while True:
# discriminator
fetches = {
'temp_0': gan_graph['next_k'],
'temp_1': gan_graph['discriminator_trainer'],
}
if global_step % 500 == 0:
fetches['summary'] = summaries['discriminator_plus']
else:
fetches['summary'] = summaries['discriminator_part']
fetched = session.run(fetches)
reporter.add_summary(fetched['summary'], global_step)
# generator
fetches = {
'global_step': gan_graph['global_step'],
'temp_0': gan_graph['generator_trainer'],
'summary': summaries['generator'],
}
fetched = session.run(fetches)
global_step = fetched['global_step']
reporter.add_summary(fetched['summary'], global_step)
if global_step % 70000 == 0:
session.run(gan_graph['decay_learning_rate'])
if global_step % 100 == 0:
print('step {}'.format(global_step))
if global_step % 5000 == 0:
tf.train.Saver().save(
session,
checkpoint_target_path,
global_step=gan_graph['global_step'])
coord.request_stop()
coord.join(threads)
def main(_):
"""
"""
began.sanity_check()
sanity_check()
train()
if __name__ == '__main__':
"""
"""
tf.app.flags.DEFINE_string('portraits-dir-path', '', '')
tf.app.flags.DEFINE_string('logs-dir-path', '', '')
tf.app.flags.DEFINE_string('checkpoints-dir-path', '', '')
tf.app.flags.DEFINE_boolean('crop-image', False, '')
tf.app.flags.DEFINE_integer('crop-image-offset-x', 25, '')
tf.app.flags.DEFINE_integer('crop-image-offset-y', 50, '')
tf.app.flags.DEFINE_integer('crop-image-size-m', 128, '')
tf.app.flags.DEFINE_integer('crop-image-size-n', 128, '')
tf.app.flags.DEFINE_integer('summary-row-size', 4, '')
tf.app.flags.DEFINE_integer('summary-col-size', 4, '')
# arXiv:1703.10717
# we typically used a batch size of n = 16.
tf.app.flags.DEFINE_integer('batch-size', 16, '')
tf.app.run()
| 27.764192
| 79
| 0.619063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,377
| 0.216578
|
622f35e7c59c0030afa33973573d5f2d9c50a69c
| 2,771
|
py
|
Python
|
custom/abt/reports/tests/test_fixture_utils.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
custom/abt/reports/tests/test_fixture_utils.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
custom/abt/reports/tests/test_fixture_utils.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
import doctest
from nose.tools import assert_equal, assert_true
from corehq.apps.fixtures.models import (
FieldList,
FixtureDataItem,
FixtureItemField,
)
from custom.abt.reports import fixture_utils
from custom.abt.reports.fixture_utils import (
dict_values_in,
fixture_data_item_to_dict,
)
def test_dict_values_in_param_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, None)
assert_true(result)
def test_dict_values_in_param_empty():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {})
assert_true(result)
def test_dict_values_in_value_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {'permutation': None})
assert_true(result)
def test_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='789abc',
properties={}
)
]
),
'name': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='John',
properties={'lang': 'en'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jan',
properties={'lang': 'nld'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jean',
properties={'lang': 'fra'}
),
]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': '789abc',
'name': 'John'
})
def test_empty_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[]
),
'name': FieldList(
doc_type='FieldList',
field_list=[]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': None,
'name': None,
})
def test_doctests():
results = doctest.testmod(fixture_utils)
assert results.failed == 0
| 26.644231
| 59
| 0.512811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.125586
|
622f4213fd755d69fc4b1a99782a6a2eaed6ce0c
| 1,223
|
py
|
Python
|
test/test_edit_contact.py
|
Lenchik13/Testing
|
fce156bfb639773056745ab1be19a840770739d4
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_contact.py
|
Lenchik13/Testing
|
fce156bfb639773056745ab1be19a840770739d4
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_contact.py
|
Lenchik13/Testing
|
fce156bfb639773056745ab1be19a840770739d4
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import random
def test_edit_contact(app, db, check_ui):
app.open_home_page()
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Contact", lastname="", nickname="",
address="", company="", home="",
mobile="", work="", fax="", email="",
email2="", email3="", homepage="",
byear="", address2="", phone2="",
notes="", bday="20", bmonth="6"))
old_contacts = db.get_contact_list()
rcontact = random.choice(old_contacts)
contact = Contact(lastname="lname", firstname="fname", address="address")
contact.id = rcontact.id
app.contact.modify_contact_by_id(contact)
app.open_home_page()
assert len(old_contacts) == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.remove(rcontact)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| 42.172414
| 123
| 0.611611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.058054
|
622f91861f9e766601a659b3e6368f910237afb0
| 159
|
py
|
Python
|
Source Codes Testing/list1.py
|
urstrulykkr/Python
|
098ed5d391f0e62d4950ca80cc57a032c65d1637
|
[
"MIT"
] | null | null | null |
Source Codes Testing/list1.py
|
urstrulykkr/Python
|
098ed5d391f0e62d4950ca80cc57a032c65d1637
|
[
"MIT"
] | null | null | null |
Source Codes Testing/list1.py
|
urstrulykkr/Python
|
098ed5d391f0e62d4950ca80cc57a032c65d1637
|
[
"MIT"
] | null | null | null |
lst1=list()
lst1.append('K')
lst1.append('A')
lst2=['U', 'S', 'H', 'I', 'K']
print(lst1+lst2)
print(lst2[0] +lst2[1]+lst1[1])
for i in lst1+lst2:
print(i)
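# Expected output (for reference): the first print shows
# ['K', 'A', 'U', 'S', 'H', 'I', 'K'], the second prints 'USA' ('U' + 'S' + 'A'),
# and the loop prints each of the seven letters on its own line.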
| 14.454545
| 32
| 0.578616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.132075
|
6231b011e60ef30120a9f211ba32d24a861eec6c
| 17,825
|
py
|
Python
|
sintel_GANs/flow_cgan_sintel_ssim_uv.py
|
tanlinc/opticalFlowGAN
|
f568e531265029f2f25f223ee92e1f53c0bb52f6
|
[
"MIT"
] | 1
|
2018-07-24T05:40:44.000Z
|
2018-07-24T05:40:44.000Z
|
sintel_GANs/flow_cgan_sintel_ssim_uv.py
|
tanlinc/opticalFlowGAN
|
f568e531265029f2f25f223ee92e1f53c0bb52f6
|
[
"MIT"
] | null | null | null |
sintel_GANs/flow_cgan_sintel_ssim_uv.py
|
tanlinc/opticalFlowGAN
|
f568e531265029f2f25f223ee92e1f53c0bb52f6
|
[
"MIT"
] | null | null | null |
import os, sys
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.plot
import tflib.flow_handler as fh
import tflib.SINTELdata as sintel
MODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp
DIM = 64 # model dimensionality; 128 overfits substantially, so 64 is used here
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 100000 # How many generator iterations to train for # 200000 takes too long
IM_DIM = 32 # number of pixels along x and y (square assumed)
SQUARE_IM_DIM = IM_DIM*IM_DIM # 32*32 = 1024
OUTPUT_DIM = IM_DIM*IM_DIM*3 # Number of pixels (3*32*32) - rgb color
OUTPUT_DIM_FLOW = IM_DIM*IM_DIM*2 # Number of pixels (2*32*32) - uv direction
CONTINUE = False # Default False, set True if restoring from checkpoint
START_ITER = 0 # Default 0, set accordingly if restoring from checkpoint (100, 200, ...)
CURRENT_PATH = "sintel/flowcganuv5"
restore_path = "/home/linkermann/opticalFlow/opticalFlowGAN/results/" + CURRENT_PATH + "/model.ckpt"
lib.print_model_settings(locals().copy())
if(CONTINUE):
tf.reset_default_graph()
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return LeakyReLU(output)
def Generator(n_samples, conditions, noise=None): # input conds additional to noise
if noise is None:
noise = tf.random_normal([n_samples, SQUARE_IM_DIM])
noise = tf.reshape(noise, [n_samples, 1, IM_DIM, IM_DIM])
# new conditional input: last frames
conds = tf.reshape(conditions, [n_samples, 6, IM_DIM, IM_DIM]) # conditions: (64,2*3072) TO conds: (64,6,32,32)
# for now just concat the inputs: noise as seventh dim of cond image
output = tf.concat([noise, conds], 1) # to: (BATCH_SIZE,7,32,32)
output = tf.reshape(output, [n_samples, SQUARE_IM_DIM*7]) # 32x32x4 = 4096; to: (BATCH_SIZE, 4096)
output = lib.ops.linear.Linear('Generator.Input', SQUARE_IM_DIM*7, 4*4*4*DIM, output) # 4*4*4*DIM = 64*64 = 4096
output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, 4*DIM, 4, 4])
output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 2, 5, output) # output flow in color --> dim is 2
output = tf.tanh(output)
return tf.reshape(output, [-1, OUTPUT_DIM_FLOW]) # output flow --> dim is 2
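# Shape walk-through (derived from the code above, with BATCH_SIZE=64, IM_DIM=32, DIM=64):
# noise (64, 1024) -> (64, 1, 32, 32); conditions (64, 6, 32, 32); concatenated to
# (64, 7, 32, 32) -> flattened to (64, 7168) -> linear to (64, 4096) -> reshaped to
# (64, 256, 4, 4) -> three deconvolutions up to (64, 2, 32, 32) -> flattened to
# (64, 2048), i.e. one (u, v) flow field per sample.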
def Discriminator(inputs, conditions): # input conds as well
inputs = tf.reshape(inputs, [-1, 2, IM_DIM, IM_DIM]) # input flow --> dim is 2
conds = tf.reshape(conditions, [-1, 6, IM_DIM, IM_DIM]) # new conditional input: last frames
# for now just concat the inputs
ins = tf.concat([inputs, conds], 1) #to: (BATCH_SIZE, 8, 32, 32)
output = lib.ops.conv2d.Conv2D('Discriminator.1', 8, DIM, 5, ins, stride=2) # first dim is different: 8 now
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)
output = LeakyReLU(output)
#output = lib.ops.conv2d.Conv2D('Discriminator.4', 4*DIM, 8*DIM, 5, output, stride=2)
# if MODE != 'wgan-gp':
# output = lib.ops.batchnorm.Batchnorm('Discriminator.BN4', [0,2,3], output)
# output = LeakyReLU(output)
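# Caveat (added observation, worth double-checking): with IM_DIM=32 and the three
# stride-2 convolutions above, the activation is (BATCH_SIZE, 4*DIM, 4, 4), i.e.
# 4*4*4*DIM values per sample, so the reshape below to [-1, 4*4*8*DIM] silently halves
# the leading dimension (64 -> 32) and mixes pairs of samples in the critic output.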
output = tf.reshape(output, [-1, 4*4*8*DIM]) # adjusted outcome
output = lib.ops.linear.Linear('Discriminator.Output', 4*4*8*DIM, 1, output)
return tf.reshape(output, [-1])
cond_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, 2*OUTPUT_DIM]) # cond input for G and D, 2 frames!
cond_data = 2*((tf.cast(cond_data_int, tf.float32)/255.)-.5) #normalized [-1,1]!
#real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM_FLOW]) # real data is flow, dim 2!
real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM_FLOW]) #already float, normalized [-1,1]!
fake_data = Generator(BATCH_SIZE, cond_data)
disc_real = Discriminator(real_data, cond_data)
disc_fake = Discriminator(fake_data, cond_data)
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')
if MODE == 'wgan':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=disc_params)
clip_ops = []
for var in disc_params:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
elif MODE == 'wgan-gp':
# Standard WGAN loss
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
# Gradient penalty
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = tf.gradients(Discriminator(interpolates, cond_data), [interpolates])[0] #added cond here
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += LAMBDA*gradient_penalty
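# In formula form (the standard WGAN-GP critic objective, Gulrajani et al. 2017):
#   L_D = E[D(G(z)|c)] - E[D(x|c)] + LAMBDA * E[(||grad_xhat D(xhat|c)||_2 - 1)^2]
# where xhat = x + alpha * (G(z) - x), alpha ~ U[0, 1], matching the lines above with
# the conditioning frames c passed to the Discriminator alongside the flow.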
gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)
elif MODE == 'dcgan':
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake, tf.ones_like(disc_fake)))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake, tf.zeros_like(disc_fake)))
disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_real, tf.ones_like(disc_real)))
disc_cost /= 2.
gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost,
var_list=lib.params_with_name('Generator'))
disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost,
var_list=lib.params_with_name('Discriminator.'))
# Dataset iterators
gen = sintel.load_train_gen(BATCH_SIZE, (IM_DIM,IM_DIM,3), (IM_DIM,IM_DIM,2)) # batch size, im size, im size flow
dev_gen = sintel.load_test_gen(BATCH_SIZE, (IM_DIM,IM_DIM,3), (IM_DIM,IM_DIM,2))
# For generating samples: define fixed noise and conditional input
fixed_cond_samples, fixed_flow_samples = next(gen) # shape: (batchsize, 3072)
fixed_cond_data_int = fixed_cond_samples[:,0:2*OUTPUT_DIM] # earlier frames as condition, cond samples shape (64,3*3072)
fixed_real_data = fixed_flow_samples[:,OUTPUT_DIM_FLOW:] # later flow for discr, flow samples shape (64,2048)
fixed_real_data_norm01 = tf.cast(fixed_real_data+1.0, tf.float32)/2.0 # [0,1]
fixed_cond_data_normalized = 2*((tf.cast(fixed_cond_data_int, tf.float32)/255.)-.5) #normalized [-1,1]!
fixed_viz_data_int = fixed_cond_samples[:,OUTPUT_DIM:2*OUTPUT_DIM] # each later frame for viz
if(CONTINUE):
fixed_noise = tf.get_variable("noise", shape=[BATCH_SIZE, SQUARE_IM_DIM]) # take same noise like saved model
else:
fixed_noise = tf.Variable(tf.random_normal(shape=[BATCH_SIZE, SQUARE_IM_DIM], dtype=tf.float32), name='noise') #variable: saved, for additional channel
fixed_noise_samples = Generator(BATCH_SIZE, fixed_cond_data_normalized, noise=fixed_noise) # Generator(n_samples,conds, noise):
def generate_image(frame, true_dist): # generates 64 (batch-size) samples next to each other in one image!
print("Iteration %d : \n" % frame)
samples = session.run(fixed_noise_samples, feed_dict={real_data: fixed_real_data, cond_data_int: fixed_cond_data_int}) # output range (-1.0,1.0), size=(BATCH_SIZE, OUT_DIM)
#samples_255 = ((samples+1.)*(255./2)).astype('int32') #(-1,1) to [0,255] for displaying
samples_01 = ((samples+1.)/2.).astype('float32') # [0,1] is a np.ndarray shape (64, 2048)
# print(fixed_real_data_norm01.eval()) # shape (64, 2048) # bigger areas with (almost) same flow
images2show = fixed_viz_data_int.reshape(BATCH_SIZE,3,IM_DIM,IM_DIM)
sample_flowimages, real_flowimages = [], []
for i in range(0, BATCH_SIZE):
real_flowimg, flowimg = [],[] # reset to be sure
flowimg = fh.computeFlowImg(samples[i,:].reshape((IM_DIM,IM_DIM,2))) # (32, 32, 3) # now color img!! :)
flowimg_T = np.transpose(flowimg, [2,0,1]) # (3, 32, 32)
# flowimage = flowimage_T.reshape((OUTPUT_DIM,)) # instead of flatten?
sample_flowimages.append(flowimg_T)
real_uvflow = fixed_real_data[i,:]
real_uvflow = real_uvflow.reshape((IM_DIM,IM_DIM,2))
real_flowimg = fh.computeFlowImg(real_uvflow) # (32, 32, 3) color img!
real_flowimg = real_flowimg.reshape(IM_DIM,IM_DIM,3).astype('int32') # (32, 32, 3)
real_flowimg_T = np.transpose(real_flowimg, [2,0,1]) # (3, 32, 32)
real_flowimages.append(real_flowimg_T) # or which one? # also save as .flo?
images2show = np.insert(images2show, i*2+1, flowimg_T, axis=0)
#samples_255[2*i+1,:] = flowimage # sample flow color image
# images2show.shape: (128, 3, 32, 32) = (2*BATCH_SIZE, 3, IM_DIM, IM_DIM)
# images.reshape((2*BATCH_SIZE, 3, IM_DIM, IM_DIM))
lib.save_images.save_images(images2show, 'samples_{}.jpg'.format(frame))
sample_flowims_np = np.asarray(sample_flowimages, np.int32)
real_flowims_np = np.asarray(real_flowimages, np.int32)
sample_flowims = tf.convert_to_tensor(sample_flowims_np, np.int32)
real_flowims = tf.convert_to_tensor(real_flowims_np, np.int32) # turn into tensor to reshape later
# tensor = tf.constant(np_array) # another way to create a tensor
# compare generated flow to real one # float..?
# u-v-component wise
real = tf.reshape(fixed_real_data_norm01, [BATCH_SIZE,IM_DIM,IM_DIM,2]) # use tf.reshape! Tensor! batch!
real_u = tf.slice(real, [0,0,0,0], [real.get_shape()[0],real.get_shape()[1],real.get_shape()[2], 1])
real_v = tf.slice(real, [0,0,0,1], [real.get_shape()[0],real.get_shape()[1],real.get_shape()[2], 1])
pred = tf.reshape(samples_01,[BATCH_SIZE,IM_DIM,IM_DIM,2]) # use tf reshape!
pred_u = tf.slice(pred, [0,0,0,0], [pred.get_shape()[0],pred.get_shape()[1],pred.get_shape()[2], 1])
pred_v = tf.slice(pred, [0,0,0,1], [pred.get_shape()[0],pred.get_shape()[1],pred.get_shape()[2], 1]) # shape (64, 32, 32) all of them
# mse & ssim on components
mseval_per_entry_u = tf.keras.metrics.mse(real_u, pred_u) # on u component, on [0,1], (64,32,32), small vals (^-1,-2,-3)
mseval_u = tf.reduce_mean(mseval_per_entry_u, [1,2]) # shape (64,) # diff numbers
mseval_per_entry_v = tf.keras.metrics.mse(real_v, pred_v) # on v component, on [0,1], (64,32,32), small vals (^-1,-2,-3)
mseval_v = tf.reduce_mean(mseval_per_entry_v, [1,2]) # shape (64,) # diff than per u entry
#ssimval_u = tf.image.ssim(real_u, pred_u, max_val=1.0) # in: tensor 64-batch, out: tensor ssimvals (64,)
#ssimval_v = tf.image.ssim(real_v, pred_v, max_val=1.0) # in: tensor 64-batch, out: tensor ssimvals (64,) # also minus vals, around 0, u and v differ
# avg: add and divide by 2
mseval_uv = tf.add(mseval_u, mseval_v) # tf.cast neccessary?
tensor2 = tf.constant(2.0, shape=[64])
#ssimval_uv = tf.add(ssimval_u, ssimval_v) # (64,)
mseval_uv = tf.div(mseval_uv, tensor2)
#ssimval_uv = tf.div(ssimval_uv, tensor2) # (64,), small around 0, up to 0.3 after first 100 iter
#ssimval_list_uv = ssimval_uv.eval() # to numpy array # (64,)
mseval_list_uv = mseval_uv.eval() # (64,)
print("mseval uv")
print(mseval_list_uv)
#print("ssimval uv")
#print(ssimval_list_uv)
# flow color ims to gray
real_flowims = tf.cast(real_flowims, tf.float32)/255. # to [0,1]
real_color = tf.reshape(real_flowims, [BATCH_SIZE,IM_DIM,IM_DIM,3])
real_gray = tf.image.rgb_to_grayscale(real_color) # tensor batch to gray; returns original dtype = float [0,1]
# print("real gray") # (64, 32, 32, 1)
sample_flowims = tf.cast(sample_flowims, tf.float32)/255. # to [0,1]
pred_color = tf.reshape(sample_flowims, [BATCH_SIZE,IM_DIM,IM_DIM,3]) # use tf.reshape! Tensor! batch!
pred_gray = tf.image.rgb_to_grayscale(pred_color) # (64, 32, 32, 1)
# mse & ssim on grayscale
mseval_per_entry_rgb = tf.keras.metrics.mse(real_gray, pred_gray) # on grayscale, on [0,1]..
mseval_rgb = tf.reduce_mean(mseval_per_entry_rgb, [1,2])
#ssimval_rgb = tf.image.ssim(real_gray, pred_gray, max_val=1.0) # in: tensor 64-batch, out: tensor ssimvals (64,)
#ssimval_list_rgb = ssimval_rgb.eval() # to numpy array # (64,)
mseval_list_rgb = mseval_rgb.eval() # (64,)
print("mseval rgb")
print(mseval_list_rgb)
#print("ssimval rgb")
#print(ssimval_list_rgb)
# print(ssimval_list)
# print(mseval_list)
for i in range (0,3):
#lib.plot.plot('SSIM uv for sample %d' % (i+1), ssimval_list_uv[i])
#lib.plot.plot('SSIM rgb for sample %d' % (i+1), ssimval_list_rgb[i])
lib.plot.plot('MSE uv for sample %d' % (i+1), mseval_list_uv[i])
lib.plot.plot('MSE rgb for sample %d' % (i+1), mseval_list_rgb[i])
print("sample %d \t MSE: %.5f \t %.5f\r\n" % (i, mseval_list_uv[i], mseval_list_rgb[i]))
#SSIM: %.5f \t %.5f\r\n" % (i, mseval_list_uv[i], mseval_list_rgb[i], ssimval_list_uv[i], ssimval_list_rgb[i]))
init_op = tf.global_variables_initializer() # op to initialize the variables.
saver = tf.train.Saver() # ops to save and restore all the variables.
# Train loop
with tf.Session() as session:
if(CONTINUE):
# Restore variables from disk.
saver.restore(session, restore_path)
print("Model restored.")
lib.plot.restore(START_ITER) # does not fully work, but makes plots start from newly started iteration
else:
session.run(init_op)
for iteration in range(START_ITER, ITERS): # START_ITER: 0 or from last checkpoint
start_time = time.time()
# Train generator
if iteration > 0:
_data, _ = next(gen) # shape: (batchsize, 6144), double output_dim now # flow as second argument not needed
_cond_data = _data[:, 0:2*OUTPUT_DIM] # earlier frames as conditional data, # flow for disc not needed here
_ = session.run(gen_train_op, feed_dict={cond_data_int: _cond_data})
# Train critic
if MODE == 'dcgan':
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in range(disc_iters):
_data, _flow = next(gen) # shape: (batchsize, 6144), double output_dim now # flow as second argument
_cond_data = _data[:, 0:2*OUTPUT_DIM] # earlier 2 frames as conditional data,
_real_data = _flow[:,OUTPUT_DIM_FLOW:] # later flow as real data for discriminator
_disc_cost, _ = session.run([disc_cost, disc_train_op], feed_dict={real_data: _real_data, cond_data_int: _cond_data})
if MODE == 'wgan':
_ = session.run(clip_disc_weights)
lib.plot.plot('train disc cost', _disc_cost)
lib.plot.plot('time', time.time() - start_time)
# Calculate dev loss and generate samples every 100 iters
if iteration % 100 == 99:
dev_disc_costs = []
_data, _flow = next(gen) # shape: (batchsize, 6144), double output_dim now # flow as second argument
_cond_data = _data[:, 0:2*OUTPUT_DIM] # earlier 2 frames as conditional data
_real_data = _flow[:,OUTPUT_DIM_FLOW:] # later flow as real data for discriminator
_dev_disc_cost = session.run(disc_cost, feed_dict={real_data: _real_data, cond_data_int: _cond_data})
dev_disc_costs.append(_dev_disc_cost)
lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
generate_image(iteration, _data)
# Save the variables to disk.
save_path = saver.save(session, restore_path)
print("Model saved in path: %s" % save_path)
# chkp.print_tensors_in_checkpoint_file("model.ckpt", tensor_name='', all_tensors=True)
# Save logs every 100 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
| 52.272727
| 176
| 0.675792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,136
| 0.344236
|
6232533ff943a823cead2f8d5e39f9cced275e1a
| 872
|
py
|
Python
|
Development/Scripts/sobel_edge_regular.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | null | null | null |
Development/Scripts/sobel_edge_regular.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | 1
|
2021-11-11T14:47:43.000Z
|
2021-11-11T14:52:51.000Z
|
Development/Scripts/sobel_edge_regular.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | 1
|
2021-11-11T12:18:21.000Z
|
2021-11-11T12:18:21.000Z
|
import cv2
# Read the original image
img = cv2.imread('../input/LennaGrey.png', 1)
# Display original image
cv2.imshow('Original', img)
cv2.waitKey(0)
# Convert to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image for better edge detection
img_blur = cv2.GaussianBlur(img_gray, (3,3), 0)
# Sobel Edge Detection
sobelx = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5) # Sobel Edge Detection on the X axis
sobely = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5) # Sobel Edge Detection on the Y axis
sobelxy = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5) # Combined X and Y Sobel Edge Detection
# Display Sobel Edge Detection Images
cv2.imshow('Sobel X', sobelx)
cv2.waitKey(0)
cv2.imshow('Sobel Y', sobely)
cv2.waitKey(0)
cv2.imshow('Sobel X Y using Sobel() function', sobelxy)
cv2.waitKey(0)
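# Editorial note (not part of the original script): cv2.imshow expects floating-point
# images scaled to [0, 1], while the CV_64F Sobel responses above also contain negative
# values, so the windows may render poorly. One common remedy, shown only as a sketch, is
# to convert to 8-bit magnitudes before display:
#   sobelx_display = cv2.convertScaleAbs(sobelx)
#   cv2.imshow('Sobel X (8-bit)', sobelx_display)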
| 36.333333
| 113
| 0.738532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.423165
|
6232c87d0d4107ba98750270bdd408dd5d0b9dfa
| 1,389
|
py
|
Python
|
src/python/pants/backend/terraform/target_types.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/terraform/target_types.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | 22
|
2022-01-27T09:59:50.000Z
|
2022-03-30T07:06:49.000Z
|
src/python/pants/backend/terraform/target_types.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.engine.rules import collect_rules
from pants.engine.target import (
COMMON_TARGET_FIELDS,
Dependencies,
FieldSet,
MultipleSourcesField,
Target,
generate_multiple_sources_field_help_message,
)
from pants.util.strutil import softwrap
class TerraformModuleSourcesField(MultipleSourcesField):
default = ("*.tf",)
expected_file_extensions = (".tf",)
ban_subdirectories = True
help = generate_multiple_sources_field_help_message(
"Example: `sources=['example.tf', 'new_*.tf', '!old_ignore.tf']`"
)
@dataclass(frozen=True)
class TerraformFieldSet(FieldSet):
required_fields = (TerraformModuleSourcesField,)
sources: TerraformModuleSourcesField
class TerraformModuleTarget(Target):
alias = "terraform_module"
core_fields = (*COMMON_TARGET_FIELDS, Dependencies, TerraformModuleSourcesField)
help = softwrap(
"""
A single Terraform module corresponding to a directory.
There must only be one `terraform_module` in a directory.
Use `terraform_modules` to generate `terraform_module` targets for less boilerplate.
"""
)
def rules():
return collect_rules()
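# Editorial sketch (not part of this module): a hypothetical BUILD file in a Terraform
# directory might register the target defined above roughly as
#   terraform_module(
#       name="root",
#       sources=["*.tf", "!old_ignore.tf"],
#   )
# where `name` is the generic Pants target field and `sources` is the field defined above.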
| 26.711538
| 92
| 0.735781
| 844
| 0.607631
| 0
| 0
| 153
| 0.110151
| 0
| 0
| 463
| 0.333333
|
623385600c5a7bc996eaff731400e30ab4f95471
| 4,462
|
py
|
Python
|
src/mdns_client/util.py
|
bgamari/micropython-mdns
|
b1a9473cd5200e97ee578be4c623bbd610f46b6c
|
[
"MIT"
] | 22
|
2021-01-06T02:52:35.000Z
|
2022-03-18T00:28:01.000Z
|
src/mdns_client/util.py
|
bgamari/micropython-mdns
|
b1a9473cd5200e97ee578be4c623bbd610f46b6c
|
[
"MIT"
] | 3
|
2021-04-19T15:44:09.000Z
|
2021-08-31T19:17:24.000Z
|
src/mdns_client/util.py
|
bgamari/micropython-mdns
|
b1a9473cd5200e97ee578be4c623bbd610f46b6c
|
[
"MIT"
] | 5
|
2021-03-10T10:24:46.000Z
|
2021-10-11T15:57:24.000Z
|
import struct
import uasyncio
from mdns_client.constants import REPEAT_TYPE_FLAG, TYPE_CNAME, TYPE_MX, TYPE_NS, TYPE_PTR, TYPE_SOA, TYPE_SRV
def dotted_ip_to_bytes(ip: str) -> bytes:
"""
Convert a dotted IPv4 address string into four bytes, with
some sanity checks
"""
ip_ints = [int(i) for i in ip.split(".")]
if len(ip_ints) != 4 or any(i < 0 or i > 255 for i in ip_ints):
raise ValueError
return bytes(ip_ints)
def bytes_to_dotted_ip(a: "Iterable[int]") -> str:
"""
Convert four bytes into a dotted IPv4 address string, without any
sanity checks
"""
return ".".join(str(i) for i in a)
def check_name(n: str) -> "List[bytes]":
"""
Ensure that a name is in the form of a list of encoded blocks of
bytes, typically starting as a qualified domain name
"""
if isinstance(n, str):
n = n.split(".")
if n[-1] == "":
n = n[:-1]
n = [i.encode("UTF8") if isinstance(i, str) else i for i in n]
return n
def string_packed_len(string: "List[bytes]") -> int:
return sum(len(i) + 1 for i in string) + 1
def name_to_bytes(name: str) -> bytes:
name_bytes = check_name(name)
    buffer = bytearray(string_packed_len(name_bytes))  # exact packed size: length prefixes + data + terminator
pack_name(buffer, name_bytes)
return buffer
def pack_name(buffer: bytes, string: "List[bytes]") -> None:
"""
Pack a string into the start of the buffer
We don't support writing with name compression, BIWIOMS
"""
output_index = 0
for part in string:
part_length = len(part)
buffer[output_index] = part_length
after_size_next_index = output_index + 1
end_of_pack_name_index = after_size_next_index + part_length
buffer[after_size_next_index:end_of_pack_name_index] = part
output_index += part_length + 1
buffer[output_index] = 0
def string_to_bytes(item: str) -> bytes:
buffer = bytearray(len(item) + 1)
buffer[0] = len(item)
buffer[1:] = item.encode("utf-8")
return buffer
def might_have_repeatable_payload(record_type: int) -> bool:
return record_type in (TYPE_NS, TYPE_CNAME, TYPE_PTR, TYPE_SOA, TYPE_MX, TYPE_SRV)
def byte_count_of_lists(*list_of_lists: "Iterable[bytes]") -> int:
return sum(sum(len(item) for item in byte_list) for byte_list in list_of_lists)
def fill_buffer(buffer: bytes, item: bytes, offset: int) -> int:
end_offset = offset + len(item)
buffer[offset:end_offset] = item
return end_offset
def end_index_of_name(buffer: bytes, offset: int) -> int:
"""
Expects the offset to be in the beginning of a name and
scans through the buffer. It returns the last index of the
string representation.
"""
while offset < len(buffer):
string_part_length = buffer[offset]
if string_part_length & REPEAT_TYPE_FLAG == REPEAT_TYPE_FLAG:
# Repeat type flags are always at the end. Meaning the reference
# should be dereferenced and then the name is completed
return offset + 2
elif string_part_length == 0x00:
return offset + 1
        offset += string_part_length + 1  # skip the length byte as well as the label data
raise IndexError("Could not idenitfy end of index")
def bytes_to_name(data: bytes) -> str:
item = bytes_to_name_list(data)
return name_list_to_name(item)
def name_list_to_name(data: "List[str]") -> str:
return ".".join(data)
def bytes_to_name_list(data: bytes) -> "List[str]":
index = 0
item = []
data_length = len(data)
while index < data_length:
length_byte = data[index]
if length_byte == 0x00:
break
index += 1
end_index = index + length_byte
data_item = data[index:end_index]
item.append(data_item.decode("utf-8"))
index = end_index
return item
def a_record_rdata_to_string(rdata: bytes) -> str:
ip_numbers = struct.unpack("!BBBB", rdata)
return ".".join(str(ip_number) for ip_number in ip_numbers)
async def set_after_timeout(event: uasyncio.Event, timeout: float):
await uasyncio.sleep(timeout)
event.set()
def txt_data_to_bytes(txt_data: "Dict[str, Union[str, List[str]]]") -> bytes:
payload = b""
for key, values in txt_data.items():
if isinstance(values, str):
values = [values]
for value in values:
if value is None:
value = ""
payload += string_to_bytes("{}={}".format(key, value))
return payload
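# Illustrative example (editorial addition, not part of the original module):
#   txt_data_to_bytes({"path": "/"}) -> b"\x06path=/"
# i.e. each key=value pair is emitted as a length-prefixed string.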
| 29.163399
| 110
| 0.64814
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.026221
| 947
| 0.212237
|
6235e26627f5dd8b3c591b34af122e6dd4fe2d7f
| 1,029
|
py
|
Python
|
algorithms/selection_sort.py
|
maneeshd/Algorithms-and-DataStructures
|
5c50de586657f0135edaa2e624dfe2648c9c4eef
|
[
"MIT"
] | null | null | null |
algorithms/selection_sort.py
|
maneeshd/Algorithms-and-DataStructures
|
5c50de586657f0135edaa2e624dfe2648c9c4eef
|
[
"MIT"
] | null | null | null |
algorithms/selection_sort.py
|
maneeshd/Algorithms-and-DataStructures
|
5c50de586657f0135edaa2e624dfe2648c9c4eef
|
[
"MIT"
] | null | null | null |
"""
@author: Maneesh D
@date: 11-Jul-17
@interpreter: Python 3.6
Worst Case Analysis: Selection Sort -> O(n^2)
"""
from timeit import Timer, default_timer
from random import shuffle
ARR = list()
def selection_sort(data):
"""Selection sort implementation"""
for i in range(len(data)):
min_pos = i
for j in range(i + 1, len(data)):
if data[j] < data[min_pos]:
min_pos = j
data[i], data[min_pos] = data[min_pos], data[i]
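# Illustrative usage (editorial addition, not part of the original script): the sort is
# performed in place, e.g.
#   data = [3, 1, 2]
#   selection_sort(data)   # data is now [1, 2, 3]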
def main():
"""Main Driver Function"""
start = default_timer()
shuffle(ARR)
print("Input Array:", ARR)
selection_sort(ARR)
print("Sorted Array:", ARR)
print("Sorting Time: %f Seconds\n" % (default_timer() - start))
if __name__ == "__main__":
print("Selection Sort")
print("-" * len("Selection Sort"))
    ARR = list(range(25, 0, -1))  # Reverse-sorted input (note: main() shuffles it before sorting)
t = Timer(main)
print(
"\nAverage sorting time for 25 elements in 3 runs = %f Seconds"
% (t.timeit(3) / 3)
)
| 23.386364
| 71
| 0.595724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 374
| 0.36346
|
623654c971708305dfa901f1772fac7478631021
| 917
|
py
|
Python
|
image_repo/migrations/0002_auto_20210505_1448.py
|
elena-kolomeets/Django-Repo
|
f326b058dc70562a6815248df1b7550c0b634a73
|
[
"MIT"
] | null | null | null |
image_repo/migrations/0002_auto_20210505_1448.py
|
elena-kolomeets/Django-Repo
|
f326b058dc70562a6815248df1b7550c0b634a73
|
[
"MIT"
] | null | null | null |
image_repo/migrations/0002_auto_20210505_1448.py
|
elena-kolomeets/Django-Repo
|
f326b058dc70562a6815248df1b7550c0b634a73
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-05-05 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('image_repo', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='image',
name='colors',
field=models.CharField(blank=True, default='', max_length=50),
),
migrations.AlterField(
model_name='image',
name='description',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='image',
name='result',
field=models.JSONField(blank=True, default=''),
),
migrations.AlterField(
model_name='image',
name='tags',
field=models.CharField(blank=True, default='', max_length=250),
),
]
| 26.970588
| 75
| 0.555071
| 826
| 0.900763
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.154853
|
6236a853e217ec41f065c4c8899eb05e1e528ac1
| 21,375
|
py
|
Python
|
ToricLearning/ising.py
|
danielfreeman11/thermal-toric-code
|
3718f1b16737dfae09443466f6cfb65036faaa89
|
[
"MIT"
] | 6
|
2017-11-15T00:54:13.000Z
|
2021-11-21T02:08:21.000Z
|
ToricLearning/ising.py
|
danielfreeman11/thermal-toric-code
|
3718f1b16737dfae09443466f6cfb65036faaa89
|
[
"MIT"
] | null | null | null |
ToricLearning/ising.py
|
danielfreeman11/thermal-toric-code
|
3718f1b16737dfae09443466f6cfb65036faaa89
|
[
"MIT"
] | null | null | null |
"""
Ising model one-shot dynamics simulation.
From C. Daniel Freeman (2016 http://arxiv.org/abs/1603.05005)
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
#import isingutils.py
import random
from random import choice
import copy
import sys
from compiler.ast import flatten
from numpy import *
logger = logging.getLogger(__name__)
class IsingEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self):
#Holds transform objects for rendering
self.translist = []
self.error_translist = []
self.TotalTime = 0.
#self.NextActionTime = 0.
#self.NextActionProbability = 0.
self.SystemLength = 24
self.Temperature = .15
self.Delta = 1.0
self.CreationRate = abs(1./(1-np.exp(self.Delta*1.0/self.Temperature)))
self.AnnihilationRate = abs(1./(1-np.exp(-self.Delta*1.0/self.Temperature)))
self.HoppingRate = .01#self.Temperature
self.CorrectionRate = 1.
self.Sector = 0
self.state = np.zeros(self.SystemLength)
self.error_state = np.zeros(self.SystemLength)
        # Observation space bounds: one occupation value per site, each in [0, 1]
low = np.zeros(self.SystemLength)
high = np.ones(self.SystemLength)
# Can perform a swap between any pair of sites. Convention is that 0 swaps from 0 to 1 and (SystemLength-1) swaps from (SystemLength-1) to 0.
# i.e., periodic boundary conditions.
self.action_space = spaces.Discrete(self.SystemLength)
self.observation_space = spaces.Box(low, high)
self._seed()
self.reset()
self.viewer = None
anyons_list = self.state
self.steps_beyond_done = 0.
#Need to calculate when the first bath interaction will occur
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(anyons_list)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate + len(ExPairLocList)*self.AnnihilationRate + \
(len(EmptyPairLocList))*self.CreationRate
self.PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate/Norm
self.PAnn = len(ExPairLocList)*self.AnnihilationRate/Norm
self.PCre = (len(anyons_list) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*self.HoppingRate/Norm
self.NextActionProbability = random.random()
self.NextActionTime = self.TotalTime + (-1./Norm)*np.log(self.NextActionProbability)
# Just need to initialize the relevant attributes
self._configure()
def _configure(self, display=None):
self.display = display
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
print "Current time: " + str(self.TotalTime)
print "Next action: " + str(self.NextActionTime)
state = self.state
anyons_list = state
#Store the winding operator before we do anything to the chain
p = int(floor(len(self.state)/2.))
pl,pr = self.state[p],self.state[p+1]
#I'm going to change this into a more discrete picture--where there's an integer system clock,
        #and dynamics occur in between clock calls.
#The most obvious way to do this is to stick a while loop in the if statement below that performs dynamics until
        #the next action time is after the next cycle of (CorrectionPeriod) * (n + 1) (if this were occurring between n and n+1)
#if the next corrective action would occur after the next bath interaction, do the bath interaction and calculate the next bath interaction time
if self.TotalTime + (1. / self.CorrectionRate) > self.NextActionTime:
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(self.state)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate + len(ExPairLocList)*self.AnnihilationRate + \
(len(EmptyPairLocList))*self.CreationRate
self.PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate/Norm
self.PAnn = len(ExPairLocList)*self.AnnihilationRate/Norm
self.PCre = (len(self.state) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*self.HoppingRate/Norm
print RightHoppableLocList
print LeftHoppableLocList
print "**"
r = self.NextActionProbability
#print PHop, PAnn, PCre
#print r
#Hopping
if r < self.PHop:
HopSite = choice(RightHoppableLocList + LeftHoppableLocList)
self.state[HopSite] = 0
if HopSite in RightHoppableLocList:
self.state[(HopSite+1)%len(self.state)] = 1
else:
self.state[(HopSite+1)%len(self.state)] = 0
self.state[(HopSite)] = 1
self.error_state[HopSite] = (self.error_state[HopSite] + 1) % 2
#print "Hopping!"
#print chain
#Annihilating
if (r >= self.PHop and r < self.PHop + self.PAnn):
AnnihilateSite = choice(ExPairLocList)
self.state[AnnihilateSite] = 0
self.state[(AnnihilateSite+1)%len(self.state)] = 0
self.error_state[AnnihilateSite] = (self.error_state[AnnihilateSite] + 1) % 2
#print "Annihilating!"
#print chain
#Creating
if (r >= self.PHop + self.PAnn):
CreateSite = choice(EmptyPairLocList)
self.state[CreateSite] = 1
self.state[(CreateSite+1)%len(self.state)] = 1
self.error_state[CreateSite] = (self.error_state[CreateSite] + 1) % 2
#print "Creating!"
#print chain
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(anyons_list)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate + len(ExPairLocList)*self.AnnihilationRate + \
(len(EmptyPairLocList))*self.CreationRate
self.PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate/Norm
self.PAnn = len(ExPairLocList)*self.AnnihilationRate/Norm
self.PCre = (len(anyons_list) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*self.HoppingRate/Norm
#Update the system time, next action time, and next action probability
self.TotalTime = self.NextActionTime
print "Action too late!" + str(self.TotalTime)
self.NextActionProbability = random.random()
self.NextActionTime = self.TotalTime + (-1./Norm)*np.log(self.NextActionProbability)
else:
#If we haven't exceeded the bath interaction timescale, we have to apply some swaps!
anyons_list, CycleTime, NewRates, NoHops, Proceed, self.Sector = self.CorrectionProtocol(anyons_list, self.TotalTime, self.TotalTime+(1./self.CorrectionRate), self.CorrectionRate, \
self.PHop, self.PAnn, self.PCre, [action], self.Sector)
#self.TotalTime+=CycleTime
self.TotalTime+=1.
print self.TotalTime
if NewRates == True:
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(anyons_list)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate + len(ExPairLocList)*self.AnnihilationRate + \
(len(EmptyPairLocList))*self.CreationRate
self.PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*self.HoppingRate/Norm
self.PAnn = len(ExPairLocList)*self.AnnihilationRate/Norm
self.PCre = (len(anyons_list) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*self.HoppingRate/Norm
self.NextActionProbability = random.random()
self.NextActionTime = self.TotalTime + (-1./Norm)*np.log(self.NextActionProbability)
self.state = anyons_list
#Update the sector
self.Sector = (self.Sector + self.CheckSector(self.state,p,pl,pr))%2
done = self.TotalTime > 1000. \
or self.CheckState(self.state, self.Sector) == 1
done = bool(done)
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
            # Episode just ended on this step
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = np.zeros(self.SystemLength)
self.error_state = np.zeros(self.SystemLength)
self.TotalTime = 0.
self.state[0] = 1.
self.state[22] = 1.
self.error_state[22]=1
self.error_state[23]=1
self.steps_beyond_done = None
return np.array(self.state)
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
#world_width = self.x_threshold*2
#scale = screen_width/world_width
#carty = 100 # TOP OF CART
polewidth = 10.0
#polelen = scale * 1.0
#cartwidth = 50.0
#cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)#, display=self.display)
'''l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pole.set_color(.8,.6,.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)'''
for i in xrange(self.SystemLength):
self.offsettrans = rendering.Transform()
self.error_offsettrans = rendering.Transform()
self.translist.append(self.offsettrans)
self.error_translist.append(self.error_offsettrans)
axle = rendering.make_circle(polewidth/2)
error = rendering.make_circle(polewidth/4)
axle.add_attr(self.offsettrans)
error.add_attr(self.error_offsettrans)
axle.set_color(.8,.6,.4)
error.set_color(.1,.1,.1)
self.viewer.add_geom(axle)
self.viewer.add_geom(error)
#print "Putting on the screen!"
#self.track = rendering.Line((0,carty), (screen_width,carty))
#self.track.set_color(0,0,0)
#self.viewer.add_geom(self.track)
for i,t in enumerate(self.translist):
#print "something happening?"
if self.state[i]!=0:
#print "Moving to be visible!"
t.set_translation(i*(400./self.SystemLength)+100., 200)
else:
t.set_translation(-10,-10)
for i,t in enumerate(self.error_translist):
if self.error_state[i]!=0:
t.set_translation(i*(400./self.SystemLength)+100. + (400./self.SystemLength)/2., 200)
else:
t.set_translation(-10,-10)
#print "This is being run, though!"
#x = self.state
#cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART
#self.carttrans.set_translation(cartx, carty)
#self.poletrans.set_rotation(-x[2])
return self.viewer.render()#return_rgb_array = mode=='rgb_array')
#ISING CODE
#*****************************************************
def ReturnExcitationInformation(self, chain):
ExLocList = []
ExPairLocList = []
EmptyLocList = []
EmptyPairLocList = []
RightHoppableLocList = []
LeftHoppableLocList = []
for i,c in enumerate(chain):
if c == 1:
ExLocList.append(i)
if chain[(i+1)%len(chain)] == 1:
ExPairLocList.append(i)
else:
RightHoppableLocList.append(i)
else:
EmptyLocList.append(i)
if chain[(i+1)%len(chain)] == 0:
EmptyPairLocList.append(i)
else:
LeftHoppableLocList.append((i)%len(chain))
return ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList
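    # Worked example (editorial addition, not in the original source): for the periodic
    # chain [1, 1, 0, 0] the method above returns
    #   ExLocList=[0, 1], ExPairLocList=[0], EmptyLocList=[2, 3],
    #   EmptyPairLocList=[2], RightHoppableLocList=[1], LeftHoppableLocList=[3]
    # i.e. sites 0-1 form an occupied pair, site 1 can hop right into empty site 2, and
    # empty site 3 (whose right neighbour, site 0, is occupied) admits a left hop.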
def CalculateProbabilities(self, chain, CreationRate, AnnihilationRate, HoppingRate):
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(chain)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate + len(ExPairLocList)*AnnihilationRate + \
(len(EmptyPairLocList))*CreationRate
PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate/Norm
PAnn = len(ExPairLocList)*AnnihilationRate/Norm
PCre = (len(chain) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*HoppingRate/Norm
return PHop, PAnn, PCre
def AdvanceTime(self, chain, StartTime, CreationRate, AnnihilationRate, HoppingRate, sector):
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(chain)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate + len(ExPairLocList)*AnnihilationRate + \
(len(EmptyPairLocList))*CreationRate
PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate/Norm
PAnn = len(ExPairLocList)*AnnihilationRate/Norm
PCre = (len(chain) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*HoppingRate/Norm
r = random.random()
DeltaTau = (-1./Norm)*np.log(r)
chain, CycleTime, NewRates, NoHops, Proceed, sector = CorrectionProtocol(chain, StartTime, StartTime+DeltaTau, CorrectionRate, \
PHop, PAnn, PCre, CorrectionSwaps, sector)
#NewRates = False
#CycleTime = 0
#NoHops = True
p = int(floor(len(chain)/2.))
pl,pr = chain[p],chain[p+1] #previous values of chain
if NewRates == False:
ExLocList, ExPairLocList, EmptyLocList, EmptyPairLocList, RightHoppableLocList, LeftHoppableLocList = self.ReturnExcitationInformation(chain)
Norm = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate + len(ExPairLocList)*AnnihilationRate + \
(len(EmptyPairLocList))*CreationRate
PHop = (len(RightHoppableLocList)+len(LeftHoppableLocList))*HoppingRate/Norm
PAnn = len(ExPairLocList)*AnnihilationRate/Norm
PCre = (len(chain) - len(ExPairLocList) - (len(RightHoppableLocList)+len(LeftHoppableLocList)))*HoppingRate/Norm
#print PHop, PAnn, PCre
#print r
#Hopping
if r < PHop:
HopSite = choice(RightHoppableLocList + LeftHoppableLocList)
chain[HopSite] = 0
if HopSite in RightHoppableLocList:
chain[(HopSite+1)%len(chain)] = 1
else:
chain[(HopSite-1)%len(chain)] = 1
#print "Hopping!"
#print chain
#Annihilating
if (r >= PHop and r < PHop + PAnn):
AnnihilateSite = choice(ExPairLocList)
chain[AnnihilateSite] = 0
chain[(AnnihilateSite+1)%len(chain)] = 0
#print "Annihilating!"
#print chain
#Creating
if (r >= PHop + PAnn):
CreateSite = choice(EmptyPairLocList)
chain[CreateSite] = 1
chain[(CreateSite+1)%len(chain)] = 1
#print "Creating!"
#print chain
sector = (sector + CheckSector(IsingChain,p,pl,pr))%2
if NoHops or not(Proceed):
return chain, DeltaTau, sector
else:
return chain, CycleTime, sector
def CheckSector(self, chain,p,pl,pr):
increment = 0
if chain[p]!=pl and chain[p+1] != pr:
increment = 1
#print p,pl,pr,"\t",chain[p],chain[p+1],"\t",increment
#print chain
return increment
#Constructs a list with the indices for conditional swaps in the correction protocol
#Convention is that the value at protocol[i] is CSWAPPED with protocol[(i+1)%length]
def SwapProtocol(self, length):
sublength = length/2 - 1
protocol = []
for i in xrange(length):
for j in xrange(sublength):
for k in xrange(sublength - j):
protocol.append((i+(j+k))%length)
for k in xrange(sublength - j):
protocol.append((i+(sublength-k-1))%length)
return protocol
def SwapProtocol2(self, length):
sublength = int(math.ceil(length/2))
subdomain = int(sublength / 2)
protocol = []
for c in xrange(4):
subprotocol = []
for i in xrange(subdomain-1):
subprotocol.append((subdomain*(c+1) + i)%length)
protocol.append(subprotocol)
for j in xrange(subdomain-1):
for k in xrange(j+1):
protocol.append((subdomain*c + k + (subdomain-1) - (j+1))%length)
protocol.append(subprotocol)
protocol = flatten(protocol)
return protocol
def SwapProtocol3(self, length):
sublength = int(math.ceil(length/2))
subdomain = int(sublength / 2)
protocol = []
for c in xrange(4):
subprotocol = []
for i in xrange(subdomain-1):
for m in xrange(i+1):
subprotocol.append((subdomain*(c+1) + i - m)%length)
protocol.append(subprotocol)
for j in xrange(subdomain-1):
for k in xrange(j+1):
protocol.append((subdomain*c + k + (subdomain-1) - (j+1))%length)
protocol.append(subprotocol)
protocol = flatten(protocol)
return protocol
def SwapProtocol4(self, length):
sublength = int(math.ceil(length/2))
subdomain = int(sublength / 4)
protocol = []
for c in xrange(8):
subprotocol = []
for i in xrange(subdomain-1):
for m in xrange(i+1):
subprotocol.append((subdomain*(c+1) + i - m)%length)
protocol.append(subprotocol)
for j in xrange(subdomain-1):
for k in xrange(j+1):
protocol.append((subdomain*c + k + (subdomain-1) - (j+1))%length)
protocol.append(subprotocol)
protocol = flatten(protocol)
return protocol
def SwapProtocol5(self, length):
sublength = int(math.ceil(length/2))
protocol = []
for j in xrange(sublength):
if j%2==0:
protocol.append(2*j)
protocol.append(2*j+1)
protocol.append(2*j)
protocol.append(2*j+1)
return protocol
def CSwap(self, chain, i):
#print i
if chain[i]!=chain[(i+1)%len(chain)]:
inter = chain[i]
chain[i] = chain[(i+1)%len(chain)]
chain[(i+1)%len(chain)] = inter
self.error_state[i] = (self.error_state[i] + 1) % 2
####print "Swapping at " + str(i) + "!: ",chain
return chain
def CorrectionProtocol(self, chain, oldtime, newtime, CorrectionRate, PHop, PAnn, PCre, CorrectionSwaps, sector):
print "Attempting correction protocol"
#print "What"
CycleTime = 0
#PHop, PAnn, PCre = CalculateProbabilities(chain, CreationRate, AnnihilationRate, HoppingRate)
#print PHop, PAnn, PCre
ProbabilityHasChanged = False
#NoChange = True
NumberOfSwaps = len(CorrectionSwaps)
#Need to calculate where the correction protocol currently is:
CorrectionPeriod = 1./CorrectionRate
NumberCompletedCycles, CurrentCycleTime = divmod(oldtime, CorrectionPeriod)
IndexInCycle = int(floor((CurrentCycleTime / CorrectionPeriod) * NumberOfSwaps))
Proceed = True
'''if (oldtime + CorrectionPeriod/NumberOfSwaps) > newtime:
Proceed = False
#psuccess = (newtime - oldtime) / (CorrectionPeriod/NumberOfSwaps)
#if random.random() < psuccess:
# Proceed = True
'''
#This loop should only execute once. lol why is it here then. Because I might generalize this later
print oldtime+CycleTime < newtime
print not(ProbabilityHasChanged)
print not(PHop == 0)
while(oldtime+CycleTime < newtime and not(ProbabilityHasChanged) and not(PHop == 0)):# and Proceed == True):# and not(PAnn > 0)):
####print "Timing information: ", CycleTime,"\t",oldtime,"\t", newtime,"\t",(newtime-oldtime)-CycleTime,"\t",CorrectionPeriod/NumberOfSwaps
#chain = self.CSwap(chain, CorrectionSwaps[IndexInCycle])
chain = self.CSwap(chain, CorrectionSwaps[0])
print "Swapping" + str(CorrectionSwaps[0])
#parallel?
#chain = CSwap(chain, CorrectionSwaps[(IndexInCycle + NumberOfSwaps/4)%NumberOfSwaps])
#chain = CSwap(chain, CorrectionSwaps[(IndexInCycle + 2*NumberOfSwaps/4)%NumberOfSwaps])
#chain = CSwap(chain, CorrectionSwaps[(IndexInCycle + 3*NumberOfSwaps/4)%NumberOfSwaps])
#chain = CSwap(chain, CorrectionSwaps[(IndexInCycle + 2*NumberOfSwaps/4)%NumberOfSwaps])
#chain = CSwap(chain, CorrectionSwaps[(IndexInCycle + 3*NumberOfSwaps/4)%NumberOfSwaps])
PHopInter, PAnnInter, PCreInter = self.CalculateProbabilities(chain, self.CreationRate, self.AnnihilationRate, self.HoppingRate)
#print PHopInter, PAnnInter, PCreInter
if (PHop != PHopInter or PAnn != PAnnInter or PCre != PCreInter):
ProbabilityHasChanged = True
IndexInCycle = (IndexInCycle+1)%NumberOfSwaps
CycleTime+=CorrectionPeriod/NumberOfSwaps
NoHops = (PHop == 0)
####print "At end of correction", chain
#print "Starttime: ",oldtime,"Candidate endtime:",newtime,"Cycle endtime:",oldtime+CycleTime
#print "New rate equations: ", ProbabilityHasChanged, "Nohops: ", NoHops, "Proceeded?", Proceed
return chain, CycleTime, ProbabilityHasChanged, NoHops, Proceed, sector
def CheckState(self, chain, sector):
if sum(chain)==0:
return 2*sector-1
else:
return 0
def ProcessTraj(self, traj,avgtraj,maxtime):
#print traj
#avgtraj[0]+=traj[0][1]
trajindex = 0
for i, val in enumerate(avgtraj):
if i < len(traj) and i > 0: #safety first!
while trajindex < len(traj) and traj[trajindex][0] < (1.0*maxtime / len(avgtraj))*i:
#print "Window: ",(1.0*maxtime / len(avgtraj))*i
trajindex+=1
avgtraj[i]+=traj[trajindex-1][1]
return avgtraj
| 34.364952
| 215
| 0.705076
| 20,971
| 0.981099
| 0
| 0
| 0
| 0
| 0
| 0
| 5,290
| 0.247485
|
62379999fae8c7604ac402164b9ffd7d1051d067
| 41,940
|
py
|
Python
|
pkg/vtreat/vtreat_impl.py
|
sthagen/pyvtreat
|
01cd9a70a6e1af779057fea90a9a43c2822cceb2
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T19:53:27.000Z
|
2019-12-23T19:53:27.000Z
|
pkg/vtreat/vtreat_impl.py
|
sthagen/pyvtreat
|
01cd9a70a6e1af779057fea90a9a43c2822cceb2
|
[
"BSD-3-Clause"
] | null | null | null |
pkg/vtreat/vtreat_impl.py
|
sthagen/pyvtreat
|
01cd9a70a6e1af779057fea90a9a43c2822cceb2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:07:57 2019
@author: johnmount
"""
from abc import ABC
import math
import pprint
import warnings
import numpy
import pandas
import vtreat.util
import vtreat.transform
def ready_data_frame(d):
orig_type = type(d)
if orig_type == numpy.ndarray:
d = pandas.DataFrame(d)
d.columns = [str(c) for c in d.columns]
if not isinstance(d, pandas.DataFrame):
raise TypeError("not prepared to process type " + str(orig_type))
return d, orig_type
def back_to_orig_type_data_frame(d, orig_type):
if not isinstance(d, pandas.DataFrame):
raise TypeError("Expected result to be a pandas.DataFrame, found: " + str(type(d)))
columns = [c for c in d.columns]
if orig_type == numpy.ndarray:
d = numpy.asarray(d)
return d, columns
class VarTransform:
def __init__(self, incoming_column_name, derived_column_names, treatment):
self.incoming_column_name_ = incoming_column_name
self.derived_column_names_ = derived_column_names.copy()
self.treatment_ = treatment
self.need_cross_treatment_ = False
self.refitter_ = None
def transform(self, data_frame):
raise NotImplementedError("base method called")
class MappedCodeTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name, treatment, code_book):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], treatment
)
self.code_book_ = code_book
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
derived_column_name = self.derived_column_names_[0]
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
res = pandas.merge(
sf, self.code_book_, on=[self.incoming_column_name_], how="left", sort=False
) # ordered by left table rows
# could also try pandas .map()
res = res[[derived_column_name]].copy()
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = 0
return res
class YAwareMappedCodeTransform(MappedCodeTransform):
def __init__(
self,
incoming_column_name,
derived_column_name,
treatment,
code_book,
refitter,
extra_args,
params,
):
MappedCodeTransform.__init__(
self,
incoming_column_name=incoming_column_name,
derived_column_name=derived_column_name,
treatment=treatment,
code_book=code_book,
)
self.need_cross_treatment_ = True
self.refitter_ = refitter
self.extra_args_ = extra_args
self.params_ = params
class CleanNumericTransform(VarTransform):
def __init__(self, incoming_column_name, replacement_value):
VarTransform.__init__(
self, incoming_column_name, [incoming_column_name], "clean_copy"
)
self.replacement_value_ = replacement_value
def transform(self, data_frame):
col = vtreat.util.safe_to_numeric_array(data_frame[self.incoming_column_name_])
bad_posns = vtreat.util.is_bad(col)
col[bad_posns] = self.replacement_value_
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res
class IndicateMissingTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], "missing_indicator"
)
def transform(self, data_frame):
col = vtreat.util.is_bad(data_frame[self.incoming_column_name_])
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res.astype(float)
def fit_clean_code(*, incoming_column_name, x, params, imputation_map):
if not vtreat.util.numeric_has_range(x):
return None
replacement = params['missingness_imputation']
try:
replacement = imputation_map[incoming_column_name]
except KeyError:
pass
if vtreat.util.can_convert_v_to_numeric(replacement):
replacement_value = 0.0 + replacement
elif callable(replacement):
replacement_value = vtreat.util.summarize_column(x, fn=replacement)
else:
raise TypeError("unexpected imputation type " + str(type(replacement)) + " (" + incoming_column_name + ")")
if pandas.isnull(replacement_value) or math.isnan(replacement_value) or math.isinf(replacement_value):
raise ValueError("replacement was bad " + incoming_column_name + ": " + str(replacement_value))
return CleanNumericTransform(
incoming_column_name=incoming_column_name, replacement_value=replacement_value
)
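# Editorial note (not part of the original source): `imputation_map` overrides the global
# `params['missingness_imputation']` on a per-column basis, and either may be a constant
# or a callable, e.g.
#   imputation_map={"x1": 0.0, "x2": numpy.median}
# would fill missing values of x1 with 0.0 and of x2 with a summary computed by
# numpy.median via vtreat.util.summarize_column.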
def fit_regression_impact_code(*, incoming_column_name, x, y, extra_args, params):
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
if params["use_hierarchical_estimate"]:
sf["_impact_code"] = sf["_hest"] - sf["_gm"]
else:
sf["_impact_code"] = sf["_group_mean"] - sf["_gm"]
sf = sf.loc[:, ["x", "_impact_code"]].copy()
newcol = incoming_column_name + "_impact_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="impact_code",
code_book=sf,
refitter=fit_regression_impact_code,
extra_args=extra_args,
params=params,
)
def fit_regression_deviation_code(*, incoming_column_name, x, y, extra_args, params):
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
sf["_deviation_code"] = numpy.sqrt(sf["_var"])
sf = sf.loc[:, ["x", "_deviation_code"]].copy()
newcol = incoming_column_name + "_deviation_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="deviation_code",
code_book=sf,
refitter=fit_regression_deviation_code,
extra_args=extra_args,
params=params,
)
def fit_binomial_impact_code(*, incoming_column_name, x, y, extra_args, params):
outcome_target = (extra_args["outcome_target"],)
var_suffix = extra_args["var_suffix"]
y = numpy.asarray(numpy.asarray(y) == outcome_target, dtype=float)
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
eps = 1.0e-3
if params["use_hierarchical_estimate"]:
sf["_logit_code"] = numpy.log((sf["_hest"] + eps) / (sf["_gm"] + eps))
else:
sf["_logit_code"] = numpy.log((sf["_group_mean"] + eps) / (sf["_gm"] + eps))
sf = sf.loc[:, ["x", "_logit_code"]].copy()
newcol = incoming_column_name + "_logit_code" + var_suffix
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="logit_code",
code_book=sf,
refitter=fit_binomial_impact_code,
extra_args=extra_args,
params=params,
)
class IndicatorCodeTransform(VarTransform):
def __init__(
self,
incoming_column_name,
derived_column_names,
levels,
*,
sparse_indicators=False
):
VarTransform.__init__(
self, incoming_column_name, derived_column_names, "indicator_code"
)
self.levels_ = levels
self.sparse_indicators_ = sparse_indicators
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
col = sf[self.incoming_column_name_]
def f(i):
v = numpy.asarray(col == self.levels_[i]) + 0.0
if self.sparse_indicators_:
v = pandas.arrays.SparseArray(v, fill_value=0.0)
return v
res = [
pandas.DataFrame({self.derived_column_names_[i]: f(i)})
for i in range(len(self.levels_))
]
res = pandas.concat(res, axis=1, sort=False)
res.reset_index(inplace=True, drop=True)
return res
def fit_indicator_code(
*, incoming_column_name, x, min_fraction, sparse_indicators=False
):
sf = pandas.DataFrame({incoming_column_name: x})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
counts = sf[incoming_column_name].value_counts()
n = sf.shape[0]
counts = counts[counts > 0]
counts = counts[counts >= min_fraction * n] # no more than 1/min_fraction symbols
levels = [str(v) for v in counts.index]
if len(levels) < 1:
return None
return IndicatorCodeTransform(
incoming_column_name,
vtreat.util.build_level_codes(incoming_column_name, levels),
levels=levels,
sparse_indicators=sparse_indicators
)
def fit_prevalence_code(incoming_column_name, x):
sf = pandas.DataFrame({"x": x})
bad_posns = vtreat.util.is_bad(sf["x"])
sf.loc[bad_posns, "x"] = "_NA_"
sf.reset_index(inplace=True, drop=True)
n = sf.shape[0]
sf["_ni"] = 1.0
sf = pandas.DataFrame(sf.groupby("x")["_ni"].sum())
sf.reset_index(inplace=True, drop=False)
sf["_hest"] = sf["_ni"] / n
sf = sf.loc[:, ["x", "_hest"]].copy()
newcol = incoming_column_name + "_prevalence_code"
sf.columns = [incoming_column_name, newcol]
sf[incoming_column_name] = sf[incoming_column_name].astype(str)
sf.reset_index(inplace=True, drop=True)
return MappedCodeTransform(
incoming_column_name, newcol, treatment="prevalence_code", code_book=sf
)
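# Worked example (editorial addition, not part of the original source): for
# x = ["a", "a", "b"] the code book maps "a" -> 2/3 and "b" -> 1/3, i.e. each level is
# replaced by its observed frequency in the fitting data.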
# noinspection PyPep8Naming
def fit_numeric_outcome_treatment(
*, X, y, var_list, outcome_name, cols_to_copy, params, imputation_map
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
var_list = {v for v in var_list if v_counts[v] > 1}
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
id_like = [co for co in cat_list if v_counts[co] >= n]
if len(id_like) > 0:
warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
cat_list = [co for co in var_list if co not in set(id_like)]
if "clean_copy" in params["coders"]:
for vi in num_list:
xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
if xform is not None:
# noinspection PyTypeChecker
xforms = xforms + [xform]
for vi in cat_list:
if "impact_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_regression_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=None,
params=params,
)
]
if "deviation_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_regression_deviation_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=None,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_binomial_outcome_treatment(
*, X, y, outcome_target, var_list, outcome_name, cols_to_copy, params, imputation_map
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
var_list = {v for v in var_list if v_counts[v] > 1}
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
id_like = [co for co in cat_list if v_counts[co] >= n]
if len(id_like) > 0:
warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
cat_list = [co for co in var_list if co not in set(id_like)]
if "clean_copy" in params["coders"]:
for vi in num_list:
xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
if xform is not None:
# noinspection PyTypeChecker
xforms = xforms + [xform]
extra_args = {"outcome_target": outcome_target, "var_suffix": ""}
for vi in cat_list:
if "logit_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_binomial_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=extra_args,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_multinomial_outcome_treatment(
*, X, y, var_list, outcome_name, cols_to_copy, params, imputation_map
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
var_list = {v for v in var_list if v_counts[v] > 1}
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
outcomes = [oi for oi in set(y)]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
id_like = [co for co in cat_list if v_counts[co] >= n]
if len(id_like) > 0:
warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
cat_list = [co for co in var_list if co not in set(id_like)]
if "clean_copy" in params["coders"]:
for vi in num_list:
xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
if xform is not None:
# noinspection PyTypeChecker
xforms = xforms + [xform]
for vi in cat_list:
for outcome in outcomes:
if "impact_code" in params["coders"]:
extra_args = {
"outcome_target": outcome,
"var_suffix": ("_" + str(outcome)),
}
# noinspection PyTypeChecker
xforms = xforms + [
fit_binomial_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=extra_args,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
if len(xforms) <= 0:
raise ValueError("no variables created")
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_unsupervised_treatment(*, X, var_list, outcome_name, cols_to_copy, params, imputation_map):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
var_list = {v for v in var_list if v_counts[v] > 1}
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
id_like = [co for co in cat_list if v_counts[co] >= n]
if len(id_like) > 0:
warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
cat_list = [co for co in var_list if co not in set(id_like)]
if "clean_copy" in params["coders"]:
for vi in num_list:
xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
if xform is not None:
# noinspection PyTypeChecker
xforms = xforms + [xform]
for vi in cat_list:
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=None)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
def pre_prep_frame(x, *, col_list, cols_to_copy):
"""Create a copy of pandas.DataFrame x restricted to col_list union cols_to_copy with col_list - cols_to_copy
converted to only string and numeric types. New pandas.DataFrame has trivial indexing. If col_list
is empty it is interpreted as all columns."""
if cols_to_copy is None:
cols_to_copy = []
if (col_list is None) or (len(col_list) <= 0):
col_list = [co for co in x.columns]
x_set = set(x.columns)
col_set = set(col_list)
for ci in cols_to_copy:
if (ci in x_set) and (ci not in col_set):
col_list = col_list + [ci]
col_set = set(col_list)
missing_cols = col_set - x_set
if len(missing_cols) > 0:
raise KeyError("referred to not-present columns " + str(missing_cols))
cset = set(cols_to_copy)
if len(col_list) <= 0:
raise ValueError("no variables")
x = x.loc[:, col_list]
x = x.reset_index(inplace=False, drop=True)
for c in x.columns:
if c in cset:
continue
bad_ind = vtreat.util.is_bad(x[c])
if vtreat.util.can_convert_v_to_numeric(x[c]):
x[c] = vtreat.util.safe_to_numeric_array(x[c])
else:
# https://stackoverflow.com/questions/22231592/pandas-change-data-type-of-series-to-string
x[c] = numpy.asarray(x[c].apply(str), dtype=str)
x.loc[bad_ind, c] = numpy.nan
return x
def perform_transform(*, x, transform, params):
plan = transform.plan_
xform_steps = [xfi for xfi in plan["xforms"]]
user_steps = [stp for stp in params["user_transforms"]]
    # restrict down to the results we are going to use
if (transform.result_restriction is not None) and (len(transform.result_restriction) > 0):
xform_steps = [xfi for xfi in xform_steps
if len(set(xfi.derived_column_names_).intersection(transform.result_restriction)) > 0]
user_steps = [stp for stp in user_steps
if len(set(stp.derived_vars_).intersection(transform.result_restriction)) > 0]
# check all required columns are present
needs = set()
for xfi in xform_steps:
if xfi.incoming_column_name_ is not None:
needs.add(xfi.incoming_column_name_)
for stp in user_steps:
if stp.incoming_vars_ is not None:
needs.update(stp.incoming_vars_)
missing = needs - set(x.columns)
if len(missing) > 0:
raise ValueError("missing required input columns " + str(missing))
# do the work
new_frames = [xfi.transform(x) for xfi in (xform_steps + user_steps)]
new_frames = [frm for frm in new_frames if (frm is not None) and (frm.shape[1] > 0)]
# see if we want to copy over any columns
copy_set = set(plan["cols_to_copy"])
to_copy = [ci for ci in x.columns if ci in copy_set]
if len(to_copy) > 0:
cp = x.loc[:, to_copy].copy()
new_frames = [cp] + new_frames
if len(new_frames) <= 0:
raise ValueError("no columns transformed")
res = pandas.concat(new_frames, axis=1, sort=False)
res.reset_index(inplace=True, drop=True)
return res
def limit_to_appropriate_columns(*, res, transform):
plan = transform.plan_
to_copy = set(plan["cols_to_copy"])
to_take = set([
ci for ci in transform.score_frame_["variable"][transform.score_frame_["has_range"]]])
if (transform.result_restriction is not None) and (len(transform.result_restriction) > 0):
to_take = to_take.intersection(transform.result_restriction)
cols_to_keep = [ci for ci in res.columns if (ci in to_copy) or (ci in to_take)]
if len(cols_to_keep) <= 0:
raise ValueError("no columns retained")
res = res[cols_to_keep].copy()
res.reset_index(inplace=True, drop=True)
return res
# val_list is a list single column Pandas data frames
def mean_of_single_column_pandas_list(val_list):
if val_list is None or len(val_list) <= 0:
return numpy.nan
d = pandas.concat(val_list, axis=0, sort=False)
col = d.columns[0]
d = d.loc[numpy.logical_not(vtreat.util.is_bad(d[col])), [col]]
if d.shape[0] < 1:
return numpy.nan
return numpy.mean(d[col])
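# Worked example (editorial addition, not part of the original source): for the list
# [pandas.DataFrame({"v": [1.0, 2.0]}), pandas.DataFrame({"v": [4.0]})] the pooled mean
# is 7/3 (about 2.33); bad/missing entries are dropped first and an empty pool gives NaN.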
# assumes each y-aware variable produces one derived column
# also clears out refitter_ values to None
def cross_patch_refit_y_aware_cols(*, x, y, res, plan, cross_plan):
if cross_plan is None or len(cross_plan) <= 1:
for xf in plan["xforms"]:
xf.refitter_ = None
return res
incoming_colset = set(x.columns)
derived_colset = set(res.columns)
for xf in plan["xforms"]:
if not xf.need_cross_treatment_:
continue
incoming_column_name = xf.incoming_column_name_
derived_column_name = xf.derived_column_names_[0]
if derived_column_name not in derived_colset:
continue
if incoming_column_name not in incoming_colset:
raise KeyError("missing required column " + incoming_column_name)
if xf.refitter_ is None:
raise ValueError(
"refitter is None: "
+ incoming_column_name
+ " -> "
+ derived_column_name
)
# noinspection PyPep8Naming
def maybe_transform(*, fit, X):
if fit is None:
return None
return fit.transform(X)
patches = [
maybe_transform(
fit=xf.refitter_(
incoming_column_name=incoming_column_name,
x=x[incoming_column_name][cp["train"]],
y=y[cp["train"]],
extra_args=xf.extra_args_,
params=xf.params_,
),
X=x.loc[cp["app"], [incoming_column_name]],
)
for cp in cross_plan
]
# replace any missing sections with global average (slight data leak potential)
avg = mean_of_single_column_pandas_list(
[pi for pi in patches if pi is not None]
)
if numpy.isnan(avg):
avg = 0
res[derived_column_name] = avg
for i in range(len(cross_plan)):
pi = patches[i]
if pi is None:
continue
pi.reset_index(inplace=True, drop=True)
cp = cross_plan[i]
res.loc[cp["app"], derived_column_name] = numpy.asarray(
pi[derived_column_name]
).reshape((len(pi), ))
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = avg
for xf in plan["xforms"]:
xf.refitter_ = None
return res
def cross_patch_user_y_aware_cols(*, x, y, res, params, cross_plan):
if cross_plan is None or len(cross_plan) <= 1:
return res
incoming_colset = set(x.columns)
derived_colset = set(res.columns)
if len(derived_colset) <= 0:
return res
for ut in params["user_transforms"]:
if not ut.y_aware_:
continue
        intersect_in = incoming_colset.intersection(set(ut.incoming_vars_))
        intersect_out = derived_colset.intersection(set(ut.derived_vars_))
        if len(intersect_out) <= 0:
            continue
        if len(intersect_out) != len(ut.derived_vars_):
            raise ValueError("not all derived columns are in res frame")
        if len(intersect_in) != len(ut.incoming_vars_):
            raise KeyError("missing required columns")
patches = [
ut.fit(X=x.loc[cp["train"], ut.incoming_vars_], y=y[cp["train"]]).transform(
X=x.loc[cp["app"], ut.incoming_vars_]
)
for cp in cross_plan
]
for col in ut.derived_vars_:
# replace any missing sections with global average (slight data leak potential)
avg = mean_of_single_column_pandas_list(
[pi.loc[:, [col]] for pi in patches if pi is not None]
)
if numpy.isnan(avg):
avg = 0
res[col] = avg
for i in range(len(cross_plan)):
pi = patches[i]
if pi is None:
continue
pi.reset_index(inplace=True, drop=True)
cp = cross_plan[i]
res.loc[cp["app"], col] = numpy.asarray(pi[col]).reshape((len(pi), ))
res.loc[vtreat.util.is_bad(res[col]), col] = avg
return res
def score_plan_variables(cross_frame, outcome, plan, params,
*,
is_classification=False):
def describe_xf(xf):
description = pandas.DataFrame({"variable": xf.derived_column_names_})
description["orig_variable"] = xf.incoming_column_name_
description["treatment"] = xf.treatment_
description["y_aware"] = xf.need_cross_treatment_
return description
def describe_ut(ut):
description = pandas.DataFrame(
{"orig_variable": ut.incoming_vars_, "variable": ut.derived_vars_}
)
description["treatment"] = ut.treatment_
description["y_aware"] = ut.y_aware_
return description
var_table = pandas.concat(
[describe_xf(xf) for xf in plan["xforms"]]
+ [
describe_ut(ut)
for ut in params["user_transforms"]
if len(ut.incoming_vars_) > 0
],
sort=False,
)
var_table.reset_index(inplace=True, drop=True)
sf = vtreat.util.score_variables(
cross_frame,
variables=var_table["variable"],
outcome=outcome,
is_classification=is_classification
)
score_frame = pandas.merge(var_table, sf, how="left", on=["variable"], sort=False)
num_treatment_types = len(score_frame["treatment"].unique())
score_frame["_one"] = 1.0
score_frame["vcount"] = score_frame.groupby("treatment")["_one"].transform("sum")
score_frame["default_threshold"] = 1.0 / (
score_frame["vcount"] * num_treatment_types
)
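    # Worked example of the threshold heuristic (illustrative numbers, not taken from
    # any dataset): with vcount = 10 variables of a given treatment and
    # num_treatment_types = 3, each such variable gets default_threshold = 1 / (10 * 3)
    # ≈ 0.033. Accepting every variable exactly at its threshold spends a total
    # significance budget of roughly 1, so about one pure-noise variable would be
    # expected to slip through on average.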
score_frame.drop(["_one"], axis=1, inplace=True)
score_frame["recommended"] = numpy.logical_and(
score_frame["has_range"],
numpy.logical_and(
numpy.logical_not(
numpy.logical_or(
numpy.isnan(score_frame["significance"]),
numpy.isnan(score_frame["PearsonR"]),
)
),
numpy.logical_and(
score_frame["significance"] < score_frame["default_threshold"],
numpy.logical_or(
score_frame["PearsonR"] > 0.0,
numpy.logical_not(score_frame["y_aware"]),
),
),
),
)
return score_frame
def pseudo_score_plan_variables(*, cross_frame, plan, params):
def describe_xf(xf):
description = pandas.DataFrame({"variable": xf.derived_column_names_})
description["orig_variable"] = xf.incoming_column_name_
description["treatment"] = xf.treatment_
description["y_aware"] = xf.need_cross_treatment_
return description
def describe_ut(ut):
description = pandas.DataFrame(
{"orig_variable": ut.incoming_vars_, "variable": ut.derived_vars_}
)
description["treatment"] = ut.treatment_
description["y_aware"] = ut.y_aware_
return description
score_frame = pandas.concat(
[describe_xf(xf) for xf in plan["xforms"]]
+ [
describe_ut(ut)
for ut in params["user_transforms"]
if len(ut.incoming_vars_) > 0
],
sort=False,
)
score_frame.reset_index(inplace=True, drop=True)
score_frame["has_range"] = [
vtreat.util.numeric_has_range(cross_frame[c]) for c in score_frame["variable"]
]
score_frame["PearsonR"] = numpy.nan
score_frame["significance"] = numpy.nan
score_frame["recommended"] = score_frame["has_range"].copy()
score_frame["_one"] = 1.0
score_frame["vcount"] = score_frame.groupby("treatment")["_one"].transform("sum")
score_frame.drop(["_one"], axis=1, inplace=True)
return score_frame
class VariableTreatment(ABC):
def __init__(
self, *,
var_list=None,
outcome_name=None,
outcome_target=None,
cols_to_copy=None,
params=None,
imputation_map=None,
):
if var_list is None:
var_list = []
else:
var_list = vtreat.util.unique_itmes_in_order(var_list)
if cols_to_copy is None:
cols_to_copy = []
else:
cols_to_copy = vtreat.util.unique_itmes_in_order(cols_to_copy)
if outcome_name is not None and outcome_name not in set(cols_to_copy):
cols_to_copy = cols_to_copy + [outcome_name]
confused = set(cols_to_copy).intersection(set(var_list))
if len(confused) > 0:
raise ValueError("variables in treatment plan and non-treatment: " + ', '.join(confused))
if imputation_map is None:
imputation_map = {} # dict
self.outcome_name_ = outcome_name
self.outcome_target_ = outcome_target
self.var_list_ = [vi for vi in var_list if vi not in set(cols_to_copy)]
self.cols_to_copy_ = cols_to_copy
self.params_ = params.copy()
self.imputation_map_ = imputation_map.copy()
self.plan_ = None
self.score_frame_ = None
self.cross_rows_ = None
self.cross_plan_ = None
self.last_fit_x_id_ = None
self.last_result_columns = None
self.result_restriction = None
self.clear()
def check_column_names(self, col_names):
to_check = set(self.var_list_)
if self.outcome_name_ is not None:
to_check.add(self.outcome_name_)
if self.cols_to_copy_ is not None:
to_check.update(self.cols_to_copy_)
seen = [c for c in col_names if c in to_check]
if len(seen) != len(set(seen)):
raise ValueError("duplicate column names in frame")
def clear(self):
self.plan_ = None
self.score_frame_ = None
self.cross_rows_ = None
self.cross_plan_ = None
self.last_fit_x_id_ = None
self.last_result_columns = None
self.result_restriction = None
def get_result_restriction(self):
if self.result_restriction is None:
return None
return self.result_restriction.copy()
def set_result_restriction(self, new_vars):
self.result_restriction = None
if (new_vars is not None) and (len(new_vars) > 0):
self.result_restriction = set(new_vars)
def merge_params(self, p):
raise NotImplementedError("base class called")
# display methods
def __repr__(self):
fmted = str(self.__class__.__module__) + "." + str(self.__class__.__name__) + '('
if self.outcome_name_ is not None:
fmted = fmted + "outcome_name=" + pprint.pformat(self.outcome_name_) + ", "
if self.outcome_target_ is not None:
fmted = fmted + "outcome_target=" + pprint.pformat(self.outcome_target_) + ", "
if (self.var_list_ is not None) and (len(self.var_list_) > 0):
fmted = fmted + "var_list=" + pprint.pformat(self.var_list_) + ", "
if (self.cols_to_copy_ is not None) and (len(self.cols_to_copy_) > 0):
fmted = fmted + "cols_to_copy=" + pprint.pformat(self.cols_to_copy_) + ", "
# if (self.params_ is not None) and (len(self.params_) > 0):
# fmted = fmted + "params=" + pprint.pformat(self.params_) + ",\n"
# if (self.imputation_map_ is not None) and (len(self.imputation_map_) > 0):
# fmted = fmted + "imputation_map=" + pprint.pformat(self.imputation_map_) + ",\n"
fmted = fmted + ')'
return fmted
def __str__(self):
return self.__repr__()
# sklearn pipeline step methods
# https://scikit-learn.org/stable/modules/generated/sklearn.base.TransformerMixin.html
# noinspection PyPep8Naming, PyUnusedLocal
def fit(self, X, y=None, **fit_params):
self.fit_transform(X=X, y=y)
return self
# noinspection PyPep8Naming, PyUnusedLocal
def fit_transform(self, X, y=None, **fit_params):
raise NotImplementedError("base class method called")
# noinspection PyPep8Naming
def transform(self, X):
raise NotImplementedError("base class method called")
# https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def get_params(self, deep=True):
"""
        vtreat exposes a subset of controls as tunable parameters; users can choose this set
        by specifying the tunable_params list in the object construction parameters
"""
return {ti: self.params_[ti] for ti in self.params_["tunable_params"]}
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def set_params(self, **params):
"""
        vtreat exposes a subset of controls as tunable parameters; users can choose this set
        by specifying the tunable_params list in the object construction parameters
"""
for (k, v) in params.items():
if k in self.params_["tunable_params"]:
self.params_[k] = v
return self
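
    # Sketch of how the parameter API above is typically exercised (hedged: the
    # parameter name 'indicator_min_fraction' is only an example of a vtreat control
    # and is an assumption here; set_params silently ignores keys that are not listed
    # in params_['tunable_params']):
    #   transform.set_params(indicator_min_fraction=0.1)
    #   transform.get_params()  # -> subset of controls listed in 'tunable_params'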
# extra methods to look more like sklearn objects
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
# noinspection PyPep8Naming
def fit_predict(self, X, y=None, **fit_params):
return self.fit_transform(X=X, y=y, **fit_params)
# noinspection PyPep8Naming
def predict(self, X):
return self.transform(X)
# noinspection PyPep8Naming
def predict_proba(self, X):
return self.transform(X)
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
def get_feature_names(self, input_features=None):
if self.score_frame_ is None:
raise ValueError("get_feature_names called on uninitialized vtreat transform")
if self.params_['filter_to_recommended']:
new_vars = [self.score_frame_['variable'][i] for i in range(self.score_frame_.shape[0])
if self.score_frame_['has_range'][i] and self.score_frame_['recommended'][i]
and (input_features is None or self.score_frame_['orig_variable'][i] in input_features)]
else:
new_vars = [self.score_frame_['variable'][i] for i in range(self.score_frame_.shape[0])
if self.score_frame_['has_range'][i]
and (input_features is None or self.score_frame_['orig_variable'][i] in input_features)]
new_vars = new_vars + self.cols_to_copy_
return new_vars
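

# Illustrative usage sketch (not part of this module): the treatment classes built on
# VariableTreatment are meant to be used sklearn-style -- fit_transform() on training
# data returns a cross-validated frame, transform() prepares new data, and
# score_frame_ supports variable filtering. The NumericOutcomeTreatment name and the
# toy frame below are assumptions for illustration only.
def _example_usage_sketch():
    import numpy
    import pandas
    import vtreat  # assumed public package entry point

    d = pandas.DataFrame({"x": ["a", "a", "b", "b", None] * 20})
    d["y"] = numpy.where(d["x"] == "b", 1.0, 0.0) + numpy.random.normal(size=d.shape[0])
    treatment = vtreat.NumericOutcomeTreatment(outcome_name="y")
    cross_frame = treatment.fit_transform(d, d["y"])  # cross-validated training frame
    test_frame = treatment.transform(d)               # naive transform for held-out data
    return cross_frame, test_frame, treatment.score_frame_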
| 38.869323
| 115
| 0.612375
| 11,028
| 0.262947
| 0
| 0
| 0
| 0
| 0
| 0
| 6,418
| 0.153028
|
62383bc8933f1f4eaa948064e8b702400552ae83
| 428
|
py
|
Python
|
resqs/core/urls.py
|
UMass-Rescue/moto
|
3aa52aca28c622be9708da5fd31a8c8b92801634
|
[
"Apache-2.0"
] | null | null | null |
resqs/core/urls.py
|
UMass-Rescue/moto
|
3aa52aca28c622be9708da5fd31a8c8b92801634
|
[
"Apache-2.0"
] | null | null | null |
resqs/core/urls.py
|
UMass-Rescue/moto
|
3aa52aca28c622be9708da5fd31a8c8b92801634
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from .responses import MotoAPIResponse
url_bases = ["https?://motoapi.amazonaws.com"]
response_instance = MotoAPIResponse()
url_paths = {
"{0}/resqs-api/$": response_instance.dashboard,
"{0}/resqs-api/data.json": response_instance.model_data,
"{0}/resqs-api/reset": response_instance.reset_response,
"{0}/resqs-api/reset-auth": response_instance.reset_auth_response,
}
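
# Note on the "{0}" placeholders (descriptive comment; the substitution itself happens
# in the consuming framework, not in this file): each key above is a format string that
# gets filled in with an entry from url_bases, e.g.
#   "https?://motoapi.amazonaws.com/resqs-api/$" -> response_instance.dashboard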
| 30.571429
| 70
| 0.752336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 121
| 0.28271
|
6238442b97ca6a6ef8a0ad9749bdaae56317f29d
| 1,305
|
py
|
Python
|
hammer/tracker.py
|
mizerlou/hammer
|
353f176bffff4a6b7726361cdafb986fe2302f19
|
[
"Apache-2.0"
] | 1
|
2016-06-06T20:22:13.000Z
|
2016-06-06T20:22:13.000Z
|
hammer/tracker.py
|
mizerlou/hammer
|
353f176bffff4a6b7726361cdafb986fe2302f19
|
[
"Apache-2.0"
] | null | null | null |
hammer/tracker.py
|
mizerlou/hammer
|
353f176bffff4a6b7726361cdafb986fe2302f19
|
[
"Apache-2.0"
] | null | null | null |
import anydbm, os.path, time, bsddb, sys
class MessageTracker:
def __init__(self, tracker_file):
flag = (os.path.exists(tracker_file) and "w") or "c"
#self.tracker = anydbm.open(tracker_file, flag)
self.tracker = bsddb.hashopen(tracker_file, flag)
def close(self):
self.tracker.close()
def get_id(self, msg):
return msg["message-id"]
# return (msg["message-id"]
# + "/" + msg.get("x-from-line", msg.get("from", ""))
# + "/" + msg.get("to", ""))
def ham(self, msg):
self._add(msg, "h")
def spam(self, msg):
self._add(msg, "s")
    def _add(self, msg, val):
        key = None
        try:
            key = self.get_id(msg)
            self.tracker[key] = val
        except:
            print >> sys.stderr, "ERROR: '%s' => '%s'" % (key, val)
raise
def get(self, msg, failobj=None):
key = self.get_id(msg)
try:
return self.tracker[key]
except KeyError:
return failobj
def seen(self, msg):
return self.tracker.has_key(self.get_id(msg))
def remove(self, msg):
del self.tracker[self.get_id(msg)]
def dump(self):
for (k,v) in self.tracker.iteritems():
print k, "---", v
| 27.1875
| 68
| 0.514943
| 1,262
| 0.96705
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.185441
|
623beafade5cf281facce9b0ef0d77606bd2dfcb
| 1,353
|
py
|
Python
|
code/glove_bot.py
|
AmanPriyanshu/Bavardez
|
221980add10a8bea69db4d3357660d27a8d6cdb3
|
[
"MIT"
] | 1
|
2021-12-28T13:16:17.000Z
|
2021-12-28T13:16:17.000Z
|
glove_bot.py
|
AmanPriyanshu/Bavardez
|
221980add10a8bea69db4d3357660d27a8d6cdb3
|
[
"MIT"
] | null | null | null |
glove_bot.py
|
AmanPriyanshu/Bavardez
|
221980add10a8bea69db4d3357660d27a8d6cdb3
|
[
"MIT"
] | null | null | null |
import random
import torch
import pandas as pd
import numpy as np
from glove_model import get_model
from intent_initializer import read_all_intents, read_all_responses
from GloVe_helper import GloVeLoader
PATH = './config/'
BOT_NAME = 'Bavardez'
def load_bot():
model_details = torch.load(PATH+'model_details_GloVe.pt')
model = get_model(model_details['input_size'], model_details['hidden_size'], model_details['output_size'])
model.load_state_dict(model_details['model_state'])
model.eval()
tags = model_details['tags']
return model, tags
def main():
model, tags = load_bot()
df_responses = read_all_responses()
activation = torch.nn.Softmax(1)
gl = GloVeLoader()
print("Let's chat! (GloVe version) Type \"quit\" to exit.")
while True:
sentence = input("You:\t")
if sentence == "quit":
break
embed = gl.pull_glove_embed([sentence])
output = model(embed)
probs = activation(output).flatten()
predicted_label = torch.argmax(probs)
tag = tags[predicted_label.item()]
if probs[predicted_label]>0.5:
if tag in list(df_responses.keys()):
answer = random.choice(df_responses[tag])
else:
answer = "Sorry there's an error in OUR SYSTEM! Please re-phrase"
else:
answer = "I do not understand you."
print(BOT_NAME+":\t"+answer)
print("Thankyou for using "+BOT_NAME)
if __name__ == '__main__':
main()
| 27.06
| 107
| 0.725795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.211382
|
623c5e03f30d1e94a196a72edaa4010032eb29e4
| 984
|
py
|
Python
|
tests/coworks/biz/test_biz_ms.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
tests/coworks/biz/test_biz_ms.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
tests/coworks/biz/test_biz_ms.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
import pytest
import requests
from tests.coworks.biz.biz_ms import BizMS
from tests.coworks.tech.test_ms import SimpleMS
class TestClass:
def atest_ms(self, local_server_factory):
tech = SimpleMS()
with pytest.raises(Exception) as pytest_wrapped_e:
@tech.schedule('rate(1 hour)', name='hourly', description="Test hourly.")
def every_sample(name):
return tech.get(name=name)
def atest_biz(self, local_server_factory):
biz = BizMS()
@biz.schedule('rate(1 hour)', name='hourly', description="Test hourly.")
        @biz.schedule('cron(00 15 * * ? *)', name="daily", description="Test daily.")
def every_sample(name):
return biz.get(name=name)
local_server = local_server_factory(biz)
response = local_server.make_call(requests.get, '/')
assert response.status_code == 200
assert response.text == 'ok'
assert len(biz.schedule_entries) == 2
| 30.75
| 85
| 0.642276
| 859
| 0.872967
| 0
| 0
| 379
| 0.385163
| 0
| 0
| 119
| 0.120935
|
623d0484a6ad38e8b613031601faa989033dfbd4
| 1,030
|
py
|
Python
|
d3rlpy/iterators/random_iterator.py
|
YangRui2015/d3rlpy
|
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
|
[
"MIT"
] | 1
|
2021-05-08T06:21:05.000Z
|
2021-05-08T06:21:05.000Z
|
d3rlpy/iterators/random_iterator.py
|
YangRui2015/d3rlpy
|
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
|
[
"MIT"
] | null | null | null |
d3rlpy/iterators/random_iterator.py
|
YangRui2015/d3rlpy
|
da778b2a2b0afbafe25395296baecd0d4d0cd0d5
|
[
"MIT"
] | null | null | null |
from typing import List, cast
import numpy as np
from ..dataset import Episode, Transition
from .base import TransitionIterator
class RandomIterator(TransitionIterator):
_n_samples_per_epoch: int
_index: int
def __init__(
self,
episodes: List[Episode],
batch_size: int,
n_steps: int = 1,
gamma: float = 0.99,
n_frames: int = 1,
):
super().__init__(episodes, batch_size, n_steps, gamma, n_frames)
self._n_samples_per_epoch = batch_size * (self.size() // batch_size)
self._index = 0
def _reset(self) -> None:
batch_size = self._batch_size
self._n_samples_per_epoch = batch_size * (self.size() // batch_size)
self._index = 0
def _next(self) -> Transition:
index = cast(int, np.random.randint(self.size()))
transition = self._transitions[index]
self._index += 1
return transition
def _has_finished(self) -> bool:
return self._index >= self._n_samples_per_epoch
| 26.410256
| 76
| 0.635922
| 897
| 0.870874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
623e6cd0ebfab7a2fa9506ea275e7ff09e80964a
| 420
|
py
|
Python
|
src/logger.py
|
Electronya/rc-mission-operator
|
2e1571a68df9df82629ebc4eebb248c055fe6066
|
[
"MIT"
] | null | null | null |
src/logger.py
|
Electronya/rc-mission-operator
|
2e1571a68df9df82629ebc4eebb248c055fe6066
|
[
"MIT"
] | 8
|
2021-09-02T23:58:28.000Z
|
2021-11-20T22:49:16.000Z
|
src/logger.py
|
Electronya/rc-mission-operator
|
2e1571a68df9df82629ebc4eebb248c055fe6066
|
[
"MIT"
] | null | null | null |
import logging
import os
def initLogger() -> object:
"""
Initialize the logger.
"""
logger_level = logging.INFO
if 'APP_ENV' in os.environ:
if os.environ['APP_ENV'] == 'dev':
logger_level = logging.DEBUG
logging.basicConfig(level=logger_level,
format='%(asctime)s %(levelname)s:'
'%(name)s:%(message)s')
return logging
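
# Minimal usage sketch (assumed call site, not part of this module):
#   log = initLogger().getLogger(__name__)
#   log.debug("only emitted when APP_ENV is set to 'dev'")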
| 21
| 59
| 0.552381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.264286
|
623e9fa11082a891b75c31694bc78e3fc6fdef29
| 392
|
py
|
Python
|
Photo.py
|
cpt-majkel/hashhash
|
5bac15097d8fe84d5c23611fd15ceda727999a41
|
[
"MIT"
] | null | null | null |
Photo.py
|
cpt-majkel/hashhash
|
5bac15097d8fe84d5c23611fd15ceda727999a41
|
[
"MIT"
] | null | null | null |
Photo.py
|
cpt-majkel/hashhash
|
5bac15097d8fe84d5c23611fd15ceda727999a41
|
[
"MIT"
] | null | null | null |
class Photo(object):
def __init__(self, photo_id: int, orientation: str, number_of_tags: int, tags: set):
self.id = photo_id
self.orientation = orientation
self.number_of_tags = number_of_tags
self.tags = tags
self.is_vertical = orientation == 'V'
self.is_horizontal = orientation == 'H'
def __str__(self):
return str(self.id)
| 32.666667
| 88
| 0.635204
| 391
| 0.997449
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.015306
|
623f19da861cce44fb9bf0964c673c92b5ac9b2f
| 713
|
py
|
Python
|
learn_big_data_on_aws/config.py
|
MacHu-GWU/learn_big_data_on_aws-project
|
0db78c35a1712fdd905763fd299663982e44601c
|
[
"MIT"
] | null | null | null |
learn_big_data_on_aws/config.py
|
MacHu-GWU/learn_big_data_on_aws-project
|
0db78c35a1712fdd905763fd299663982e44601c
|
[
"MIT"
] | null | null | null |
learn_big_data_on_aws/config.py
|
MacHu-GWU/learn_big_data_on_aws-project
|
0db78c35a1712fdd905763fd299663982e44601c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from s3pathlib import S3Path
class Config:
aws_profile = "aws_data_lab_sanhe"
aws_region = "us-east-2"
# where you store data, artifacts
bucket = "aws-data-lab-sanhe-for-everything-us-east-2"
# s3 folder for data lake
dataset_prefix = "poc/2022-02-26-learn_big_data_on_aws/dataset"
# s3 folder for athena results
athena_result_prefix = "athena/results"
# glue catalog database name
dbname = "poc"
@property
def s3path_dataset_prefix(self):
return S3Path(self.bucket, self.dataset_prefix)
@property
def s3path_athena_result_prefix(self):
return S3Path(self.bucket, self.athena_result_prefix)
config = Config()
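
# Illustrative usage sketch (assumed call site; the .uri attribute is provided by
# s3pathlib):
#   config.s3path_dataset_prefix.uri   # full "s3://<bucket>/<dataset_prefix>" URI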
| 24.586207
| 67
| 0.695652
| 635
| 0.890603
| 0
| 0
| 216
| 0.302945
| 0
| 0
| 282
| 0.395512
|
623f3e0f81181826dc28972fa527ecf69b25e1f9
| 15,091
|
py
|
Python
|
tests/test_plotter_utils.py
|
natter1/GEDFReader
|
360454c80d7aef375d3d5a825e51073ab8bc3d98
|
[
"MIT"
] | null | null | null |
tests/test_plotter_utils.py
|
natter1/GEDFReader
|
360454c80d7aef375d3d5a825e51073ab8bc3d98
|
[
"MIT"
] | 2
|
2021-05-03T22:04:17.000Z
|
2021-05-04T10:33:32.000Z
|
tests/test_plotter_utils.py
|
natter1/gdef_reader
|
360454c80d7aef375d3d5a825e51073ab8bc3d98
|
[
"MIT"
] | null | null | null |
"""
This file contains tests for plotter_utils.py.
@author: Nathanael Jöhrmann
"""
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.figure import Figure
from gdef_reporter.plotter_styles import get_plotter_style_histogram
from gdef_reporter.plotter_utils import plot_to_ax, create_plot, plot_z_histogram_to_ax, create_z_histogram_plot, \
_extract_ndarray_and_pixel_width, save_figure, create_rms_plot, create_rms_with_error_plot, create_summary_plot
from tests.conftest import AUTO_SHOW
ORIGINAL_FIGURE_SIZE = (4, 3.5)
ORIGINAL_DPI = 300
def auto_show(fig):
if AUTO_SHOW:
fig.show()
# tests for functions to plot a 2D area map
class TestAreaPlots:
def test_plot_to_ax(self, data_test_cases):
fig1, ax1 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE)
plot_to_ax(ax1, data_test_cases, pixel_width=1.0)
auto_show(fig1)
assert type(fig1) is Figure
assert fig1.axes[1].get_title() == "nm" # default unit for z-values should be nm
fig2, ax2 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE)
pixel_width = 5.0
title = f"{type(data_test_cases).__name__}\npixel_width={pixel_width}"
plot_to_ax(ax2, data_test_cases, pixel_width=pixel_width, title=title)
auto_show(fig2)
assert ax2.get_title() == title
fig3, ax3 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE)
z_factor = 1.0
title = f"{type(data_test_cases).__name__}\nz_unit: [m] - z_factor={z_factor}"
plot_to_ax(ax3, data_test_cases, pixel_width=5.0, z_unit="µm", title=title)
auto_show(fig3)
assert fig3.axes[1].get_title() == "\u03BCm"
def test_create_plot(self, data_test_cases):
fig1 = create_plot(data_test_cases, 1e-6, "default value for cropped (True)", ORIGINAL_FIGURE_SIZE,
ORIGINAL_DPI)
auto_show(fig1)
assert np.any(comparison := (fig1.get_size_inches() < ORIGINAL_FIGURE_SIZE)) and not np.all(comparison)
assert fig1.dpi == ORIGINAL_DPI
fig2 = create_plot(data_test_cases, 1e-6, "cropped=False", max_figure_size=ORIGINAL_FIGURE_SIZE, cropped=False)
assert np.all(fig2.get_size_inches() == ORIGINAL_FIGURE_SIZE)
auto_show(fig2)
class Test1DPlotZHistogram:
def test_plot_z_histogram_to_ax__defaults(self, data_test_cases):
        # first, check default behaviour of parameters title, n_bins, units and add_norm
fig1, ax1 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE, constrained_layout=True)
plot_z_histogram_to_ax(ax1, data_test_cases, title="")
auto_show(fig1)
assert len(ax1.lines) == 0 # no Gauss fit (expected default behaviour)
assert ax1.get_title().startswith("\u03BC=") # default title starts with mu=...
assert ax1.get_xlabel() == "z [\u03BCm]" # default units should be µm; note: µ == \u03BC is False!
assert len(ax1.containers[0]) == 200 # default n_bins should be 200
def test_plot_z_histogram_to_ax__defaults_multiple(self, multiple_data_test_cases):
        # first, check default behaviour of parameters title, n_bins, units and add_norm
fig1, ax1 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE, constrained_layout=True)
if isinstance(multiple_data_test_cases, dict):
if (_list := len([data for data in multiple_data_test_cases.values() if isinstance(data, np.ndarray)]) > 0)\
and _list < len(multiple_data_test_cases):
with pytest.raises(AssertionError):
plot_z_histogram_to_ax(ax1, multiple_data_test_cases)
return
plot_z_histogram_to_ax(ax1, multiple_data_test_cases, title="")
auto_show(fig1)
assert len(ax1.lines) == 0 # no Gauss fit (expected default behaviour)
if len(multiple_data_test_cases) == 1:
assert ax1.get_title().startswith("\u03BC=") # default title for one data set shows mu=...
else:
assert ax1.get_title() == "" # no default title if no data or more than one dataset
assert ax1.get_xlabel() == "z [\u03BCm]" # default units should be µm; note: µ == \u03BC is False!
for container in ax1.containers:
assert len(container.patches) == 200 # default n_bins should be 200
def test_plot_z_histogram_to_ax__set_parameters(self, data_test_cases):
        # first, check setting a title, selecting units nm, set n_bins and draw normal distribution fit
fig1, ax1 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE, constrained_layout=True)
        title = "Use [nm] and Gauss fit"
n_bins = 20
plot_z_histogram_to_ax(ax1, data_test_cases, n_bins=n_bins, units="nm", title=title, add_norm=True)
auto_show(fig1)
assert len(ax1.lines) == 1 # Gauss fit (add_norm=True)
assert ax1.get_title() == title
        assert ax1.get_xlabel() == "z [nm]"  # note: comparison between µ and \u03BC is False!
        assert len(ax1.containers[0]) == n_bins  # n_bins explicitly set to 20 above
# second, check no title via title=None
fig2, ax2 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE, constrained_layout=True)
title = None
plot_z_histogram_to_ax(ax2, data_test_cases, n_bins=20, units="µm", title=title)
auto_show(fig2)
assert ax2.get_title() == "" # expected for title=None
def test_plot_z_histogram_to_ax__multiple(self, multiple_data_test_cases):
fig1, ax1 = plt.subplots(1, 1, dpi=ORIGINAL_DPI, figsize=ORIGINAL_FIGURE_SIZE, constrained_layout=True)
pixel_width = None
if isinstance(multiple_data_test_cases, dict):
pixel_width = 0.5e-6
plot_z_histogram_to_ax(ax1, multiple_data_test_cases, pixel_width=pixel_width, title="", add_norm=True)
auto_show(fig1)
assert isinstance(fig1, Figure)
assert len(fig1.axes[0].containers) == len(multiple_data_test_cases)
assert len(fig1.axes[0].lines) == len(multiple_data_test_cases) # Gauss fits (add_norm=True)
def test_create_z_histogram_plot__defaults(self, data_test_cases):
# check setting figure_size and dpi and also default of parameters title, n_bins, units and add_norm
fig1 = create_z_histogram_plot(data_test_cases)
auto_show(fig1)
assert type(fig1) is Figure
assert len(fig1.axes[0].lines) == 0 # no Gauss fit (expected default behaviour)
assert fig1.axes[0].get_title().startswith("\u03BC=") # default title starts with mu=...
assert fig1.axes[0].get_xlabel() == "z [\u03BCm]" # default units should be µm; note: µ == \u03BC is False!
assert len(fig1.axes[0].containers[0]) == 200 # default n_bins should be 200
    def test_create_z_histogram_plot__set_parameters(self, data_test_cases):
        # first, check setting label, a title, selecting units nm, set n_bins and draw normal distribution fit
labels = type(data_test_cases).__name__
title = "Use [nm] and Gauss fit"
n_bins = 20
plotter_style = get_plotter_style_histogram(ORIGINAL_DPI, ORIGINAL_FIGURE_SIZE)
fig1 = create_z_histogram_plot(data_test_cases, labels, n_bins=n_bins, title=title, units="nm", add_norm=True,
plotter_style=plotter_style)
auto_show(fig1)
assert len(fig1.axes[0].lines) == 1 # Gauss fit (add_norm=True)
assert np.any(fig1.get_size_inches() == ORIGINAL_FIGURE_SIZE)
assert fig1.dpi == ORIGINAL_DPI
assert fig1._suptitle.get_text() == title
assert fig1.axes[0].get_title() == ""
        assert fig1.axes[0].get_xlabel() == "z [nm]"  # note: comparison between µ and \u03BC is False!
        assert len(fig1.axes[0].containers[0]) == n_bins  # n_bins explicitly set to 20 above
# second, check no title via title=None
fig2 = create_z_histogram_plot(data_test_cases, title=None)
auto_show(fig2)
assert fig2._suptitle is None
assert fig2.axes[0].get_title() == ""
def test_create_z_histogram_plot__multiple(self, multiple_data_test_cases):
labels = None
pixel_width = 0.5e-6
if isinstance(multiple_data_test_cases, list):
labels = []
for i, data in enumerate(multiple_data_test_cases):
labels.append(f"{i} - {type(data).__name__}")
fig1 = create_z_histogram_plot(multiple_data_test_cases, pixel_width=pixel_width, labels=labels, title="",
add_norm=True)
auto_show(fig1)
assert len(fig1.axes[0].containers) == len(multiple_data_test_cases)
assert len(fig1.axes[0].lines) == len(multiple_data_test_cases) # Gauss fits (add_norm=True)
class Test1DPlotRMS:
def test_plot_rms_to_ax(self):
pass
def test_create_rms_plot__default(self, data_test_cases):
fig = create_rms_plot(data_test_cases)
assert isinstance(fig, Figure)
if isinstance(data_test_cases, np.ndarray):
assert fig.axes[0].get_xlabel() == "x [px]"
else:
assert fig.axes[0].get_xlabel() == "x [\u03BCm]"
assert fig.axes[0].legend_ is None
auto_show(fig)
def test_create_rms_plot__set_parameters(self, data_test_cases):
pixel_width = 0.5e-9 # define a length scale for np.ndarray
labels = f"{type(data_test_cases).__name__}"
fig = create_rms_plot(data_test_cases, label_list=labels, pixel_width=pixel_width, moving_average_n=1,
subtract_average=True, x_units="nm")
assert isinstance(fig, Figure)
assert fig.axes[0].get_xlabel() == "x [nm]"
assert fig.axes[0].legend_ is not None
auto_show(fig)
def test_create_rms_plot__multiple_default(self, multiple_data_test_cases):
if isinstance(multiple_data_test_cases, dict):
if (_list := len([data for data in multiple_data_test_cases.values() if isinstance(data, np.ndarray)]) > 0)\
and _list < len(multiple_data_test_cases):
with pytest.raises(AssertionError):
create_rms_plot(multiple_data_test_cases)
return
fig = create_rms_plot(multiple_data_test_cases)
assert len(multiple_data_test_cases) == len(fig.axes[0].lines)
auto_show(fig)
def test_create_rms_plot__multiple_set_parameter(self, multiple_data_test_cases):
labels = None
pixel_width = 0.5e-6
title = type(multiple_data_test_cases).__name__
if isinstance(multiple_data_test_cases, list):
labels = [f"{type(data).__name__}" for data in multiple_data_test_cases]
fig = create_rms_plot(multiple_data_test_cases, label_list=labels, pixel_width=pixel_width, moving_average_n=1,
subtract_average=False, x_units="nm", title=title)
assert fig.axes[0].legend_ is not None or len(multiple_data_test_cases) == 0
assert len(multiple_data_test_cases) == len(fig.axes[0].lines)
assert fig.axes[0].get_xlabel() == "x [nm]"
auto_show(fig)
class Test1DPlotRMSWithError:
def test_create_rms_with_error_plot(self, data_test_cases):
fig = create_rms_with_error_plot(data_test_cases)
if isinstance(data_test_cases, np.ndarray):
assert fig.axes[0].get_xlabel() == "x [px]"
else:
assert fig.axes[0].get_xlabel() == "x [\u03BCm]"
auto_show(fig)
def test_create_rms_with_error_plot__multiple(self, multiple_data_test_cases):
pixel_width = None
if isinstance(multiple_data_test_cases, dict):
if (_list := len([data for data in multiple_data_test_cases.values() if isinstance(data, np.ndarray)]) > 0)\
and _list < len(multiple_data_test_cases):
with pytest.raises(AssertionError):
create_rms_with_error_plot(multiple_data_test_cases)
pixel_width = 0.5e-6 # setting a pixel_width, np.ndarray has a length scale -> no AssertionError
fig = create_rms_with_error_plot(multiple_data_test_cases, pixel_width=pixel_width)
assert fig.axes[0].get_xlabel() == "x [\u03BCm]"
auto_show(fig)
class TestSummaryPlot:
def test_create_summary_plot(self, multiple_data_test_cases):
pixel_width = 0.5e-6
title = f"{type(multiple_data_test_cases).__name__}"
fig = create_summary_plot(multiple_data_test_cases, pixel_width=pixel_width, title=title)
assert isinstance(fig, Figure)
auto_show(fig)
class TestSpecialFunctions:
def test_extract_ndarray_and_pixel_width(self, data_test_cases):
pixel_width = 1
ndarray2d, px_width = _extract_ndarray_and_pixel_width(data_test_cases, pixel_width=pixel_width)
assert type(ndarray2d) is np.ndarray
if isinstance(data_test_cases, np.ndarray):
assert np.all(data_test_cases == ndarray2d)
assert px_width == pixel_width
else:
assert np.all(data_test_cases.values == ndarray2d)
assert data_test_cases.pixel_width == px_width
def test_save_figure(self, tmp_path):
fig, _ = plt.subplots(1, 1, dpi=72, figsize=(1, 1), constrained_layout=True)
# first try saving in existing folder with default settings
assert tmp_path.exists()
filename = "default"
save_figure(fig, tmp_path, filename)
png_file = tmp_path / f"{filename}.png" # should be saved by default
pdf_file = tmp_path / f"{filename}.pdf" # should not be saved by default
assert png_file.exists()
assert not pdf_file.exists()
# second, save nothing:
filename = "save_nothing"
save_figure(fig, tmp_path, filename, png=False, pdf=False)
        png_file = tmp_path / f"{filename}.png"  # should not be saved (png=False)
        pdf_file = tmp_path / f"{filename}.pdf"  # should not be saved (pdf=False)
assert not png_file.exists()
assert not pdf_file.exists()
# third, only save pdf
filename = "save_pdf"
save_figure(fig, tmp_path, filename, png=False, pdf=True)
        png_file = tmp_path / f"{filename}.png"  # should not be saved (png=False)
        pdf_file = tmp_path / f"{filename}.pdf"  # should be saved (pdf=True)
assert not png_file.exists()
assert pdf_file.exists()
# fourth, use folder that does not exist jet and save both png and pdf
new_tmp_path = tmp_path / "new/"
assert not new_tmp_path.exists()
filename = "save_pdf_and_png"
save_figure(fig, new_tmp_path, filename, png=True, pdf=True)
        png_file = new_tmp_path / f"{filename}.png"  # should be saved (png=True)
        pdf_file = new_tmp_path / f"{filename}.pdf"  # should be saved (pdf=True)
assert png_file.exists()
assert pdf_file.exists()
| 49.316993
| 120
| 0.673514
| 14,406
| 0.953724
| 0
| 0
| 0
| 0
| 0
| 0
| 2,861
| 0.189407
|
62407f44181f5ecd79d5e3e000c96c7ee62ec644
| 3,590
|
py
|
Python
|
eahub/profiles/tests/test_tags_api.py
|
walambert/eahub.org
|
21b6111b2626e4739c249d0881d16fbc818094cb
|
[
"MIT"
] | 36
|
2019-02-22T23:07:14.000Z
|
2022-02-10T13:24:27.000Z
|
eahub/profiles/tests/test_tags_api.py
|
walambert/eahub.org
|
21b6111b2626e4739c249d0881d16fbc818094cb
|
[
"MIT"
] | 717
|
2019-02-21T22:07:55.000Z
|
2022-02-26T15:17:49.000Z
|
eahub/profiles/tests/test_tags_api.py
|
walambert/eahub.org
|
21b6111b2626e4739c249d0881d16fbc818094cb
|
[
"MIT"
] | 19
|
2019-04-14T14:37:56.000Z
|
2022-02-14T22:05:16.000Z
|
from typing import Tuple
from rest_framework.test import APITestCase
from eahub.profiles.models import Profile, ProfileTag, ProfileTagTypeEnum
from eahub.tests.cases import EAHubTestCase
class TagsApiTestCase(EAHubTestCase, APITestCase):
def test_creation(self):
profile, _, _, _ = self._generate_tags()
self.client.force_login(profile.user)
response = self.client.post(
f"/profile/api/profiles/tags/create/",
data={
"name": "Management",
"type": ProfileTagTypeEnum.CAREER_INTEREST.value,
},
format="json",
)
tag = ProfileTag.objects.get(pk=response.data["pk"])
self.assertEqual(tag.author, profile)
def test_addition(self):
# noinspection PyTypeChecker
for tag_type in ProfileTagTypeEnum:
self._test_addition(tag_type)
def test_retrieval(self):
# noinspection PyTypeChecker
for tag_type in ProfileTagTypeEnum:
self._test_retrieval(tag_type)
def test_deletion(self):
# noinspection PyTypeChecker
for tag_type in ProfileTagTypeEnum:
self._test_deletion(tag_type)
def _test_addition(self, type_enum: ProfileTagTypeEnum):
profile, tag1, tag2, tags_field_name = self._generate_tags(type_enum)
tag3 = self.gen.tag(types=[type_enum])
response = self.client.patch(
self._url_detail(profile.pk),
data={f"{tags_field_name}_pks": [tag1.pk, tag2.pk, tag3.pk]},
)
self.assertEqual(response.status_code, 200)
self.assertIn(tag1.pk, response.data[f"{tags_field_name}_pks"])
self.assertIn(tag2.pk, response.data[f"{tags_field_name}_pks"])
self.assertIn(tag3.pk, response.data[f"{tags_field_name}_pks"])
def _test_retrieval(self, type_enum: ProfileTagTypeEnum):
profile, tag1, tag2, tags_field_name = self._generate_tags(type_enum)
response = self.client.get(self._url_detail(profile.pk))
self.assertEqual(response.status_code, 200)
tags = response.data[tags_field_name]
self.assertIn(tag1.pk, response.data[f"{tags_field_name}_pks"])
self.assertIn(tag2.pk, response.data[f"{tags_field_name}_pks"])
for tag in tags:
if tag["name"] == tag1.name:
self.assertEqual(tag["types"][0]["type"], type_enum.value)
def _test_deletion(self, type_enum: ProfileTagTypeEnum):
profile, tag1, tag2, tags_field_name = self._generate_tags(type_enum)
response = self.client.patch(
self._url_detail(profile.pk),
data={f"{tags_field_name}_pks": [tag1.pk]},
)
self.assertEqual(response.status_code, 200)
profile_updated = Profile.objects.get(pk=profile.pk)
tags_field = getattr(profile_updated, tags_field_name)
self.assertTrue(tags_field.filter(pk=tag1.pk).exists())
self.assertFalse(tags_field.filter(pk=tag2.pk).exists())
def _generate_tags(
self,
type_enum: ProfileTagTypeEnum = ProfileTagTypeEnum.GENERIC,
) -> Tuple[Profile, ProfileTag, ProfileTag, str]:
profile = self.gen.profile()
tag1 = self.gen.tag(types=[type_enum])
tag2 = self.gen.tag(types=[type_enum])
tags_field_name = f"tags_{type_enum.value}"
tags_field = getattr(profile, tags_field_name)
tags_field.set([tag1, tag2])
return profile, tag1, tag2, tags_field_name
def _url_detail(self, profile_pk: int) -> str:
return f"/profile/api/profiles/{profile_pk}/"
| 40.795455
| 77
| 0.659331
| 3,398
| 0.946518
| 0
| 0
| 0
| 0
| 0
| 0
| 405
| 0.112813
|
62416172cffe17c94f2ee1ae5b11d654511779a9
| 279
|
py
|
Python
|
Task1/chapter14.py
|
shkhaider2015/AI_Lab_Task
|
642a0d5e30515dac6972da194741b829cdc63f30
|
[
"Unlicense"
] | null | null | null |
Task1/chapter14.py
|
shkhaider2015/AI_Lab_Task
|
642a0d5e30515dac6972da194741b829cdc63f30
|
[
"Unlicense"
] | null | null | null |
Task1/chapter14.py
|
shkhaider2015/AI_Lab_Task
|
642a0d5e30515dac6972da194741b829cdc63f30
|
[
"Unlicense"
] | null | null | null |
# addition takes place after multiplication and division
num1 = 1 + 4 * 3 / 2;
# same as 5 * 3 /2
num2 = (1 + 4) * 3 / 2;
# same as 1+12/2
num3 = 1 + (4 * 3) / 2;
print("python follows precedence rules");
# num1 and num3 evaluate to 7.0; num2 evaluates to 7.5
print(num1);
print(num2);
print(num3);
| 18.6
| 61
| 0.620072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.551971
|
6242dfa1c761870f2a85f43957247c13b7b53277
| 173
|
py
|
Python
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 202
|
2020-08-19T19:28:03.000Z
|
2022-03-29T07:10:47.000Z
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-24T09:28:05.000Z
|
2022-03-31T07:11:06.000Z
|
cosypose/simulator/__init__.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-19T19:28:05.000Z
|
2022-03-18T20:47:55.000Z
|
from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
| 28.833333
| 44
| 0.843931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62434c56ee7f47b918c8fe7743e7266baa6b6971
| 2,186
|
py
|
Python
|
python/scorecard/Config.py
|
opme/SurgeonScorecard
|
788f63fd4f906b27435d18565675553c7b738830
|
[
"Apache-2.0"
] | 6
|
2016-11-25T02:01:54.000Z
|
2021-08-01T21:54:46.000Z
|
python/scorecard/Config.py
|
opme/SurgeonScorecard
|
788f63fd4f906b27435d18565675553c7b738830
|
[
"Apache-2.0"
] | null | null | null |
python/scorecard/Config.py
|
opme/SurgeonScorecard
|
788f63fd4f906b27435d18565675553c7b738830
|
[
"Apache-2.0"
] | 2
|
2018-02-20T15:13:25.000Z
|
2020-02-16T07:56:06.000Z
|
import os
import sys
import configparser
class Config:
def __init__(self):
pass
#
# a simple function to read an array of configuration files into a config object
#
def read_config(self, cfg_files):
        config = None
        if(cfg_files != None):
config = configparser.RawConfigParser()
# merges all files into a single config
for i, cfg_file in enumerate(cfg_files):
if(os.path.exists(cfg_file)):
config.read(cfg_file)
if(config == None):
print("####################################")
print("Did not find any configuration files")
print("####################################")
sys.exit(0)
return config
#
# Validate properties
#
def validateProperties(self, config):
env = config.get('branch','env')
# Cannot set both filter_care_sites and include_care_sites
filter_care_sites = config.get(env+'.cohort','filter_care_sites').split(",")
if not filter_care_sites[0]:
filter_care_sites = []
include_care_sites = config.get(env+'.cohort','include_care_sites').split(",")
if not include_care_sites[0]:
include_care_sites = []
if (len(filter_care_sites) > 0 and len(include_care_sites) > 0):
print("###########################################################################")
print("Cannot set both filter_care_sites and include_care_sites in properties file")
print("###########################################################################")
sys.exit(0)
# If the user wants to dump the cohort back out to csv files, make sure the files
# do not already exist
write_csv_output = config.get(env+'.cohort','write_csv_output')
csv_output_dir = config.get(env+'.cohort','csv_output_dir')
if (write_csv_output == "True" and len(os.listdir(csv_output_dir)) > 0):
print("########################################")
print(" Files already exist in output directory")
print("########################################")
sys.exit(0)
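
# Illustrative usage sketch (file names below are placeholders, not from this project):
#   config = Config().read_config(["default.properties", "local.properties"])
#   Config().validateProperties(config)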
| 39.745455
| 96
| 0.515554
| 2,142
| 0.979872
| 0
| 0
| 0
| 0
| 0
| 0
| 910
| 0.416285
|
624457e04a90c3819a6cdb9b28bb79d1ea2ace26
| 726
|
py
|
Python
|
message_passing_nn/utils/loss_function/loss_functions.py
|
mathisi-ai/message-passing-neural-network
|
d6e27fcf05d06268a461e5f9d9cf81b7e3a5dc09
|
[
"MIT"
] | null | null | null |
message_passing_nn/utils/loss_function/loss_functions.py
|
mathisi-ai/message-passing-neural-network
|
d6e27fcf05d06268a461e5f9d9cf81b7e3a5dc09
|
[
"MIT"
] | 1
|
2020-12-13T10:37:03.000Z
|
2020-12-13T10:37:03.000Z
|
message_passing_nn/utils/loss_function/loss_functions.py
|
mathisi-ai/message-passing-neural-network
|
d6e27fcf05d06268a461e5f9d9cf81b7e3a5dc09
|
[
"MIT"
] | null | null | null |
from torch import nn
loss_functions = {
"MSE": nn.MSELoss,
"MSEPenalty": nn.MSELoss,
"L1": nn.L1Loss,
"CrossEntropy": nn.CrossEntropyLoss,
"CTC": nn.CTCLoss,
"NLL": nn.NLLLoss,
"PoissonNLL": nn.PoissonNLLLoss,
"KLDiv": nn.KLDivLoss,
"BCE": nn.BCELoss,
"BCEWithLogits": nn.BCEWithLogitsLoss,
"MarginRanking": nn.MarginRankingLoss,
"HingeEmbedding": nn.HingeEmbeddingLoss,
"MultiLabelMargin": nn.MultiLabelMarginLoss,
"SmoothL1": nn.SmoothL1Loss,
"SoftMargin": nn.SoftMarginLoss,
"MultiLabelSoftMargin": nn.MultiLabelSoftMarginLoss,
"CosineEmbedding": nn.CosineEmbeddingLoss,
"MultiMargin": nn.MultiMarginLoss,
"TripletMargin": nn.TripletMarginLoss
}
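
# Minimal usage sketch (the "MSE" key is only an example; in practice the key comes
# from configuration):
#   criterion = loss_functions["MSE"]()   # instantiates torch.nn.MSELoss
#   loss = criterion(predictions, targets)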
| 30.25
| 56
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.305785
|
6244eae08f282f33089c904acc046111406cab02
| 234
|
py
|
Python
|
ex15.py
|
phyupyarko/python-exercises
|
f231ca8c8c1f2614bb166cc72ce45860eff88c1d
|
[
"MIT"
] | null | null | null |
ex15.py
|
phyupyarko/python-exercises
|
f231ca8c8c1f2614bb166cc72ce45860eff88c1d
|
[
"MIT"
] | null | null | null |
ex15.py
|
phyupyarko/python-exercises
|
f231ca8c8c1f2614bb166cc72ce45860eff88c1d
|
[
"MIT"
] | null | null | null |
from sys import argv
script, filename=argv
txt = open(filename)
print(f"Here's your file {filename}:")
print(txt.read())
print("Type the filename again:")
file_again = input("> ")
txt_again = open (file_again)
print(txt_again.read())
| 23.4
| 38
| 0.726496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.260684
|
624637a865a05ff1c7e0a5862f34089e64e6bb76
| 4,042
|
py
|
Python
|
samples/vsphere/vcenter/setup/datacenter.py
|
restapicoding/VMware-SDK
|
edc387a76227be1ad7c03e5eeaf603351574f70c
|
[
"MIT"
] | 589
|
2017-03-09T19:01:22.000Z
|
2022-03-23T08:18:32.000Z
|
samples/vsphere/vcenter/setup/datacenter.py
|
restapicoding/VMware-SDK
|
edc387a76227be1ad7c03e5eeaf603351574f70c
|
[
"MIT"
] | 244
|
2017-03-09T19:37:36.000Z
|
2022-03-29T07:14:21.000Z
|
samples/vsphere/vcenter/setup/datacenter.py
|
restapicoding/VMware-SDK
|
edc387a76227be1ad7c03e5eeaf603351574f70c
|
[
"MIT"
] | 304
|
2017-03-09T19:15:01.000Z
|
2022-03-31T04:26:59.000Z
|
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
from com.vmware.vcenter_client import (Datacenter, Folder)
def folder_list_datacenter_folder(context):
return context.client.vcenter.Folder.list(Folder.FilterSpec(type=Folder.Type.DATACENTER))
def detect_datacenter(context, datacenter_name):
"""Find the datacenter with the given name"""
names = set([datacenter_name])
datacenter_summaries = context.client.vcenter.Datacenter.list(
Datacenter.FilterSpec(names=names))
if len(datacenter_summaries) > 0:
datacenter = datacenter_summaries[0].datacenter
print("Detected Datacenter '{}' as {}".
format(datacenter_name, datacenter))
context.testbed.entities['DATACENTER_IDS'][datacenter_name] = datacenter
return True
else:
print("Datacenter '{}' missing".format(datacenter_name))
return False
def detect_datacenters(context):
"""Find datacenters to run the vcenter samples"""
context.testbed.entities['DATACENTER_IDS'] = {}
# Look for the two datacenters
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
return (detect_datacenter(context, datacenter1_name) and
detect_datacenter(context, datacenter2_name))
def cleanup_datacenters(context):
"""Cleanup datacenters after sample run"""
# Look for the two datacenters
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
names = set([datacenter1_name, datacenter2_name])
datacenter_summaries = context.client.vcenter.Datacenter.list(
Datacenter.FilterSpec(names=names))
print("Found {} Datacenters matching names {}".
format(len(datacenter_summaries), ", ".
join(["'{}'".format(n) for n in names])))
for datacenter_summary in datacenter_summaries:
datacenter = datacenter_summary.datacenter
print("Deleting Datacenter '{}' ({})".
format(datacenter, datacenter_summary.name))
context.client.vcenter.Datacenter.delete(datacenter, force=True)
def setup_datacenters(context):
"""Create datacenters for running vcenter samples"""
# Find a Folder in which to put the Datacenters
folder_summaries = folder_list_datacenter_folder(context)
folder = folder_summaries[0].folder
print("Creating datacenters in Folder '{}' ({})".
format(folder, folder_summaries[0].name))
# Create first datacenter
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter1 = context.client.vcenter.Datacenter.create(
Datacenter.CreateSpec(name=datacenter1_name, folder=folder)
)
print("Created Datacenter '{}' ({})".format(datacenter1, datacenter1_name))
# Create second datacenter
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
datacenter2 = context.client.vcenter.Datacenter.create(
Datacenter.CreateSpec(name=datacenter2_name, folder=folder)
)
print("Created Datacenter '{}' ({})".format(datacenter2, datacenter2_name))
# Save datacenter name to identifier mappings for later use
context.testbed.entities['DATACENTER_IDS'] = {
datacenter1_name: datacenter1,
datacenter2_name: datacenter2
}
def cleanup(context):
cleanup_datacenters(context)
def setup(context):
setup_datacenters(context)
def validate(context):
return detect_datacenters(context)
| 36.414414
| 93
| 0.700643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,353
| 0.334735
|
6247004e81f0b5ed0a8cf58645c7483019728044
| 2,622
|
py
|
Python
|
chesstab/samples/chessboard.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
chesstab/samples/chessboard.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
chesstab/samples/chessboard.py
|
RogerMarsh/chesstab
|
01d375dc6bf025b621612a84513e55c4640a78ad
|
[
"BSD-3-Clause"
] | null | null | null |
# chessboard.py
# Copyright 2008 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Demonstrate chess board class and methods to draw position on board."""
if __name__ == "__main__":
import tkinter
from pgn_read.core.piece import Piece
from pgn_read.core.constants import (
FEN_WHITE_KING,
FEN_WHITE_QUEEN,
FEN_WHITE_ROOK,
FEN_WHITE_BISHOP,
FEN_WHITE_KNIGHT,
FEN_WHITE_PAWN,
FEN_BLACK_KING,
FEN_BLACK_QUEEN,
FEN_BLACK_ROOK,
FEN_BLACK_BISHOP,
FEN_BLACK_KNIGHT,
FEN_BLACK_PAWN,
)
from ..gui import fonts
from ..gui.board import Board
from ..core.constants import NOPIECE
root = tkinter.Tk()
root.wm_title("Demonstrate Board")
f = fonts.make_chess_fonts(root, preferred_pieces=("Chess Lucena",))
b = Board(root, boardborder=10)
del f
b.get_top_widget().pack(fill=tkinter.BOTH, expand=tkinter.TRUE)
b.get_top_widget().pack_propagate(False)
b.set_board(
{
"a8": Piece(FEN_BLACK_ROOK, "a8"),
"b8": Piece(FEN_BLACK_KNIGHT, "b8"),
"c8": Piece(FEN_BLACK_BISHOP, "c8"),
"d8": Piece(FEN_BLACK_QUEEN, "d8"),
"e8": Piece(FEN_BLACK_KING, "e8"),
"f8": Piece(FEN_BLACK_BISHOP, "f8"),
"g8": Piece(FEN_BLACK_KNIGHT, "g8"),
"h8": Piece(FEN_BLACK_ROOK, "h8"),
"a7": Piece(FEN_BLACK_PAWN, "a7"),
"b7": Piece(FEN_BLACK_PAWN, "b7"),
"c7": Piece(FEN_BLACK_PAWN, "c7"),
"d7": Piece(FEN_BLACK_PAWN, "d7"),
"e7": Piece(FEN_BLACK_PAWN, "e7"),
"f7": Piece(FEN_BLACK_PAWN, "f7"),
"g7": Piece(FEN_BLACK_PAWN, "g7"),
"h7": Piece(FEN_BLACK_PAWN, "h7"),
"a2": Piece(FEN_WHITE_PAWN, "a2"),
"b2": Piece(FEN_WHITE_PAWN, "b2"),
"c2": Piece(FEN_WHITE_PAWN, "c2"),
"d2": Piece(FEN_WHITE_PAWN, "d2"),
"e2": Piece(FEN_WHITE_PAWN, "e2"),
"f2": Piece(FEN_WHITE_PAWN, "f2"),
"g2": Piece(FEN_WHITE_PAWN, "g2"),
"h2": Piece(FEN_WHITE_PAWN, "h2"),
"a1": Piece(FEN_WHITE_ROOK, "a1"),
"b1": Piece(FEN_WHITE_KNIGHT, "b1"),
"c1": Piece(FEN_WHITE_BISHOP, "c1"),
"d1": Piece(FEN_WHITE_QUEEN, "d1"),
"e1": Piece(FEN_WHITE_KING, "e1"),
"f1": Piece(FEN_WHITE_BISHOP, "f1"),
"g1": Piece(FEN_WHITE_KNIGHT, "g1"),
"h1": Piece(FEN_WHITE_ROOK, "h1"),
}
)
del b
root.pack_propagate(False)
root.mainloop()
| 33.615385
| 74
| 0.561022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 452
| 0.172387
|
62491889c9281d49192c898e726bfd3b2dfb6e77
| 731
|
py
|
Python
|
fruit.py
|
mattjraives/citrus
|
dd3cd916b59c932aa490058c4a2c5fc347e89e3c
|
[
"MIT"
] | null | null | null |
fruit.py
|
mattjraives/citrus
|
dd3cd916b59c932aa490058c4a2c5fc347e89e3c
|
[
"MIT"
] | null | null | null |
fruit.py
|
mattjraives/citrus
|
dd3cd916b59c932aa490058c4a2c5fc347e89e3c
|
[
"MIT"
] | null | null | null |
class Fruit:
def __init__(self,name,parents):
self.name = name
self.parents = parents
self.children = []
self.family = []
self.siblings = []
self.node = None ## Link to node object in graph
def find_children(self,basket): # basket is a list of Fruit objects
for fruit in basket:
      if fruit.name != self.name:
if self.name in [parent.name for parent in fruit.parents]:
self.children.append(fruit)
self.family = self.parents + self.children
def find_siblings(self,basket):
for fruit in basket:
      if fruit.name != self.name:
        if set(fruit.parents).isdisjoint(set(self.parents)):
continue
else:
self.siblings.append(fruit)
| 33.227273
| 69
| 0.645691
| 730
| 0.998632
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.090287
|
624ae61e4b1438e943cbf012e0b99192c749fb82
| 3,694
|
py
|
Python
|
Pandas/8_GroupingAndAggregating .py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | 1
|
2021-10-01T09:59:22.000Z
|
2021-10-01T09:59:22.000Z
|
Pandas/8_GroupingAndAggregating .py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | null | null | null |
Pandas/8_GroupingAndAggregating .py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | null | null | null |
# %%
"""
Let's get familiar with Grouping and Aggregating.
Aggregating means combining multiple pieces of data into a single result.
Mean, median and mode are examples of aggregating functions.
"""
import pandas as pd
# %%
df = pd.read_csv(
"developer_survey_2019/survey_results_public.csv", index_col="Respondent")
schema_df = pd.read_csv(
"developer_survey_2019/survey_results_schema.csv", index_col="Column")
# %%
pd.set_option('display.max_columns', 85)
pd.set_option('display.max_rows', 85)
# %%
df.head()
# %%
"""In this column NaN means the respondent skipped this question
and did not provide an answer."""
df["ConvertedComp"].head(15)
# %%
df["ConvertedComp"].median()
# %%
df.median()
# %%
"""df.describe() gives us count, mean, std, min, max and
some quantiles(25%, 50%, 75%)."""
df.describe()
# %%
df["ConvertedComp"].count()
# %%
df["Hobbyist"]
# %%
df["Hobbyist"].value_counts()
# %%
df["SocialMedia"]
# %%
schema_df.loc["SocialMedia"]
# %%
df["SocialMedia"].value_counts()
# %%
"""Percentage form"""
df["SocialMedia"].value_counts(normalize=True)
# %%
"""
grouping our data:
A group by operation involves some combination of splitting up
our object applying a function and then combining those results
1_Splitting
2_Apply function
3_Combining the results
"""
df["Country"]
# %%
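# Toy illustration of split-apply-combine on made-up data (not the survey data):
# the frame is split by "k", the mean is applied to each group, and the results are
# combined into a single Series indexed by group.
pd.DataFrame({"k": ["a", "a", "b"], "v": [1, 2, 3]}).groupby("k")["v"].mean()
# %%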
df["Country"].value_counts()
# %%
df.groupby(["Country"])
# %%
country_grp = df.groupby(["Country"])
# %%
country_grp.get_group("United States")
# %%
"""Finding the most popular socialmedia in each country"""
filt = df["Country"] == "United States"
df.loc[filt]["SocialMedia"].value_counts()
# %%
country_grp["SocialMedia"].value_counts()
# %%
country_grp["SocialMedia"].value_counts().head(50)
# %%
"""Using country_grp is better than the filt approach
because we don't need to rebuild the filter over and over."""
country_grp["SocialMedia"].value_counts().loc["United States"]
# %%
country_grp["ConvertedComp"].median()
# %%
country_grp["ConvertedComp"].median().loc["Germany"]
# %%
"""agg: Aggregating Methods"""
country_grp["ConvertedComp"].agg(["median", "mean"])
# %%
country_grp["ConvertedComp"].agg(["median", "mean"]).loc["Canada"]
# %%
filt = (df["Country"] == "India")
df.loc[filt]["LanguageWorkedWith"]
# %%
df.loc[filt]["LanguageWorkedWith"].str.contains("Python")
# %%
"""
True : 1
False : 0
"""
df.loc[filt]["LanguageWorkedWith"].str.contains("Python").sum()
# %%
"""
It will raise an error.
country_grp["LanguageWorkedWith"].str.contains("Python").sum()
AttributeError: 'SeriesGroupBy' object has no attribute 'str'
"""
country_grp["LanguageWorkedWith"].apply(
lambda x: x.str.contains("Python").sum())
# %%
country_respondents = df["Country"].value_counts()
country_respondents
# %%
country_uses_python = country_grp["LanguageWorkedWith"].apply(
lambda x: x.str.contains("Python").sum())
country_uses_python
# %%
"""Concatenate two columns to make a new dataframe."""
python_df = pd.concat(
[country_respondents, country_uses_python], axis="columns", sort=False)
python_df
# %%
python_df.rename(columns={"Country": "NumRespondants",
"LanguageWorkedWith": "NumKnowsPython"},
inplace=True)
# %%
python_df
# %%
python_df["PctKnowsPython"] = (
python_df["NumKnowsPython"]/python_df["NumRespondants"]*100)
# %%
python_df
# %%
python_df.sort_values(by="PctKnowsPython", ascending=False, inplace=True)
# %%
python_df
# %%
python_df.head(50)
# %%
python_df.loc["Japan"]
# %%
python_df.sort_values(
by=["NumRespondants", "PctKnowsPython"], ascending=False, inplace=True)
# %%
python_df.head(50)
# %%
| 25.475862
| 79
| 0.666486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,127
| 0.575799
|
624cbb34ddb09a80deca0b22d3e463f92b89210a
| 13,077
|
py
|
Python
|
code/cantera_tools.py
|
goldmanm/RMG_isotopes_paper_data
|
234bd5266de71d6ec9179cb3a7ff490cb56ef91a
|
[
"MIT"
] | null | null | null |
code/cantera_tools.py
|
goldmanm/RMG_isotopes_paper_data
|
234bd5266de71d6ec9179cb3a7ff490cb56ef91a
|
[
"MIT"
] | null | null | null |
code/cantera_tools.py
|
goldmanm/RMG_isotopes_paper_data
|
234bd5266de71d6ec9179cb3a7ff490cb56ef91a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import re
import warnings
import copy
import cantera as ct
def run_simulation(solution, times, conditions=None,
condition_type = 'adiabatic-constant-volume',
output_species = True,
output_reactions = True,
atol = 1e-15,
rtol = 1e-9,
temperature_values=None):
"""
This method iterates through the cantera solution object and outputs information
about the simulation as a pandas.DataFrame object.
This method returns a dictionary with the reaction conditions data, species data,
net reaction data, forward/reverse reaction data, and the rate of production
and consumption (or `None` if a variable not specified).
`solution` = Cantera.Solution object
`conditions` = tuple of temperature, pressure, and mole fraction initial
species (will be deprecated. Set parameters before running)
    `times` = an iterable of times at which you would like to store information
`condition_type` = string describing the run type
`output_species` = output a DataFrame of species' concentrations
`output_reactions` = output a DataFrame of net reaction rates
condition_types supported
#########################
'adiabatic-constant-volume' - assumes no heat transfer and no volume change
'constant-temperature-and-pressure' - no solving energy equation or changing
rate constants
'constant-temperature-and-volume' - no solving energy equation but allows
for pressure to change with reactions
'specified-temperature-constant-volume' - the temperature profile specified
`temperature_values`, which corresponds to the
input `times`, alters the temperature right before
the next time step is taken. Constant volume is assumed.
"""
if conditions is not None:
solution.TPX = conditions
if condition_type == 'adiabatic-constant-volume':
reactor = ct.IdealGasReactor(solution)
elif condition_type == 'constant-temperature-and-pressure':
reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
elif condition_type == 'constant-temperature-and-volume':
reactor = ct.IdealGasReactor(solution, energy='off')
elif condition_type == 'specified-temperature-constant-volume':
reactor = ct.IdealGasReactor(solution, energy='off')
if temperature_values is None:
raise AttributeError('Must specify temperature with `temperature_values` parameter')
elif len(times) != len(temperature_values):
raise AttributeError('`times` (len {0}) and `temperature_values` (len {1}) must have the same length.'.format(len(times),len(temperature_values)))
else:
supported_types = ['adiabatic-constant-volume','constant-temperature-and-pressure',
'constant-temperature-and-volume','specified-temperature-constant-volume']
raise NotImplementedError('only {0} are supported. {1} input'.format(supported_types, condition_type))
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
# setup data storage
outputs = {}
outputs['conditions'] = pd.DataFrame()
if output_species:
outputs['species'] = pd.DataFrame()
if output_reactions:
outputs['net_reactions'] = pd.DataFrame()
for time_index, time in enumerate(times):
if condition_type == 'specified-temperature-constant-volume':
solution.TD = temperature_values[time_index], solution.density
reactor = ct.IdealGasReactor(solution, energy='off')
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
if time_index > 0:
simulator.set_initial_time(times[time_index-1])
simulator.advance(time)
# save data
outputs['conditions'] = outputs['conditions'].append(
get_conditions_series(simulator,reactor,solution),
ignore_index = True)
if output_species:
outputs['species'] = outputs['species'].append(
get_species_series(solution),
ignore_index = True)
if output_reactions:
outputs['net_reactions'] = outputs['net_reactions'].append(
get_reaction_series(solution),
ignore_index = True)
# set indexes as time
time_vector = outputs['conditions']['time (s)']
for output in outputs.values():
output.set_index(time_vector,inplace=True)
return outputs
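# Illustrative usage (added for clarity; not part of the original module): a minimal
# sketch of calling run_simulation. It assumes the GRI-Mech 3.0 mechanism shipped with
# Cantera ('gri30.cti' in Cantera 2.x, 'gri30.yaml' in newer releases) and an arbitrary
# stoichiometric H2/air mixture; both are assumptions for the example only.
def _example_run_simulation():
    gas = ct.Solution('gri30.cti')  # assumed mechanism file name
    gas.TPX = 1200.0, ct.one_atm, 'H2:2, O2:1, N2:3.76'
    sample_times = np.logspace(-6, -2, 25)  # 1 us to 10 ms
    results = run_simulation(gas, sample_times,
                             condition_type='adiabatic-constant-volume')
    # each value is a pandas.DataFrame indexed by time
    return results['conditions'], results['species'], results['net_reactions']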
def run_simulation_till_conversion(solution, species, conversion,conditions=None,
condition_type = 'adiabatic-constant-volume',
output_species = True,
output_reactions = True,
skip_data = 150,
atol = 1e-15,
rtol = 1e-9,):
"""
This method iterates through the cantera solution object and outputs information
about the simulation as a pandas.DataFrame object.
This method returns a dictionary with the reaction conditions data, species data,
net reaction data, forward/reverse reaction data, and the rate of production
and consumption (or `None` if a variable not specified) at the specified conversion value.
`solution` = Cantera.Solution object
`conditions` = tuple of temperature, pressure, and mole fraction initial
species
`species` = a string of the species label (or list of strings) to be used in conversion calculations
`conversion` = a float of the fraction conversion to stop the simulation at
`condition_type` = string describing the run type, currently supports
'adiabatic-constant-volume' and 'constant-temperature-and-pressure'
`output_species` = output a DataFrame of species' concentrations
`output_reactions` = output a DataFrame of net reaction rates
`skip_data` = an integer which reduces how often data points are stored;
only roughly every `skip_data`-th step is saved, so storage scales as 1/`skip_data`
"""
if conditions is not None:
solution.TPX = conditions
if condition_type == 'adiabatic-constant-volume':
reactor = ct.IdealGasReactor(solution)
elif condition_type == 'constant-temperature-and-pressure':
reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
else:
raise NotImplementedError("only 'adiabatic-constant-volume' and 'constant-temperature-and-pressure' are supported")
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
# setup data storage
outputs = {}
outputs['conditions'] = pd.DataFrame()
if output_species:
outputs['species'] = pd.DataFrame()
if output_reactions:
outputs['net_reactions'] = pd.DataFrame()
if isinstance(species,str):
target_species_indexes = [solution.species_index(species)]
else: # must be a list or tuple
target_species_indexes = [solution.species_index(s) for s in species]
starting_concentration = sum([solution.concentrations[target_species_index] for target_species_index in target_species_indexes])
proper_conversion = False
new_conversion = 0
skip_count = 1e8
while not proper_conversion:
error_count = 0
while error_count >= 0:
try:
simulator.step()
error_count = -1
except:
error_count += 1
if error_count > 10:
print('Might not be possible to achieve the requested conversion at T={0}, P={1} with concentrations of {2}; reached a conversion of {3} at time {4} s.'.format(solution.T,solution.P,list(zip(solution.species_names,solution.X)), new_conversion,simulator.time))
raise
new_conversion = 1-sum([solution.concentrations[target_species_index] for target_species_index in target_species_indexes])/starting_concentration
if new_conversion > conversion:
proper_conversion = True
# save data
if skip_count > skip_data or proper_conversion:
skip_count = 0
outputs['conditions'] = outputs['conditions'].append(
get_conditions_series(simulator,reactor,solution),
ignore_index = True)
if output_species:
outputs['species'] = outputs['species'].append(
get_species_series(solution),
ignore_index = True)
if output_reactions:
outputs['net_reactions'] = outputs['net_reactions'].append(
get_reaction_series(solution),
ignore_index = True)
skip_count += 1
# set indexes as time
time_vector = outputs['conditions']['time (s)']
for output in outputs.values():
output.set_index(time_vector,inplace=True)
return outputs
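# Illustrative usage (added for clarity; not part of the original module): a sketch of
# stopping the same kind of simulation once half of the fuel has been consumed. The
# mechanism file name and mixture are assumptions, as in the example above.
def _example_run_till_conversion():
    gas = ct.Solution('gri30.cti')  # assumed mechanism file name
    gas.TPX = 1200.0, ct.one_atm, 'H2:2, O2:1, N2:3.76'
    results = run_simulation_till_conversion(gas, species='H2', conversion=0.5)
    # the final row of each DataFrame corresponds to roughly 50% H2 conversion
    return results['conditions'].iloc[-1]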
def get_conditions_series(simulator, reactor, solution,
basics= ['time','temperature','pressure','density','volume','enthalpy','internal energy']):
"""
returns the current conditions of a Solution object contained in a ReactorNet
simulation (simulator) as a pd.Series.
simulator = the ReactorNet object of the simulation
reactor = the Reactor object of the simulation (used for the volume)
solution = solution object to pull values from
basics = a list of state variables to save
The following are enabled for the conditions:
* time
* temperature
* pressure
* density
* volume
* cp (constant pressure heat capacity)
* cv (constant volume heat capacity)
* enthalpy
* internal energy
"""
conditions = pd.Series()
# add regular conditions
if 'time' in basics:
conditions['time (s)'] = simulator.time
if 'temperature' in basics:
conditions['temperature (K)'] = solution.T
if 'pressure' in basics:
conditions['pressure (Pa)'] = solution.P
if 'density' in basics:
conditions['density (kmol/m3)'] = solution.density_mole
if 'volume' in basics:
conditions['volume (m3)'] = reactor.volume
if 'cp' in basics:
conditions['heat capacity, cp (J/kmol/K)'] = solution.cp_mole
if 'cv' in basics:
conditions['heat capacity, cv (J/kmol/K)'] = solution.cv_mole
if 'enthalpy' in basics:
conditions['enthalpy (J/kg)'] = solution.enthalpy_mass
if 'internal energy' in basics:
conditions['internal energy (J/kg)'] = solution.int_energy_mass
return conditions
def get_species_series(solution, species_names = 'all'):
"""
returns a pandas.Series of the desired species' concentrations
solution = the cantera.Solution object for the simulation
species_names = list of species names to be saved (default is all)
"""
series = pd.Series()
if species_names=='all':
species_recorded = solution.species_names
else:
species_recorded = species_names
mole_fractions = solution.mole_fraction_dict()
for name in species_recorded:
try:
series[name] = mole_fractions[name] * solution.density_mole
except KeyError:
series[name] = 0
# sends warning if user typed species incorrectly
if name not in solution.species_names:
warnings.warn('{} is not listed in the mole fraction dictionary and may be misspelled.'.format(name))
return series
def get_reaction_series(solution, reaction_names = 'all'):
"""
returns a pandas.Series of the desired reactions' net rates
solution = the cantera.Solution object for the simulation
reaction_names = list of reaction names to be saved (default is all)
"""
series = pd.Series()
if reaction_names=='all':
reaction_names = solution.reaction_equations()
reaction_rates = __get_rxn_rate_dict(solution.reaction_equations(),solution.net_rates_of_progress)
for name in reaction_names:
try:
series[name] = reaction_rates[name]
except KeyError:
series[name] = 0
warnings.warn('{} is not listed in the reaction names.'.format(name))
return series
def __get_rxn_rate_dict(reaction_equations, net_rates):
"""
makes a dictionary out of the two inputs. If identical reactions are encountered,
called duplicates in Cantera, the method will merge them and sum the rate together
"""
rxn_dict = {}
for equation, rate in zip(reaction_equations, net_rates):
try:
rxn_dict[equation] += rate
except KeyError:
rxn_dict[equation] = rate
return rxn_dict
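# Tiny self-check (added for illustration) of the duplicate-merging behaviour documented
# in __get_rxn_rate_dict: two entries with the same equation string are summed into a
# single net rate. The equations and rates below are made up.
def _example_rate_dict_merge():
    merged = __get_rxn_rate_dict(['A + B <=> C', 'A + B <=> C', 'C <=> D'],
                                 [1.0, 0.5, 2.0])
    assert merged == {'A + B <=> C': 1.5, 'C <=> D': 2.0}
    return merged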
| 44.030303
| 261
| 0.640132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,599
| 0.428156
|
624d1459e0b41f6ddd69ef8b0b14aebd60ee00c3
| 4,495
|
py
|
Python
|
chariquisitor.py
|
strycore/chariquisitor
|
539dcbf5e051222371e747547a8b1e8805db4366
|
[
"WTFPL"
] | 1
|
2017-09-26T09:59:54.000Z
|
2017-09-26T09:59:54.000Z
|
chariquisitor.py
|
strycore/chariquisitor
|
539dcbf5e051222371e747547a8b1e8805db4366
|
[
"WTFPL"
] | null | null | null |
chariquisitor.py
|
strycore/chariquisitor
|
539dcbf5e051222371e747547a8b1e8805db4366
|
[
"WTFPL"
] | null | null | null |
import json
from collections import defaultdict
SEGMENTS = ['workings', 'shinies', 'controls', 'fun']
REVIEWERS = ['venn', 'jordan', 'pedro']
def iter_charis():
with open('charis.json') as charis_file:
charis = json.load(charis_file)
for chari in charis:
if not chari['name']:
continue
if set(chari['scores'].keys()) != set(SEGMENTS):
continue
reviewers_ok = True
for segment in chari['scores']:
if set(chari['scores'][segment].keys()) != set(REVIEWERS):
reviewers_ok = False
break
if not reviewers_ok:
continue
yield chari
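# For reference, a record shape that passes the checks in iter_charis above.
# The title, episode number and scores are invented purely for illustration.
_EXAMPLE_CHARI = {
    'name': 'Example Game', 'episode': 100,
    'scores': {
        'workings': {'venn': 8, 'jordan': 7, 'pedro': 9},
        'shinies': {'venn': 6, 'jordan': 8, 'pedro': 7},
        'controls': {'venn': 9, 'jordan': 8, 'pedro': 8},
        'fun': {'venn': 7, 'jordan': 9, 'pedro': 8},
    },
}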
def get_totals_per_reviewer():
totals = {}
for segment in SEGMENTS:
totals[segment] = defaultdict(int)
for chari in iter_charis():
for segment in SEGMENTS:
for reviewer, score in chari['scores'][segment].items():
totals[segment][reviewer] += score
return totals
def get_totals_per_game():
totals = {}
for chari in iter_charis():
name = "{} ({})".format(chari['name'], chari['episode'])
totals[name] = defaultdict(int)
for segment in SEGMENTS:
for reviewer in REVIEWERS:
reviewer_score = chari['scores'][segment][reviewer]
totals[name][segment] += reviewer_score
totals[name]['all'] += reviewer_score
return totals
def get_fair_score(scores):
return sum([scores[segment] for segment in SEGMENTS]) / 12
def get_lgc_score(scores):
return sum([scores[segment] // 3 for segment in SEGMENTS]) // 4
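# A made-up worked example (not taken from the data) of why the two scores can differ:
# the LGC score rounds down twice (per segment, then overall), so a game whose raw
# segment totals average to exactly 8.0 per reviewer can still come out as a 7.
def _example_score_gap():
    scores = {'workings': 23, 'shinies': 25, 'controls': 22, 'fun': 26}
    assert get_fair_score(scores) == 8.0  # (23 + 25 + 22 + 26) / 12
    assert get_lgc_score(scores) == 7     # (7 + 8 + 7 + 8) // 4
    return get_fair_score(scores) - get_lgc_score(scores)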
def print_reviewers_stats():
grand_totals = defaultdict(int)
for segment, reviews in get_totals_per_reviewer().items():
verdicts = {
'workings': "%s has the best machine! %s and %s can only try to keep up!",
'shinies': "%s appreciates the arts more than %s or %s!",
'controls': "%s is fully in control! %s and %s are just clumsy!",
'fun': "%s knows how to have a good time! Unlike those goths %s and %s!",
'all': "Overall, %s likes video games more than %s or %s."
}
winners = tuple([
reviewer.capitalize()
for reviewer in reversed(
sorted(reviews.keys(), key=(lambda key: reviews[key]))
)
])
print(verdicts[segment] % winners)
print(', '.join(["{}: {}".format(reviewer.capitalize(), reviews[reviewer]) for reviewer in REVIEWERS]))
for reviewer, score in reviews.items():
grand_totals[reviewer] += score
print(verdicts['all'] % tuple([
reviewer.capitalize()
for reviewer in reversed(
sorted(grand_totals.keys(), key=(lambda key: grand_totals[key]))
)
]))
print(', '.join(["{}: {}".format(reviewer.capitalize(), grand_totals[reviewer]) for reviewer in REVIEWERS]))
def print_top_games(games_totals, top=True, size=10):
for segment in SEGMENTS + ['all']:
print("\n=== %s %i GAMES IN %s SEGMENT ===" % ('TOP' if top else 'BOTTOM', size, segment.upper()))
for name in [
name for name in sorted(games_totals.keys(), key=(lambda key: games_totals[key][segment]), reverse=top)
][:size]:
print("{}: {}".format(name, games_totals[name][segment]))
def print_score_fairness(games_totals, fair=True, size=10):
score_fairness = {}
for game, scores in games_totals.items():
fair_score = get_fair_score(scores)
lgc_score = get_lgc_score(scores)
score_fairness[game] = {
'fair': fair_score,
'lgc': lgc_score,
'diff': fair_score - lgc_score,
'game': game
}
print("\n=== %i %s FAIRLY REVIEWED GAMES ===" % (size, 'MOST' if fair else 'LEAST'))
for game in [
game for game in sorted(score_fairness.keys(),
key=(lambda game: score_fairness[game]['diff']),
reverse=not fair)
][:size]:
print("%(game)s -- Fair score: %(fair)0.2f; LGC: %(lgc)i" % score_fairness[game])
if __name__ == '__main__':
games_totals = get_totals_per_game()
print("%i full reviews available" % len(games_totals))
print_top_games(games_totals)
print_top_games(games_totals, top=False)
print_score_fairness(games_totals)
print_score_fairness(games_totals, fair=False)
print()
print_reviewers_stats()
| 34.844961
| 115
| 0.588877
| 0
| 0
| 526
| 0.117019
| 0
| 0
| 0
| 0
| 718
| 0.159733
|
624d23bb02f0a1700a789fe03a84f9cdb053398e
| 2,970
|
py
|
Python
|
dense_main.py
|
Ale-Ba2lero/CNN-FromScratch
|
8337db42f3aa0eae878a2724f382039c27498d70
|
[
"MIT"
] | 1
|
2021-09-17T17:06:16.000Z
|
2021-09-17T17:06:16.000Z
|
dense_main.py
|
Ale-Ba2lero/CNN-FromScratch
|
8337db42f3aa0eae878a2724f382039c27498d70
|
[
"MIT"
] | null | null | null |
dense_main.py
|
Ale-Ba2lero/CNN-FromScratch
|
8337db42f3aa0eae878a2724f382039c27498d70
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale
import matplotlib.pyplot as plt
from model.loss import CategoricalCrossEntropy
from model.layers.dense import Dense
from model.layers.relu import LeakyReLU
from model.layers.softmax import Softmax
from model.neural_network import NeuralNetwork
def spiral_data(points, classes):
X = np.zeros((points * classes, 2))
y = np.zeros(points * classes, dtype='uint8')
for class_number in range(classes):
ix = range(points * class_number, points * (class_number + 1))
r = np.linspace(0.0, 1, points) # radius
t = np.linspace(class_number * 4, (class_number + 1) * 4, points) + np.random.randn(points) * 0.2
X[ix] = np.c_[r * np.sin(t * 2.5), r * np.cos(t * 2.5)]
y[ix] = class_number
return X, y
# ------------------------------------ DATASET
N = 200 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X, y = spiral_data(points=N, classes=K)
print("Scale values")
print('Min: %.3f, Max: %.3f' % (X.min(), X.max()))
X = minmax_scale(X, feature_range=(0, 1))
print('Min: %.3f, Max: %.3f' % (X.min(), X.max()))
# plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
# plt.show()
# ------------------------------------ SPLIT DATA
"""X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.1,
random_state=65)"""
# ------------------------------------ HYPER PARAMETERS
STEP_SIZE = 1e-1
N_EPOCHS = 2000
BATCH_SIZE = 32
# ------------------------------------ BUILD THE MODEL
nn = NeuralNetwork([
Dense(200), LeakyReLU(),
Dense(100), LeakyReLU(),
Dense(50), LeakyReLU(),
Dense(K), Softmax()
], CategoricalCrossEntropy())
# ------------------------------------ FIT THE MODEL
nn.train(dataset=X,
labels=y,
epochs=N_EPOCHS,
batch_size=BATCH_SIZE,
step_size=STEP_SIZE)
# ------------------------------------ EVALUATE THE MODEL
train_loss = nn.metrics.history['train_loss']
val_loss = nn.metrics.history['val_loss']
epochs = range(0, N_EPOCHS)
plt.plot(epochs, train_loss, 'g', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
print(f"train loss: {train_loss}")
print(f"val loss: {val_loss}")
train_acc = nn.metrics.history['train_acc']
val_acc = nn.metrics.history['val_acc']
epochs = range(0, N_EPOCHS)
plt.plot(epochs, train_acc, 'g', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
print(f"train acc: {train_acc}")
print(f"val acc: {val_acc}")
| 31.595745
| 105
| 0.596633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,094
| 0.36835
|
624dc465ea933ab310312f6e6dd327e58c7d9b64
| 3,156
|
py
|
Python
|
Tools/Scripts/webkitpy/common/interrupt_debugging.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 6
|
2021-07-05T16:09:39.000Z
|
2022-03-06T22:44:42.000Z
|
Tools/Scripts/webkitpy/common/interrupt_debugging.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 7
|
2022-03-15T13:25:39.000Z
|
2022-03-15T13:25:44.000Z
|
Tools/Scripts/webkitpy/common/interrupt_debugging.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import linecache
import logging
import os
import signal
import sys
_log = logging.getLogger(__name__)
def log_stack_trace(frame, file):
file.write('Traceback (most recent call last):\n')
def func(frame):
if not frame:
return
func(frame.f_back)
file.write(' File "{}", line {}, in {}\n'.format(frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name))
file.write(' {}\n'.format(linecache.getline(frame.f_code.co_filename, frame.f_lineno).lstrip().rstrip()))
func(frame)
class StackTraceFileContext(object):
def __init__(self, output_file=None):
self.file_name = None
if output_file:
self.file_name = os.path.join(os.path.dirname(output_file), '{}-{}'.format(os.getpid(), os.path.basename(output_file)))
self.file = sys.stderr
def __enter__(self):
if self.file_name:
self.file = open(self.file_name, 'w')
_log.critical('Stack trace saved to {}'.format(self.file_name))
else:
self.file.write('\n')
return self.file
def __exit__(self, *args):
if self.file_name:
self.file.close()
self.file = sys.stderr
def log_stack_trace_on_term(output_file=None):
def handler(signum, frame):
with StackTraceFileContext(output_file=output_file) as file:
file.write('SIGTERM signal received\n')
log_stack_trace(frame, file)
exit(-1)
signal.signal(signal.SIGTERM, handler)
def log_stack_trace_on_ctrl_c(output_file=None):
def handler(signum, frame):
with StackTraceFileContext(output_file=output_file) as file:
file.write('CTRL+C received\n')
log_stack_trace(frame, file)
raise KeyboardInterrupt
signal.signal(signal.SIGINT, handler)
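# Illustrative usage only (not part of the original WebKit module): a long-running
# script can register both handlers so that SIGTERM or Ctrl+C dumps the Python stack.
# The output path is an assumed example; omit it to write the trace to stderr instead.
def _example_register_handlers():
    log_stack_trace_on_term(output_file='/tmp/webkitpy-stack.txt')
    log_stack_trace_on_ctrl_c(output_file='/tmp/webkitpy-stack.txt')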
| 36.697674
| 131
| 0.701521
| 667
| 0.211343
| 0
| 0
| 0
| 0
| 0
| 0
| 1,469
| 0.465463
|
624ded040b53f88852fd60dd292b8fb6fb23b421
| 1,164
|
py
|
Python
|
django_watermark_images/items/migrations/0001_initial.py
|
abarto/django-watermark-images
|
5f01c8f0da7359c4d96650029d5beb70938fbe47
|
[
"MIT"
] | 11
|
2016-12-05T01:12:46.000Z
|
2021-05-05T21:41:14.000Z
|
django_watermark_images/items/migrations/0001_initial.py
|
abarto/django-watermark-images
|
5f01c8f0da7359c4d96650029d5beb70938fbe47
|
[
"MIT"
] | 1
|
2020-11-30T13:26:06.000Z
|
2020-12-05T11:44:59.000Z
|
django_watermark_images/items/migrations/0001_initial.py
|
abarto/django-watermark-images
|
5f01c8f0da7359c4d96650029d5beb70938fbe47
|
[
"MIT"
] | 3
|
2017-02-07T03:36:42.000Z
|
2020-08-10T00:16:04.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-10 16:15
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import items.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='title')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('image', models.ImageField(upload_to=items.models.image_upload_to, verbose_name='original image')),
],
options={
'abstract': False,
},
),
]
| 35.272727
| 124
| 0.630584
| 953
| 0.818729
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.166667
|
62508c428e962c8534fc320110d894864b15ebbe
| 11,380
|
py
|
Python
|
cadnano/views/documentwindow.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | 1
|
2022-03-27T14:37:32.000Z
|
2022-03-27T14:37:32.000Z
|
cadnano/views/documentwindow.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | null | null | null |
cadnano/views/documentwindow.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | 1
|
2021-01-22T02:29:38.000Z
|
2021-01-22T02:29:38.000Z
|
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QSettings
from PyQt5.QtCore import QPoint, QSize
from PyQt5.QtWidgets import QGraphicsScene
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtWidgets import QAction, QApplication, QWidget
from cadnano import app
from cadnano.gui.mainwindow import ui_mainwindow
from cadnano.proxies.cnenum import OrthoViewType
from cadnano.views.gridview.gridrootitem import GridRootItem
from cadnano.views.gridview.tools.gridtoolmanager import GridToolManager
from cadnano.views.pathview.colorpanel import ColorPanel
from cadnano.views.pathview.pathrootitem import PathRootItem
from cadnano.views.pathview.tools.pathtoolmanager import PathToolManager
from cadnano.views.sliceview.slicerootitem import SliceRootItem
from cadnano.views.sliceview.tools.slicetoolmanager import SliceToolManager
# from PyQt5.QtOpenGL import QGLWidget
# # check out https://github.com/baoboa/pyqt5/tree/master/examples/opengl
# # for an example of the QOpenGlWidget added in Qt 5.4
class DocumentWindow(QMainWindow, ui_mainwindow.Ui_MainWindow):
"""DocumentWindow subclasses QMainWindow and Ui_MainWindow. It performs
some initialization operations that must be done in code rather than
using Qt Creator.
Attributes:
controller (DocumentController):
"""
def __init__(self, parent=None, doc_ctrlr=None):
super(DocumentWindow, self).__init__(parent)
self.controller = doc_ctrlr
doc = doc_ctrlr.document()
self.setupUi(self)
self.settings = QSettings("cadnano.org", "cadnano2.5")
# Appearance pref
if not app().prefs.show_icon_labels:
self.main_toolbar.setToolButtonStyle(Qt.ToolButtonIconOnly)
# Outliner & PropertyEditor setup
self.outliner_widget.configure(window=self, document=doc)
self.property_widget.configure(window=self, document=doc)
self.property_buttonbox.setVisible(False)
self.tool_managers = None # initialize
self._initSliceview(doc)
self._initGridview(doc)
self._initPathview(doc)
self._initPathviewToolbar()
self._initEditMenu()
self.path_dock_widget.setTitleBarWidget(QWidget())
self.grid_dock_widget.setTitleBarWidget(QWidget())
self.slice_dock_widget.setTitleBarWidget(QWidget())
self.inspector_dock_widget.setTitleBarWidget(QWidget())
self.setCentralWidget(None)
if app().prefs.orthoview_style == OrthoViewType.SLICE:
self.splitDockWidget(self.slice_dock_widget, self.path_dock_widget, Qt.Horizontal)
elif app().prefs.orthoview_style == OrthoViewType.GRID:
self.splitDockWidget(self.grid_dock_widget, self.path_dock_widget, Qt.Horizontal)
self._restoreGeometryandState()
self._finishInit()
doc.setViewNames(['slice', 'path', 'inspector'])
# end def
def document(self):
return self.controller.document()
def destroyWin(self):
self.settings.beginGroup("MainWindow")
self.settings.setValue("state", self.saveState())
self.settings.endGroup()
for mgr in self.tool_managers:
mgr.destroy()
self.controller = None
### ACCESSORS ###
def undoStack(self):
return self.controller.undoStack()
def selectedInstance(self):
return self.controller.document().selectedInstance()
def activateSelection(self, isActive):
self.path_graphics_view.activateSelection(isActive)
self.slice_graphics_view.activateSelection(isActive)
self.grid_graphics_view.activateSelection(isActive)
### EVENT HANDLERS ###
def focusInEvent(self):
"""Handle an OS focus change into cadnano."""
app().undoGroup.setActiveStack(self.controller.undoStack())
def moveEvent(self, event):
"""Handle the moving of the cadnano window itself.
Reimplemented to save state on move.
"""
self.settings.beginGroup("MainWindow")
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("pos", self.pos())
self.settings.endGroup()
def resizeEvent(self, event):
"""Handle the resizing of the cadnano window itself.
Reimplemented to save state on resize.
"""
self.settings.beginGroup("MainWindow")
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("size", self.size())
self.settings.endGroup()
QWidget.resizeEvent(self, event)
def changeEvent(self, event):
QWidget.changeEvent(self, event)
# end def
### DRAWING RELATED ###
### PRIVATE HELPER METHODS ###
def _restoreGeometryandState(self):
self.settings.beginGroup("MainWindow")
geometry = self.settings.value("geometry")
state = self.settings.value("geometry")
if geometry is not None:
result = self.restoreGeometry(geometry)
if result is False:
print("MainWindow.restoreGeometry() failed.")
else:
print("Setting default MainWindow size: 1100x800")
self.resize(self.settings.value("size", QSize(1100, 800)))
self.move(self.settings.value("pos", QPoint(200, 200)))
self.inspector_dock_widget.close()
self.action_inspector.setChecked(False)
state = self.settings.value("state")
if state is not None:
result = self.restoreState(state)
if result is False:
print("MainWindow.restoreState() failed.")
self.settings.endGroup()
# end def
def _initGridview(self, doc):
"""Initializes Grid View.
Args:
doc (cadnano.document.Document): The Document corresponding to
the design
Returns: None
"""
self.grid_scene = QGraphicsScene(parent=self.grid_graphics_view)
self.grid_root = GridRootItem(rect=self.grid_scene.sceneRect(),
parent=None,
window=self,
document=doc)
self.grid_root.setFlag(QGraphicsItem.ItemHasNoContents)
self.grid_scene.addItem(self.grid_root)
self.grid_scene.setItemIndexMethod(QGraphicsScene.NoIndex)
assert self.grid_root.scene() == self.grid_scene
self.grid_graphics_view.setScene(self.grid_scene)
self.grid_graphics_view.scene_root_item = self.grid_root
self.grid_graphics_view.setName("GridView")
self.grid_tool_manager = GridToolManager(self, self.grid_root)
# end def
def _initPathview(self, doc):
"""Initializes Path View.
Args:
doc (cadnano.document.Document): The Document corresponding to
the design
Returns: None
"""
self.path_scene = QGraphicsScene(parent=self.path_graphics_view)
self.path_root = PathRootItem(rect=self.path_scene.sceneRect(),
parent=None,
window=self,
document=doc)
self.path_root.setFlag(QGraphicsItem.ItemHasNoContents)
self.path_scene.addItem(self.path_root)
self.path_scene.setItemIndexMethod(QGraphicsScene.NoIndex)
assert self.path_root.scene() == self.path_scene
self.path_graphics_view.setScene(self.path_scene)
self.path_graphics_view.scene_root_item = self.path_root
self.path_graphics_view.setScaleFitFactor(0.7)
self.path_graphics_view.setName("PathView")
# end def
def _initPathviewToolbar(self):
"""Initializes Path View Toolbar.
Returns: None
"""
self.path_color_panel = ColorPanel()
self.path_graphics_view.toolbar = self.path_color_panel # HACK for customqgraphicsview
self.path_scene.addItem(self.path_color_panel)
self.path_tool_manager = PathToolManager(self, self.path_root)
self.slice_tool_manager.path_tool_manager = self.path_tool_manager
self.path_tool_manager.slice_tool_manager = self.slice_tool_manager
self.grid_tool_manager.path_tool_manager = self.path_tool_manager
self.path_tool_manager.grid_tool_manager = self.grid_tool_manager
self.tool_managers = (self.path_tool_manager, self.slice_tool_manager, self.grid_tool_manager)
self.insertToolBarBreak(self.main_toolbar)
self.path_graphics_view.setupGL()
self.slice_graphics_view.setupGL()
self.grid_graphics_view.setupGL()
# end def
def _initSliceview(self, doc):
"""Initializes Slice View.
Args:
doc (cadnano.document.Document): The Document corresponding to
the design
Returns: None
"""
self.slice_scene = QGraphicsScene(parent=self.slice_graphics_view)
self.slice_root = SliceRootItem(rect=self.slice_scene.sceneRect(),
parent=None,
window=self,
document=doc)
self.slice_root.setFlag(QGraphicsItem.ItemHasNoContents)
self.slice_scene.addItem(self.slice_root)
self.slice_scene.setItemIndexMethod(QGraphicsScene.NoIndex)
assert self.slice_root.scene() == self.slice_scene
self.slice_graphics_view.setScene(self.slice_scene)
self.slice_graphics_view.scene_root_item = self.slice_root
self.slice_graphics_view.setName("SliceView")
self.slice_graphics_view.setScaleFitFactor(0.7)
self.slice_tool_manager = SliceToolManager(self, self.slice_root)
# end def
def _initEditMenu(self):
"""Initializes the Edit menu
Returns: None
"""
self.actionUndo = self.controller.undoStack().createUndoAction(self)
self.actionRedo = self.controller.undoStack().createRedoAction(self)
self.actionUndo.setText(QApplication.translate("MainWindow", "Undo", None))
self.actionUndo.setShortcut(QApplication.translate("MainWindow", "Ctrl+Z", None))
self.actionRedo.setText(QApplication.translate("MainWindow", "Redo", None))
self.actionRedo.setShortcut(QApplication.translate("MainWindow", "Ctrl+Shift+Z", None))
self.sep = QAction(self)
self.sep.setSeparator(True)
self.menu_edit.insertAction(self.sep, self.actionRedo)
self.menu_edit.insertAction(self.actionRedo, self.actionUndo)
# self.main_splitter.setSizes([400, 400, 180]) # balance main_splitter size
self.statusBar().showMessage("")
# end def
def _finishInit(self):
"""
Handle the dockwindow visibility and action checked status.
The console visibility is explicitly stored in the settings file,
since it doesn't seem to work if we treat it like a normal dock widget.
"""
inspector_visible = self.inspector_dock_widget.isVisibleTo(self)
self.action_inspector.setChecked(inspector_visible)
path_visible = self.path_dock_widget.isVisibleTo(self)
self.action_path.setChecked(path_visible)
slice_visible = self.slice_dock_widget.isVisibleTo(self)
self.action_slice.setChecked(slice_visible)
# end def
# end class
| 39.79021
| 102
| 0.675747
| 10,319
| 0.906766
| 0
| 0
| 0
| 0
| 0
| 0
| 2,307
| 0.202724
|
6250a79068b77c4892032d50b57910bd5cac5d15
| 42,245
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/ospfv3_c029fd7cd4a9e9897b7b4e4547458751.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/ospfv3_c029fd7cd4a9e9897b7b4e4547458751.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/ospfv3_c029fd7cd4a9e9897b7b4e4547458751.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Ospfv3(Base):
"""Ospfv3 Interface level Configuration
The Ospfv3 class encapsulates a list of ospfv3 resources that are managed by the user.
A list of resources can be retrieved from the server using the Ospfv3.find() method.
The list can be managed by using the Ospfv3.add() and Ospfv3.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'ospfv3'
_SDM_ATT_MAP = {
'Active': 'active',
'AdjSID': 'adjSID',
'AreaId': 'areaId',
'AreaIdIp': 'areaIdIp',
'AuthAlgo': 'authAlgo',
'BFlag': 'bFlag',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DeadInterval': 'deadInterval',
'DemandCircuit': 'demandCircuit',
'DescriptiveName': 'descriptiveName',
'EnableAdjSID': 'enableAdjSID',
'EnableAuthentication': 'enableAuthentication',
'EnableBfdRegistration': 'enableBfdRegistration',
'EnableFastHello': 'enableFastHello',
'EnableIgnoreDbDescMtu': 'enableIgnoreDbDescMtu',
'Errors': 'errors',
'ExternalCapability': 'externalCapability',
'GFlag': 'gFlag',
'HelloInterval': 'helloInterval',
'HelloMultiplier': 'helloMultiplier',
'InstanceId': 'instanceId',
'Key': 'key',
'LFlag': 'lFlag',
'LinkMetric': 'linkMetric',
'LocalRouterID': 'localRouterID',
'Multiplier': 'multiplier',
'Name': 'name',
'NetworkType': 'networkType',
'NssaCapability': 'nssaCapability',
'Ospfv3IfaceState': 'ospfv3IfaceState',
'Ospfv3NeighborState': 'ospfv3NeighborState',
'PFlag': 'pFlag',
'Priority': 'priority',
'Router': 'router',
'SaId': 'saId',
'SessionInfo': 'sessionInfo',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TypeAreaId': 'typeAreaId',
'V6': 'v6',
'VFlag': 'vFlag',
'Weight': 'weight',
}
def __init__(self, parent):
super(Ospfv3, self).__init__(parent)
@property
def Connector(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def LearnedInfo(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo
return LearnedInfo(self)
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AdjSID(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): An Adjacency Segment Identifier (Adj-SID) represents a router adjacency in Segment Routing
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdjSID']))
@property
def AreaId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): OSPFv3 Area ID for a non-connected interface, displayed in Interger format
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AreaId']))
@property
def AreaIdIp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): OSPFv3 Area ID for a non-connected interface, displayed in IP Address format
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AreaIdIp']))
@property
def AuthAlgo(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Algorithms
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthAlgo']))
@property
def BFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): B Flag: Backup Flag: If set, the Adj-SID refers to an adjacency that is eligible for protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BFlag']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DeadInterval(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Dead Interval
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeadInterval']))
@property
def DemandCircuit(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 5
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DemandCircuit']))
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableAdjSID(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Makes the Adjacency Segment Identifier (Adj-SID) available
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAdjSID']))
@property
def EnableAuthentication(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Authentication
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAuthentication']))
@property
def EnableBfdRegistration(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable BFD Registration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBfdRegistration']))
@property
def EnableFastHello(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Fast Hello
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableFastHello']))
@property
def EnableIgnoreDbDescMtu(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Ignore DB-Desc MTU
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableIgnoreDbDescMtu']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def ExternalCapability(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 1
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExternalCapability']))
@property
def GFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): G-Flag: Group Flag: If set, the G-Flag indicates that the Adj-SID refers to a group of adjacencies where it may be assigned
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GFlag']))
@property
def HelloInterval(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Hello Interval
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HelloInterval']))
@property
def HelloMultiplier(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Hello Multiplier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HelloMultiplier']))
@property
def InstanceId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Instance ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InstanceId']))
@property
def Key(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Key
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Key']))
@property
def LFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): L-Flag: Local Flag. If set, then the value/index carried by the SID has local significance
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LFlag']))
@property
def LinkMetric(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Link Metric
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkMetric']))
@property
def LocalRouterID(self):
"""
Returns
-------
- list(str): Router ID
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalRouterID'])
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NetworkType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Network Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NetworkType']))
@property
def NssaCapability(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 3
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NssaCapability']))
@property
def Ospfv3IfaceState(self):
"""
Returns
-------
- list(str[backup | down | dr | drOther | pointToPoint | unrecognized | waiting]): Logs additional information about the Interface State
"""
return self._get_attribute(self._SDM_ATT_MAP['Ospfv3IfaceState'])
@property
def Ospfv3NeighborState(self):
"""
Returns
-------
- list(str[attempt | down | exchange | exStart | full | init | loading | multiNeighbor | none | twoWay]): Logs additional information about the Neighbor State
"""
return self._get_attribute(self._SDM_ATT_MAP['Ospfv3NeighborState'])
@property
def PFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): P-Flag:Persistent Flag: If set, the SID is persistently allocated. The SID value remains consistent across router restart and session/interface flap
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PFlag']))
@property
def Priority(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Priority (when DR/BDR)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Priority']))
@property
def Router(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 4
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Router']))
@property
def SaId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Security Association ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SaId']))
@property
def SessionInfo(self):
"""
Returns
-------
- list(str[ifaceSessInfoAllNbrIn2Way | ifaceSessInfoAllNbrInattempt | ifaceSessInfoAllNbrInDown | ifaceSessInfoAllNbrInExchange | ifaceSessInfoAllNbrInExStart | ifaceSessInfoAllNbrInInit | ifaceSessInfoAllNbrInLoading | ifaceSessInfoFsmNotStarted | ifaceSessInfoSameNbrId | iPAddressNotRcvd | none]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TypeAreaId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Area ID Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeAreaId']))
@property
def V6(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Option bit 0
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['V6']))
@property
def VFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): V-Flag: Value flag. If set, then the SID carries an absolute value label value
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VFlag']))
@property
def Weight(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Weight of the SID for the purpose of load balancing
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Weight']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Updates ospfv3 resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new ospfv3 resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved ospfv3 resources using find and the newly added ospfv3 resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ospfv3 resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, LocalRouterID=None, Multiplier=None, Name=None, Ospfv3IfaceState=None, Ospfv3NeighborState=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves ospfv3 resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ospfv3 resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ospfv3 resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LocalRouterID (list(str)): Router ID
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- Ospfv3IfaceState (list(str[backup | down | dr | drOther | pointToPoint | unrecognized | waiting])): Logs additional information about the Interface State
- Ospfv3NeighborState (list(str[attempt | down | exchange | exStart | full | init | loading | multiNeighbor | none | twoWay])): Logs additional information about the Neighbor State
- SessionInfo (list(str[ifaceSessInfoAllNbrIn2Way | ifaceSessInfoAllNbrInattempt | ifaceSessInfoAllNbrInDown | ifaceSessInfoAllNbrInExchange | ifaceSessInfoAllNbrInExStart | ifaceSessInfoAllNbrInInit | ifaceSessInfoAllNbrInLoading | ifaceSessInfoFsmNotStarted | ifaceSessInfoSameNbrId | iPAddressNotRcvd | none])): Logs additional information about the session state
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching ospfv3 resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ospfv3 data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ospfv3 resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AdjSID=None, AreaId=None, AreaIdIp=None, AuthAlgo=None, BFlag=None, DeadInterval=None, DemandCircuit=None, EnableAdjSID=None, EnableAuthentication=None, EnableBfdRegistration=None, EnableFastHello=None, EnableIgnoreDbDescMtu=None, ExternalCapability=None, GFlag=None, HelloInterval=None, HelloMultiplier=None, InstanceId=None, Key=None, LFlag=None, LinkMetric=None, NetworkType=None, NssaCapability=None, PFlag=None, Priority=None, Router=None, SaId=None, TypeAreaId=None, V6=None, VFlag=None, Weight=None):
"""Base class infrastructure that gets a list of ospfv3 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AdjSID (str): optional regex of adjSID
- AreaId (str): optional regex of areaId
- AreaIdIp (str): optional regex of areaIdIp
- AuthAlgo (str): optional regex of authAlgo
- BFlag (str): optional regex of bFlag
- DeadInterval (str): optional regex of deadInterval
- DemandCircuit (str): optional regex of demandCircuit
- EnableAdjSID (str): optional regex of enableAdjSID
- EnableAuthentication (str): optional regex of enableAuthentication
- EnableBfdRegistration (str): optional regex of enableBfdRegistration
- EnableFastHello (str): optional regex of enableFastHello
- EnableIgnoreDbDescMtu (str): optional regex of enableIgnoreDbDescMtu
- ExternalCapability (str): optional regex of externalCapability
- GFlag (str): optional regex of gFlag
- HelloInterval (str): optional regex of helloInterval
- HelloMultiplier (str): optional regex of helloMultiplier
- InstanceId (str): optional regex of instanceId
- Key (str): optional regex of key
- LFlag (str): optional regex of lFlag
- LinkMetric (str): optional regex of linkMetric
- NetworkType (str): optional regex of networkType
- NssaCapability (str): optional regex of nssaCapability
- PFlag (str): optional regex of pFlag
- Priority (str): optional regex of priority
- Router (str): optional regex of router
- SaId (str): optional regex of saId
- TypeAreaId (str): optional regex of typeAreaId
- V6 (str): optional regex of v6
- VFlag (str): optional regex of vFlag
- Weight (str): optional regex of weight
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def ClearAllLearnedInfo(self, *args, **kwargs):
"""Executes the clearAllLearnedInfo operation on the server.
Clear All Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
clearAllLearnedInfo(SessionIndices=list)
----------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
clearAllLearnedInfo(SessionIndices=string)
------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearAllLearnedInfo', payload=payload, response_object=None)
def ClearAllLearnedInfoInClient(self, *args, **kwargs):
"""Executes the clearAllLearnedInfoInClient operation on the server.
Clears ALL routes from GUI grid for the selected OSPFv3 router.
clearAllLearnedInfoInClient(Arg2=list)list
------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearAllLearnedInfoInClient', payload=payload, response_object=None)
def GetBasicLearnedInfo(self, *args, **kwargs):
"""Executes the getBasicLearnedInfo operation on the server.
Get Basic Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getBasicLearnedInfo(SessionIndices=list)
----------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getBasicLearnedInfo(SessionIndices=string)
------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getBasicLearnedInfo(Arg2=list)list
----------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getBasicLearnedInfo', payload=payload, response_object=None)
def GetDetailedLearnedInfo(self, *args, **kwargs):
"""Executes the getDetailedLearnedInfo operation on the server.
Get Detailed Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getDetailedLearnedInfo(SessionIndices=list)
-------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getDetailedLearnedInfo(SessionIndices=string)
---------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getDetailedLearnedInfo(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getDetailedLearnedInfo', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def ResumeHello(self, *args, **kwargs):
"""Executes the resumeHello operation on the server.
Resume sending OSPFv3 Hellos
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
resumeHello(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
resumeHello(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumeHello', payload=payload, response_object=None)
def Resumehello(self, *args, **kwargs):
"""Executes the resumehello operation on the server.
Starts the protocol state machine for the given protocol session instances.
resumehello(Arg2=list)list
--------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumehello', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def StopHello(self, *args, **kwargs):
"""Executes the stopHello operation on the server.
Stop sending OSPFv3 Hellos
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stopHello(SessionIndices=list)
------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stopHello(SessionIndices=string)
--------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopHello', payload=payload, response_object=None)
def Stophello(self, *args, **kwargs):
"""Executes the stophello operation on the server.
Stops the protocol state machine for the given protocol session instances.
stophello(Arg2=list)list
------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stophello', payload=payload, response_object=None)
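# Illustrative sketch (not part of the generated API above): it assumes an
# `ospfv3` instance of this class has already been obtained from a live
# ixnetwork_restpy session; the navigation path used to retrieve it is
# deployment-specific and deliberately omitted here.
def _demo_ospfv3_session_control(ospfv3):
    ospfv3.Start()                                        # promote sessions to the negotiated state
    ospfv3.GetBasicLearnedInfo(SessionIndices=[1, 2, 3])  # request learned info for selected sessions
    ospfv3.StopHello(SessionIndices="1-3")                # suspend Hello transmission
    ospfv3.ResumeHello(SessionIndices="1-3")              # resume Hello transmission
    ospfv3.Stop()                                         # demote sessions back again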
| 41.951341
| 565
| 0.639981
| 41,059
| 0.971926
| 0
| 0
| 15,942
| 0.37737
| 0
| 0
| 28,734
| 0.680175
|
6251e5f75335276f9c6c9626a85c2811646b3967
| 1,970
|
py
|
Python
|
train/loss/mss_loss.py
|
jdasam/ddsp-pytorch
|
cefa59881331e0f76eb073317a311c867e331ac2
|
[
"MIT"
] | 88
|
2020-02-26T16:37:53.000Z
|
2022-03-16T23:27:17.000Z
|
train/loss/mss_loss.py
|
hihunjin/my_ddsp-pytorch
|
2f7f9222b20ba34b3976a8f78c8efa696b4665c5
|
[
"MIT"
] | 3
|
2020-07-25T05:03:17.000Z
|
2022-03-23T17:37:38.000Z
|
train/loss/mss_loss.py
|
hihunjin/my_ddsp-pytorch
|
2f7f9222b20ba34b3976a8f78c8efa696b4665c5
|
[
"MIT"
] | 17
|
2020-06-03T09:11:10.000Z
|
2021-11-25T10:24:25.000Z
|
"""
Implementation of Multi-Scale Spectral Loss as described in DDSP,
which is originally suggested in NSF (Wang et al., 2019)
"""
import torch
import torch.nn as nn
import torchaudio
import torch.nn.functional as F
class SSSLoss(nn.Module):
"""
Single-scale Spectral Loss.
"""
def __init__(self, n_fft, alpha=1.0, overlap=0.75, eps=1e-7):
super().__init__()
self.n_fft = n_fft
self.alpha = alpha
self.eps = eps
        self.hop_length = int(n_fft * (1 - overlap))  # e.g. 25% of n_fft at the default 75% overlap
self.spec = torchaudio.transforms.Spectrogram(n_fft=self.n_fft, hop_length=self.hop_length)
def forward(self, x_pred, x_true):
S_true = self.spec(x_true)
S_pred = self.spec(x_pred)
linear_term = F.l1_loss(S_pred, S_true)
log_term = F.l1_loss((S_true + self.eps).log2(), (S_pred + self.eps).log2())
loss = linear_term + self.alpha * log_term
return loss
class MSSLoss(nn.Module):
"""
Multi-scale Spectral Loss.
Usage ::
mssloss = MSSLoss([2048, 1024, 512, 256], alpha=1.0, overlap=0.75)
mssloss(y_pred, y_gt)
input(y_pred, y_gt) : two of torch.tensor w/ shape(batch, 1d-wave)
output(loss) : torch.tensor(scalar)
"""
def __init__(self, n_ffts: list, alpha=1.0, overlap=0.75, eps=1e-7, use_reverb=True):
super().__init__()
self.losses = nn.ModuleList([SSSLoss(n_fft, alpha, overlap, eps) for n_fft in n_ffts])
if use_reverb:
self.signal_key = "audio_reverb"
else:
self.signal_key = "audio_synth"
def forward(self, x_pred, x_true):
if isinstance(x_pred, dict):
x_pred = x_pred[self.signal_key]
if isinstance(x_true, dict):
x_true = x_true["audio"]
        # trim the prediction (reverberation tail) to the target length
x_pred = x_pred[..., : x_true.shape[-1]]
losses = [loss(x_pred, x_true) for loss in self.losses]
return sum(losses).sum()
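# Minimal usage sketch following the docstring above (batch size and sample
# count are arbitrary assumptions; real audio tensors would normally be used):
if __name__ == "__main__":
    mss_loss = MSSLoss([2048, 1024, 512, 256], alpha=1.0, overlap=0.75, use_reverb=False)
    y_pred = torch.randn(4, 16000)  # (batch, 1d-wave)
    y_gt = torch.randn(4, 16000)
    print(mss_loss(y_pred, y_gt))   # scalar loss tensor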
| 28.142857
| 99
| 0.618274
| 1,745
| 0.885787
| 0
| 0
| 0
| 0
| 0
| 0
| 515
| 0.261421
|
6255b36ebec98e609bf24f715546b55d46c7815b
| 8,889
|
py
|
Python
|
tests/test_triton_server.py
|
jishminor/model_analyzer
|
8593a473bcc923f90a892cffe59fa9980b55c27f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_triton_server.py
|
jishminor/model_analyzer
|
8593a473bcc923f90a892cffe59fa9980b55c27f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_triton_server.py
|
jishminor/model_analyzer
|
8593a473bcc923f90a892cffe59fa9980b55c27f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from .mocks.mock_server_docker import MockServerDockerMethods
from .mocks.mock_server_local import MockServerLocalMethods
from .common import test_result_collector as trc
from model_analyzer.triton.server.server_factory import TritonServerFactory
from model_analyzer.triton.server.server_config import TritonServerConfig
from model_analyzer.model_analyzer_exceptions \
import TritonModelAnalyzerException
# Test parameters
MODEL_REPOSITORY_PATH = 'test_repo'
TRITON_LOCAL_BIN_PATH = 'test_bin_path/tritonserver'
TRITON_DOCKER_BIN_PATH = 'tritonserver'
TRITON_IMAGE = 'test_image'
CONFIG_TEST_ARG = 'exit-on-error'
CLI_TO_STRING_TEST_ARGS = {
'allow-grpc': True,
'min-supported-compute-capability': 7.5,
'metrics-port': 8000,
'model-repository': MODEL_REPOSITORY_PATH
}
class TestTritonServerMethods(trc.TestResultCollector):
def setUp(self):
# Mock
self.server_docker_mock = MockServerDockerMethods()
self.server_local_mock = MockServerLocalMethods()
self.server_docker_mock.start()
self.server_local_mock.start()
# server setup
self.server = None
def test_server_config(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
# Check config initializations
self.assertIsNone(server_config[CONFIG_TEST_ARG],
msg="Server config had unexpected initial"
f"value for {CONFIG_TEST_ARG}")
# Set value
server_config[CONFIG_TEST_ARG] = True
# Test get again
self.assertTrue(server_config[CONFIG_TEST_ARG],
msg=f"{CONFIG_TEST_ARG} was not set")
# Try to set an unsupported config argument, expect failure
with self.assertRaises(TritonModelAnalyzerException,
msg="Expected exception on trying to set"
"unsupported argument in Triton server"
"config"):
server_config['dummy'] = 1
# Reset test arg
server_config[CONFIG_TEST_ARG] = None
# Finally set a couple of args and then check the cli string
for arg, value in CLI_TO_STRING_TEST_ARGS.items():
server_config[arg] = value
cli_string = server_config.to_cli_string()
for argstring in cli_string.split():
# Parse the created string
arg, value = argstring.split('=')
arg = arg[2:]
# Make sure each parsed arg was in test dict
self.assertIn(arg,
CLI_TO_STRING_TEST_ARGS,
msg=f"CLI string contained unknown argument: {arg}")
# Make sure parsed value is the one from dict, check type too
test_value = CLI_TO_STRING_TEST_ARGS[arg]
self.assertEqual(
test_value,
type(test_value)(value),
msg=f"CLI string contained unknown value: {value}")
def test_create_server(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Run for both types of environments
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
# Try to create a server without specifying model repository and expect
# error
server_config['model-repository'] = None
with self.assertRaises(
AssertionError,
msg="Expected AssertionError for trying to create"
"server without specifying model repository."):
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
with self.assertRaises(
AssertionError,
msg="Expected AssertionError for trying to create"
"server without specifying model repository."):
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
def test_start_stop_gpus(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Create server in docker, start , wait, and stop
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
# Start server check that mocked api is called
self.server.start()
self.server_docker_mock.assert_server_process_start_called_with(
TRITON_DOCKER_BIN_PATH + ' ' + server_config.to_cli_string(),
MODEL_REPOSITORY_PATH, TRITON_IMAGE, 8000, 8001, 8002)
self.server_docker_mock.raise_exception_on_container_run()
with self.assertRaises(TritonModelAnalyzerException):
self.server.start()
self.server_docker_mock.stop_raise_exception_on_container_run()
# Stop container and check api calls
self.server.stop()
self.server_docker_mock.assert_server_process_terminate_called()
# Create local server which runs triton as a subprocess
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
# Check that API functions are called
self.server.start()
self.server_local_mock.assert_server_process_start_called_with(cmd=[
TRITON_LOCAL_BIN_PATH, '--model-repository', MODEL_REPOSITORY_PATH
])
self.server.stop()
self.server_local_mock.assert_server_process_terminate_called()
def test_get_logs(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Check docker server logs
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server.start()
self.server.stop()
self.server_docker_mock.assert_server_process_terminate_called()
self.assertEqual(self.server.logs(), "Triton Server Test Log")
# Create local server logs
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
self.server.start()
self.server.stop()
self.server_local_mock.assert_server_process_terminate_called()
self.assertEqual(self.server.logs(), "Triton Server Test Log")
def test_cpu_stats(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Test local server cpu_stats
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
self.server.start()
_, _ = self.server.cpu_stats()
self.server_local_mock.assert_cpu_stats_called()
self.server.stop()
# Test docker server cpu stats
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server.start()
# The following needs to be called as it resets exec_run return value
self.server_docker_mock.assert_server_process_start_called_with(
TRITON_DOCKER_BIN_PATH + ' ' + server_config.to_cli_string(),
MODEL_REPOSITORY_PATH, TRITON_IMAGE, 8000, 8001, 8002)
_, _ = self.server.cpu_stats()
self.server_docker_mock.assert_cpu_stats_called()
self.server.stop()
def tearDown(self):
# In case test raises exception
if self.server is not None:
self.server.stop()
# Stop mocking
self.server_docker_mock.stop()
self.server_local_mock.stop()
if __name__ == '__main__':
unittest.main()
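# These are ordinary unittest cases; assuming the package layout implied by the
# relative imports above, a typical invocation would be:
#   python -m unittest tests.test_triton_server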
| 38.986842
| 79
| 0.673304
| 7,414
| 0.834065
| 0
| 0
| 0
| 0
| 0
| 0
| 2,449
| 0.275509
|
625618420b8b42e1290ca8d84b7cf2668f7fc56c
| 5,490
|
py
|
Python
|
modules/help_urls/help_urls.py
|
xochilt/cousebuilder
|
50c524ad1406b77288efdc616812877e0c85aeb5
|
[
"Apache-2.0"
] | null | null | null |
modules/help_urls/help_urls.py
|
xochilt/cousebuilder
|
50c524ad1406b77288efdc616812877e0c85aeb5
|
[
"Apache-2.0"
] | null | null | null |
modules/help_urls/help_urls.py
|
xochilt/cousebuilder
|
50c524ad1406b77288efdc616812877e0c85aeb5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Help URL resolver.
Help URLs are of the form <base>/<version>/<suffix> where
1) <base> is the base help URL, which defaults to _BASE_URL below.
2) <version> is derived from the GCB_PRODUCT_VERSION environment variable. If
the patch version is zero, it and its leading dot are stripped (so '1.0.0'
becomes '1.0').
3) <suffix> is a string from topics._ALL, which contains a mapping from a
topic_id to a URL suffix.
URLs are normalized to contain correct slashes. To set a help URL, edit
topics.py's _ALL variable.
The flow is:
1) Use services.help_urls.make_learn_more_message() to make a message for
display in the UI.
2) This composes a link with the href set to _REDIRECT_HANDLER_URL, and passes
the topic_id passed in the call to make_learn_more_message().
3) The redirect handler validates the topic_id, then redirects the user to the
real help URL, calculated from the value in topics._ALL.
This allows us control over the help URLs, opening up the ability to version
them, or to have different doc sets for different runtime configurations. It
also gathers the URLs into one place (topics._ALL) rather than scattering them
throughout the codebase.
"""
__author__ = [
'John Cox (johncox@google.com)',
]
import logging
import os
from common import safe_dom
from controllers import utils
from models import custom_modules
from models import services
from modules.help_urls import topics
_BASE_URL = 'https://www.google.com/edu/openonline/course-builder/docs'
# Legacy documentation URL. Fall through to this whenever an item is in
# topics._ALL but its value is topics._DEFAULT.
# TODO(johncox): remove this once topics._ALL is fully populated.
_DEFAULT_URL = 'https://code.google.com/p/course-builder/wiki/Dashboard'
_LOG = logging.getLogger('modules.help_urls.help_urls')
logging.basicConfig()
_REDIRECT_HANDLER_URL = '/modules/help_urls/redirect'
class Service(services.HelpUrls):
def get(self, topic_id):
return _TopicRegistry.get_url(topic_id)
def make_learn_more_message(self, text, topic_id, to_string=True):
message = safe_dom.assemble_text_message(
text, '%s?topic_id=%s' % (_REDIRECT_HANDLER_URL, topic_id))
return str(message) if to_string else message
class _TopicRegistry(object):
_MAP = {}
@classmethod
def build(cls, rows):
for row in rows:
key, value = cls._validate(row)
cls._MAP[key] = value
@classmethod
def get_url(cls, topic_id):
suffix = cls._MAP.get(topic_id)
if not suffix:
raise ValueError('No URL suffix found for topic "%s"' % topic_id)
# Treat as module-protected. pylint: disable=protected-access
if isinstance(suffix, topics._LegacyUrl):
return suffix.value
if suffix.startswith('/'):
suffix = suffix[1:]
return '%s/%s/%s' % (_BASE_URL, cls._get_version_infix(), suffix)
@classmethod
def _get_version_infix(cls):
version = os.environ.get('GCB_PRODUCT_VERSION')
assert version
parts = version.split('.')
assert len(parts) == 3
parts.pop()
return '.'.join(parts)
@classmethod
def _validate(cls, row):
row_length = len(row)
if row_length != 2:
raise ValueError(
'Topic row must have exactly 2 items; got %s for row "%s"' % (
row_length, row))
key, value = row
if not key or not value:
raise ValueError(
'Topic mapping values must both be set; got "%s" and "%s"' % (
key, value))
if key in cls._MAP:
raise ValueError(
'Topic mappings must be unique; "%s" already registered' % key)
return key, value
class _RedirectHandler(utils.BaseHandler):
def get(self):
topic_id = self.request.get('topic_id')
if not topic_id:
_LOG.error('No topic_id')
self.error(400)
return
try:
url = services.help_urls.get(topic_id)
except ValueError:
_LOG.error("topic_id '%s' not found", topic_id)
self.error(400)
return
self.redirect(url, normalize=False)
custom_module = None
def register_module():
# pylint: disable=global-statement
global custom_module
def on_module_enabled():
# Treat as module-protected. pylint: disable=protected-access
_TopicRegistry.build(topics._ALL)
services.help_urls = Service()
global_routes = [
(_REDIRECT_HANDLER_URL, _RedirectHandler),
]
namespaced_routes = []
custom_module = custom_modules.Module(
'Help URL Resolver', 'Creates help URLs for the admin UI',
global_routes, namespaced_routes,
notify_module_enabled=on_module_enabled)
return custom_module
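# Worked example of the <base>/<version>/<suffix> scheme implemented above
# (the topic suffix is illustrative, not taken from topics._ALL):
#   GCB_PRODUCT_VERSION = '1.10.0'  ->  version infix '1.10'
#   suffix 'dashboard/settings'     ->
#   https://www.google.com/edu/openonline/course-builder/docs/1.10/dashboard/settings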
| 31.193182
| 79
| 0.67541
| 2,368
| 0.43133
| 0
| 0
| 1,477
| 0.269035
| 0
| 0
| 2,678
| 0.487796
|
625670c4163cea1c3e5232cab52845847be981b8
| 4,584
|
py
|
Python
|
imm/samplers/noncollapsed.py
|
tscholak/imm
|
cbf588800ddb3b3b57843d85a92d881f43fd5702
|
[
"Apache-2.0"
] | 9
|
2016-02-15T00:40:18.000Z
|
2020-05-14T10:22:53.000Z
|
imm/samplers/noncollapsed.py
|
tscholak/imm
|
cbf588800ddb3b3b57843d85a92d881f43fd5702
|
[
"Apache-2.0"
] | null | null | null |
imm/samplers/noncollapsed.py
|
tscholak/imm
|
cbf588800ddb3b3b57843d85a92d881f43fd5702
|
[
"Apache-2.0"
] | 2
|
2016-01-29T17:46:42.000Z
|
2020-11-18T04:57:20.000Z
|
# -*- coding: utf-8 -*-
"""
Non-collapsed samplers.
"""
import numpy as np
from .generic import (GenericGibbsSampler, GenericRGMSSampler,
GenericSAMSSampler, GenericSliceSampler)
from ..models import (CollapsedConjugateGaussianMixture,
ConjugateGaussianMixture, NonconjugateGaussianMixture)
from ..models import DP, MFM
class GibbsSampler(GenericGibbsSampler):
"""
Gibbs sampler.
Methods
-------
``infer(x_n, c_n, m, max_iter, warmup, random_state)``
Component and latent variable inference.
Parameters
----------
process_model : compatible GenericProcess instance
Compatible process model
m : None or int, optional
The number of auxiliary components. This must be larger than 0.
Default is 10
max_iter : None or int, optional
The maximum number of iterations. The algorithm will be terminated
once this many iterations have elapsed. This must be greater than 0.
Default is 1000
warmup : None or int, optional
The number of warm-up iterations. The algorithm will discard the
results of all iterations until this many iterations have elapsed.
This must be non-negative and smaller than max_iter. Default is
max_iter / 2
References
----------
Neal, R. M. (2000). Markov chain sampling methods for Dirichlet process
mixture models. Journal of Computational and Graphical Statistics, 9:
249-265.
"""
compatible_process_models = set([DP, MFM])
compatible_mixture_models = set([CollapsedConjugateGaussianMixture,
ConjugateGaussianMixture,
NonconjugateGaussianMixture])
class RGMSSampler(GenericRGMSSampler):
"""
Restricted Gibbs merge-split sampler.
Methods
-------
``infer(x_n, c_n, m, scheme, max_iter, warmup, random_state)``
Component inference.
Parameters
----------
process_model : compatible GenericProcess instance
Compatible process model
m : None or int, optional
The number of auxiliary components. This must be larger than 0.
Default is 10
scheme : None or array-like, optional
Computation scheme. Default is (5,1,1,5): 5 intermediate scans to
reach the split launch state, 1 split-merge move per iteration, 1
incremental Gibbs scan per iteration, and 5 intermediate moves to
reach the merge launch state
max_iter : None or int, optional
The maximum number of iterations. The algorithm will be terminated
once this many iterations have elapsed. This must be greater than 0.
Default is 1000
warmup : None or int, optional
The number of warm-up iterations. The algorithm will discard the
results of all iterations until this many iterations have elapsed.
This must be non-negative and smaller than max_iter. Default is
max_iter / 2
References
----------
Jain, S. and Neal, R. M. (2007). Splitting and merging components of a
nonconjugate Dirichlet process mixture model. Bayesian Analysis 2:
445-472.
"""
compatible_process_models = set([DP, MFM])
compatible_mixture_models = set([ConjugateGaussianMixture,
NonconjugateGaussianMixture])
class SliceSampler(GenericSliceSampler):
"""
Slice sampler.
Methods
-------
``infer(x_n, c_n, max_iter, warmup, random_state)``
Component inference.
Parameters
----------
process_model : compatible GenericProcess instance
Compatible process model
max_iter : None or int, optional
The maximum number of iterations. The algorithm will be terminated
once this many iterations have elapsed. This must be greater than 0.
Default is 1000
warmup : None or int, optional
The number of warm-up iterations. The algorithm will discard the
results of all iterations until this many iterations have elapsed.
This must be non-negative and smaller than max_iter. Default is
max_iter / 2
References
----------
Ge, H., Chen, Y., Wan, M., and Ghahramani, Z. (2015). Distributed
inference for Dirichlet process mixture models. In Proceedings of The
32nd International Conference on Machine Learning, 2276-2284.
"""
compatible_process_models = set([DP])
compatible_mixture_models = set([ConjugateGaussianMixture,
NonconjugateGaussianMixture])
| 34.208955
| 77
| 0.662522
| 4,236
| 0.924084
| 0
| 0
| 0
| 0
| 0
| 0
| 3,553
| 0.775087
|
62578dce0eabf4b8ae7fad7a5b39c7aa9bac6caa
| 393
|
py
|
Python
|
api/urls.py
|
Emmastro/medmer-api
|
c17366a92506b6ac1bdedc85ad0c29c3d2b36b5d
|
[
"Apache-2.0"
] | null | null | null |
api/urls.py
|
Emmastro/medmer-api
|
c17366a92506b6ac1bdedc85ad0c29c3d2b36b5d
|
[
"Apache-2.0"
] | 1
|
2021-07-12T06:32:14.000Z
|
2021-07-12T06:32:14.000Z
|
api/urls.py
|
Emmastro/medmer
|
c17366a92506b6ac1bdedc85ad0c29c3d2b36b5d
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('accounts/', include('django.contrib.auth.urls')),
path('', include('home.urls')),
path('admin/', admin.site.urls),
path('registration/medic', include('medic.urls')),
path('registration/patient', include('patient.urls')),
path('help-request/', include('helprequest.urls')),
]
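# Each include() above defers to an app-level URLconf. A hypothetical home/urls.py
# compatible with the '' entry might look like (names are assumptions):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('', views.index, name='index')]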
| 32.75
| 59
| 0.676845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.40458
|
6258836d166a8d6c1882a706cc1e2bf3153eda25
| 55
|
py
|
Python
|
src/evolvepy/integrations/__init__.py
|
EltonCN/evolvepy
|
4489264d6c03ea4f3c23ea665fdf12fe4ead1ccc
|
[
"MIT"
] | 1
|
2022-01-13T21:11:53.000Z
|
2022-01-13T21:11:53.000Z
|
src/evolvepy/integrations/__init__.py
|
EltonCN/evolvepy
|
4489264d6c03ea4f3c23ea665fdf12fe4ead1ccc
|
[
"MIT"
] | null | null | null |
src/evolvepy/integrations/__init__.py
|
EltonCN/evolvepy
|
4489264d6c03ea4f3c23ea665fdf12fe4ead1ccc
|
[
"MIT"
] | null | null | null |
'''
EvolvePy's integrations with other modules.
'''
| 18.333333
| 47
| 0.672727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 1
|
62588608a3f5e4881c91b92770889c28b45edea4
| 587
|
py
|
Python
|
subdomain.py
|
ouldevloper/subDomainFinder
|
3b888e8267d8b89401a468d2622edd6716a88293
|
[
"MIT"
] | null | null | null |
subdomain.py
|
ouldevloper/subDomainFinder
|
3b888e8267d8b89401a468d2622edd6716a88293
|
[
"MIT"
] | null | null | null |
subdomain.py
|
ouldevloper/subDomainFinder
|
3b888e8267d8b89401a468d2622edd6716a88293
|
[
"MIT"
] | null | null | null |
import requests
import re
url=input("Enter Url [ex: example.com]: ")
def getSubDomain(url):
url=url.replace("www.","").replace("https://","").replace("http://","")
pattern = "[\w]{1,256}\.[a-zA-Z0-9()]{1,6}"
_l = re.compile(pattern)
if _l.match(url):
response = requests.get(f"https://sonar.omnisint.io/subdomains/{url}").text
urls = response.split("\n")
for u in set(urls):
if u=="" or len(u)<=3:
pass
print("[+] ",u.replace("\"","").replace("'","").replace(",","").replace(" ",""))
getSubDomain(url)
| 34.529412
| 95
| 0.524702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.29983
|
62592611062846e8ddc9453d08b3f9cc749f88fa
| 129
|
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/20.03-File-handling.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/20.03-File-handling.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/20.03-File-handling.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
file = open("text.txt", "r")
file2 = open("text2.txt", "w")
for data in file:
file2.write(data)
file.close()
file2.close()
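# Equivalent copy using context managers, which close both files automatically
# (same filenames as above):
with open("text.txt", "r") as src, open("text2.txt", "w") as dst:
    for data in src:
        dst.write(data)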
| 14.333333
| 30
| 0.620155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.209302
|
625a19aeeb78d1a163e46b551accd53b6ef2d20c
| 532
|
py
|
Python
|
torch2trt/__init__.py
|
SnowMasaya/torch2trt
|
d526b2473805f9b9a704a201bef3ce5be25d284f
|
[
"MIT"
] | 2
|
2020-07-10T06:26:03.000Z
|
2020-07-10T07:38:08.000Z
|
torch2trt/__init__.py
|
SnowMasaya/torch2trt
|
d526b2473805f9b9a704a201bef3ce5be25d284f
|
[
"MIT"
] | 1
|
2020-02-16T09:43:35.000Z
|
2020-02-16T09:43:35.000Z
|
torch2trt/__init__.py
|
SnowMasaya/torch2trt
|
d526b2473805f9b9a704a201bef3ce5be25d284f
|
[
"MIT"
] | 1
|
2019-10-14T01:11:23.000Z
|
2019-10-14T01:11:23.000Z
|
from .torch2trt import *
from .converters import *
import tensorrt as trt
def load_plugins():
import os
import ctypes
ctypes.CDLL(os.path.join(os.path.dirname(__file__), 'libtorch2trt.so'))
registry = trt.get_plugin_registry()
torch2trt_creators = [c for c in registry.plugin_creator_list if c.plugin_namespace == 'torch2trt']
for c in torch2trt_creators:
registry.register_creator(c, 'torch2trt')
try:
load_plugins()
PLUGINS_LOADED = True
except OSError:
PLUGINS_LOADED = False
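# Illustrative check (not part of the original module): downstream code can test
# whether the TensorRT plugin library was found before relying on plugin-backed
# converters.
#   import torch2trt
#   if not torch2trt.PLUGINS_LOADED:
#       print("libtorch2trt.so could not be loaded; plugin-backed layers are unavailable")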
| 24.181818
| 103
| 0.716165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.073308
|
625bb84667ccfd99b5f46c321c52127e25ca0ad0
| 4,614
|
py
|
Python
|
ediel-parser/lib/cli/com.py
|
sun-labs/ediclue
|
22836afc3eca6eebd800cf5d843166656ceaeaae
|
[
"MIT"
] | 3
|
2020-05-30T09:15:40.000Z
|
2021-11-17T20:06:27.000Z
|
ediel-parser/lib/cli/com.py
|
sun-labs/ediclue
|
22836afc3eca6eebd800cf5d843166656ceaeaae
|
[
"MIT"
] | null | null | null |
ediel-parser/lib/cli/com.py
|
sun-labs/ediclue
|
22836afc3eca6eebd800cf5d843166656ceaeaae
|
[
"MIT"
] | 1
|
2020-12-25T16:37:13.000Z
|
2020-12-25T16:37:13.000Z
|
import os
from lib.EDICommunicator import EDICommunicator
from lib.EDIParser import EDIParser
import lib.cli.tools as tools
from types import SimpleNamespace
def set_args(subparsers):
parser = subparsers.add_parser('com', description='communication between EDI systems')
parser.add_argument('--send-to')
parser.add_argument('--send-from')
parser.add_argument('--from', dest='from_type', choices=['mail'], default='mail', help='The input content type'),
parser.add_argument('--username', default=os.environ.get('SL_COM_USERNAME'))
parser.add_argument('--password', default=os.environ.get('SL_COM_PASSWORD'))
parser.add_argument('--server', default=os.environ.get('SL_COM_SERVER'))
parser.add_argument('--outgoing-server', default=os.environ.get('SL_COM_OUTGOING_SERVER'))
parser.add_argument('--incoming-server', default=os.environ.get('SL_COM_INCOMING_SERVER'))
parser.add_argument('--dont-store', help='do not store sent email in sent folder')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--send', action='store_true', help='Send mail')
parser.add_argument('--list-labels', action='store_true')
parser.add_argument('--imap-search-query')
parser.add_argument('--imap-store-query', nargs='+', help='two arguments required: command flags')
parser.add_argument('--set-label', nargs='+')
parser.add_argument('--input-dir')
parser.add_argument('--output-dir')
def handle_send(payload, args):
com = get_com(args)
mail = None # result email
if args.from_type == "mail":
mail = com.mail_from_str(payload)
if args.send is True:
com.send_mail(mail)
return mail
def get_com(args):
com = EDICommunicator()
com.server = args.server
com.username = args.username
com.password = args.password
com.init_imap()
return com
def vprint(args, *margs):
if args.verbose is True:
print(*margs)
def handle_store_query(args, mail_ids_str: str):
mail_ids = mail_ids_str.split(',') # to list
com = get_com(args)
query = args.imap_store_query
if len(query) < 2: raise ValueError("You need to supply two arguments for imap-store-query, command and flags")
cmd, flags = query[0], query[1]
result_email_ids = com.imap_store_query(mail_ids_str, cmd, '({})'.format(flags))
return result_email_ids
def run(args):
# dependencies on other arguments
args.outgoing_server = args.server if args.outgoing_server is None else args.outgoing_server
args.incoming_server = args.server if args.incoming_server is None else args.incoming_server
args.send_from = args.username if args.send_from is None else args.send_from
com = get_com(args)
# single commands
if args.list_labels is True:
exit(com.list_labels())
# parse inputs
load = SimpleNamespace()
load.files = False
if args.input_dir: # load mails from directory
load.files = True
load.filenames, load.paths = tools.get_files(args.input_dir)
mail_ids = com.str_mail_ids(com.mail_ids_from_filenames(load.filenames))
else:
if args.imap_search_query: # fetch from imap server
mail_ids = com.imap_search_query(args.imap_search_query)
mail_ids = com.str_mail_ids(com.format_mail_ids(mail_ids))
else: # read stdin
mail_ids = args.input.read()
mail_ids = mail_ids.replace('\n', '')
mail_ids_lst = mail_ids.split(',')
    mail_ids_lst = list(filter(lambda m: m != '', map(lambda m: m.strip(), mail_ids_lst)))  # compare by value, not identity
n_mail_ids = len(mail_ids_lst)
if n_mail_ids == 0: raise SystemExit(1)
# send emails
if args.send is True:
if load.files is True:
for i, path in enumerate(load.paths):
fh = open(path, 'r')
content = fh.read()
mail = handle_send(content, args)
if args.imap_store_query:
mail_id = mail_ids_lst[i]
response_id = handle_store_query(args, mail_id)
fh.close()
else: # write emails
if args.output_dir:
for mail_id in mail_ids_lst:
mail = com.get_mail_with(mail_id)
file_name = '{}.eml'.format(mail_id)
file_path = os.path.join(args.output_dir, file_name)
fh = open(file_path, 'w')
fh.write(mail)
fh.close()
if args.send is False:
if args.imap_store_query:
mail_ids = handle_store_query(args, mail_ids)
print(mail_ids)
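# Sketch of how this subcommand could be wired into a parent argparse CLI
# (parser names are assumptions; a reachable IMAP/SMTP server and credentials
# are required before run() will succeed, and args.input must be a readable
# file object such as sys.stdin when mail ids come from standard input):
#   import argparse, sys
#   parser = argparse.ArgumentParser()
#   subparsers = parser.add_subparsers(dest='command')
#   set_args(subparsers)
#   args = parser.parse_args()   # e.g. invoked as: prog com --username ... --server ...
#   args.input = sys.stdin
#   run(args)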
| 39.435897
| 117
| 0.657781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 840
| 0.182055
|
625c8a42b10a793670359b3599bb4463084222aa
| 154
|
py
|
Python
|
04_working_with_list/4_2_animals.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
04_working_with_list/4_2_animals.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
04_working_with_list/4_2_animals.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
animals = ['cat', 'dog', 'pig']
for animal in animals:
    print(animal + ' would make a great pet.')
print('All of those animals would make a great pet.')
| 30.8
| 53
| 0.668831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.551948
|
625d706044520dc3362905ca933c2db2e59ae151
| 145
|
py
|
Python
|
backend/mytutorials/telegrambot/urls.py
|
mahmoodDehghan/MyTests
|
a67693e14eda2257490f295909d17b6f3f962543
|
[
"MIT"
] | null | null | null |
backend/mytutorials/telegrambot/urls.py
|
mahmoodDehghan/MyTests
|
a67693e14eda2257490f295909d17b6f3f962543
|
[
"MIT"
] | null | null | null |
backend/mytutorials/telegrambot/urls.py
|
mahmoodDehghan/MyTests
|
a67693e14eda2257490f295909d17b6f3f962543
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import start_bot, end_bot
urlpatterns = [
path('startbot/', start_bot),
path('endbot/', end_bot),
]
| 20.714286
| 37
| 0.717241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.137931
|
62602e529718a96dbd2a4603b293f7ef9ea48276
| 420
|
py
|
Python
|
tests/performance/cte-arm/tests/csvm_ijcnn1.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 36
|
2018-10-22T19:21:14.000Z
|
2022-03-22T12:10:01.000Z
|
tests/performance/cte-arm/tests/csvm_ijcnn1.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 329
|
2018-11-22T18:04:57.000Z
|
2022-03-18T01:26:55.000Z
|
tests/performance/cte-arm/tests/csvm_ijcnn1.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 21
|
2019-01-10T11:46:39.000Z
|
2022-03-17T12:59:45.000Z
|
import performance
import dislib as ds
from dislib.classification import CascadeSVM
def main():
x_ij, y_ij = ds.load_svmlight_file(
"/fefs/scratch/bsc19/bsc19029/PERFORMANCE/datasets/train",
block_size=(5000, 22), n_features=22, store_sparse=True)
csvm = CascadeSVM(c=10000, gamma=0.01)
performance.measure("CSVM", "ijcnn1", csvm.fit, x_ij, y_ij)
if __name__ == "__main__":
main()
| 22.105263
| 66
| 0.697619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.192857
|
6261861dfa046a0934777f8f23b5ec284278ef51
| 1,115
|
py
|
Python
|
py_connect/exceptions.py
|
iparaskev/py_connect
|
43476cddfb25130d058fcf59928454f867af8feb
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T07:05:50.000Z
|
2021-03-31T22:53:52.000Z
|
py_connect/exceptions.py
|
iparaskev/py_connect
|
43476cddfb25130d058fcf59928454f867af8feb
|
[
"BSD-3-Clause"
] | null | null | null |
py_connect/exceptions.py
|
iparaskev/py_connect
|
43476cddfb25130d058fcf59928454f867af8feb
|
[
"BSD-3-Clause"
] | null | null | null |
"""Exceptions of the library"""
class PyConnectError(Exception):
"""Base class for all exceptions in py_connect."""
class InvalidPowerCombination(PyConnectError):
"""Connection of different power pins."""
class MaxConnectionsError(PyConnectError):
"""Interface has exceeded it's max connections limit."""
class InvalidGpioError(PyConnectError):
"""Invalid connection of two gpio pins."""
class AlreadyConnectedError(PyConnectError):
"""One or more pins of the interface are already connected."""
class TwoMasterError(PyConnectError):
"""Error when connecting two master interfaces."""
class TwoSlaveError(PyConnectError):
"""Error when connecting two slave interfaces."""
class ChipEnabledFullError(PyConnectError):
"""All chip enable pins are in use."""
class NotImplementedDriverError(PyConnectError):
"""This peripheral doesn't have an implemented driver."""
class UnicludedDeviceError(PyConnectError):
"""Device hasn't been included in connections specification."""
class EmptyListError(PyConnectError):
"""Empty list given for an attribute."""
| 24.23913
| 67
| 0.744395
| 1,050
| 0.941704
| 0
| 0
| 0
| 0
| 0
| 0
| 579
| 0.519283
|
6261f7a9c9b18a89ffbec87fba08c79cb2839e13
| 1,151
|
py
|
Python
|
code/glucocheck/homepage/migrations/0007_auto_20210315_1807.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/migrations/0007_auto_20210315_1807.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/migrations/0007_auto_20210315_1807.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-15 22:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0006_carbohydrate_glucose_insulin_recordingcategory'),
]
operations = [
migrations.RenameField(
model_name='carbohydrate',
old_name='reading',
new_name='carb_reading',
),
migrations.RemoveField(
model_name='glucose',
name='categories',
),
migrations.AddField(
model_name='glucose',
name='categories',
field=models.ManyToManyField(to='homepage.RecordingCategory'),
),
migrations.AlterField(
model_name='recordingcategory',
name='name',
field=models.CharField(choices=[('fasting', 'Fasting'), ('before breakfast', 'Before Breakfast'), ('after breakfast', 'After Breakfast'), ('before lunch', 'Before Lunch'), ('after lunch', 'After Lunch'), ('snacks', 'Snacks'), ('before dinner', 'Before Dinner'), ('after dinner', 'After Dinner')], max_length=255, unique=True),
),
]
| 34.878788
| 338
| 0.600348
| 1,058
| 0.919201
| 0
| 0
| 0
| 0
| 0
| 0
| 458
| 0.397915
|
6262bae7dfc3df2c02ba7e5efae6983d3daa02cb
| 1,826
|
py
|
Python
|
models/SnapshotTeam.py
|
Fa1c0n35/RootTheBoxs
|
4f2a9886c8eedca3039604b93929c8c09866115e
|
[
"Apache-2.0"
] | 1
|
2019-06-29T08:40:54.000Z
|
2019-06-29T08:40:54.000Z
|
models/SnapshotTeam.py
|
Fa1c0n35/RootTheBoxs
|
4f2a9886c8eedca3039604b93929c8c09866115e
|
[
"Apache-2.0"
] | null | null | null |
models/SnapshotTeam.py
|
Fa1c0n35/RootTheBoxs
|
4f2a9886c8eedca3039604b93929c8c09866115e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mar 11, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import Integer
from models import dbsession
from models.Team import Team
from models.Relationships import snapshot_team_to_flag, snapshot_team_to_game_level
from models.BaseModels import DatabaseObject
class SnapshotTeam(DatabaseObject):
"""
Used by game history; snapshot of a single team in history
"""
team_id = Column(Integer, ForeignKey("team.id"), nullable=False)
money = Column(Integer, nullable=False)
bots = Column(Integer, nullable=False)
game_levels = relationship(
"GameLevel",
secondary=snapshot_team_to_game_level,
backref=backref("snapshot_team", lazy="select"),
)
flags = relationship(
"Flag",
secondary=snapshot_team_to_flag,
backref=backref("snapshot_team", lazy="select"),
)
@property
def name(self):
return dbsession.query(Team._name).filter_by(id=self.team_id).first()[0]
@classmethod
def all(cls):
""" Returns a list of all objects in the database """
return dbsession.query(cls).all()
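# Illustrative read-only usage (assumes dbsession is already bound to a populated
# database, which this module does not set up):
#   for snapshot in SnapshotTeam.all():
#       print(snapshot.name, snapshot.money, snapshot.bots)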
| 29.451613
| 83
| 0.7092
| 839
| 0.459474
| 0
| 0
| 244
| 0.133625
| 0
| 0
| 864
| 0.473165
|
6263cf2679c6dfa1a07724e0812c51922a103bc9
| 2,544
|
py
|
Python
|
src/train.py
|
DanCh11/virtual-assistant
|
b6601f20bd851864f4a76dd4c73c8c5266a0014f
|
[
"MIT"
] | null | null | null |
src/train.py
|
DanCh11/virtual-assistant
|
b6601f20bd851864f4a76dd4c73c8c5266a0014f
|
[
"MIT"
] | null | null | null |
src/train.py
|
DanCh11/virtual-assistant
|
b6601f20bd851864f4a76dd4c73c8c5266a0014f
|
[
"MIT"
] | null | null | null |
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from model import NeuralNetwork
from nltk_utils import stem, tokenize, bag_of_words
with open('./data/data.json', 'r') as f:
data = json.load(f)
all_words = []
tags = []
xy = []
for intents in data['intents']:
tag = intents['tag']
tags.append(tag)
for pattern in intents['patterns']:
w = tokenize(pattern)
all_words.extend(w)
xy.append((w, tag))
ignore_words = ['?', '!', '.', ',']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(tags)
x_train = []
y_train = []
for (pattern_sentence, tag) in xy:
bag = bag_of_words(pattern_sentence, all_words)
x_train.append(bag)
label = tags.index(tag)
y_train.append(label)
x_train = np.array(x_train)
y_train = np.array(y_train)
class ChatDataset(Dataset):
def __init__(self) -> None:
self.n_samples = len(x_train)
self.x_data = x_train
self.y_data = y_train
    # support indexing: dataset[index]
    def __getitem__(self, index: int):
        return self.x_data[index], self.y_data[index]
def __len__(self) -> int:
return self.n_samples
# Hyperparams
batch_size = 8
hidden_size = 8
output_size = len(tags)
input_size = len(x_train[0])
learning_rate = 0.001
num_epochs = 1000
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=2)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNetwork(input_size, hidden_size, output_size).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(device)
outputs = model(words)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print(f'epoch [{epoch+1}/{num_epochs}], loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"output_size": output_size,
"hidden_size": hidden_size,
"all_words": all_words,
"tags": tags
}
FILE = './data/data.pth'
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
print(x_train)
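# Sketch of reloading the checkpoint saved above for inference; it mirrors the
# keys stored in `data` and the NeuralNetwork constructor used earlier.
def load_trained_model(path=FILE):
    checkpoint = torch.load(path)
    net = NeuralNetwork(checkpoint["input_size"],
                        checkpoint["hidden_size"],
                        checkpoint["output_size"]).to(device)
    net.load_state_dict(checkpoint["model_state"])
    net.eval()
    return net, checkpoint["all_words"], checkpoint["tags"]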
| 22.315789
| 94
| 0.66195
| 340
| 0.133648
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.123035
|
62643e087525aca4ccc614812b7bfd674336652f
| 411
|
py
|
Python
|
pythonexercicios/ex101-funcvotacao.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
pythonexercicios/ex101-funcvotacao.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
pythonexercicios/ex101-funcvotacao.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
def voto(num):
from datetime import date
anoatual = date.today().year
idade = anoatual - num
if idade < 16:
return f"Com {idade} anos: NÃO VOTA"
elif 16 <= idade < 18 or idade > 65:
return f'Com {idade} anos: VOTO OPCIONAL'
else:
return f"Com {idade} anos: VOTO OBRIGATORIO"
print('-' * 30)
anonasc = int(input('Em que ano você nasceu? '))
print(voto(anonasc))
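# Worked examples relative to the current year (output strings follow the
# branches above):
#   voto(date.today().year - 17)  ->  'Com 17 anos: VOTO OPCIONAL'
#   voto(date.today().year - 30)  ->  'Com 30 anos: VOTO OBRIGATORIO'
#   voto(date.today().year - 70)  ->  'Com 70 anos: VOTO OPCIONAL'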
| 25.6875
| 52
| 0.610706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.317191
|
62665baa3c795d7ea68ea728720da3de2371a899
| 4,289
|
py
|
Python
|
map_objects/tile.py
|
matteobarbieri/libtcod-tutorial
|
2be59978483d1c754b736a0fe96c9554e9ba8547
|
[
"MIT"
] | 1
|
2019-03-09T14:20:51.000Z
|
2019-03-09T14:20:51.000Z
|
map_objects/tile.py
|
matteobarbieri/libtcod-tutorial
|
2be59978483d1c754b736a0fe96c9554e9ba8547
|
[
"MIT"
] | null | null | null |
map_objects/tile.py
|
matteobarbieri/libtcod-tutorial
|
2be59978483d1c754b736a0fe96c9554e9ba8547
|
[
"MIT"
] | null | null | null |
import random
import libtcodpy as libtcod
GRAY_PALETTE = [
# libtcod.Color(242, 242, 242),
libtcod.Color(204, 204, 204),
libtcod.Color(165, 165, 165),
libtcod.Color(127, 127, 127),
libtcod.Color(89, 89, 89),
]
class Tile:
"""
A tile on a map. It may or may not be blocked, and may or may not block
sight.
"""
def __init__(self, blocked, block_sight=None):
self._blocked = blocked
# By default, if a tile is blocked, it also blocks sight
if block_sight is None:
block_sight = blocked
self._block_sight = block_sight
self._fg_symbol = ' '
self.explored = False
@property
def fg_symbol(self):
return self._fg_symbol
@property
def blocked(self):
return self._blocked
@property
def block_sight(self):
return self._block_sight
def render_at(self, con, x, y, visible):
"""
Render a tile at position x, y
"""
# Set color for background
if type(self) == Tile:
return
libtcod.console_set_char_background(
con, x, y, self.bg_color, libtcod.BKGND_SET)
if self.fg_symbol is not None:
# Draw symbol on foreground
libtcod.console_put_char(
con, x, y, self.fg_symbol, libtcod.BKGND_NONE)
# Set color for foreground symbol
libtcod.console_set_char_foreground(con, x, y, self.fg_color)
class Floor(Tile):
"""
A block representing traversable terrain
"""
def __init__(self, bg_color=libtcod.Color(20, 20, 20), fg_symbol=250,
alternate_fg_symbols=['[', ']', '{', '}', '*', '%'],
alternate_symbol_chance=0.1,
# fg_color=libtcod.Color(70, 70, 70)):
fg_color=libtcod.Color(65, 65, 65)):
# Declare it as non-blocking
super().__init__(False)
# self.bg_color = libtcod.black
# self.bg_color = libtcod.Color(10, 10, 10)
# self.bg_color = libtcod.Color(32, 32, 32)
# self.bg_color = libtcod.Color(16, 16, 16)
self.bg_color = bg_color
self.fg_color = fg_color
# Choose one of the available symbols every once in a while
if random.random() < alternate_symbol_chance:
# The alternate symbol
self._fg_symbol = random.choice(alternate_fg_symbols)
else:
# The default symbol
self._fg_symbol = fg_symbol
class Door(Tile):
"""
A door
"""
def __init__(self, bg_color=libtcod.Color(139,69,19),
fg_color=libtcod.orange, is_open=False):
# Declare it as blocked
super().__init__(False)
self.bg_color = bg_color
self.fg_color = fg_color
self.is_open = is_open
def open(self):
self.is_open = True
def close(self):
self.is_open = False
@property
def fg_symbol(self):
"""
Return a different symbol based on status
"""
if self.is_open:
return '-'
else:
return '+'
@property
def block_sight(self):
return not self.is_open
class Wall(Tile):
"""
A block of wall
"""
def __init__(self, bg_color, fg_symbol='#', fg_color=libtcod.black):
# Declare it as blocked
super().__init__(True)
self.bg_color = bg_color
self.fg_color = fg_color
self._fg_symbol = fg_symbol
def create_from_palette(palette=GRAY_PALETTE):
"""
palette: list
Each element is a libtcod.Color object
"""
return Wall(random.choice(palette))
# def create(base_color=libtcod.Color(159, 89, 66), color_variance=20):
# # Extract colors
# b, g, r = base_color.b, base_color.g, base_color.r
# # Slightly alter them
# b += random.randint(-color_variance, color_variance)
# b = max(0, b)
# b = min(255, b)
# g += random.randint(-color_variance, color_variance)
# g = max(0, g)
# g = min(255, g)
# r += random.randint(-color_variance, color_variance)
# r = max(0, r)
# r = min(255, r)
# return Wall(libtcod.Color(b, g, r))
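# Minimal construction sketch for the tile types above (rendering still needs an
# initialized libtcod console, which is not set up here):
if __name__ == "__main__":
    floor = Floor()
    door = Door(is_open=True)
    wall = Wall.create_from_palette()
    print(floor.blocked, door.block_sight, wall.blocked)  # False False True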
| 24.508571
| 75
| 0.569597
| 3,458
| 0.806249
| 0
| 0
| 456
| 0.106318
| 0
| 0
| 1,453
| 0.338774
|
62668c6700d6f2b1513772cf655859cb23f0af9f
| 16,717
|
py
|
Python
|
src/mrnet/utils/reaction.py
|
hpatel1567/mrnet
|
b9989b63ba7aa39cfaf484e78d872ba2cc2d2a20
|
[
"BSD-3-Clause-LBNL"
] | 9
|
2020-11-06T23:02:29.000Z
|
2021-04-28T01:49:34.000Z
|
src/mrnet/utils/reaction.py
|
hpatel1567/mrnet
|
b9989b63ba7aa39cfaf484e78d872ba2cc2d2a20
|
[
"BSD-3-Clause-LBNL"
] | 118
|
2020-11-09T06:49:10.000Z
|
2021-07-05T01:16:32.000Z
|
src/mrnet/utils/reaction.py
|
hpatel1567/mrnet
|
b9989b63ba7aa39cfaf484e78d872ba2cc2d2a20
|
[
"BSD-3-Clause-LBNL"
] | 8
|
2020-11-06T23:02:36.000Z
|
2021-04-20T00:39:52.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from mip import BINARY, CBC, MINIMIZE, Model, xsum
from mrnet.core.mol_entry import MoleculeEntry
__author__ = "Mingjian Wen"
__maintainer__ = "Mingjian Wen"
__email__ = "mjwen@lbl.gov"
__version__ = "0.2"
__status__ = "Alpha"
__date__ = "April, 2021"
# typing
Bond = Tuple[int, int]
AtomMappingDict = Dict[int, int]
def get_reaction_atom_mapping(
reactants: List[MoleculeEntry],
products: List[MoleculeEntry],
max_bond_change: int = 10,
msg: bool = False,
threads: int = 1,
) -> Tuple[List[AtomMappingDict], List[AtomMappingDict], int]:
"""
Get the atom mapping between the reactants and products of a reaction.
This works for reactions with any number of reactant/product molecules, provided
that the reaction is stoichiometrically balanced. This implementation respects atom
    type and the connection between atoms, and ignores other information like bond type
    (e.g. single vs double) as well as stereo centers.
There could be multiple mappings (due to, e.g. symmetry in molecules and the fact
that bond type is not considered), and this function only returns one such mapping.
The algorithm treats the reactants as a single disjoint graph (same for the products)
    and uses integer programming to find the smallest number of bond edits to transform
the reactant graph to the product graph. See the paper in `Reference` for details of
the algorithm.
Args:
reactants: reactant molecules
products: product molecules
max_bond_change: maximum number of allowed bond changes (break and form) between
the reactants and products.
msg: whether to show the integer programming solver running message to stdout.
threads: number of threads for the integer programming solver.
Returns:
reactants_map_number: rdkit style atom map number for the reactant molecules
(starting from 1 in rdkit but from 0 here). Each dict holds the map number
for one molecule {atom_index: map_number}. This should be used together
with `products_map_number` to determine the correspondence of atoms.
            Atoms in the reactants and products having the same map number correspond
to each other in the reaction. For example, given
`reactants_map_number=[{0:3, 1:0}, {0:2, 1:1}]` and
`products_map_number = [{0:1}, {0:0, 1:2, 2:3}]`, we can conclude that
atom 0 in reactant molecule 0 maps to atom 2 in product molecule 1 (both
with map number 3);
atom 1 in reactant molecule 0 maps to atom 0 in product molecule 1 (both
with map number 0);
atom 0 in reactant molecule 1 maps to atom 1 in product molecule 1 (both
with map number 2);
            atom 1 in reactant molecule 1 maps to atom 0 in product molecule 0 (both
            with map number 1).
products_map_number: rdkit style atom map number for the product molecules.
See `reactants_map_number` for more.
        num_bond_change: number of changed bonds in the reaction
References:
`Stereochemically Consistent Reaction Mapping and Identification of Multiple
Reaction Mechanisms through Integer Linear Optimization`,
J. Chem. Inf. Model. 2012, 52, 84–92, https://doi.org/10.1021/ci200351b
"""
# preliminary check
# check 1: reactants and products have the same atom counts
rct_species = defaultdict(int) # type: Dict[str, int]
prdt_species = defaultdict(int) # type: Dict[str, int]
for m in reactants:
for s in m.species:
rct_species[s] += 1
for m in products:
for s in m.species:
prdt_species[s] += 1
if rct_species != prdt_species:
raise ReactionMappingError(
"Expect reactants and products to have the same atom count, "
f"but got {dict(rct_species)} and {dict(prdt_species)}."
)
    # check 2: number of bond changes is smaller than the allowed maximum
    # This only compares total bond counts, so the actual number of changes could be
    # larger; that is checked again after the mapping is computed.
num_bond_change = abs(
sum(len(m.bonds) for m in reactants) - sum(len(m.bonds) for m in products)
)
if num_bond_change > max_bond_change:
raise ReactionMappingError(
f"Number of changed bond is at least {num_bond_change}, larger than allowed "
f"maximum {max_bond_change}"
)
# local and global atom index mapping
(
reactant_species,
reactant_bonds,
_,
reactant_idx_mapping,
) = get_local_global_atom_index_mapping(reactants)
(
product_species,
product_bonds,
_,
product_idx_mapping,
) = get_local_global_atom_index_mapping(products)
# solve integer programming problem to get atom mapping
if len(reactant_bonds) != 0 and len(product_bonds) != 0:
num_bond_change, r2p_mapping, p2r_mapping = solve_integer_programing(
reactant_species,
product_species,
reactant_bonds,
product_bonds,
msg,
threads,
)
else:
# corner case that integer programming cannot handle
out = get_atom_mapping_no_bonds(
reactant_species, product_species, reactant_bonds, product_bonds
)
num_bond_change, r2p_mapping, p2r_mapping = out # type: ignore
# final check
if num_bond_change > max_bond_change:
raise ReactionMappingError(
f"Number of bond change {num_bond_change} larger than allowed maximum number "
f"of bond change {max_bond_change}."
)
if None in r2p_mapping:
global_idx = r2p_mapping.index(None)
mol_idx, atom_idx = reactant_idx_mapping[global_idx]
raise ReactionMappingError(
f"Cannot find mapping for atom {atom_idx} of reactant molecule {mol_idx}."
)
if None in p2r_mapping:
global_idx = p2r_mapping.index(None)
mol_idx, atom_idx = product_idx_mapping[global_idx]
raise ReactionMappingError(
f"Cannot find mapping for atom {atom_idx} of product molecule {mol_idx}."
)
    # Everything is all right; create the atom map numbers.
    # Atoms in the reactants use their global index as the map number.
    # Map numbers for atoms in the products are then determined from the results
    # of the integer programming.
reactants_map_number = [
{} for _ in range(len(reactants))
] # type: List[Dict[int,int]]
products_map_number = [
{} for _ in range(len(products))
] # type: List[Dict[int,int]]
for rct_idx, prdt_idx in enumerate(r2p_mapping):
map_number = rct_idx
mol_idx, atom_idx = reactant_idx_mapping[rct_idx] # type: ignore
reactants_map_number[mol_idx][atom_idx] = map_number
mol_idx, atom_idx = product_idx_mapping[prdt_idx] # type: ignore
products_map_number[mol_idx][atom_idx] = map_number
return reactants_map_number, products_map_number, num_bond_change
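# A minimal sketch (not part of the original API) of how the map-number dicts
# documented above can be folded into a direct reactant-atom -> product-atom
# correspondence. The dict values are copied from the docstring example rather
# than computed from real MoleculeEntry objects.
if __name__ == "__main__":
    reactants_map_number = [{0: 3, 1: 0}, {0: 2, 1: 1}]
    products_map_number = [{0: 1}, {0: 0, 1: 2, 2: 3}]
    # invert the product side: map number -> (product molecule index, atom index)
    number_to_product = {
        num: (mol_idx, atom_idx)
        for mol_idx, mapping in enumerate(products_map_number)
        for atom_idx, num in mapping.items()
    }
    for mol_idx, mapping in enumerate(reactants_map_number):
        for atom_idx, num in mapping.items():
            prod_mol, prod_atom = number_to_product[num]
            print(
                f"reactant mol {mol_idx} atom {atom_idx} -> "
                f"product mol {prod_mol} atom {prod_atom}"
            )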
def get_local_global_atom_index_mapping(
molecules: List[MoleculeEntry],
) -> Tuple[List[str], List[Bond], List[List[int]], List[Tuple[int, int]]]:
"""
Map the local and global indices of atoms in a sequence of molecules.
This is a utility function for `get_reaction_atom_mapping()`.
Think of this as combining a sequence of molecules into a single molecule and then
relabelling the atom index in each mol to form a consecutive global index in the
combined molecule.
Local indices for atoms in each mol are [0, ..., N-1], where N is the number of
    atoms in the corresponding molecule.
Global indices for atoms in the 1st mol is [0, ..., N1-1],
in the 2nd mol is [N1, ..., N1+N2-1],
in the 3rd mol is [N1+N2, ..., N1+N2+N3-1]
...
where N1, N2, and N3 are the number of atoms in molecules 1, 2, and 3.
Args:
molecules: A sequence of molecule entry.
Returns:
global_species: species of atoms in the combined molecule.
global_bonds: all bonds in the combine molecule; each bond is specified by a
tuple of global atom index.
local_to_global: local atom index to global atom index. Each inner list holds
the global atom indexes of a molecule. E.g. local_to_global[0][2] gives 4,
meaning atom 2 of molecule 0 has a global index of 4.
global_to_local: global atom index to local atom index. Each tuple
(mol_index, atom_index) is for one atom, with `mol_index` the index of the
molecule from which the atom is from and `atom_index` the local index of the
            atom in the molecule. E.g. global_to_local[4] gives the tuple (0, 2), meaning the atom with
global index 4 corresponds to atom 2 in molecule 0.
"""
global_species = []
global_bonds = []
local_to_global = []
global_to_local = []
n = 0
for i, m in enumerate(molecules):
global_species.extend(m.species)
bonds = [(b[0] + n, b[1] + n) for b in m.bonds]
global_bonds.extend(bonds)
mp_l2g = [j + n for j in range(m.num_atoms)]
local_to_global.append(mp_l2g)
mp_g2l = [(i, j) for j in range(m.num_atoms)]
global_to_local.extend(mp_g2l)
n += m.num_atoms
return global_species, global_bonds, local_to_global, global_to_local
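# A small self-check of the index relabelling described above. MoleculeEntry
# construction is bypassed with SimpleNamespace stand-ins that expose only the
# attributes this helper actually reads (species, bonds, num_atoms); the water
# and hydroxide values are purely illustrative.
if __name__ == "__main__":
    from types import SimpleNamespace
    water = SimpleNamespace(species=["O", "H", "H"], bonds=[(0, 1), (0, 2)], num_atoms=3)
    hydroxide = SimpleNamespace(species=["O", "H"], bonds=[(0, 1)], num_atoms=2)
    species, bonds, l2g, g2l = get_local_global_atom_index_mapping([water, hydroxide])
    assert species == ["O", "H", "H", "O", "H"]
    assert bonds == [(0, 1), (0, 2), (3, 4)]
    assert l2g == [[0, 1, 2], [3, 4]]
    assert g2l == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]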
def solve_integer_programing(
reactant_species: List[str],
product_species: List[str],
reactant_bonds: List[Bond],
product_bonds: List[Bond],
msg: bool = True,
threads: Optional[int] = None,
) -> Tuple[int, List[Union[int, None]], List[Union[int, None]]]:
"""
Solve an integer programming problem to get atom mapping between reactants and
products.
This is a utility function for `get_reaction_atom_mapping()`.
Args:
reactant_species: species string of reactant atoms
product_species: species string of product atoms
reactant_bonds: bonds in reactant
product_bonds: bonds in product
msg: whether to show the solver running message to stdout.
threads: number of threads for the solver. `None` to use default.
Returns:
objective: minimized objective value. This corresponds to the number of changed
bonds (both broken and formed) in the reaction.
r2p_mapping: mapping of reactant atom to product atom, e.g. r2p_mapping[0]
giving 3 means that reactant atom 0 maps to product atom 3. A value of
`None` means a mapping cannot be found for the reactant atom.
p2r_mapping: mapping of product atom to reactant atom, e.g. p2r_mapping[3]
giving 0 means that product atom 3 maps to reactant atom 0. A value of
`None` means a mapping cannot be found for the product atom.
Reference:
`Stereochemically Consistent Reaction Mapping and Identification of Multiple
Reaction Mechanisms through Integer Linear Optimization`,
J. Chem. Inf. Model. 2012, 52, 84–92, https://doi.org/10.1021/ci200351b
"""
atoms = list(range(len(reactant_species)))
# init model and variables
model = Model(name="Reaction_Atom_Mapping", sense=MINIMIZE, solver_name=CBC)
model.emphasis = 1
if threads is not None:
model.threads = threads
if msg:
model.verbose = 1
else:
model.verbose = 0
y_vars = {
(i, k): model.add_var(var_type=BINARY, name=f"y_{i}_{k}")
for i in atoms
for k in atoms
}
alpha_vars = {
(i, j, k, l): model.add_var(var_type=BINARY, name=f"alpha_{i}_{j}_{k}_{l}")
for (i, j) in reactant_bonds
for (k, l) in product_bonds
}
# add constraints
# constraint 2: each atom in the reactants maps to exactly one atom in the products
# constraint 3: each atom in the products maps to exactly one atom in the reactants
for i in atoms:
model += xsum([y_vars[(i, k)] for k in atoms]) == 1
for k in atoms:
model += xsum([y_vars[(i, k)] for i in atoms]) == 1
# constraint 4: allows only atoms of the same type to map to one another
for i in atoms:
for k in atoms:
if reactant_species[i] != product_species[k]:
model += y_vars[(i, k)] == 0
# constraints 5 and 6: define each alpha_ijkl variable, permitting it to take the
# value of one only if the reactant bond (i,j) maps to the product bond (k,l)
for (i, j) in reactant_bonds:
for (k, l) in product_bonds:
model += alpha_vars[(i, j, k, l)] <= y_vars[(i, k)] + y_vars[(i, l)]
model += alpha_vars[(i, j, k, l)] <= y_vars[(j, k)] + y_vars[(j, l)]
# create objective
obj1 = xsum(
1 - xsum(alpha_vars[(i, j, k, l)] for (k, l) in product_bonds)
for (i, j) in reactant_bonds
)
obj2 = xsum(
1 - xsum(alpha_vars[(i, j, k, l)] for (i, j) in reactant_bonds)
for (k, l) in product_bonds
)
obj = obj1 + obj2
# solve the problem
try:
model.objective = obj
model.optimize()
except Exception:
raise ReactionMappingError("Failed solving integer programming.")
if not model.num_solutions:
raise ReactionMappingError("Failed solving integer programming.")
# get atom mapping between reactant and product
r2p_mapping = [None for _ in atoms] # type: List[Union[int, None]]
p2r_mapping = [None for _ in atoms] # type: List[Union[int, None]]
for (i, k), v in y_vars.items():
if v.x == 1:
r2p_mapping[i] = k
p2r_mapping[k] = i
objective = model.objective_value # type: int
return objective, r2p_mapping, p2r_mapping
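# A hedged sanity check of the integer program on a trivial identity "reaction"
# (water -> water), where an optimal mapping preserves every bond and the number
# of bond changes should be zero. Running it requires the `mip` package with the
# CBC backend imported at the top of this module; note the solver may return the
# objective as a float.
if __name__ == "__main__":
    species = ["O", "H", "H"]
    bonds = [(0, 1), (0, 2)]
    n_changes, r2p, p2r = solve_integer_programing(species, species, bonds, bonds, msg=False)
    assert n_changes == 0
    assert sorted(r2p) == [0, 1, 2] and sorted(p2r) == [0, 1, 2]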
def get_atom_mapping_no_bonds(
reactant_species: List[str],
product_species: List[str],
reactant_bonds: List[Bond],
product_bonds: List[Bond],
) -> Tuple[int, List[int], List[int]]:
"""
    Get the atom mapping for a reaction in which the reactants or the products (or
    both) have no bonds. For example, the reaction C-O -> C + O.
    This is a complementary function to `solve_integer_programing()`, which cannot
    handle the case where the reactants or the products have no bonds.
The arguments and returns are the same as `solve_integer_programing()`.
"""
if len(reactant_bonds) != 0 and len(product_bonds) != 0:
raise ReactionMappingError(
"Expect either reactants or products has 0 bonds, but reactants has "
f"{len(reactant_bonds)} and products has {len(product_bonds)}."
)
# the only thing we need to do is to match species
product_species_to_index = defaultdict(list)
for i, s in enumerate(product_species):
product_species_to_index[s].append(i)
r2p_mapping = []
for s in reactant_species:
r2p_mapping.append(product_species_to_index[s].pop())
p2r_mapping = [r2p_mapping.index(i) for i in range(len(product_species))]
# objective, i.e. number of bond change
objective = abs(len(reactant_bonds) - len(product_bonds))
return objective, r2p_mapping, p2r_mapping
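# A minimal example of the corner case handled above: a single bond breaking into
# two free atoms (C-O -> C + O). The product side has no bonds, so the
# species-matching fallback is used instead of the integer program.
if __name__ == "__main__":
    n_changes, r2p, p2r = get_atom_mapping_no_bonds(
        reactant_species=["C", "O"],
        product_species=["C", "O"],
        reactant_bonds=[(0, 1)],
        product_bonds=[],
    )
    assert n_changes == 1
    assert r2p == [0, 1] and p2r == [0, 1]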
def generate_atom_mapping_1_1(
node_mapping: Dict[int, int]
) -> Tuple[AtomMappingDict, AtomMappingDict]:
"""
Generate rdkit style atom mapping for reactions with one reactant and one product.
For example, given `node_mapping = {0:2, 1:0, 2:1}`, which means atoms 0, 1,
    and 2 in the reactant map to atoms 2, 0, and 1 in the product, respectively,
the atom mapping number for reactant atoms are simply set to their index,
and the atom mapping number for product atoms are determined accordingly.
    As a result, this function gives `({0:0, 1:1, 2:2}, {0:1, 1:2, 2:0})` as the output.
Atoms in the reactant and product with the same atom mapping number
    (keys in the dicts) correspond to each other.
Args:
node_mapping: node mapping from reactant to product
Returns:
reactant_atom_mapping: rdkit style atom mapping for the reactant
product_atom_mapping: rdkit style atom mapping for the product
"""
reactant_atom_mapping = {k: k for k in node_mapping}
product_atom_mapping = {v: k for k, v in node_mapping.items()}
return reactant_atom_mapping, product_atom_mapping
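# A quick check mirroring the docstring example above; the input node mapping is
# the illustrative one from the docstring, not a computed result.
if __name__ == "__main__":
    reactant_mp, product_mp = generate_atom_mapping_1_1({0: 2, 1: 0, 2: 1})
    assert reactant_mp == {0: 0, 1: 1, 2: 2}
    assert product_mp == {0: 1, 1: 2, 2: 0}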
class ReactionMappingError(Exception):
def __init__(self, msg=None):
super().__init__(msg)
self.msg = msg
| 38.166667
| 90
| 0.664413
| 125
| 0.007476
| 0
| 0
| 0
| 0
| 0
| 0
| 9,461
| 0.565815
|