text stringlengths 8 6.05M |
|---|
from equadratures import *
import numpy as np
import matplotlib.pyplot as plt
def function(x):
    """Bivariate rational test function with a peak near (0.1, -0.1)."""
    denom = 2.0 + 16 * (x[0] - 0.1) ** 2 + 25 * (x[1] + 0.1) ** 2
    return 1.0 / denom
def tensor():
    """Build a full tensor-grid polynomial approximation of `function` in 2D.

    Fits a degree-100 tensor product of uniform (Legendre) expansions via
    numerical integration, then saves two figures: (a) the quadrature point
    locations and (b) the log10 magnitude of the coefficients on the
    multi-index grid.
    """
    order = 100
    # Uniform parameters on [-1, 1]; `order` sets the per-dimension degree.
    x1 = Parameter(lower=-1, upper=1, order=order, distribution='Uniform')
    x2 = Parameter(lower=-1, upper=1, order=order, distribution ='Uniform')
    tensor = Basis('tensor-grid')
    myPoly = Poly([x1, x2], tensor, method='numerical-integration')
    myPoly.set_model(function)
    # Figure (a): scatter of the 2D tensor-grid quadrature points.
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    plt.scatter(myPoly.get_points()[:,0], myPoly.get_points()[:,1] , marker='o', s=2, color='tomato')
    plt.xlabel('$s_1$', fontsize=13)
    plt.ylabel('$s_2$', fontsize=13)
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=13)
    plt.savefig('../Figures/tutorial_8_fig_a.png' , dpi=200, bbox_inches='tight', pad_inches=0.1)
    # Figure (b): coefficient magnitudes (log10 of |z|) over the multi-indices.
    x, y, z, max_order = vector_to_2D_grid(myPoly.get_coefficients(), myPoly.basis.get_elements() )
    G = np.log10(np.abs(z))
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    cax = plt.scatter(x, y, s=30, marker='o', c=G, cmap='jet', alpha=1.0, vmin=-16.0, vmax=0.)
    plt.xlim(-0.5, max_order)
    plt.ylim(-0.5, max_order)
    plt.xlabel('$i_1$', fontsize=13)
    plt.ylabel('$i_2$', fontsize=13)
    cbar = plt.colorbar(extend='neither', spacing='proportional',
                        orientation='vertical', shrink=0.8, format="%.0f")
    cbar.ax.tick_params(labelsize=13)
    plt.savefig('../Figures/tutorial_8_fig_b.png', dpi=300, bbox_inches='tight')
def sparse():
|
from MarkovBuilder import *
from restore import *
from itertools import *
import numpy as np
from eval import *
class MusicMatrix:
    """Order-N Markov model over combined (note, interval) states.

    Wraps a MarkovBuilder whose state space is the Cartesian product of
    pitch names (A..G, octaves 3-5, with flat/double-flat/sharp/double-sharp
    variants) and note durations.
    """
    def __init__(self, order):
        # Sliding window holding the `order` most recent states.
        self.previous_state = [None for i in range(order)]
        self.order = order
        self.current_note_num = 0 # how many notes have been fed in so far
        # All pitch levels considered; only those needed by the examples are
        # included here -- edit this block to change the state space.
        self.note_list=[]
        for level in range(3,6):
            for c in range(7):
                note=chr(c+ord('A'))
                self.note_list.append(note+str(level))
                self.note_list.append(note+'-'+str(level))
                self.note_list.append(note+'--'+str(level))
                self.note_list.append(note+'#'+str(level))
                self.note_list.append(note+'##'+str(level))
        #print(len(self.note_list))
        self.interval_list = ["whole", "half", "quarter", "eighth", "16th", "32nd", "64th"]
        # Combined states: Cartesian product of pitch and duration
        # (105 pitches x 7 durations = 735 states).
        self.state_list = list(product(self.note_list, self.interval_list))
        self.state_markov_matrix = MarkovBuilder(self.state_list, order)
    # add() is only used to initialise the Markov transition matrix.
    def reset(self, to_note, to_interval):
        """Seed the sliding window before `order` states have been seen.

        NOTE(review): with current_note_num == 0 this writes index -1 (the
        last slot); harmless for order 1 but looks suspect for higher
        orders -- confirm.
        """
        to_state = (to_note, to_interval)
        #print(to_state)
        self.previous_state[self.current_note_num-1] = to_state
        self.current_note_num += 1
    def add(self, to_note, to_interval):
        """Feed one (note, interval) observation into the transition counts."""
        to_state = (to_note, to_interval)
        if self.current_note_num < self.order:
            # Still filling the initial context window.
            self.reset(to_note, to_interval)
            return
        self.state_markov_matrix.add(self.previous_state, to_state)
        self.previous_state = self.previous_state[1:] + [to_state]
    # from_note / from_interval are lists of `order` elements; used when
    # generating music.
    def next_state(self, from_note, from_interval):
        """Sample the next state given the `order` previous notes/intervals."""
        return self.state_markov_matrix.next_value(list(zip(from_note, from_interval)))
    def get_matrix(self):
        """Return the transition matrix normalised into probabilities."""
        prob_matrix = self.state_markov_matrix.matrix.astype(float)
        state_num = len(self.state_list)
        # Marginal frequency of previous states, used as a fallback
        # distribution for rows with no observed transitions.
        self.previous_state_cnt = np.array(self.state_markov_matrix.previous_state, dtype=float)
        self.previous_state_cnt = self.previous_state_cnt/np.sum(self.previous_state_cnt)
        self.enum(state_num, 0, prob_matrix)
        return prob_matrix
    def enum(self, state_num, depth, matrix):
        """Recursively normalise the last axis of the transition tensor."""
        if depth == self.order:
            # If every transition count in this row is zero, fall back to
            # the marginal previous-state frequencies.
            if np.sum(matrix) == 0:
                for i in range(state_num):
                    matrix[i] = self.previous_state_cnt[i]
            else:
                s = np.sum(matrix)
                for i in range(state_num):
                    matrix[i] = matrix[i]/s
            return
        else:
            for i in range(state_num):
                self.enum(state_num, depth + 1, matrix[i])
class MusicGenerator:
    """Walk a trained MusicMatrix to emit successive (note, interval) states.

    The generator's order is taken from the supplied matrix; it must be
    seeded with `order` initial notes and intervals, which form the rolling
    context for sampling.
    """
    def __init__(self, music_matrix, initial_note, initial_interval):
        self.music_matrix = music_matrix
        self.previous_note = initial_note
        self.previous_interval = initial_interval
        self.order = music_matrix.order
        self.current_note_num = 0  # how many notes have been emitted so far
    def next_state(self):
        """Sample the next state and slide the context window forward."""
        emitted = self.music_matrix.next_state(self.previous_note, self.previous_interval)
        note, interval = emitted[0], emitted[1]
        self.previous_note = self.previous_note[1:] + [note]
        self.previous_interval = self.previous_interval[1:] + [interval]
        return emitted
def Markov_Generate_Single_Music(note_list,interval_list,tune,save_dir,TOPK,LOOPS):
    """Train an order-1 Markov chain on one melody and generate variations.

    Trains on (note_list, interval_list), writes the transition matrix to
    save_dir + 'matrix.txt', then samples LOOPS candidate melodies of 100
    notes each, keeps the TOPK highest-scoring per evaluate(), and saves
    them (plus the original melody) with save_stream().
    """
    order = 1
    # This part trains the Markov matrix.
    markov_instance = MusicMatrix(order)
    length = len(interval_list)
    for i in range(length):
        # Feed one (note, interval) pair at a time; all pairs train the chain.
        markov_instance.add(note_list[i], interval_list[i])
    # Fetch the normalised transition probability matrix.
    Mat=markov_instance.get_matrix()
    with open(save_dir+'matrix.txt','w') as fp:
        # N is hard-coded to the state-space size: 105 pitches x 7 durations.
        N=735
        valid=[0]*N
        statestr=''
        # Header line: every state that actually occurs in the input melody.
        for x in markov_instance.note_list:
            for y in markov_instance.interval_list:
                for i in range(length):
                    if(x==note_list[i] and y == interval_list[i]):
                        statestr+=str((x,y))+','
                        break
        print(statestr[:-1],file=fp)
        # Mark destination states with non-negligible transition probability.
        for i in range(N):
            for j in range(N):
                if(Mat[i][j]>0.00001):
                    valid[j]=1
        # Write the sub-matrix restricted to the valid states.
        for i in range(N):
            if(valid[i]):
                strtemp=''
                for j in range(N):
                    if(valid[j]):
                        strtemp+=str(Mat[i][j])+','
                print(strtemp[:-1],file=fp)
    # (The raw count matrix differs from the transition matrix in two ways:
    # it is unnormalised, and all-zero rows are replaced in the transition
    # matrix by the marginal previous-state frequencies.)
    print('Generating '+save_dir)
    # Generation: initialise a generator from the trained markov_instance,
    # seeding it with `order` notes and intervals (order == chain order).
    #generator = MusicGenerator(markov_instance, note_list[:order], interval_list[:order])
    best=[0.0]*TOPK
    # NOTE(review): [stream.Stream()]*TOPK repeats the SAME Stream object;
    # entries are overwritten before saving, so this only matters if fewer
    # than TOPK candidates ever score above 0 -- confirm.
    result=[stream.Stream()]*TOPK
    for loop in range(LOOPS):
        generator=MusicGenerator(markov_instance, note_list[:order], interval_list[:order])
        note_gen=[]
        interval_gen=[]
        for i in range(0,100):
            # Repeatedly ask the generator for the next note.
            next_state = generator.next_state()
            note_gen.append(next_state[0])
            interval_gen.append(next_state[1])
        ret=evaluate(note_gen,interval_gen,note_list,interval_list)
        # Insert into the TOPK leaderboard, shifting lower entries down.
        for i in range(TOPK):
            if(ret > best[i]):
                print(loop,ret)
                pos=TOPK-1
                while(not pos==i):
                    best[pos]=best[pos-1]
                    result[pos]=result[pos-1]
                    pos-=1
                best[i]=ret
                result[i]=generate_stream(note_gen, interval_gen, tune)
                break
        if(loop % 100==0):
            print('Loop',loop,best)
    for i in range(TOPK):
        save_stream(result[i],save_dir,'score='+str(best[i]))
    ori_stream=generate_stream(note_list, interval_list, tune)
    save_stream(ori_stream,save_dir,'origin')
def Markov_Generate_Mixed_Music(note_list,interval_list,note_list2,interval_list2,tune,save_dir,TOPK,LOOPS):
    """Train one order-1 Markov chain on two melodies and generate blends.

    Same pipeline as Markov_Generate_Single_Music, but the chain is trained
    on both melodies and candidates are scored with evaluate2() against
    both.  NOTE(review): note_list/interval_list are extended in place with
    the second melody, mutating the caller's lists -- confirm intentional.
    """
    note_list1=note_list
    interval_list1=interval_list
    order = 1
    # Train the Markov matrix on the first melody...
    markov_instance = MusicMatrix(order)
    length = len(interval_list)
    for i in range(length):
        # Feed one (note, interval) pair at a time; all pairs train the chain.
        markov_instance.add(note_list[i], interval_list[i])
    # ...then on the second melody, resetting the context window at its
    # start so no transition bridges the two pieces.
    for i in range(len(note_list2)):
        note_list.append(note_list2[i])
        interval_list.append(interval_list2[i])
        if(i == 0):
            markov_instance.reset(note_list2[i], interval_list2[i])
        else:
            markov_instance.add(note_list2[i], interval_list2[i])
    # Fetch the normalised transition probability matrix.
    Mat=markov_instance.get_matrix()
    with open(save_dir+'matrix.txt','w') as fp:
        # N is hard-coded to the state-space size: 105 pitches x 7 durations.
        N=735
        valid=[0]*N
        statestr=''
        # NOTE(review): `length` covers only the first melody here, so the
        # header omits states introduced solely by the second melody -- confirm.
        for x in markov_instance.note_list:
            for y in markov_instance.interval_list:
                for i in range(length):
                    if(x==note_list[i] and y == interval_list[i]):
                        statestr+=str((x,y))+','
                        break
        print(statestr[:-1],file=fp)
        # Mark destination states with non-negligible transition probability.
        for i in range(N):
            for j in range(N):
                if(Mat[i][j]>0.00001):
                    valid[j]=1
        # Write the sub-matrix restricted to the valid states.
        for i in range(N):
            if(valid[i]):
                strtemp=''
                for j in range(N):
                    if(valid[j]):
                        strtemp+=str(Mat[i][j])+','
                print(strtemp[:-1],file=fp)
    # (The raw count matrix differs from the transition matrix: it is
    # unnormalised, and all-zero rows become marginal state frequencies.)
    print('Generating '+save_dir)
    # Generation: seed a generator from the trained instance with `order`
    # notes and intervals (order == chain order).
    #generator = MusicGenerator(markov_instance, note_list[:order], interval_list[:order])
    best=[0.0]*TOPK
    # NOTE(review): same shared-Stream caveat as in the single-melody variant.
    result=[stream.Stream()]*TOPK
    for loop in range(LOOPS):
        generator=MusicGenerator(markov_instance, note_list[:order], interval_list[:order])
        note_gen=[]
        interval_gen=[]
        for i in range(0,100):
            # Repeatedly ask the generator for the next note.
            next_state = generator.next_state()
            note_gen.append(next_state[0])
            interval_gen.append(next_state[1])
        ret=evaluate2(note_gen,interval_gen,note_list1,interval_list1,note_list2,interval_list2)
        # Insert into the TOPK leaderboard, shifting lower entries down.
        for i in range(TOPK):
            if(ret > best[i]):
                print(loop,ret)
                pos=TOPK-1
                while(not pos==i):
                    best[pos]=best[pos-1]
                    result[pos]=result[pos-1]
                    pos-=1
                best[i]=ret
                result[i]=generate_stream(note_gen, interval_gen, tune)
                break
        if(loop % 100==0):
            print('Loop',loop,best)
    for i in range(TOPK):
        save_stream(result[i],save_dir,'score='+str(best[i]))
    ori_stream=generate_stream(note_list, interval_list, tune)
    save_stream(ori_stream,save_dir,'origin')
if __name__ == '__main__':
    # All example invocations are currently disabled (kept inside
    # triple-quoted strings).  Uncomment a block to regenerate the
    # corresponding result/ directory.
    '''
    Markov_Generate_Single_Music(['A3','C4','D4','C4', 'D4','C4','D4','F4', 'E4','D4','D4','C4','D4', 'D4'],
    ['quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter', 'quarter','quarter','quarter','quarter']
    ,'4/4','result/music1/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['G4','B-4','A4','B-4', 'G4','D4','A4','F#4', 'D4','G4','E-4','C4','A3', 'D4','B-3', 'G3', 'C4', 'A3', 'D4', 'B-3','A3','G3'],
    ['quarter','eighth','eighth','quarter','eighth','eighth','quarter','eighth','eighth','half', 'quarter','eighth','eighth','quarter','eighth','eighth','eighth','eighth','quarter','quarter','eighth','eighth']
    ,'4/4','result/music2/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['G4','F#4','G4','A4', 'B-4','A4','F4','D4', 'G4','G4','F#4','G4','A4', 'B4','A4', 'G4', 'G4'],
    ['quarter','quarter','quarter','quarter','half','half','half','half','half','quarter', 'quarter','quarter','quarter','half','half','half','half']
    ,'3/2','result/music3/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['C4','C4','G4','G4', 'A4','A4','G4','F4', 'F4','E4','E4','D4','D4', 'C4'],
    ['quarter','quarter','quarter','quarter','quarter','quarter','half','quarter','quarter','quarter', 'quarter','quarter','quarter','half']
    ,'2/4','result/music4/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['G4','G4','E4','G4', 'G4','C5','G4','G4', 'F4','E4','C4','C4','B3','B3', 'B3','C4','E4','E4'],
    ['quarter','quarter','quarter','quarter','quarter','quarter','quarter','half','quarter','half', 'quarter','eighth','eighth','quarter','quarter','quarter','quarter','quarter']
    ,'3/4','result/music5/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['D4','E4','F#4','G4', 'A4','B-4','C5','D5', 'G5','F#5','G5','D5'],
    ['eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','half']
    ,'4/4','result/music6/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['E4','E4','G4','A4', 'C5','C5','A4','G4', 'G4','A4','G4'],
    ['quarter','eighth','eighth','eighth','eighth','eighth','eighth','quarter','eighth','eighth', 'half']
    ,'4/4','result/music7/',TOPK=3,LOOPS=1024)
    Markov_Generate_Single_Music(['E4','G4','G4','G4', 'E4','A4','A4','B4','A4', 'A4','G4','C5','C5','C5','A4','C5','A4','G4'],
    ['eighth','eighth','quarter','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','quarter','eighth', 'eighth','quarter','eighth','eighth','half']
    ,'2/4','result/music8/',TOPK=3,LOOPS=1024)
    '''
    '''
    Markov_Generate_Mixed_Music(['E4','E4','G4','A4', 'C5','C5','A4','G4', 'G4','A4','G4'],
    ['quarter','eighth','eighth','eighth','eighth','eighth','eighth','quarter','eighth','eighth', 'half']
    ,['D4','E4','F#4','G4', 'A4','B-4','C5','D5', 'G5','F#5','G5','D5'],
    ['eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','half'],'4/4','result/music67/',TOPK=3,LOOPS=1024)
    Markov_Generate_Mixed_Music(['A3','C4','D4','C4', 'D4','C4','D4','F4', 'E4','D4','D4','C4','D4', 'D4'],
    ['quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter', 'quarter','quarter','quarter','quarter']
    ,['D4','E4','F#4','G4', 'A4','B-4','C5','D5', 'G5','F#5','G5','D5'],
    ['eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','half'],'4/4','result/music16/',TOPK=3,LOOPS=1024)
    Markov_Generate_Mixed_Music(['G4','B-4','A4','B-4', 'G4','D4','A4','F#4', 'D4','G4','E-4','C4','A3', 'D4','B-3', 'G3', 'C4', 'A3', 'D4', 'B-3','A3','G3'],
    ['quarter','eighth','eighth','quarter','eighth','eighth','quarter','eighth','eighth','half', 'quarter','eighth','eighth','quarter','eighth','eighth','eighth','eighth','quarter','quarter','eighth','eighth']
    ,['D4','E4','F#4','G4', 'A4','B-4','C5','D5', 'G5','F#5','G5','D5'],
    ['eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','eighth','half'],'4/4','result/music26/',TOPK=3,LOOPS=1024)
    Markov_Generate_Mixed_Music(['G4','B-4','A4','B-4', 'G4','D4','A4','F#4', 'D4','G4','E-4','C4','A3', 'D4','B-3', 'G3', 'C4', 'A3', 'D4', 'B-3','A3','G3'],
    ['quarter','eighth','eighth','quarter','eighth','eighth','quarter','eighth','eighth','half', 'quarter','eighth','eighth','quarter','eighth','eighth','eighth','eighth','quarter','quarter','eighth','eighth']
    ,['A3','C4','D4','C4', 'D4','C4','D4','F4', 'E4','D4','D4','C4','D4', 'D4'],
    ['quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter','quarter', 'quarter','quarter','quarter','quarter'],'4/4','result/music12/',TOPK=3,LOOPS=1024)
    '''
|
#!/usr/bin/env python
"""
Calculate the consecutive sum of
each digit passed in as parameter.
"""
def consecutive_sum(number):
    """Return the sum of the decimal digits of *number*.

    Numbers below 10 (including negatives) are returned unchanged.
    """
    total = 0
    while number >= 10:
        number, digit = divmod(number, 10)
        total += digit
    return total + number
def main():
    """Smoke-test consecutive_sum against known digit sums."""
    checks = ((123456789, 45), (12, 3), (999999999, 81))
    for value, expected in checks:
        assert consecutive_sum(value) == expected
    print("Passed all tests!")
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
* :google:`Spatially Efficient Tree Layout for GPU Ray-tracing of Constructive Solid Geometry Scenes`
PCT 2016
Parallel Computing Technologies
Proceedings of the 10th Annual International Scientific Conference on Parallel Computing Technologies
Arkhangelsk, Russia, March 29-31, 2016.
* http://ceur-ws.org/Vol-1576/
* http://ceur-ws.org/Vol-1576/090.pdf
D.Y. Ulyanov(1,2)
D.K. Bogolepov(2)
V.E. Turlapov(1)
(1) University of Nizhniy Novgorod
(2) OpenCASCADE https://www.opencascade.com
* :google:`Ulyanov Bogolepov Turlapov`
* https://otik.uk.zcu.cz/bitstream/handle/11025/10619/Bogolepov.pdf?sequence=1
* denisbogol@gmail.com Denis Bogolepov
* danila-ulyanov@ya.ru Danila Ulyanov
* vadim.turlapov@gmail.com Vadim Turlapov
* https://github.com/megaton?tab=repositories
* https://github.com/megaton/csg-tools
* https://github.com/megaton/csg-tools/blob/master/src/csgviewer.cpp
* https://github.com/megaton/csg-format/blob/master/CSG-format.md
Author
Danila Ya. Ulyanov
Journal of instrument engineering
pribor.ifmo.ru/en/person/5983/ulyanov_danila...
Denis K. Bogolepov,
Dmitry P. Sopin,
Danila Ya. Ulyanov,
Vadim E. Turlapov
CONSTRUCTION OF SAH BVH TREES FOR RAY TRACING WITH THE USE OF GRAPHIC PROCESSORS
Other ray trace of CSG tree implementations
* https://github.com/POV-Ray/povray/search?q=CSG
* https://cadcammodelling.wordpress.com/2011/01/23/13-steps-to-perform-csg-tree-raycasting/
* https://github.com/search?p=5&q=CSG&ref=searchresults&type=Repositories
* :google:`ray trace csg tree`
* https://www.clear.rice.edu/comp360/lectures/old/Solidstext.pdf
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
from intersect import intersect_primitive, Node, Ray, UNION, INTERSECTION, DIFFERENCE, BOX, SPHERE, EMPTY, desc
from boolean import boolean_table
from boolean import desc_state, Enter, Exit, Miss
from boolean import desc_acts, RetMiss, RetL, RetR, RetLIfCloser, RetRIfCloser, LoopL, LoopLIfCloser, LoopR, LoopRIfCloser, FlipR
# actions
# Traversal actions, encoded as independent bits so several can be OR-ed
# into a single action word.
GotoLft = 0x1 << 1
GotoRgh = 0x1 << 2
LoadLft = 0x1 << 3
LoadRgh = 0x1 << 4
Compute = 0x1 << 5
SaveLft = 0x1 << 6
Start = 0x1 << 7
Return = 0x1 << 8
def desc_action(action):
    """Return a space-terminated label for each action bit set in *action*;
    "NONE" when *action* is None."""
    if action is None:
        return "NONE"
    labels = (
        (GotoLft, "GotoLft "),
        (GotoRgh, "GotoRgh "),
        (LoadLft, "LoadLft "),
        (LoadRgh, "LoadRgh "),
        (Compute, "Compute "),
        (SaveLft, "SaveLft "),
        (Start, "Start "),
        (Return, "Return "),
    )
    return "".join(text for bit, text in labels if action & bit)
def intersectBox(node):
    """Bounding-box pre-test placeholder: always True, so every node is
    tested directly against its primitive (bbox optimization skipped)."""
    return True
def dump(label):
    # NOTE(review): reads module globals `action`, `tl`, `nl`, `tr`, `nr`,
    # none of which are defined at module scope in this file -- calling this
    # as-is raises NameError.  Presumably a debug leftover; confirm before use.
    return "%s %s tl:%s nl:%s tr:%s nr:%s " % (label, desc_action(action),repr(tl),repr(nl),repr(tr),repr(nr))
class Stack(object):
    """Minimal LIFO stack with optional logged push/pop.

    `desc_` is a one-argument formatter used for debug logging and by
    `desc()`.  NOTE(review): debug logging calls a module-level `log` that
    is not defined in this file as shown -- confirm it is set up elsewhere.
    """
    def __init__(self, name, desc_ ):
        self.name = name
        self.desc_ = desc_
        self._stack = []
    def reset(self):
        """Drop all stacked items."""
        self._stack = []
    def push(self, obj, debug=False):
        """Append *obj*, logging it when *debug* and a formatter are set."""
        if debug and self.desc_ is not None:
            log.info("%s.push %s " % (self.name, self.desc_(obj)))
        self._stack.append(obj)
    def pop(self, debug=False):
        """Remove and return the top item; underflow is a hard assert."""
        if not self._stack:
            assert 0
        item = self._stack.pop()
        if debug and self.desc_ is not None:
            log.info("%s.pop -> %s rest: %s " % (self.name, self.desc_(item), self.desc() ))
        return item
    def count(self):
        """Number of stacked items."""
        return len(self._stack)
    def desc(self):
        """Comma-joined formatted rendering of the stack contents."""
        return ",".join(map(self.desc_,self._stack))
class CSG(object):
    """Compare recursive vs iterative CSG boolean ray intersection.

    Holds per-ray scratch state (current node, left/right child hit
    results, tmin), the action/tmin/prim stacks used by the iterative
    algorithm, and traversal/action records for both algorithms so they
    can be compared.
    """
    def __init__(self, level=1):
        self.level = level  # verbosity level returned by the `debug` property
        self.actionStack = Stack("actionStack", desc_action)
        # Stack entry formatters render t-values as "%5.2f", mapping
        # falsy values (None/0) to -1 for display.
        self.tminStack = Stack("tminStack", lambda tmin:"%5.2f" % (tmin if tmin else -1))
        self.primStack = Stack("primStack", lambda _:"%5.2f:%s" % (_[0] if _[0] else -1, _[2]))
        self.typ = None  # "RECURSIVE" or "ITERATIVE" -- presumably set by the driver; confirm
        self._traverse = {"RECURSIVE":[], "ITERATIVE":[] }
        self._actions = {"RECURSIVE":[], "ITERATIVE":[] }
        self.reset()
    def _get_debug(self):
        # Verbose only for rays whose index was listed via reset(debug=...).
        return self.level if self.iray in self._debug else 0
    debug = property(_get_debug)
def reset(self, ray=None, iray=0, debug=[]):
self.ray = ray
self.iray = iray
self.count = 0
self.lname = None
self.rname = None
self._debug = debug
self._node = None
self._tl = None
self._tr = None
self._nr = None
self._nl = None
self._tmin = 0
self._prev = None
self._stage = None
self._act = None
if self.typ in self._traverse:
self._traverse[self.typ] = []
self._actions[self.typ] = []
self.actionStack.reset()
self.tminStack.reset()
self.primStack.reset()
    def __repr__(self):
        # One-line state summary: call count, algorithm, action stack,
        # traversal stage, and prev -> current node names.
        return "[%d] %s %s : %s : %s -> %s " % (self.count, self.typ, self.actionStack.desc(), self.stage, self.prevname, self.nodename )
    # Names of the previous/current node, or "-" when unset.
    prevname = property(lambda self:self.prev.name if self.prev else "-")
    nodename = property(lambda self:self.node.name if self.node else "-")
    def _get_act(self):
        return self._act
    def _set_act(self, act):
        self._act = act
    # Last boolean-table action taken (RetMiss/RetL/RetR/Loop* variants).
    act = property(_get_act, _set_act)
    def _get_stage(self):
        """Describe the traversal direction implied by (prev, node).

        Check order matters: downward moves are detected by comparing the
        current node against prev's children first; only then are upward
        moves detected by comparing prev against node's children.
        """
        prev = self._prev
        node = self._node
        if (prev is None):
            stage = "going down tree from prev None"
        elif (node is prev.left):
            stage = "going left down tree"
        elif (node is prev.right):
            stage = "going right down tree"
        elif node is not None and prev is node.left:
            stage = "up from left child"
        elif node is not None and prev is node.right:
            stage = "up from right child"
        else:
            stage = "other"
        pass
        return stage
    def _set_node(self, n):
        """Move to node *n*, remembering the previous node, recording the
        visit in the active algorithm's traversal log, and refreshing the
        stage description."""
        if n is None:
            pass
        #if self.debug:
        #log.warning("_set_node to None from prev %s act %s " % (self._node,desc_action(self.act)) )
        #assert n
        self._prev = self._node
        self._node = n
        self.record_traversal(n)
        self._stage = self._get_stage()
        if self.debug > 2:
            log.info("%s _set_node iray %d : %s -> %s : %s " % (self.typ, self.iray, self.prevname, self.nodename, self.stage))
        #pass
    def _get_node(self):
        return self._node
    node = property(_get_node, _set_node)
    def record_traversal(self, n):
        # Append a node visit to the per-algorithm traversal record.
        assert self.typ in self._traverse, "bad typ %s " % self.typ
        self._traverse[self.typ].append(n)
    def record_action(self, a):
        # Append an action to the per-algorithm action record.
        assert self.typ in self._traverse, "bad typ %s " % self.typ
        self._actions[self.typ].append(a)
    prev = property(lambda self:self._prev)
    stage = property(lambda self:self._stage)
    def dump_traversal(self):
        """Log the recorded node-visit order for each algorithm."""
        for typ in self._traverse:
            log.info(" (%3d) %20s : %s " % (self.iray, typ, repr(self._traverse[typ]) ))
        pass
def dump_actions(self):
for typ in self._traverse:
log.info(" (%3d) %20s : %s " % (self.iray, typ, repr(map(desc_action,self._actions[typ])) ))
pass
    def compare_traversal(self):
        """Return True when the recursive and iterative traversals visited
        the same node sequence.

        Iterative requires some manual record_traversal for left/right
        primitives in Intersect.
        Note that missers are coming up with different traversal: the recursive traversal
        never returns to root, but the iterative does several times
        """
        if self.debug:
            self.dump_traversal()
            self.dump_actions()
        r = self._traverse["RECURSIVE"]
        i = self._traverse["ITERATIVE"]
        return i == r
    def _set_action(self, a):
        # Record every action transition; `action` drives the iterative loop.
        self._action = a
        self.record_action(a)
        if self.debug > 2:
            log.info("_set_action %s " % desc_action(a))
    def _get_action(self):
        return self._action
    action = property(_get_action, _set_action)
    # Boilerplate property wiring for the intersect scratch state:
    #   tl/nl -- parametric distance and surface normal from the left child
    #   tr/nr -- parametric distance and surface normal from the right child
    #   tmin  -- minimum accepted t for the current (re-)intersection
    def _set_tl(self, tl):
        self._tl = tl
    def _get_tl(self):
        return self._tl
    tl = property(_get_tl, _set_tl)
    def _set_tr(self, tr):
        self._tr = tr
    def _get_tr(self):
        return self._tr
    tr = property(_get_tr, _set_tr)
    def _set_nr(self, nr):
        self._nr = nr
    def _get_nr(self):
        return self._nr
    nr = property(_get_nr, _set_nr)
    def _set_nl(self, nl):
        self._nl = nl
    def _get_nl(self):
        return self._nl
    nl = property(_get_nl, _set_nl)
    def _set_tmin(self, tmin):
        self._tmin = tmin
    def _get_tmin(self):
        return self._tmin
    tmin = property(_get_tmin, _set_tmin)
def classify(self, tt, nn, tmin):
if tt > tmin:
state = Enter if np.dot(nn, self.ray.direction) < 0. else Exit
else:
state = Miss
pass
return state
    def recursive_intersect(self, root, depth=0, tmin=0):
        """
        * minimizing use of member vars makes recursive algorithm easier to understand
        * instead use local vars, which will have different existance at the
        different levels of the recursion
        * can think of member vars as effective globals wrt the recursion
        * loopers result in recursive call repeating the same node, with tmin advanced
        * recursively never traverse the root again ? are always going down with root.left root.right
        never going up ?
        * although the traverse never goes up, completion of the recursive instance calls
        does go back up once hitting primitives in the leaves

        Returns a (t, normal, name, action) tuple for the accepted hit, or
        (None, None, None, RetMiss) on a miss.
        """
        assert root
        assert self.typ == "RECURSIVE"
        if depth == 0:
            self.top = root
        pass
        self.node = root
        # above are just for debug comparison against iterative algo, not used below
        if root.is_primitive:
            return intersect_primitive(root, self.ray, tmin)
        elif root.is_operation:
            # Intersect both children first; the boolean table then decides
            # which side's hit to return or which side to re-intersect.
            tl, nl, lname, lact = self.recursive_intersect(root.left, depth=depth+1, tmin=tmin)
            tr, nr, rname, ract = self.recursive_intersect(root.right, depth=depth+1, tmin=tmin)
            root.rstack = (tl, nl, lname, lact, tr, nr, rname, ract) # for debug comparison with iterative
            loopcount = 0
            looplimit = 10
            while loopcount < looplimit:
                loopcount += 1
                stateL = self.classify(tl, nl, tmin) # tmin_l tmin_r ?
                stateR = self.classify(tr, nr, tmin)
                acts = boolean_table(root.operation, stateL, stateR )
                opr = "%s(%s:%s,%s:%s)" % ( desc[root.operation],lname,desc_state[stateL], rname,desc_state[stateR] )
                # Decompose the action bitfield; the *IfCloser variants also
                # require the corresponding side to hold the nearer hit.
                act_RetMiss = (RetMiss & acts)
                act_RetL = (RetL & acts)
                act_RetR = (RetR & acts)
                act_LoopL = (LoopL & acts)
                act_LoopR = (LoopR & acts)
                act_RetLIfCloser = ((RetLIfCloser & acts) and tl <= tr)
                act_LoopLIfCloser = ((LoopLIfCloser & acts) and tl <= tr)
                act_RetRIfCloser = ((RetRIfCloser & acts) and tr < tl)
                act_LoopRIfCloser = ((LoopRIfCloser & acts) and tr < tl)
                trep = self.trep_fmt(tmin, tl, tr )
                ret = ()
                if act_RetMiss:
                    act = RetMiss
                    ret = None, None, None, act
                elif act_RetL or act_RetLIfCloser:
                    act = RetLIfCloser if act_RetLIfCloser else RetL
                    ret = tl, nl, lname, act
                elif act_RetR or act_RetRIfCloser:
                    act = RetRIfCloser if act_RetRIfCloser else RetR
                    if (FlipR & acts): nr = -nr
                    ret = tr, nr, rname, act
                elif act_LoopL or act_LoopLIfCloser:
                    # Re-intersect the left subtree with tmin advanced to tl.
                    act = LoopLIfCloser if act_LoopLIfCloser else LoopL
                    tl, nl, lname, _ = self.recursive_intersect(root.left, depth=depth+1, tmin=tl)
                elif act_LoopR or act_LoopRIfCloser:
                    # Re-intersect the right subtree with tmin advanced to tr.
                    act = LoopRIfCloser if act_LoopRIfCloser else LoopR
                    tr, nr, rname, _ = self.recursive_intersect(root.right, depth=depth+1, tmin=tr)
                else:
                    log.fatal("[%d] RECURSIVE UNHANDLED acts " % (loopcount))
                    assert 0
                self.act = act
                if self.debug > 1:
                    log.info("(%d)[%d] RECURSIVE %s : %s -> %s : %s " % (self.iray,loopcount,root.name,opr,desc_acts(self.act),trep ))
                if len(ret) > 0:
                    return ret
                else:
                    # only Loop-ers hang aroud here to intersect again with advanced tmin, the rest return up to caller
                    assert act in [LoopLIfCloser, LoopL, LoopRIfCloser, LoopR]
                pass
        else:
            log.fatal(" depth %d root %s root.is_operation %d root.is_primitive %d " % (depth, root,root.is_operation, root.is_primitive) )
            assert 0
        pass
        log.fatal("[%d] RECURSIVE count EXCEEDS LIMIT %d " % (loopcount, looplimit))
        assert 0
        return None, None, None, 0
    def iterative_intersect(self, root):
        """
        Iterative CSG boolean intersection
        * https://www.hackerearth.com/practice/notes/iterative-tree-traversals/

        Drives the same boolean-table decisions as recursive_intersect but
        with explicit action/tmin/prim stacks instead of the call stack.
        NOTE(review): the final self.Return_() is not defined anywhere in
        this file as shown -- confirm it exists elsewhere.
        """
        assert self.typ == "ITERATIVE"
        self.top = root
        self.node = root
        if self.node.is_primitive:
            # Degenerate tree: a lone primitive needs no boolean logic.
            return intersect_primitive(self.node, self.ray, self.tmin)
        self.count = 0
        limit = 10
        self.actionStack.push(Compute)
        #self.actionStack.push(GotoLft)
        self.action = GotoLft
        do_goto = False
        # ignoring the first GotoLft avoids the virtual root and associated termination complications
        while self.actionStack.count() > 0:
            self.count += 1
            #self.action = self.actionStack.pop(debug=self.debug > 2)
            if self.action == SaveLft:
                self.SaveLft_()
                self.action = GotoRgh
            if self.action == GotoLft or self.action == GotoRgh:
                if do_goto:
                    self.GoTo()
                pass
                do_goto = True
            if self.action == GotoLft or self.action == GotoRgh:
                self.Intersect()
            if self.action == LoadLft or self.action == LoadRgh:
                self.Load_()
                self.action = Compute
            if self.action == Compute:
                self.Compute_()
            pass
            if self.action == Return:
                break
            if self.count == limit:
                log.fatal("iray %d ray %s count %d reaches limit %d " % (self.iray, self.ray, self.count, limit))
                assert 0
        pass
        return self.Return_()
    def SaveLft_(self):
        # Restore tmin saved for the parent scope and stash the completed
        # left-child hit before moving on to the right child.
        self.tmin = self.tminStack.pop(debug=self.debug > 1)
        self.primStack.push((self.tl,self.nl,self.lname, self.lact), debug=self.debug > 1)
def Load_(self):
if self.action == LoadLft:
self.tl, self.nl, self.lname, self.lact = self.primStack.pop(debug=self.debug > 1)
elif self.action == LoadRgh:
self.tr, self.nr, self.rname, self.ract = self.primStack.pop(debug=self.debug > 1)
else:
assert 0, action
pass
def GoTo(self):
assert self.action in [GotoLft, GotoRgh]
self.node = self.node.left if self.action == GotoLft else self.node.right
if self.debug > 3:
log.info("GoTo: node %s after action %s from parent %r " % (self.node, desc_action(self.action),self.prev) )
assert self.node
if self.node is None:
log.fatal("GoTo: node None after action %s from parent %r " % (desc_action(self.action),self.prev) )
return
    def Intersect(self):
        """
        # the below handling of a operation holding primitives
        # initially seems a special case cop out, subverting the iterative approach,
        # that is liable to to work for simple trees, but not for complex ones
        #
        # BUT on deeper refeclection that isnt the case, need to allow to keep going left until
        # find primitives one level below in order to get the ball rolling
        # and start filling the primStack, as go back upwards
        """
        action = self.action
        assert action in [GotoLft, GotoRgh]
        if self.node.is_primitive:
            # Leaf: store the primitive hit on the side indicated by the
            # action, then climb back to the parent for a Compute.
            tt, nn, name, act = intersect_primitive( self.node, self.ray, self.tmin )
            if action == GotoLft:
                self.tl = tt
                self.nl = nn
                self.lname = name
                self.lact = act
                if self.debug > 3:
                    log.info("pr.Intersect.GotoLft %s tl %5.2f lname %s " % (self.node.name, self.tl if self.tl else -1, self.lname ))
                pass
            elif action == GotoRgh:
                self.tr = tt
                self.nr = nn
                self.rname = name
                self.ract = act
                if self.debug > 3:
                    log.info("pr.Intersect.GotoRgh %s tr %5.2f rname %s " % (self.node.name, self.tr if self.tr else -1, self.rname ))
                pass
            pass
            self.action = Compute
            self.node = self.node.parent
        elif self.node.is_operation:
            gotoL = intersectBox(self.node.left)
            gotoR = intersectBox(self.node.right)
            # Children that are themselves primitives are intersected right
            # here (manually recording the traversal so it matches the
            # recursive order) and cleared from the goto flags.
            if gotoL and self.node.left.is_primitive:
                self.record_traversal(self.node.left)
                tt, nn, name, act = intersect_primitive(self.node.left, self.ray, self.tmin)
                self.tl = tt
                self.nl = nn
                self.lname = name
                self.lact = act
                gotoL = False
                if self.debug > 3:
                    log.info("op.Intersect.gotoL %s tl %5.2f lname %s " % (self.node.left.name, self.tl, self.lname ))
            if gotoR and self.node.right.is_primitive:
                self.record_traversal(self.node.right)
                tt, nn, name, act = intersect_primitive(self.node.right, self.ray, self.tmin)
                self.tr = tt
                self.nr = nn
                self.rname = name
                self.ract = act
                gotoR = False
                if self.debug > 3:
                    log.info("op.Intersect.gotoR %s tr %5.2f rname %s " % (self.node.right.name, self.tr, self.rname ))
            # immediate right/left primitives are not stacked, as are ready for compute
            if gotoL or gotoR:
                if gotoL:
                    # non-primitive subtree intersect
                    self.primStack.push((self.tl, self.nl, self.lname, self.lact), debug=self.debug > 1)
                    self.actionStack.push(LoadLft, debug=self.debug > 1)
                elif gotoR:
                    self.primStack.push((self.tr, self.nr, self.rname, self.ract), debug=self.debug > 1)
                    self.actionStack.push(LoadRgh, debug=self.debug > 1)
                pass
            else:
                # both gotoL and gotoR False means miss OR both prim intersects done, so are ready for compute
                self.tminStack.push(self.tmin, debug=self.debug > 1)
                self.actionStack.push(LoadLft,debug=self.debug > 1)
                self.actionStack.push(SaveLft,debug=self.debug > 1)
            pass
            # NB not the same as doing this interleaved within the above, as this places
            # no demands on (gotoL or gotoR)
            if gotoL:
                self.action = GotoLft
            elif gotoR:
                self.action = GotoRgh
            else:
                self.action = Compute
            pass
            if self.debug > 3:
                log.info("Intersect -> %s tr/tl %5.2f/%5.2f rname/lname %s/%s " % (desc_action(self.action),self.tr if self.tr else -1,self.tl if self.tl else -1,self.rname,self.lname ))
        else:
            assert 0
@classmethod
def trep_fmt(cls, tmin, tl, tr ):
return "tmin/tl/tr %5.2f %5.2f %5.2f " % (tmin if tmin else -1, tl if tl else -1, tr if tr else -1 )
def _get_trep(self):
return self.trep_fmt(self.tmin, self.tl, self.tr)
trep = property(_get_trep)
    def Up(self):
        """
        Up only called from RetMiss, RetL, RetLIfCloser, RetR, RetRIfCloser branches of Compute_
        which when combined with no parent node seems like a good termination signal
        ... but action stack may not be emptied ?
        """
        if self.node.parent is None:
            # At the root: signal loop termination.
            if self.debug > 4:
                log.info("Up setting Return... actionStack %s " % self.actionStack.desc())
                log.info("Up setting Return... primStack %s " % self.primStack.desc())
                log.info("Up setting Return... tminStack %s " % self.tminStack.desc())
            pass
            self.action = Return
            #self.actionStack.push(Return)
            #self.action = self.actionStack.pop(debug=self.debug > 1)
        else:
            # Resume the action deferred by the parent and climb to it.
            self.action = self.actionStack.pop(debug=self.debug > 1)
            self.node = self.node.parent
        pass
    def Compute_(self):
        """
        Combine the left/right child intersects at the current operation node:
        look up the action bitmask from the boolean table, then either return
        upwards (Ret*) or re-traverse one side with an advanced tmin (Loop*).

        Hmm, surely the loopers can be more simply implemented (without stacks and action),
        they just correspond to repeating the lookup with tmin advanced for one side.
        """
        assert self.node.is_operation
        # classify each child's candidate hit (Enter/Exit/Miss) relative to tmin
        stateL = self.classify( self.tl, self.nl, self.tmin )
        stateR = self.classify( self.tr, self.nr, self.tmin )
        if hasattr(self.node, "rstack"):
            rstack = self.node.rstack
            if self.debug > 3:
                log.info("rstack %s " % repr(rstack))
            pass
        pass
        #if self.tl != rstack[0] or self.tr != rstack[3] or self.nr != rstack[1] or self.rname != rstack[2]
        # bitmask of candidate actions for (operation, stateL, stateR)
        acts = boolean_table(self.node.operation, stateL, stateR )
        opr = "%s(%s:%s,%s:%s)" % ( desc[self.node.operation],self.lname,desc_state[stateL], self.rname,desc_state[stateR] )
        act = 0
        act_RetMiss = (RetMiss & acts)
        act_RetL = (RetL & acts)
        act_RetR = (RetR & acts)
        act_LoopL = (LoopL & acts)
        act_LoopR = (LoopR & acts)
        # "IfCloser" variants only fire for the side with the nearer hit
        act_RetLIfCloser = ((RetLIfCloser & acts) and self.tl <= self.tr)
        act_LoopLIfCloser = ((LoopLIfCloser & acts) and self.tl <= self.tr)
        act_RetRIfCloser = ((RetRIfCloser & acts) and self.tr < self.tl)
        act_LoopRIfCloser = ((LoopRIfCloser & acts) and self.tr < self.tl)
        trep = self.trep # prior to the mods below
        node = self.node # local copy, as Up may change self.node to parent
        if act_RetMiss:
            act = RetMiss
            # clear both sides before ascending: the parent sees a miss
            self.tr = None
            self.nr = None
            self.rname = None
            self.ract = act
            self.tl = None
            self.nl = None
            self.lname = None
            self.lact = act
            self.Up()
        elif act_RetL or act_RetLIfCloser:
            act = RetLIfCloser if act_RetLIfCloser else RetL
            self.lact = act
            # promote the left hit to both sides for the parent's view
            self.tr = self.tl
            self.nr = self.nl
            self.ract = self.lact
            self.Up()
        elif act_RetR or act_RetRIfCloser:
            act = RetRIfCloser if act_RetRIfCloser else RetR
            self.ract = act
            # FlipR: surface normal is inverted (e.g. right side of a difference)
            if (FlipR & acts): self.nr = -self.nr
            self.tl = self.tr
            self.nl = self.nr
            self.lact = self.ract
            self.Up()
        elif act_LoopL or act_LoopLIfCloser:
            act = LoopLIfCloser if act_LoopLIfCloser else LoopL
            # advance tmin past the left hit and re-traverse the left subtree,
            # stashing the right hit for restoration (LoadRgh) afterwards
            self.tmin = self.tl
            self.primStack.push((self.tr,self.nr,self.rname,act), debug=self.debug > 1)
            self.actionStack.push(LoadRgh, debug=self.debug > 1)
            self.action = GotoLft
        elif act_LoopR or act_LoopRIfCloser:
            act = LoopRIfCloser if act_LoopRIfCloser else LoopR
            # mirror image of the LoopL case
            self.tmin = self.tr
            self.primStack.push((self.tl,self.nl,self.lname,act), debug=self.debug > 1)
            self.actionStack.push(LoadLft, debug=self.debug > 1)
            self.action = GotoRgh
        else:
            assert 0
        pass
        self.act = act
        if self.debug > 1:
            log.info("(%d)[%d] ITERATIVE %s : %s -> %s : %s" % (self.iray,self.count, node.name, opr, desc_acts(act), trep))
        pass
def Return_(self):
#assert self.action == Return
if self.act in [RetMiss]:
return None, None, None, self.act
elif self.act in [RetL, RetLIfCloser]:
return self.tl, self.nl, self.lname, self.act
elif self.act in [RetR, RetRIfCloser]:
return self.tr, self.nr, self.rname, self.act
else:
log.warning("%d iray %d iterative returned to top with unexpected act %s " % (self.count, self.iray,desc_action(self.act)))
pass
    def compare_intersects(self, tst):
        """
        Shoot every ray of *tst* through the tree with BOTH the recursive and
        the iterative intersector, recording positions/normals/t/act for each.

        Populates self.ipos/ndir/tval/aval with shape (2, nray, ...) where
        index 0 holds ITERATIVE results and index 1 RECURSIVE results, and
        collects ray indices that disagree into self.prob (intersect mismatch)
        and self.trob (traversal mismatch).
        """
        nray = len(tst.rays)
        self.ipos = np.zeros((2,nray, 3), dtype=np.float32 )
        self.ndir = np.zeros((2,nray, 3), dtype=np.float32 )
        self.tval = np.zeros((2,nray), dtype=np.float32 )
        self.aval = np.zeros((2,nray), dtype=np.int32 )
        self.prob = []
        self.trob = []
        for iray, ray in enumerate(tst.rays):
            for recursive in [1,0]:
                self.typ = "RECURSIVE" if recursive else "ITERATIVE"
                self.reset(ray=ray, iray=iray, debug=tst.debug)
                if iray in tst.skip:
                    log.warning("skipping iray %d " % iray)
                    continue
                if self.debug > 0:
                    log.info(" ray(%d) %r " % (iray,ray) )
                if self.debug > 1:
                    log.info(" %r " % (tst.root))
                if recursive:
                    tt, nn, nname, act = self.recursive_intersect(tst.root, depth=0)
                else:
                    tt, nn, nname, act = self.iterative_intersect(tst.root)
                pass
                if self.debug:
                    log.info("[%d] %s intersect tt %s nn %r " % (-1, self.typ, tt, nn ))
                # a miss leaves the zero-initialised slot untouched
                if not tt is None:
                    ix = 0 if self.typ == "ITERATIVE" else 1
                    self.ipos[ix,iray] = ray.position(tt)
                    self.ndir[ix,iray] = nn
                    self.tval[ix,iray] = tt
                    self.aval[ix,iray] = act
                pass
            pass
            # compare the two implementations for this ray
            ok_pos = np.allclose( self.ipos[0,iray], self.ipos[1,iray] )
            ok_dir = np.allclose( self.ndir[0,iray], self.ndir[1,iray] )
            ok_tva = np.allclose( self.tval[0,iray], self.tval[1,iray] )
            if not (ok_pos and ok_dir and ok_tva):
                self.prob.append(iray)
            ok_tra = self.compare_traversal()
            if not ok_tra:
                self.trob.append(iray)
            pass
        pass
        log.info("%10s %d/%d rays with intersect mismatches : %s " % (tst.name, len(self.prob),nray,repr(self.prob)))
        log.info("%10s %d/%d rays with traversal mismatches : %s " % (tst.name, len(self.trob),nray,repr(self.trob)))
    def plot_intersects(self, plt, normal=False):
        """
        Scatter-plot recursive vs iterative intersect positions side by side
        (recursive panel offset 600 to the right), highlighting mismatch rays.

        :param plt: matplotlib.pyplot module
        :param normal: when True also plot points offset along the surface normal
        """
        sc = 10  # display scale for the normal offset
        prob = self.trob
        for recursive in [1, 0]:
            xoff = 600 if recursive else 0  # horizontal offset separates the two panels
            plt.scatter( xoff + self.ipos[recursive,:,0] , self.ipos[recursive,:,1] )
            if normal:
                plt.scatter( xoff + self.ipos[recursive,:,0]+self.ndir[recursive,:,0]*sc , self.ipos[recursive,:,1]+self.ndir[recursive,:,1]*sc )
            if len(prob) > 0:
                # NOTE(review): the next two calls plot the SAME points red then
                # green, so green fully overplots red — confirm whether one of
                # them was meant to use the other panel's data
                plt.scatter( xoff + self.ipos[recursive, prob,0], self.ipos[recursive, prob,1], c="r" )
                plt.scatter( xoff + self.ipos[recursive, prob,0], self.ipos[recursive, prob,1], c="g" )
def traverse(top):
    """
    Breadth-first walk of the CSG tree, performed twice.

    Pass "label" appends a unique suffix "_{p|o}{index}" to every node name
    (p = primitive, o = operation); pass "dump" is a placeholder hook.

    :param top: root node with .name/.is_primitive/.left/.right attributes
    """
    from collections import deque
    for act in ["label", "dump"]:
        idx = 0
        queue = deque([top])
        while queue:
            node = queue.popleft()  # FIFO -> breadth-first; O(1) vs list.pop(0)
            if act == "label":
                node.name = "%s_%s%d" % (node.name, "p" if node.is_primitive else "o", idx)
            elif act == "dump":
                pass
                #log.info("[%d] %r " % (idx, node))
            if not node.is_primitive:
                if node.left is not None:
                    queue.append(node.left)
                if node.right is not None:
                    queue.append(node.right)
            idx += 1
def test_intersect(csg, tst):
    """
    Label the tree, run the recursive-vs-iterative comparison and plot it.

    * only 1st intersect is returned, so to see inside and outside of
      a shape need to send rays from inside and outside
    """
    traverse(tst.root)  # assign unique node names before comparing
    csg.compare_intersects( tst )
    csg.plot_intersects( plt )
    plt.show()
class T(object):
    """
    Test-case container: a CSG tree plus the ray sources used to probe it.
    """
    def __init__(self, root, debug=None, skip=None, notes="", source="aringlight,origlight", num=200, level=1):
        """
        :param root: CSG root node
        :param debug: list of ray indices to dump (default: none)
        :param skip: list of ray indices to skip (default: none)
        :param notes: free-form annotation shown in logs
        :param source: comma-separated ray-source names
        :param num: number of rays per source
        :param level: verbosity level
        """
        self.root = root
        self.name = root.name
        # fresh list per instance: [] defaults were shared across instances
        self.debug = [] if debug is None else debug
        self.skip = [] if skip is None else skip
        self.notes = notes
        self.source = source
        self.num = num
        self.level = level
    def _get_rays(self):
        """Build the ray list described by self.source."""
        rays = []
        if "xray" in self.source:
            rays += [Ray(origin=[0,0,0], direction=[1,0,0])]
        if "aringlight" in self.source:
            ary = Ray.aringlight(num=self.num, radius=1000)
            rays += Ray.make_rays(ary)
        if "origlight" in self.source:
            rays += Ray.origlight(num=self.num)
        if "lsquad" in self.source:
            rays += [Ray(origin=[-300,y,0], direction=[1,0,0]) for y in range(-50,50+1,10)]
        return rays
    rays = property(_get_rays)
if __name__ == '__main__':
    plt.ion()
    plt.close()
    logformat = "%(asctime)s %(name)s %(levelname)-8s %(message)s"
    logging.basicConfig(level=logging.INFO,format=logformat)
    log = logging.getLogger(__name__)
    ## need to clone to avoid inadvertent parent connections between different roots
    ## TODO: manage this inside Node and think what parent connections should be when cloning
    # primitive shapes: param is (cx, cy, cz, half-extent/radius)
    cbox = Node(BOX, param=[0,0,0,100], name="cbox")
    lbox = Node(BOX, param=[-200,0,0,50], name="lbox")
    rbox = Node(BOX, param=[ 200,0,0,50], name="rbox")
    lrbox = Node(None,lbox.clone(), rbox.clone(), UNION, name="lrbox")
    bms = Node(None, Node(BOX, param=[0,0,0,200], name="box"), Node(SPHERE,param=[0,0,0,150],name="sph"), DIFFERENCE, name="bms")
    smb = Node(None, Node(SPHERE,param=[0,0,0,200], name="sph"), Node(BOX,param=[0,0,0,150], name="box"), DIFFERENCE , name="smb")
    ubo = Node(None, bms.clone(), lrbox.clone(), UNION , name="ubo")
    bmslrbox = Node( None, Node(None, bms.clone(), rbox.clone(), UNION,name="bmsrbox"),lbox.clone(),UNION, name="bmslrbox" )
    bmsrbox = Node(None, bms.clone(), rbox.clone(), UNION,name="bmsrbox")
    smblbox = Node(None, smb.clone(), lbox.clone(), UNION,name="smblbox")
    # bmslrbox :
    #         U( bms_rbox_u :
    #                 U( bms :
    #                      D(bms_box : BX ,
    #                        bms_sph : SP ),
    #                     rbox : BX ),
    #             lbox : BX )
    #
    # NOTE(review): unlike every other operation node above, the inner Node
    # below is NOT given a leading None argument — confirm this is intended
    bmsrlbox = Node( None, Node(bms.clone(), lbox.clone(), UNION,name="bms_lbox"),rbox.clone(),UNION, name="bmsrlbox" )
    csph = Node(SPHERE, param=[0,0,0,100], name="csph")
    lsph = Node(SPHERE, param=[-50,0,0,100], name="lsph")
    rsph = Node(SPHERE, param=[50,0,0,100], name="rsph")
    lrsph_u = Node(None, lsph.clone(), rsph.clone(), UNION, name="lrsph_u")
    lrsph_i = Node(None, lsph.clone(), rsph.clone(), INTERSECTION, name="lrsph_i")
    lrsph_d = Node(None, lsph.clone(), rsph.clone(), DIFFERENCE , name="lrsph_d")
    # ok0: the cases currently under investigation; ok: known-good; nok: known failures
    ok0 = [
        #T(lrsph_i, source="origlight"),
        #T(lrsph_i, source="aringlight", notes="all iterative aringlight miss in actionStack while mode", level=2, debug=[0]),
        #T(lrbox),
        #T(bms, level=4, debug=[0]),
        #T(csph, source="origlight", debug=[0], level=2),
        T(smb, source="aringlight,origlight", debug=[23], skip=[], level=4),
    ]
    ok = [
        T(smb),
        T(bms),
        T(csph),
        T(cbox),
        T(lbox),
        T(rbox),
        T(lrbox),
        T(lrsph_d),
        T(lrsph_u, notes="fixed all rightside mismatched with origlight by adopting clone to avoid inadventent parent relationship to other shape"),
        T(lrsph_i),
        T(bmsrbox),
    ]
    nok = [
        #T(bmslrbox, notes="left box protrusion is missed for iterative", debug=[92], level=2),
        T(smblbox, notes="box corners are discrepantly present for iterative", debug=[23], level=2),
        #T(bmslrbox, notes="left box protrusion is missed for iterative", source="lsquad", debug=[1]),
        #T(bmsrlbox, notes="right box protrusion is missed for iterative"),
        #T(ubo, [], notes="looks to be missing most intersects???"),
    ]
    for tst in ok0:
        csg = CSG(level=tst.level)
        test_intersect(csg,tst)
    pass
|
import tweepy
from tweepy.auth import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
import socket
import json
# Set up your credentials from http://apps.twitter.com
# Redacted placeholders — replace with your own app credentials before running.
consumer_key = '********************'
consumer_secret = '**************************************************'
access_token = '**************************************************'
access_secret = '*******************************************'
class TweetsListener(StreamListener):
    """
    Tweepy stream listener that relays each tweet over a TCP socket.

    NOTE(review): listen()/accept() are called for EVERY tweet, i.e. a fresh
    client connection is expected per message — confirm this matches the
    consumer (e.g. a Spark socket source) it was written for.
    """
    def __init__(self, csocket):
        # server socket through which tweets are relayed
        self.client_socket = csocket
    def on_data(self, data):
        """Forward one tweet as 'text ~@ location ~@ source'; always keep streaming."""
        try:
            server = self.client_socket
            server.listen(5)
            conn, addr = server.accept()
            print("Received request from: " + str(addr))
            msg = json.loads(data)
            user = json.loads(json.dumps(msg['user']))
            location = user['location'] if user['location'] is not None else 'None'
            sdata = msg['text'].replace('\n', '') + ' ~@ ' + location + ' ~@ ' + msg['source']
            print(sdata.encode('utf-8'))
            conn.send(sdata.encode('utf-8'))
            conn.close()
        except BaseException as e:
            # deliberate best-effort: report and keep the stream alive
            print("Error on_data: %s" % str(e))
        return True
    def on_error(self, status):
        """Log the error code and keep the stream running."""
        print(status)
        return True
def sendData(c_socket):
    """Authenticate with Twitter and stream COVID-related tweets to *c_socket* forever."""
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    twitter_stream = Stream(auth, TweetsListener(c_socket))
    # fixed pandemic keyword list, case variants included
    twitter_stream.filter(track=['Coronavirus', 'COVID-19', 'Pandemic', 'COVID19', 'covid19', 'covid-19', 'coronavirus', 'pandemic'])
if __name__ == "__main__":
    # Open the relay server socket; TweetsListener accepts one client per tweet.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = "192.168.1.239"  # NOTE(review): hard-coded LAN address — consider making configurable
    port = 5551
    s.bind((host, port))
    print("Listening on port: %s" % str(port))
    sendData(s)
|
#!/usr/bin/env python
import sys
sys.path.insert(0,'../redis_')
from confluent_kafka import Consumer, KafkaError
import json
import time
from pprint import pprint
from redis_ import redisDriver
# every event topic this consumer mirrors into Redis
topics = ['add_build', 'delete_build', 'add_user', 'delete_user', 'add_build_component', 'remove_build_component', 'add_decoration', 'remove_decoration', 'remove_all_decorations']
settings = {
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'buildHunter',
    'client.id': 'redis',
    'enable.auto.commit': False,  # offsets are committed manually after a successful Redis write
    'session.timeout.ms': 6000,
    'default.topic.config': {'auto.offset.reset': 'latest'}
}
def repl():
    """
    Consume build/user events from Kafka and mirror them into Redis.

    Runs until interrupted.  Offsets are committed only after the Redis write
    succeeds; on failure the consumer re-subscribes so the message is
    redelivered.  Waits for Redis to answer ping before polling.
    """
    # topic -> handler dispatch table (replaces the long if/elif ladder and
    # guarantees `result` is always defined)
    handlers = {
        'add_build': add_build,
        'delete_build': delete_build,
        'add_user': add_user,
        'delete_user': delete_user,
        'add_build_component': add_build_component,
        'remove_build_component': remove_build_component,
        'add_decoration': add_decoration,
        'remove_decoration': remove_decoration,
        'remove_all_decorations': remove_all_decorations,
    }
    c = Consumer(settings)
    c.subscribe(topics)
    try:
        while True:
            if not red.ping():
                time.sleep(1)
                continue
            msg = c.poll(0.1)
            # No message present
            if msg is None:
                continue
            # Found message
            elif not msg.error():
                handler = handlers.get(msg.topic())
                result = handler(msg.value()) if handler else False
                if result:
                    # msg.value() is bytes; decode before concatenating
                    # (previously raised TypeError: str + bytes)
                    value = msg.value()
                    if isinstance(value, bytes):
                        value = value.decode('utf-8', 'replace')
                    pprint('Success ' + value)
                    c.commit()
                else:
                    c.unsubscribe()
                    c.subscribe(topics)
                    print('Error Occurred Adding to Redis')
            elif msg.error().code() == KafkaError._PARTITION_EOF:
                print('End of partition reached {0}/{1}'.format(msg.topic(), msg.partition()))
            else:
                print('Error occurred: {0}'.format(msg.error().str()))
                time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        c.close()
def add_build(msg):
    """Handle an 'add_build' event: register the build under its owner in Redis."""
    payload = json.loads(msg)
    try:
        red.add_build(payload['user'], payload['build_id'])
    except Exception as err:
        print(err)
        return False
    return True
def delete_build(msg):
    """Handle a 'delete_build' event: remove the build and its parts from Redis."""
    payload = json.loads(msg)
    try:
        red.delete_build(payload['user'], payload['build_id'], payload['build_parts'])
    except Exception as err:
        print(err)
        return False
    return True
def add_user(msg):
    """Handle an 'add_user' event: create the user record in Redis."""
    payload = json.loads(msg)
    try:
        red.add_user(payload['user'])
    except Exception as err:
        print(err)
        return False
    return True
def delete_user(msg):
    """Handle a 'delete_user' event: remove the user record from Redis."""
    payload = json.loads(msg)
    try:
        red.delete_user(payload['user'])
    except Exception as err:
        print(err)
        return False
    return True
def add_build_component(msg):
    """Handle an 'add_build_component' event: attach an item to a build slot."""
    payload = json.loads(msg)
    try:
        red.add_build_component(payload['build_id'], payload['part'], payload['item_id'])
    except Exception as err:
        print(err)
        return False
    return True
def remove_build_component(msg):
    """Handle a 'remove_build_component' event: detach a part from a build."""
    payload = json.loads(msg)
    try:
        red.remove_build_component(payload['part'], payload['build_id'])
    except Exception as err:
        print(err)
        return False
    return True
def add_decoration(msg):
    """Handle an 'add_decoration' event: attach a decoration item to a build slot."""
    payload = json.loads(msg)
    try:
        red.add_decoration(payload['build_id'], payload['part'], payload['item_id'])
    except Exception as err:
        print(err)
        return False
    return True
def remove_decoration(msg):
    """Handle a 'remove_decoration' event: detach one decoration from a build slot."""
    payload = json.loads(msg)
    try:
        red.remove_decoration(payload['build_id'], payload['part'], payload['item_id'])
    except Exception as err:
        print(err)
        return False
    return True
def remove_all_decorations(msg):
    """Handle a 'remove_all_decorations' event: clear every decoration on a slot."""
    payload = json.loads(msg)
    try:
        red.remove_all_decorations(payload['build_id'], payload['part'])
    except Exception as err:
        print(err)
        return False
    return True
def main():
    """Entry point: connect to Redis (as master) and start the consume loop."""
    print('Starting Redis Consumer')
    global red
    # module-level handle used by all the topic handlers above
    red = redisDriver.RedisDriver(is_master=True)
    repl()
if __name__ == "__main__":
    main()
import time
# Interactively collect N items, then concatenate a header string with the
# list's text representation.  The sleeps are purely cosmetic pacing.
items = []
total = int(input("Enter the no of items you want to insert in the list : "))
for _ in range(total):
    items.append(input("Enter the element of the list :"))
time.sleep(1)
print("Appending items in the list ...")
time.sleep(1)
print("List : ", items)
header = "Appending lists"
items_text = str(items)
time.sleep(1)
print("Appending string and lists ...")
combined = header + items_text
time.sleep(1)
print("Final_Result :", combined)
|
from .models import OnlineCourse
from workprogramsapp.models import WorkProgram, Topic, DisciplineSection
import pandas as pd
import numpy as np
import re
import nltk; nltk.download('stopwords')
from nltk.corpus import stopwords
from pymorphy2 import MorphAnalyzer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Shared NLP helpers and resources
stop_words = stopwords.words('russian')
stop_words.extend(stopwords.words('english'))
# HTML artefacts left over from scraped course descriptions are filtered as stop words
stop_words.extend(['', 'p', 'br', 'strong', 'li', 'ol', 'em', 'ul', 'span', 'tr', 'td', 'align', 'justify',
                   'b', 'h2', 'hr', 'f', 'href'])
# pymorphy2 lemmatizer for Russian
morph = MorphAnalyzer(lang='ru')
# merge the course content with its description
def unite_course(courses, titles, content, description):
    """
    Merge title, description and content into one text per course.

    Missing fields are pandas NaN values, detected with ``type(x) != float``
    exactly as the data pipeline expects.  A row whose content (or
    description) is missing falls back to the remaining two fields; note a
    row with a missing title is silently skipped.
    """
    merged = []
    for idx in courses.index:
        has_title = type(titles[idx]) != float
        has_desc = type(description[idx]) != float
        has_content = type(content[idx]) != float
        if has_title and has_desc and has_content:
            merged.append(str(titles[idx]) + ' ' + str(description[idx]) + ' ' + str(content[idx]))
        elif not has_content:
            merged.append(titles[idx] + ' ' + description[idx])
        elif not has_desc:
            merged.append(titles[idx] + ' ' + content[idx])
    return merged
def preprocessing(sentences):
    """
    Generic text clean-up generator: lower-case, split on non-word characters,
    lemmatise each token with pymorphy2 and drop stop words; yields one
    space-joined string per input sentence.
    """
    for raw in sentences:
        yield ' '.join(
            morph.normal_forms(token)[0]
            for token in re.split(r"\W+", raw.lower())
            if token not in stop_words
        )
def prepare_courses():
    """
    Prepare the online-course corpus for recommendations.

    Deduplicates the courses, drops test/garbage entries and rows with too
    many missing fields, then returns (processed texts, titles, urls) with a
    reset 0..n-1 index so the three outputs stay aligned.
    """
    courses = OnlineCourse.objects.all()
    courses_df = pd.DataFrame(courses.values())
    # drop duplicate titles
    courses_df.drop_duplicates('title', inplace=True)
    # drop known test titles (the Russian literal 'тест' is real data — keep as is)
    drop = courses_df[courses_df.title.isin(['Title 1234', 'тест', 'test'])].index.to_list()
    drop.extend(courses_df[courses_df.description == 'тест'].index.to_list())
    titles = courses_df.title.str.lower()
    # mark garbage titles ('test...', 'aaa', 'title', '1111') with the sentinel '-1'
    new_titles = titles.map(
        lambda x: '-1' if x.find('тест ') != -1 or x.find('тестов') != -1 or x.find('ааа') != -1 or x.find(
            'title') != -1 or x.find('1111') != -1 else x)
    drop.extend(new_titles[new_titles == '-1'].index.to_list())
    drop.extend(titles[titles.duplicated()].index.to_list())
    # drop courses with too many missing fields
    nulls = courses_df.isnull().sum(axis=1)
    drop.extend(nulls[nulls > 11].index.to_list())
    courses_df.drop(drop, inplace=True)
    processed_courses = courses_df.reset_index()
    processed_courses.drop(['index'], axis=1, inplace=True)
    # text processing
    titles, description, content = processed_courses.title, processed_courses.description, processed_courses.content
    urls = processed_courses.external_url
    # merge title, description and content into one text per course
    course_text = unite_course(processed_courses, titles, content, description)
    # lemmatise and strip stop words
    data = list(preprocessing(course_text))
    return data, titles, urls
def get_topics(id_workprogram):
    """
    Return every topic of a work programme.

    Fetches all sections of the programme, then all topics of each section;
    each topic is a dict with 'id' and 'description' keys.
    """
    topics = []
    sections = DisciplineSection.objects.filter(work_program=id_workprogram).values('id', 'name')
    for section in sections:
        topics.extend(
            Topic.objects.filter(discipline_section=section['id']).values('id', 'description')
        )
    return topics
def create_matrix(courses):
    """
    TF-IDF-vectorise the texts, build their cosine-similarity matrix and find
    the document closest to the LAST entry (the probe topic).

    Returns (index_of_best_match, similarity_of_that_match); the diagonal is
    NaN-ed out so a document never matches itself.
    """
    vectors = TfidfVectorizer().fit_transform(courses)
    similarity = cosine_similarity(vectors, vectors)
    np.fill_diagonal(similarity, np.nan)
    best = np.nanargmax(similarity[-1])
    return best, similarity[-1][best]
def get_recommendation(course_data, titles, urls, topics=''):
    """
    Build course recommendations for a list of discipline topics.

    :param course_data: preprocessed course texts (mutated temporarily, restored on return)
    :param titles: course titles aligned with course_data
    :param urls: course urls aligned with course_data
    :param topics: list of {'id', 'description'} topic dicts
    :return: list of [topic, similarity, course_title, course_url] rows

    NOTE(review): max_rec[2] ratchets upwards, so each later topic must beat
    the best similarity seen so far, not just the 0.40 floor — confirm this
    is the intended behaviour.
    """
    recommended = []
    # only the topic names are matched; ids stay in the result rows
    topic_names = [x['description'] for x in topics]
    topics_processed = list(preprocessing(topic_names))
    # minimal similarity a course must exceed to be recommended
    recommended_min = 0.40
    max_rec = ['course_id', 'topic', recommended_min]
    for i in range(len(topics_processed)):
        topic = topics_processed[i]
        # temporarily append the topic as the last document of the corpus
        course_data.append(topic)
        rec, similarity_count = create_matrix(course_data)
        if similarity_count > max_rec[2]:
            max_rec[0] = int(rec)            # course index
            max_rec[1] = topics[i]           # topic dict (id + description)
            max_rec[2] = similarity_count    # cosine similarity achieved
            recommended.append([max_rec[1], similarity_count, titles[max_rec[0]], urls[max_rec[0]]])
        # pop() removes exactly the element we appended; the previous
        # remove(topic) deleted the FIRST equal element and could discard a
        # course whose processed text happened to match the topic
        course_data.pop()
    return recommended
|
#!/usr/bin/env python
import sys
import os
import math
import argparse
# Parse arguments: target directory, number of files, size of each file in KB
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='path of directory to create')
parser.add_argument('dirsize', help='how many files', type=int, default=1, nargs='?')
parser.add_argument('filesize', help='file size in KB', type=int, default=1, nargs='?')
args = parser.parse_args()
# sanity checks
if args.dirsize < 1:
    parser.error('dirsize must be 1 or greater')
# 1073741824 KB == 1 TiB
if args.filesize > 1073741824:
    parser.error('filesize over 1TB')
def digits(n):
    """Return the count of decimal digits in a non-negative integer."""
    if n == 0:
        return 1
    if n > 999999999999997:
        # log10 suffers rounding errors for very large ints; fall back to str
        # https://stackoverflow.com/questions/2189800/length-of-an-integer-in-python
        return len(str(n))
    return int(math.log10(n)) + 1
# make test directory (fails if it already exists — intentional safety)
os.makedirs(args.directory)
# make zero filled files in directory
for i in range(1, args.dirsize + 1):
    # pad file name suffix with zeroes so names sort naturally
    suffix_len = digits(args.dirsize)  # NOTE(review): loop-invariant; could be hoisted
    file_name = 'test_{i:0>{suffix_len}}.txt'.format(**locals())
    file_path = os.path.join(args.directory, file_name)
    with open(file_path, 'wb') as binfile:
        binfile.write(b'\x00' * args.filesize * 1024)
sys.exit(0)
|
# -*- coding: utf-8 -*-
import os
import re
import cv2
import glob
import pickle
import keras
import time
import csv
import sys
import openpyxl
import numpy as np
from mlc.function import check_existfile,show,Roi_Reduction,draw_contours,load_image_from_txt,Read_Parameter
from PIL import Image,ImageDraw
from utils.self_unet import unet_2d, unet_2d_GAM
import tensorflow as tf
# allow very large arrays to print in full when debugging
np.set_printoptions(threshold=400000000)
# (the section below contains the overall processing functions)
"""
以下、全体的な処理関数
"""
def load_image_s(filename):
    """
    Deserialize and return a pickled image object from *filename*.

    Security note: pickle executes arbitrary code on load — only use on
    trusted files produced by this pipeline.
    """
    print("Load Pickle Data.")
    with open(filename, 'rb') as handle:
        payload = pickle.load(handle)
    print("Load Complete.")
    return payload
# load the Amed test images and run the extractor over them
def test(dict):
    """
    Evaluate the extractor on the Amed test images.

    NOTE(review): the parameter shadows the builtin ``dict``; renaming it
    would be cleaner but changes the keyword-argument interface.

    Parameter
    ---------
    dict : dict
        settings loaded from Parameter.txt (``size``, ``amedroot_path``,
        ``model_path``, ...)

    Result
    ------
    writes the extraction results under <amedroot_path>/amed_unet_result
    """
    print("start predict")
    size = dict["size"]
    root = dict["amedroot_path"]
    target_size = (size[0],size[1],1)
    cimage,gimage,data_name= load_image_from_txt(dict)
    #cimage,gimage = load_image_s('./image/image.pkl')
    model = unet_2d_GAM(input_shape=target_size)
    model.load_weights(dict["model_path"])
    os.system("cls")
    for i in range(len(cimage)):
        #start = time.time()
        print("\r{0:d}".format(i),end="")#
        w,h,c = gimage[i].shape
        target = cv2.resize(gimage[i],size)
        target = target.reshape((1,size[0],size[1],1))
        target = target / 255# normalize to [0, 1]
        pred = model.predict(target)[0]
        pred = np.reshape(pred, (size[0],size[1],1), order='F')
        # binarisation threshold
        Threshold = 0.7
        # turn the soft prediction into a binary mask
        pred[pred >= Threshold] = 1
        pred[pred < Threshold] = 0
        pred= np.asarray(pred, dtype=np.uint8)
        val = np.copy(cimage[i])
        """
        val :検出した領域の輪郭をoriginal画像と重ねた画像
        label :検出した領域の輪郭
        val :検出した領域
        """
        pred_resize = cv2.resize(pred,(h,w))
        pred_resize = np.reshape(pred_resize, (w,h,1), order='F')
        val,label,relabel =draw_contours(val,pred_resize)
        #pred_resize_2 = np.concatenate((pred_resize,pred_resize),axis=2)
        #pred_resize = np.concatenate((pred_resize_2,pred_resize),axis=2)
        result_image = (relabel/255)*cimage[i]
        # per-image processing time (disabled)
        #elapsed_time = time.time() - start
        #print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
        #print("pred_resize.shape",pred_resize.shape)
        #print("cimage[i].shape",cimage[i].shape)
        #save result
        result_rootpath = "/amed_unet_result/"+os.path.dirname(data_name[i])
        result_dir_path= root+ result_rootpath
        check_existfile(result_dir_path)
        check_existfile(root+ result_rootpath+"/val")
        check_existfile(root+ result_rootpath+"/target")
        check_existfile(root+ result_rootpath+"/pred")
        cv2.imwrite(root+ result_rootpath+"/val/val_"+os.path.basename(data_name[i]), val)
        cv2.imwrite(root+ "/amed_unet_result/"+data_name[i], result_image*255)
        cv2.imwrite(root+ result_rootpath+"/target/target_"+os.path.basename(data_name[i]), cimage[i])
        cv2.imwrite(root+ result_rootpath+"/pred/"+os.path.basename(data_name[i]), relabel*255)
        #cv2.imwrite(root+ result_rootpath+"/pred/"+os.path.basename(data_name[i]), pred_resize*255)
def unet_result(dict):
    """
    Compute the inclusion rate (overlap between the predicted regions and the
    annotated tumour ROIs) for every test image and write the per-image
    results to ./result/result_inclusion_rate.xlsx.

    Parameter
    ---------
    dict : dict
        settings parsed from Parameter.txt via Read_Parameter()
        (``amedabdomen_path``, ``amedresult_path``, ...)

    NOTE(review): the parameter shadows the builtin ``dict``; kept to
    preserve the keyword interface.  Fixes applied here: ``flag is N``
    identity comparisons replaced with ``==`` (identity on int literals is a
    CPython accident and a SyntaxWarning), duplicated assignment targets and
    a duplicated result_pred computation removed, unused locals dropped.
    """
    #root :"//aka/share/amed"
    data_path = dict["amedabdomen_path"]
    #text_path = dict["amedtest_path"]
    text_path = "./my_testdata.txt"
    amedresult_path = dict["amedresult_path"]
    result_ex, count = 0, 0
    list_result = []
    color_cord = (0,255,255)
    thickness = 3
    with open(text_path) as f:
        for i,line in enumerate(f):
            #if i <1600:
            # continue
            #load unet pred and val
            unet_predpath = (amedresult_path+"/"+line+"/pred/*.png").replace('\n', '')
            unet_valpath = (amedresult_path+"/"+line+"/val/*.png").replace('\n', '')
            pred_files = glob.glob(unet_predpath)
            val_files = glob.glob(unet_valpath)
            for fs in pred_files:
                base_path = os.path.basename(fs)
                pred = cv2.imread(fs,cv2.IMREAD_GRAYSCALE)
            for fs in val_files:
                val = cv2.imread(fs,1)
            # path to this entry's ROI csv files
            file_path = (data_path+line + "/"+ "*roi.csv").replace('\n', '')
            #file_path = ("//aka/share/amed/amed_abdomen/"+line + "/"+ "*roi.csv").replace('\n', '')
            files = glob.glob(file_path)
            flag = 0
            coord_list = []
            # Tumour coordinates: the csv holds 4 values per tumour, so
            # 2 tumours -> 8 coordinates (tumour 1 -> x1 y1 x2 y2,
            # tumour 2 -> x3 y3 x4 y4); hence the length checks below.
            # Hand-rolled parsing kept from the original.
            for ff in files:
                with open(ff) as f:
                    # 1) read the ROI values as a flat comma-separated list
                    # 2) the file mixes ',' and '\n' separators, so newlines
                    #    are split off the 4th/7th/10th/13th fields
                    cells = (f.read()).split(",")
                    if len(cells) > 4:
                        cells[3],x3=(cells[3]).split("\n")
                        flag = 1
                        if len(cells) > 7:
                            cells[6],x5=(cells[6]).split("\n")
                            if len(cells)>10:
                                cells[9],x7=(cells[9]).split("\n")
                                cells[12],_= (cells[12]).split("\n")
                                x7,y7,x8,y8 = int(x7),int(cells[10]),int(cells[11]),int(cells[12])
                                flag = 3
                            x5,y5,x6,y6 = int(x5),int(cells[7]),int(cells[8]),int(cells[9])
                            flag = 2
                        x3,y3,x4,y4 = int(x3),int(cells[4]),int(cells[5]),int(cells[6])
                        x1,y1,x2,y2 = int(cells[0]),int(cells[1]),int(cells[2]),int(cells[3])
                    else:
                        x1,y1,x2,y2 = int(cells[0]),int(cells[1]),int(cells[2]),int(cells[3])
            file_path = (data_path +"/" +line + "/"+ "*_denoised.png").replace('\n', '')
            files = glob.glob(file_path)
            for f in files:
                #image_path :1.2.840.113619.2.256.50119124685.1563429440.1915_denoised.png
                image_path = os.path.basename(f)
                gimage = cv2.imread(f,cv2.IMREAD_GRAYSCALE)
                #gimage = cv2.imread(f,cv2.IMREAD_COLOR)
                # collect the corner pairs; flag == N means N+1 tumours
                if flag == 0:
                    coord_list.append( ((x1, y1), (x2, y2)))
                if flag == 1:
                    coord_list.append( ((x1, y1), (x2, y2)))
                    coord_list.append( ((x3, y3), (x4, y4)))
                if flag == 2:
                    coord_list.append( ((x1, y1), (x2, y2)))
                    coord_list.append( ((x3, y3), (x4, y4)))
                    coord_list.append( ((x5, y5), (x6, y6)))
                if flag == 3:
                    coord_list.append( ((x1, y1), (x2, y2)))
                    coord_list.append( ((x3, y3), (x4, y4)))
                    coord_list.append( ((x5, y5), (x6, y6)))
                    coord_list.append( ((x7, y7), (x8, y8)))
                # tumor_coord : image containing every ROI
                # val         : annotated image written out below
                tumor_coord = np.zeros(pred.shape)
                one_coord = np.zeros(pred.shape)
                tumor_area = 0
                for co in range(len(coord_list)):
                    # take one coordinate pair
                    one_coord = np.zeros(pred.shape)
                    one_area = 0
                    coord_1,coord_2 = coord_list[co]
                    x1, y1 = coord_1
                    x2, y2 = coord_2
                    # rasterise the roi
                    one_coord[y1:y2,x1:x2] = 255
                    #tumor_area += np.count_nonzero(one_coord == 255)
                    one_area = np.count_nonzero(one_coord == 255)# area in pixels
                    #val = cv2.rectangle(gimage, (x1, y1), (x2, y2),(255,0,0),thickness)
                    # NOTE(review): both branches shrink the ROI by the same
                    # 0.6 factor, so the area check is currently a no-op —
                    # confirm whether large ROIs should scale differently
                    if one_area > 100000:
                        x1, y1, x2, y2 = Roi_Reduction(( x1, y1, x2, y2),0.6)
                    else:
                        x1, y1, x2, y2 = Roi_Reduction(( x1, y1, x2, y2),0.6)
                    # write out once all checks have passed
                    val = cv2.rectangle(val, (x1, y1), (x2, y2),color_cord,thickness)
                    cv2.imwrite("./result/new_roi/"+image_path,val)
                    tumor_coord[y1:y2,x1:x2] = 255
                tumor_area = np.count_nonzero(tumor_coord == 255)
                check_existfile("./unet_result/val/")
                cv2.imwrite("./unet_result/val/"+image_path,val)
                cv2.imwrite("./unet_result/tumor/"+image_path,tumor_coord)
                cv2.imwrite("./unet_result/target/"+image_path,gimage)
                # logical AND of the prediction and the tumour mask
                result_pred = tumor_coord * pred
                cv2.imwrite("./unet_result/pred/"+image_path,pred)
                cv2.imwrite("./unet_result/result_pred/"+image_path,result_pred)
                result_area = np.count_nonzero(result_pred >0)
                # shrinking a large ROI by 0.6 reportedly matches the
                # intended tumour extent
                inclusion_rate = result_area /tumor_area
                if inclusion_rate >=1:
                    inclusion_rate = 1.0
                list_result.append([i+1,line.replace("\n",""),base_path,inclusion_rate,None])
                result_ex += inclusion_rate
                if inclusion_rate > 0.8:
                    count+=1
                print("\r{0:d}".format(i+1),end="")
                #show(tumor_coord)
            #if i ==30:
            # break
    #os.system("cls")
    # write the inclusion-rate summary as xlsx
    ws = openpyxl.Workbook()
    sheet = ws.active
    sheet.title = "Inclusion_rate"
    # NOTE(review): divides by the last line index, not the number of rows
    # actually processed — confirm whether i+1 was intended
    result = result_ex / i
    list_result.append([None,None,None,None,result])
    list_result.append([None,None,None,"80%以上",count])
    sheet.cell(row = 1,column = 1).value = "num"
    sheet.cell(row = 1,column = 2).value = "filename"
    sheet.cell(row = 1,column = 3).value = "dataname"
    sheet.cell(row = 1,column = 4).value = "result"
    sheet.cell(row = 1,column = 5).value = "total"
    for x in range(len(list_result)):
        data = list_result[x]
        # NOTE(review): only the first 4 columns are written, so the totals
        # stored in column 5 of the summary rows never reach the sheet
        for kk in range(4):
            sheet.cell(row = x+2,column = kk+1).value =data[kk]
    ws.save("./result/result_inclusion_rate.xlsx")
if __name__ == "__main__":
    # load the run configuration, evaluate the extractor, then score it
    Dict = Read_Parameter("./Parameters.txt")
    #Make_Mytest_Datasettext(Dict)
    test(Dict)
    unet_result(Dict)
|
# Задача для самопроверки (то есть за неё не даются баллы).
# Реализуйте метод update_mini_batch класса Neuron. Когда вы решите сдать задачу, вам нужно будет просто скопировать соответствующие функции (которые вы написали в ноутбуке ) сюда. Копируем без учёта отступов; шаблон в поле ввода ответа уже будет, ориентируйтесь по нему. Сигнатура функции указана в ноутбуке, она остаётся неизменной.
# update_mini_batch считает градиент и обновляет веса нейрона на основе всей переданной ему порции данных, кроме того, возвращает 1, если алгоритм сошелся (абсолютное значение изменения целевой функции до и после обновления весов < eps), иначе возвращает 0.
# Мы будем проверять ваш алгоритм на данных разного размера. Пример данных, на которых вы можете проверить работу своего решения самостоятельно:
# np.random.seed(42)
# n = 10
# m = 5
# X = 20 * np.random.sample((n, m)) - 10
# y = (np.random.random(n) < 0.5).astype(np.int)[:, np.newaxis]
# w = 2 * np.random.random((m, 1)) - 1
# neuron = Neuron(w)
# neuron.update_mini_batch(X, y, 0.1, 1e-5)
# Если вы посмотрите на веса нейрона neuron после выполнения этого кода, то они должны быть такими:
# >>> print(neuron.w)
# [[-0.22571548]
# [-0.45367083]
# [ 0.65670199]
# [-0.27851325]
# [-0.41341191]]
import numpy as np
def update_mini_batch(self, X, y, learning_rate, eps):
    """
    Perform one gradient-descent step on the whole mini-batch.

    Computes the analytic gradient over (X, y), updates the weights in place,
    and reports convergence: 1 when the absolute change of the quadratic cost
    caused by the step is below *eps*, otherwise 0.
    """
    cost_before = J_quadratic(self, X, y)
    self.w -= learning_rate * compute_grad_analytically(self, X, y)
    cost_after = J_quadratic(self, X, y)
    return int(abs(cost_after - cost_before) < eps)
import torch
import numpy as np
import os
import os.path as osp
import cv2
import pandas as pd
import time
from .base_dataset import BaseDataset
from .base_dataset import pil_loader
class Dataset(BaseDataset):
    """
    Image-classification dataset laid out as <data_dir>/<split>/<class>/<image>.

    Each image's label is derived from its parent directory name via the
    ``class2id`` mapping (default {'pos': 1, 'neg': 0}).
    """
    def __init__(self, args, split='train', **kwargs):
        super().__init__(args)
        self.data_dir = osp.join(args.data_dir, split)
        self.class2id = args.get('class2id', {'pos': 1, 'neg': 0})
        self.split = split
        if split == 'train':
            self.transform = self.transform_train()
        elif split in ('val', 'test'):
            self.transform = self.transform_validation()
        else:
            raise ValueError

        def collect(path, suffixes):
            # depth-first listing preserving os.listdir order; a path ending
            # with one of the suffixes is treated as an image file
            for suffix in suffixes:
                if path.lower().endswith(suffix):
                    return [path]
            found = []
            for entry in os.listdir(path):
                found += collect(osp.join(path, entry), suffixes)
            return found

        self.img_list = collect(self.data_dir, ['jpg', 'jpeg', 'png'])
        # (path, label) pairs; label comes from the parent directory name
        self.metas = [(p, self.class2id[p.split('/')[-2]]) for p in self.img_list]
        self._num = len(self.metas)
        print('%s set has %d images' % (self.split, self.__len__()))
        self._labels = [m[1] for m in self.metas]
        # per-class sample counts ordered by class id
        self._cls_num_list = pd.Series(self._labels).value_counts().sort_index().values
        self._freq_info = [
            c * 1.0 / sum(self._cls_num_list) for c in self._cls_num_list
        ]
        self._num_classes = len(self._cls_num_list)
        self._class_dim = len(set(self._labels))
    def load_image(self, img_filename):
        """Decode one image file via PIL."""
        return pil_loader(img_filename)
    def get_class_dim(self):
        return self._class_dim
    def get_labels(self):
        return self._labels
    def get_cls_num_list(self):
        return self._cls_num_list
    def get_freq_info(self):
        return self._freq_info
    def get_num_classes(self):
        return self._num_classes
    def __len__(self):
        return self._num
    def __str__(self):
        return self.args.data_dir + ' split=' + str(self.split)
    def _getitem(self, idx):
        """Load, transform and return (image, label) for one index."""
        path, label = self.metas[idx]
        sample = self.transform({'image': self.load_image(path), 'label': label})
        return sample['image'], sample['label']
    def __getitem__(self, idx):
        return self._getitem(idx)
|
import pygame
from network import Network
pygame.font.init()
# Fixed window dimensions for the client UI.
width = 500
height = 500
# Create the single game window (shared by all drawing code below).
win = pygame.display.set_mode((width, height))
pygame.display.set_caption("Client")
class Button:
    """A fixed-size (120x50) clickable rectangular button for pygame."""
    def __init__(self, text, x, y, color):
        """
        :param text: label drawn at the button's centre
        :param x: left edge of the button
        :param y: top edge of the button
        :param color: fill colour of the rectangle
        """
        self.text = text
        self.x = x
        self.y = y
        self.color = color
        self.width = 120
        self.height = 50
    def draw(self, win):
        """
        Render the filled rectangle and its centred label onto `win`.
        :param win: pygame window
        :return: None
        """
        pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height))
        font = pygame.font.SysFont('comicsans', 30)
        label = font.render(self.text, 1, (255, 255, 255))
        # Centre the label inside the button rectangle.
        label_x = self.x + round(self.width/2) - round(label.get_width()/2)
        label_y = self.y + round(self.height/2) - round(label.get_height()/2)
        win.blit(label, (label_x, label_y))
    def click(self, pos):
        """
        Report whether a mouse position falls inside the button.
        :param pos: (x, y) coordinates of the mouse click
        :return: True if inside the button, False otherwise
        """
        click_x, click_y = pos[0], pos[1]
        inside_horizontally = self.x <= click_x <= self.x + self.width
        inside_vertically = self.y <= click_y <= self.y + self.height
        if inside_horizontally and inside_vertically:
            return True
        return False
def redraw_window(win, game, p):
    """
    Updates the whole pygame window
    :param win: pygame window instance
    :param game: current game object that determines the game state
    :param p: [0, 1] player no.
    :return: None
    """
    win.fill((255, 255, 255))
    if not game.connected():
        font = pygame.font.SysFont("comicsans", 50)
        # BUG FIX: the original passed `True` as render()'s 4th argument
        # (the background colour), which raises
        # "Invalid background RGBA argument" and crashed the waiting screen.
        text = font.render("Waiting for player...", 1, (180, 0, 255))
        win.blit(text, (width/2 - text.get_width()/2, height/2 - text.get_height()/2))
    else:
        font = pygame.font.SysFont("comicsans", 40)
        text = font.render("You", 1, (180, 0, 255))
        win.blit(text, (80, 50))
        text = font.render("Opponent", 1, (180, 0, 255))
        win.blit(text, (280, 50))
        move1 = game.get_player_move(0)
        move2 = game.get_player_move(1)
        # Reveal both moves once both players have gone; otherwise show
        # each player's own move (or a "Locked in"/"Waiting" placeholder).
        if game.both_went():
            text1 = font.render(move1, 1, (0, 0, 0))
            text2 = font.render(move2, 1, (0, 0, 0))
        else:
            if game.p1Went and p == 0:
                text1 = font.render(move1, 1, (0, 0, 0))
            elif game.p1Went:
                text1 = font.render("Locked in", 1, (0, 0, 0))
            else:
                text1 = font.render("Waiting", 1, (0, 0, 0))
            if game.p2Went and p == 1:
                text2 = font.render(move2, 1, (0, 0, 0))
            elif game.p2Went:
                text2 = font.render("Locked in", 1, (0, 0, 0))
            else:
                text2 = font.render("Waiting", 1, (0, 0, 0))
        # The local player's move always appears on the left.
        if p == 1:
            win.blit(text2, (80, 170))
            win.blit(text1, (280, 170))
        else:
            win.blit(text1, (80, 170))
            win.blit(text2, (280, 170))
    for btn in btns:
        btn.draw(win)
    pygame.display.update()
# Basic RGB colour constants for the move buttons.
black = (0, 0, 0)
red = (255, 0, 0)
blue = (0, 0, 255)
# 3 buttons for rock, paper and scissors; `btn.text` doubles as the move
# string sent to the server.
btns = [Button("Rock", 40, 300, black), Button("Scissors", 190, 300, red), Button("Paper", 340, 300, blue)]
def main_game():
    """
    Main game loop: polls the server for game state, shows the result once
    both players have moved, and forwards button clicks as moves.
    :return: None (raises SystemExit if the window is closed)
    """
    run = True
    clock = pygame.time.Clock()
    n = Network()
    player = int(n.get_p())
    print("You are player: ", player)
    while run:
        clock.tick(60)
        try:
            game = n.send("get")
        except Exception:
            # FIX: narrowed from a bare `except:` which would also swallow
            # KeyboardInterrupt/SystemExit.
            run = False
            print("Couldn't get game")
            break
        if game.both_went():
            # Round finished: show both moves briefly, then the result.
            redraw_window(win, game, player)
            pygame.time.delay(200)
            try:
                game = n.send("reset")
            except TypeError as e:
                run = False
                print("Couldn't get game", e)
                break
            font = pygame.font.SysFont("comicsans", 70)
            if game.winner() == player:
                text = font.render("You won!", 1, (0, 150, 0))
            elif game.winner() == -1:
                text = font.render("Draw", 1, (80, 80, 80))
            else:
                text = font.render("You Lost!", 1, (255, 0, 0))
            win.blit(text, ((width/2-text.get_width()/2), (height/2 - text.get_height()/2)))
            pygame.display.update()
            pygame.time.delay(2000)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                # BUG FIX: previously execution continued to redraw_window()
                # after pygame.quit(), crashing on the dead display. Exit
                # cleanly instead.
                raise SystemExit
            # Button click detection
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                for btn in btns:
                    if btn.click(pos) and game.connected():
                        # Only submit a move if this player hasn't gone yet.
                        if player == 0 and not game.p1Went:
                            n.send(btn.text)
                        elif player == 1 and not game.p2Went:
                            n.send(btn.text)
        redraw_window(win, game, player)
def menu_screen():
    """
    Show the connect/menu screen until the user clicks, then start the game.
    Displays a disconnect banner when the previous opponent left.
    :return: None (raises SystemExit if the window is closed)
    """
    run = True
    clock = pygame.time.Clock()
    while run:
        clock.tick(60)
        win.fill((255, 255, 255))
        font = pygame.font.SysFont('comicsans', 50)
        if disconnected:
            text = font.render("Opponent disconnected!", 1, (0, 80, 255))
            win.blit(text, (40, 150))
        text = font.render("Click to connect!", 1, (0, 80, 255))
        win.blit(text, (105, 230))
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                # BUG FIX: previously execution fell through toward
                # main_game() after pygame.quit(), crashing on the dead
                # display. Exit cleanly instead.
                raise SystemExit
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Any click starts the game.
                run = False
    main_game()
# Whether the opponent has disconnected or not. False on first launch; set
# True after a game ends so menu_screen() shows its disconnect banner.
disconnected = False
if __name__ == '__main__':
    # Alternate forever between the menu and the game loop.
    while True:
        menu_screen()
        disconnected = True
|
import copy
import keras as K
import numpy as np
from keras.datasets import mnist
from keras import layers
from matplotlib import pyplot
def select_real_samples(data, n):
    """Sample `n` random images (with replacement) from `data`, labelled 1 (real)."""
    indices = np.random.randint(0, data.shape[0], n)
    images = data[indices]
    labels = np.ones((n, 1))
    return images, labels
def create_null_samples(n):
    """Create `n` uniform-noise 28x28x1 images, labelled 0 (fake)."""
    noise = np.random.rand(28 * 28 * n)
    images = noise.reshape((n, 28, 28, 1))
    labels = np.zeros((n, 1))
    return images, labels
class MNISTDiscriminator(object):
    """Binary CNN classifier: labels a 28x28 image as real (1) or fake (0)."""
    def __init__(self, input_shape=(28, 28, 1)):
        # Two strided conv stages downsample the image before the sigmoid head.
        self.model = K.models.Sequential([
            layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same',
                          input_shape=input_shape),
            layers.LeakyReLU(alpha=0.2),
            layers.Dropout(0.4),
            layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'),
            layers.LeakyReLU(alpha=0.2),
            layers.Dropout(0.4),
            layers.Flatten(),
            layers.Dense(1, activation='sigmoid'),
        ])
        self.model.compile(loss='binary_crossentropy',
                           optimizer=K.optimizers.Adam(lr=0.002, beta_1=0.5),
                           metrics=['accuracy'])
    def train(self, data, epochs=128, batch_size=256):
        """Train standalone on half real / half noise batches, printing accuracy."""
        print("--- TRAIN DISCRIMINATOR ---")
        for epoch in range(epochs):
            half = int(batch_size / 2)
            x_real, y_real = select_real_samples(data, half)
            x_fake, y_fake = create_null_samples(half)
            acc_real = self.model.train_on_batch(x_real, y_real)[1]
            acc_fake = self.model.train_on_batch(x_fake, y_fake)[1]
            mean_acc = (acc_real + acc_fake) / 2
            print("epoch {} accuracy {}%".format(epoch, mean_acc*100))
    def train_on_batch(self, x, y):
        # Return only the loss component of [loss, accuracy].
        return self.model.train_on_batch(x, y)[0]
    def summary(self):
        self.model.summary()
class MNISTGenerator(object):
    """Maps latent vectors to 28x28x1 images via transposed convolutions."""
    def __init__(self, latent_space_dim=100, init_dim=(7, 7, 128)):
        self.latent_space_dim = latent_space_dim
        # Flattened size of the initial low-resolution feature map.
        n_units = init_dim[0] * init_dim[1] * init_dim[2]
        self.model = K.models.Sequential([
            layers.Dense(n_units, input_dim=latent_space_dim),
            layers.LeakyReLU(alpha=0.2),
            layers.Reshape(init_dim),
            # Two upsampling stages: 7x7 -> 14x14 -> 28x28.
            layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
            layers.LeakyReLU(alpha=0.2),
            layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
            layers.LeakyReLU(alpha=0.2),
            layers.Conv2D(1, (7, 7), activation='sigmoid', padding='same'),
        ])
    def create_latent_elements(self, n):
        """Draw `n` standard-normal latent vectors of shape (n, latent_dim)."""
        flat = np.random.randn(self.latent_space_dim * n)
        return flat.reshape(n, self.latent_space_dim)
    def create_generated_samples(self, n):
        """Generate `n` fake images, labelled 0."""
        images = self.model.predict(self.create_latent_elements(n))
        labels = np.zeros((n, 1))
        return images, labels
    def summary(self):
        self.model.summary()
class MNISTGAN(object):
    """Composite GAN: generator feeding a (frozen) discriminator."""
    def __init__(self, generator, discriminator):
        self.generator = generator
        self.discriminator = discriminator
        self.model = K.models.Sequential()
        self.model.add(generator.model)
        # Freeze the discriminator inside the combined model so GAN updates
        # only adjust generator weights.
        discriminator.model.trainable = False
        self.model.add(discriminator.model)
        adam = K.optimizers.Adam(lr=0.0002, beta_1=0.5)
        self.model.compile(loss='binary_crossentropy', optimizer=adam)
    def train_on_batch(self, x, y):
        return self.model.train_on_batch(x, y)
    def train(self, data, epochs=100, batch_size=256):
        """Alternate discriminator and generator updates over `data`."""
        batches_per_epoch = int(data.shape[0] / batch_size)
        half_batch = int(batch_size / 2)  ## discriminator takes half real, half fake
        for epoch in range(epochs):
            print("--- Epoch {} ---".format(epoch))
            for batch in range(batches_per_epoch):
                # 1) Update the discriminator on a mixed real/fake batch.
                x_real, y_real = select_real_samples(data, half_batch)
                x_fake, y_fake = self.generator.create_generated_samples(
                    half_batch
                )
                x_discrim = np.vstack((x_real, x_fake))
                y_discrim = np.vstack((y_real, y_fake))
                discrim_loss = self.discriminator.train_on_batch(
                    x_discrim, y_discrim
                )
                # 2) Update the generator via the combined model, labelling
                #    generated samples as "real".
                latent = self.generator.create_latent_elements(batch_size)
                wanted = np.ones((batch_size, 1))
                gan_loss = self.train_on_batch(latent, wanted)
                print("Batch {0:d}: GAN Loss {1:0.3f} / Discrim Loss {2:0.3f}".format(
                    batch, gan_loss, discrim_loss
                ))
    def summary(self):
        self.model.summary()
def main():
    """Load MNIST, assemble the GAN, and train it."""
    (train_images, _), (_, _) = mnist.load_data()
    # Add a channel axis and scale pixel values to [0, 1].
    samples = np.expand_dims(train_images, axis=-1)
    samples = samples.astype('float32') / 255.0
    generator = MNISTGenerator()
    discriminator = MNISTDiscriminator()
    gan = MNISTGAN(generator, discriminator)
    gan.summary()
    gan.train(samples)
# Script entry point: train the GAN end-to-end.
if __name__ == '__main__':
    main()
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" CRC computation gateware for USB3. """
import unittest
import operator
import functools
from amaranth import *
from ....test import LunaSSGatewareTestCase, ss_domain_test_case
def compute_usb_crc5(protected_bits):
    """ Generates a 5-bit signal equivalent to the CRC5 check of a given 11-bits.
    Intended for link command words / link control words.
    Parameters
    ----------
    protected_bits: 11-bit Signal()
        The 11-bit signal to generate a CRC5 for.
    Returns
    -------
    Signal(5)
        A five-bit signal equivalent to the CRC5 of the protected bits.
    """
    def xor_bits(*indices):
        # XOR together the selected protected bits. Indices are MSB-first:
        # index 0 selects the most-significant bit of `protected_bits`.
        bits = (protected_bits[len(protected_bits) - 1 - i] for i in indices)
        return functools.reduce(operator.__xor__, bits)
    # Implements the CRC polynomial from the USB specification.
    # Each Cat() element is one output bit of the CRC5 (Cat concatenates
    # LSB-first); note the second bit is inverted per the spec.
    return Cat(
        xor_bits(10, 9, 8, 5, 4, 2),
        ~xor_bits(10, 9, 8, 7, 4, 3, 1),
        xor_bits(10, 9, 8, 7, 6, 3, 2, 0),
        xor_bits(10, 7, 6, 4, 1),
        xor_bits(10, 9, 6, 5, 3, 0)
    )
class HeaderPacketCRC(Elaboratable):
    """ Gateware that computes a running CRC-16 for the first three words of a header packet.
    Attributes
    ----------
    clear: Signal(), input
        Strobe; clears the CRC, restoring it to its Initial Value.
    data_input: Signal(32), input
        Data word to add to our running CRC.
    advance_crc: Signal(), input
        When asserted, the current data input will be added to the CRC.
    crc: Signal(16), output
        The current CRC value.
    Parameters
    ----------
    initial_value: int, Const
        The initial value of the CRC shift register; the USB default is used if not provided.
    """
    def __init__(self, initial_value=0xFFFF):
        self._initial_value = initial_value
        #
        # I/O port
        #
        self.clear = Signal()
        self.data_input = Signal(32)
        self.advance_crc = Signal()
        # Output CRC; resets to the same value as the internal shift register.
        self.crc = Signal(16, reset=initial_value)
    def _generate_next_crc(self, current_crc, data_in):
        """ Generates the next round of a wordwise USB CRC16. """
        def xor_data_bits(*indices):
            # Data taps are indexed MSB-first (index 0 = most-significant bit).
            bits = (data_in[len(data_in) - 1 - i] for i in indices)
            return functools.reduce(operator.__xor__, bits)
        def xor_past_bits(*indices):
            # Previous-CRC taps are indexed directly (LSB-first).
            bits = (current_crc[i] for i in indices)
            return functools.reduce(operator.__xor__, bits)
        # Extracted from the USB3 spec's definition of the CRC16 polynomial.
        # This is hideous, but it's lifted directly from the specification, so it's probably safer
        # not to try and "clean it up" by expanding the polynomial ourselves.
        return Cat(
            xor_past_bits(4, 5, 7, 10, 12, 13, 15)
                ^ xor_data_bits(0, 4, 8, 12, 13, 15, 20, 21, 23, 26, 28, 29, 31),
            xor_past_bits(0, 4, 6, 7, 8, 10, 11, 12, 14, 15)
                ^ xor_data_bits(0, 1, 4, 5, 8, 9, 12, 14, 15, 16, 20, 22, 23, 24, 26, 27, 28, 30, 31),
            xor_past_bits(0, 1, 5, 7, 8, 9, 11, 12, 13, 15)
                ^ xor_data_bits(1, 2, 5, 6, 9, 10, 13, 15, 16, 17, 21, 23, 24, 25, 27, 28, 29, 31),
            xor_past_bits(0, 1, 2, 4, 5, 6, 7, 8, 9, 14, 15)
                ^ xor_data_bits(0, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 30, 31),
            xor_past_bits(0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 15)
                ^ xor_data_bits(1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 31),
            xor_past_bits(0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11)
                ^ xor_data_bits(2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27),
            xor_past_bits(0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12)
                ^ xor_data_bits(3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28),
            xor_past_bits(0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13)
                ^ xor_data_bits(4, 6, 7, 8, 10, 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29),
            xor_past_bits(0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14)
                ^ xor_data_bits(5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30),
            xor_past_bits(0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15)
                ^ xor_data_bits(6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 31),
            xor_past_bits(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15)
                ^ xor_data_bits(7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31),
            xor_past_bits(0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15)
                ^ xor_data_bits(8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31),
            xor_past_bits(0, 1, 3, 6, 8, 9, 11, 12, 14)
                ^ xor_data_bits(0, 4, 8, 9, 11, 16, 17, 19, 22, 24, 25, 27, 28, 30),
            xor_past_bits(1, 2, 4, 7, 9, 10, 12, 13, 15)
                ^ xor_data_bits(1, 5, 9, 10, 12, 17, 18, 20, 23, 25, 26, 28, 29, 31),
            xor_past_bits(2, 3, 5, 8, 10, 11, 13, 14)
                ^ xor_data_bits(2, 6, 10, 11, 13, 18, 19, 21, 24, 26, 27, 29, 30),
            xor_past_bits(3, 4, 6, 9, 11, 12, 14, 15)
                ^ xor_data_bits(3, 7, 11, 12, 14, 19, 20, 22, 25, 27, 28, 30, 31),
        )
    def elaborate(self, platform):
        m = Module()
        # Register that contains the running CRCs.
        crc = Signal(16, reset=self._initial_value)
        # If we're clearing our CRC in progress, move our holding register back to
        # our initial value.
        with m.If(self.clear):
            m.d.ss += crc.eq(self._initial_value)
        # Otherwise, update the CRC whenever we have new data.
        with m.Elif(self.advance_crc):
            m.d.ss += crc.eq(self._generate_next_crc(crc, self.data_input))
        # Convert from our intermediary "running CRC" format into the current CRC-16
        # (the transmitted CRC-16 is the bit-reversed complement of the register).
        m.d.comb += self.crc.eq(~crc[::-1])
        return m
class DataPacketPayloadCRC(Elaboratable):
""" Gateware that computes a running CRC-32 for a data packet payload.
This CRC is more complicated than others, as Data Packet Payloads are not
required to end on a word boundary. Accordingly, we'll need to handle cases
where we have an incomplete word of 1, 2, or 3 bytes.
Attributes
----------
clear: Signal(), input
Strobe; clears the CRC, restoring it to its Initial Value.
data_input: Signal(32), input
Data word to add to our running CRC.
advance_word: Signal(), input
When asserted, the current data word will be added to our CRC.
advance_3B: Signal(), input
When asserted, the last three bytes of the current data word will be added to our CRC.
advance_2B: Signal(), input
When asserted, the last two bytes of the current data word will be added to our CRC.
advance_1B: Signal(), input
When asserted, the last byte of the current data word will be added to our CRC.
crc: Signal(32), output
The current CRC value.
next_crc_3B: Signal(32), output
The CRC value for the next cycle, assuming we advance 3B.
next_crc_2B: Signal(32), output
The CRC value for the next cycle, assuming we advance 2B.
next_crc_1B: Signal(32), output
The CRC value for the next cycle, assuming we advance 1B.
Parameters
----------
initial_value: int, Const
The initial value of the CRC shift register; the USB default is used if not provided.
"""
    def __init__(self, initial_value=0xFFFFFFFF):
        # Initial (reset) value of the running CRC-32; the USB default is
        # used unless the caller overrides it.
        self._initial_value = initial_value
        #
        # I/O port
        #
        self.clear = Signal()
        self.data_input = Signal(32)
        # Advance strobes: fold in a full word, or only the trailing
        # 3 / 2 / 1 byte(s) of the final, partial word.
        self.advance_word = Signal()
        self.advance_3B = Signal()
        self.advance_2B = Signal()
        self.advance_1B = Signal()
        self.crc = Signal(32)
        # Lookahead outputs: the CRC value for the next cycle if the
        # corresponding partial-word advance were taken.
        self.next_crc_3B = Signal(32)
        self.next_crc_2B = Signal(32)
        self.next_crc_1B = Signal(32)
    def _generate_next_full_crc(self, current_crc, data_in):
        """ Generates the next round of our CRC; given a full input word .
        `d` taps are indexed MSB-first into `data_in`; `q` taps index the
        current CRC state directly. Returns the next 32-bit running CRC,
        concatenated LSB-first.
        """
        # Helper functions that help us more clearly match the expanded polynomial form.
        d = lambda i : data_in[len(data_in) - i - 1]
        q = lambda i : current_crc[i]
        # These lines are extremely long, but there doesn't seem any advantage in clarity to splitting them.
        return Cat(
            q(0) ^ q(6) ^ q(9) ^ q(10) ^ q(12) ^ q(16) ^ q(24) ^ q(25) ^ q(26) ^ q(28) ^ q(29) ^ q(30) ^ q(31) ^ d(0) ^ d(6) ^ d(9) ^ d(10) ^ d(12) ^ d(16) ^ d(24) ^ d(25) ^ d(26) ^ d(28) ^ d(29) ^ d(30) ^ d(31),
            q(0) ^ q(1) ^ q(6) ^ q(7) ^ q(9) ^ q(11) ^ q(12) ^ q(13) ^ q(16) ^ q(17) ^ q(24) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(6) ^ d(7) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(16) ^ d(17) ^ d(24) ^ d(27) ^ d(28),
            q(0) ^ q(1) ^ q(2) ^ q(6) ^ q(7) ^ q(8) ^ q(9) ^ q(13) ^ q(14) ^ q(16) ^ q(17) ^ q(18) ^ q(24) ^ q(26) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7) ^ d(8) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(17) ^ d(18) ^ d(24) ^ d(26) ^ d(30) ^ d(31),
            q(1) ^ q(2) ^ q(3) ^ q(7) ^ q(8) ^ q(9) ^ q(10) ^ q(14) ^ q(15) ^ q(17) ^ q(18) ^ q(19) ^ q(25) ^ q(27) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(7) ^ d(8) ^ d(9) ^ d(10) ^ d(14) ^ d(15) ^ d(17) ^ d(18) ^ d(19) ^ d(25) ^ d(27) ^ d(31),
            q(0) ^ q(2) ^ q(3) ^ q(4) ^ q(6) ^ q(8) ^ q(11) ^ q(12) ^ q(15) ^ q(18) ^ q(19) ^ q(20) ^ q(24) ^ q(25) ^ q(29) ^ q(30) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(8) ^ d(11) ^ d(12) ^ d(15) ^ d(18) ^ d(19) ^ d(20) ^ d(24) ^ d(25) ^ d(29) ^ d(30) ^ d(31),
            q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(5) ^ q(6) ^ q(7) ^ q(10) ^ q(13) ^ q(19) ^ q(20) ^ q(21) ^ q(24) ^ q(28) ^ q(29) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(19) ^ d(20) ^ d(21) ^ d(24) ^ d(28) ^ d(29),
            q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(6) ^ q(7) ^ q(8) ^ q(11) ^ q(14) ^ q(20) ^ q(21) ^ q(22) ^ q(25) ^ q(29) ^ q(30) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(20) ^ d(21) ^ d(22) ^ d(25) ^ d(29) ^ d(30),
            q(0) ^ q(2) ^ q(3) ^ q(5) ^ q(7) ^ q(8) ^ q(10) ^ q(15) ^ q(16) ^ q(21) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(28) ^ q(29) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7) ^ d(8) ^ d(10) ^ d(15) ^ d(16) ^ d(21) ^ d(22) ^ d(23) ^ d(24) ^ d(25) ^ d(28) ^ d(29),
            q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(8) ^ q(10) ^ q(11) ^ q(12) ^ q(17) ^ q(22) ^ q(23) ^ q(28) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(8) ^ d(10) ^ d(11) ^ d(12) ^ d(17) ^ d(22) ^ d(23) ^ d(28) ^ d(31),
            q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(9) ^ q(11) ^ q(12) ^ q(13) ^ q(18) ^ q(23) ^ q(24) ^ q(29) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(18) ^ d(23) ^ d(24) ^ d(29),
            q(0) ^ q(2) ^ q(3) ^ q(5) ^ q(9) ^ q(13) ^ q(14) ^ q(16) ^ q(19) ^ q(26) ^ q(28) ^ q(29) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(19) ^ d(26) ^ d(28) ^ d(29) ^ d(31),
            q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(9) ^ q(12) ^ q(14) ^ q(15) ^ q(16) ^ q(17) ^ q(20) ^ q(24) ^ q(25) ^ q(26) ^ q(27) ^ q(28) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(9) ^ d(12) ^ d(14) ^ d(15) ^ d(16) ^ d(17) ^ d(20) ^ d(24) ^ d(25) ^ d(26) ^ d(27) ^ d(28) ^ d(31),
            q(0) ^ q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(6) ^ q(9) ^ q(12) ^ q(13) ^ q(15) ^ q(17) ^ q(18) ^ q(21) ^ q(24) ^ q(27) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(9) ^ d(12) ^ d(13) ^ d(15) ^ d(17) ^ d(18) ^ d(21) ^ d(24) ^ d(27) ^ d(30) ^ d(31),
            q(1) ^ q(2) ^ q(3) ^ q(5) ^ q(6) ^ q(7) ^ q(10) ^ q(13) ^ q(14) ^ q(16) ^ q(18) ^ q(19) ^ q(22) ^ q(25) ^ q(28) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(22) ^ d(25) ^ d(28) ^ d(31),
            q(2) ^ q(3) ^ q(4) ^ q(6) ^ q(7) ^ q(8) ^ q(11) ^ q(14) ^ q(15) ^ q(17) ^ q(19) ^ q(20) ^ q(23) ^ q(26) ^ q(29) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(15) ^ d(17) ^ d(19) ^ d(20) ^ d(23) ^ d(26) ^ d(29),
            q(3) ^ q(4) ^ q(5) ^ q(7) ^ q(8) ^ q(9) ^ q(12) ^ q(15) ^ q(16) ^ q(18) ^ q(20) ^ q(21) ^ q(24) ^ q(27) ^ q(30) ^ d(3) ^ d(4) ^ d(5) ^ d(7) ^ d(8) ^ d(9) ^ d(12) ^ d(15) ^ d(16) ^ d(18) ^ d(20) ^ d(21) ^ d(24) ^ d(27) ^ d(30),
            q(0) ^ q(4) ^ q(5) ^ q(8) ^ q(12) ^ q(13) ^ q(17) ^ q(19) ^ q(21) ^ q(22) ^ q(24) ^ q(26) ^ q(29) ^ q(30) ^ d(0) ^ d(4) ^ d(5) ^ d(8) ^ d(12) ^ d(13) ^ d(17) ^ d(19) ^ d(21) ^ d(22) ^ d(24) ^ d(26) ^ d(29) ^ d(30),
            q(1) ^ q(5) ^ q(6) ^ q(9) ^ q(13) ^ q(14) ^ q(18) ^ q(20) ^ q(22) ^ q(23) ^ q(25) ^ q(27) ^ q(30) ^ q(31) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14) ^ d(18) ^ d(20) ^ d(22) ^ d(23) ^ d(25) ^ d(27) ^ d(30) ^ d(31),
            q(2) ^ q(6) ^ q(7) ^ q(10) ^ q(14) ^ q(15) ^ q(19) ^ q(21) ^ q(23) ^ q(24) ^ q(26) ^ q(28) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ d(14) ^ d(15) ^ d(19) ^ d(21) ^ d(23) ^ d(24) ^ d(26) ^ d(28) ^ d(31),
            q(3) ^ q(7) ^ q(8) ^ q(11) ^ q(15) ^ q(16) ^ q(20) ^ q(22) ^ q(24) ^ q(25) ^ q(27) ^ q(29) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15) ^ d(16) ^ d(20) ^ d(22) ^ d(24) ^ d(25) ^ d(27) ^ d(29),
            q(4) ^ q(8) ^ q(9) ^ q(12) ^ q(16) ^ q(17) ^ q(21) ^ q(23) ^ q(25) ^ q(26) ^ q(28) ^ q(30) ^ d(4) ^ d(8) ^ d(9) ^ d(12) ^ d(16) ^ d(17) ^ d(21) ^ d(23) ^ d(25) ^ d(26) ^ d(28) ^ d(30),
            q(5) ^ q(9) ^ q(10) ^ q(13) ^ q(17) ^ q(18) ^ q(22) ^ q(24) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(5) ^ d(9) ^ d(10) ^ d(13) ^ d(17) ^ d(18) ^ d(22) ^ d(24) ^ d(26) ^ d(27) ^ d(29) ^ d(31),
            q(0) ^ q(9) ^ q(11) ^ q(12) ^ q(14) ^ q(16) ^ q(18) ^ q(19) ^ q(23) ^ q(24) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(23) ^ d(24) ^ d(26) ^ d(27) ^ d(29) ^ d(31),
            q(0) ^ q(1) ^ q(6) ^ q(9) ^ q(13) ^ q(15) ^ q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^ d(6) ^ d(9) ^ d(13) ^ d(15) ^ d(16) ^ d(17) ^ d(19) ^ d(20) ^ d(26) ^ d(27) ^ d(29) ^ d(31),
            q(1) ^ q(2) ^ q(7) ^ q(10) ^ q(14) ^ q(16) ^ q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(27) ^ q(28) ^ q(30) ^ d(1) ^ d(2) ^ d(7) ^ d(10) ^ d(14) ^ d(16) ^ d(17) ^ d(18) ^ d(20) ^ d(21) ^ d(27) ^ d(28) ^ d(30),
            q(2) ^ q(3) ^ q(8) ^ q(11) ^ q(15) ^ q(17) ^ q(18) ^ q(19) ^ q(21) ^ q(22) ^ q(28) ^ q(29) ^ q(31) ^ d(2) ^ d(3) ^ d(8) ^ d(11) ^ d(15) ^ d(17) ^ d(18) ^ d(19) ^ d(21) ^ d(22) ^ d(28) ^ d(29) ^ d(31),
            q(0) ^ q(3) ^ q(4) ^ q(6) ^ q(10) ^ q(18) ^ q(19) ^ q(20) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(26) ^ q(28) ^ q(31) ^ d(0) ^ d(3) ^ d(4) ^ d(6) ^ d(10) ^ d(18) ^ d(19) ^ d(20) ^ d(22) ^ d(23) ^ d(24) ^ d(25) ^ d(26) ^ d(28) ^ d(31),
            q(1) ^ q(4) ^ q(5) ^ q(7) ^ q(11) ^ q(19) ^ q(20) ^ q(21) ^ q(23) ^ q(24) ^ q(25) ^ q(26) ^ q(27) ^ q(29) ^ d(1) ^ d(4) ^ d(5) ^ d(7) ^ d(11) ^ d(19) ^ d(20) ^ d(21) ^ d(23) ^ d(24) ^ d(25) ^ d(26) ^ d(27) ^ d(29),
            q(2) ^ q(5) ^ q(6) ^ q(8) ^ q(12) ^ q(20) ^ q(21) ^ q(22) ^ q(24) ^ q(25) ^ q(26) ^ q(27) ^ q(28) ^ q(30) ^ d(2) ^ d(5) ^ d(6) ^ d(8) ^ d(12) ^ d(20) ^ d(21) ^ d(22) ^ d(24) ^ d(25) ^ d(26) ^ d(27) ^ d(28) ^ d(30),
            q(3) ^ q(6) ^ q(7) ^ q(9) ^ q(13) ^ q(21) ^ q(22) ^ q(23) ^ q(25) ^ q(26) ^ q(27) ^ q(28) ^ q(29) ^ q(31) ^ d(3) ^ d(6) ^ d(7) ^ d(9) ^ d(13) ^ d(21) ^ d(22) ^ d(23) ^ d(25) ^ d(26) ^ d(27) ^ d(28) ^ d(29) ^ d(31),
            q(4) ^ q(7) ^ q(8) ^ q(10) ^ q(14) ^ q(22) ^ q(23) ^ q(24) ^ q(26) ^ q(27) ^ q(28) ^ q(29) ^ q(30) ^ d(4) ^ d(7) ^ d(8) ^ d(10) ^ d(14) ^ d(22) ^ d(23) ^ d(24) ^ d(26) ^ d(27) ^ d(28) ^ d(29) ^ d(30),
            q(5) ^ q(8) ^ q(9) ^ q(11) ^ q(15) ^ q(23) ^ q(24) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ q(30) ^ q(31) ^ d(5) ^ d(8) ^ d(9) ^ d(11) ^ d(15) ^ d(23) ^ d(24) ^ d(25) ^ d(27) ^ d(28) ^ d(29) ^ d(30) ^ d(31),
        )
    def _generate_next_3B_crc(self, current_crc, data_in):
        """ Generates the next round of our CRC; given a 3B trailing input word .
        Folds in only the trailing three bytes of the final, partial data
        word. `d` taps are indexed MSB-first; `q` taps index the current CRC
        state directly. Returns the next 32-bit running CRC, LSB first.
        """
        # Helper functions that help us more clearly match the expanded polynomial form.
        d = lambda i : data_in[len(data_in) - i - 1]
        q = lambda i : current_crc[i]
        # These lines are extremely long, but there doesn't seem any advantage in clarity to splitting them.
        return Cat(
            q(8) ^ q(14) ^ q(17) ^ q(18) ^ q(20) ^ q(24) ^ d(0) ^ d(6) ^ d(9) ^ d(10) ^ d(12) ^ d(16),
            q(8) ^ q(9) ^ q(14) ^ q(15) ^ q(17) ^ q(19) ^ q(20) ^ q(21) ^ q(24) ^ q(25) ^ d(0) ^ d(1) ^ d(6) ^ d(7) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(16) ^ d(17),
            q(8) ^ q(9) ^ q(10) ^ q(14) ^ q(15) ^ q(16) ^ q(17) ^ q(21) ^ q(22) ^ q(24) ^ q(25) ^ q(26) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7) ^ d(8) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(17) ^ d(18),
            q(9) ^ q(10) ^ q(11) ^ q(15) ^ q(16) ^ q(17) ^ q(18) ^ q(22) ^ q(23) ^ q(25) ^ q(26) ^ q(27) ^ d(1) ^ d(2) ^ d(3) ^ d(7) ^ d(8) ^ d(9) ^ d(10) ^ d(14) ^ d(15) ^ d(17) ^ d(18) ^ d(19),
            q(8) ^ q(10) ^ q(11) ^ q(12) ^ q(14) ^ q(16) ^ q(19) ^ q(20) ^ q(23) ^ q(26) ^ q(27) ^ q(28) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(8) ^ d(11) ^ d(12) ^ d(15) ^ d(18) ^ d(19) ^ d(20),
            q(8) ^ q(9) ^ q(11) ^ q(12) ^ q(13) ^ q(14) ^ q(15) ^ q(18) ^ q(21) ^ q(27) ^ q(28) ^ q(29) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(19) ^ d(20) ^ d(21),
            q(9) ^ q(10) ^ q(12) ^ q(13) ^ q(14) ^ q(15) ^ q(16) ^ q(19) ^ q(22) ^ q(28) ^ q(29) ^ q(30) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(20) ^ d(21) ^ d(22),
            q(8) ^ q(10) ^ q(11) ^ q(13) ^ q(15) ^ q(16) ^ q(18) ^ q(23) ^ q(24) ^ q(29) ^ q(30) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7) ^ d(8) ^ d(10) ^ d(15) ^ d(16) ^ d(21) ^ d(22) ^ d(23),
            q(8) ^ q(9) ^ q(11) ^ q(12) ^ q(16) ^ q(18) ^ q(19) ^ q(20) ^ q(25) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(8) ^ d(10) ^ d(11) ^ d(12) ^ d(17) ^ d(22) ^ d(23),
            q(9) ^ q(10) ^ q(12) ^ q(13) ^ q(17) ^ q(19) ^ q(20) ^ q(21) ^ q(26) ^ q(31) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(18) ^ d(23),
            q(8) ^ q(10) ^ q(11) ^ q(13) ^ q(17) ^ q(21) ^ q(22) ^ q(24) ^ q(27) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(19),
            q(8) ^ q(9) ^ q(11) ^ q(12) ^ q(17) ^ q(20) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(28) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(9) ^ d(12) ^ d(14) ^ d(15) ^ d(16) ^ d(17) ^ d(20),
            q(8) ^ q(9) ^ q(10) ^ q(12) ^ q(13) ^ q(14) ^ q(17) ^ q(20) ^ q(21) ^ q(23) ^ q(25) ^ q(26) ^ q(29) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(9) ^ d(12) ^ d(13) ^ d(15) ^ d(17) ^ d(18) ^ d(21),
            q(9) ^ q(10) ^ q(11) ^ q(13) ^ q(14) ^ q(15) ^ q(18) ^ q(21) ^ q(22) ^ q(24) ^ q(26) ^ q(27) ^ q(30) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(22),
            q(10) ^ q(11) ^ q(12) ^ q(14) ^ q(15) ^ q(16) ^ q(19) ^ q(22) ^ q(23) ^ q(25) ^ q(27) ^ q(28) ^ q(31) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(15) ^ d(17) ^ d(19) ^ d(20) ^ d(23),
            q(11) ^ q(12) ^ q(13) ^ q(15) ^ q(16) ^ q(17) ^ q(20) ^ q(23) ^ q(24) ^ q(26) ^ q(28) ^ q(29) ^ d(3) ^ d(4) ^ d(5) ^ d(7) ^ d(8) ^ d(9) ^ d(12) ^ d(15) ^ d(16) ^ d(18) ^ d(20) ^ d(21),
            q(8) ^ q(12) ^ q(13) ^ q(16) ^ q(20) ^ q(21) ^ q(25) ^ q(27) ^ q(29) ^ q(30) ^ d(0) ^ d(4) ^ d(5) ^ d(8) ^ d(12) ^ d(13) ^ d(17) ^ d(19) ^ d(21) ^ d(22),
            q(9) ^ q(13) ^ q(14) ^ q(17) ^ q(21) ^ q(22) ^ q(26) ^ q(28) ^ q(30) ^ q(31) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14) ^ d(18) ^ d(20) ^ d(22) ^ d(23),
            q(10) ^ q(14) ^ q(15) ^ q(18) ^ q(22) ^ q(23) ^ q(27) ^ q(29) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ d(14) ^ d(15) ^ d(19) ^ d(21) ^ d(23),
            q(11) ^ q(15) ^ q(16) ^ q(19) ^ q(23) ^ q(24) ^ q(28) ^ q(30) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15) ^ d(16) ^ d(20) ^ d(22),
            q(12) ^ q(16) ^ q(17) ^ q(20) ^ q(24) ^ q(25) ^ q(29) ^ q(31) ^ d(4) ^ d(8) ^ d(9) ^ d(12) ^ d(16) ^ d(17) ^ d(21) ^ d(23),
            q(13) ^ q(17) ^ q(18) ^ q(21) ^ q(25) ^ q(26) ^ q(30) ^ d(5) ^ d(9) ^ d(10) ^ d(13) ^ d(17) ^ d(18) ^ d(22),
            q(8) ^ q(17) ^ q(19) ^ q(20) ^ q(22) ^ q(24) ^ q(26) ^ q(27) ^ q(31) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(23),
            q(8) ^ q(9) ^ q(14) ^ q(17) ^ q(21) ^ q(23) ^ q(24) ^ q(25) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(6) ^ d(9) ^ d(13) ^ d(15) ^ d(16) ^ d(17) ^ d(19) ^ d(20),
            q(0) ^ q(9) ^ q(10) ^ q(15) ^ q(18) ^ q(22) ^ q(24) ^ q(25) ^ q(26) ^ q(28) ^ q(29) ^ d(1) ^ d(2) ^ d(7) ^ d(10) ^ d(14) ^ d(16) ^ d(17) ^ d(18) ^ d(20) ^ d(21),
            q(1) ^ q(10) ^ q(11) ^ q(16) ^ q(19) ^ q(23) ^ q(25) ^ q(26) ^ q(27) ^ q(29) ^ q(30) ^ d(2) ^ d(3) ^ d(8) ^ d(11) ^ d(15) ^ d(17) ^ d(18) ^ d(19) ^ d(21) ^ d(22),
            q(2) ^ q(8) ^ q(11) ^ q(12) ^ q(14) ^ q(18) ^ q(26) ^ q(27) ^ q(28) ^ q(30) ^ q(31) ^ d(0) ^ d(3) ^ d(4) ^ d(6) ^ d(10) ^ d(18) ^ d(19) ^ d(20) ^ d(22) ^ d(23),
            q(3) ^ q(9) ^ q(12) ^ q(13) ^ q(15) ^ q(19) ^ q(27) ^ q(28) ^ q(29) ^ q(31) ^ d(1) ^ d(4) ^ d(5) ^ d(7) ^ d(11) ^ d(19) ^ d(20) ^ d(21) ^ d(23),
            q(4) ^ q(10) ^ q(13) ^ q(14) ^ q(16) ^ q(20) ^ q(28) ^ q(29) ^ q(30) ^ d(2) ^ d(5) ^ d(6) ^ d(8) ^ d(12) ^ d(20) ^ d(21) ^ d(22),
            q(5) ^ q(11) ^ q(14) ^ q(15) ^ q(17) ^ q(21) ^ q(29) ^ q(30) ^ q(31) ^ d(3) ^ d(6) ^ d(7) ^ d(9) ^ d(13) ^ d(21) ^ d(22) ^ d(23),
            q(6) ^ q(12) ^ q(15) ^ q(16) ^ q(18) ^ q(22) ^ q(30) ^ q(31) ^ d(4) ^ d(7) ^ d(8) ^ d(10) ^ d(14) ^ d(22) ^ d(23),
            q(7) ^ q(13) ^ q(16) ^ q(17) ^ q(19) ^ q(23) ^ q(31) ^ d(5) ^ d(8) ^ d(9) ^ d(11) ^ d(15) ^ d(23),
        )
    def _generate_next_2B_crc(self, current_crc, data_in):
        """ Generates the next round of our CRC; given a 2B trailing input word .
        Folds in only the trailing two bytes of the final, partial data word.
        `d` taps are indexed MSB-first; `q` taps index the current CRC state
        directly. Returns the next 32-bit running CRC, LSB first.
        """
        # Helper functions that help us more clearly match the expanded polynomial form.
        d = lambda i : data_in[len(data_in) - i - 1]
        q = lambda i : current_crc[i]
        # These lines are extremely long, but there doesn't seem any advantage in clarity to splitting them.
        return Cat(
            q(16) ^ q(22) ^ q(25) ^ q(26) ^ q(28) ^ d(0) ^ d(6) ^ d(9) ^ d(10) ^ d(12),
            q(16) ^ q(17) ^ q(22) ^ q(23) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ d(0) ^ d(1) ^ d(6) ^ d(7) ^ d(9) ^ d(11) ^ d(12) ^ d(13),
            q(16) ^ q(17) ^ q(18) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(29) ^ q(30) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7) ^ d(8) ^ d(9) ^ d(13) ^ d(14),
            q(17) ^ q(18) ^ q(19) ^ q(23) ^ q(24) ^ q(25) ^ q(26) ^ q(30) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(7) ^ d(8) ^ d(9) ^ d(10) ^ d(14) ^ d(15),
            q(16) ^ q(18) ^ q(19) ^ q(20) ^ q(22) ^ q(24) ^ q(27) ^ q(28) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(8) ^ d(11) ^ d(12) ^ d(15),
            q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(21) ^ q(22) ^ q(23) ^ q(26) ^ q(29) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13),
            q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(22) ^ q(23) ^ q(24) ^ q(27) ^ q(30) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14),
            q(16) ^ q(18) ^ q(19) ^ q(21) ^ q(23) ^ q(24) ^ q(26) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7) ^ d(8) ^ d(10) ^ d(15),
            q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(24) ^ q(26) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(8) ^ d(10) ^ d(11) ^ d(12),
            q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(9) ^ d(11) ^ d(12) ^ d(13),
            q(16) ^ q(18) ^ q(19) ^ q(21) ^ q(25) ^ q(29) ^ q(30) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(9) ^ d(13) ^ d(14),
            q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(25) ^ q(28) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(9) ^ d(12) ^ d(14) ^ d(15),
            q(16) ^ q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(22) ^ q(25) ^ q(28) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(9) ^ d(12) ^ d(13) ^ d(15),
            q(17) ^ q(18) ^ q(19) ^ q(21) ^ q(22) ^ q(23) ^ q(26) ^ q(29) ^ q(30) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(14),
            q(18) ^ q(19) ^ q(20) ^ q(22) ^ q(23) ^ q(24) ^ q(27) ^ q(30) ^ q(31) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(15),
            q(19) ^ q(20) ^ q(21) ^ q(23) ^ q(24) ^ q(25) ^ q(28) ^ q(31) ^ d(3) ^ d(4) ^ d(5) ^ d(7) ^ d(8) ^ d(9) ^ d(12) ^ d(15),
            q(0) ^ q(16) ^ q(20) ^ q(21) ^ q(24) ^ q(28) ^ q(29) ^ d(0) ^ d(4) ^ d(5) ^ d(8) ^ d(12) ^ d(13),
            q(1) ^ q(17) ^ q(21) ^ q(22) ^ q(25) ^ q(29) ^ q(30) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14),
            q(2) ^ q(18) ^ q(22) ^ q(23) ^ q(26) ^ q(30) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ d(14) ^ d(15),
            q(3) ^ q(19) ^ q(23) ^ q(24) ^ q(27) ^ q(31) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15),
            q(4) ^ q(20) ^ q(24) ^ q(25) ^ q(28) ^ d(4) ^ d(8) ^ d(9) ^ d(12),
            q(5) ^ q(21) ^ q(25) ^ q(26) ^ q(29) ^ d(5) ^ d(9) ^ d(10) ^ d(13),
            q(6) ^ q(16) ^ q(25) ^ q(27) ^ q(28) ^ q(30) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14),
            q(7) ^ q(16) ^ q(17) ^ q(22) ^ q(25) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^ d(6) ^ d(9) ^ d(13) ^ d(15),
            q(8) ^ q(17) ^ q(18) ^ q(23) ^ q(26) ^ q(30) ^ d(1) ^ d(2) ^ d(7) ^ d(10) ^ d(14),
            q(9) ^ q(18) ^ q(19) ^ q(24) ^ q(27) ^ q(31) ^ d(2) ^ d(3) ^ d(8) ^ d(11) ^ d(15),
            q(10) ^ q(16) ^ q(19) ^ q(20) ^ q(22) ^ q(26) ^ d(0) ^ d(3) ^ d(4) ^ d(6) ^ d(10),
            q(11) ^ q(17) ^ q(20) ^ q(21) ^ q(23) ^ q(27) ^ d(1) ^ d(4) ^ d(5) ^ d(7) ^ d(11),
            q(12) ^ q(18) ^ q(21) ^ q(22) ^ q(24) ^ q(28) ^ d(2) ^ d(5) ^ d(6) ^ d(8) ^ d(12),
            q(13) ^ q(19) ^ q(22) ^ q(23) ^ q(25) ^ q(29) ^ d(3) ^ d(6) ^ d(7) ^ d(9) ^ d(13),
            q(14) ^ q(20) ^ q(23) ^ q(24) ^ q(26) ^ q(30) ^ d(4) ^ d(7) ^ d(8) ^ d(10) ^ d(14),
            q(15) ^ q(21) ^ q(24) ^ q(25) ^ q(27) ^ q(31) ^ d(5) ^ d(8) ^ d(9) ^ d(11) ^ d(15),
        )
def _generate_next_1B_crc(self, current_crc, data_in):
    """ Generates the next round of our CRC; given a 1B (single-byte) trailing input word. """
    # Helper functions that help us more clearly match the expanded polynomial form.
    # d(i): data bit i, indexed from the most-significant end of the input byte.
    d = lambda i : data_in[len(data_in) - i - 1]
    # q(i): bit i of the current running CRC state.
    q = lambda i : current_crc[i]
    # Each Cat() entry below is one output bit of the CRC-32 state update,
    # fully unrolled into its XOR-of-taps form (one row per output bit).
    return Cat(
        q(24) ^ q(30) ^ d(0) ^ d(6),
        q(24) ^ q(25) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(6) ^ d(7),
        q(24) ^ q(25) ^ q(26) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7),
        q(25) ^ q(26) ^ q(27) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(7),
        q(24) ^ q(26) ^ q(27) ^ q(28) ^ q(30) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6),
        q(24) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7),
        q(25) ^ q(26) ^ q(28) ^ q(29) ^ q(30) ^ q(31) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7),
        q(24) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7),
        q(0) ^ q(24) ^ q(25) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(3) ^ d(4),
        q(1) ^ q(25) ^ q(26) ^ q(28) ^ q(29) ^ d(1) ^ d(2) ^ d(4) ^ d(5),
        q(2) ^ q(24) ^ q(26) ^ q(27) ^ q(29) ^ d(0) ^ d(2) ^ d(3) ^ d(5),
        q(3) ^ q(24) ^ q(25) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(3) ^ d(4),
        q(4) ^ q(24) ^ q(25) ^ q(26) ^ q(28) ^ q(29) ^ q(30) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6),
        q(5) ^ q(25) ^ q(26) ^ q(27) ^ q(29) ^ q(30) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7),
        q(6) ^ q(26) ^ q(27) ^ q(28) ^ q(30) ^ q(31) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7),
        q(7) ^ q(27) ^ q(28) ^ q(29) ^ q(31) ^ d(3) ^ d(4) ^ d(5) ^ d(7),
        q(8) ^ q(24) ^ q(28) ^ q(29) ^ d(0) ^ d(4) ^ d(5),
        q(9) ^ q(25) ^ q(29) ^ q(30) ^ d(1) ^ d(5) ^ d(6),
        q(10) ^ q(26) ^ q(30) ^ q(31) ^ d(2) ^ d(6) ^ d(7),
        q(11) ^ q(27) ^ q(31) ^ d(3) ^ d(7),
        q(12) ^ q(28) ^ d(4),
        q(13) ^ q(29) ^ d(5),
        q(14) ^ q(24) ^ d(0),
        q(15) ^ q(24) ^ q(25) ^ q(30) ^ d(0) ^ d(1) ^ d(6),
        q(16) ^ q(25) ^ q(26) ^ q(31) ^ d(1) ^ d(2) ^ d(7),
        q(17) ^ q(26) ^ q(27) ^ d(2) ^ d(3),
        q(18) ^ q(24) ^ q(27) ^ q(28) ^ q(30) ^ d(0) ^ d(3) ^ d(4) ^ d(6),
        q(19) ^ q(25) ^ q(28) ^ q(29) ^ q(31) ^ d(1) ^ d(4) ^ d(5) ^ d(7),
        q(20) ^ q(26) ^ q(29) ^ q(30) ^ d(2) ^ d(5) ^ d(6),
        q(21) ^ q(27) ^ q(30) ^ q(31) ^ d(3) ^ d(6) ^ d(7),
        q(22) ^ q(28) ^ q(31) ^ d(4) ^ d(7),
        q(23) ^ q(29) ^ d(5),
    )
def elaborate(self, platform):
    """Build the CRC datapath: combinational next-state logic plus the update register."""
    m = Module()
    # Register that contains the running CRCs.
    crc = Signal(32, reset=self._initial_value)
    # Internal signals representing our next internal state given various input sizes.
    next_crc_3B = Signal.like(crc)
    next_crc_2B = Signal.like(crc)
    next_crc_1B = Signal.like(crc)
    # Compute each of our theoretical partial "next-CRC" values.
    # All are computed every cycle; only the selected one is latched below.
    m.d.comb += [
        next_crc_3B.eq(self._generate_next_3B_crc(crc, self.data_input[0:24])),
        next_crc_2B.eq(self._generate_next_2B_crc(crc, self.data_input[0:16])),
        next_crc_1B.eq(self._generate_next_1B_crc(crc, self.data_input[0:8])),
    ]
    # If we're clearing our CRC in progress, move our holding register back to
    # our initial value.
    with m.If(self.clear):
        m.d.ss += crc.eq(self._initial_value)
    # Otherwise, update the CRC whenever we have new data.
    # Priority: clear, then full-word, then 3/2/1-byte partial advances.
    with m.Elif(self.advance_word):
        m.d.ss += crc.eq(self._generate_next_full_crc(crc, self.data_input))
    with m.Elif(self.advance_3B):
        m.d.ss += crc.eq(next_crc_3B)
    with m.Elif(self.advance_2B):
        m.d.ss += crc.eq(next_crc_2B)
    with m.Elif(self.advance_1B):
        m.d.ss += crc.eq(next_crc_1B)
    # Convert from our intermediary "running CRC" format into the correct CRC32 outputs.
    # (bit-reverse and invert: the standard CRC-32 output transform)
    m.d.comb += [
        self.crc         .eq(~crc[::-1]),
        self.next_crc_3B .eq(~next_crc_3B[::-1]),
        self.next_crc_2B .eq(~next_crc_2B[::-1]),
        self.next_crc_1B .eq(~next_crc_1B[::-1])
    ]
    return m
class DataPacketPayloadCRCTest(LunaSSGatewareTestCase):
    """Simulation tests for DataPacketPayloadCRC against known-good CRC-32 values."""
    FRAGMENT_UNDER_TEST = DataPacketPayloadCRC

    @ss_domain_test_case
    def test_aligned_crc(self):
        """Word-aligned payload: two full words should yield the reference CRC."""
        dut = self.dut
        #yield dut.advance_word.eq(1)
        for i in (0x02000112, 0x40000000):
            yield dut.data_input.eq(i)
            yield from self.pulse(dut.advance_word, step_after=False)
        self.assertEqual((yield dut.crc), 0x34984B13)

    @ss_domain_test_case
    def test_unaligned_crc(self):
        """Payload with a trailing 2-byte fragment: next_crc_2B must preview the result."""
        dut = self.dut
        # Aligned section of a real USB data capture, from a USB flash drive.
        aligned_section =[
            0x03000112,
            0x09000000,
            0x520013FE,
            0x02010100,
        ]
        # Present the aligned section...
        for i in aligned_section:
            yield dut.data_input.eq(i)
            yield from self.pulse(dut.advance_word, step_after=False)
        # ... and then our unaligned data.
        yield dut.data_input.eq(0x0000_0103)
        yield
        # Our next-CRC should indicate the correct value...
        self.assertEqual((yield dut.next_crc_2B), 0x540aa487)
        # ...and after advancing, we should see the same value on our CRC output.
        yield from self.pulse(dut.advance_2B)
        self.assertEqual((yield dut.crc), 0x540aa487)
# Run the gateware test cases when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib2
import json
import os
import tarfile
import shutil
import subprocess
def main():
    """Fetch the newest frp release asset, download it, and build Docker images."""
    latest = latest_version()
    download(latest['name'], latest['browser_download_url'])
    docker_build(latest['name'])
def docker_build(file_name):
    """Extract the frp tarball and build server/client Docker images.

    The image version tag is the second '_'-separated token of the archive
    name (e.g. frp_<version>_linux_amd64).  Python 2 print statements.
    """
    tar = tarfile.open(file_name)
    tar.extractall()
    tar.close()
    # Replace any previous extraction with the fresh one, renamed to 'frp'.
    if os.path.isdir('frp'):
        shutil.rmtree('frp')
    target = file_name.replace('.tar.gz', '')
    os.rename(target, "frp")
    tag = target.split('_')
    # NOTE(review): stdout=subprocess.PIPE with subprocess.call can deadlock if
    # the build produces a lot of output; consider dropping stdout redirection.
    cmd = 'docker build -f Dockerfile.server -t lyf362345/frp-server:' + tag[1] + ' -t lyf362345/frp-server:latest .'
    code = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
    if code == 0:
        print 'server image build done'
    cmd = 'docker build -f Dockerfile.client -t lyf362345/frp-client:' + tag[1] + ' -t lyf362345/frp-client:latest .'
    code = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
    if code == 0:
        print 'client image build done'
def download(name, url):
    """Download url to local file <name> unless it already exists non-empty.

    :return: size of the file on disk, in bytes.
    """
    if not os.path.isfile(name) or os.path.getsize(name) <= 0:
        f = urllib2.urlopen(url)
        with open(name, 'wb') as code:
            code.write(f.read())
    # Printed even when the file already existed (Python 2 print statement).
    print name + ' downloaded'
    return os.path.getsize(name)
def latest_version():
    """Return the linux_amd64 asset dict of the newest non-draft frp release.

    Side effect: writes the release tag to a local 'version' file.
    Returns None implicitly if no matching asset is found.
    """
    request = urllib2.Request('https://api.github.com/repos/fatedier/frp/releases')
    if 'DOCKER_FRP_BUILD_TOKEN' in os.environ:
        # Optional token raises the GitHub API rate limit.
        request.add_header('Authorization', 'token ' + os.getenv('DOCKER_FRP_BUILD_TOKEN'))
    response = urllib2.urlopen(request)
    data = json.load(response)
    # Releases come newest-first; the first non-draft entry wins.
    for item in data:
        if item['draft']:
            continue
        print 'latest version: ' + item['tag_name']
        f = open('version', 'w')
        f.write(item['tag_name'])
        f.close()
        for asset in item['assets']:
            if 'linux_amd64' not in asset['name']:
                continue
            return asset
# Entry point: fetch the latest release and build the Docker images.
if __name__ == '__main__':
    main()
|
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework import status, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
# import jwt
# from rest_framework_jwt.utils import jwt_payload_handler
from backend import settings
from .serializers import CustomUserSerializer
# def create_token(user):
# payload = jwt_payload_handler(user)
# token = jwt.encode(payload, settings.SECRET_KEY)
# return token.decode('unicode_escape')
class CustomUserCreate(APIView):
    """Register a new user and return a freshly-minted JWT access/refresh pair."""

    def post(self, request, format='json'):
        serializer = CustomUserSerializer(data=request.data)
        # Guard clause: invalid payloads are rejected immediately.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if user:
            refresh_token = TokenObtainPairSerializer.get_token(user)
            payload = {
                'access': str(refresh_token.access_token),
                'refresh': str(refresh_token),
            }
            return Response(payload, status=status.HTTP_201_CREATED)
        # save() returned a falsy value: report the serializer state.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Hello(APIView):
    """Authenticated-only endpoint that echoes the requesting user's username."""

    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request):
        payload = {'name': request.user.username}
        return Response(data=payload, status=status.HTTP_200_OK)
|
#!/usr/bin/env python
def many(name, *values):
    # Python 2: prints the first argument, then the tuple of remaining varargs.
    print name, values
args = input('Gimme some juice!: ')
# Bug fix: the original passed the remainder as ONE list argument
# (many(args[0], [arg for arg in args[1:]])); unpack it as varargs instead.
many(args[0], *args[1:])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from models import *
@admin.register(Facility)
class FacilityAdmin(admin.ModelAdmin):
    """Admin changelist for Facility records: scraped vs. canonical name, address parts, and audit info."""
    list_display = ['scraped_name', 'name','street_address', 'city', 'state', 'zip_code', 'created_by', 'mailing_address']
|
import os
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import json
from PIL import Image
def normalize(I):
    """Return a zero-mean, unit-variance copy of I.

    If I is constant (std == 0), returns an all-ones array of the same shape
    so downstream template matching still gets a well-defined window.
    """
    out = np.copy(I)
    sigma = np.std(out)
    if not sigma:
        return np.ones(out.shape)
    return (out - np.mean(out)) / sigma
def non_maximum_suppression(candidates, IOU_threshold=0.5):
if candidates is None:
return None
candidates = sorted(candidates, key=lambda x: x[-1], reverse=True)
outputs = []
for candidate in candidates:
x1, y1, x2, y2, score = candidate[1], candidate[0], candidate[3], candidate[2], candidate[4]
for output in outputs:
x3, y3, x4, y4 = output[1], output[0], output[3], output[2]
#calculate the IOU here
x5, y5, x6, y6 = max(x1, x3), max(y1, y3), min(x2, x4), min(y2, y4)
if x5 <= x6 and y5 <= y6:
intersection = (x6 - x5) * (y6 - y5)
else:
intersection = 0
union = (x2 - x1) * (y2 - y1) + (x4 - x3) * (y4 - y3) - intersection
IOU = intersection / union
if IOU >= IOU_threshold:
break
else:
outputs.append([y1, x1, y2, x2, score])
return outputs
def compute_convolution(I, T, padding=0, stride=1):
    '''
    This function takes an image <I> and a template <T> (both numpy arrays)
    and returns a heatmap where each grid cell is the normalized correlation
    of <T> with the (per-window normalized) image patch at that location,
    remapped from [-1, 1] to [0, 1].
    '''
    (n_rows, n_cols, n_channels) = np.shape(I)
    template_rows, template_cols, template_channels = np.shape(T)
    out_rows = (n_rows - template_rows + 2 * padding) // stride + 1
    out_cols = (n_cols - template_cols + 2 * padding) // stride + 1
    # Bug fix: the heatmap was initialized with np.random.random, so any cell
    # the loop below did not overwrite kept a random score.  Zeros are safe.
    heatmap = np.zeros([out_rows, out_cols])
    # Zero-pad the image so border windows are well-defined.
    I_with_padding = np.zeros([n_rows + 2 * padding, n_cols + 2 * padding, n_channels])
    I_with_padding[padding: padding + n_rows, padding: padding + n_cols] = I
    denom = template_rows * template_cols * template_channels
    for i in range(0, I_with_padding.shape[0] - template_rows + 1, stride):
        for j in range(0, I_with_padding.shape[1] - template_cols + 1, stride):
            window = I_with_padding[i: i + template_rows, j: j + template_cols]
            window_normalized = normalize(window)
            score = np.sum(np.multiply(T, window_normalized)) / denom
            # Map the correlation from [-1, 1] into [0, 1].
            heatmap[i // stride, j // stride] = (1 + score) / 2
    return heatmap
def predict_boxes(heatmap, I, T, padding=0, stride=1, conf_thr=0.8):
    '''
    Turn every heatmap cell whose score reaches conf_thr into a bounding box
    [tl_row, tl_col, br_row, br_col, score], mapped back to image coordinates
    (clamped to the image bounds) using the template size, stride and padding.
    '''
    boxes = []
    for row in range(heatmap.shape[0]):
        for col in range(heatmap.shape[1]):
            confidence = heatmap[row, col]
            if confidence < conf_thr:
                continue
            top = max(row * stride - padding, 0)
            left = max(col * stride - padding, 0)
            bottom = min(row * stride - padding + T.shape[0], I.shape[0] - 1)
            right = min(col * stride - padding + T.shape[1], I.shape[1] - 1)
            boxes.append([top, left, bottom, right, confidence])
    return boxes
def detect_red_light_mf(I):
    '''
    This function takes a numpy array <I> and returns a list <output>.
    The length of <output> is the number of bounding boxes predicted for <I>.
    Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>.
    The first four entries are four integers specifying a bounding box
    (the row and column index of the top left corner and the row and column
    index of the bottom right corner).
    <score> is a confidence score ranging from 0 to 1.
    Note that PIL loads images in RGB order, so:
    I[:,:,0] is the red channel
    I[:,:,1] is the green channel
    I[:,:,2] is the blue channel
    '''
    '''
    BEGIN YOUR CODE
    '''
    # You may use multiple stages and combine the results
    # The template is a hard-coded crop of a red light from training image
    # RL-001.jpg; the pipeline reads that file from disk on every call.
    T = np.array(Image.open('../data/RedLights2011_Medium/RL-001.jpg'))
    T = T[154: 161, 316: 322]
    T = normalize(T)
    # Matched filter -> threshold into boxes -> de-duplicate overlaps.
    heatmap = compute_convolution(I, T, padding=0, stride=1)
    output = predict_boxes(heatmap, I, T, padding=0, stride=1, conf_thr=0.9)
    IOU_threshold = 0.5
    output = non_maximum_suppression(output, IOU_threshold)
    '''
    END YOUR CODE
    '''
    # Sanity-check the contract: 5 fields per box, score within [0, 1].
    for i in range(len(output)):
        assert len(output[i]) == 5
        assert (output[i][4] >= 0.0) and (output[i][4] <= 1.0)
    return output
# Note that you are not allowed to use test data for training.
# set the path to the downloaded data:
data_path = '../data/RedLights2011_Medium'
# load splits:
split_path = '../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
# set a path for saving predictions:
preds_path = '../data/hw02_preds'
os.makedirs(preds_path, exist_ok=True) # create directory if needed
# Set this parameter to True when you're done with algorithm development:
done_tweaking = True
'''
Make predictions on the training set.
'''
# Maps file name -> list of predicted boxes for that image.
preds_train = {}
for i in range(len(file_names_train)):
    # read image using PIL:
    I = Image.open(os.path.join(data_path,file_names_train[i]))
    # convert to numpy array:
    I = np.asarray(I)
    # Progress indicator (one line per image).
    print(i)
    preds_train[file_names_train[i]] = detect_red_light_mf(I)
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds_train.json'),'w') as f:
    json.dump(preds_train,f)
if done_tweaking:
    '''
    Make predictions on the test set.
    '''
    preds_test = {}
    for i in range(len(file_names_test)):
        print(i)
        # read image using PIL:
        I = Image.open(os.path.join(data_path,file_names_test[i]))
        # convert to numpy array:
        I = np.asarray(I)
        preds_test[file_names_test[i]] = detect_red_light_mf(I)
    # save preds (overwrites any previous predictions!)
    with open(os.path.join(preds_path,'preds_test.json'),'w') as f:
        json.dump(preds_test,f)
|
import os
from flask import Flask, render_template, jsonify, request
import json
import requests
import time
import ConfigParser
import IncidentCache
config = ConfigParser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/../../server.cfg')
cps_address = config.get('CPS', 'address')
subscribe_address = config.get('OS', 'subscribe')
app = Flask(__name__)
app.debug = True
def the_print(info):
    """Logging helper: prefix console messages with the [LOADER] tag (Python 2)."""
    print "[LOADER]", info
@app.route('/')
def index():
    """Render the dashboard, passing the OS subscribe address to the template."""
    return render_template("index.html", subscribe_address=subscribe_address)
@app.route('/incident')
def get_incident():
    """Return incidents in [from, to] as JSON, served from cache when possible.

    Query args: 'from' (required, epoch int); 'to' (empty string means open-ended).
    """
    from_value = int(request.args['from'])
    to_value = None
    to_str = request.args['to']
    if len(to_str) > 0:
        to_value = int(to_str)
    # print from_value, to_value
    incidents = IncidentCache.get(from_value, to_value)
    if incidents is None:
        # Cache miss: fetch from the CPS service and populate the cache.
        incidents = load_incident_from_cps(from_value, to_value)
        IncidentCache.insert(incidents, from_value, to_value)
    return jsonify(incidents)
def load_incident_from_cps(from_value, to_value):
    """Query the CPS service for latest incidents completed after from_value.

    When to_value is given, additionally restrict to incidents that started
    before it and disallow incomplete ones. Returns the decoded JSON payload.
    """
    the_print("try to load incident from cps.")
    type_list = ["fire", "haze", "dengue", "illness", "rescue", "gas"]
    # Open-ended query; tightened below when an upper bound is supplied.
    query = {
        "type": type_list,
        "completeTime": {
            "after": from_value,
            "allowIncomplete": True
        },
        "isLatest": True
    }
    if to_value is not None:
        # Bounded query: add the startTime cap and require completeness.
        query["startTime"] = {"before": to_value}
        query["completeTime"]["allowIncomplete"] = False
    # (fixed misspelling: was 'post_pcakage')
    post_package = {
        "query": query,
        "operator": {
            "username": "operator5",
            "password": "1234"
        }
    }
    the_print(json.dumps(post_package))
    r = requests.post(cps_address + "request", data=json.dumps(post_package))
    # print r.text
    incidents = json.loads(r.text)
    the_print("loaded incidents from cps.")
    return incidents
@app.route('/update', methods=['POST'])
def update_incident():
    """Accept a pushed incident update; currently only logs the raw payload."""
    data_str = request.stream.read()
    data = json.loads(data_str)
    print data_str
    print data
    # Always acknowledges success -- the update is not persisted anywhere here.
    ret = {
        "success": True
    }
    return jsonify(ret)
@app.route('/weather')
def get_weather():
    """Fetch the current weather list via WeatherParser and return it as JSON."""
    the_print("try to load weather.")
    # Imported lazily so the parser module only loads when this route is hit.
    import WeatherParser
    weather_list = WeatherParser.get_weather()
    # print weather_list
    the_print("weather loaded.")
    return json.dumps(weather_list)
# Listen on all interfaces on port 16000 (debug mode is enabled above).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=16000)
|
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
# In-memory todo store mapping todo_id -> submitted data; not persisted.
todos = {}
class TodoSimple(Resource):
    """Minimal get/put resource for a single todo keyed by todo_id."""

    def get(self, todo_id):
        # Bug fix: a stray trailing comma made this return a 1-tuple containing
        # the dict, which flask-restful misinterprets as (data, status, ...).
        return {todo_id: todos[todo_id]}

    def put(self, todo_id):
        # Store the 'data' form field and echo the stored value back.
        todos[todo_id] = request.form['data']
        return {todo_id: todos[todo_id]}
class Echo(Resource):
    """Echo endpoint demonstrating reqparse query-string parsing."""

    def get(self, echo1, echo2):
        parser = reqparse.RequestParser()
        parser.add_argument('x_times', type=int, help='help text here')
        args = parser.parse_args()
        # Bug fix: args[x_times] raised NameError (bare name); the parsed
        # argument must be looked up by its string key 'x_times'.
        resp = 'GET /Echo/ {} {} {}'.format(echo1, echo2, args['x_times'])
        return resp

    def put(self, echo1, echo2):
        return 'PUT /Echo/ {} {}'.format(echo1, echo2)
# Route registration: todo lookup by id, plus the echo demo endpoint.
api.add_resource(TodoSimple, '/<string:todo_id>')
api.add_resource(Echo, '/echo/<echo1>/<echo2>')
if __name__ == '__main__':
    app.run(debug=True)
|
import json
import os
import sys
import warnings
import deepsecurity as api
from deepsecurity.rest import ApiException
def format_for_csv(line_item):
    """Converts a list into a single semicolon-separated line ending in a newline.

    :param line_item: The list of values to serialize.
    :return: One ';'-delimited line (with trailing newline), or "" for an empty list.
    """
    # Idiomatic join replaces the manual index-tracking loop; the empty-list
    # guard preserves the original behavior of returning "" (no newline).
    if not line_item:
        return ""
    return ";".join(str(item) for item in line_item) + "\n"
# Setup
if not sys.warnoptions:
    warnings.simplefilter("ignore")
# Get the DSM URL and API key from a JSON file
property_file = os.path.dirname(os.path.abspath(__file__)) + '/../properties.json'
with open(property_file) as raw_properties:
    properties = json.load(raw_properties)
secret_key = properties['secretkey']
url = properties['url']
api_version = 'v1'
# Add DSM host information to the API client configuration
configuration = api.Configuration()
configuration.host = url
configuration.api_key['api-secret-key'] = secret_key
# Initialization
# Set Any Required Values
# One API client per endpoint family used by the export below.
api_instance = api.AntiMalwareConfigurationsApi(api.ApiClient(configuration))
api_directories = api.DirectoryListsApi(api.ApiClient(configuration))
api_files = api.FileListsApi(api.ApiClient(configuration))
api_file_extensions = api.FileExtensionListsApi(api.ApiClient(configuration))
# Add column titles to comma-separated values string
# (note: the delimiter actually used throughout is ';', not ',')
am_csv = "Configuration ID;Name;Alert enabled;Excluded directory list;Excluded file extension list;Excluded file list;Excluded process image file list;Files to scan;Network directories enabled;Real time scan\n"
dl_csv = "id;name;description;items\n"
fl_csv = "id;name;description;items\n"
fel_csv = "id;name;description;items\n"
# Fetch all anti-malware configs plus the three exclusion-list types, build
# id -> name lookup tables, and emit one CSV file per object type.
try:
    api_response = api_instance.list_anti_malwares(api_version)
    directory_response = api_directories.list_directory_lists(api_version)
    files_response = api_files.list_file_lists(api_version)
    file_extension_response = api_file_extensions.list_file_extension_lists(api_version)
    # Directory lists: remember id->name for cross-referencing, and export rows.
    dl_dict = {}
    for dlist in directory_response.directory_lists:
        dl_dict[dlist.id] = dlist.name
        module_info = []
        module_info.append(dlist.id)
        module_info.append(dlist.name)
        module_info.append(dlist.description.replace('\n', ' ').replace('\r', ''))
        # add all items into a single entry separated by spaces
        module_info.append(" ".join(dlist.items))
        # Add the module info to the CSV string
        dl_csv += format_for_csv(module_info)
    # File lists: same pattern as above.
    fl_dict = {}
    for flist in files_response.file_lists:
        fl_dict[flist.id] = flist.name
        module_info = []
        module_info.append(flist.id)
        module_info.append(flist.name)
        module_info.append(flist.description.replace('\n', ' ').replace('\r', ''))
        # add all items into a single entry separated by spaces
        module_info.append(" ".join(flist.items))
        # Add the module info to the CSV string
        fl_csv += format_for_csv(module_info)
    # File-extension lists: same pattern as above.
    fel_dict = {}
    for felist in file_extension_response.file_extension_lists:
        fel_dict[felist.id] = felist.name
        module_info = []
        module_info.append(felist.id)
        module_info.append(felist.name)
        module_info.append(felist.description.replace('\n', ' ').replace('\r', ''))
        # add all items into a single entry separated by spaces
        module_info.append(" ".join(felist.items))
        # Add the module info to the CSV string
        fel_csv += format_for_csv(module_info)
    # Anti-malware configs: resolve referenced list ids to names via the dicts.
    for amconfig in api_response.anti_malware_configurations:
        module_info = []
        module_info.append(amconfig.id)
        module_info.append(amconfig.name)
        module_info.append(amconfig.alert_enabled)
        if amconfig.excluded_directory_list_id:
            module_info.append(dl_dict[amconfig.excluded_directory_list_id])
        else:
            module_info.append("None")
        if amconfig.excluded_file_extension_list_id:
            module_info.append(fel_dict[amconfig.excluded_file_extension_list_id])
        else:
            module_info.append("None")
        if amconfig.excluded_file_list_id:
            module_info.append(fl_dict[amconfig.excluded_file_list_id])
        else:
            module_info.append("None")
        if amconfig.excluded_process_image_file_list_id:
            module_info.append(fl_dict[amconfig.excluded_process_image_file_list_id])
        else:
            module_info.append("None")
        module_info.append(amconfig.files_to_scan)
        module_info.append(amconfig.network_directories_enabled)
        module_info.append(amconfig.real_time_scan)
        # Add the module info to the CSV string
        am_csv += format_for_csv(module_info)
    # Write each accumulated CSV string to its output file.
    with open("../output/AMSettings.csv", "w") as text_file:
        text_file.write(am_csv)
    with open("../output/DirectoryLists.csv", "w") as text_file:
        text_file.write(dl_csv)
    with open("../output/FileExtensionsList.csv", "w") as text_file:
        text_file.write(fel_csv)
    with open("../output/FileLists.csv", "w") as text_file:
        text_file.write(fl_csv)
except ApiException as e:
    print("An API exception occurred: %s\n" % e)
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedPiratesTutorialAI(DistributedObjectAI):
    """AI-side distributed object for the pirate tutorial; mostly stub hooks."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPiratesTutorialAI')

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)

    def generate(self):
        DistributedObjectAI.generate(self)

    def announceGenerate(self):
        DistributedObjectAI.announceGenerate(self)

    def clientEnterAct0Tutorial(self):
        # Client-invoked hook; intentionally a no-op in this stub.
        pass

    def makeAPirateComplete(self):
        # Acknowledge pirate creation back to the requesting client.
        self.sendUpdate('makeAPirateCompleteResp', [])
|
#this file is just a modification of an example script to interact with APIC-EM available in the cisco dev-net zone,
#adapted to be used as a function and with some slight modifications
import requests
import json
import sys
requests.packages.urllib3.disable_warnings() # Disable warnings for not verifying the SSL certificate
GET = "get"
POST = "post"
def run_api(command, controller='x.x.x.x'):
    """Resolve a shorthand command to an APIC-EM endpoint and GET it.

    Known shorthands: 'show_run' and 'node_info'; anything else is used
    verbatim as the URL path. Returns the decoded response, or None if no
    service ticket could be obtained.
    """
    aliases = {
        'show_run': 'api/v1/network-device/config',
        'node_info': 'api/v1/network-device',
    }
    endpoint = aliases.get(command, command)
    ticket = getServiceTicket(controller)
    if not ticket:
        print("No service ticket was received. Ending program!")
        return None
    return doRestCall(ticket, GET, "https://%s/%s" % (controller, endpoint))
def getServiceTicket(controller_ip):
    """Request an auth ticket from the APIC-EM controller; returns it or None."""
    ticket = None
    url = "https://%s/api/v1/ticket" % controller_ip
    # Placeholder credentials -- replace before use.
    payload = {"username": "x", "password": "x"}
    header = {"content-type": "application/json"}
    response = requests.post(url, data=json.dumps(payload), headers=header, verify=False)
    # NOTE(review): `if not response` relies on requests.Response truthiness,
    # which is False for 4xx/5xx status codes -- confirm that is the intent.
    if not response:
        print("No data returned!")
    else:
        r_json = response.json()
        ticket = r_json["response"]["serviceTicket"]
        print("ticket: ", ticket)
    return ticket
def doRestCall(aTicket, command, url, aData=None):
    """Perform an authenticated GET/POST against the controller.

    :param aTicket: service ticket from getServiceTicket()
    :param command: GET or POST (module constants)
    :param aData: optional object serialized to JSON as the request body
    :return: decoded JSON response, or None on unknown command / empty reply / error
    """
    # Bug fix: the error handler below used `traceback` without it ever being
    # imported, so any failure raised a NameError instead of being reported.
    import traceback
    payload = None
    try:
        if aData is not None:
            payload = json.dumps(aData)
        header = {"X-Auth-Token": aTicket, "content-type": "application/json"}
        if command == GET:
            r = requests.get(url, data=payload, headers=header, verify=False)
        elif command == POST:
            r = requests.post(url, data=payload, headers=header, verify=False)
        else:
            print("Unknown command!")
            return
        # Falsy Response means a 4xx/5xx status (requests truthiness).
        if not r:
            print("No data returned!")
        else:
            print("Returned status code: %d" % r.status_code)
            return r.json()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt escape.
        err = sys.exc_info()[0]
        msg_det = sys.exc_info()[1]
        print("Error: %s Details: %s StackTrace: %s" % (err, msg_det, traceback.format_exc()))
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'refs_to_shard_external_lib',
'type': 'static_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_exe',
'type': 'executable',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_dll',
'type': 'shared_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
]
}
|
def main():
    """Convert a log file's HH:MM:SS-prefixed rows to seconds-since-midnight.

    Prompts for input/output file names, copies the header line through
    unchanged, and rewrites every following row's time prefix as total seconds.
    """
    try:
        lue = input("Syötä luettavan tiedoston nimi: ")
        kirjoita = input("Syötä kirjoitettavan tiedoston nimi: ")
        # `with` guarantees both files are closed even on error; the original
        # also opened the input a second time just to count lines (and leaked
        # that handle) -- iterating the file directly makes that unnecessary.
        with open(lue, "r") as lukutiedosto, open(kirjoita, "w") as kirjoitustiedosto:
            # Header line: copied through as-is.  (The original appended
            # `"\n".rstrip()`, which is always "" -- a no-op precedence bug.)
            rivi = lukutiedosto.readline()
            kirjoitustiedosto.write(rivi)
            for rivi in lukutiedosto:
                tunti, minuutti, sekuntit_ja_data = rivi.split(":")
                # First two characters after the last ':' are the seconds.
                sekunti = sekuntit_ja_data[:2]
                data = sekuntit_ja_data[2:]
                aika = 3600 * int(tunti) + 60 * int(minuutti) + int(sekunti)
                # `data` keeps the row's own trailing newline.
                kirjoitustiedosto.write(str(aika) + data)
        print("Tietojen tallennus onnistui.")
    except (OSError, ValueError):
        # Narrowed from a bare except: covers file errors and malformed rows.
        print("Virhe tiedoston lukemisessa!")


main()
|
#BAREBONE APP
from flask import Flask
app = Flask(__name__)
@app.route('/hello/<name>')
def hello(name):
    return ("hello, world", 200)
#ROUTING
@app.route('/test') #default, only allow get method
@app.route('/test', methods=['GET', 'POST']) #allows get and post
@app.route('/test', methods=['PUT']) #allows only put
#CONFIGURATION
#direct access to config
app.config['CONFIG_NAME'] = 'config value'
#import an env var with a path to a config file
app.config.from_envvar('ENV_VAR_NAME')
#TEMPLATES
from flask import render_template
@app.route('/')
def index():
return render_template('template_file.html', var1 = value1, ...)
#JSON RESPONSE
from flask import jsonify
@app.route('/returnstuff')
def returnstuff():
return jsonify({'output':num_dict})
#ACCESS REQUEST DATA
request.args['name'] # query string argument
request.form['name'] #get form data with fieldname name
request.method #request type
request.cookies.get('cookie_name') #cookies
request.files['name'] #files
#REDIRECT
from flask import url_for, redirect
#normal template
@app.route('/home/<var1>')
def index(var):
return render_template('home.html')
#redirect template
@app.route('/redirect')
def redirect_example():
return redirect(url_for('index'), variable = "sam") #redirect user to /home which defined by def index()
#ABORT
from flask import abort
@app.route('/home')
def index():
abort(404)
#SET COOKIE
@app.route('/home')
def index():
resp = make_response(render_template('index.html'))
resp.set_cookie('cookie_name', 'cookie_value')
return resp
#SESSION HANDLING
from flask import session
app.config['SECRET_KEY'] = 'any random string'
#set session
@app.route('/login_success')
def login_success():
session['key_name'] = 'key_value' #stores a secure cookie in browser
return redirect(url_for('index'))
#read session
@app.route('/')
def index():
if 'key_name' in session: #session exist and has key
session_var = session['key_value']
else:#session does not exist
return ("session not found", 401)
#USEFUL PLUGIN
flask-pymongo
flask-sqlalchemy
flask-wtf #a form helper for flask
flask-mail
flask_restful
flask-uploads
flask-user
flask-login
|
import os
from django.conf import settings
# Production-style overrides: debugging off.
DEBUG = False
TEMPLATE_DEBUG = False
DATABASES = settings.DATABASES
import dj_database_url
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
# Allow all host headers
ALLOWED_HOSTS = ['*']
# NOTE(review): this literal completely replaces the dj_database_url config
# assigned above -- confirm which database definition is actually intended.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'iexam',
        'USER': 'root',
        'PASSWORD': '123',
        'HOST': '',
        'PORT': '',
    }
}
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from drp_python.exceptions.http_exceptions import ConnectionError, \
AuthorizationError
class ConnectionStatus(Enum):
    """Tri-state status for an API session connection."""
    OPEN = 1
    CLOSED = 2
    ERROR = 3  # defined for callers; not produced by ApiHttp in this file
class ApiHttp(object):
    """
    Base for All HTTP based API Calls
    """

    def __init__(self, session):
        # The session object owns authorization state and the HTTP transport.
        self.session = session

    def connection_status(self):
        """Return OPEN when the session is authorized, otherwise CLOSED."""
        if self.session.is_authorized():
            return ConnectionStatus.OPEN
        else:
            return ConnectionStatus.CLOSED

    def open(self):
        """Authorize the session if needed and report the connection as OPEN.

        ConnectionError / AuthorizationError raised by the session propagate
        to the caller unchanged.  (The previous try/except blocks only
        re-raised the very same exceptions -- a no-op -- so they were removed.)
        """
        if not self.session.is_authorized():
            self.session.authorize()
        return ConnectionStatus.OPEN
|
from ckeditor_uploader.fields import RichTextUploadingField
from datetime import date
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
from django.db import models
from django.utils.functional import cached_property
from django.utils.text import slugify
def user_photo(self, filename):
    """Upload path for a user's profile photo: user_profile/<username>/<slugified filename>."""
    return "user_profile/{0}/{1}".format(self.username, slugify(filename))
def team_photo(self, filename):
    """Upload path for a team's photo: team_profile/<team name>/<slugified filename>."""
    return "team_profile/{0}/{1}".format(self.name, slugify(filename))
class User(AbstractUser):
    """Site user with profile photo, birth date, phone, sex, and team helpers."""
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Номер телефона должен быть в формате: '+999999999'")
    SEX_CHOICES = (
        ('m', 'Мужской'),
        ('f', 'Женский'),
    )
    photo = models.ImageField(upload_to=user_photo, blank=True, null=True)
    birth_date = models.DateField(null=False, blank=False)
    phone = models.CharField(validators=[phone_regex], max_length=16, blank=True, null=True)
    sex = models.CharField(max_length=8, choices=SEX_CHOICES, blank=False, null=False)

    class Meta(object):
        unique_together = ('email',)
        ordering = ('id',)
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        return self.get_full_name()

    def save(self, *args, **kwargs):
        # NOTE(review): this override only delegates to the parent -- it could
        # be removed unless a hook is planned here.
        super(User, self).save(*args, **kwargs)

    def get_age(self):
        # Full years elapsed; the boolean subtracts 1 if the birthday is
        # still ahead this year.
        today = date.today()
        return today.year - self.birth_date.year - \
               ((today.month, today.day) < (self.birth_date.month, self.birth_date.day))

    def is_in_team(self, team_id):
        # True if this user is among the team's members.
        return True if self in Team.objects.get(pk=str(team_id)).members.all() else False

    def is_played_for_team(self, team_id):
        # Fix here
        # Stubbed out: always reports False until implemented.
        return False

    def is_team_admin(self, team_id):
        # Membership is checked first; a non-member is never an admin.
        if self.is_in_team(team_id):
            tm = TeamMember.objects.get(team=team_id, user=self)
            return True if tm.is_admin else False
        return False

    def is_in_future_team_entries(self, team_id):  # TEST THIS FUNCTION PLZ
        # NOTE(review): relies on TeamEntry (not defined in this module) and
        # on attributes `event.event_categories` / `team_entry.member_entires`
        # being iterable as written -- verify these related names exist.
        if self.is_in_team(team_id):
            future_events = Event.objects.filter(date_start__gt=date.today())
            for event in future_events:
                for category in event.event_categories:
                    if category.is_individual_category():
                        continue
                    else:
                        team_entry = TeamEntry.objects.get(team=team_id, category=category)
                        if team_entry:
                            if self in team_entry.member_entires:
                                return True
        return False

    @cached_property
    def get_avatar_url(self):
        # Falls back to a generic hosted avatar when no photo is set.
        return self.photo.url if self.photo else 'http://poehali.net/images/avatars/ava-3.jpg'
class Team(models.Model):
    """A named team of users; membership goes through the TeamMember join model."""
    name = models.CharField(max_length=64, unique=True)
    date_created = models.DateField(auto_now_add=True)
    photo = models.ImageField(upload_to=team_photo, blank=True, null=True)
    members = models.ManyToManyField(User, through='TeamMember')
    creator = models.ForeignKey(User, related_name='team_creator')

    def save(self, *args, **kwargs):
        # NOTE(review): pass-through override; could be dropped unless a hook
        # is planned here.
        super(Team, self).save(*args, **kwargs)

    def __str__(self):
        return self.name

    def get_members(self):
        # Ordered via the through-model relation (admins first per its Meta).
        return self.members.order_by('tm_users')

    @cached_property
    def get_avatar_url(self):
        # Falls back to a generic hosted image when no photo is set.
        return self.photo.url if self.photo else\
            'http://pogmogoal.com/wp-content/uploads/visa-fifa-worldcup-2014-campaign-star-wars-the-simpsons-2.jpg'
class TeamMember(models.Model):
    """Join model between Team and User, carrying join date and admin flag."""
    team = models.ForeignKey(Team, related_name='tm_teams')
    user = models.ForeignKey(User, related_name='tm_users')
    date_joined = models.DateField(auto_now_add=True)
    is_admin = models.BooleanField(default=False)

    class Meta:
        # One membership row per (team, user); admins sort first.
        unique_together = ('team', 'user')
        ordering = ('-is_admin',)

    def __str__(self):
        return self.user.get_full_name()
class Event(models.Model):
    """A competition event spanning a date range, run by organizer users."""
    title = models.CharField(max_length=128)
    date_start = models.DateField(help_text='Дата Начала События')
    date_end = models.DateField(null=True, blank=True, help_text='Дата Окончания События')
    organizer = models.ManyToManyField(User, help_text='Организатор (Может редактировать)')
    description = RichTextUploadingField(default='Описание...')
    is_published = models.BooleanField(default=False, help_text='Опубликовать на главной')
    class Meta:
        # Newest events first.
        ordering = ['-date_start']
        verbose_name = 'Соревнование'
        verbose_name_plural = 'Соревнования'
    def __str__(self):
        return self.title
class EventCategory(models.Model):
    """A competition class within an Event, with time and team-size limits."""
    # example categories list
    # NOTE(review): 'fintess' and 'buisness' are misspelled, but these keys
    # are stored values — renaming them would require a data migration.
    CATEGORY_CHOICES = (
        ('velo', 'ВЕЛО-класс'),
        ('pro', 'ПРО-класс'),
        ('trek', 'ТРЕК-класс'),
        ('multi', 'MULTI-класс'),
        ('sport', 'СПОРТ-класс'),
        ('fintess', 'ФИТНЕСС-класс'),
        ('buisness', 'БИЗНЕС-класс'),
        ('family', 'Семейный класс'),
        ('amateur', 'Любительский класс'),
        ('solo', 'Соло класс'),
    )
    event = models.ForeignKey(Event, related_name='event_categories')
    category = models.CharField(max_length=8, choices=CATEGORY_CHOICES)
    time_limit = models.DurationField()
    leader_time = models.DurationField()
    geo_length = models.PositiveSmallIntegerField(null=True, blank=True) # kilometers
    description = models.TextField()
    min_team_members = models.PositiveSmallIntegerField()
    max_team_members = models.PositiveSmallIntegerField()
    class Meta:
        verbose_name = 'Категория соревнования'
        verbose_name_plural = 'Категории соревнования'
    def __str__(self):
        return '{} - {}'.format(self.event.title, self.get_category_display())
    def is_individual_category(self):
        # "Individual" == team size fixed at exactly one.
        return self.max_team_members == self.min_team_members == 1
class IndividualEntry(models.Model):
    """A single user's entry into an individual event category."""
    user = models.ForeignKey(User)
    event_category = models.ForeignKey(EventCategory)
    def __str__(self):
        # Django generates get_<field>_display() for fields with choices;
        # the original called a nonexistent get_display_name().
        return '{} - {} - {}'.format(self.event_category.event.title,
                                     self.event_category.get_category_display(),
                                     self.user.get_full_name())
class TeamEntry(models.Model):
    """A team's registration for one event category."""
    team = models.ForeignKey(Team)
    event_category = models.ForeignKey(EventCategory, related_name='event_category_team_entry')
    class Meta:
        # One entry per team per category.
        unique_together = ('team', 'event_category')
class TeamEntryMember(models.Model):
    """A user participating in a specific team entry."""
    entry = models.ForeignKey(TeamEntry, on_delete=models.CASCADE, related_name='member_entries')
    # NOTE(review): naming this FK 'user_id' makes the DB column 'user_id_id';
    # 'user' would be the conventional name.
    user_id = models.ForeignKey(User)
class Results(models.Model):
    """Final standing of a team or an individual in an event category."""
    category = models.ForeignKey(EventCategory)
    # Exactly one of team / individual is expected to be set, depending on
    # whether the category is a team or an individual one.
    team = models.ForeignKey(Team, null=True, blank=True)
    individual = models.ForeignKey(User, null=True, blank=True)
    time = models.DurationField()
    place = models.PositiveSmallIntegerField()
    control_points = models.PositiveSmallIntegerField(null=True, blank=True)
    bonus_points = models.PositiveSmallIntegerField(null=True, blank=True)
    notes = models.CharField(max_length=128) # DNF, other notes
    penalty = models.DurationField(null=True, blank=True)
    is_dnf = models.BooleanField(default=False)
class Volunteers(models.Model):
    """An event staff role (director, referee, volunteer) held by a user."""
    # NOTE(review): 'Cудья' begins with a Latin 'C' rather than Cyrillic 'С';
    # display-only, but worth normalizing in a separate change.
    VOLUNTEERS_CHOICES = (
        ('director', 'Директор гонки'),
        ('main_referee', 'Главный судья'),
        ('referee', 'Cудья'),
        ('volunteers', 'Волонтер'),
    )
    event = models.ForeignKey(Event, related_name='event_volunteers')
    user = models.ForeignKey(User)
    type = models.CharField(choices=VOLUNTEERS_CHOICES, max_length=16)
    class Meta:
        verbose_name = 'Волонтер'
        verbose_name_plural = 'Волонтеры'
    def __str__(self):
        return "{} - {} - {}".format(self.event.title, self.get_type_display(), self.user.get_full_name())
|
import os
import socket
import sys
import SocketServer
from lib.constants import *
from threading import Thread
class ThreadedSocketServerClient(Thread):
    """Daemon thread serving one connected socket client.

    The client first identifies itself; the identity selects a handler
    method that interprets subsequent commands, most of which are relayed
    to the vehicle client through the owning server's broadcast().
    """
    def __init__(self, server: SocketServer, conn: socket):
        Thread.__init__(self)
        self.daemon = True
        self.server: SocketServer = server
        self.conn: socket = conn
        self.identity: str = None  # set by identify()
        self.client = None         # per-identity command handler method
    def run(self):
        # try:
        self.client = self.identify()
        self.conn.sendall(SOCKET_ID_APPROVED.encode())
        print('Connected', self.identity)
        self.listen()
        print('Disconnected', self.identity)
        # except Exception as e:
        #     exc_type, exc_obj, exc_tb = sys.exc_info()
        #     fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #     print('EXCEPTION:', e, 'in ' + fname + ' line ' + str(exc_tb.tb_lineno))
        #     self.disconnect()
    def identify(self):
        """Read the client's first message and return the matching handler."""
        self.identity = self.conn.recv(1024).decode().strip(SOCKET_EOL)
        if self.identity == SOCKET_ID_RECOGNITION:
            return self.client_recognition
        if self.identity == SOCKET_ID_VEHICLE:
            return self.client_vehicle
        if self.identity == SOCKET_ID_JOYSTICK:
            return self.client_joystick
        if self.identity == SOCKET_ID_FAKE:
            return self.client_fake
        raise Exception('Unknown identification: ' + self.identity)
    def listen(self):
        """Receive loop: split the stream on SOCKET_EOL and dispatch messages."""
        while True:
            try:
                data = self.conn.recv(1024)
            except ConnectionResetError:
                break
            except Disconnect:
                break
            # On disconnect
            if not data:
                return
            # Due to busy traffic data can be attached to each other
            messages = data.decode().strip(SOCKET_EOL).split(SOCKET_EOL)
            for message in messages:
                self.on_message(message)
    def on_message(self, message: str):
        """When receiving a message from connected client."""
        attributes = message.split()
        if len(attributes) >= 1:
            command = attributes.pop(0)
            # Global handler gets first shot, then the identity handler; both
            # returning False means nobody recognised the command.
            if not self.client_global(command, attributes) or not self.client(command, attributes):
                self.send(SOCKET_ERR_UNKNOWN_CMD)
    def send(self, command, *params):
        """Send a command plus optional parameters, terminated by SOCKET_EOL.

        Returns True on success, False when the connection is broken.
        """
        if len(params) >= 1:
            # BUG FIX: the original did ' '.join(params[0]), which joined the
            # FIRST parameter character by character; join all params instead.
            command += ' ' + ' '.join(params)
        message = command.strip() + SOCKET_EOL
        try:
            self.conn.sendall(message.encode())
            return True
        except BrokenPipeError:
            return False
    def disconnect(self):
        """Close the underlying socket."""
        print('Disconnecting connection with identity', self.identity)
        self.conn.close()
    def client_global(self, command, payload):
        """Commands every identity understands; True keeps dispatching."""
        if command == SOCKET_DISCONNECT:
            raise Disconnect
        return True
    def client_vehicle(self, command: str, payload):
        # Vehicle clients only receive broadcasts; nothing inbound is handled.
        print('Vehicle')
        return False
    def client_joystick(self, command, payload):
        """Relay joystick commands (with optional speed payload) to the vehicle."""
        if command == SOCKET_JOY_FORWARD:
            speed = '0'
            if len(payload) >= 1:
                speed = payload[0]
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_FORWARD, speed)
        if command == SOCKET_JOY_BACKWARD:
            speed = '0'
            if len(payload) >= 1:
                speed = payload[0]
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_BACKWARD, speed)
        if command == SOCKET_JOY_NEUTRAL:
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_NEUTRAL)
        if command == SOCKET_JOY_DIR_LEFT:
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_DIR_LEFT)
        if command == SOCKET_JOY_DIR_RIGHT:
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_DIR_RIGHT)
        if command == SOCKET_JOY_DIR_NEUTRAL:
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_JOY_DIR_NEUTRAL)
        return False
    def client_recognition(self, command, payload):
        """Relay person-detection events to the vehicle."""
        if command == SOCKET_RECOGNITION_DETECTED:
            print('[BROADCAST] Person detected')
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_RECOGNITION_DETECTED)
        if command == SOCKET_RECOGNITION_FREE:
            return self.server.broadcast(SOCKET_ID_VEHICLE, SOCKET_RECOGNITION_FREE)
        return False
    def client_fake(self, command, payload):
        """Debug identity: echo whatever arrives."""
        print('Received:', command, payload)
        return True
class Disconnect(ValueError):
    """Raised by command handlers to request closing the client connection."""
|
# Contact phone numbers in international "+<country code>" notation.
phone_numbers = {"John Smith": "+37682929928", "Marry Simpons": "+423998200919"}

# Print every number with the "+" prefix rewritten as the "00" dial-out prefix.
print("\n".join(number.replace("+", "00") for number in phone_numbers.values()))
import random
import time

import zmq
def producer():
    """Push 20000 numbered work messages to the streamer's PULL socket.

    Fixes over the original: `def producer:` was a syntax error, the Python 2
    print statement is now a function call, and the id was assigned to
    `publisher_id` but read as `producer_id` (NameError at runtime).
    """
    streamer_pull_port = 5559
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    # connect to streamer PULL socket
    socket.connect("tcp://127.0.0.1:%s" % streamer_pull_port)
    # Random id identifying this producer inside each work message.
    producer_id = random.randrange(0, 9999)
    # create some work
    for num in range(20000):
        work_message = {'num': num, 'pId': producer_id}
        print("Sending work message num: ", num)
        socket.send_json(work_message)
        time.sleep(0.5)
producer()
# Program on performing Addition Operation
# Marks for three subjects, summed into a grand total.
m1 = 60
m2 = 70
m3 = 80
totmarks = sum((m1, m2, m3))
print("total marks are:", totmarks)
import warnings
# Deprecation shim: re-export everything from the private module so existing
# `torchvision.transforms.functional_pil` imports keep working while warning.
from torchvision.transforms._functional_pil import * # noqa
warnings.warn(
    "The torchvision.transforms.functional_pil module is deprecated "
    "in 0.15 and will be **removed in 0.17**. Please don't rely on it. "
    "You probably just need to use APIs in "
    "torchvision.transforms.functional or in "
    "torchvision.transforms.v2.functional."
)
|
import torch, sys
import torch.nn as nn
import baseline_model_layers as bml
from transformers import BertForSequenceClassification
class BiCondLSTMModel(torch.nn.Module):
    '''
    Bidirectional Coniditional Encoding LSTM (Augenstein et al, 2016, EMNLP)
    Single layer bidirectional LSTM where initial states are from the topic encoding.
    Topic is also with a bidirectional LSTM. Prediction done with a single layer FFNN with
    tanh then softmax, to use cross-entropy loss.
    '''
    def __init__(self, hidden_dim, embed_dim, input_dim, drop_prob=0, num_layers=1, num_labels=3,
                 use_cuda=False):
        super(BiCondLSTMModel, self).__init__()
        self.use_cuda = use_cuda
        self.num_labels = num_labels
        self.bilstm = bml.BiCondLSTMLayer(hidden_dim, embed_dim, input_dim, drop_prob, num_layers,
                                          use_cuda=use_cuda)
        self.dropout = nn.Dropout(p=drop_prob)  # so we can have dropouts on last layer
        self.pred_layer = bml.PredictionLayer(input_size=2 * num_layers * hidden_dim,
                                              output_size=self.num_labels,
                                              pred_fn=nn.Tanh(), use_cuda=use_cuda)  # This is BiCond specific
    def forward(self, text, topic, text_l, topic_l):
        # Batch-first inputs are transposed to time-first for the LSTM layer.
        text = text.transpose(0, 1)  # (T, B, E)
        topic = topic.transpose(0, 1)  # (C,B,E)
        _, combo_fb_hn, _, _ = self.bilstm(text, topic, topic_l, text_l)
        # dropout
        combo_fb_hn = self.dropout(combo_fb_hn)  # (B, H*N, dir*N_layers)
        # NOTE(review): shape comment below says (B, 2), but the prediction
        # layer's output size is num_labels (default 3) — confirm.
        y_pred = self.pred_layer(combo_fb_hn)  # (B, 2)
        return y_pred
class AdversarialBasic(torch.nn.Module):
    """Adversarial stance model.

    A BiCond LSTM encoder produces attended text representations; a
    transformation of those feeds a stance classifier, while an adversarial
    topic classifier (run once through the graph and once on detached reps)
    plus text/topic reconstruction heads discourage topic-specific features.
    """
    def __init__(self, enc_params, enc_type, stance_dim, topic_dim, num_labels, num_topics,
                 drop_prob=0.0, use_cuda=False):
        super(AdversarialBasic, self).__init__()
        self.enc_type = enc_type
        self.use_cuda = use_cuda
        self.hidden_dim = enc_params['h']
        self.embed_dim = enc_params['embed_dim']
        self.stance_dim = stance_dim
        self.num_labels = num_labels
        self.num_topics = num_topics
        if self.enc_type == 'bicond':
            self.enc = bml.BiCondLSTMLayer(hidden_dim=self.hidden_dim, embed_dim=self.embed_dim, input_dim=self.embed_dim,
                                           drop_prob=enc_params['drop_prob'], num_layers=1, use_cuda=use_cuda)
            self.att_layer = bml.ScaledDotProductAttention(input_dim=2*self.hidden_dim, use_cuda=self.use_cuda)
        else:
            print("ERROR: invalid encoder type. exiting")
            sys.exit(1)
        self.in_dropout = nn.Dropout(p=drop_prob)
        self.out_dropout = nn.Dropout(p=drop_prob)
        self.recon_layer = bml.ReconstructionLayer(hidden_dim=self.hidden_dim, embed_dim=self.embed_dim,
                                                   use_cuda=self.use_cuda)
        self.topic_recon_layer = bml.ReconstructionLayer(hidden_dim=self.hidden_dim, embed_dim=self.embed_dim, use_cuda=self.use_cuda)
        self.trans_layer = bml.TransformationLayer(input_size=2*self.hidden_dim)
        # Stance classifier consumes [transformed text; topic encoding] -> 4H.
        multiplier = 4
        self.stance_classifier = bml.TwoLayerFFNNLayer(input_dim=multiplier*self.hidden_dim, hidden_dim=stance_dim,
                                                       out_dim=self.num_labels, nonlinear_fn=nn.ReLU())
        self.topic_classifier = bml.TwoLayerFFNNLayer(input_dim=2*self.hidden_dim, hidden_dim=topic_dim,
                                                      out_dim=self.num_topics, nonlinear_fn=nn.ReLU())
    def forward(self, text, topic, text_l, topic_l, text_mask=None, topic_mask=None):
        # text: (B, T, E), topic: (B, C, E), text_l: (B), topic_l: (B), text_mask: (B, T), topic_mask: (B, C)
        # apply dropout on the input
        dropped_text = self.in_dropout(text)
        # encode the text
        if self.enc_type == 'bicond':
            output, _, last_top_hn, topic_output = self.enc(dropped_text.transpose(0, 1),
                                                            topic.transpose(0, 1),
                                                            topic_l, text_l)
            output = output.transpose(0, 1)  #output represents the token level text encodings of size (B,T,2*H)
            topic_output = topic_output.transpose(0, 1)  #Token levek topic embeddings of size (B, C, 2*H)
            last_top_hn = last_top_hn.transpose(0, 1).reshape(-1, 2*self.hidden_dim)  #(B, 2*H)
        att_vecs = self.att_layer(output, last_top_hn)  #(B, 2H)
        # reconstruct the original embeddings
        recon_embeds = self.recon_layer(output, text_mask)  #(B, L, E)
        # reconstruct topic embeddings
        topic_recon_embeds = self.topic_recon_layer(topic_output, topic_mask)
        # transform the representation
        trans_reps = self.trans_layer(att_vecs)  #(B, 2H)
        trans_reps = self.out_dropout(trans_reps)  # adding dropout
        last_top_hn = self.out_dropout(last_top_hn)
        # stance prediction
        # added topic input to stance classifier
        stance_input = torch.cat((trans_reps, last_top_hn), 1)  #(B, 4H)
        stance_preds = self.stance_classifier(stance_input)
        # topic prediction
        # Two passes: through the graph (adversarial gradient into encoder)
        # and on detached reps (trains the discriminator only).
        topic_preds = self.topic_classifier(trans_reps)
        topic_preds_ = self.topic_classifier(trans_reps.detach())
        pred_info = {'text': text, 'text_l': text_l,
                     'topic': topic, 'topic_l': topic_l,
                     'adv_pred': topic_preds, 'adv_pred_':topic_preds_, 'stance_pred': stance_preds,
                     'topic_recon_embeds': topic_recon_embeds, 'recon_embeds': recon_embeds}
        return pred_info
class JointSeqBERTLayer(torch.nn.Module):
    """BERT sequence classifier over a jointly-encoded text+topic pair."""
    def __init__(self, num_labels=3, use_cuda=False):
        super(JointSeqBERTLayer, self).__init__()
        self.num_labels = num_labels
        self.use_cuda = use_cuda
        self.bert_layer = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        self.dim = 768  # hidden size of bert-base
        if self.use_cuda:
            self.bert_layer = self.bert_layer.to('cuda')
    def forward(self, **kwargs):
        # NOTE(review): inputs are moved to 'cuda' unconditionally, even when
        # use_cuda is False — CPU runs appear unsupported; confirm.
        # Returns the classification logits (first element of the output).
        output = self.bert_layer(input_ids=kwargs['text_topic_batch'].to('cuda'),
                                 token_type_ids=kwargs['token_type_ids'].to('cuda'),
                                 attention_mask=kwargs['attention_mask'].to('cuda'))
        return output[0]
class WordEmbedLayer(torch.nn.Module):
    """Embedding lookup backed by a pretrained vector table.

    Maps integer token ids for both the text and the topic to their
    (optionally frozen) embedding vectors, returned as float32 tensors.
    """
    def __init__(self, vecs, static_embeds=True, use_cuda=False):
        super(WordEmbedLayer, self).__init__()
        self.embeds = nn.Embedding.from_pretrained(torch.tensor(vecs), freeze=static_embeds)
        self.dim = vecs.shape[1]
        print("Input layer embedding size - ", self.dim)
        self.vocab_size = float(vecs.shape[0])
        self.use_cuda = use_cuda
    def forward(self, **kwargs):
        text_embeds = self.embeds(kwargs['text']).type(torch.FloatTensor)    # (B, T, E)
        topic_embeds = self.embeds(kwargs['topic']).type(torch.FloatTensor)  # (B, C, E)
        return {'txt_E': text_embeds, 'top_E': topic_embeds}
import json
from engines.ios import EngineIOS
from engines.android import EngineAndroid
from engines.ionic import EngineIonic
from managers.translator import TranslateManager
from managers.cache import CacheItem
from tabulate import tabulate
class CoreManager(object):
    """Orchestrates parsing, translating and writing localization files.

    Reads strings with an input engine, translates them into every
    configured language and writes them back out through one engine per
    target platform (ios / android / ionic).
    """
    def __init__(self, params):
        self.save_cache = params.get('save_cache', True)
        self.print_table = params.get('print_table', True)
        self.params = params
        # NOTE(review): this attribute shadows the data() method below.
        self.data = {}
        self.translated = {}
        self.input_engine = None
        self.output_engines = []
        self.langs = params.get('langs', [])
        self.translate_manager = TranslateManager(langs=self.langs)
        self.engine_builder()
    def process(self):
        """Parse the input files into self.data."""
        self.data = self.input_engine.parse()
    def keys(self):
        """Return a dict whose keys are every translated string key."""
        temp = {}
        for lang in self.langs:
            data = self.translated[lang]
            for k in data.keys():
                temp[k] = k
        return temp
    def data(self):
        # NOTE(review): unreachable through instances — the self.data dict
        # assigned in __init__ shadows this method. Kept for compatibility.
        return self.data
    def translate(self):
        """Process and translate all files to specified languages and platforms"""
        self.process()
        for lang in self.langs:
            specific_lang = {}
            for k, v in self.data.items():
                item = CacheItem(k, v, lang)
                specific_lang[k] = self.translate_manager.translate(item)
                # Log per translated item. The original print sat outside this
                # loop, so it showed only the last item per language and
                # raised NameError when there was nothing to translate.
                if self.log():
                    print(item.key, " <--> ", item.lang, " <---> ", item.value)
            self.translated[lang] = specific_lang
            for engine in self.output_engines:
                engine.write(lang, self.translated[lang])
        if self.save_cache:
            self.finalize()
        if self.print_table:
            self.display()
    def engine_builder(self):
        """Read configuration and initalize all needed engines"""
        targets = self.params.get('targets', [])
        for target in targets:
            engine = self.select_engine(target)
            # Skip unknown targets instead of appending None (which would
            # crash later on engine.write).
            if engine is not None:
                self.output_engines.append(engine)
        input_platform = self.params.get('input', None)  # renamed: don't shadow builtin input()
        self.input_engine = self.select_engine(input_platform)
    def select_engine(self, target):
        """Map a platform name to its engine instance (None when unknown)."""
        if target == 'ios':
            return EngineIOS()
        elif target == 'android':
            return EngineAndroid()
        elif target == 'ionic':
            return EngineIonic()
        else:
            return None
    def finalize(self):
        """Persist the translation cache."""
        self.translate_manager.finalize()
    def display(self):
        """Print a table: one row per key, one column per language."""
        lang_table = ['key']
        for lang in self.langs:
            lang_table.append(lang)
        table = [lang_table]
        for k in self.keys():
            item = CacheItem(key=k)
            translations = self.translate_manager.translations_for_key(item)
            translations.insert(0, k)
            table.append(translations)
        print(tabulate(table))
    def log(self):
        # .get with a default avoids a KeyError when 'log' is not configured.
        return self.params.get('log', False)
from abc import ABCMeta, abstractmethod
class SignalObserver(metaclass=ABCMeta):
    """
    Abstract base class for all observers.

    Registers itself with the model on construction and redraws the
    model's (x, y) signal on the supplied plot canvas when notified.
    """
    def __init__(self, model, plot_canvas):
        self.model = model
        # Register with the observed model so UpdateModel() is called back.
        self.model.AddObserver(self)
        self.plot_canvas = plot_canvas
    def UpdateModel(self, title=''):
        # Redraw the model's current signal as blue dot markers.
        self.plot_canvas.plot(
            self.model.x,
            self.model.y,
            color='blue',
            marker='.'
        )
        self.plot_canvas.setTitle(title)
from clarifai.rest import ClarifaiApp
from clarifai.rest import Image as ClImage
from cvpm.solver import Solver
from api_example.bundle import ApiExampleBundle as Bundle
class ImageRecognitionSolver(Solver):
    """CVPM solver that classifies images through the Clarifai REST API."""
    def __init__(self, toml_file=None):
        super().__init__(toml_file)
        self.app = ClarifaiApp(api_key=Bundle.API_KEY)
        self.set_bundle(Bundle)
        self.set_ready()
    def infer(self, image_file, config):
        """Run the Clarifai model named in config["model"] on image_file.

        Returns the raw prediction response.
        """
        # Context manager guarantees the image file handle is closed (the
        # original leaked the handle returned by open()); the handle stays
        # open through predict() in case Clarifai reads it lazily.
        with open(image_file, 'rb') as image_fh:
            image = ClImage(file_obj=image_fh)
            model = self.app.models.get(config["model"])
            result = model.predict([image])
        print(result)
        return result
|
from geocode import getGeocodeLocation
import json
import httplib2
import sys
import codecs
# Wrap stdout/stderr so byte output is UTF-8 encoded.
# NOTE(review): Python 2 idiom — under Python 3 these wrappers break print().
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
# NOTE(review): hard-coded API credentials — move to configuration/env vars.
foursquare_client_id = "F1OAP3TOGKR1HGKPVHV44NOZXRY0XSIA45MCUEWRZ13EJW43"
foursquare_client_secret = "MQFJB1QUTLDXAW3PSYUCSEUAJ2GVIXJDSBZNTBURJFQWCPHM"
def findARestaurant(mealType, location):
    """Find a nearby restaurant serving `mealType` around the named location.

    Geocodes the location, queries the Foursquare venue-search API and prints
    the raw JSON response. The result-selection steps (#3-#7 below) are still
    TODO, so the function currently returns None.
    """
    coordinates = getGeocodeLocation(location)
    lat = coordinates[0]
    lng = coordinates[1]
    #2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.
    #HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi
    #https://api.foursquare.com/v2/venues/search?v=20161016&ll=38.897478%2C%20-77.000147&query=donuts&intent=browse&radius=10&client_id=F1OAP3TOGKR1HGKPVHV44NOZXRY0XSIA45MCUEWRZ13EJW43&client_secret=MQFJB1QUTLDXAW3PSYUCSEUAJ2GVIXJDSBZNTBURJFQWCPHM
    # BUG FIX: the original template contained "ll={}%2C%{}", which produced a
    # stray '%' glued to the longitude; use a plain "lat,lng" pair instead.
    url = ('https://api.foursquare.com/v2/venues/search?v=20161016&ll={},{}&query={}&intent=browse&radius=555&client_id={}&client_secret={}'.format(lat, lng, mealType, foursquare_client_id, foursquare_client_secret))
    h = httplib2.Http()
    response, content = h.request(url, 'GET')
    result = json.loads(content)
    print(result)
    #3. Grab the first restaurant
    #4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'orginal' to get the original picture
    #5. Grab the first image
    #6. If no image is available, insert default a image url
    #7. Return a dictionary containing the restaurant name, address, and image url
if __name__ == '__main__':
    # Smoke-test lookups across a variety of cuisines and cities.
    # (The "Pizza" / "Tokyo, Japan" case from the original remains disabled.)
    lookups = [
        ("Tacos", "Jakarta, Indonesia"),
        ("Tapas", "Maputo, Mozambique"),
        ("Falafel", "Cairo, Egypt"),
        ("Spaghetti", "New Delhi, India"),
        ("Cappuccino", "Geneva, Switzerland"),
        ("Sushi", "Los Angeles, California"),
        ("Steak", "La Paz, Bolivia"),
        ("Gyros", "Sydney Australia"),
    ]
    for meal_type, place in lookups:
        findARestaurant(meal_type, place)
|
# -*- coding: utf-8 -*-
__all__ = ['Point']
class Point(object):
    """A 2-D point that can print ("draw") its coordinates."""
    def __init__(self, x, y):
        super(Point, self).__init__()
        self.x = x
        self.y = y
    def draw(self):
        # Parenthesized %-formatting makes this valid on both Python 2
        # (parenthesized expression after the print statement) and Python 3
        # (print function); the original `print '...'` was Py2-only.
        print('point x:%s, y:%s' % (self.x, self.y))
|
# Parametric tapered sandwich with cohesive zone and XFEM with curvature
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
import boundaryUtils
import regionToolset
#----------------------------------------------------------------
# Create model
#----------------------------------------------------------------
tmpModel = mdb.Model(name='tmp')
if mdb.models.has_key('Model-1'):
del mdb.models['Model-1']
if mdb.models.has_key('TaperModelCurveXFEM1'):
del mdb.models['TaperModelCurveXFEM1']
taperModel = mdb.Model(name='TaperModelCurveXFEM1')
taperAssem = taperModel.rootAssembly
del mdb.models['tmp']
#----------------------------------------------------------------
# Parameters
#----------------------------------------------------------------
# Geometry parameters
face1Thick = 0.00633
face2Thick = 0.00377
faceThick = face1Thick + face2Thick
coreThick = 0.03
singleSkinLen = 0.1
taperAngle = 30*pi/180
taperLen = coreThick/tan(taperAngle)
sandwichLen = 0.7
loadLen = sandwichLen - 0.35
#cohesiveThick = 1.0e-6
cohesiveThick =0.001
#cohesiveThick =0.0001
#cohesiveThick =0.00001
sandwichThick = 0.05
filletRadius = 0.03
midFace1 = 0.5*face1Thick
midFace2 = 0.5*face2Thick
midCohesive = 0.5*cohesiveThick
midPanel = 0.5*sandwichThick
# Mesh parameters
#hSize = face1Thick/4.0
hSize = face1Thick/2.0
#hSize = face1Thick
# Load parameters
tractionMag = 1000
# Material properties
coreE11 = 125.0e6
coreE22 = 125.0e6
coreE33 = 125.0e6
corenu12 = 0.3
corenu13 = 0.3
corenu23 = 0.3
coreG12 = 38.0e6
coreG13 = 38.0e6
coreG23 = 38.0e6
face1E11 = 11.871e9
face1E22 = 11.871e9
face1E33 = 11.871e9
face1nu12 = 0.3
face1nu13 = 0.3
face1nu23 = 0.3
face1G12 = 4.6e9
face1G13 = 4.6e9
face1G23 = 4.6e9
face2E11 = 10.229e9
face2E22 = 10.229e9
face2E33 = 10.229e9
face2nu12 = 0.3
face2nu13 = 0.3
face2nu23 = 0.3
face2G12 = 3.9e9
face2G13 = 3.9e9
face2G23 = 3.9e9
faceFaceSign = 60e6
faceFaceSigt1 = 50e6
faceFaceSigt2 = 50e6
faceFaceDispf = 0.001
faceFaceEKnn = 69e12
faceFaceG1Kss = 69e12
faceFaceG2Ktt = 69e12
faceCoreSign = 60e6
faceCoreSigt1 = 50e6
faceCoreSigt2 = 50e6
faceCoreDispf = 0.001
faceCoreEKnn = 69e12
faceCoreG1Kss = 69e12
faceCoreG2Ktt = 69e12
coreElastic = ((coreE11, coreE22, coreE33, corenu12, corenu13, corenu23,
coreG12, coreG13, coreG23), )
face1Elastic = ((face1E11, face1E22, face1E33, face1nu12, face1nu13, face1nu23,
face1G12, face1G13, face1G23), )
face2Elastic = ((face2E11, face2E22, face2E33, face2nu12, face2nu13, face2nu23,
face2G12, face2G13, face2G23), )
faceFaceDamageInit = ((faceFaceSign, faceFaceSigt1, faceFaceSigt2), )
faceCoreDamageInit = ((faceCoreSign, faceCoreSigt1, faceCoreSigt2), )
faceFaceElastic = ((faceFaceEKnn, faceFaceG1Kss, faceFaceG2Ktt), )
faceCoreElastic = ((faceCoreEKnn, faceCoreG1Kss, faceCoreG2Ktt), )
#----------------------------------------------------------------
# Set up geometry
#----------------------------------------------------------------
# Section point coordinates
x1 = 0
y1 = 0
x2 = singleSkinLen
y2 = 0
x3 = x2 + taperLen
y3 = 0
x4 = loadLen
y4 = 0
x5 = sandwichLen
y5 = 0
x6 = x5
y6 = face1Thick
x7 = x5
y7 = y6 + coreThick
x8 = x5
y8 = y7 + face2Thick
x9 = x4
y9 = y8
x10 = x3
y10 = y8
x11 = x2
y11 = faceThick
x12 = x1
y12 = faceThick
x13 = x1
y13 = face1Thick
p01 = (x1, y1)
p02 = (x2, y2)
p03 = (x3, y3)
p04 = (x4, y4)
p05 = (x5, y5)
p06 = (x6, y6)
p07 = (x7, y7)
p08 = (x8, y8)
p09 = (x9, y9)
p10 = (x10, y10)
p11 = (x11, y11)
p12 = (x12, y12)
p13 = (x13, y13)
# Create sketch
taperSketch = taperModel.ConstrainedSketch(name='__profile__', sheetSize=3.0)
taperSketch.Spot(point=p01)
taperSketch.Spot(point=p02)
taperSketch.Spot(point=p03)
taperSketch.Spot(point=p04)
taperSketch.Spot(point=p05)
taperSketch.Spot(point=p06)
taperSketch.Spot(point=p07)
taperSketch.Spot(point=p08)
taperSketch.Spot(point=p09)
taperSketch.Spot(point=p10)
taperSketch.Spot(point=p11)
taperSketch.Spot(point=p12)
taperSketch.Spot(point=p13)
taperSketch.Line(point1=p01, point2=p02)
taperSketch.Line(point1=p02, point2=p03)
taperSketch.Line(point1=p03, point2=p04)
taperSketch.Line(point1=p04, point2=p05)
taperSketch.Line(point1=p05, point2=p06)
taperSketch.Line(point1=p06, point2=p07)
taperSketch.Line(point1=p07, point2=p08)
taperSketch.Line(point1=p08, point2=p09)
taperSketch.Line(point1=p09, point2=p10)
taperSketch.Line(point1=p10, point2=p11)
taperSketch.Line(point1=p11, point2=p12)
taperSketch.Line(point1=p12, point2=p13)
taperSketch.Line(point1=p13, point2=p01)
# Create fillets
taperGeom = taperSketch.geometry
pCurve1 = (0.5*(x11+x12), y11)
curve1 = taperGeom.findAt(pCurve1)
pCurve2 = (0.5*(x11+x10), 0.5*(y11+y10))
curve2 = taperGeom.findAt(pCurve2)
taperSketch.FilletByRadius(radius=filletRadius, curve1=curve1, nearPoint1=pCurve1,
curve2=curve2, nearPoint2=pCurve2)
taperGeom = taperSketch.geometry
pCurve1 = pCurve2
curve1 = taperGeom.findAt(pCurve1)
pCurve2 = (0.5*(x9+x10), y9)
curve2 = taperGeom.findAt(pCurve2)
taperSketch.FilletByRadius(radius=filletRadius, curve1=curve1, nearPoint1=pCurve1,
curve2=curve2, nearPoint2=pCurve2)
# Extrude
taperPart = taperModel.Part(dimensionality=THREE_D, name='TaperPart', type=
DEFORMABLE_BODY)
taperPart.BaseSolidExtrude(depth=sandwichThick, sketch=taperSketch)
del taperSketch
# Find the extra points in the filleted region
pEdgeMid = (0.5*(x11+x12), 0.5*(y11+y12), 0.0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pEdgeMid)
vertexList = taperPart.vertices
vertEdge = edgeToPartition.getVertices()
pFillet11 = vertexList[vertEdge[1]].pointOn[0]
pEdgeMid = (0.5*(x11+x10), 0.5*(y10+y12), 0.0)
pFillet11a = vertexList[vertEdge[0]].pointOn[0]
edgeToPartition = edgeList.findAt(pEdgeMid)
vertexList = taperPart.vertices
vertEdge = edgeToPartition.getVertices()
pFillet11a = vertexList[vertEdge[0]].pointOn[0]
pFillet10a = vertexList[vertEdge[1]].pointOn[0]
pEdgeMid = (0.5*(x10+x9), 0.5*(y10+y9), 0.0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pEdgeMid)
vertexList = taperPart.vertices
vertEdge = edgeToPartition.getVertices()
pFillet10 = vertexList[vertEdge[0]].pointOn[0]
# Partition the geometry to create the top facesheet and associated cohesive zone
faceList = taperPart.faces
edgeList = taperPart.edges
sketchPlane = faceList.findAt((0.0, 0.0, sandwichThick))
sketchUpEdge = edgeList.findAt((sandwichLen, midFace1, sandwichThick))
sketchTransform = taperPart.MakeSketchTransform(sketchPlane=sketchPlane,
sketchUpEdge=sketchUpEdge, sketchPlaneSide=SIDE1, origin=(0.0, 0.0, sandwichThick))
partitionSketch = taperModel.ConstrainedSketch(name='__profile__',
sheetSize = 3.0, transform = sketchTransform)
geomList = partitionSketch.geometry
partitionSketch.setPrimaryObject(option=SUPERIMPOSE)
taperPart.projectReferencesOntoSketch(sketch=partitionSketch, filter=COPLANAR_EDGES)
## Create the partition on the sketch
pp = (0.5*(x12+x11), y12)
geomID = geomList.findAt(pp).id
partitionEdge = (geomList[geomID],)
partitionSketch.offset(distance=face2Thick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
partitionSketch.offset(distance=face2Thick+cohesiveThick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
pp = (0.5*(x11+x10), 0.5*(y11+y10))
geomID = geomList.findAt(pp).id
partitionEdge = (geomList[geomID],)
partitionSketch.offset(distance=face2Thick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
partitionSketch.offset(distance=face2Thick+cohesiveThick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
pp = (0.5*(x10+x9), 0.5*(y10+y9))
geomID = geomList.findAt(pp).id
partitionEdge = (geomList[geomID],)
partitionSketch.offset(distance=face2Thick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
partitionSketch.offset(distance=face2Thick+cohesiveThick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
pp = (0.5*(x9+x8), 0.5*(y9+y8))
geomID = geomList.findAt(pp).id
partitionEdge = (geomList[geomID],)
partitionSketch.offset(distance=face2Thick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
partitionSketch.offset(distance=face2Thick+cohesiveThick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
xmin1 = pFillet11[0]
xmax1 = pFillet11a[0]
xmin2 = pFillet10a[0]
xmax2 = pFillet10[0]
for jj in range(len(geomList)):
key = geomList.keys()[jj]
pointOn = geomList[key].pointOn
px = pointOn[0]
#if geomList[key].curveType == ARC:
if (px > xmin1 and px < xmax1) or (px > xmin2 and px < xmax2) :
print geomList[key].curveType
partitionEdge = (geomList[key],)
partitionSketch.offset(distance=face2Thick, objectList=partitionEdge, side=RIGHT,
filletCorners=FALSE)
partitionSketch.offset(distance=face2Thick+cohesiveThick, objectList=partitionEdge,
side=RIGHT, filletCorners=FALSE)
## Select the face to partition
faceList = taperPart.faces
faceToPartition = faceList.findAt((0, 0, sandwichThick))
sketchUpEdge = edgeList.findAt((sandwichLen, midFace1, sandwichThick))
taperPart.PartitionFaceBySketch(sketchUpEdge=sketchUpEdge, faces=faceToPartition,
sketch=partitionSketch)
#partitionSketch.unsetPrimaryObject()
del partitionSketch
## Select the cell to partition and sweep path
cellList = taperPart.cells
vertList = taperPart.vertices
edgeList = taperPart.edges
edgeAlongSweep = edgeList.findAt((0, 0, midPanel))
## Select edges to sweep
offsetThickY = face2Thick*cos(taperAngle)
offsetThickX = face2Thick*sin(taperAngle)
offsetCohThickY = (face2Thick+cohesiveThick)*cos(taperAngle)
offsetCohThickX = (face2Thick+cohesiveThick)*sin(taperAngle)
x11 = pFillet11[0]
x11a = pFillet11a[0]
x10a = pFillet10a[0]
x10 = pFillet10[0]
y11 = pFillet11[1]
y11a = pFillet11a[1]
y10a = pFillet10a[1]
y10 = pFillet10[1]
### First edge
pp = (0.5*(x12+x11), y12-face2Thick, sandwichThick)
e1 = edgeList.findAt(pp)
### Second edge
e2 = boundaryUtils.getNextEdge(edgeList, vertList, e1)
### Third edge
e3 = boundaryUtils.getNextEdge(edgeList, vertList, e2)
### Fourth edge
e4 = boundaryUtils.getNextEdge(edgeList, vertList, e3)
### Fifth edge
e5 = boundaryUtils.getNextEdge(edgeList, vertList, e4)
### Sixth edge
e6 = boundaryUtils.getNextEdge(edgeList, vertList, e5)
partitionEdges = (e1, e2, e3, e4, e5, e6)
taperPart.PartitionCellBySweepEdge(cells=cellList, edges=partitionEdges,
sweepPath=edgeAlongSweep)
## Select the cell to partition and sweep path
cellList = taperPart.cells
vertList = taperPart.vertices
edgeList = taperPart.edges
edgeAlongSweep = edgeList.findAt((0, 0, midPanel))
### First edge
ppc = (0.5*(x12+x11), y12-face2Thick-cohesiveThick, sandwichThick)
e1c = edgeList.findAt(ppc)
### Second edge
e2c = boundaryUtils.getNextEdge(edgeList, vertList, e1c)
### Third edge
e3c = boundaryUtils.getNextEdge(edgeList, vertList, e2c)
### Fourth edge
e4c = boundaryUtils.getNextEdge(edgeList, vertList, e3c)
### Fifth edge
e5c = boundaryUtils.getNextEdge(edgeList, vertList, e4c)
### Sixth edge
e6c = boundaryUtils.getNextEdge(edgeList, vertList, e5c)
partitionEdges = (e1c, e2c, e3c, e4c, e5c, e6c)
taperPart.PartitionCellBySweepEdge(cells=cellList, edges=partitionEdges,
sweepPath=edgeAlongSweep)
# Partition the geometry to isolate the tapered region
pEdgeMid = (0.5*(x11+x12), 0.5*(y11+y12), 0.0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pEdgeMid)
vertexList = taperPart.vertices
vertEdge = edgeToPartition.getVertices()
cellList = taperPart.cells
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=pFillet11,
cells=cellList)
pEdgeMid = (0.5*(x10+x9), 0.5*(y10+y9), 0.0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pEdgeMid)
vertexList = taperPart.vertices
vertEdge = edgeToPartition.getVertices()
cellList = taperPart.cells
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=pFillet10,
cells=cellList)
# Partition the geometry to create the bottom facesheet
p6 = (x6, y6, 0.0)
pTaper = (0.5*(x2+x3), 0.0, 0.0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(p6)
cellList = taperPart.cells
cellToPartition = cellList.findAt((p6,),(pTaper,))
# Split the previously selected cells by a plane normal to edgeToPartition
# through p6 (continuation of the partitioning sequence above).
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=p6,
    cells=cellToPartition)
# Partition the sharp corner in the tapered region.
# Probe the taper face at mid-height, pull its five corner vertices, and use
# the min/max of their x/y coordinates to locate the sharp corner and the
# right-angle point on the bottom of that face.
pFaceMid = (x10a, 0.5*(y10+y3), sandwichThick)
faceList = taperPart.faces
vertexList = faceList.findAt(pFaceMid).getVertices()
v1 = taperPart.vertices[vertexList[0]].pointOn
v2 = taperPart.vertices[vertexList[1]].pointOn
v3 = taperPart.vertices[vertexList[2]].pointOn
v4 = taperPart.vertices[vertexList[3]].pointOn
v5 = taperPart.vertices[vertexList[4]].pointOn
xVert = (v1[0][0], v2[0][0], v3[0][0], v4[0][0], v5[0][0])
yVert = (v1[0][1], v2[0][1], v3[0][1], v4[0][1], v5[0][1])
xCorner = min(xVert)
xRightAngle = max(xVert)
yCorner = min(yVert)
pCorner = (xCorner, yCorner, sandwichThick)
pRightAngle = (0.5*(xRightAngle+xCorner), yCorner, sandwichThick)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pRightAngle)
cellList = taperPart.cells
pCell1 = (0.5*(x2+x3), 0, 0)
pCell2 = (0.5*(x2+x3), face1Thick-0.5*cohesiveThick, 0)
cellToPartition = cellList.findAt((pCell1,), (pCell2,))
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=pCorner,
    cells=cellToPartition)
# Partition the geometry to create the bottom cohesive zone.
# y14 lies cohesiveThick below y6; the edge parameter is measured from the
# far end of the edge, hence the 1.0 - (...) expression.
x14 = x6
y14 = y6 - cohesiveThick
p14 = (x14, y14, 0.0)
tPartition = 1.0 - (y14 - y1)/(y6 - y1)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(p14)
taperPart.PartitionEdgeByParam(edges=edgeToPartition, parameter=tPartition)
cellList = taperPart.cells
cellToPartition = cellList.findAt((p14,),(pTaper,))
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=p14,
    cells=cellToPartition)
# Partition the geometry to indicate load application point
p09 = (x9, y9, 0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(p09)
cellList = taperPart.cells
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=p09,
    cells=cellList)
# Partition the tapered facesheet for orientation purposes.
# Two cuts, one through each end of the taper (p11a then p10a), so the tapered
# facesheet and its cohesive layer become separate cells that can carry their
# own material orientations.
p10a = (x10a, y10a, 0)
p11a = (x11a, y11a, 0)
pMid = (0.5*(x10a+x11a), 0.5*(y10a+y11a), 0)
#pMidCoh = (0.5*(x10a+x11a), 0.5*(y10a+y11a)-face2Thick-0.5*offsetThickY, 0)
pMidCoh = (x11+cohesiveThick, y11-face2Thick-0.5*cohesiveThick, 0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pMid)
cellList = taperPart.cells
cellToPartition = cellList.findAt((pMid,),(pMidCoh,))
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=p11a,
    cells=cellToPartition)
pMidCoh = (x10-cohesiveThick, y10-face2Thick-0.5*cohesiveThick, 0)
edgeList = taperPart.edges
edgeToPartition = edgeList.findAt(pMid)
cellList = taperPart.cells
cellToPartition = cellList.findAt((pMid,),(pMidCoh,))
taperPart.PartitionCellByPlaneNormalToEdge(edge=edgeToPartition, point=p10a,
    cells=cellToPartition)
# Create virtual topology (kept disabled; thresholds preserved for reference)
#taperPart.createVirtualTopology(mergeShortEdges=True, shortEdgeThreshold=0.0017,
#    mergeSmallFaces=True, smallFaceAreaThreshold=1.4e-05,
#    mergeSliverFaces=True, faceAspectRatioThreshold=10.0,
#    mergeSmallAngleFaces=True, smallFaceCornerAngleThreshold=10.0,
#    mergeThinStairFaces=True, thinStairFaceThreshold=0.00034,
#    ignoreRedundantEntities=True, cornerAngleTolerance=30.0,
#    applyBlendControls=True, blendSubtendedAngleTolerance=60.0,
#    blendRadiusTolerance=0.0085)
#----------------------------------------------------------------
# Set up material properties
#----------------------------------------------------------------
# Six materials: orthotropic core and facesheets, an XFEM-enriched copy of
# the core (same elasticity plus a damage model), and two traction-separation
# cohesive materials for the face-face and face-core interfaces.
matCore = taperModel.Material(name='Core')
matCoreXFEM = taperModel.Material(name='CoreXFEM')
matFace1 = taperModel.Material(name='Face1')
matFace2 = taperModel.Material(name='Face2')
matFaceFace = taperModel.Material(name='CohesiveFaceFace')
matFaceCore = taperModel.Material(name='CohesiveFaceCore')
matCore.Elastic(type=ENGINEERING_CONSTANTS, table=coreElastic)
matCoreXFEM.Elastic(type=ENGINEERING_CONSTANTS, table=coreElastic)
# XFEM core: Maxpe (max principal strain) damage initiation at 0.005 with
# displacement-controlled evolution (table value 0.005).
matCoreXFEM.MaxpeDamageInitiation(table=((0.005, ), ))
matCoreXFEM.maxpeDamageInitiation.DamageEvolution(type=DISPLACEMENT,
    table=((0.005, ), ))
matFace1.Elastic(type=ENGINEERING_CONSTANTS, table=face1Elastic)
matFace2.Elastic(type=ENGINEERING_CONSTANTS, table=face2Elastic)
# Cohesive materials: traction elasticity, max nominal stress (Maxs)
# initiation, displacement-controlled evolution to final separation.
matFaceFace.Elastic(type=TRACTION, table=faceFaceElastic)
matFaceFace.MaxsDamageInitiation(table=faceFaceDamageInit)
matFaceFace.maxsDamageInitiation.DamageEvolution(type=DISPLACEMENT,
    table=((faceFaceDispf, ), ))
matFaceCore.Elastic(type=TRACTION, table=faceCoreElastic)
matFaceCore.MaxsDamageInitiation(table=faceCoreDamageInit)
matFaceCore.maxsDamageInitiation.DamageEvolution(type=DISPLACEMENT,
    table=((faceCoreDispf, ), ))
#----------------------------------------------------------------
# Set up sections
#----------------------------------------------------------------
# Solid sections for the bulk regions; cohesive sections with
# traction-separation response for the adhesive layers.
secCore = taperModel.HomogeneousSolidSection(name='Core', material='Core', thickness=None)
secCoreXFEM = taperModel.HomogeneousSolidSection(name='CoreXFEM', material='CoreXFEM', thickness=None)
secFace1 = taperModel.HomogeneousSolidSection(name='Face1', material='Face1', thickness=None)
secFace2 = taperModel.HomogeneousSolidSection(name='Face2', material='Face2', thickness=None)
secFaceFace = taperModel.CohesiveSection(name='CohesiveFaceFace',
    material='CohesiveFaceFace', response=TRACTION_SEPARATION,
    outOfPlaneThickness=None)
secFaceCore = taperModel.CohesiveSection(name='CohesiveFaceCore',
    material='CohesiveFaceCore', response=TRACTION_SEPARATION,
    outOfPlaneThickness=None)
#----------------------------------------------------------------
# Assign sections
#----------------------------------------------------------------
# Each region is located by probing cell midpoints with findAt, assigned a
# section, and captured in a named Set reused later for orientations,
# mesh controls and element types.
# Lower facesheet
pCellMid1 = (0.5*(x1+x2), 0.5*(y1), midPanel)
pCellMid2 = (0.5*(x11+pCorner[0]), 0.5*(y2), midPanel)
pCellMid3 = (0.5*(pCorner[0]+x3), 0.5*(pCorner[1]), midPanel)
pCellMid4 = (0.5*(x3+x4), 0.5*(y4), midPanel)
pCellMid5 = (0.5*(x4+x5), 0.5*(y5), midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,),(pCellMid2,),
    (pCellMid3,), (pCellMid4,), (pCellMid5,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='Face1')
setFace1 = taperPart.Set(name='Face1', cells=cellSequence)
# Face-Face cohesive region
pCellMid1 = (0.5*(x1+x11), 0.5*(y13+y14), midPanel)
cellSequence = cellList.findAt((pCellMid1,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='CohesiveFaceFace',
    offset=0.0, offsetType=MIDDLE_SURFACE, offsetField='',
    thicknessAssignment=FROM_SECTION)
setCohesiveFaceFace = taperPart.Set(name='CohesiveFaceFace', cells=cellSequence)
# Bottom Face-Core cohesive region
#pCellMid6 = (pCorner[0]-cohesiveThick, pCorner[1]-cohesiveThick, midPanel)
#cellSequence = cellList.findAt((pCellMid6,))
#setFace1ExtraBit = taperPart.Set(name='Face1ExtraBit', cells=cellSequence)
pCellMid1 = (0.5*(pCorner[0]+x3), 0.5*(y13+y14), midPanel)
pCellMid2 = (0.5*(x3+x4), 0.5*(y13+y14), midPanel)
pCellMid3 = (0.5*(x4+x5), 0.5*(y13+y14), midPanel)
cellSequence = cellList.findAt((pCellMid1,),(pCellMid2,),
    (pCellMid3,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='CohesiveFaceCore',
    offset=0.0, offsetType=MIDDLE_SURFACE, offsetField='',
    thicknessAssignment=FROM_SECTION)
setCohesiveFaceCoreBot = taperPart.Set(name='CohesiveFaceCoreBot', cells=cellSequence)
# Upper facesheet
pCellMid1 = (0.5*(x1+x11), 0.5*(y12+y13), midPanel)
pCellMid2 = (0.5*(x11a+x10a), 0.5*(y11a+y10a), midPanel)
pCellMid3 = (0.5*(x3+x4), y10, midPanel)
pCellMid4 = (0.5*(x4+x5), y10, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,),(pCellMid2,),
    (pCellMid3,), (pCellMid4,))
region = regionToolset.Region(cells=cellSequence)
# NOTE(review): the 'Face2' section is assigned to all four cells (the region
# built above), but the 'Face2' set deliberately excludes the tapered cell,
# which gets its own 'Face2Taper' set for a separate material orientation.
cellSequence = cellList.findAt((pCellMid1,), (pCellMid3,), (pCellMid4,))
taperPart.SectionAssignment(region=region, sectionName='Face2')
setFace2 = taperPart.Set(name='Face2', cells=cellSequence)
cellSequence = cellList.findAt((pCellMid2,))
setFace2Taper = taperPart.Set(name='Face2Taper', cells=cellSequence)
# Top Face-Core cohesive region
#pCellMid1 = (0.5*(x11+x3), 0.5*(y11a+y10a)-face2Thick-0.5*offsetThickY, midPanel)
pCellMid1 = ((x11a+offsetThickX+0.5*cohesiveThick), (y11a-offsetThickY-0.5*cohesiveThick), midPanel)
pCellMid2 = (0.5*(x3+x4), y10-face2Thick-0.5*cohesiveThick, midPanel)
pCellMid3 = (0.5*(x4+x5), y10-face2Thick-0.5*cohesiveThick, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,),(pCellMid2,),
    (pCellMid3,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='CohesiveFaceCore',
    offset=0.0, offsetType=MIDDLE_SURFACE, offsetField='',
    thicknessAssignment=FROM_SECTION)
# Split the same cohesive cells into two sets: the tapered part and the flat
# top part, again so they can carry different orientations.
cellSequence = cellList.findAt((pCellMid1,))
setCohesiveFaceCoreTaper = taperPart.Set(name='CohesiveFaceCoreTaper', cells=cellSequence)
cellSequence = cellList.findAt((pCellMid2,),(pCellMid3,))
setCohesiveFaceCoreTop = taperPart.Set(name='CohesiveFaceCoreTop', cells=cellSequence)
# Core region
pCellMid1 = (0.5*(x10a+x3), 0.5*(y3+face1Thick+y10-face2Thick-cohesiveThick), midPanel)
pCellMid2 = (0.5*(x3+x4), 0.5*(y3+y10), midPanel)
pCellMid3 = (0.5*(x4+x5), 0.5*(y3+y10), midPanel)
cellList = taperPart.cells
#cellSequence = cellList.findAt((pCellMid1,),(pCellMid2,),
#    (pCellMid3,))
#region = regionToolset.Region(cells=cellSequence)
#taperPart.SectionAssignment(region=region, sectionName='Core')
# The tapered core cell carries the XFEM-enriched 'CoreXFEM' section (crack
# domain, see XFEM controls below); the rest of the core is plain 'Core'.
cellSequence = cellList.findAt((pCellMid1,))
setCoreTaper = taperPart.Set(name='CoreTaper', cells=cellSequence)
taperPart.SectionAssignment(region=setCoreTaper, sectionName='CoreXFEM')
cellSequence = cellList.findAt((pCellMid2,),(pCellMid3,))
setCore = taperPart.Set(name='Core', cells=cellSequence)
taperPart.SectionAssignment(region=setCore, sectionName='Core')
# Curved upper facesheet regions (the two fillet cells)
pCellMid1 = (0.5*(x11+x11a), y11-0.5*face2Thick, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='Face2')
setFace2Curve1 = taperPart.Set(name='Face2Curve1', cells=cellSequence)
pCellMid1 = (0.5*(x10+x10a), y10-0.5*face2Thick, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='Face2')
setFace2Curve2 = taperPart.Set(name='Face2Curve2', cells=cellSequence)
# Curved upper cohesive regions (cohesive layers under the fillets)
pCellMid1 = (x11+cohesiveThick, y11-face2Thick-0.5*cohesiveThick, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='CohesiveFaceCore')
setCohesiveFaceCoreCurve1 = taperPart.Set(name='CohesiveFaceCoreCurve1', cells=cellSequence)
pCellMid1 = (x10-cohesiveThick, y10-face2Thick-0.5*cohesiveThick, midPanel)
cellList = taperPart.cells
cellSequence = cellList.findAt((pCellMid1,))
region = regionToolset.Region(cells=cellSequence)
taperPart.SectionAssignment(region=region, sectionName='CohesiveFaceCore')
setCohesiveFaceCoreCurve2 = taperPart.Set(name='CohesiveFaceCoreCurve2', cells=cellSequence)
#----------------------------------------------------------------
# Create datum coordinate systems
#----------------------------------------------------------------
# One datum csys per material region. Each is defined by three points:
# origin, a point on the local x axis, and a point in the local x-y plane,
# all located as vertices via findAt at known coordinates.
vertList = taperPart.vertices
# Lower facesheet
pOrig = (x1, y1, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x11, y1, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x1, y1, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Face1', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Lower cohesive zone
pOrig = (x1, y1+face1Thick-cohesiveThick, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x10, y12-face2Thick-cohesiveThick, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x1, y1+face1Thick-cohesiveThick, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Cohesive1', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Upper facesheet
pOrig = (x12, y12-face2Thick, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x11, y12-face2Thick, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x12, y12-face2Thick, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Face2', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Upper cohesive zone
pOrig = (x10, y10-face2Thick-cohesiveThick, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x9, y10-face2Thick-cohesiveThick, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x9, y10-face2Thick-cohesiveThick, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Cohesive2', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Core
pOrig = (x4, y4+face1Thick-cohesiveThick, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x5, y4+face1Thick-cohesiveThick, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x5, y4+face1Thick-cohesiveThick, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Core', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Tapered top facesheet
pOrig = (x11a+offsetThickX, y11a-offsetThickY, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x10a+offsetThickX, y10a-offsetThickY, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x11a+offsetThickX, y11a-offsetThickY, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Face2 Taper', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Tapered cohesive region
pOrig = (x11a+offsetCohThickX, y11a-offsetCohThickY, sandwichThick)
vertOrig = vertList.findAt(pOrig)
pXAxis = (x10a+offsetCohThickX, y10a-offsetCohThickY, sandwichThick)
vertX = vertList.findAt(pXAxis)
pYAxis = (x11a+offsetCohThickX, y11a-offsetCohThickY, 0)
vertXY = vertList.findAt(pYAxis)
taperPart.DatumCsysByThreePoints(name='Datum Csys Cohesive2 Taper', coordSysType=CARTESIAN,
    origin=vertOrig, point1=vertX, point2=vertXY)
# Curved top facesheet 1
# Cylindrical csys for the first fillet. The origin is a datum *point* (no
# vertex exists at the fillet centre), created and then fetched back via the
# last key of the datums repository — assumes keys follow creation order
# (TODO confirm for this Abaqus version).
pOrig = (x11, y11+filletRadius, sandwichThick)
taperPart.DatumPointByCoordinate(pOrig)
vertOrig = taperPart.datums[taperPart.datums.keys()[len(taperPart.datums)-1]]
pRaxis = (x11, y11, sandwichThick)
vertR = vertList.findAt(pRaxis)
pRThetaPlane = (x11a, y11a, sandwichThick)
vertRTheta = vertList.findAt(pRThetaPlane)
taperPart.DatumCsysByThreePoints(name='Datum Csys Face2 Curve1', coordSysType=CYLINDRICAL,
    origin=vertOrig, point1=vertR, point2=vertRTheta)
# Curved top facesheet 2 (same construction for the second fillet)
pOrig = (x10, y10-filletRadius, sandwichThick)
taperPart.DatumPointByCoordinate(pOrig)
vertOrig = taperPart.datums[taperPart.datums.keys()[len(taperPart.datums)-1]]
pRaxis = (x10, y10, sandwichThick)
vertR = vertList.findAt(pRaxis)
pRThetaPlane = (x10a, y10a, sandwichThick)
vertRTheta = vertList.findAt(pRThetaPlane)
taperPart.DatumCsysByThreePoints(name='Datum Csys Face2 Curve2', coordSysType=CYLINDRICAL,
    origin=vertOrig, point1=vertR, point2=vertRTheta)
#----------------------------------------------------------------
# Assign orientations
#----------------------------------------------------------------
datumList = taperPart.datums
# NOTE(review): datum systems are retrieved by POSITION in datumList.keys(),
# so this section silently breaks if the datum-creation order above changes.
# Indices 7 and 9 appear to be the datum points created for the cylindrical
# systems, which is presumably why the lookups jump to 8 and 10 — confirm.
# Lower facesheet
datumKey = datumList.keys()[0]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setFace1, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Lower cohesive zone (disabled — cohesive regions use default orientation)
#datumKey = datumList.keys()[1]
#datumCsys = datumList[datumKey]
#taperPart.MaterialOrientation(region=setCohesiveFaceFace, localCsys=datumCsys, axis=AXIS_1,
#    additionalRotationField='', additionalRotationType=ROTATION_NONE,
#    angle=0.0, stackDirection=STACK_1, fieldName='', orientationType=SYSTEM)
#taperPart.MaterialOrientation(region=setCohesiveFaceCoreBot, localCsys=datumCsys, axis=AXIS_1,
#    additionalRotationField='', additionalRotationType=ROTATION_NONE,
#    angle=0.0, stackDirection=STACK_1, fieldName='', orientationType=SYSTEM)
# Upper facesheet
datumKey = datumList.keys()[2]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setFace2, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Upper cohesive zone (disabled)
#datumKey = datumList.keys()[3]
#datumCsys = datumList[datumKey]
#taperPart.MaterialOrientation(region=setCohesiveFaceCoreTop, localCsys=datumCsys, axis=AXIS_1,
#    additionalRotationField='', additionalRotationType=ROTATION_NONE,
#    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Core (both the plain and XFEM-enriched core share the core csys)
datumKey = datumList.keys()[4]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setCore, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
taperPart.MaterialOrientation(region=setCoreTaper, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Tapered top facesheet
datumKey = datumList.keys()[5]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setFace2Taper, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Tapered cohesive region (disabled)
#datumKey = datumList.keys()[6]
#datumCsys = datumList[datumKey]
#taperPart.MaterialOrientation(region=setCohesiveFaceCoreTaper, localCsys=datumCsys,
#    additionalRotationField='', additionalRotationType=ROTATION_NONE,
#    axis=AXIS_1, angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Curved top facesheet 1 (cylindrical system)
datumKey = datumList.keys()[8]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setFace2Curve1, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
# Curved top facesheet 2 (cylindrical system)
datumKey = datumList.keys()[10]
datumCsys = datumList[datumKey]
taperPart.MaterialOrientation(region=setFace2Curve2, localCsys=datumCsys, axis=AXIS_1,
    additionalRotationField='', additionalRotationType=ROTATION_NONE,
    angle=0.0, stackDirection=STACK_3, fieldName='', orientationType=SYSTEM)
#----------------------------------------------------------------
# Create instances and step
#----------------------------------------------------------------
taperInst = taperAssem.Instance(dependent=ON, name='TaperAssembly', part=taperPart)
taperAssem.regenerate()
# Nonlinear static step with automatic stabilization (dissipated-energy
# fraction) — cohesive damage growth makes the response unstable otherwise.
taperStep = taperModel.StaticStep(name='Step-1', previous='Initial')
taperStep.setValues(nlgeom=ON, maxNumInc=10000,
    stabilizationMagnitude=0.0002,
    stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
    continueDampingFactors=False, adaptiveDampingRatio=0.05, initialInc=1e-05,
    minInc=1.0e-8, maxInc=0.001)
#----------------------------------------------------------------
# Create regions for applying bcs
#----------------------------------------------------------------
edgeList = taperInst.edges
faceList = taperInst.faces
# Left bottom edge (support)
pLeftBot = (x1, y1, midPanel)
edgeLeftBot = edgeList.findAt((pLeftBot,))
setEdgeLeftBot = taperAssem.Set(name='EdgeLeftBot', edges=edgeLeftBot)
regEdgeLeftBot = regionToolset.Region(edges=edgeLeftBot)
# Left symmetry face (facesheet 1, cohesive layer and facesheet 2 at x = x1/x12)
pLeftFace1 = (x1, y1+midFace1, midPanel)
pLeftCohesive = (x1, y1+face1Thick-midCohesive, midPanel)
pLeftFace2 = (x12, y12-midFace2, midPanel)
symmLeftFace = faceList.findAt((pLeftFace1,), (pLeftCohesive,), (pLeftFace2,))
setSymmLeftFace = taperAssem.Set(name='SymmLeftFace', faces=symmLeftFace)
regSymmLeftFace = regionToolset.Region(faces=symmLeftFace)
# Loading edge
pLoad = (x9, y9, midPanel)
edgeLoad = edgeList.findAt((pLoad,))
setEdgeLoad = taperAssem.Set(name='EdgeLoad', edges=edgeLoad)
regEdgeLoad = regionToolset.Region(edges=edgeLoad)
# Right support edge
pRightBot = (x5, y5, midPanel)
edgeRightBot = edgeList.findAt((pRightBot,))
setEdgeRightBot = taperAssem.Set(name='EdgeRightBot', edges=edgeRightBot)
regEdgeRightBot = regionToolset.Region(edges=edgeRightBot)
# Thickness symmetry face: collect every face whose centroid lies on z = 0.
symmThickFace = faceList[0:0]
for jj in range(len(faceList)):
    curFace = faceList[jj:jj+1]
    centroid = curFace[0].getCentroid()
    zCentroid = centroid[0][2]
    print zCentroid
    # Exact float comparison — relies on getCentroid returning exactly 0.0
    # for faces constructed on the z=0 plane (TODO confirm; a tolerance
    # would be safer).
    if zCentroid == 0.0:
        print curFace
        symmThickFace += curFace
setSymmThickFace = taperAssem.Set(name='SymmThickFace', faces=symmThickFace)
regSymmThickFace = regionToolset.Region(faces=symmThickFace)
#----------------------------------------------------------------
# Create displacement BCs
#----------------------------------------------------------------
# Simple supports (u2=0) at both bottom edges, symmetry on the left face
# (u1=0) and the z=0 face (u3=0), and a prescribed -0.01 displacement at
# the load edge.
taperModel.DisplacementBC(name='Left support BC', createStepName='Step-1',
    region=regEdgeLeftBot, u2 = 0.0)
taperModel.DisplacementBC(name='Right support BC', createStepName='Step-1',
    region=regEdgeRightBot, u2 = 0.0)
taperModel.DisplacementBC(name='Left Symmetry BC', createStepName='Step-1',
    region=regSymmLeftFace, u1 = 0.0)
taperModel.DisplacementBC(name='Thickness Symmetry BC', createStepName='Step-1',
    region=setSymmThickFace, u3 = 0.0)
taperModel.DisplacementBC(name='Load BC', createStepName='Step-1',
    region=setEdgeLoad, u2 = -0.01)
#----------------------------------------------------------------
# Create mesh
#----------------------------------------------------------------
# Element types
# Reduced-integration hexes (C3D8R, with C3D6 wedge fallback) for the solid
# regions; cohesive hex/wedge (COH3D8/COH3D6) for the adhesive layers.
elemSolid1 = ElemType(elemCode=C3D8R, elemLibrary=STANDARD,
    kinematicSplit=AVERAGE_STRAIN, secondOrderAccuracy=OFF,
    hourglassControl=DEFAULT, distortionControl=DEFAULT)
elemSolid2 = ElemType(elemCode=C3D6, elemLibrary=STANDARD)
elemCohesive1 = ElemType(elemCode=COH3D8, elemLibrary=STANDARD)
elemCohesive2 = ElemType(elemCode=COH3D6, elemLibrary=STANDARD)
# Seed the part
taperPart.seedPart(size=hSize, deviationFactor=0.1)
# Mesh controls: every region is swept with hex elements using the
# advancing-front algorithm.
taperPart.setMeshControls(regions=setFace1.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setFace2.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setFace2Taper.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
#taperPart.setMeshControls(regions=setFace1ExtraBit.cells, technique=SWEEP,
#    algorithm=ADVANCING_FRONT, elemShape=WEDGE)
taperPart.setMeshControls(regions=setCore.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCoreTaper.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceFace.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceCoreBot.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceCoreTop.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceCoreTaper.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceCoreCurve1.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setCohesiveFaceCoreCurve2.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setFace2Curve1.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
taperPart.setMeshControls(regions=setFace2Curve2.cells, technique=SWEEP,
    algorithm=ADVANCING_FRONT, elemShape=HEX)
# Set element type: solid elements for bulk sets, cohesive for adhesive sets.
taperPart.setElementType(regions=setFace1, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setFace2, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setFace2Taper, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setCore, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setCoreTaper, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setCohesiveFaceFace,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setCohesiveFaceCoreBot,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setCohesiveFaceCoreTop,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setCohesiveFaceCoreTaper,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setCohesiveFaceCoreCurve1,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setCohesiveFaceCoreCurve2,
    elemTypes=(elemCohesive1, elemCohesive2))
taperPart.setElementType(regions=setFace2Curve1, elemTypes=(elemSolid1, elemSolid2))
taperPart.setElementType(regions=setFace2Curve2, elemTypes=(elemSolid1, elemSolid2))
# Generate mesh
taperPart.generateMesh()
#----------------------------------------------------------------
# Add XFEM controls
#----------------------------------------------------------------
# The tapered core cells (assigned the 'CoreXFEM' section above) form the
# XFEM crack domain.
taperAssem = taperModel.rootAssembly
setCoreTaper = taperInst.sets['CoreTaper']
taperAssem.engineeringFeatures.XFEMCrack(name='Crack-1', crackDomain=setCoreTaper)
taperInteract = taperModel.XFEMCrackGrowth(name='Int-1', createStepName='Initial', crackName='Crack-1')
#: The interaction "Int-1" has been created.
# Crack growth is held off in the initial step and enabled in Step-1.
taperInteract.setValues(allowGrowth=False)
taperInteract.setValuesInStep(stepName='Step-1', allowGrowth=True)
# Hard normal contact (penalty enforcement) for the crack faces.
taperContact = taperModel.ContactProperty('IntProp-1')
taperContact.NormalBehavior(
    pressureOverclosure=HARD, allowSeparation=ON, contactStiffness=DEFAULT,
    contactStiffnessScaleFactor=1.0, clearanceAtZeroContactPressure=0.0,
    stiffnessBehavior=LINEAR, constraintEnforcementMethod=PENALTY)
#: The interaction property "IntProp-1" has been created.
#taperModel.historyOutputRequests['H-Output-1'].setValues(
#    contourIntegral='Crack-1', sectionPoints=DEFAULT, rebar=EXCLUDE,
#    numberOfContours=2)
# Field output extended with cohesive damage (CSDMG/CSMAX*) and the XFEM
# level-set fields (PHILSM/PSILSM).
taperModel.fieldOutputRequests['F-Output-1'].setValues(
    variables=('S', 'PE', 'PEEQ', 'PEMAG', 'LE', 'U', 'RF', 'CF', 'CSTRESS', 'CDISP', 'CSDMG', 'CSMAXSCRT', 'CSMAXUCRT', 'PHILSM', 'PSILSM'))
#----------------------------------------------------------------
# Create Job
#----------------------------------------------------------------
mdb.Job(atTime=None, contactPrint=OFF, description='', echoPrint=OFF,
    explicitPrecision=SINGLE, getMemoryFromAnalysis=True, historyPrint=OFF,
    memory=90, memoryUnits=PERCENTAGE, model='TaperModelCurveXFEM1', modelPrint=OFF,
    multiprocessingMode=DEFAULT, name='TaperModelCurveXFEM1', nodalOutputPrecision=SINGLE,
    numCpus=1, numDomains=1, queue=None, scratch='', type=ANALYSIS,
    userSubroutine='', waitHours=0, waitMinutes=0)
|
# Template Class
# serves as a template for file handling and such
#
from dateutil import parser
from decimal import Decimal
import pandas as pd
import time
import datetime
import csv
import os, sys
import re
class Template(object):
    '''
    Base class for log-file parsing/plotting utilities.

    Resolves the output csv path from the input log path, extracts the
    device MAC from the file name, and converts the requested start/end
    window into epoch seconds for time filtering.

    kwargs:
        start: parse-window start, default '1970-1-1,01:01:01'
        end:   parse-window end,   default '2099-9-9,09:09:09'
        debug: verbose flag for subclasses (default False)
    '''
    def __init__(self, log, **kwargs):
        '''log is the full path of the input log file; exits if missing.'''
        start = kwargs.get('start', '1970-1-1,01:01:01')
        end = kwargs.get('end', '2099-9-9,09:09:09')
        self.startTS = parser.parse(start)
        self.endTS = parser.parse(end)
        self.debug = kwargs.get('debug', False)
        self.logFullPath = log  # input log file
        if not os.path.exists(log):
            print('Path does not exist: %s' % log)
            sys.exit(0)
        else:
            # output csv path derived from the input log path
            self.outputFullPath = self.OutputFile(self.logFullPath, '.csv')
            self.macID = self.GetMac(self.outputFullPath)
        self.timeStart = self.DT2Epoch(start)
        self.timeEnd = self.DT2Epoch(end)
        # Window length in seconds. Bug fix: the original computed
        # timeStart - timeEnd, always negative for a valid window.
        self.deltatime = self.timeEnd - self.timeStart

    def OutputFile(self, inputfile, ext=None):
        '''
        Derive the output path by appending '_output' to the input name.
        inputfile must be a full path; ext (e.g. '.csv') overrides the
        input extension when given.
        e.g. OutputFile('c:/temp/test.txt', ext='.csv') -> 'c:/temp/test_output.csv'
        '''
        input_directory = os.path.dirname(inputfile)
        input_name, input_ext = os.path.splitext(os.path.basename(inputfile))
        output_ext = ext if ext is not None else input_ext
        return os.path.join(input_directory, input_name + '_output' + output_ext)

    def RemoveFile(self, logdir, ext='.csv'):
        '''
        Remove all files ending with ext (default '.csv') in logdir.
        By default the log dir only has txt files and macIDs; .csv files
        are generated by running GenCSV().
        '''
        # Plain loop instead of a side-effecting list comprehension.
        for fname in os.listdir(logdir):
            if fname.endswith(ext):
                os.remove(os.path.join(logdir, fname))

    def GetMac(self, filename):
        '''Return the first 16-hex-char run in filename, or None.'''
        mac = re.search(r'[0-9a-z]{16}', filename, flags=re.IGNORECASE)
        if mac is not None:
            return mac.group()

    def GetUUT(self, filename):
        '''Return 'uutNN' (zero-padded) parsed from filename, or None.'''
        uut = re.search(r'(?<=uut)[0-9]{1,2}', filename)
        if uut is not None:
            return ''.join(['uut', '%.2d' % int(uut.group())])

    def dateparserCombiner(self, x, y):
        '''Combine date x ('%m/%d/%Y') and time y ('%H:%M:%S') into a
        datetime; fall back to the 1970-01-01 epoch on bad input.'''
        try:
            DateTimeData = datetime.datetime.strptime(' '.join([x, y]), '%m/%d/%Y %H:%M:%S')
        except (ValueError, TypeError):
            # Bug fix: original called datetime(...) — the *module* — which
            # always raised TypeError instead of returning the fallback.
            DateTimeData = datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0)
        return DateTimeData

    def dateparser(self, x):
        '''
        Parse x in '%Y-%m-%d %H:%M:%S' format (e.g. '2017-11-25 23:16:14').
        NOTE: pd.datetime was an alias of datetime.datetime and was removed
        in modern pandas; use the stdlib class directly.
        '''
        return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')

    def DT2Epoch(self, dateTime):
        '''
        Convert a date-time string to epoch seconds (local time).
        i.e. convert string '2016-05-13,12:52:03' to 1461024652.0
        or convert string '05/13/2016 12:52:03' to 1461024652.0
        '''
        dateTime = (', ').join(dateTime.split(','))
        try:
            dt = parser.parse(dateTime)
        except Exception as e:
            print('DT2Epoch encountered an error %s' % e)
            print('dateTime variable: %s' % dateTime)
            # Bug fix: the original fell through with dt unbound, raising a
            # confusing NameError below; re-raise the real error instead.
            raise
        return time.mktime(dt.timetuple())

    def Epoch2DT(self, timestamp):
        '''Epoch seconds -> 'YYYY-MM-DD HH:MM:SS' string (local timezone).'''
        return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')

    def GetTimeStamp(self, line):
        '''Return (datetime_str, date_str, time_str) parsed from a line
        starting with 'YYYY-MM-DD,HH:MM:SS', else None.'''
        timestamp = re.match(r"\d{4}-\d{2}-\d{2},\d{2}:\d{2}:\d{2}", line)
        if timestamp is not None:
            # Renamed locals: 'date'/'time' shadowed the stdlib modules.
            date_part, time_part = timestamp.group().split(',')
            dateTime = ' '.join([date_part, time_part])
            return dateTime, date_part, time_part
        else:
            return None

    def CheckStartStop(self, **kwargs):
        '''
        Return True when the supplied row time falls inside the configured
        [timeStart, timeEnd] window, False otherwise.
        kwargs: either date_input + time_input, or datetime_input.
        '''
        date_input = kwargs.get('date_input')
        time_input = kwargs.get('time_input')
        dateTime = kwargs.get('datetime_input')
        if date_input is not None and time_input is not None:
            rowtime = self.DT2Epoch(','.join([date_input, time_input]))
        elif dateTime is not None:
            rowtime = self.DT2Epoch(dateTime)
        else:
            # Bug fix: the original built this message as a bare expression
            # (never printed) and then hit a NameError on rowtime.
            print('Does not understand data input\ndate: %s\ntime: %s\ndatetime: %s' % (date_input, time_input, dateTime))
            return False
        return self.timeStart <= rowtime <= self.timeEnd

    def DictAdd(self, adict, key, value):
        '''Accumulate value into adict[key] (initialise on first use).'''
        if adict.get(key) is None:
            adict[key] = value
        else:
            adict[key] = adict.get(key) + value

    #3/20/2017 removed fn, to generate csv from each WAN_Ping_Plotter python files.
    def GetDF(self, usecols, **kwargs):
        '''
        Return a DataFrame loaded from the output csv (or kwargs['csvpath']),
        restricted to usecols and indexed by its parsed 'DateTime' column.
        '''
        csvpath = kwargs.get('csvpath', self.outputFullPath)
        df = pd.read_csv(csvpath, usecols=usecols)
        df['DateTime'] = pd.to_datetime(df['DateTime'])
        df = df.set_index('DateTime', drop=True)
        print(df.describe())
        return df

    def Run(self):
        '''Hook for subclasses: produce the plot. Base version is a stub.'''
        print('this is self.Run(self)')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import re
import sublime
import sublime_plugin
from . import sqlodbccon
from .te_sql_alias import SQLAlias
sys.path.append(os.path.join(os.path.dirname(__file__), "../libs"))
from ..libs.terminaltables.other_tables import WindowsTable as SingleTable
# Syntax definitions applied to query views and report output.
DEFAULT_SYNTAX = 'Packages/TSQLEasy/TSQL.tmLanguage'
DEFAULT_REPORT_SYNTAX = 'Packages/Markdown/Markdown.tmLanguage'
# Module-wide table-alias registry shared by the commands in this plugin.
global_alias = SQLAlias()
class TsqlEasyInsertTextCommand(sublime_plugin.TextCommand):
    # Thin TextCommand wrapper that inserts `text` at `position` in the view.
    # (Sublime only hands out an edit token inside a command's run().)
    def run(self, edit, position, text):
        self.view.insert(edit, position, text)
def te_get_setting(key, default_value=None):
    '''Read *key* from TSQLEasy.sublime-settings, or return *default_value*.'''
    return sublime.load_settings('TSQLEasy.sublime-settings').get(key, default_value)
def te_set_setting(key, value):
    '''Write *key* = *value* into TSQLEasy.sublime-settings and persist it.'''
    settings_name = 'TSQLEasy.sublime-settings'
    cfg = sublime.load_settings(settings_name)
    cfg.set(key, value)
    sublime.save_settings(settings_name)
def te_get_connection():
    '''Build an SQLCon for the active server entry from the plugin settings.

    Returns None when the active entry has no truthy "driver" key, or when
    the connection attempt raises (after showing a message dialog).
    '''
    server_active = te_get_setting('te_server_active')
    server_list = te_get_setting('te_sql_server')
    server_cfg = server_list[server_active]  # hoist the repeated lookup
    driver = server_cfg.get('driver', None)
    if not driver:
        return None
    dsn = server_cfg.get('dsn', None)
    server = server_cfg.get('server', None)
    server_port = server_cfg.get('server_port', 1433)
    username = server_cfg['username']
    password = server_cfg['password']
    database = server_cfg['database']
    # dict.get with defaults replaces the "x if k in cfg else d" chains.
    autocommit = server_cfg.get('autocommit', True)
    timeout = server_cfg.get('timeout', 0)
    try:
        return sqlodbccon.SQLCon(dsn=dsn, server=server, driver=driver, serverport=server_port, username=username, password=password, database=database, sleepsecs=5, autocommit=autocommit, timeout=timeout)
    except Exception as e:
        # Bug fix: e.args[1] raised IndexError for single-argument
        # exceptions; fall back to str(e) when args[1] is absent.
        sublime.message_dialog(e.args[1] if len(e.args) > 1 else str(e))
        return None
# def te_get_encodings():
# profile = te_get_setting('te_sql_profile')
# if profile:
# encodings = te_get_setting('te_encodings')
# return encodings[profile]
# else:
# return 'utf-8'
def te_get_alias(string_data):
    ''' get string alias by caps characters '''
    # One capital-initial chunk per run, e.g. 'CamelCase' -> ['Camel', 'Case'];
    # the alias is the first letter of each chunk joined together.
    chunks = re.findall('[A-Z][^A-Z]*', string_data)
    return ''.join(chunk[0] for chunk in chunks)
def te_get_all_aliases(text):
    """Scan SQL *text* and register every table alias in the global registry."""
    # get aliases from substrings FROM and JOIN
    text = text.lower()
    # edited by Caio Hamamura - will get schema and square brackets (optionally)
    pattern = r'[^\w](from|join)\s{0,}(\[?\w+?\]?\.?\[?\w+\]?)\s(as\s)?\s{0,}(\w+)'
    aliases_strings = re.findall(pattern, text)
    if aliases_strings:
        for alias in aliases_strings:
            if alias[1] and alias[3]:
                # group 3 is the alias, group 1 the table name
                global_alias.set_alias(alias[3].strip('\n').strip(), alias[1].strip('\n').strip())
    del aliases_strings
    # get aliases from section FROM without JOINs..
    is_from_section = False
    # keywords that terminate a comma-separated FROM list
    words = ('where', 'join', 'order', 'select', 'insert', 'update', 'with', 'group')
    for line in text.split('\n'):
        line = line.strip().strip(',')
        if line.startswith('from') or ' from' in line:
            is_from_section = True
        if is_from_section and any(wo in line for wo in words):
            is_from_section = False
        if is_from_section:
            # "<table> [as] <alias>" on each line of the FROM list
            pattern = r'^(.*?)\s(as\s)?(.*)'
            aliases_strings = re.findall(pattern, line)
            if aliases_strings:
                for alias in aliases_strings:
                    if alias[0] and alias[2]:
                        global_alias.set_alias(alias[2].strip('\n').strip(), alias[0].strip('\n').strip())
    del aliases_strings
def te_reload_aliases_from_file():
    """Re-scan the active view for table aliases when its text has changed."""
    window = sublime.active_window()
    if not window:
        return
    view = window.active_view()
    if view is None:
        return
    text = view.substr(sublime.Region(0, view.size()))
    # set_text_hash() reports whether the buffer content changed since last scan
    if global_alias.set_text_hash(text.encode('utf-8')):
        global_alias.aliases = {}
        te_get_all_aliases(text)
        window.status_message('Aliases were reloaded.')
    else:
        window.status_message('Text unchanged. Using old aliases.')
def te_get_title():
    """Return the page title of the active tab.

    Prefers the view's explicit name and falls back to its file name.
    Returns '' when there is no active window/view; returns None for an
    unsaved, unnamed view (original behaviour, kept for callers that
    distinguish '' from None).
    """
    window = sublime.active_window()
    # `not window` already covers None -- the original double-checked
    # `not x or x is None`, which was redundant
    if not window:
        return ''
    view = window.active_view()
    if not view:
        return ''
    view_name = view.name()
    if view_name:
        return view_name
    file_name = view.file_name()
    if file_name:
        return file_name
    return None
def te_get_columns(position=None):
    """Get table columns for completions.

    Resolves the word at *position* (default: just before the cursor) to a
    table name, expanding any alias collected from the buffer, then queries
    sys.columns.  Always returns a list (possibly empty).
    """
    # NOTE: querying sys.columns with OBJECT_ID(?) inline raises
    # "Invalid character value for cast specification (22018)" on some
    # pyodbc versions (works on 3.0.7), so the object id is resolved
    # in a separate round trip first.
    sqlreq_columns = "SELECT c.name FROM sys.columns as c WHERE c.object_id = ?"
    te_reload_aliases_from_file()
    view = sublime.active_window().active_view()
    if position is None:
        position = view.sel()[0].begin() - 1
    table_name = view.substr(view.word(position))
    columns = []
    # expand an auto-collected alias (e.g. 'c' -> 'customers')
    al = global_alias.get_alias(table_name.lower())
    if al:
        table_name = al
    sqlcon = te_get_connection()
    # te_get_connection() returns None when no driver is configured
    if sqlcon is None or sqlcon.sqlconnection is None:
        return columns
    sql_params = (table_name,)
    sqlcon.dbexec('SELECT OBJECT_ID(?) as object_id', sql_params)
    object_id = sqlcon.sqldataset[0].object_id
    if not object_id:
        # disconnect before bailing out (the original leaked the
        # connection and returned None instead of a list here)
        sqlcon.dbdisconnect()
        return columns
    sql_params = (object_id,)
    sqlcon.dbexec(sqlreq_columns, sql_params)
    if sqlcon.sqldataset:
        for row in sqlcon.sqldataset:
            column = row.name
            if column:
                columns.append(('%s\tTable column' % column, column))
    sqlcon.dbdisconnect()
    return columns
def te_get_tables(schema=None):
    """Get the table list (filtered by *schema*) for completions.

    Defaults to the connection's default schema.  Always returns a list
    (possibly empty), even when no connection could be established.
    """
    sqlreq_tables = 'SELECT DISTINCT TABLE_NAME as name FROM information_schema.TABLES WHERE TABLE_SCHEMA = ?'
    tables = []
    sqlcon = te_get_connection()
    # te_get_connection() returns None when no driver is configured;
    # the original crashed on sqlcon.defaultschema in that case
    if sqlcon is None:
        return tables
    if schema is None:
        schema = sqlcon.defaultschema
    if sqlcon.sqlconnection is not None and schema:
        sqlcon.dbexec(sqlreq_tables, (schema,))
        if sqlcon.sqldataset:
            for row in sqlcon.sqldataset:
                tables.append(('%s\tSQL table' % row.name, row.name))
        sqlcon.dbdisconnect()
    return tables
def te_show_data(title, sql_query, sql_params, setup_columns):
    """Render the results of *sql_query* as a Markdown report with an
    ASCII table.

    Column layout comes from the *setup_columns* setting; returns the
    report text, or None when no columns are configured or no
    connection is available.
    """
    def cut(val, maxlen):
        # shorten val to maxlen characters, marking truncation with '..'
        if maxlen:
            if len(val) > maxlen:
                return '%s..' % val[:maxlen - 2]
        return val
    cols = te_get_setting(setup_columns, {})
    if cols:
        shortcuts = [
            '[Enter](View query)',
            '[r](Refresh rows)']
        content_header = '%s\n\n' % ' '.join(shortcuts)
        content_header += '## %s\n' % title
        sqlcon = te_get_connection()
        if sqlcon.sqlconnection is not None and sql_query:
            sqlcon.dbexec(sql_query, sql_params)
            rows = sqlcon.sqldataset
            # map column name -> positional index within each result row
            columns_indexes = {v[0]: k for k, v in enumerate(sqlcon.sqlcolumns)}
            sqlcon.dbdisconnect()
            content_header += 'Total processes: %s\n\n' % len(rows)
            table_data = []
            if rows:
                table_header = [col['colname'] for col in cols]
                table_data.append(table_header)
                for row in rows:
                    table_row = []
                    for col in cols:
                        value = ''
                        maxlen = col.get('maxlen', None)
                        col_prop = col['prop']
                        value = str(row[columns_indexes.get(col_prop)]).strip().replace('\r', ' ')
                        if maxlen:
                            # collapse newlines before truncating multi-line values
                            value = ' '.join(value.splitlines())
                            value = cut(value, maxlen)
                        table_row.append(value)
                    table_data.append(table_row)
            else:
                table_data.append([' No data found '])
            table_text = SingleTable(table_data).table
            return ''.join(
                [
                    content_header,
                    table_text
                ]
            )
|
#!/usr/bin/env python
#! -*- coding:utf-8 -*-
#!@Author: faple
#!@Time: 2019/4/1 10:59
from arcsoft_all.lib import face_dll, face_detect_sdk, face_property_sdk
from arcsoft_all.util import faceUtil
# ArcSoft SDK credentials (app id / sdk key) used for activation below.
APP_ID = b'4fEUb81MxFozpgncw2k8a7ZAEArfvNGZjvvo8jZDLgkn'
SDK_KEY = b'7ewepBAnhYgQdsGyJ9aqDchQ9aZGy8U8sYdHMHehx9ps'
# Appkey = b'4fEUb81MxFozpgncw2k8a7ZAEArfvNGZjvvo8jZDLgkn'
# SDKey = b'7ewepBAnhYgQdsGyJ9aqDchQ9aZGy8U8sYdHMHehx9ps'
img1 = faceUtil.IM()
img2 = faceUtil.IM()
# NOTE(review): both image paths are commented out, so img1/img2 carry no
# filePath -- loadImage() below presumably fails; confirm before running.
# img1.filePath = 'E:/PycharmProjects/face_recognition_test/arcsoft_all/3.jpg'
# img2.filePath = 'E:/PycharmProjects/face_recognition_test/arcsoft_all/6.jpg'
# Activate the SDK licence for this device.
res = faceUtil.asfActivation(APP_ID, SDK_KEY)
# 90114 is also treated as success (presumably "already activated" --
# confirm against the ArcSoft error-code table).
if res == 0 or res == 90114:
    print("激活成功!", res)
else:
    print("激活失败!", res)
# Engine init; original reference call: 0xFFFFFFFF, 0x1, 16, 50, 5, byref(Handle))
res, faceEngine = faceUtil.asfInitEngine(0xFFFFFFFF, 0x5, 16, 50, 61)
if res == 0:
    print("初始化成功!", res)
else:
    print("初始化失败!", res)
image1 = faceUtil.loadImage(img1)
image2 = faceUtil.loadImage(img2)
print('image1: ', image1)
print('image2: ', image2)
# Detection and attribute extraction are performed separately below.
def test():
    """Detect one face in each image, extract features/attributes, and
    compare the two feature sets (prints scores; no return value)."""
    res1, faces1 = faceUtil.asfDetectFaces(image1, 0x201)
    if res1 == 0:
        print('image1:', res1, faces1.faceNum, faces1.faceRect)
    else:
        print('image1:', res1)
    # NOTE(review): res1 is reused -- after this call it holds the feature
    # extraction result, not the detection status
    res1 = faceUtil.asfFaceFeatureExtract(image1, 0x201, faceUtil.getSingleFaceInfo(faces1, 0))
    print('image1:', res1)
    # fetch face attributes (age / gender / 3D angle)
    # print(1 | 4)
    # print(8 | 16 | 32)
    res = faceUtil.asfProcess(image1, 0x201, faces1, 56)
    print(res)
    print(faceUtil.asfGetAge()[1].ageArray[0])
    print(faceUtil.asfGetGender()[1].genderArray[0])
    print(faceUtil.asfGetFace3DAngle()[1].roll[0])
    res2, faces2 = faceUtil.asfDetectFaces(image2, 0x201)
    if res2 == 0:
        print('image2:', res2, faces2.faceNum, faces2.faceRect)
    else:
        print('image2:', res2)
    res2 = faceUtil.asfFaceFeatureExtract(image2, 0x201, faceUtil.getSingleFaceInfo(faces2, 0))
    print('image2:', res2)
    # face comparison
    res, score = faceUtil.asfFaceFeatureCompare(res1[1], res2[1])
    print(res, score)
test()
# Consecutive detection is buggy -- presumably a pointer issue when fetching
# the RECTs for single-face vs. multi-face results.
def test1():
    """Variant of test(): detect faces in both images, then extract
    features for image1 only (comparison left commented out)."""
    # detect faces
    res1, faces1 = faceUtil.asfDetectFaces(image1, 0x201)
    if res1 == 0:
        print('image1:', res1, faces1.faceNum, faces1.faceRect)
    else:
        print('image1:', res1)
    res2, faces2 = faceUtil.asfDetectFaces(image2, 0x201)
    if res2 == 0:
        print('image2:', res2, faces2.faceNum, faces2.faceRect)
    else:
        print('image2:', res2)
    # experiment: how to read values for multiple detected faces
    # img3 = faceUtil.IM()
    # img3.filePath = 'E:/PycharmProjects/face_recognition_test/arcsoft_all/aaa.jpeg'
    # image3 = faceUtil.loadImage(img3)
    # res3, faces3 = faceUtil.asfDetectFaces(image3, 0x201)
    # print(faces3.faceOrient[1], faces3.faceRect[0])
    # extract face features, single face first
    # res1, detect1 = faceUtil.asfFaceFeatureExtract(image1, 0x201, face_detect_sdk.ASF_SingleFaceInfo(faces1.faceRect.contents, faces1.faceOrient.contents))
    res1 = faceUtil.asfFaceFeatureExtract(image1, 0x201, faceUtil.getSingleFaceInfo(faces1, 0))
    print('image1:', res1)
    # res2, detect2 = faceUtil.asfFaceFeatureExtract(image2, 0x201, face_detect_sdk.ASF_SingleFaceInfo(faces2.faceRect.contents, faces2.faceOrient.contents))
    # res2 = faceUtil.asfFaceFeatureExtract(image2, 0x201, faceUtil.getSingleFaceInfo(faces2, 0))
    # print('image2:', res2)
    # face comparison
    # res, score = faceUtil.asfFaceFeatureCompare(detect1, detect2)
    # print(res, score)
|
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split

# Load the Boston housing data.  BUGFIX: the original called
# mglearn.datasets.load_extended_boston() without ever importing mglearn
# (a NameError); use the sklearn dataset that is actually imported.
# NOTE: load_boston is deprecated and removed in scikit-learn >= 1.2.
boston = load_boston()
X, y = boston.data, boston.target
# Split the data into train and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
ENreg = ElasticNet(alpha=1, l1_ratio=0.5, normalize=False)
ENreg.fit(X_train, y_train)
pred_cv = ENreg.predict(X_test)
# Mean squared error.  BUGFIX: the original averaged the raw residuals,
# which cancel out; the residuals must be squared first.
mse = np.mean((pred_cv - y_test) ** 2)
print("MSE :{:.2f}".format(mse))
# R^2 score on the test set.
ENreg_score = ENreg.score(X_test, y_test)
print("ENreg_score:{:.2f}".format(ENreg_score))
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
tprism.py : Comparison of simulation with analytic expectation
================================================================
Analysis of "prism" and "newton" event categories
* "prism" uses all incident angles, see `tprism-`
* "newton" uses one incident angle, see `tnewton-`
TODO:
* handle multiple wavelengths
* make comparison after histogramming, like reflection.py does
* but here have incident angle as well as the deviation, so need 2d
* http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.histogram2d.html
Prism Deviation Angle Calculation
------------------------------------
Hecht p163, two refractions thru a prism, CB and CD are normal to surface planes::
.
A
/ \
/ \
/ \
/ \
/ \
/ \
/ \
B. . . . . . . .D
. / \
/ C \
/ \
/ \
--------------------------
Polygon ABCD
BAD : alpha (apex angle)
CBA : 90 degrees
CDA : 90 degrees
BCD : 180 - alpha
Triangle BCD has angles:
CBD: t1
CDB: i2
BCD: 180 - alpha
==> alpha = t1 + i2
Ray path thru prism, BD
Deviation at B: (i1 - t1)
Deviation at D: (t2 - i2)
Total
delta: (i1 - t1) + (t2 - i2)
delta = i1 + t2 - alpha
Aiming for expression providing delta as function of theta_i1,
apex angle alpha and refractive index n
Snells law at 2nd interface, prism refractive index n in air
sin(t2) = n sin(i2)
t2 = arcsin( n sin(i2) )
= arcsin( n sin(alpha - t1) )
= arcsin( sin(alpha) n cos(t1) - cos(alpha) n sin(t1) )
[ n cos(t1) ]^2 = n^2 - n^2 sin(t1)^2
                = n^2 - sin(i1)^2        (Snell at 1st interface: n sin(t1) = sin(i1))
t2 = arcsin( sin(alpha) sqrt(n^2 - sin^2(i1)) - sin(i1) cos(alpha) )
2nd refraction has a critical angle where t2 = pi/2 above which TIR will occur
n sin(i2) = 1*sin(t2) = 1
i2c = arcsin( 1./n )
Propagate that back to 1st refraction
sin(i1) = n sin(t1) = n sin(alpha - i2)
i1 = arcsin( n sin(alpha - arcsin(1/n) ) )
But there is another constraint in there with edge
n sin(alpha - arcsin(1/n)) = 1
alpha - arcsin(1/n) = arcsin(1/n)
alpha/2 = arcsin(1/n) = i2c
alpha = 2*i2c (i2_c 43.304 n = 1.458)
This indicates that a 90 degree apex angle is not a good choice
for dispersing prism, use 60 degree instead.
At minimum deviation delta, ray are parallel to base and have symmetric ray
i1 = t2
t1 = i2
alpha = t1 + i2 ==> t1 = i2 = alpha/2
delta = i1 + t2 - alpha
sin(delta + alpha) = sin( i1 + t2 )
sin(i1) = n sin(t1)
i1 = arcsin( n sin(alpha/2) )
Where to shoot from to get minimum deviation ?
* Use intersect frame coordinate with the transform explicitly specifified
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from opticks.ana.base import opticks_main
from opticks.ana.nbase import count_unique
from opticks.ana.evt import Evt, costheta_
from opticks.ana.ana import Rat, theta
from opticks.ana.geometry import Shape, Plane, Boundary, Ray, Intersect, IntersectFrame, mat4_tostring, mat4_fromstring
# degree <-> radian conversion factors
rad = np.pi/180.
deg = 180./np.pi
np.set_printoptions(suppress=True, precision=3)
# arcsin/division of out-of-range values is expected near the critical
# angle; silence those numpy warnings (nan entries are masked later)
np.seterr(divide="ignore", invalid="ignore")
# ratio of the lengths of two collections
rat_ = lambda n,d:float(len(n))/float(len(d))
# component index aliases for 4-vectors
X,Y,Z,W = 0,1,2,3
class Box(Shape):
    """Box shape; thin wrapper over Shape.

    NOTE(review): the *wavelength* argument is accepted but never stored or
    forwarded -- confirm whether it is intentionally unused.
    """
    def __init__(self, parameters, boundary, wavelength=380 ):
        Shape.__init__(self, parameters, boundary)
class Prism(Shape):
    """Triangular prism with apex angle alpha (degrees), apex up along +Y.

    parameters: (alpha, height, depth).  Bounded by five outward-facing
    planes: the two refracting faces (lhs/rhs), the base, and the
    front/back caps.
    """
    def __init__(self, parameters, boundary):
        Shape.__init__(self, parameters, boundary)
        alpha = self.parameters[0]
        height = self.parameters[1]
        depth = self.parameters[2]
        a = alpha*np.pi/180.
        self.a = a
        self.sa = np.sin(a)
        self.ca = np.cos(a)
        self.alpha = alpha
        ymax = height/2.
        ymin = -height/2.
        # half-width of the base, from the apex half-angle
        hwidth = height*np.tan(a/2.)
        apex = [0,ymax,0]
        base = [0,ymin,0]
        front = [0,ymin,depth/2.]
        back = [0,ymin,-depth/2.]
        # bounding planes, each anchored at a point on its face
        lhs = Plane([-height, hwidth, 0], apex )
        rhs = Plane([ height, hwidth, 0], apex )
        bot = Plane([ 0, -1, 0], base )
        bck = Plane([ 0, 0, -1], back )
        frt = Plane([ 0, 0, 1], front )
        self.lhs = lhs
        self.rhs = rhs
        self.bot = bot
        self.ymin = ymin
        self.planes = [rhs, lhs, bot, frt, bck ]
        self.height = height
        self.depth = depth
        self.apex = np.array(apex)
    def __repr__(self):
        return "Prism(%s,%s) alpha %s " % (repr(self.parameters), repr(self.boundary), self.alpha)
    def intersectframe(self, ray):
        """
        Form a coordinate system centered on the first intersection point,
        with the surface normal along Y.  Another point in the plane of the
        intersected face (the apex) is used to pick the X direction.
        Round-trips a few sanity points through the transform as
        self-checks before returning the frame.
        """
        isect = self.intersect(ray)
        assert len(isect)>0
        i0, i1 = isect
        # BUGFIX: anchor the frame with this instance's apex -- the original
        # referenced the module-level `prism` global, which breaks for any
        # other instance
        ifr = IntersectFrame( i0, a=self.apex)
        ## check transforming from world frame into intersect frame
        p_if = ifr.world_to_intersect(ifr.p)
        n_if = ifr.world_to_intersect(ifr.n, w=0) # direction, not coordinate
        a_if = ifr.world_to_intersect(ifr.a)
        pa_wf = ifr.a - ifr.p
        pa_if = ifr.world_to_intersect(pa_wf, w=0)
        pa_if /= np.linalg.norm(pa_if)
        log.info("intersect position ifr.p %s in p_if %s " % (ifr.p, p_if ))
        log.info("intersect surface normal ifr.n %s in n_if %s " % (ifr.n, n_if ))
        log.info(" ifr.a %s in a_if %s " % (ifr.a, a_if ))
        log.info(" pa_wf %s in pa_if %s " % (pa_wf, pa_if ))
        assert np.allclose( p_if, np.array([0,0,0,1]))
        assert np.allclose( n_if, np.array([0,1,0,0]))
        assert np.allclose( pa_if, np.array([1,0,0,0]))
        ## check transforming from intersect frame into world frame
        o_if = np.array([0,0,0])
        o_wf = ifr.intersect_to_world(o_if)
        log.info(" o_if %s o_wf %s " % (o_if, o_wf ))
        assert np.allclose( o_wf[:3], ifr.p )
        return ifr
    def intersect(self, ray):
        """Slab-style intersection of *ray* against all bounding planes.

        Returns a (near, far) Intersect pair, or [] when the ray misses.
        """
        t0 = -np.inf
        t1 = np.inf
        n0 = np.array([0,0,0])
        n1 = np.array([0,0,0])
        for i, pl in enumerate(self.planes):
            n = pl.n
            denom, t = pl.intersect(ray)
            if denom < 0.:
                # ray entering this half-space: keep the farthest entry
                if t > t0:
                    i0 = i
                    t0 = t
                    n0 = n
            else:
                # ray leaving this half-space: keep the nearest exit
                if t < t1:
                    i1 = i
                    t1 = t
                    n1 = n
            log.info("i %2d denom %10.4f t %10.4f t0 %10.4f t1 %10.4f n0 %25s n1 %25s " % (i, denom, t, t0, t1, n0, n1 ))
        if t0 > t1:
            # BUGFIX: was log.into(...), an AttributeError on the logger
            log.info("no intersect")
            return []
        else:
            p0 = ray.position(t0)
            p1 = ray.position(t1)
            return Intersect(i0, t0, n0, p0),Intersect(i1, t1, n1, p1)
    def intersect_unrolled(self, ray):
        """
        http://tavianator.com/fast-branchless-raybounding-box-intersections-part-2-nans/
        # 0 * inf = nan comparisons with nan always false

        NOTE(review): unfinished stub -- assigns the plane object itself to
        t0 and returns None; not called anywhere in this module.
        """
        pl = self.planes[0]
        t0 = pl
class PrismExpected(object):
    """Analytic model of light deviation through a prism (Hecht p163).

    Works elementwise when *n* is an ndarray (one refractive index per
    photon) or as a single scalar model suitable for plotting.
    """
    def __init__(self, a, n):
        """
        :param a: apex angle of prism (radians)
        :param n: refractive index of prism; scalar or ndarray
        """
        self.a = a
        self.sa = np.sin(a)
        self.ca = np.cos(a)
        # GENERALIZED: accept plain python floats as well as np.float64
        # (the original `type(n) is np.float64` rejected python floats)
        self.single = isinstance(n, (float, np.floating))
        self.n = n
        self.nn = n*n
        self.ni = 1./n
        self.i2c = self.i2c_()
        self.i1c = self.i1c_()
    def i2c_(self):
        """Critical angle at the second (exit) face: arcsin(1/n)."""
        return np.arcsin(1./self.n)
    def i1c_(self):
        """Incident angle at the first face that maps onto the exit critical angle."""
        return np.arcsin( self.n*np.sin(self.a - np.arcsin(self.ni)))
    def st2_(self, i1):
        """sin(t2) as a function of the incident angle i1."""
        return self.sa*np.sqrt(self.nn - np.sin(i1)*np.sin(i1)) - np.sin(i1)*self.ca
    def t2_(self, i1):
        """Exit angle t2 as a function of the incident angle i1."""
        return np.arcsin(self.sa*np.sqrt(self.nn - np.sin(i1)*np.sin(i1)) - np.sin(i1)*self.ca)
    def delta_(self, i1):
        """Total deviation angle: delta = i1 + t2 - alpha."""
        return i1 + self.t2_(i1) - self.a
    def i1mindev_(self):
        """Incident angle giving minimum deviation (symmetric passage)."""
        return np.arcsin( self.n*np.sin(self.a/2.) )
    def i1_domain(self):
        """Incident-angle plotting domain just above the critical angle (degrees)."""
        assert self.single
        _i1c = self.i1c/rad
        _i2c = self.i2c/rad
        log.debug( "_i1c %s " % _i1c)
        log.debug( "_i2c %s " % _i2c)
        log.info("plt.plot(dom,dl)")
        return np.linspace(_i1c+1e-9,90,200)
    def expected(self):
        """Return (incident-angle domain, expected deviation), both in degrees."""
        assert self.single
        dom = self.i1_domain()
        dl = self.delta_(dom*rad)/rad
        return dom, dl
    def spawn_singles(self):
        """
        For plotting need domains and corresponding values, BUT must
        split up by refractive index to avoid spaghetti.
        """
        un = np.unique(self.n)
        if len(un) > 10:
            # log.warning: log.warn is a deprecated alias
            log.warning("too many distinct indices")
            return []
        return [PrismExpected(self.a, n) for n in un]
class PrismCheck(object):
    """
    Compare simulated deviation angles against the analytic expectation.

    Assumes canonical passage thru prism (ie not out the bottom): four
    recorded steps per photon -- source, 1st refraction, 2nd refraction,
    absorption.
    """
    def title(self):
        return "prism.py deviation vs incident angle"
    def __init__(self, prism, xprism, sel, mask=True):
        # NOTE(review): `mask` is never read -- confirm whether nan masking
        # was meant to be optional via this flag
        self.prism = prism
        self.xprism = xprism
        self.sel = sel
        # positions at the four recorded steps of each photon
        p0 = sel.rpost_(0)[:,:3] # light source position
        p1 = sel.rpost_(1)[:,:3] # 1st refraction point
        p2 = sel.rpost_(2)[:,:3] # 2nd refraction point
        p3 = sel.rpost_(3)[:,:3] # light absorption point
        assert len(p0) == len(p1) == len(p2) == len(p3)
        N = len(p0)
        w0 = sel.recwavelength(0)
        w1 = sel.recwavelength(1)
        w2 = sel.recwavelength(2)
        w3 = sel.recwavelength(3)
        assert len(w0) == len(w1) == len(w2) == len(w3)
        assert len(w0) == len(p0)
        # wavelength must be constant along each photon path
        assert np.all(w0 == w1)
        assert np.all(w0 == w2)
        assert np.all(w0 == w3)
        self.wx = w0
        # prism does spatial sorting, no wavelength changes as no reemission
        self.p0 = p0
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        # direction vectors along the three legs of the path
        p01 = p1 - p0
        p12 = p2 - p1
        p23 = p3 - p2
        self.p01 = p01
        self.p12 = p12
        self.p23 = p23
        cdv = costheta_( p01, p23 ) # total deviation angle
        dv = np.arccos(cdv)
        self.cdv = cdv
        self.dv = dv # simulated deviation
        lno = prism.lhs.ntile(N) # prism lhs normal, repeated
        rno = prism.rhs.ntile(N) # prism rhs normal, repeated
        # angles against the face normals at the two refractions
        ci1 = costheta_(-p01, lno ) # incident 1
        ct1 = costheta_(-p12, lno ) # transmit 1
        ci2 = costheta_( p12, rno ) # incident 2
        ct2 = costheta_( p23, rno ) # transmit 2
        i1 = np.arccos(ci1)
        t1 = np.arccos(ct1)
        i2 = np.arccos(ci2)
        t2 = np.arccos(ct2)
        self.i1 = i1
        self.t1 = t1
        self.i2 = i2
        self.t2 = t2
        # Snell check : refractive index from the angles at the 2 refractions
        n1 = np.sin(i1)/np.sin(t1)
        n2 = np.sin(t2)/np.sin(i2)
        dn = n1 - n2
        #assert dn.max() < 1e-3
        #assert dn.min() > -1e-3
        self.n1 = n1
        self.n2 = n2
        self.expected_deviation()
        self.compare_expected_with_simulated()
    def expected_deviation(self):
        """
        Evaluate the analytic deviation at each simulated incident angle,
        recording which entries are nan (near the critical angle) and a
        complementary mask of valid entries.

        Get 8 nan close to critical angle
        In [108]: pc.i1[pc.inan]*deg
        Out[108]:
        array([ 24.752, 24.752, 24.752, 24.752, 24.752, 24.752, 24.752,
        24.752])
        simulating just those around critical angle,
        see that they are just sneaking out the prism grazing the face
        """
        i1 = self.i1
        xdv = self.xprism.delta_(i1)
        # indices of nan (near-critical) and valid entries respectively
        ina = np.arange(len(xdv))[np.isnan(xdv)]
        msk = np.arange(len(xdv))[~np.isnan(xdv)]
        if len(ina)>0:
            log.warning("expected_deviation needs nan masking ina:%s len(ina):%s " % (ina, len(ina)))
        self.xdv = xdv
        self.ina = ina
        self.msk = msk
        i1c = self.xprism.i1c_()
        log.info("ina: %s " % self.ina)
        log.info("i1[ina]/rad: %s " % (i1[self.ina]/rad) )
        log.info("i1c/rad : %s " % (i1c/rad) )
    def compare_expected_with_simulated(self):
        """
        Store the masked difference (simulated - expected) plus the masked
        simulated deviation and incident angle, for plotting.
        """
        msk = self.msk
        mdf = self.dv[msk] - self.xdv[msk]
        log.info(" dv[msk]/rad %s " % (self.dv[msk]/rad) )
        log.info(" xdv[msk]/rad %s " % (self.xdv[msk]/rad) )
        log.info("mdf/rad:%s max:%s min:%s " % (mdf/rad, mdf.min()/rad, mdf.max()/rad ))
        self.mdf = mdf
        self.mdv = self.dv[msk]
        self.mi1 = self.i1[msk]
def test_intersectframe(prism):
    """
    Establish a frame at midface lhs intersection point with the prism
    """
    # fire along +X from well outside so the first hit is the lhs face
    ray = Ray([-600,0,0], [1,0,0])
    ifr = prism.intersectframe(ray)
    # NOTE(review): Prism defines no i1mindev(); the minimum-deviation angle
    # lives on PrismExpected.i1mindev_() -- this call looks like it raises
    # AttributeError.  Confirm and route through the expected model instead.
    i1m = prism.i1mindev()
    ti1m = np.tan(i1m)
    # a point 400 units back along the minimum-deviation incident direction,
    # expressed in intersect-frame coordinates
    pm_if = np.array([-1, 1./ti1m,0])*400
    pm_wf = ifr.intersect_to_world(pm_if)
    log.info(" mindev position pm_if %s pm_wf %s " % (pm_if, pm_wf ))
    s_i2w = ifr.i2w_string()
    log.info(" s_i2w %s " % s_i2w );
def scatter_plot(xq, yq, sl):
    """Scatter the angle pairs converted to degrees; *sl* optionally slices both."""
    if sl is None:
        x, y = xq, yq
    else:
        x, y = xq[sl], yq[sl]
    plt.scatter(x * deg, y * deg)
def vanity_plot(pc, sl=None):
    """Overlay the analytic deviation curves (one per refractive index)
    on the simulated deviation scatter."""
    for single in pc.xprism.spawn_singles():
        domain, delta = single.expected()
        plt.plot(domain, delta)
    scatter_plot(pc.i1, pc.dv, sl)
def deviation_plot(pc, sl=None):
    """Scatter (simulated - expected) deviation against incident angle.

    Uses the nan-masked arrays to avoid critical-angle entries:
    discrepancies up to ~0.5 degree appear only close to the critical
    angle; elsewhere they stay below ~0.1 degree.
    """
    scatter_plot(pc.mi1, pc.mdf, sl)
def oneplot(pc, log_=False):
    """Single-figure summary: analytic curves plus simulated scatter.

    *log_* is accepted for interface compatibility but currently unused.
    """
    fig = plt.figure()
    plt.title(pc.title())
    ax = fig.add_subplot(111)
    vanity_plot(pc, sl=slice(0, 10000))
    # alternatives: deviation_plot(pc, sl=slice(0, 10000)) / deviation_plot(pc)
    fig.show()
def spatial(pc):
    """2D histogram of absorption y-position against wavelength.

    BUGFIX: the original referenced an undefined `ax`; create the
    figure/axes here before plotting.
    """
    from matplotlib.colors import LogNorm
    w = pc.wx
    # absorption-point coordinates; x/z kept for interactive inspection
    x = pc.p3[:,0]
    y = pc.p3[:,1]
    z = pc.p3[:,2]
    #assert np.all(x == 1200.)
    #off = x != 1200.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist2d( w, y, bins=100, norm=LogNorm())
if __name__ == '__main__':
    args = opticks_main(tag="1", det="prism")
    plt.ion()
    #wl = np.arange(10, dtype=np.float32)*70. + 100.
    # low range is off edge of the refractive index values
    # photon-history selection string; presumably "transmit both faces then
    # surface absorb" -- confirm against the opticks flag abbreviations
    seqs = ["TO BT BT SA"]
    try:
        sel = Evt(tag=args.tag, det=args.det, seqs=seqs, args=args)
    except IOError as err:
        log.fatal(err)
        sys.exit(args.mrc)
    log.info("sel %s " % sel.brief)
    if not sel.valid:
        log.fatal("failed to load tag %s det %s " % (args.tag, args.det))
        sys.exit(1)
    boundary = Boundary("Vacuum///GlassSchottF2")
    # parameters passed as a string; presumably parsed by Shape -- confirm
    prism = Prism("60.,300,300,0", boundary)
    log.info("prism %s " % repr(prism))
    # per-photon refractive index evaluated at each recorded wavelength
    n = boundary.imat.refractive_index(sel.wl)
    xprism = PrismExpected(prism.a, n)
    pc = PrismCheck(prism, xprism, sel )
    oneplot(pc, log_=False)
    #spatial(pc)
    plt.show()
|
from pathlib import Path
# Per-axis padding offset applied on each side of the volume.
OFFSET = (10, 10, 10)
# Unpadded volume shape.
SHAPE = (20, 20, 20)
# Shape after padding both sides: s + 2*o per axis.
PADDED_SHAPE = tuple(s + o * 2 for s, o in zip(SHAPE, OFFSET))
PAD_VALUE = 1
# Dataset path inside the container file.
INTERNAL_PATH = "volume"
TEST_DIR = Path(__file__).resolve().parent
# NOTE(review): PROJECT_DIR points *into* a "tests" folder below this
# file's own directory -- confirm the name/path is intended.
PROJECT_DIR = TEST_DIR / "tests"
|
from datetime import datetime, time, timedelta
from flask.blueprints import Blueprint
import logging
from flask.templating import render_template
from flask_login import login_required, current_user
from flask.globals import request
import flask
from werkzeug.utils import redirect
from flask.helpers import url_for
from waitlist.base import db
from waitlist.storage.database import Account, CCVote, Role
from sqlalchemy.sql.expression import asc
from flask_babel import gettext
bp = Blueprint('cc_vote', __name__)
logger = logging.getLogger(__name__)
# Hard-coded CC-vote window (UTC); votes are only accepted between these.
endTime = datetime(2016, 8, 7, 11, 0, 0)
startTime = datetime(2016, 7, 4, 11, 0, 0)
@bp.route("/", methods=["GET"])
@login_required
def index():
current_time = datetime.utcnow()
if current_time < startTime or current_time > endTime:
flask.abort(404, "Voting period is from %s to %s and is over or did not start yet" % (startTime, endTime))
if current_user.type != "character":
flask.abort(403, "For voting you need to be on a normal linemember login," +
" please log out and use the linemember auth.")
if has_voted_today(current_user.get_eve_id()):
flask.abort(403, "You already voted during this Eve-Day, you can vote again after Downtime!")
# noinspection PyPep8
active_fc_accounts = db.session.query(Account).join(Account.roles).filter(
((Role.name == 'fc') | (Role.name == 'tbadge'))
& (Account.disabled == False)).order_by(asc(Account.username)).all()
# noinspection PyPep8
active_lm_accounts = db.session.query(Account).join(Account.roles).filter(
((Role.name == 'lm') | (Role.name == 'rbadge'))
& (Account.disabled == False)).order_by(asc(Account.username)).all()
return render_template("waitlist/ccvote.html", fcs=active_fc_accounts, lms=active_lm_accounts)
@bp.route("/", methods=["POST"])
@login_required
def submit():
current_time = datetime.utcnow()
if current_time < startTime or current_time > endTime:
flask.abort(404, "Voting period is from %s to %s and is over or did not start yet" % (startTime, endTime))
if current_user.type != "character":
flask.abort(403, "For voting you need to be on a normal linemember login," +
" please log out and use the linemember auth.")
fc_vote = int(request.form.get('fc-vote'))
lm_vote = int(request.form.get('lm-vote'))
if has_voted_today(current_user.get_eve_id()):
flask.abort(500, "You already voted today!")
if (not is_fc(fc_vote)) or (not is_lm(lm_vote)):
flask.abort("Either the FC you voted for is not an FC or the LM you voted for is not an LM!")
logger.info("%s is voting for fc=%d and lm=%d", current_user.get_eve_name(), fc_vote, lm_vote)
if fc_vote == -1:
fc_vote = None
if lm_vote == -1:
lm_vote = None
add_vote(current_user.get_eve_id(), fc_vote, lm_vote)
flask.flash(gettext("Thank you for voting, you can vote again after the next eve downtime!"), "success")
return redirect(url_for('index'))
def add_vote(voter_id, fc_id, lm_id):
    """Persist one CCVote row stamped with the current UTC time."""
    new_vote = CCVote(voterID=voter_id, lmvoteID=lm_id, fcvoteID=fc_id, time=datetime.utcnow())
    db.session.add(new_vote)
    db.session.commit()
def is_fc(account_id):
    """True when *account_id* is -1 (abstain) or holds an FC role (fc/tbadge)."""
    if account_id == -1:
        return True
    # .first() instead of .one(): an unknown id (e.g. a tampered form value)
    # yields False rather than raising NoResultFound (an HTTP 500)
    account = db.session.query(Account).filter(Account.id == account_id).first()
    if account is None:
        return False
    return any(role.name in ('fc', 'tbadge') for role in account.roles)
def is_lm(account_id):
    """True when *account_id* is -1 (abstain) or holds an LM role (lm/rbadge)."""
    if account_id == -1:
        return True
    # .first() instead of .one(): an unknown id (e.g. a tampered form value)
    # yields False rather than raising NoResultFound (an HTTP 500)
    account = db.session.query(Account).filter(Account.id == account_id).first()
    if account is None:
        return False
    return any(role.name in ('lm', 'rbadge') for role in account.roles)
def has_voted_today(eve_id):
    """True when *eve_id* already voted since the last server-day start (11:00 UTC)."""
    day_start = get_serverday_start()
    vote_query = db.session.query(CCVote).filter(
        (CCVote.voterID == eve_id) & (CCVote.time > day_start))
    return vote_query.first() is not None
def get_serverday_start(now=None):
    """Return the start (11:00 UTC) of the current Eve server day.

    :param now: reference datetime (UTC); defaults to datetime.utcnow().
                Exposed as a backward-compatible parameter so the
                day-rollover logic is testable.
    """
    utc_current = datetime.utcnow() if now is None else now
    today = utc_current.date()
    # before 11:00 UTC the current server day started yesterday
    if utc_current.time() < time(11, 0, 0):
        today = today - timedelta(1)
    return datetime.combine(today, time(11, 0, 0))
|
from azure.common.client_factory import get_client_from_cli_profile
from azure.mgmt.resource import ResourceManagementClient
# Build a management client from the local Azure CLI login profile.
resource_client = get_client_from_cli_profile(ResourceManagementClient)
# WARNING: destructive -- deletes the resource group and everything in it.
resource_client.resource_groups.delete("PythonAzureExample-Storage-rg")
|
import pygame, pygame.font
from menu import *
class Score():
    """Tennis-style scoreboard for a player vs. an enemy.

    Tracks points in the current game (0/15/30/40 + advantage), games won,
    and sets won.  Fixes from review:
      * upgrade_current referenced a mistyped attribute
        (current_player_scoreplayer_score -> AttributeError) and could
        never reach 40 (0, 15, 30, 45...); points now follow the tennis
        progression 0 -> 15 -> 30 -> 40 -> advantage.
      * reinit_games called reinit_current() without self (NameError) and
        reset the player's games twice instead of the enemy's.
    """
    def __init__(self):
        self.current_player_score = 0
        self.current_enemy_score = 0
        self.games_player_score = 0
        self.games_enemy_score = 0
        self.sets_player_score = 0
        self.sets_enemy_score = 0
        self.advantage = False
        self.score_font = pygame.font.SysFont(None, 50)
    def upgrade_current(self):
        """Advance the player's point score within the current game."""
        if self.current_player_score < 30:
            self.current_player_score += 15
        elif self.current_player_score == 30:
            self.current_player_score = 40
        elif self.current_player_score == 40:
            self.advantage = True
    def reinit_current(self):
        """Reset both point scores for a new game."""
        self.current_player_score = 0
        self.current_enemy_score = 0
    def upgrade_games(self):
        """Credit the player with a game (capped at 6 per set)."""
        if self.games_player_score < 6:
            self.games_player_score += 1
    def reinit_games(self):
        """Reset game counters (and current points) for a new set."""
        self.reinit_current()
        self.games_player_score = 0
        self.games_enemy_score = 0
def main():
    """Open the score window and paint its blue background.

    BUGFIX: pygame.display.set_mode takes the size as a single (width,
    height) tuple; the original passed two positional ints, which raises
    TypeError.
    """
    table_score = pygame.display.set_mode((100, 50))
    table_score.fill((0, 0, 255))
#encoding:utf-8
### Calculate category probs from fasttext model and store them into dictionary
dct={}
with open('model_fasttext.txt') as file:
lines=file.readlines()
for line in lines:
lst=line.strip('\n').split('\t')
if not dct.has_key(lst[1]):
dct.setdefault(lst[1],[0,0])
else:
dct[lst[1]][0]+=1
if lst[1]==lst[2]:
dct[lst[1]][1]+=1
probs={}
for key in dct.keys():
if dct[key][1]==0:
probs[key]=0
else:
probs[key]=float(dct[key][1])/dct[key][0]
### Extract accurate data from fasttext model based on threshold value
dict_fasttext={}
num=0
with open('model_fasttext.txt') as file:
lines=file.readlines()
for line in lines:
num+=1
lst=line.strip('\n').split('\t')
#if float(lst[-1])>=0.85 and float(probs[lst[1]])>=0.59: # 0.85,0.59
dict_fasttext[num]=[lst[0],lst[1],lst[2],float(lst[-1]),float(probs[lst[1]])]
"""
for x, y in dict_fasttext.items():
print(x,y)
"""
### Extract accurate data from CNN model based on threshold value
dict_cnn={}
num=0
with open('report_cnn.txt') as file:
lines=file.readlines()
for line in lines:
num+=1
lst=line.strip('\n').split('\t')
#if float(lst[-3])>=0.91 and float(lst[-1])>=0.71: # 0.91,0.71
dict_cnn[num]=[lst[0],lst[1],lst[2],float(lst[-3]),float(lst[-1])]
"""
for x, y in dict_cnn.items():
print(x,y)
"""
### Calculate category probs from RNN model and store them into dictionary
dct1={}
with open('predicted1.txt') as file:
lines=file.readlines()
for line in lines:
lst=line.strip('\n').split('\t')
if not dct1.has_key(lst[1]):
dct1.setdefault(lst[1],[0,0])
else:
dct1[lst[1]][0]+=1
if lst[1]==lst[2]:
dct1[lst[1]][1]+=1
probs1={}
for key in dct1.keys():
if dct1[key][1]==0:
probs1[key]=0
else:
probs1[key]=float(dct1[key][1])/dct1[key][0]
### Extract accrate data from RNN model based on threshold value
dict_rnn={}
num=0
with open('predicted1.txt') as file:
lines=file.readlines()
for line in lines:
num+=1
lst=line.strip('\n').split('\t')
#if float(lst[-1])>=0.96 and float(probs1[lst[1]])>=0.6: # 0.96, 0.6
dict_rnn[num]=[lst[0],lst[1],lst[2],float(lst[-1]),float(probs1[lst[1]])]
### Merge three models
list1=[]
list2=[]
list3=[]
for num1 in dict_fasttext.keys():
list1.append(num1)
for num2 in dict_cnn.keys():
list2.append(num2)
for num3 in dict_rnn.keys():
list3.append(num3)
list_merged=list(set().union(list1,list2,list3))
print "Fasttext: ", len(dict_fasttext.keys())
print "CNN: ", len(dict_cnn.keys())
print "RNN: ", len(dict_rnn.keys())
print "Merged: ", len(list_merged)
"""
import pdb
### Print outcomes
from itertools import chain
from collections import defaultdict
dict_combine={}
for i, j in chain(dict_fasttext.items(),dict_cnn.items()):
dict_combine.setdefault(i, [])
dict_combine[i].append(j)
dict_combine2={}
for i, j in chain(dict_combine.items(),dict_rnn.items()):
dict_combine2.setdefault(i, [])
dict_combine2[i].append(j)
for i, j in dict_combine2.items():
print i, j
"""
"""
total=0
count=0
for i in dict_combine2.keys():
total+=1
print len(dict_combine2[i])
"""
"""
if len(dict_combine2)==3:
if dict_combine2[i][1]==dict_combine2[i][2]:
count+=1
elif len(dict_combine2)==6:
if dict_combine2[i][1]==dict_combine2[i][2] or
"""
"""
combined={}
for i in list(set().union(list1,list2,list3)):
if dict_fasttext.has_key(i):
if dict_cnn.has_key(i):
if dict_rnn.has_key(i):
combined[i]=[dict_fasttext[i],dict_cnn[i],dict_rnn[i]]
else:
combined[i]=[dict_fasttext[i],dict_cnn[i]]
else:
if dict_rnn.has_key(i):
combined[i]=[dict_fasttext[i],dict_rnn[i]]
else:
combined[i]=[dict_fasttext[i]]
elif dict_fasttext.has_key(i)==False:
if dict_cnn.has_key(i):
if dict_rnn.has_key(i):
combined[i]=[dict_cnn[i],dict_rnn[i]]
else:
combined[i]=[dict_cnn[i]]
else:
if dict_rnn.has_key(i):
combined[i]=[dict_rnn[i]]
for i ,j in combined.items():
for m in j:
print j
total=0
count=0
for i in combined.keys():
total+=1
if len(combined[i])==1:
if combined[i][0][1]==combined[i][0][2]:
count+=1
elif len(combined[i])==2:
if combined[i][0][1]==combined[i][0][2] or combined[i][1][1]==combined[i][1][2]:
count+=1
#elif len(combined[i])==3:
#if combined[i][0][1]==combined[i][0][2] or combined[i][1][1]==combined[i][1][2] or combined[i][2][1]==combined[i][2][2]:
#count+=1
print('total: ',total)
print('count: ',count)
print('accuracy: ',float(count)/total)
"""
dictdict={}
### Voting first!
# Majority vote across the three models; ties broken by keeping the record
# whose confidence (field [-2]) is highest.
for i in list(set().union(list1,list2,list3)):
    # NOTE(review): 'break' aborts the whole loop at the first id > 62500;
    # since set-union order is arbitrary this may skip other ids
    # unpredictably — 'continue' was probably intended. TODO confirm.
    if i>62500:
        break
    if dict_fasttext[i][1]==dict_cnn[i][1]==dict_rnn[i][1]:
        # All three agree: keep the most confident record.
        if dict_fasttext[i][-2]>=dict_cnn[i][-2] and dict_fasttext[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_fasttext[i]
        elif dict_cnn[i][-2]>=dict_fasttext[i][-2] and dict_cnn[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_cnn[i]
        elif dict_rnn[i][-2]>=dict_fasttext[i][-2] and dict_rnn[i][-2]>=dict_cnn[i][-2]:
            dictdict[i]=dict_rnn[i]
    elif dict_fasttext[i][1]==dict_cnn[i][1]:
        # Two-way agreement: pick the more confident of the agreeing pair.
        if dict_fasttext[i][-2]>=dict_cnn[i][-2]:
            dictdict[i]=dict_fasttext[i]
        else:
            dictdict[i]=dict_cnn[i]
    elif dict_fasttext[i][1]==dict_rnn[i][1]:
        if dict_fasttext[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_fasttext[i]
        else:
            dictdict[i]=dict_rnn[i]
    elif dict_cnn[i][1]==dict_rnn[i][1]:
        if dict_cnn[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_cnn[i]
        else:
            dictdict[i]=dict_rnn[i]
    else:
        # Total disagreement: fall back to the single most confident model.
        if dict_fasttext[i][-2]>=dict_cnn[i][-2] and dict_fasttext[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_fasttext[i]
        elif dict_cnn[i][-2]>=dict_fasttext[i][-2] and dict_cnn[i][-2]>=dict_rnn[i][-2]:
            dictdict[i]=dict_cnn[i]
        elif dict_rnn[i][-2]>=dict_fasttext[i][-2] and dict_rnn[i][-2]>=dict_cnn[i][-2]:
            dictdict[i]=dict_rnn[i]
# Ids 62501..62511 appear to be covered only by fasttext and cnn — TODO
# confirm; keep the more confident of the two.
for i in range(62501,62512):
    if dict_fasttext[i][-2]>=dict_cnn[i][-2]:
        dictdict[i]=dict_fasttext[i]
    else:
        dictdict[i]=dict_cnn[i]
"""
# Merge three models by voting!
for i in list(set().union(list1,list2,list3)):
if dict_fasttext.has_key(i) and dict_cnn.has_key(i) and dict_rnn.has_key(i):
if dict_fasttext[i][1]==dict_cnn[i][1] or dict_fasttext[i][1]==dict_rnn[i][1] or dict_fasttext[i][1]==dict_cnn[i][1]==dict_rnn[i][1]:
dictdict[i]=dict_fasttext[i]
elif dict_cnn[i][1]==dict_rnn[i][1]:
dictdict[i]=dict_cnn[i]
elif dict_fasttext[i][1]!=dict_cnn[i][1]!=dict_rnn[i][1]:
if dict_fasttext[i][-2]>=dict_cnn[i][-2] and dict_fasttext[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_fasttext[i]
elif dict_cnn[i][-2]>=dict_fasttext[i][-2] and dict_cnn[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_cnn[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i) and dict_rnn.has_key(i):
if dict_cnn[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_cnn[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i)==False and dict_rnn.has_key(i):
if dict_fasttext[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_fasttext[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i) and dict_rnn.has_key(i)==False:
if dict_fasttext[i][-2]>=dict_cnn[i][-2]:
dictdict[i]=dict_fasttext[i]
else:
dictdict[i]=dict_cnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i)==False and dict_rnn.has_key(i)==False:
dictdict[i]=dict_fasttext[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i) and dict_rnn.has_key(i)==False:
dictdict[i]=dict_cnn[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i)==False and dict_rnn.has_key(i):
dictdict[i]=dict_rnn[i]
"""
"""
# Merge three models by keeping the one with the highest case prob
for i in list(set().union(list1,list2,list3)):
if dict_fasttext.has_key(i) and dict_cnn.has_key(i) and dict_rnn.has_key(i):
if dict_fasttext[i][-2]>=dict_cnn[i][-2] and dict_fasttext[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_fasttext[i]
elif dict_cnn[i][-2]>=dict_fasttext[i][-2] and dict_cnn[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_cnn[i]
elif dict_rnn[i][-2]>=dict_fasttext[i][-2] and dict_rnn[i]>=dict_cnn[i][-2]:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i) and dict_rnn.has_key(i):
if dict_cnn[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_cnn[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i)==False and dict_rnn.has_key(i):
if dict_fasttext[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_fasttext[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i) and dict_rnn.has_key(i)==False:
if dict_fasttext[i][-2]>=dict_cnn[i][-2]:
dictdict[i]=dict_fasttext[i]
else:
dictdict[i]=dict_cnn[i]
elif dict_fasttext.has_key(i) and dict_cnn.has_key(i)==False and dict_rnn.has_key(i)==False:
dictdict[i]=dict_fasttext[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i) and dict_rnn.has_key(i)==False:
dictdict[i]=dict_cnn[i]
elif dict_fasttext.has_key(i)==False and dict_cnn.has_key(i)==False and dict_rnn.has_key(i):
dictdict[i]=dict_rnn[i]
"""
"""
# Merge Two models by keeping the one with the highest case prob
for i in list(set().union(list2,list3)):
if dict_cnn.has_key(i) and dict_rnn.has_key(i):
if dict_cnn[i][-2]>=dict_rnn[i][-2]:
dictdict[i]=dict_cnn[i]
else:
dictdict[i]=dict_rnn[i]
elif dict_cnn.has_key(i) and dict_rnn.has_key(i)==False:
dictdict[i]=dict_cnn[i]
elif dict_cnn.has_key(i)==False and dict_rnn.has_key(i):
dictdict[i]=dict_rnn[i]
"""
#for i in dictdict.keys():
    #print '\t'.join([dictdict[i][0],dictdict[i][1],dictdict[i][2],str(dictdict[i][3]),"i",str(dictdict[i][4])])
# Accuracy: fraction of merged records whose fields [1] and [2]
# (prediction vs. ground truth — TODO confirm field meaning) agree.
total=0
count=0
for i in dictdict.keys():
    total+=1
    if dictdict[i][1]==dictdict[i][2]:
        count+=1
print 'total: ',total
print 'count: ',count
print 'accuracy: ',float(count)/total
# NOTE(review): assumes dictdict keys are exactly 1..len(dictdict) with no
# gaps; a missing id would raise KeyError here.
for i in range(1,len(dictdict)+1):
    print '\t'.join([dictdict[i][0],dictdict[i][1],dictdict[i][2],str(dictdict[i][3]),'hhh',str(dictdict[i][4])])
|
from django.shortcuts import render, get_object_or_404, get_list_or_404
from .models import Article, Media
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
# Home page: lists the ten most recently published articles.
class HomeView(generic.ListView):
    """List view backing the home page with the newest articles."""

    template_name = "expose/home.html"
    context_object_name = 'recentArticles'

    def get_queryset(self):
        """Return the ten most recently dated articles, newest first."""
        latest_first = Article.objects.order_by('-article_date')
        return latest_first[:10]
# Detail page for a single article; 404 when the id is unknown.
def article(request, article_id):
    """Render the detail template for the article with primary key article_id."""
    found = get_object_or_404(Article, pk=article_id)
    return render(request, 'expose/article.html', {'article': found})
# Landing page of the article archive.
def archive(request):
    """Render the archive index template."""
    context = {'archive': ""}
    return render(request, 'expose/archive.html', context)
# Archive filtered to a single year (?year= query parameter).
def archive_year(request):
    """Render every article published in the requested year."""
    year = request.GET['year']
    matches = Article.objects.filter(article_date__year=year)
    return render(request, 'expose/archive_year.html', {'articleList': matches})
# Archive filtered to one month of one year (?year= and ?month= parameters).
def archive_month(request):
    """Render every article published in the requested year and month."""
    month = request.GET['month']
    year = request.GET['year']
    matches = Article.objects.filter(article_date__year=year,
                                     article_date__month=month)
    return render(request, 'expose/archive_month.html', {'articleList': matches})
def author_article(request):
    """Render the article-authoring form page (no context needed)."""
    return render(request, 'expose/author_article.html', {})
def submit_article(request):
    """Create an Article from the POSTed form fields and redirect home.

    Vote counters start at zero for a newly submitted article.
    """
    new_article = Article(
        article_title=request.POST['title'],
        article_date=request.POST['date'],
        article_text=request.POST['text'],
        article_upvote=0,
        article_downvote=0,
    )
    new_article.save()
    return HttpResponseRedirect(reverse('expose:HomeView', args=()))
# this is a psuedo code for the coloring project
import tensorflow as tf
import numpy as np
from glob import glob
import math
import sys
import random
#import the necessary packages above I think should be useful
'''
To represent the neural network, I want to create an object with methods and
attributes. The attributes could record the values for the filters and similar
weights, and the methods will be used to update the weights.
'''
class coloring_machine():
    """Holds the colorization network's hyper-parameters and (later) weights."""

    def __init__(self, input_image_size=256, batchsize=5):
        """Store the configuration.

        :param input_image_size: side length of the (square) input images
        :param batchsize: training examples per weight update; the author
            planned to tune this in the 4-10 range
        """
        self.batch_size = batchsize
        # Fix: input_image_size was accepted but never stored; keep it on the
        # instance so the network layers can use it.
        self.input_image_size = input_image_size
|
#! /usr/bin/env python
#coding: utf-8
import timeit
def break_rings(connections):
    """Greedily count how many rings must be broken so none remain linked.

    Each connection is a 2-element set of ring ids. On every pass the ring
    whose removal leaves the fewest linked rings is broken; ties go to the
    ring with the most connections. Returns the number of rings broken.
    """
    from itertools import chain

    def rings_of(links):
        # Every ring id still appearing in some connection.
        return set(chain(*links))

    def without(ring, links):
        # Connections surviving once *ring* is broken.
        return [pair for pair in links if ring not in pair]

    rings = rings_of(connections)
    # Fix: empty input used to be counted as one break; nothing is linked,
    # so nothing needs breaking.
    if not rings:
        return 0
    broken = 0
    while True:
        best_rings, best_links = len(rings), len(connections)
        chosen = None
        for candidate in rings:
            remaining = len(rings_of(without(candidate, connections)))
            if remaining < best_rings:
                # Strictly better reduction of linked rings.
                best_rings = remaining
                chosen = candidate
            elif remaining == best_rings:
                # Tie-break: prefer the candidate that removes the most links.
                surviving = len(without(candidate, connections))
                if surviving < best_links:
                    best_links = surviving
                    chosen = candidate
        connections = without(chosen, connections)
        rings = rings_of(connections)
        broken += 1
        if not rings:
            break
    return broken
def break_rings_r1(rings):
    """Exact solver: the smallest set of rings whose removal unlinks all.

    This is the NP-hard minimum vertex cover problem, so covers of
    increasing size are brute-forced; the first size that leaves no intact
    connection is minimal.
    """
    from itertools import combinations
    vertices = set.union(*rings)
    for size in range(1, len(vertices)):
        for cover in combinations(vertices, size):
            leftover = [pair.difference(cover) for pair in rings]
            # A pair with fewer than 2 survivors is no longer a link.
            if all(len(pair) < 2 for pair in leftover):
                return size
def break_rings_r2(connections):
    """Exact recursive solver for the minimum number of rings to break.

    Any solution must break one of the two rings sharing the first
    connection, so branch on both and take the minimum.

    Fix: the recursion previously called the greedy break_rings() helper
    instead of itself, which both mixed heuristic results into an exact
    branch-and-bound and over-counted (the greedy helper returned 1 for an
    empty connection list).
    """
    if len(connections) == 0:
        # No links left: nothing more to break.
        return 0
    r1, r2 = connections[0]
    return min(
        break_rings_r2([c for c in connections if r1 not in c]) + 1,
        break_rings_r2([c for c in connections if r2 not in c]) + 1)
def break_rings_r3(rings):
    '''Solve via maximum independent set: the answer is the number of rings
    minus the size of the largest set of rings no two of which are linked.'''
    from functools import reduce
    all_vertices = reduce(set.union, rings)
    # adj[v]: v itself plus every ring sharing a connection with v.
    adj = dict((v, reduce(set.union, (r for r in rings if v in r))) for v in all_vertices)
    def backtrack(size, vertices, x):
        """Bron-Kerbosch"""
        # size: independent vertices chosen so far; vertices: candidates
        # still available; x: already-explored vertices used for pruning.
        if not vertices and not x:
            yield size
        # NOTE(review): vertices.pop() mutates the candidate set in place,
        # which doubles as the loop's progress condition.
        while vertices and not any(adj[i].isdisjoint(vertices) for i in x):
            v = vertices.pop()
            yield from backtrack(size + 1, vertices - adj[v], x - adj[v])
            x.add(v)
    return len(all_vertices) - max(backtrack(0, all_vertices, set()))
def main(function):
    '''Benchmark the solver named *function* with timeit: run the shared
    assertion suite 100 times and print the elapsed seconds.'''
    # The statement string is executed by timeit in a fresh namespace,
    # importing the solver from this module by name.
    s = """
from __main__ import %s
assert %s(({1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {4, 6})) == 3, "example"
assert %s(({1, 2}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4})) == 3, "All to all"
assert %s(({5, 6}, {4, 5}, {3, 4}, {3, 2}, {2, 1}, {1, 6})) == 3, "Chain"
assert %s(({8, 9}, {1, 9}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {8, 7})) == 5, "Long chain"
assert %s(({3,4},{5,6},{2,7},{1,5},{2,6},{8,4},{1,7},{4,5},{9,5},{2,3},{8,2},{2,4},{9,6},{5,7},{3,6},{1,3},)) == 5
assert %s(({1,2},{1,3},{1,5},{2,3},{2,4},{4,6},{5,6},)) == 3
"""%(function,function,function,function,function,function,function)
    print("%s\n\t%.6f s"%(function,timeit.timeit(s, number=100)))
# Benchmark every solver variant when run as a script (100 runs each).
if __name__ == '__main__':
    main("break_rings")
    main("break_rings_r1")
    main("break_rings_r2")
    main("break_rings_r3")
|
import shutil
from datetime import datetime
import pytest
import importlib.resources
import sys
# Plotting needs matplotlib; skip the whole test module when it is absent.
try:
    import matplotlib  # noqa: F401
except ImportError as e:
    pytest.skip(f"Matplotlib missing {e}", allow_module_level=True)
import gemini3d.web
import gemini3d.plot
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python >= 3.8 needed for copytree test")
@pytest.mark.parametrize(
    "name",
    ["mini2dew_glow", "mini2dns_glow", "mini3d_glow"],
)
def test_plot(name, tmp_path):
    """Fetch reference data if needed, plot one frame, and count the PNGs."""
    # Anchor on the packaged test-data directory to download the dataset.
    with importlib.resources.path("gemini3d.tests.data", "__init__.py") as anchor:
        ref_dir = gemini3d.web.download_and_extract(name, anchor.parent)
    # Plot against a throwaway copy so the reference data stays pristine.
    shutil.copytree(ref_dir, tmp_path, dirs_exist_ok=True)
    gemini3d.plot.frame(tmp_path, datetime(2013, 2, 20, 5), saveplot_fmt="png")
    pngs = sorted((tmp_path / "plots").glob("*.png"))
    assert len(pngs) == 66
|
def div(a=None, b=None):
    """Divide a by b, printing the quotient rounded to 2 decimals.

    If either operand is missing, both are read interactively. Returns the
    full-precision quotient, or None when the input is not numeric or the
    divisor is zero.

    Fix: the original prompted unconditionally and silently ignored any
    arguments the caller supplied; it also fell off the end of the
    ZeroDivisionError branch, returning None implicitly.
    """
    if a is None or b is None:
        try:
            a = int(input("Write first number: "))
            b = int(input("Write second number: "))
        except ValueError as val:
            print("Values you've wrote are not numbers!", val)
            return None
    try:
        result = a / b
        print(round(result, 2))
        return result
    except ZeroDivisionError as zeroerr:
        print("Division by 0!", zeroerr)
        return None
div()
|
#------------------------------------------------------------------------------
# Copyright 2008-2012 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://joinup.ec.europa.eu/system/files/EN/EUPL%20v.1.1%20-%20Licence.pdf
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
import sys
import exceptions
from wnodes.utils import wsocket
def print_list_header():
    '''Print the header line shown above the image-tag listing.'''
    print 'Image Tag Details \n'
def print_image_tag_details(info):
    '''Print image-tag details as an aligned two-column (Name, arch) table.

    info is expected to map tag names to sequences whose third element
    (value[2]) is the architecture -- TODO confirm against the caller.
    Non-dict input is silently ignored.
    '''
    if type(info) == dict:
        msg_info = []
        for key, value in info.items():
            msg_info.append((key, value[2]))
        msg_header = 'Name'
        # Widths of all tag names; pad names or header so columns line up.
        # NOTE(review): an empty dict makes max(key_len) raise ValueError.
        key_len = [len(x) for x,y in msg_info]
        if max(key_len) < len(msg_header):
            # Header is the widest: pad every name out to the header width.
            a = len(msg_header) - max(key_len)
            msg_update = []
            for i, v in enumerate(msg_info):
                msg_new = ''
                if len(v[0]) < max(key_len)+a:
                    b = max(key_len) - len(v[0]) + a
                    msg_new = v[0] + ' '*b
                if msg_new == '':
                    msg_update.append((v[0], v[1]))
                else:
                    msg_update.append((msg_new, v[1]))
        else:
            # Some name is at least as wide as the header: pad the header
            # and any shorter names up to the widest name.
            a = max(key_len) - len(msg_header)
            msg_header += ' '*a
            msg_update = []
            for i, v in enumerate(msg_info):
                msg_new = ''
                if len(v[0]) < max(key_len):
                    b = max(key_len) - len(v[0])
                    msg_new = v[0] + ' '*b
                if msg_new == '':
                    msg_update.append((v[0], v[1]))
                else:
                    msg_update.append((msg_new, v[1]))
        msg_header += ' arch'
        print msg_header
        for x in msg_update:
            print x[0],x[1]
class ConnectionError(exceptions.Exception):
    '''Raised when the name-server request in ListTags fails.'''
    pass
class ListTags:
    '''Fetch the image-tag catalogue from the WNoDeS name server.'''
    def __init__(self, user_data):
        '''Connect to the name server described by
        user_data['nameserver'] and request the image list.
        Raises ConnectionError on any failure.'''
        try:
            c = wsocket.Connection(user_data['nameserver']['ns_host'], \
                int(user_data['nameserver']['ns_port']))
            msg = {'list_image':
                [""]}
            self.images_tags = c.sendRequest(msg)
        except:
            # NOTE(review): bare except also hides programming errors; the
            # original exception is summarised via sys.exc_info() below.
            err_msg = "Cannot get images tags from"
            err_msg += " host: %s" % user_data['nameserver']['ns_host']
            err_msg += " port: %s" % user_data['nameserver']['ns_port']
            err_msg += " error: %s %s %s" % sys.exc_info()[:]
            raise ConnectionError(err_msg)
    def get_tags(self):
        '''Return the raw tag data received from the name server.'''
        return self.images_tags
|
from lib.classifier import Classifier
import os
import pickle
import shutil
from sklearn.ensemble import RandomForestClassifier
class RandomForest(Classifier):
    """
    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the data set and uses averaging to
    improve the predictive accuracy and control over-fitting.

    Each variation is a directory under data/classifier/randomForest named
    after its parameters; the active variation carries a 'default_' prefix.
    """

    def __init__(self):
        """
        Initiate the classifier by creating the Random Forest directory in
        data/classifier if it does not exist yet.
        """
        # Directory holding every Random Forest variation.
        self.classifier_directory = 'data/classifier/randomForest'
        os.makedirs(self.classifier_directory, exist_ok=True)

    def classify(self, samples, name):
        """
        Load the given classifier variation and use it to classify the
        samples. If the classifier implementation does not exist, raise a
        FileNotFoundError.
        :param samples: Samples generated with a feature
        :param name: Name of the classifier variation
        :return: List of probabilities
        """
        model_path = self.__get_default_name(name)
        if not os.path.isfile(model_path):
            raise FileNotFoundError
        # Fix: load through a context manager; the file handle used to leak.
        with open(model_path, "rb") as model_file:
            classifier = pickle.load(model_file)
        # One probability vector per sample.
        return [classifier.predict_proba(sample)[0] for sample in samples]

    def train(self, samples, targets, name):
        """
        Train the classifier with the given samples and targets and then
        save the result with the given name.
        :param samples: Samples generated with a feature
        :param targets: Targets of the samples for the training process
        :param name: Classifier variation name
        :return: Nothing
        """
        parameters = self.__load_default()
        # The default variation's single parameter is the number of trees.
        classifier = RandomForestClassifier(int(parameters[0]))
        classifier.fit(samples, targets)
        # Fix: dump through a context manager; the file handle used to leak.
        with open(self.__get_default_name(name), "wb") as model_file:
            pickle.dump(classifier, model_file)

    def is_trained(self):
        """
        Get if the default random forest variation is trained or not.
        :return: True if it is trained, otherwise False
        """
        for name in os.listdir(self.classifier_directory):
            # The default variation is trained when its directory is non-empty.
            if 'default' in name and os.listdir(os.path.join(self.classifier_directory, name)):
                return True
        return False

    def default_exist(self):
        """
        Get if a default variation exists.
        :return: True if the default variation exists otherwise False
        """
        return any('default' in name for name in os.listdir(self.classifier_directory))

    def show(self):
        """
        Show all the available variations.
        :return: List of all variations (space-joined parameter strings)
        """
        return [' '.join(name.split('_')) for name in os.listdir(self.classifier_directory)]

    def delete(self, parameters):
        """
        Delete the given Random Forest variation. After the removal, a new
        default variation must be set. Raise a FileNotFoundError if the
        given parameters do not represent an existent variation.
        :param parameters: Number of trees
        :return: Nothing
        """
        if not self.__check_variation_existence(parameters):
            raise FileNotFoundError
        # Loop over all variation directories until the match is removed.
        for name in os.listdir(self.classifier_directory):
            check = True
            if 'default' in name:
                # Skip the leading 'default' keyword when comparing.
                params = name.split('_')
                for par_1, par_2 in zip(parameters, params[1:]):
                    if par_1 != par_2:
                        check = False
                        break
                if check:
                    shutil.rmtree(os.path.join(self.classifier_directory, name))
                    break
            else:
                for par_1, par_2 in zip(parameters, name.split('_')):
                    if par_1 != par_2:
                        check = False
                        break
                if check:
                    shutil.rmtree(os.path.join(self.classifier_directory, name))
                    break

    def add(self, parameters):
        """
        Add a new Random Forest variation to the system. The new variation
        is set as default automatically. If the variation already exists
        raise a FileExistsError. If the parameters are not of the right
        format raise an AttributeError.
        :param parameters: Number of trees
        :return: Nothing
        """
        if len(parameters) != 1 or not self.__check_int(parameters[0]):
            raise AttributeError
        if self.__check_variation_existence(parameters):
            raise FileExistsError
        os.makedirs(os.path.join(self.classifier_directory, '_'.join(parameters)))
        self.set_default(parameters)

    def set_default(self, parameters):
        """
        Set as default the given Random Forest variation. Raise a
        FileNotFoundError if the given parameters do not represent an
        existent variation.
        :param parameters: Number of trees
        :return: Nothing
        """
        if not self.__check_variation_existence(parameters):
            raise FileNotFoundError
        for n in os.listdir(self.classifier_directory):
            if 'default' in n:
                # Demote the current default unless it already matches the
                # requested parameters.
                check = False
                params = n.split('_')
                for par_1, par_2 in zip(parameters, params[1:]):
                    if par_1 != par_2:
                        os.rename(os.path.join(self.classifier_directory, n),
                                  os.path.join(self.classifier_directory, '_'.join(params[1:])))
                        check = True
                        break
                if check:
                    continue
            else:
                # Promote the matching non-default variation to default.
                check = False
                for par_1, par_2 in zip(parameters, n.split('_')):
                    if par_1 != par_2:
                        check = True
                        break
                if check:
                    continue
                os.rename(os.path.join(self.classifier_directory, n),
                          os.path.join(self.classifier_directory, 'default_' + n))

    # ############################ HELPER ############################
    @staticmethod
    def __check_int(s):
        """
        Check that the given string is an integer.
        :param s: Input string
        :return: True if the input is an integer otherwise False
        """
        try:
            int(s)
            return True
        except ValueError:
            return False

    def __check_variation_existence(self, parameters):
        """
        Check if the given variation exists (as plain or default directory).
        :param parameters: Number of trees
        :return: True if the variation exists otherwise False
        """
        name = '_'.join(parameters)
        return (os.path.exists(os.path.join(self.classifier_directory, name))
                or os.path.exists(os.path.join(self.classifier_directory, 'default_' + name)))

    def __load_default(self):
        """
        Get parameters of the default implementation.
        :return: Parameter list of the default implementation, or None
        """
        for name in os.listdir(self.classifier_directory):
            if 'default' in name:
                # Drop the leading 'default' keyword.
                return name.split('_')[1:]
        return None

    def __get_default_name(self, name):
        """
        Get the model file path inside the default variation directory for
        the given feature variation name.
        :param name: Name of the feature variation
        :return: Path of the serialized model file
        """
        return os.path.join(self.classifier_directory, 'default_' + '_'.join(self.__load_default()), name + ".out")
|
from typing import Any, Mapping, Optional
from .base import api_function, BaseFunction
from ..exceptions import BackendAPIError
from ..request import Request
from ..session import api_session
__all__ = (
'Admin',
)
class Admin(BaseFunction):
    """
    Provides the function interface for making admin GraphQL queries.

    .. note::

      Depending on the privilege of your API access key, you may or may not
      have access to querying/mutating server-side resources of other
      users.
    """

    @api_function
    @classmethod
    async def query(
        cls,
        query: str,
        variables: Optional[Mapping[str, Any]] = None,
    ) -> Any:
        """
        Sends the GraphQL query and returns the response.

        :param query: The GraphQL query string.
        :param variables: An optional key-value dictionary used to fill the
            interpolated template variables in the query.
        :returns: The object parsed from the response JSON string.
        """
        return await cls._query(query, variables)

    @classmethod
    async def _query(
        cls,
        query: str,
        variables: Optional[Mapping[str, Any]] = None,
    ) -> Any:
        """
        Internal async implementation of query(), reusable by other
        functional APIs that need to make GQL requests.
        """
        payload = {
            'query': query,
            'variables': variables if variables else {},
        }
        # API v6 (2021-08-15) servers expose the dedicated /admin/gql
        # endpoint with structured error reporting.
        if api_session.get().api_version >= (6, '20210815'):
            request = Request('POST', '/admin/gql')
            request.set_json(payload)
            async with request.fetch() as resp:
                reply = await resp.json()
                errors = reply.get("errors", [])
                if errors:
                    raise BackendAPIError(400, reason="Bad request", data={
                        'type': 'https://api.backend.ai/probs/graphql-error',
                        'title': 'GraphQL-generated error',
                        'data': errors,
                    })
                return reply["data"]
        # Legacy endpoint: return the raw JSON body unchanged.
        request = Request('POST', '/admin/graphql')
        request.set_json(payload)
        async with request.fetch() as resp:
            return await resp.json()
|
#!/usr/bin/env python
# pip install gitpython
# pip install pygerrit2
from git import Repo
from pygerrit2 import GerritRestAPI, HTTPBasicAuth
import os
import pprint
import re
# Configuration variables.
repo = "fuchsia"  # Gerrit project to mirror
owner = "smklein"  # only open changes owned by this user are fetched
def minify_subject(subject):
    """Turn a Gerrit CL subject into a safe 'SYNC_'-prefixed branch name.

    Brackets and spaces become underscores, common punctuation is dropped,
    any remaining non-word characters are stripped, and empty segments are
    collapsed.
    """
    cleaned = subject.lower()
    for ch in '[] ':
        cleaned = cleaned.replace(ch, '_')
    for ch in ',/\'"':
        cleaned = cleaned.replace(ch, '')
    cleaned = re.sub(r'\W+', '', cleaned)
    words = [w for w in cleaned.split('_') if w]
    return 'SYNC_' + '_'.join(words)
# Initialize Git repo.
# Locate the enclosing working tree; a bare repo cannot hold the branches
# and patch files this script creates.
git_repo = Repo(search_parent_directories=True)
assert not git_repo.bare
# Rebase on top of the parent branch.
def git_rebase():
    '''Rebase the current branch onto its configured upstream.'''
    git_repo.git.rebase()
# Checkout the branch 'name'.
def git_checkout(name):
    '''Switch the working tree to the existing local branch *name*.'''
    git_repo.heads[name].checkout()
# Create branch 'name', rebased on top of the current branch.
def git_branch(name):
    '''Create (or force-recreate) branch *name*, set the branch we came
    from as its upstream, then rebase onto it.'''
    print "Creating branch: " + name
    old_branch = git_repo.head.reference
    try:
        git_repo.create_head(name)
    except Exception as err:
        # Branch already exists: force-delete it and start fresh.
        print "ERROR: Could not create branch. Deleting, trying again..."
        git_repo.git.branch("-D", name)
        git_repo.create_head(name)
    git_checkout(name)
    # Track the originating branch so git_rebase() knows the upstream.
    git_repo.git.branch("-u", str(old_branch))
    git_rebase()
# Delete the current branch, swap to master.
def git_delete_current_branch():
    '''Switch back to master and force-delete the branch we were on.'''
    old_branch = git_repo.head.reference
    git_checkout("master")
    git_repo.git.branch("-D", str(old_branch))
# Apply the patch file.
def git_apply(patch_file_name):
    '''Apply *patch_file_name* with `git am`; return True on success,
    False (after aborting the am session) on failure.'''
    try:
        # "--keep-non-patch" avoids trimming non-patch [TAG] parts of the commit
        # message.
        git_repo.git.am(patch_file_name, "--keep-non-patch")
        return True
    except:
        # NOTE(review): bare except — any failure is treated as a bad patch.
        print " FAILED TO APPLY PATCH"
        git_repo.git.am("--abort")
        return False
git_checkout("master")
rest = GerritRestAPI(url='https://fuchsia-review.googlesource.com')
changes = rest.get("/changes/?q=owner:"+owner+"%20status:open%20repo:"+repo)
# Git Commit --> Change Object.
git_commit_map = dict()
# (change object, commit object)
change_list = []
for change in changes:
# XXX This seems unnecessary...
# if not change['mergeable']:
# print " CANNOT MERGE: " + change['subject']
# continue
id = change['id']
commit = rest.get("/changes/" + id + "/revisions/current/commit")
git_commit_map[commit["commit"]] = change
change_list.append((change, commit))
# Git Commit --> Branch Name (once created).
branch_name_map = dict()
while change_list:
(change, commit) = change_list.pop(0)
id = change['id']
branch_name = minify_subject(change['subject'])
current = commit["commit"]
parent = str(commit["parents"][0]["commit"])
if parent in git_commit_map:
# If the parent hasn't been processed yet, come back later.
if parent not in branch_name_map:
print " SKIPPING: " + change['change_id'] + " : " + \
change['subject']
change_list.append((change, commit))
continue
else:
git_checkout(branch_name_map[parent])
else:
git_checkout("master")
# Actually process this element.
print change['change_id'] + " : " + branch_name
git_branch(branch_name)
patch = rest.get("/changes/" + id + "/revisions/current/patch")
patch_file_name = "patchfile"
patch_file_path = git_repo.working_tree_dir + "/" + patch_file_name
patch_file = open(patch_file_path, 'w')
patch_file.write(patch)
patch_file.close()
if not git_apply(patch_file_name):
# If we failed to apply, bail out.
git_delete_current_branch()
del git_commit_map[current]
branch_name_map[current] = branch_name
os.unlink(patch_file_path)
|
from __future__ import division
def defineNodes(nodes):
    '''Interactively populate *nodes*: one attribute dict per node holding
    its outgoing links and hub/authority scores (raw and normalized),
    all initialised to zero.'''
    # NOTE(review): Python 2 input() evaluates the typed text, so node
    # names must be entered as quoted strings.
    n = input('How many nodes? ')
    for i in range(0,n):
        nodes[input("Enter unique name for node " + str(i+1) + ": ")] = {}
    for k in nodes.keys():
        nodes[k]['LinkedTo'] = [] # establishes node as a hub point
        nodes[k]['HubScore'] = 0 # initializes hub score to 0
        nodes[k]['AuthScore'] = 0 # initializes authority score to 0
        nodes[k]['HubScoreNorm'] = 0.0
        nodes[k]['AuthScoreNorm'] = 0.0
    for k in nodes.keys():
        nodes[k]['LinkedTo'] = buildHubLinks(k)
    return nodes
def buildHubLinks(k):
    '''Prompt for the authorities hub *k* links to (whitespace-separated
    ints) and return them as a sorted list.'''
    hubs = [int(x) for x in raw_input("For Hub " + str(k) + ", enter Authorities linked to: ").split()]
    if (len(hubs) > 1) : hubs.sort()
    return hubs
def printNodes(nodes):
    '''Dump every node and its attribute dict.'''
    print "List of Nodes"
    print "==========================="
    for k in nodes.keys():
        # Under Python 2 this prints a (name, attrs) tuple.
        print(k, nodes[k])
    print ""
def printHubRankings(nodes, hubs):
    '''Print the normalized hub score of every hub.'''
    print "Hub Rankings"
    print "==========================="
    for h in hubs:
        print str(h) + " : " + str(nodes[h]['HubScoreNorm'])
    print ""
def printAuthorityRankings(nodes, authorities):
    '''Print the normalized authority score of every authority.'''
    print "Authority Rankings"
    print "==========================="
    for a in authorities:
        print str(a) + " : " + str(nodes[a]['AuthScoreNorm'])
    print ""
def initHubScores(nodes):
    '''Seed every hub (node with outgoing links) with score 1, then store
    each node's share of the total as its normalized hub score.'''
    for name in nodes:
        # Only nodes that link out act as hubs.
        if nodes[name]['LinkedTo']:
            nodes[name]['HubScore'] = 1
    total = sum(nodes[name]['HubScore'] for name in nodes)
    for name in nodes:
        nodes[name]['HubScoreNorm'] = round(nodes[name]['HubScore'] / total, 6)
    return nodes
def updateHubScores(nodes, hubs):
    '''Recompute each hub's score as the sum of the authority scores of the
    nodes it links to, then normalize across all nodes.'''
    for hub in hubs:
        linked = nodes[hub]['LinkedTo']
        nodes[hub]['HubScore'] = sum(nodes[auth]['AuthScore'] for auth in linked)
    total = sum(nodes[name]['HubScore'] for name in nodes)
    for name in nodes:
        nodes[name]['HubScoreNorm'] = round(nodes[name]['HubScore'] / total, 6)
    return nodes
def updateAuthScores(nodes, hubs, authorities):
    '''Recompute each authority's score as the sum of the hub scores of the
    hubs linking to it, then normalize across all nodes.'''
    for auth in authorities:
        nodes[auth]['AuthScore'] = sum(
            nodes[hub]['HubScore'] for hub in hubs if auth in nodes[hub]['LinkedTo'])
    total = sum(nodes[name]['AuthScore'] for name in nodes)
    for name in nodes:
        nodes[name]['AuthScoreNorm'] = round(nodes[name]['AuthScore'] / total, 6)
    return nodes
def getHubs(nodes, hubs):
    '''Append every node with outgoing links to *hubs* (in place, without
    duplicates), sort it, and return the sorted list.

    Fix: the original returned ``hubs.sort()``, which is always None
    because list.sort() sorts in place; return the list itself instead.
    '''
    for k in nodes.keys():
        if (len(nodes[k]['LinkedTo']) > 0 and k not in hubs):
            hubs.append(k)
    hubs.sort()
    return hubs
def getAuthorities(nodes, auths):
    '''Append every node that is linked to by some hub to *auths* (in
    place, without duplicates), sort it, and return the sorted list.

    Fix: the original returned ``auths.sort()``, which is always None
    because list.sort() sorts in place; return the list itself instead.
    '''
    for k in nodes.keys():
        if (len(nodes[k]['LinkedTo']) > 0):
            for a in nodes[k]['LinkedTo']:
                if (a not in auths):
                    auths.append(a)
    auths.sort()
    return auths
def countHubs(nodes):
    '''Return how many nodes act as hubs (have at least one outgoing link).'''
    return sum(1 for name in nodes if nodes[name]['LinkedTo'])
def countAuthorities(nodes):
    '''Return the number of distinct authorities (nodes that some hub
    links to).

    Fix: the original evaluated ``resultList + nodes[k]['LinkedTo']`` and
    discarded the result, so the function always returned 0; accumulate
    the links with extend instead.
    '''
    resultList = []
    for k in nodes.keys():
        resultList.extend(nodes[k]['LinkedTo'])
    return len(set(resultList))
# --- HITS driver: build the graph interactively, then iterate ---
nodes = {}
hubs = []
authorities = []
defineNodes(nodes)
# getHubs/getAuthorities fill the lists in place; return values unused here.
getHubs(nodes, hubs)
getAuthorities(nodes, authorities)
steps = input('Number of steps: ')
# Step 0 seeds the hub scores; each later step is one full HITS iteration.
for i in range(0,steps + 1):
    if (i == 0):
        initHubScores(nodes)
        updateAuthScores(nodes, hubs, authorities)
    else:
        updateHubScores(nodes, hubs)
        updateAuthScores(nodes, hubs, authorities)
    print "==============================="
    print "Normalized Scores After Step " + str(i)
    print "==============================="
    printHubRankings(nodes, hubs)
    printAuthorityRankings(nodes, authorities)
import math
# Read a duration in seconds and print it as H:M:S.
segundos = int(input("Digite o tempo em segundos: "))
horas, resto = divmod(segundos, 3600)
minutos, segundos = divmod(resto, 60)
print(str(horas) + ":" + str(minutos) + ":" + str(segundos))
|
# Copyright (c) 2016-2017 Adobe Systems Incorporated. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import re
import os
import pytest
import yaml
import six
import umapi_client
# This test relies on a sensitive configuration kept outside the repo.
config_file_name = "local/live_configuration.yaml"
# Skip the whole module unless the live config file is readable.
pytestmark = pytest.mark.skipif(not os.access(config_file_name, os.R_OK),
                                reason="Live config file '{}' not found.".format(config_file_name))
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s: %(levelname)s: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
@pytest.fixture(scope="module")
def config():
    """Load the live-test YAML config and open a UMAPI connection.

    Module-scoped so all tests share one connection. Returns a
    (connection, config) tuple.
    """
    with open(config_file_name, "r") as f:
        # Fix: yaml.load() without an explicit Loader is deprecated and can
        # execute arbitrary tags; a credentials file only needs plain YAML.
        config = yaml.safe_load(f)
    creds = config["test_org"]
    conn = umapi_client.Connection(org_id=creds["org_id"], auth_dict=creds)
    return conn, config
def test_status(config):
    """The live UMAPI server must report state LIVE."""
    conn = config[0]
    remote_status = conn.status(remote=True)[1]
    logging.info("Server status is %s", remote_status)
    assert remote_status["state"] == "LIVE"
def test_list_users(config):
    """Walk every user in the org; any @adobe.com address must be an Adobe ID."""
    connection, _ = config
    query = umapi_client.UsersQuery(connection=connection, in_domain="")
    adobe_pattern = re.compile(r".*@adobe.com$")
    for user in query:
        address = str(user.get("email", "")).lower()
        if adobe_pattern.match(address):
            assert str(user["type"]) == "adobeID"
    logging.info("Found %d users.", len(query.all_results()))
def test_list_groups(config):
    """Walk every group; large groups must appear in the configured big-group list."""
    connection, params = config
    query = umapi_client.GroupsQuery(connection=connection)
    for group in query:
        logging.debug("Group: %s", group)
        if group.get("memberCount", 0) > params["big_group_size"]:
            assert group.get("groupName") in params["big_groups"]
    logging.info("Found %d groups.", len(query.all_results()))
def test_get_user(config):
    """Fetch the configured test user and check each expected field (case-insensitive)."""
    connection, params = config
    expected = params["test_user"]
    user = umapi_client.UserQuery(connection, expected["email"]).result()
    logging.info("User: %s", user)
    for field, value in six.iteritems(expected):
        assert user[field].lower() == value.lower()
def test_rename_user(config):
    # Round-trip rename of the live test user: queue a throwaway name, then
    # restore the original values from config; both updates ride on one
    # UserAction so the server sees the net restore.
    conn, params = config
    user = umapi_client.UserAction(id_type=params["test_user"]["type"],
                                   email=params["test_user"]["email"],
                                   username=params["test_user"]["username"])
    user.update(first_name="Rodney", last_name="Danger")
    user.update(first_name=params["test_user"]["firstname"], last_name=params["test_user"]["lastname"])
    # NOTE(review): assumed tuple semantics (queued, sent, completed) per
    # umapi_client.execute_single -- confirm against the library docs.
    assert (0, 1, 1) == conn.execute_single(user, immediate=True)
|
#! usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from xml.etree import ElementTree as ET
from flask import render_template
from urllib import urlopen
from json import loads
app = Flask(__name__)
@app.route('/weixin', methods=['GET', 'POST'])
def wechat():
    # Echo back 'echostr' on GET to complete the WeChat server handshake.
    if request.method == 'GET':
        echostr = request.args.get('echostr')
        return echostr
    else:
        data = request.get_data()
        # Parse the incoming message XML with xml.etree.
        xml = ET.fromstring(data)
        ToUserName = xml.findtext('.//ToUserName')  # equivalent to xml.find('ToUserName').text
        FromUserName = xml.findtext('.//FromUserName')
        CreateTime = xml.findtext('.//CreateTime')
        MsgType = xml.findtext('.//MsgType')
        Content = xml.findtext('.//Content')
        MsgId = xml.findtext('.//MsgId')
        # NOTE(review): for non-text messages Content is None and the `in`
        # test below raises TypeError -- confirm only text messages reach here.
        # Canned self-introduction reply for "who are you" questions.
        if u'你是谁' in Content:
            return render_template(
                'reply_text.html',
                ToUserName=ToUserName,
                FromUserName=FromUserName,
                CreateTime=CreateTime,
                MsgType=MsgType,
                Content=u'我是你的朋友瘦子君啊^@^', )
        # Forward the text to the juhe.cn chatbot API and relay its reply.
        session = urlopen('http://op.juhe.cn/robot/index?info=%s&key=ab335a381ee61e8e95e2b5a32c364d66'
                          % Content.encode('utf-8')).read()
        # Parse the JSON payload returned by the chatbot service.
        result = loads(session)
        Content = result['result']['text']
        return render_template(
            'reply_text.html',
            ToUserName=ToUserName,
            FromUserName=FromUserName,
            CreateTime=CreateTime,
            MsgType=MsgType,
            Content=Content,)
if __name__ == '__main__':
    # Development server only; disable debug=True before deploying.
    app.run(debug=True)
|
# Read semicolon-separated score records (name;math;physics;russian):
# print each pupil's average, then the per-subject averages.
with open('dataset_3363_4.txt') as file:
    math_scores = []
    physics_scores = []
    russian_scores = []
    for line in file:
        fields = line.strip().split(';')
        scores = [int(fields[i]) for i in (1, 2, 3)]
        math_scores.append(scores[0])
        physics_scores.append(scores[1])
        russian_scores.append(scores[2])
        print(sum(scores) / 3)
    print(sum(math_scores) / len(math_scores),
          sum(physics_scores) / len(physics_scores),
          sum(russian_scores) / len(russian_scores))
print()
import hello
if __name__ == '__main__':
    # Instantiate hello.A and invoke its f2 method.
    hello.A().f2()
def train(bayes, item, cat):
    """Record one training example for a naive-Bayes style classifier.

    Increments the per-category count on ``bayes.class_count`` and, for every
    feature extracted from *item*, the (feature, category) co-occurrence count
    on ``bayes.feature_count``.

    :param bayes: classifier object exposing get_features(), class_count and
        feature_count dicts (mutated in place)
    :param item: training sample passed to bayes.get_features
    :param cat: category label for this sample
    :return: the (mutated) bayes object, for call chaining
    """
    wordlist = bayes.get_features(item)
    # dict.get with a default replaces the manual "init then increment" pattern.
    bayes.class_count[cat] = bayes.class_count.get(cat, 0) + 1
    for word in wordlist:
        key = (word, cat)
        bayes.feature_count[key] = bayes.feature_count.get(key, 0) + 1
    return bayes
#!/usr/bin/env python
# coding: utf-8
### Author: zss
### Date: 2018.7.16
## For: 16S Analysis ###
import ConfigParser
import sys
import os
import commands
# Require exactly one argument: the 16S analysis configuration file.
if len(sys.argv) != 2:
    print "\n\t\tUsage: python %s 16S_Analysis.conf\n" % sys.argv[0]
    sys.exit(1)
ConfgFile = sys.argv[1]
Config = ConfigParser.ConfigParser()
Config.read(ConfgFile)
### DB: reference databases for chimera filtering and OTU picking ###
gold_16S_Seq = Config.get("DB", "gold_16S_Seq")
gg_13_8_otus = Config.get("DB", "gg_13_8_otus")
### Get Data Info
DataDir = Config.get("Data", "Dir")
OutPut = Config.get("Data", "OutPut")  # NOTE(review): paths below concatenate directly -- assumes OutPut ends with '/'; confirm
### Get Qiime1
MetaData = Config.get("Qiime1", "MetaData")
ParamsFile = Config.get("Qiime1", "ParamsFile")
min_count_fraction = Config.get("Qiime1", "min_count_fraction")
#####################################################################################################################
# Echo the resolved configuration before running the pipeline.
print "\n################################################ Info ###############################################################\n"
print "\t\tData Dir: %s" % DataDir
print "\t\tOutPut Dir: %s" % OutPut
print "\t\tmin_count_fraction: %s" % min_count_fraction
print "\n#####################################################################################################################\n"
#####################################################################################################################
def MkdirOutPut(OutPut):
    """Create the standard analysis directory tree under OutPut (expects trailing '/')."""
    subdirs = ["0.QC", "1.Assemble", "2.PickOTUs", "3.Taxonomy", "4.Rarefaction", "5.Alpha_diversity", "6.Beta_diversity", "7.PCA_PCoA/pcoa_normalized", "7.PCA_PCoA/2d_plots_normalized", "8.Other_Analysis"]
    for sub in subdirs:
        target = OutPut + sub
        if not os.path.exists(target):
            os.makedirs(target)
def DropChimeric(DataDir, OutPut, gold_16S_Seq):
    # Run usearch7 uchime against the gold reference to strip chimeric reads
    # from every W*.fna under DataDir, then concatenate the survivors.
    # NOTE(review): the awk fragment `{print $1.'.good.fna'}` has broken shell
    # quoting (the dot and nested quotes will not concatenate as intended) --
    # verify this command actually emits the *.good.fna files consumed below.
    Command1 = "for i in $(find %s -maxdepth 2 -name 'W*.fna');do usearch7 -uchime_ref $i -db %s -strand plus -nonchimeras `echo $i| awk -F'S/' '{print $2}'| awk -F'.fna' '{print $1.'.good.fna'}'`;done" % (DataDir, gold_16S_Seq)
    print "\033[1;31;40m --** Filter Data **--Command1: \033[0m \n%s" % (Command1)
    os.system(Command1)
    # Merge all per-sample chimera-free reads into one FASTA (in the CWD)
    # for downstream OTU picking.
    Command2 = "cat *.good.fna >> AllSeqs.fna"
    print "\033[1;34;40m --** Cat all good fna to AllSeqs.fna **--Command2: \033[0m \n%s" % Command2
    os.system(Command2)
def PickOTU(AllSeqFile, MetaData, gg_13_8_otus, OutPut, ParamsFile):
OTU_Dir = OutPut + "2.PickOTUs/otu_output"
### pick otu ###
Command3 = "pick_open_reference_otus.py -i %s -r %s -o %s -p %s \n\n" % (AllSeqFile, gg_13_8_otus, OTU_Dir, ParamsFile)
print "--** OTU Pick **--Command3: \n%s" % Command3
os.system(Command3)
OTU_Table_biom = OTU_Dir + "otu_table_mc2_w_tax_no_pynast_failures.biom"
OTU_Table_biom_alias = OTU_Dir + "otu_table.biom"
OTU_Table_txt = OTU_Dir + "otu_table.txt"
## alias ###
Command3_1 = "ln -S %s %s" % (OTU_Table_biom, OTU_Table_biom_alias)
os.system(Command3_1)
### otu biom to txt ###
Command4 = "biom convert -i %s -o %s --to-tsv --header-key taxonomy" % (OTU_Table_biom, OTU_Table_txt)
print "--** OTU biom to txt **--Command4: \n%s\n\n" % Command4
os.system(Command4)
### otu biom to summary ###
OTU_Table_summary = OTU_Dir + "otu_table_summary.txt"
Command5 = "biom summarize-table -i %s -o %s" % (OTU_Table_biom, OTU_Table_summary)
print "--** OTU biom to summary **--Command5: \n%s\n\n" % Command5
os.system(Command5)
### otu taxa and plot ###
Taxa_Dir = OutPut + "3.Taxonomy/"
Command6_1 = "summarize_taxa_through_plots.py -i %s -m %s -o %s" % (OTU_Table_biom, MetaData, Taxa_Dir)
print "--** OTU summarize taxa and plot **--Command6_1: \n%s\n\n" % Command6_1
os.system(Command6_1)
### otu network ###
OTU_Network = OutPut + "3.Taxonomy/OTU_Network"
Command6_2 = "make_otu_network.py -i %s -m %s -o %s" % (OTU_Table_biom, MetaData, OTU_Network)
print "--** OTU Network **--Command6_2: \n%s\n\n" % Command6_2
os.system(Command6_2)
def Get_High_Abundance_OTU(OutPut, min_count_fraction, MetaData):
OTU_Table_biom_alias = OutPut + "2.PickOTUs/otu_output/otu_table.biom"
High_OTU_Table_biom = OutPut + "2.PickOTUs/HighAbundance/otu_table_high_abundance.biom"
High_OTU_Table_txt = OutPut + "2.PickOTUs/HighAbundance/otu_table_high_abundance.txt"
rep_set_fa = OutPut + "2.PickOTUs/otu_output/rep_set.fna"
High_rep_set_fa = OutPut + "2.PickOTUs/HighAbundance/rep_set_high_abundance.fna"
Command7 = "filter_otus_from_otu_table.py -i %s -o %s --min_count_fraction %s\n\n" % (OTU_Table_biom_alias, High_OTU_Table_biom, min_count_fraction)
print "--** Get High Abundance OTU **--Command7: \n%s" % Command7
os.system(Command7)
### high otu biom to txt ###
Command8 = "biom convert -i %s -o %s --to-tsv --header-key taxonomy" % (High_OTU_Table_biom, High_OTU_Table_txt)
print "--** OTU biom to txt **--Command8: \n%s" % Command8
os.system(Command8)
### otu taxa and plot ###
High_Taxa_Dir = OutPut + "2.PickOTUs/HighAbundance/Taxonomy/"
Command9 = "summarize_taxa_through_plots.py -i %s -m %s -o %s" % (OTU_Table_biom_HighAbundance, MetaData, High_Taxa_Dir)
print "--** High OTU summarize taxa and plot **--Command9: \n%s" % Command9
os.system(Command9)
### get high abundance seq ###
Command10 = "python /data1/script/Pipeline/16S/ExtraOTU2Seq.py %s %s > %s" % (High_OTU_Table_txt,rep_set_fa, High_rep_set_fa )
print "--** High OTU seq **--Command10: \n%s\n\n" % Command10
os.system(Command10)
Command11 = "make_otu_heatmap.py -i %s -o %s -m %s" % (High_OTU_Table_biom, High_OTU_Table_HeatMap, MetaData)
print "--** High OTU HeatMap **--Command11: \n%s\n\n" % Command11
os.system(Command11)
def Alpha_Diversity(OutPut, MetaData):
OTU_Table_biom = OutPut + "2.PickOTUs/otu_output/otu_table.biom"
rep_set_tre = OutPut + "2.PickOTUs/otu_output/rep_set.tre"
alpha_diversity_Dir = OutPut + "5.Alpha_diversity/"
Command13_1 = "alpha_diversity.py -i %s -m observed_species,shannon,PD_whole_tree,singles,simpson,observed_otus,chao1 -o %s -t %s" % (OTU_Table_biom, alpha_diversity_Dir, rep_set_tre)
print "--** aplha diversity **--Command13_1: \n%s\n\n" % Command13_1
os.system(Command13_1)
#Command13_2 = "alpha_rarefaction.py -i %s -m %s -p %s -t %s -e %s" % (OTU_Table_biom, alpha_diversity_Dir, rep_set_tre)
#print "--** aplha rarefaction **--Command13_2: \n%s\n\n" % Command13_2
#os.system(Command13_2)
def Beta_Diversity(OutPut, MetaData):
OTU_Table_biom = OutPut + "2.PickOTUs/otu_output/otu_table.biom"
rep_set_tre = OutPut + "2.PickOTUs/otu_output/rep_set.tre"
Normalized_OTU_Table_biom = OutPut + "6.Beta_diversity/normalized_otu_table.biom"
Command14 = "normalize_table.py -i %s -a CSS -o %s" % (OTU_Table_biom, Normalized_OTU_Table_biom)
print "--** beta diversity: normalize otu **--Command14: \n%s\n\n" % Command14
os.system(Command14)
beta_diversity_Dir = OutPut + "6.Beta_diversity/"
Command15 = "beta_diversity.py -i %s -m weighted_unifrac,unweighted_unifrac -o %s -t %s" % (Normalized_OTU_Table_biom, beta_diversity_Dir, rep_set_tre)
print "--** beta diversity **--Command15: \n%s\n\n" % Command15
os.system(Command15)
### PCOA ###
pcoa_Dir = OutPut + "7.PCA_PCoA/pcoa_normalized/"
Command16 = "principal_coordinates.py -i %s -o %s" % (beta_diversity_Dir, pcoa_Dir)
print "--** beta diversity: PCOA **--Command16: \n%s\n\n" % Command16
os.system(Command16)
### 2d plot ###
pcoa_2d_plot_Dir = OutPut + "7.PCA_PCoA/2d_plots_normalized/"
Command17 = "make_2d_plots.py -i %s -m %s -o %s" % (pcoa_Dir, MetaData, pcoa_2d_plot_Dir)
print "--** beta diversity: 2d plot **--Command17: \n%s\n\n" % Command17
os.system(Command17)
### 3d PCoA ###
bdiv_jk100 = OutPut + "8.Analysis/bdiv_jk100/"
Command18 = "jackknifed_beta_diversity.py -i %s -o %s -e 100 -m %s -t %s" % (OTU_Table_biom, bdiv_jk100, MetaData, rep_set_tre)
print "--** jackknifed_beta_diversity **--Command18: \n%s\n\n" % Command18
os.system(Command18)
### Show similarity of bacterial communities based on 16s rRNA genes ### Weight & Unweight ###
### Unweight ###
bdiv_jk100_master_tree = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/master_tree.tre"
jackknife_support = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/jackknife_support.txt"
unweight_Tree = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/Tree.pdf"
Command19 = "make_bootstrapped_tree.py -m %s -s %s -o %s" % (bdiv_jk100_master_tree, jackknife_support, unweight_Tree)
### Weight ###
wei_bdiv_jk100_master_tree = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/master_tree.tre"
wei_jackknife_support = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/jackknife_support.txt"
weight_Tree = OutPut + "8.Analysis/bdiv_jk100/unweighted_unifrac/upgma_cmp/Tree.pdf"
Command20 = "make_bootstrapped_tree.py -m %s -s %s -o %s" % (wei_bdiv_jk100_master_tree, wei_jackknife_support, weight_Tree)
print "--** Unweight and Weight Tree **--Command19: %s\n\n Command20: \n%s\n\n" % (Command19, Command20)
os.system(Command19)
os.system(Command20)
def
if __name__ == '__main__':
print "\t\t\033[1;32;40m ### Step1: Mkdir ###\033[0m \n\n"
MkdirOutPut(OutPut)
print "\t\t\033[1;32;40m ### Step2: Drop Chimeric Seq ###\033[0m \n\n"
DropChimeric(DataDir, OutPut, gold_16S_Seq)
print "\t\t\033[1;32;40m ### Step3: Pick OTU ###\033[0m \n\n"
PickOTU(AllSeqFile, MetaData, gg_13_8_otus, OutPut, ParamsFile)
print "\t\t\033[1;32;40m ### Step4: Get High Abundance OTU ###\033[0m \n\n"
Get_High_Abundance_OTU(OutPut, min_count_fraction, MetaData)
print "\t\t\033[1;32;40m ### Step5: Alpha_Diversity ###\033[0m \n\n"
Alpha_Diversity(OutPut)
print "\t\t\033[1;32;40m ### Step6: Beta_Diversity ###\033[0m \n\n"
Beta_Diversity(OutPut, MetaData)
|
class Demo:
    """Store two private numeric values supplied at construction time."""

    def __init__(self, v1=11, v2=22):
        # Name-mangled attributes keep the values private to this class.
        self.__first = v1
        self.__second = v2

    def do_something(self):
        """Return the sum of the two stored values."""
        return self.__first + self.__second
if __name__ == "__main__":
    # Demonstrate the class with explicit constructor arguments.
    print(Demo(12, 34).do_something())
# 檔名: class_demo9.py
# 作者: Kaiching Chang
# 時間: July, 2014
|
from django.urls import path
from .views import (
home,
delete,
update,
cross,
uncross
)
# URL namespace used when reversing, e.g. reverse('core:home').
app_name = 'core'
# All item routes take a string primary key; the root route is the
# class-based home view.
urlpatterns = [
    path('', home.as_view(), name='home'),
    path('delete/<str:pk>/', delete, name='delete'),
    path('update/<str:pk>/', update, name='update'),
    path('cross/<str:pk>/', cross, name='cross'),
    path('uncross/<str:pk>/', uncross, name='uncross'),
]
from django.urls import path, re_path
from api.views.v1 import views
from api.controllers.v1.addressbook import add_person, address_book, search_person, delete, edit, add_mobile, add_email, \
add_address
# Address-book API v1 routes: health probes plus person CRUD and
# contact-detail additions keyed by numeric person pk.
urlpatterns = [
    path('health/', views.health, name='health'),
    path('health-secure/', views.health_secure, name='health_secure'),
    path('addPerson/', add_person, name='add_person'),
    path('addressBook/', address_book, name='address_book'),
    path('searchPerson/', search_person, name='search_person'),
    # NOTE(review): these regexes anchor only the end ('$' inside the group)
    # and are unanchored at the start -- confirm the missing '^' is intended.
    re_path(r'delete/(?P<pk>[0-9]+$)', delete, name='delete_person_contact'),
    re_path(r'edit/(?P<pk>[0-9]+$)', edit, name='edit_person_contact'),
    re_path(r'addMobile/(?P<pk>[0-9]+$)', add_mobile, name='add_person_mobile'),
    re_path(r'addEmail/(?P<pk>[0-9]+$)', add_email, name='add_person_email'),
    re_path(r'addAddress/(?P<pk>[0-9]+$)', add_address, name='add_person_address'),
]
|
# Generated by Django 2.2.3 on 2020-09-13 11:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Re-creates the RatingEvaluationString model after it was dropped
    # by migration 0011.

    dependencies = [
        ('reviewapp', '0011_delete_ratingevaluationstring'),
    ]
    operations = [
        migrations.CreateModel(
            name='RatingEvaluationString',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('string_review', models.CharField(max_length=255)),
                # NOTE(review): 'fromat_string_review' looks like a typo for
                # 'format_string_review', but it is the stored column name --
                # renaming requires a follow-up migration, not an edit here.
                ('fromat_string_review', models.CharField(max_length=255)),
                ('event', models.CharField(default='review string added', max_length=255)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('product_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activity_product_string', to='reviewapp.Product')),
            ],
        ),
    ]
|
################################################################################
# #
# MHD MODES CONVERGENCE PLOTS #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
RES = [16,32,64] #,128]  # grid resolutions swept for convergence
# LOOP OVER EIGENMODES
MODES = [1,2,3]  # mode 0 (entropy) is skipped; see NAMES
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8  # number of primitive variables
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4  # perturbation amplitude applied to dvar
# wavenumbers: one full wavelength per box dimension
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
# background (unperturbed) state
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
# Magnetic field
var0[5] = 1.
var0[6] = 0.
var0[7] = 0.
# L1[n, m, k]: L1 error for mode n, resolution m, variable k
L1 = np.zeros([len(MODES), len(RES), NVAR])
# powerfits[n, k]: fitted convergence order (log-log slope) per mode/variable
powerfits = np.zeros([len(MODES), NVAR])
for n in range(len(MODES)):
    # EIGENMODES: analytic perturbation amplitudes for each primitive variable
    dvar = np.zeros(NVAR)
    if MODES[n] == 0: # ENTROPY
        dvar[0] = 1.
    if MODES[n] == 1: # SLOW/SOUND
        dvar[0] = 0.556500332363
        dvar[1] = 0.742000443151
        dvar[2] = -0.282334999306
        dvar[3] = 0.0367010491491
        dvar[4] = 0.0367010491491
        dvar[5] = -0.195509141461
        dvar[6] = 0.0977545707307
        dvar[7] = 0.0977545707307
    if MODES[n] == 2: # ALFVEN
        dvar[3] = -0.339683110243
        dvar[4] = 0.339683110243
        dvar[6] = 0.620173672946
        dvar[7] = -0.620173672946
    if MODES[n] == 3: # FAST
        dvar[0] = 0.481846076323;
        dvar[1] = 0.642461435098;
        dvar[2] = -0.0832240462505;
        dvar[3] = -0.224080007379;
        dvar[4] = -0.224080007379;
        dvar[5] = 0.406380545676;
        dvar[6] = -0.203190272838;
        dvar[7] = -0.203190272838;
    dvar *= amp
    # USE DUMPS IN FOLDERS OF GIVEN FORMAT
    for m in range(len(RES)):
        os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
        # use the final dump of the run
        dfile = io.get_dumps_list(".")[-1]
        hdr, geom, dump = io.load_all(dfile)
        X1 = geom['x']
        X2 = geom['y']
        X3 = geom['z']
        # deviation of each evolved variable from the background state
        dvar_code = []
        dvar_code.append(dump['RHO'] - var0[0])
        dvar_code.append(dump['UU'] - var0[1])
        dvar_code.append(dump['U1'] - var0[2])
        dvar_code.append(dump['U2'] - var0[3])
        dvar_code.append(dump['U3'] - var0[4])
        dvar_code.append(dump['B1'] - var0[5])
        dvar_code.append(dump['B2'] - var0[6])
        dvar_code.append(dump['B3'] - var0[7])
        # analytic solution and L1 error per variable
        dvar_sol = []
        for k in range(NVAR):
            dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
            L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
        mid = RES[m]/2  # NOTE(review): unused -- leftover from a slice plot?
    # MEASURE CONVERGENCE: slope of log(L1) vs log(N) for perturbed variables
    for k in range(NVAR):
        if abs(dvar[k]) != 0.:
            powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
    os.chdir('../plots')
    # MAKE PLOTS
    fig = plt.figure(figsize=(16.18,10))
    ax = fig.add_subplot(1,1,1)
    for k in range(NVAR):
        if abs(dvar[k]) != 0.:
            ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
    # reference second-order convergence line
    ax.plot([RES[0]/2., RES[-1]*2.],
            10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
            color='k', linestyle='--', label='N^-2')
    # NOTE(review): 'basex' was renamed 'base' in matplotlib >= 3.3 --
    # confirm the pinned matplotlib version.
    plt.xscale('log', basex=2); plt.yscale('log')
    plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
    plt.xlabel('N'); plt.ylabel('L1')
    plt.title(NAMES[MODES[n]])
    plt.legend(loc=1)
    plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
|
import requests
import datetime
from collections import *
# Cache slot for get_price_btc: last query timestamp (to the second) and price.
get_price_btc_ts = 0
get_price_btc_price = 0
def get_price_btc():
    # Cache the price so the Binance API is queried at most once per second.
    global get_price_btc_ts, get_price_btc_price
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if timestamp == get_price_btc_ts:
        return get_price_btc_price
    else:
        get_price_btc_ts = timestamp
        # Fetch the current BTCUSDT price.
        url = 'https://api.binance.com/api/v3/ticker/price?symbol=BTCUSDT'
        r = requests.get(url)
        price = float(r.json()['price'])
        get_price_btc_price = price
        return price
# Cache slot for get_price_eth: last query timestamp (to the second) and price.
get_price_eth_ts = 0
get_price_eth_price = 0
def get_price_eth():
    # Cache the price so the Binance API is queried at most once per second.
    global get_price_eth_ts, get_price_eth_price
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if timestamp == get_price_eth_ts:
        return get_price_eth_price
    else:
        get_price_eth_ts = timestamp
        # Fetch the current ETHUSDT price.
        url = 'https://api.binance.com/api/v3/ticker/price?symbol=ETHUSDT'
        r = requests.get(url)
        price = float(r.json()['price'])
        get_price_eth_price = price
        return price
# Per-symbol cache: last query timestamp (to the second) and last price.
get_price_symbol_ts = defaultdict(int)
get_price_symbol_price = {}
def get_price_symbol(symbol):
    # Cache per symbol; query the Binance API at most once per second.
    global get_price_symbol_ts, get_price_symbol_price
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if timestamp == get_price_symbol_ts[symbol]:
        return get_price_symbol_price[symbol]
    else:
        get_price_symbol_ts[symbol] = timestamp
        # Fetch the current price for `symbol`.
        url = 'https://api.binance.com/api/v3/ticker/price?symbol=' + symbol
        r = requests.get(url)
        price = float(r.json()['price'])
        get_price_symbol_price[symbol] = price
        return price
def get_price():
    # Return a human-readable report: current BTCUSDT price on Binance
    # (via the cached fetcher) plus a timestamp line.
    price = get_price_btc()
    # NOTE(review): the label says Beijing time but datetime.now() uses the
    # host's local timezone -- confirm the server runs in UTC+8.
    return 'Binance交易所的当前BTCUSDT价格为:' + str(price) + '\n' +\
           '北京时间:' + str(datetime.datetime.now().strftime('%Y年%m月%d日 %H:%M:%S'))
kline_cnt = 24*5  # number of hourly candles: 5 days' worth
def get_ohlcv_list(symbol='BTCUSDT'):
    # Fetch the most recent `kline_cnt` 1-hour OHLCV candles for `symbol`
    # from Binance and return the raw JSON list.
    symbol = symbol.upper()
    url = 'https://api.binance.com/api/v3/klines?symbol='+ symbol +'&interval=1h&limit=' + str(kline_cnt)
    #url = 'https://api.binance.com/api/v3/klines?symbol=BTCUSDT&interval=1h&limit=72'
    r = requests.get(url)
    ohlcv_list = r.json()
    return ohlcv_list
|
# Author: Yinsen Miao
import pandas as pd
import numpy as np
import os
import random
import matplotlib.pyplot as plt
import lightgbm
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
from math import sqrt
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
from sklearn.model_selection import train_test_split
from itertools import product
from tqdm import tqdm
# import Henderson Moving Average
from util import hmaSymmetricWeights, Henderson
from statsmodels.tsa.seasonal import seasonal_decompose
sns.set_context("poster")
# specify model and image paths
model_path = "../models"
image_path = "../images"
dat = pd.read_pickle("../data/cleandata2.pkl")
# data engineering
origin_date = "1990-01-01" # let us use this date as the origin
# elapsed time in (approximate) months since the origin date
dat["TIME"] = (dat["SALEDATE"] - pd.to_datetime(origin_date)).dt.days / 30
dat["MONTH"] = dat["SALEDATE"].dt.month
# log-transform the skewed price/area columns
dat["LOGPRICE"] = np.log(dat["PRICE"])
dat["LOGFAIRMARKETTOTAL"] = np.log(dat["FAIRMARKETTOTAL"])
dat["LOGLOTAREA"] = np.log(dat["LOTAREA"])
dat["LOGFINISHEDLIVINGAREA"] = np.log(dat["FINISHEDLIVINGAREA"])
dat["PRICEPERSQFT"] = dat["FAIRMARKETTOTAL"] / dat["LOTAREA"]
# bucket homes into Bottom/Middle/Top tiers by fair-market-value quantiles;
# values outside the 5%-95% band fall outside the bins and become NaN
tiers_cut = dat["FAIRMARKETTOTAL"].quantile([0.05, 0.35, 0.65, 0.95]).tolist()
dat["TIERS"] = pd.cut(dat["FAIRMARKETTOTAL"], tiers_cut, labels=["Bottom", "Middle", "Top"])
# let us drop all data that are considered as outliers
dat.dropna(axis=0, inplace=True)
# compute the median price
median_prc = dat.groupby(["SALEDATE"])["PRICE"].median()
mean_prc = dat.groupby(["SALEDATE"])["PRICE"].mean()
# convert object column to category type
for col in ["NEIGHCODE", "EXTFINISH_DESC", "STYLEDESC", "MUNICODE", "TIERS"]:
    dat[col] = dat[col].astype("category")
# split the data into training and testing dataset
train_dat, valid_dat = train_test_split(dat, train_size=0.8, random_state=2021)
ntrain, nvalid = len(train_dat), len(valid_dat)
print("ntrain = %d, ntest = %d" % (ntrain, nvalid))
# separate features into multiple categories
# continuous features
x_feats = [
    'TIME', 'GRADERANK', 'CDURANK', 'SCHOOLRANK',
    'STORIES', 'BEDROOMS', 'ADJUSTBATHS', 'BSMTGARAGE', 'FIREPLACES', 'YEARBLT', 'BASEMENT',
    'LOGLOTAREA', 'LOGFINISHEDLIVINGAREA', 'PRICEPERSQFT',
    'LATITUDE', 'LONGITUDE', 'ANXIETY', 'OLD', 'POOR', 'VACANT'
]
# nominal features
x_categorical_feats = [
    'NEIGHCODE', 'EXTFINISH_DESC', 'STYLEDESC', 'MONTH', 'TIERS'
]
# target variable log price
y_feats = [
    "LOGPRICE"
]
# create training and testing dataloaders
train_dataloader = lightgbm.Dataset(data=train_dat[x_feats + x_categorical_feats],
                                    label=train_dat[y_feats],
                                    categorical_feature=x_categorical_feats)
valid_dataloader = lightgbm.Dataset(data=valid_dat[x_feats + x_categorical_feats],
                                    label=valid_dat[y_feats],
                                    categorical_feature=x_categorical_feats)
# use the LGBM ML parameter (tuned hyper-parameters; fixed seed for reproducibility)
parameters = {
    'objective': 'mae',
    'metric': ['rmse'],
    'boosting': 'gbdt',
    'num_leaves': 32,
    'min_child_samples': 69,
    'feature_fraction': 0.6597079764069385,
    'bagging_fraction': 0.6171731471091209,
    'bagging_freq': 4,
    'lambda_l1': 3.5479988320298905,
    'lambda_l2': 3.503857243823598,
    'learning_rate': 0.021626597418816215,
    'verbose': 0,
    'seed': 2021
}
# train LGBM model with early stopping against the validation set
model = lightgbm.train(params=parameters,
                       train_set=train_dataloader,
                       valid_sets=valid_dataloader,
                       num_boost_round=10000,
                       early_stopping_rounds=1000,
                       verbose_eval=False)
# compute prediction (exp() undoes the log-price target transform)
train_prc_predict = np.exp(model.predict(data=train_dat[x_feats + x_categorical_feats])) # predict train price
valid_prc_predict = np.exp(model.predict(data=valid_dat[x_feats + x_categorical_feats])) # predict valid price
# assess the prediction performance
scale = 10000 # show RMSE in 10K
train_rmse = sqrt(mean_squared_error(train_dat["PRICE"] / scale, train_prc_predict / scale))
valid_rmse = sqrt(mean_squared_error(valid_dat["PRICE"] / scale, valid_prc_predict / scale))
train_corr = pearsonr(train_dat["PRICE"], train_prc_predict)[0]
valid_corr = pearsonr(valid_dat["PRICE"], valid_prc_predict)[0]
print("Training RMSE %.3f, Testing RMSE %.3f" % (train_rmse, valid_rmse))
print("Training Corr %.3f, Testing Corr %.3f" % (train_corr, valid_corr))
date_min, date_max = pd.to_datetime("1990-01-31"), dat["SALEDATE"].max()
parids = dat["PARID"].unique().tolist()
print("Now predict housing price for %d homes" % (len(parids)))
# month-end date grid crossed with every parcel id: the prediction universe
dates = pd.date_range(start=date_min, end=date_max, freq='M').tolist()
universe_df = pd.DataFrame(product(parids, dates), columns=["PARID", "SALEDATE"])
# select feats from housing properties
house_df = dat[[
    'PARID', 'GRADERANK', 'CDURANK', 'SCHOOLRANK',
    'STORIES', 'BEDROOMS', 'ADJUSTBATHS', 'BSMTGARAGE', 'FIREPLACES', 'YEARBLT', 'BASEMENT',
    'LOGLOTAREA', 'LOGFINISHEDLIVINGAREA', 'PRICEPERSQFT',
    'LATITUDE', 'LONGITUDE', 'ANXIETY', 'OLD', 'POOR', 'VACANT',
    'NEIGHCODE', 'EXTFINISH_DESC', 'STYLEDESC', 'TIERS'
]].drop_duplicates()
# left merge with selected df
universe_df = pd.merge(universe_df, house_df, on=['PARID'], how='left')
# remove the row that SALEDATE < YEARBLT, let us only consider the house that were already built
universe_df["VALID"] = universe_df["SALEDATE"].dt.year - universe_df["YEARBLT"] > 0
universe_df = universe_df[universe_df["VALID"]]
# create additional features
# time in month with reference to 2012
universe_df["TIME"] = (universe_df["SALEDATE"] - pd.to_datetime(origin_date)).dt.days / 30
universe_df["MONTH"] = universe_df["SALEDATE"].dt.month
# https://www.kaggle.com/jens0306/easy-prediction-using-lightgbm-model
# https://www.zillow.com/research/zhvi-methodology/
# https://markthegraph.blogspot.com/2014/06/henderson-moving-average.html
# predict housing price
# the code below take a long time to run
# test_log_prc = model.predict(data=universe_df[x_feats + x_categorical_feats])
# test_prc = np.exp(test_log_prc)
# universe_df["PREDPRICE"] = test_prc
# load precomputed predictions instead of re-running the slow block above
universe_df = pd.read_pickle("../data/prediction.pkl")
# pivot table of universe_df: rows = month-ends, columns = parcels
universe_pivot = universe_df[["PARID", "SALEDATE", "PREDPRICE"]].pivot_table(index="SALEDATE", values="PREDPRICE", columns="PARID")
# value-weighted monthly return across the parcel universe
universe_mth_ret = universe_pivot.pct_change()
universe_mth_wgt = universe_pivot.div(universe_pivot.sum(axis=1), axis=0).shift(1)
index_mth_ret = (universe_mth_ret * universe_mth_wgt).sum(axis=1)
# compound returns into an index based at 100
index_mth = 100 * (index_mth_ret + 1).cumprod()
# del universe_df
# smooth the signals using Henderson filter
smoothed_index_mth = Henderson(index_mth, 5)
# decompose signals into level, trend, seasonality, noise
# https://machinelearningmastery.com/decompose-time-series-data-trend-seasonality/
result = seasonal_decompose(smoothed_index_mth, model='additive',
                            freq=12, extrapolate_trend='freq')
trend, seasonality, noise = result.trend, result.seasonal, result.resid
# save index
trend.to_csv("../clean_data/acounty_index.csv")
# save decomposed time series
fig = result.plot()
fig.set_size_inches(15, 9)
fig.savefig("%s/timeseries_decompose.png" % image_path)
plt.close()
# read the ZHVI index
ZHVI = pd.read_csv("../clean_data/County_zhvi_uc_sfr_tier_0.33_0.67_sm_sa_mon.csv").\
    query("RegionName == 'Allegheny County'").iloc[0].iloc[9:]
ZHVI = pd.Series(ZHVI.values.astype(float), index=ZHVI.index)
ZHVI.index = pd.to_datetime(ZHVI.index)
# rescale comparison series so all indices agree at 1996-01-31
scaler = trend["1996-01-31"] / ZHVI["1996-01-31"]
ZHVI = ZHVI * scaler
scaler_median = trend["1996-01-31"] / median_prc["1996-01-31"]
scaler_mean = trend["1996-01-31"] / mean_prc["1996-01-31"]
scaled_median = scaler_median * median_prc
scaled_mean = scaler_mean * mean_prc
# plot my home value index overlaying the index from mean
plt.figure(figsize=(12, 6))
plt.plot(scaled_mean["1990-01-31":], alpha=0.5, label="MeanHVI")
plt.plot(trend, label="YMHVI")
plt.axvline(x=pd.to_datetime("2008-12-31"), color="red", alpha=0.5, linestyle="--", linewidth=3)
plt.axvline(x=pd.to_datetime("2020-02-28"), color="blue", alpha=0.5, linestyle="--", linewidth=3)
plt.ylabel("Home Value Index")
plt.legend(loc="best")
plt.ylim((95, 300))
plt.savefig("%s/index.png" % image_path)
plt.close()
# plot my home value index overlaying the index from mean
plt.figure(figsize=(12, 6))
plt.plot(scaled_mean["1990-01-31":], alpha=0.5, label="MeanHVI")
plt.plot(trend, label="YMHVI")
plt.plot(ZHVI, label="ZHVI")
plt.axvline(x=pd.to_datetime("2008-12-31"), color="red", alpha=0.5, linestyle="--", linewidth=3)
plt.axvline(x=pd.to_datetime("2020-02-28"), color="blue", alpha=0.5, linestyle="--", linewidth=3)
plt.ylabel("Home Value Index")
plt.ylim((95, 300))
plt.legend(loc="best")
plt.savefig("%s/index_com_zillow.png" % image_path)
plt.close()
# universe_df.to_pickle("../data/prediction.pkl")
|
# Ask for the user's name and greet them ("Wellcome" typo preserved -- it is
# part of the program's output).
name = input("What is your name : ")
greeting = "Wellcome " + name + " in the python course"
print(greeting)
# Author: Jyotika Bahuguna: -j.bahuguna@fz-juelich.de
from NeuroTools.parameters import ParameterSet
from NeuroTools.parameters import ParameterRange
from NeuroTools.parameters import ParameterTable
from NeuroTools.parameters import ParameterSpace
import NeuroTools.signals as signal
import numpy,shelve,pylab,os
def get_parameters():
    """Build and return the NeuroTools ParameterSpace for the simulation.

    Groups: neuron membrane/synapse constants, initial-condition ranges,
    simulation timing, population sizes, connectivity and synaptic weights.
    Units follow NEST conventions -- presumably mV/ms/pF/nS; confirm against
    the model definitions.
    """
    p = ParameterSpace({})
    # Parameters for neuronal features
    p.outpath = '.'
    p.vm = -80.
    # spike thresholds for the three cell types
    p.th1 = -45.
    p.th2 = -54.
    p.th3 = -45.
    # synaptic time constants (excitatory / inhibitory, two variants each)
    p.tau_synE1 = 0.3
    p.tau_synE2 = 0.3
    p.tau_synI1 = 2.
    p.tau_synI2 = 2.
    # reversal potentials
    p.E_ex = 0.
    p.E_in1 = -64.
    p.E_in2 = -76.
    p.ie = 0.
    p.cm1 = 192. # For MSN (Gertler 2008)
    p.gL11 = 8. # From Gertler, D1 and D2 1/(124.4Mohm) and 1/(154.83Mohm)
    p.gL12 = 6.
    p.cm = 200. # For MSN (Wolf 2005)
    p.cm_fsi = 500. # For MSN (Wolf 2005)
    p.gL1 = 12.5 # For MSN (Wolf 2005)
    p.cm2 = 157.
    p.gL2 = 25.
    p.tref = 2.
    # initial membrane-potential ranges (low/high) per cell type
    p.vi1low = -80.
    p.vi2low = -80.
    p.vi3low = -80.
    p.vi1hi = -45.
    p.vi2hi = -54.
    p.vi3hi = -45.
    # Parameters for running
    p.timestep = 0.1
    p.min_delay = 0.1
    p.max_delay = 50.
    p.runtime = 500.
    p.num01 = 150 # Neuron population in the cortex that recieve correlated input
    p.num02 = 1
    p.num1 = 4000 # Pair of neurons in MSN ( inhibitory ) which recieve input from cortex The ratio of cortex::MSn is 10:1
    # NOTE(review): integer division under Python 2 yields 2000; under
    # Python 3 this becomes 2000.0 -- confirm the intended interpreter.
    p.numAll = p.num1/2
    p.numFSI = 80
    p.p_copy = 0.03
    p.nc21 = 10
    # connection probabilities and delays
    p.prob11 = 0.23
    p.delay11 = 1.
    p.delay12 = 4.
    p.delay21 = 1.
    p.delay22 = 0.5
    # synaptic weights; inhibitory weights scale from minInh
    p.j01 = 3.1
    p.j02 = 0.55
    minInh = -0.5
    p.jd1d1 = minInh
    p.jd2d2 = minInh*2.0
    p.jd1d2 = minInh*2.0*1.21
    p.jd2d1 = minInh*2.0*1.21*1.32
    p.jfsi = -2.6
    p.j21 = -1.0 # New value , IPSP Koos1999
    # input-rate sweep
    p.Rate = numpy.arange(50.,4560.,500.)
    return p
|
def TwoSum(my_list, target):
    """Brute-force two-sum: return [i, j] for the first pair of indices
    (scanning i ascending, then j > i) whose values sum to target,
    or [-1] when no such pair exists."""
    for first in range(len(my_list)):
        for second in range(first + 1, len(my_list)):
            if my_list[first] + my_list[second] == target:
                return [first, second]
    return [-1]
def TwoSum2(my_list, target):
    """Hash-map two-sum: single pass, O(n). Returns [i, j] (i < j) for the
    first index j whose complement was seen earlier, or -1 when none exists."""
    seen = {}
    for idx, value in enumerate(my_list):
        complement = target - value
        if complement in seen:
            return [seen[complement], idx]
        # later duplicates overwrite earlier ones, matching the original
        seen[value] = idx
    return -1
|
import numpy as np
import cv2
# Fixed model input dimensions in pixels.
IMAGE_HEIGHT=66
IMAGE_WIDTH=200
def clahe_image(image):
    # Apply CLAHE (contrast-limited adaptive histogram equalisation) to the
    # brightness channel, then convert back to RGB.
    # NOTE(review): despite the 'yuv_img' name, the conversion is RGB->HSV,
    # so equalisation runs on the HSV value channel, not YUV luma --
    # confirm this is intentional.
    yuv_img = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
    clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(4,4))
    yuv_img[:,:,2]= clahe.apply(yuv_img[:,:,2])
    n_img = cv2.cvtColor(yuv_img,cv2.COLOR_HSV2RGB)
    return n_img
def resize(image):
    """Scale `image` to the fixed model input size IMAGE_WIDTH x IMAGE_HEIGHT."""
    return cv2.resize(image, dsize=(IMAGE_WIDTH, IMAGE_HEIGHT))
def crop(image):
    """Return the frame with the top 60 and bottom 25 rows removed.

    NOTE(review): row meanings assumed (sky / car hood in a driving-camera
    frame) -- confirm with the data source.
    """
    return image[60:-25]
def to_yuv(image):
    # NOTE(review): the name says YUV but the flag is COLOR_RGB2HSV, so this
    # actually converts to HSV -- confirm whether COLOR_RGB2YUV was intended
    # before changing anything, since a trained model may depend on it.
    yuv_img = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
    return yuv_img
def preprossing(image):
    """Full preprocessing pipeline: crop, resize, then colourspace conversion."""
    return to_yuv(resize(crop(image)))
|
# Exercise 2.1 - Book
# Operator-precedence practice: each line evaluates and prints an expression.
print(10 + 20 * 30)  # * binds tighter than +: 10 + 600 = 610
print(4 ** (1/2) / 30)  # sqrt(4) divided by 30
print((9 ** (1/4)) * 6 - 1)  # fourth root of 9, times 6, minus 1
|
import base64
import json
import logging
import requests
import jwt
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
from jwt.exceptions import InvalidTokenError
# Default PyJWT validation options for Azure AD bearer tokens: all standard
# time claims are verified and required; the audience check is off by default.
_default_options = {
    "verify_signature": True,
    "verify_exp": True,
    "verify_nbf": True,
    "verify_iat": True,
    "verify_aud": False,
    "require_exp": True,
    "require_iat": True,
    "require_nbf": True,
    "algorithms": [
        # Microsoft Azure Active Directory algorithm
        "RS256"
    ],
}
def validate(jwt_token: str, identity_provider_url: str, **options) -> (dict, dict):
    """
    Validate base64 encoded token and return JSON header and body as a tuple.
    :param jwt_token: JWT Token (base64 encoded) as provided in Bearer header.
    :param identity_provider_url: URL to fetch keys from (will usually end with /common/discovery/keys)
    :param verify_signature: Default to True
    :param verify_exp: Check token expiry. Default to True
    :param verify_nbf: Default to True
    :param verify_iat: Default to True
    :param verify_aud: Default to False
    :param require_exp: Default to True
    :param require_iat: Default to True
    :param require_nbf: Default to True
    :param algorithms: Default to ["RS256"]
    :return: (json_header, json_body) tuple of decoded dicts; the
        signature and claims are verified against the IdP's keys first.
    :raises InvalidTokenError
    :raises InvalidKeyError
    """
    json_header, json_body = decode(jwt_token)
    _validate_json_token(jwt_token, json_header, options, identity_provider_url)
    return json_header, json_body
def decode(jwt_token: str) -> (dict, dict):
    """
    Split a compact JWT and return its JSON-decoded (header, body) tuple.

    The signature segment is ignored here; cryptographic verification
    happens separately in _validate_json_token.
    :raises InvalidTokenError: on empty input or fewer than two dots.
    """
    if not jwt_token:
        raise InvalidTokenError("JWT Token is mandatory.")
    if jwt_token.count(".") < 2:
        raise InvalidTokenError(
            "Invalid JWT Token (header, body and signature must be separated by dots)."
        )
    header_part, body_part, _signature = jwt_token.split(".", maxsplit=2)
    return _to_json(header_part), _to_json(body_part)
def _validate_json_token(
    jwt_token: str, json_header: dict, options: dict, identity_provider_url: str
):
    """Verify ``jwt_token``'s signature and claims with PyJWT.

    Caller-supplied options override the module defaults; "algorithms"
    is popped out because jwt.decode takes it as a separate argument.
    """
    public_key = _get_public_key(json_header, identity_provider_url)
    logging.debug(f"Public key: {public_key}")
    merged = {**_default_options, **options}
    algorithms = merged.pop("algorithms")
    jwt.decode(jwt_token, key=public_key, algorithms=algorithms, options=merged)
def _get_public_key(json_header: dict, identity_provider_url: str):
    """Fetch the X.509 cert matching the token's ``kid`` and return its public key.

    Returns a ``cryptography`` public-key object (suitable as the
    ``key=`` argument of ``jwt.decode``), NOT a string — the original
    ``-> str`` annotation was incorrect and has been removed.

    :raises InvalidTokenError: if the key id is unknown or the IdP is unreachable.
    """
    key_identifier = json_header.get("kid", "not provided")
    # TODO cache this — otherwise one HTTP round-trip per validation
    x5c = _request_x5c(key_identifier, identity_provider_url)
    # Wrap the bare base64 DER blob in PEM armour so it can be parsed.
    certificate_text = (
        b"-----BEGIN CERTIFICATE-----\n"
        + x5c.encode("utf-8")
        + b"\n-----END CERTIFICATE-----"
    )
    certificate = load_pem_x509_certificate(certificate_text, default_backend())
    return certificate.public_key()
def _request_x5c(key_identifier: str, identity_provider_url: str) -> str:
    """Return the first x5c certificate for ``key_identifier`` from the
    identity provider's JWKS endpoint."""
    response = requests.get(identity_provider_url)
    # A falsy Response means a non-2xx status code.
    if not response:
        raise InvalidTokenError(f"Identify provider cannot be reached: {response.text}")
    published = response.json().get("keys", [])
    certificates = {key["kid"]: key["x5c"][0] for key in published}
    if key_identifier not in certificates:
        raise InvalidTokenError(
            f"{key_identifier} is not a valid key identifier. Valid ones are {list(certificates)}."
        )
    return certificates[key_identifier]
def _to_json(base_64_json: str) -> dict:
    """Base64-decode one JWT segment and parse it as JSON."""
    raw = _decode_base64(base_64_json)
    # unicode_escape mirrors the original behaviour; note it can mangle
    # non-ASCII payloads — kept as-is for compatibility.
    return json.loads(raw.decode("unicode_escape"))
def _decode_base64(base64_encoded_string: str) -> bytes:
"""
Decode base64, padding (with = character) being optional.
:param base64_encoded_string: Base64 data as an ASCII byte string
:returns: The decoded byte string.
"""
missing_padding = len(base64_encoded_string) % 4
if missing_padding != 0: # Pad with extra = characters at the end
base64_encoded_string += "=" * (4 - missing_padding)
return base64.b64decode(base64_encoded_string, altchars="_+")
|
def CtoF(input_celsius):
    """Convert Celsius to Fahrenheit: F = C * 9/5 + 32."""
    return 32 + input_celsius * 1.8
def FtoC(input_fahrenheit):
    """Convert Fahrenheit to Celsius: C = (F - 32) / 1.8."""
    delta = input_fahrenheit - 32
    return delta / 1.8
# Interactive entry point: ask which unit the user is converting FROM.
c_or_f = input("What type of number do you want to convert? (Type C or F)")
if c_or_f.lower() == 'c':
    # Celsius -> Fahrenheit, truncated to an int for display.
    print(int(CtoF(float(input("Enter a temperature in celsius: ")))))
elif c_or_f.lower() == 'f':
    # Fahrenheit -> Celsius, rounded to 2 decimal places.
    print(round(FtoC(float(input("Enter a temperature in fahrenheit: "))),2))
else:
    print("Did not understand that unit of temperature.")
|
#!/usr/bin/env python3
import os
from ..lib import utils, doc_utils
'''
desc: infomation of linux
per-function infomation in Linux Core API page
- basic description, Parameters, Description, Note, Context, Return
concern: may have some absent situations
'''
def preprocess_linux_apidoc(info, preprocess_dir):
    """Parse one function's doc block from the Linux Core API text dump.

    ``info`` holds the lines for a single function; its first line is the
    C definition. The parsed fields are written to
    ``<preprocess_dir>/<func_name>.json``.

    Returns the function name, or "" when the definition cannot be parsed.
    """
    definition = ""
    description = {}
    desc_keyword = None   # which output field subsequent lines belong to
    desc_status = False   # True once any section header line was seen
    definition = info[0]
    # outdated: [type, func_name, arg1, arg2, ..., argn]
    # now: {"func_name": .., "func_type": .., "args_name": .., "args_type": ..}
    description = doc_utils.get_definition(definition)
    if description['func_name'] == "":
        return ""
    for line in info[1:]:
        # "Description", "Note" and "Context" sections all accumulate
        # into the same 'desc' field; "Return" goes to 'ret_desc'.
        if line == "Description\n" or line == "Note\n" or line == "Context\n":
            desc_keyword = "desc"
            if not desc_status:
                desc_status = True
        elif line == "Return\n":
            desc_keyword = "ret_desc"
            if not desc_status:
                desc_status = True
        # xxx: what about Parameters?
        elif desc_status:
            # Blank lines are kept verbatim; other lines are folded onto
            # one line by turning the newline into a space.
            content = line.replace("\n", " ") if line != "\n" else line
            if desc_keyword not in description:
                description[desc_keyword] = content
            else:
                description[desc_keyword] += content
    # clean text
    for key in description:
        if key in ["desc", "ret_desc"]:
            description[key] = doc_utils.clean_text(description[key])
    func_feature_file = os.path.join(preprocess_dir, f"{description['func_name']}.json")
    doc_utils.dump_json(func_feature_file, description)
    return description['func_name']
# doc_dir - the storage directory of data
def handle_linux(doc_dir, outdir):
    """Split the Linux API doc dump into per-function JSON files.

    Function blocks in linux_api.txt are separated by lines starting
    with 20 '=' characters. Returns the list of parsed function names.
    """
    print("==================================================")
    print("==== Preprocessing Linux info =====")
    ''' initialization '''
    preprocess_dir = os.path.join(outdir, "linux")
    utils.mkdir(preprocess_dir)
    doc_file = os.path.join(doc_dir, "linux/linux_api.txt")
    doc_lines = doc_utils.read_docfile(doc_file)
    ''' preprocess documentation '''
    total_apis = []
    func_info = []
    for line in doc_lines:
        # A run of 20 '=' marks the start of a new function block.
        if line[:20] == "=" * 20:
            if func_info != []:
                func_name = preprocess_linux_apidoc(func_info, preprocess_dir)
                if func_name != "":
                    total_apis.append(func_name)
            func_info = []
        elif line != "\n":
            func_info.append(line)
    # Flush the final block (the file has no trailing separator).
    if func_info != []:
        func_name = preprocess_linux_apidoc(func_info, preprocess_dir)
        if func_name != "":
            total_apis.append(func_name)
    print(f"Total number of functions: {len(total_apis)}")
    print("==================================================")
    return total_apis
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import numpy as np
from gaussian_color import *
from train_color import *
from util import *
from sklearn import metrics
# Highway sequences configuration, range 1050 - 1350
highway_path_in = "./highway/input/"
highway_path_gt = "./highway/groundtruth/"
# Fall sequences configuration, range 1460 - 1560
fall_path_in = "./fall/input/"
fall_path_gt = "./fall/groundtruth/"
# Traffic sequences configuration, range 950 - 1050
traffic_path_in = "./traffic/input/"
traffic_path_gt = "./traffic/groundtruth/"
# Group sequences
path_tests = [highway_path_in,fall_path_in,traffic_path_in]
path_gts = [highway_path_gt,fall_path_gt,traffic_path_gt]
first_frames = [1050,1460,950]
midle_frames = [1199,1509,999]
last_frames = [1349,1559,1049]
# Define color spaces ['RGB','HSV','YCrCb']
colorSpaces=['YCrCb', 'YCrCb', 'RGB']
# Thresholds on gaussian for each experiment:
alphas = np.arange(0, 5, 0.5)
dataset = [0, 1, 2]
# Connectivity to fill holes [4, 8]
connectivity = '4'
# Pixels of area filtering
minAreaPixels = [10, 20, 40, 60, 80, 100, 120, 140, 160]
# Define the morphology
ac_morphology = 1  # 1 = apply morphology ; 0 = not to apply morphology
SE1size = 9
SE2size = 11
# Accumulators: rows index the dataset, columns the alpha value
# (for AUC: the min-area value).
# BUGFIX: np.int / np.float were deprecated aliases removed in
# NumPy 1.24 — use the Python builtins, which give the same dtypes.
FP = np.zeros((3, len(alphas)), int)
FN = np.zeros((3, len(alphas)), int)
TP = np.zeros((3, len(alphas)), int)
TN = np.zeros((3, len(alphas)), int)
P = np.zeros((3, len(alphas)), float)
R = np.zeros((3, len(alphas)), float)
F1 = np.zeros((3, len(alphas)), float)
AUC = np.zeros((3, len(minAreaPixels)), float)
if __name__ == "__main__":
    # Hole filling
    # Post process with hole filling
    # Try different connectivities: 4 and 8
    # Report with AUC & gain for each sequences.
    # Provide qualitative interpretation..
    # NOTE(review): `plt`, `training_color` and `gaussian_color` are not
    # imported by name in this file — presumably they arrive via the
    # wildcard imports (gaussian_color / train_color / util). Confirm
    # `util` re-exports matplotlib.pyplot as `plt`, otherwise the
    # plotting below raises NameError.
    print("Evaluating model using {} pixels as min area".format(minAreaPixels))
    for cI, colorSpace in enumerate(colorSpaces):
        print("Starting gaussian modelling dataset num: "+str(dataset[cI])+" color space: "+colorSpace+"...")
        alpha = alphas[cI]
        # Sweep min-area filter sizes; for each, sweep alpha to build a
        # precision/recall curve and integrate it into an AUC.
        for areaI in range(len(minAreaPixels)):
            for aI, alpha in enumerate(alphas):
                minAreaP=minAreaPixels[areaI]
                mean_matrix, std_matrix = training_color(path_tests[dataset[cI]], first_frames[dataset[cI]], midle_frames[dataset[cI]], alpha, colorSpace);
                FP[dataset[cI],aI], FN[dataset[cI],aI], TP[dataset[cI],aI], TN[dataset[cI],aI], P[dataset[cI],aI], R[dataset[cI],aI], F1[dataset[cI],aI] = gaussian_color(path_tests[dataset[cI]], path_gts[dataset[cI]], midle_frames[dataset[cI]]+1, last_frames[dataset[cI]], mean_matrix, std_matrix, alpha, colorSpace,connectivity, minAreaP, ac_morphology, SE1size, SE2size)
                print("Computed gaussian modelling dataset num: "+str(dataset[cI])+" color space: "+colorSpace+" with alpha: "+str(alpha))
            AUC[dataset[cI], areaI]=metrics.auc(R[dataset[cI], :], P[dataset[cI], :])
            print("Starting gaussian modelling dataset num: "+str(dataset[cI])+" alpha: "+str(alpha)+" min_area: "+str(minAreaP)+"... done. AUC: "+str(AUC[dataset[cI], areaI])+"\n")
    # Plot AUC as a function of the min-area filter for each dataset.
    for i in np.arange(P.shape[0]):
        plt.plot(minAreaPixels, AUC[i,:],label='Dataset'+str(i)+'_'+colorSpaces[i])
    plt.xlabel('minArea')
    plt.ylabel('Precision')
    #plt.legend()
    plt.savefig('aucvsPminArea.png')
|
from django.contrib import admin
from .models import *
# Register your models here.
# Admin classes customizing the changelist columns for each model,
# registered immediately after their definition.
class Analyse_FFAdmin(admin.ModelAdmin):
    list_display = ("ig_user","instagram_account","follower_update_time","following_update_time")
admin.site.register(Analyse_FF,Analyse_FFAdmin)
class Instagram_Accounts_AnalyseAdmin(admin.ModelAdmin):
    list_display = ("instagram_account","update_time")
admin.site.register(Instagram_Accounts_Analyse,Instagram_Accounts_AnalyseAdmin)
# The follow/like/comment action admins intentionally share the same
# column layout.
class Follow_ActionsAdmin(admin.ModelAdmin):
    list_display = ("ig_user","source","source_type","relationship","status","instagram_account","update_time")
admin.site.register(Follow_Actions,Follow_ActionsAdmin)
class Like_ActionsAdmin(admin.ModelAdmin):
    list_display = ("ig_user","source","source_type","relationship","status","instagram_account","update_time")
admin.site.register(Like_Actions,Like_ActionsAdmin)
class Comment_ActionsAdmin(admin.ModelAdmin):
    list_display = ("ig_user","source","source_type","relationship","status","instagram_account","update_time")
admin.site.register(Comment_Actions,Comment_ActionsAdmin)
class AssistantsAdmin(admin.ModelAdmin):
    list_display = ("assistant_type","source_type","relationship","instagram_account","activity_status","update_time","queue")
admin.site.register(Assistants,AssistantsAdmin)
class Post_DatasAdmin(admin.ModelAdmin):
    list_display = ("instagram_account","source","likers","commenters","source_type")
admin.site.register(Post_Datas,Post_DatasAdmin)
class Api_ErrorAdmin(admin.ModelAdmin):
    list_display = ("instagram_account","assistant","api_error_mean","error_source","update_time")
admin.site.register(Api_Error,Api_ErrorAdmin)
class Unfollow_ActionsAdmin(admin.ModelAdmin):
    list_display = ["ig_user","status","instagram_account","update_time"]
admin.site.register(Unfollow_Actions,Unfollow_ActionsAdmin)
# The remaining models use the default ModelAdmin.
admin.site.register(Challenge_User)
admin.site.register(Volta)
admin.site.register(Instagram_Accounts)
admin.site.register(Assistants_Settings)
admin.site.register(IG_Users)
admin.site.register(Api_Settings)
admin.site.register(Api_Settings_web_api)
admin.site.register(User_Sources)
admin.site.register(Hashtag_Sources)
admin.site.register(Location_Sources)
admin.site.register(White_List_Users)
admin.site.register(White_List_Assistant)
admin.site.register(SocinstaProxy)
admin.site.register(Instagram_Apı_Settings)
admin.site.register(Instagram_Api_User_Agent)
|
# print() separates positional arguments with spaces.
print(1, 2,3,4)
print("a" + "b" + "c"+ "d")
# '%' Formating
print("%d %d %d" %(1, 2, 3))
print("%s %s" % ("1","2"))
# str.format
age = 36;
print("I'm {0} years old." .format(age))
# Padding: reserve field width and fix the number of decimal places.
# NOTE(review): "Priduct" is a typo ("Product") in the runtime output
# string; left unchanged here because it is program output, not a comment.
print("Priduct: %5s, Price per unit: %.5f." %("apple", 5.243))
# Named placeholders using %(name)s-style mapping keys.
print("Product: %(name) 10s" %{ "name":"apple"})
|
#!/usr/bin/python3
"""
Copyright (c) 2015, Joshua Saxe
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name 'Joshua Saxe' nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JOSHUA SAXE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import networkx
from networkx.drawing.nx_pydot import write_dot
from relationships import formrelations
def network(malwarepaths, thresholddict, ngram, outputdotfile):
    """
    Build and persist a malware-relationship graph.

    Relationships come from printable-string features (non-exe samples)
    and dynamic API-call n-grams (exe samples); the resulting graph is
    written to ``outputdotfile`` in Graphviz DOT format.

    Args:
        malwarepaths : absolute path of all malware samples
        thresholddict: dict containing Jaccard Index threshold values
        ngram        : ngram api sequence
        outputdotfile: name of output dot file
    Returns:
        None
    """
    relationship_graph = formrelations(
        malwarepaths, thresholddict, ngram, networkx.Graph()
    )
    write_dot(relationship_graph, outputdotfile)
|
import os
import random
import logging
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import numpy as np
from apex import amp
from typing import Dict
# pylint:disable=no-member
logger = logging.getLogger(__name__)
def set_seed(config):
    """Seed every RNG in use (random, numpy, torch — and CUDA when GPUs
    are configured) from ``config.seed`` for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(config.seed)
    if config.n_gpu > 0:
        torch.cuda.manual_seed_all(config.seed)
def send_to_device(data, devide):
    """Move ``data`` onto the given device and return it.

    (Parameter name 'devide' is a typo for 'device' but is kept: callers
    may pass it by keyword.)
    """
    return data.to(devide)
class SimpleTrainer():
    """Minimal training loop: epochs over a dataloader with gradient
    accumulation and optional apex.amp loss scaling."""
    def __init__(self, config, model, optimizer, dataloader):
        self.config = config
        self.dataloader = dataloader
        self.model = model
        self.optimizer = optimizer
        # Resume bookkeeping; refreshed by update_progress_states().
        self.progress_states = {
            "global_step": 0,
            "epochs_trained": 0,
            "steps_trained_in_current_epoch": 0,
        }
    def update_progress_states(self):
        "update self.progress_states according to checkpoint"
        pass
    def fit(self):
        """Train from the last recorded epoch up to config.num_epochs."""
        self.update_progress_states()
        for _ in range( self.progress_states['epochs_trained'], self.config.num_epochs ):
            self.train_epoch()
    def train_iter(self, batch: Dict[str, torch.Tensor]) -> None:
        """Forward + backward for one batch (no optimizer step here).

        BUGFIX: the original annotation ``Dict[torch.Tensor]`` raised
        TypeError at import time — typing.Dict needs key AND value types.
        The return annotation is now None, matching the actual behaviour
        (the method returns nothing).
        """
        # set training mode
        self.model.train()
        batch = send_to_device(batch, self.config.device)
        results = self.model(batch)
        loss = results["loss"]
        # Scale so accumulated gradients average over the micro-steps.
        if self.config.gradient_accumulation_steps > 1:
            loss = loss / self.config.gradient_accumulation_steps
        # backward loss
        # NOTE(review): 'distributed' gates apex amp loss scaling here —
        # presumably fp16 is only used together with DDP; confirm.
        if self.config.distributed:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
    def train_epoch(self):
        """One pass over the dataloader, stepping the optimizer every
        ``gradient_accumulation_steps`` batches."""
        count = 0
        self.optimizer.zero_grad()
        for data in self.dataloader:
            count += 1
            self.train_iter(data)
            if count % self.config.gradient_accumulation_steps == 0:
                self.optimizer.step()
                # BUGFIX: was ``self.zero_grad()`` — SimpleTrainer has no
                # zero_grad method (AttributeError); the optimizer's
                # gradients are what must be cleared between steps.
                self.optimizer.zero_grad()
|
from Jumpscale import j
import netaddr
def chat(bot):
    """Chatflow: deploy a user-supplied flist as a container on the grid.

    Asks for the flist URL, SSH public key, optional environment
    variables and IPv4/IPv6 preference, reserves network + container
    capacity for one day, then hands the user a wireguard config (and a
    coreX URL when interactive access was requested).
    """
    user_info = bot.user_info()
    name = user_info["username"]
    email = user_info["email"]
    ips = ["IPv6", "IPv4"]
    env = dict()
    expiration = j.data.time.epoch + (60 * 60 * 24)  # for one day
    explorer = j.clients.explorer.explorer
    if not email:
        raise j.exceptions.BadRequest("Email shouldn't be empty")
    form = bot.new_form()
    flist = form.string_ask(
        "Please add the link to your flist to be deployed. For example: https://hub.grid.tf/usr/example.flist"
    )
    pub_key = None
    while not pub_key:
        pub_key = bot.string_ask(
            "Please add your public ssh key, this will allow you to access the deployed container using ssh. Just copy your key from ~/.ssh/id_rsa.pub"
        )
    env_vars = form.string_ask(
        """To set environment variables on your deployed container, enter comma-separated variable=value
            For example: var1=value1, var2=value2.
            Leave empty if not needed"""
    )
    form.ask()
    inetractive = bot.single_choice(
        "Would you like access to your container through the web browser (coreX)?", ["YES", "NO"]
    )
    env.update({"pub_key": pub_key.value})
    if env_vars.value:
        # Parse "k1=v1, k2=v2" into a dict; malformed items are ignored.
        var_list = env_vars.value.split(",")
        var_dict = {}
        for item in var_list:
            splitted_item = item.split("=")
            if len(splitted_item) == 2:
                var_dict[splitted_item[0]] = splitted_item[1]
        env.update(var_dict)
    # create new reservation
    reservation = j.sal.zosv2.reservation_create()
    identity = explorer.users.get(name=name, email=email)
    ip_version = bot.single_choice("Do you prefer to access your 3bot using IPv4 or IPv6? If unsure, chooose IPv4", ips)
    node_selected = j.sal.chatflow.nodes_get(1, ip_version=ip_version)[0]
    reservation, config = j.sal.chatflow.network_configure(
        bot, reservation, [node_selected], customer_tid=identity.id, ip_version=ip_version
    )
    ip_address = config["ip_addresses"][0]
    conatiner_flist = flist.value
    storage_url = "zdb://hub.grid.tf:9900"
    # BUGFIX: the original else-branch assigned ``inetractive = False``
    # (a typo), leaving ``interactive`` undefined and raising NameError
    # whenever the user answered "NO".
    interactive = inetractive.value == "YES"
    # create container
    cont = j.sal.zosv2.container.create(
        reservation=reservation,
        node_id=node_selected.node_id,
        network_name=config["name"],
        ip_address=ip_address,
        flist=conatiner_flist,
        storage_url=storage_url,
        env=env,
        interactive=interactive,
    )
    resv_id = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
    res = f"# Container has been deployed successfully: your reservation id is: {resv_id} "
    bot.md_show(res)
    filename = "{}_{}.conf".format(name, resv_id)
    res = """
# Use the following template to configure your wireguard connection. This will give you access to your 3bot.
## Make sure you have wireguard ```https://www.wireguard.com/install/``` installed:
## ```wg-quick up /etc/wireguard/{}```
Click next
to download your configuration
            """.format(
        filename
    )
    res = j.tools.jinja2.template_render(text=j.core.text.strip(res), **locals())
    bot.md_show(res)
    res = j.tools.jinja2.template_render(text=config["wg"], **locals())
    bot.download_file(res, filename)
    if interactive:
        res = "# Open your browser at ```{}:7681```".format(ip_address)
        res = j.tools.jinja2.template_render(text=res, **locals())
        bot.md_show(res)
    else:
        res = "# Your IP is  ```{}```".format(ip_address)
        res = j.tools.jinja2.template_render(text=res, **locals())
        bot.md_show(res)
|
# Generated by Django 2.2.11 on 2021-07-17 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.11): allow NULL and clamp values to the
    0–5000 range for the daily-round dialysis balance fields."""
    dependencies = [
        ('facility', '0265_auto_20210712_1133'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dailyround',
            name='dialysis_fluid_balance',
            field=models.IntegerField(default=None, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5000)]),
        ),
        migrations.AlterField(
            model_name='dailyround',
            name='dialysis_net_balance',
            field=models.IntegerField(default=None, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5000)]),
        ),
    ]
|
# Manually start the process of making markdown gallery.
import datetime, glob, os, sys
import gallery_to_markdown as output_format
THUMBNAIL_PREFIX = 't_'
THUMBNAILS_DIR = 't'
EXTENSIONS = ['.jpg', '.jpeg', '.png', '.gif']
IMAGE_TYPE = {
'.jpg': 'JPEG',
'.jpeg': 'JPEG',
'.gif': 'GIF',
'.png': 'PNG'
}
class Gallery(object):
    """Manipulate images in this gallery.

    Collects (image, thumbnail) path pairs for a directory and hands
    them to the markdown writer (``output_format.save_file``).
    """
    def __init__(self, gallery_path):
        # Root directory of the gallery being processed.
        self.gallery_path = gallery_path
        self.thumbnails = []
        self.images = []
        # (image, thumbnail) gallery-relative path tuples for the writer.
        self.image_thumb_pair = []
    def process_gallery(self, absolute_path, path_prefix_len):
        """Entry point that controls the flow.

        path_prefix_len characters are stripped from the front of each
        absolute path to produce relative paths in the output markdown.
        """
        self.path_prefix_len = path_prefix_len
        self.list_images()
        output_format.save_file(absolute_path, self.image_thumb_pair)
    def list_thumbnails(self, thumbnails_path):
        # Ensure the thumbnails directory exists; return any existing
        # thumbnails (empty list when the directory was just created).
        if not os.path.exists(thumbnails_path):
            try:
                os.makedirs(thumbnails_path)
            except OSError:
                # Directory may have appeared concurrently; treat as empty.
                pass
            return []
        thumbnails = glob.glob(thumbnails_path + os.sep + '*')
        return thumbnails
    def list_images(self):
        """Collect gallery images and pair each with its thumbnail name."""
        self.thumbnails = self.list_thumbnails(
            os.path.join(self.gallery_path, THUMBNAILS_DIR))
        images = glob.glob(self.gallery_path + os.sep + '*')
        images = filter(os.path.isfile, images)
        for image in images:
            base, ext = os.path.splitext(image)
            if ext.lower() in EXTENSIONS:
                tmp_thumb = self.create_thumbnail_name(image)
                self.image_thumb_pair.append((
                    image[self.path_prefix_len:],
                    tmp_thumb[self.path_prefix_len:]
                ))
    def create_thumbnail_name(
        self,
        image,
        thumb_dir=THUMBNAILS_DIR,
        prefix=THUMBNAIL_PREFIX
    ):
        """Return <image_dir>/<thumb_dir>/<prefix><filename> for *image*."""
        image_path, image_name = os.path.split(image)
        thumbnail_name = os.path.join(
            image_path,
            thumb_dir,
            prefix + image_name
        )
        return thumbnail_name
WATCH_DIR_LEN = len(os.getcwd())
def find_root(pathname, dir_len=WATCH_DIR_LEN):
    # Returns (parent_dir, last_component) of *pathname*.
    # NOTE(review): dir_len is accepted but never used.
    head, tail = os.path.split(pathname)
    return head, tail
# Split cwd into (parent, gallery name); the parent's length is what is
# stripped from image paths to make them gallery-relative.
WATCH_DIRECTORY, gallery_name = find_root(os.getcwd())
WATCH_DIR_LEN = len(WATCH_DIRECTORY)
WATCH_DIRECTORY = os.getcwd()
# NOTE(review): odd naming kept as-is; `fu` is the parent directory that
# is used as the markdown output location, `idiot` the gallery name.
fu, idiot = find_root(os.getcwd())
gallery = Gallery(WATCH_DIRECTORY)
gallery.process_gallery(fu, WATCH_DIR_LEN)
# Dummy GUI just printing date + current temp to the stdout
from mqtt_client import MQTT_Client
from hbmqtt.mqtt.constants import QOS_1
import asyncio
import tkinter as Tk
# Configuration of TOPICS and addresses
from config import *
# For exception handeling
import sys
# Just a variable used for conditional debugging prints
DEBUG = True
class GUI(MQTT_Client):
    """Tkinter front-end driven over MQTT: displays the current
    temperature and publishes a user-entered temperature setpoint."""
    def __init__(self):
        # Setup the MQTT stuff from parent
        # Initialize the MQTT_client parent class
        MQTT_Client.__init__(self)
        # Define my_topic
        #self.my_topic = [("TEMP", QOS_1)]
        self.my_topic = [TOPICS['temp']]
        # Subscribe to the topic. This is done by letting the asyncio loop run that co-routine until completion,
        # i.e. we will do that before continuing to the rest of the program.
        self.loop.run_until_complete(self.subscribe_to(self.my_topic))
        self.id = 2
    def packet_received_cb(self,topic, payload_dict):
        """
        This function will be called each time a packet is received.
        Updates the temperature display with the packet's 'Data' field.
        """
        if DEBUG:
            print("DEBUG: packet_received_cb called in dummy_gui")
            print("DEBUG: topic = {} data = {}".format(topic, payload_dict['Data']))
        # There will be several topics. So we should do a if-elif
        # structure to handle the different incoming packets.
        # We wish to display on the screen
        # First split the packet into its format (btw these things will eventually be implemented in functions)
        data = payload_dict['Data']
        self.current_temp.set(data)
    # Functions for handling button-events
    def tkinter_set_temperature_button_pressed(self):
        # Send the setpoint
        # First create a bytestring to send
        payload = b'%f' % float(self.temp_setpoint.get())
        # Then we call the async function publish_to with the right topic.
        # We use ensure_future as it is an async function and we cannot call await on it
        # since we are inside a non-async function
        asyncio.ensure_future( self.publish_to(TOPICS['temp_setpoint'], payload) )
        print("Set temperature button pressed\nSetpoint = {}".format(self.temp_setpoint.get()))
    def tkinter_setup(self):
        """Build the Tk window: current-temp label, setpoint entry, button."""
        self.root = Tk.Tk()
        self.current_temp = Tk.StringVar() #Textvariable for updating the label
        self.temp_setpoint = Tk.StringVar()
        self.label_temp = Tk.Label(self.root, textvariable=self.current_temp)
        self.label_temp.grid(row = 0, column = 1)
        self.label_description = Tk.Label(self.root, text = "Current Temperature: ")
        self.label_description.grid(row = 0, column = 0)
        self.button = Tk.Button(self.root, text="Set Temperature", command=self.tkinter_set_temperature_button_pressed)
        self.entry_temp = Tk.Entry(self.root, textvariable=self.temp_setpoint)
        self.entry_temp.grid(row=1, column=0)
        self.button.grid(row=1, column=1)
        # Poll interval (seconds) for the Tk event pump in run_tk().
        self.tk_interval = 0.01
    def run(self):
        """
        This function starts the necessary tasks and runs them in the
        event loop. The GUI itself, probably implemented in TKinter should be
        added as a task here.
        NB! The only code of importance here is the three first lines. The rest is a try to
        shutdown the process properly when the user hits CTRL+C
        """
        # Setup TKinter
        self.tkinter_setup()
        try:
            # Spawn the tasks to run concurrently
            self.loop.create_task(self.listen()) # Listen to subscribed topics
            self.loop.create_task(self.run_tk()) # Run GUI
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            self.loop.close()
    async def run_tk(self):
        """
        Run TK with asyncio
        """
        while True:
            self.root.update()
            self.root.update_idletasks()
            await asyncio.sleep(self.tk_interval) # tk_interval is set in tkinter_setup()
if __name__ == '__main__':
    # BUGFIX: the original rebound the class name (``GUI = GUI()``),
    # shadowing the class with its instance; use a distinct name.
    gui = GUI()
    gui.run()
|
# -*- coding: utf-8 -*-
import scrapy
import labsql
from selenium import webdriver
from bs4 import BeautifulSoup as bs
import time
from threading import Thread
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from selenium.webdriver.support.wait import WebDriverWait
class SohuSpider(scrapy.Spider):
    """Sogou-search crawler: walks the result pages for one fixed query,
    renders every linked page with PhantomJS and stores the HTML in SQL
    Server."""
    name = 'sohu'
    # NOTE(review): class-level attributes are shared by all instances;
    # link_list accumulates across link_parse() calls and `conn` opens
    # the database connection at import time — confirm this is intended.
    link_list = []
    conn = labsql.LabSQL('172.168.1.36', 'sohu', 'sa', 'scucc')
    web = 'https://www.sogou.com/sogou'
    link_web = 'https://www.sogou.com'
    def start_requests(self):
        # Seed with a single pre-built Sogou results URL (page 85).
        urls = [u"""https://www.sogou.com/sogou?query=%E7%95%AA%E7%A6%BA%E6%B0%B4%E6%B5%B8&pid=sogou-wsse-f880d0d6a01ba52f-&duppid=1&clusterfilter=off&page=85&ie=utf8"""]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.link_parse)
    def link_parse(self, response):
        """Collect result links, render and persist each page's HTML,
        then follow the "next page" link if present."""
        for page_url in response.css('h3.pt a::attr(href)').extract():
            self.link_list.append(self.link_web + page_url)
        for page_url in response.css('h3.vrTitle a::attr(href)').extract():
            self.link_list.append(self.link_web + page_url)
        for link in self.link_list:
            # Fresh headless browser per link with a 5 s load budget.
            driver = webdriver.PhantomJS()
            driver.set_page_load_timeout(5)
            try:
                driver.get(link)
            except:
                # NOTE(review): bare except — on timeout (or any error)
                # loading is stopped and the partial render is kept.
                driver.execute_script('window.stop()')
            html = bs(driver.page_source, 'lxml')
            link = driver.current_url
            values = [str(html), link]
            self.conn.insert("""insert into sohu_html ([html]
            ,[url]
            ) values(?,?)""",
                values)
            driver.quit()
        next_page = response.css('a.np::attr(href)').extract_first()
        if next_page is not None:
            # urljoin can auto extract your domain then append your next link, but at sohu is not formal domain, so here
            # we manual create url
            next_page = self.web + next_page
            with open('link.txt', 'a') as f:
                f.write(next_page + '\n')
                f.close()
            yield scrapy.Request(next_page, callback=self.link_parse, errback=self.errback_httpbin)
        else:
            print('searching is the end!')
            self.conn.close_connection()
    def errback_httpbin(self, failure):
        """Log request failures, distinguished by failure type."""
        self.logger.error(repr(failure))
        # in case you want to do something special for some errors,
        # you may need the failure's type:
        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
::
ipython -i CInterpolationTest.py
ipython -i CInterpolationTest.py -- --nointerpol
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.ana.proplib import PropLib
np.set_printoptions(precision=3, suppress=True)
def check_exists(xpath, msg):
    """Exit the process when *xpath* is missing, logging the command
    (*msg*) that would produce the file."""
    if os.path.exists(xpath):
        return
    log.info(" NO SUCH PATH %s run \"%s\" " % (xpath, msg) )
    sys.exit(0)
def load(ok):
    """Load the CInterpolationTest result array from $TMP and assert its
    expected shape (the 4th axis is 761 vs 39 entries — presumably
    interpolated vs raw wavelength samples; exits via check_exists with
    a hint command when the file is missing)."""
    if ok.interpol:
        path = "$TMP/InterpolationTest/CInterpolationTest_interpol.npy"
        x_shape = (123, 4, 2, 761, 4)
        msg = "CInterpolationTest"
    else:
        path = "$TMP/InterpolationTest/CInterpolationTest_identity.npy"
        x_shape = (123, 4, 2, 39, 4)
        msg = "CInterpolationTest --nointerpol"
    pass
    xpath = os.path.expandvars(path)
    check_exists(xpath, msg)
    c = np.load(xpath)
    log.info("load %s %r " % (xpath, c.shape))
    assert c.shape == x_shape, "shape mismatch expect %r found %r " % (x_shape, c.shape)
    return c
class CFProp(object):
    """
    Pairs the boundary-property table (t, from $IDPATH) with the
    CInterpolationTest output (c) for comparison::

        In [23]: cf.t.shape
        Out[23]: (123, 4, 2, 39, 4)
    """
    def __init__(self, ok):
        blib = PropLib("GBndLib")
        names = blib.names
        t = np.load(os.path.expandvars("$IDPATH/GBndLib/GBndLib.npy"))
        c = load(ok)
        # Both arrays must be indexed by the same boundary-name list.
        assert len(t) == len(names)
        assert len(t) == len(c)
        n = len(t)
        self.shape = t.shape
        self.names = names
        self.t = t
        self.c = c
        self.n = n
        self.consistency_check("t",t)
    def consistency_check(self,key,x):
        # NOTE(review): appears unfinished — the loop splits each
        # boundary name ("omat/osur/isur/imat") but performs no actual
        # check, and `chk`/`x` are unused.
        log.info(key)
        chk = {}
        for i in range(self.n):
            self.index = i
            bnd = self.names[i]
            omat,osur,isur,imat = bnd.split("/")
    def __getitem__(self, sli):
        # Records a slice for later use and returns self for chaining.
        self.sli = sli
        return self
    def __call__(self, arg):
        """Dispatch check_bnd by boundary index, name, or slice of names."""
        if type(arg) is int:
            name = self.names[arg]
        elif type(arg) is str:
            name = arg
        elif type(arg) is slice:
            return map(lambda name:self(name), self.names[arg])
        else:
            assert 0, (type(arg), "unexpected type")
        pass
        return self.check_bnd(name)
    def check_bnd(self, name):
        # NOTE(review): also appears unfinished — only unpacks the name.
        omat,osur,isur,imat = name.split("/")
if __name__ == '__main__':
    # NOTE(review): the prints at the bottom of this block use Python 2
    # statement syntax, so this file only parses under Python 2.
    ok = opticks_main()
    blib = PropLib("GBndLib")
    names = blib.names
    n = len(names)
    # Boundary name parts (omat/osur/isur/imat) as fixed-width bytes.
    nam = np.zeros( (n,4 ), dtype="|S64")
    for i,name in enumerate(names):nam[i] = name.split("/")
    cf = CFProp(ok)
    # Disabled legacy comparison code: `t`, `c`, `omat`, `g4` and `g4b`
    # are not defined in this scope, so flipping to `if 1:` would raise
    # NameError as-is.
    if 0:
        for i in range(n):
            name = names[i]
            g4_omat = np.all( t[i,blib.B_OMAT,0] == c[i,blib.B_OMAT,0] )
            g4_imat = np.all( t[i,blib.B_IMAT,0] == c[i,blib.B_IMAT,0] )
            if omat in g4:
                assert g4[omat] == g4_omat
            else:
                g4[omat] = g4_omat
            if imat in g4:
                assert g4[imat] == g4_imat
            else:
                g4[imat] = g4_imat
            if len(osur) > 0:
                g4_osur = np.all( t[i,blib.B_OSUR,0] == c[i,blib.B_OSUR,0] )
                if osur in g4:
                    assert g4[osur] == g4_osur
                else:
                    g4[osur] = g4_osur
            else:
                g4_osur = None
            pass
            if len(isur) > 0:
                g4_isur = np.all( t[i,blib.B_ISUR,0] == c[i,blib.B_ISUR,0] )
                if isur in g4:
                    assert g4[isur] == g4_isur
                else:
                    g4[isur] = g4_isur
            else:
                g4_isur = None
            pass
            if g4_omat == False:
                if not omat in g4b:
                    g4b[omat] = []
                g4b[omat].append( (i,blib.B_OMAT,0) )
            if g4_osur == False:
                if not osur in g4b:
                    g4b[osur] = []
                g4b[osur].append( (i,blib.B_OSUR,0) )
            if g4_isur == False:
                if not isur in g4b:
                    g4b[isur] = []
                g4b[isur].append( (i,blib.B_ISUR,0) )
            if g4_imat == False:
                if not imat in g4b:
                    g4b[imat] = []
                g4b[imat].append( (i,blib.B_IMAT,0) )
            print "%4d omat %25s imat %25s g4_omat %7s g4_imat %7s " % ( i, omat, imat, g4_omat, g4_imat )
            #if len(isur) > 0 or len(osur) > 0:
            #    print "%4d osur %35s isur %35s ok_osur %7s ok_isur %7s g4_osur %7s g4_isur %7s " % ( i, osur, isur, ok_osur, ok_isur, g4_osur, g4_isur )
        pass
        print "g4", g4
        for k,v in g4b.items():
            if len(v) > 0:print k, str(v)
|
"""
クローリングを説明するための最小限のコードを書く
"""
import re
import requests
import time
import random
# クローリングを開始するURL
start_url = "https://goworkship.com/magazine/"
# サイト管理者に分かるよう自身の連絡先などをUser-Agentに記載する
headers = {
'User-Agent': 'sig-Bot/1.0 (@sig_Left: https://twitter.com/sig_Left)'
}
# アクセスするURL(初期値はクローリングを開始するURL)
url = start_url
# HTMLの格納場所
html_list = []
for i in range(5):
print(f'{i + 1}ページ目クローリング開始')
# 対象ページのhtml
html = requests.get(url, headers=headers).text
# 取得したHTMLの格納
html_list.append(html)
# ページ中のaタグ内のURLを取得する
url = random.choice(re.findall('<a.+?href="(https://.+?)".*?>', html))
# 次のループに行く前に最低でも1秒以上待機する(サイトに負荷をかけないため)
time.sleep(2)
# 収集したHTMLの出力
for i, html in enumerate(html_list):
print(f'{i + 1}ページ取得結果')
print(html) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 03/02/2018 3:09 PM
# @Author : Lee
# @File : quick_sort_3_ways.py
# @Software: PyCharm
import random
from utils.common import swap
from utils.sort_test_helper import SortTestHelper
class QuickSort3Ways(object):
    """Recursive three-way (Dutch national flag) quicksort.

    ``sort(lists)`` sorts the list in place; the three-argument form
    ``sort(lists, left, right)`` is the internal recursive call on the
    sub-range ``lists[left:right + 1]``.
    """

    @classmethod
    def sort(cls, lists, *args):
        if not args:
            # public entry point: sort the whole list in place
            cls.sort(lists, 0, len(lists) - 1)
            return
        lo, hi = args[0], args[1]
        if lo >= hi:
            return
        # move a uniformly random pivot to the front of the range
        swap(lists, lo, int(random.random() * (hi - lo + 1)) + lo)
        pivot = lists[lo]
        # Partition invariant:
        #   lists[lo + 1 .. lt]   <  pivot
        #   lists[lt + 1 .. cur)  == pivot
        #   lists[gt .. hi]       >  pivot
        # All three regions start empty and grow element by element.
        lt = lo
        gt = hi + 1
        cur = lo + 1
        while cur < gt:
            value = lists[cur]
            if value < pivot:
                lt += 1
                swap(lists, lt, cur)
                cur += 1
            elif value > pivot:
                gt -= 1
                swap(lists, gt, cur)
                # cur stays: the swapped-in element is still unexamined
            else:
                cur += 1
        # place the pivot between the "<" and "==" regions
        swap(lists, lo, lt)
        cls.sort(lists, lo, lt - 1)
        cls.sort(lists, gt, hi)
if __name__ == '__main__':
    # sort one million random integers in [0, 1000000] through the
    # helper's timing/correctness harness.
    # NOTE(review): worst-case recursion depth could exceed Python's
    # default limit on adversarial inputs; random pivoting makes that
    # unlikely for this random data.
    test_lists = SortTestHelper.generate_random_list(1000000, 0, 1000000)
    SortTestHelper.test_sort(QuickSort3Ways, test_lists)
|
# Problems : Candy
# Description : There are N children standing in a line. Each child is assigned a rating value.
# You are giving candies to these children subjected to the following requirements:
# Each child must have at least one candy.
# Children with a higher rating get more candies than their neighbors.
# What is the minimum candies you must give?
# Author : HomeWay88
# Date : 2014-09-26
# Article-Link: http://www.coderblog.cn/article/56/
import sys
class Solution:
    """LeetCode "Candy": single-pass greedy solution."""

    def candy(self, ratings):
        """Return the minimum total candies for *ratings*.

        Every child gets at least one candy, and any child rated higher
        than an adjacent child must receive more candies than that
        neighbour.  Runs in O(n) time and O(1) extra space by tracking
        the current ascending/descending run instead of materialising a
        per-child candy array.

        :param ratings: list of integer ratings (None treated as empty)
        :return: minimum number of candies (int)
        """
        if ratings is None:  # fixed: identity test instead of '== None'
            return 0
        elif len(ratings) <= 1:
            return len(ratings)
        else:
            total = 0
            last = 65535*65535        # sentinel larger than any rating
            candy = 0                 # candies for the current child
            extra = 0                 # back-fill owed to the current descending run
            down_start_index = -1     # index where the descending run began
            last_up_candy = -1        # candies at the peak preceding a descent
            for i,num in enumerate(ratings):
                if num > last:
                    # ascending: one more candy than the previous child
                    candy += 1
                    down_start_index = -1
                    last_up_candy = candy
                elif num < last:
                    # descending: give 1 now and owe +1 to each earlier
                    # member of the run (paid through 'extra')
                    if down_start_index == -1:
                        down_start_index = i
                    candy = 1
                    extra = i - down_start_index
                    if down_start_index - 1 > 0:
                        if ratings[down_start_index-1] > ratings[down_start_index]:
                            # the peak must stay strictly above the run below it
                            if extra+1 >= last_up_candy:
                                extra += 1
                else:
                    # equal rating: reset to a single candy
                    candy = 1
                    down_start_index = i
                last = num
                total += candy + extra
                extra = 0
            return total
def PasreFromArgs():
    """Parse a comma-separated integer list from the first CLI argument.

    Returns [] when no argument was given or when it is not a valid
    comma-separated list of integers.  (The misspelled name "Pasre" is
    kept: it is the public interface callers use.)
    """
    try:
        string = sys.argv[1]
        nums = [int(num) for num in string.split(",")]
        return nums
    except (IndexError, ValueError):
        # fixed: narrowed from a bare 'except:' that swallowed everything
        # (including SystemExit / KeyboardInterrupt)
        return []
test_cases = {
1 : [1],
2 : [1,1],
3 : [1,2],
4 : [1,2,2],
20 : [1,5,10,4,3,2,1,5],
9 : [1,4,1,4,1,4],
9 : [4,1,4,1,4,1],
5 : [4,4,4,1],
9: [4,4,4,1,3,3,3],
11:[4,4,4,1,3,3,3,1],
9 : [4,2,3,4,1]
}
#default = [1,5,10,4,3,2,1,5]
default = [4,4,4,4,4,1]
nums = PasreFromArgs()
nums = nums if nums else default
solution = Solution()
for result in test_cases:
print "Testing:",test_cases[result],
assert solution.candy(test_cases[result]) == result
print '=>',result,' ,Passed'
result = solution.candy(nums)
print "Children Ratings:",nums
print "Minimum Candies:",result
|
from torchvision import datasets, transforms
from torchtext import datasets as textdata
from torchtext import data
from torchtext.vocab import GloVe
from PIL import Image
from .usps import USPS
from . import caltech_ucsd_birds
from . import pascal_voc
import os
import contextlib
import numpy as np
import torch
from collections import namedtuple
import math
import torch.nn as nn
# Default on-disk root directory for each supported dataset.
default_dataset_roots = dict(
    MNIST='./data/mnist',
    MNIST_RGB='./data/mnist',
    SVHN='./data/svhn',
    USPS='./data/usps',
    Cifar10='./data/cifar10',
    CUB200='./data/birds',
    PASCAL_VOC='./data/pascal_voc',
    imdb='./data/text/imdb',
    sst5='./data/text/sst',
    trec6='./data/text/trec',
    trec50='./data/text/trec',
    snli='./data/text/snli',
    multinli='./data/text/multinli',
)
# Per-channel (mean, std) statistics for transforms.Normalize.
# Text datasets carry a ((0,), (0,)) placeholder: no pixel normalisation.
dataset_normalization = dict(
    MNIST=((0.1307,), (0.3081,)),
    MNIST_RGB=((0.1307, 0.1307, 0.1307), (0.3081, 0.3081, 0.3081)),
    USPS=((0.15972736477851868,), (0.25726667046546936,)),
    SVHN=((0.4379104971885681, 0.44398033618927, 0.4729299545288086),
          (0.19803012907505035, 0.2010156363248825, 0.19703614711761475)),
    Cifar10=((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
    CUB200=((0.47850531339645386, 0.4992702007293701, 0.4022205173969269),
            (0.23210887610912323, 0.2277066558599472, 0.26652416586875916)),
    PASCAL_VOC=((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    imdb=((0,), (0,)),
    sst5=((0,), (0,)),
    trec6=((0,), (0,)),
    trec50=((0,), (0,)),
    snli=((0,), (0,)),
    multinli=((0,), (0,)),
)
# Human-readable class labels per dataset.
dataset_labels = dict(
    MNIST=list(range(10)),
    MNIST_RGB=list(range(10)),
    USPS=list(range(10)),
    SVHN=list(range(10)),
    # fixed: CIFAR-10's class 6 is 'frog'; it was mislabelled 'monkey'
    Cifar10=('plane', 'car', 'bird', 'cat',
             'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
    CUB200=caltech_ucsd_birds.class_labels,
    PASCAL_VOC=pascal_voc.object_categories,
    imdb={0, 1},  # NOTE(review): a set, unlike the lists elsewhere -- confirm callers never index it
    sst5=list(range(5)),
    trec6=list(range(6)),
    trec50=list(range(50)),
    snli=list(range(3)),
    multinli=list(range(3)),
)
# (nc, real_size, num_classes): channel count, native image size
# (0 for text; overwritten with state.maxlen at runtime), class count.
DatasetStats = namedtuple('DatasetStats', ' '.join(['nc', 'real_size', 'num_classes']))
dataset_stats = dict(
    MNIST=DatasetStats(1, 28, 10),
    MNIST_RGB=DatasetStats(3, 28, 10),
    USPS=DatasetStats(1, 28, 10),
    SVHN=DatasetStats(3, 32, 10),
    Cifar10=DatasetStats(3, 32, 10),
    CUB200=DatasetStats(3, 224, 200),
    PASCAL_VOC=DatasetStats(3, 224, 20),
    imdb=DatasetStats(1, 0, 2),
    sst5=DatasetStats(1, 0, 5),
    trec6=DatasetStats(1, 0, 6),
    trec50=DatasetStats(1, 0, 50),
    snli=DatasetStats(1, 0, 3),
    multinli=DatasetStats(1, 0, 3),
)
# every dataset must be registered in all four tables
assert(set(default_dataset_roots.keys()) == set(dataset_normalization.keys()) ==
       set(dataset_labels.keys()) == set(dataset_stats.keys()))
def print_closest_words(vec, glove, n=5):
    """Print the n+1 vocabulary words nearest to *vec* with their distances.

    Distance is the Euclidean norm against every row of ``glove.vectors``.
    The extra (+1) slot typically accounts for the query word itself,
    which sits at distance 0.
    """
    distances = torch.norm(glove.vectors - vec, dim=1)
    ranked = list(enumerate(distances.numpy()))
    ranked.sort(key=lambda pair: pair[1])          # nearest first (stable)
    for word_idx, dist in ranked[:n + 1]:
        print(glove.itos[word_idx], dist)
def closest_words(vec, glove, n=5):
    """Return the n+1 vocabulary words nearest to *vec*.

    Distance is the Euclidean norm against every row of ``glove.vectors``;
    the extra (+1) slot typically captures the query word itself.
    """
    dists = torch.norm(glove.vectors - vec, dim=1).numpy()
    order = sorted(range(len(dists)), key=dists.__getitem__)  # nearest first
    return [glove.itos[word_idx] for word_idx in order[:n + 1]]
def get_info(state):
    """Resolve all metadata for ``state.dataset``.

    Refreshes the text-dataset entries of the module-level
    ``dataset_stats`` table with the configured sequence length first
    (side effect), then looks up the remaining tables.

    Returns (name, root, nc, input_size, num_classes, normalization, labels).
    """
    # text datasets have no fixed image size: real_size is state.maxlen
    for text_name, n_cls in (('imdb', 2), ('sst5', 5), ('trec6', 6),
                             ('trec50', 50), ('snli', 3), ('multinli', 3)):
        dataset_stats[text_name] = DatasetStats(1, state.maxlen, n_cls)
    name = state.dataset  # argparse fmt guarantees lowercase, no hyphens
    assert name in dataset_stats, 'Unsupported dataset: {}'.format(state.dataset)
    nc, input_size, num_classes = dataset_stats[name]
    normalization = dataset_normalization[name]
    # fall back to the per-dataset default root when none was given
    root = state.dataset_root
    if root is None:
        root = default_dataset_roots[name]
    labels = dataset_labels[name]
    return name, root, nc, input_size, num_classes, normalization, labels
@contextlib.contextmanager
def suppress_stdout():
    """Context manager that discards anything written to stdout."""
    devnull = open(os.devnull, "w")
    try:
        with contextlib.redirect_stdout(devnull):
            yield
    finally:
        devnull.close()
# name -> callable building the torchtext splits from (TEXT, LABEL) fields.
# trec6/trec50 share TREC and differ only in the fine_grained flag.
_TEXT_SPLIT_FNS = {
    'imdb': lambda text_f, label_f: textdata.IMDB.splits(text_f, label_f),
    'sst5': lambda text_f, label_f: textdata.SST.splits(text_f, label_f, fine_grained=True),
    'trec6': lambda text_f, label_f: textdata.TREC.splits(text_f, label_f, fine_grained=False),
    'trec50': lambda text_f, label_f: textdata.TREC.splits(text_f, label_f, fine_grained=True),
    'snli': lambda text_f, label_f: textdata.SNLI.splits(text_f, label_f),
    'multinli': lambda text_f, label_f: textdata.MultiNLI.splits(text_f, label_f),
}


def _load_text_dataset(state, phase, splits_fn):
    """Shared pipeline for every torchtext dataset.

    Builds the TEXT/LABEL fields, loads the splits via *splits_fn*,
    builds a GloVe-initialised vocabulary on the training split, and
    stashes the pretrained vectors and vocab on *state* (a side effect
    callers rely on).  Returns the train split for phase 'train',
    otherwise the test split.
    """
    TEXT = data.Field(lower=True, include_lengths=True, batch_first=True,
                      fix_length=state.maxlen)
    LABEL = data.LabelField(dtype=torch.long)
    splits = splits_fn(TEXT, LABEL)
    # SST/SNLI/MultiNLI also return a validation split; as in the
    # original code only the first (train) and last (test) are used.
    train, test = splits[0], splits[-1]
    # cap the vocabulary; -2 leaves room for the <unk>/<pad> specials
    TEXT.build_vocab(train,
                     vectors=GloVe(name='6B', dim=state.ninp, max_vectors=state.ntoken),
                     max_size=state.ntoken - 2)
    LABEL.build_vocab(train)
    state.pretrained_vec = TEXT.vocab.vectors
    state.glove = TEXT.vocab
    return train if phase == 'train' else test


def get_dataset(state, phase):
    """Build and return the requested split ('train' or 'test') of state.dataset.

    Image datasets come from torchvision (with resize / augmentation /
    normalisation transforms); text datasets are dispatched through
    ``_load_text_dataset``.  As side effects the text entries of
    ``dataset_stats`` are refreshed with state.maxlen, and for text
    datasets state.pretrained_vec / state.glove are populated.

    Raises ValueError for an unknown dataset name.
    """
    # refresh the text-dataset stats so real_size reflects state.maxlen
    for text_name, n_cls in (('imdb', 2), ('sst5', 5), ('trec6', 6),
                             ('trec50', 50), ('snli', 3), ('multinli', 3)):
        dataset_stats[text_name] = DatasetStats(1, state.maxlen, n_cls)
    assert phase in ('train', 'test'), 'Unsupported phase: %s' % phase
    name, root, nc, input_size, num_classes, normalization, _ = get_info(state)
    real_size = dataset_stats[name].real_size
    if name == 'MNIST':
        if input_size != real_size:
            transform_list = [transforms.Resize([input_size, input_size], Image.BICUBIC)]
        else:
            transform_list = []
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            return datasets.MNIST(root, train=(phase == 'train'), download=True,
                                  transform=transforms.Compose(transform_list))
    elif name == 'MNIST_RGB':
        # replicate the single grey channel into three channels
        transform_list = [transforms.Grayscale(3)]
        if input_size != real_size:
            transform_list.append(transforms.Resize([input_size, input_size], Image.BICUBIC))
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            return datasets.MNIST(root, train=(phase == 'train'), download=True,
                                  transform=transforms.Compose(transform_list))
    elif name == 'USPS':
        if input_size != real_size:
            transform_list = [transforms.Resize([input_size, input_size], Image.BICUBIC)]
        else:
            transform_list = []
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            return USPS(root, train=(phase == 'train'), download=True,
                        transform=transforms.Compose(transform_list))
    elif name == 'SVHN':
        transform_list = []
        if input_size != real_size:
            transform_list.append(transforms.Resize([input_size, input_size], Image.BICUBIC))
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            return datasets.SVHN(root, split=phase, download=True,
                                 transform=transforms.Compose(transform_list))
    elif name == 'Cifar10':
        transform_list = []
        if input_size != real_size:
            transform_list += [
                transforms.Resize([input_size, input_size], Image.BICUBIC),
            ]
        if phase == 'train':
            transform_list += [
                # TODO: merge the following into the padding options of
                # RandomCrop when a new torchvision version is released.
                transforms.Pad(padding=4, padding_mode='reflect'),
                transforms.RandomCrop(input_size),
                transforms.RandomHorizontalFlip(),
            ]
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            return datasets.CIFAR10(root, phase == 'train', transforms.Compose(transform_list), download=True)
    elif name == 'CUB200':
        transform_list = []
        if phase == 'train':
            transform_list += [
                transforms.RandomResizedCrop(input_size, interpolation=Image.BICUBIC),
                transforms.RandomHorizontalFlip(),
            ]
        else:
            transform_list += [
                transforms.Resize([input_size, input_size], Image.BICUBIC),
            ]
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        return caltech_ucsd_birds.CUB200(root, phase == 'train', transforms.Compose(transform_list), download=True)
    elif name == 'PASCAL_VOC':
        transform_list = []
        if phase == 'train':
            transform_list += [
                transforms.RandomResizedCrop(input_size, interpolation=Image.BICUBIC),
                transforms.RandomHorizontalFlip(),
            ]
        else:
            transform_list += [
                transforms.Resize([input_size, input_size], Image.BICUBIC),
            ]
        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize(*normalization),
        ]
        if phase == 'train':
            # VOC convention: training uses the combined train+val split
            phase = 'trainval'
        return pascal_voc.PASCALVoc2007(root, phase, transforms.Compose(transform_list))
    elif name in _TEXT_SPLIT_FNS:
        # all six torchtext datasets shared ~30 identical lines each;
        # factored into one helper parameterised by the splits call
        return _load_text_dataset(state, phase, _TEXT_SPLIT_FNS[name])
    else:
        raise ValueError('Unsupported dataset: %s' % state.dataset)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.