import cv2
import numpy as np
import matplotlib.pyplot as plt
import random
import weight_mask
from skimage import io
from scipy import ndimage
from scipy.signal import convolve2d
from scipy.signal import wiener
import math
import pywt
import utils
import os
import time
import scipy.io as scio
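# The functions below implement a nonsubsampled contourlet transform (NSCT):
# myNSCTd decomposes an image with a nonsubsampled pyramid (NSPd) followed by a
# nonsubsampled directional filter bank (nsdfbdec), and myNSCTr reverses the
# process. The filter-construction helpers (atrousfilters, dfilters, parafilters,
# modulate2, resampz, qupz, ld2quin) appear to follow the routines of the same
# names in the MATLAB Nonsubsampled Contourlet Toolbox.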
def myNSCTd(I,levels,pfiltername,dfiltername,type):
[ph0, ph1, pg0, pg1] = atrousfilters(pfiltername)
# filtersd = np.zeros((4))
filtersd = [[],[],[],[],
[],[],[],[],
[],[],[],[],
[],[],[],[]]
[dh1, dh2] = dfilters(dfiltername, 'd')
dh1 = dh1 / np.sqrt(2)
dh2 = dh2 / np.sqrt(2)
filtersd[0] = modulate2(dh1, 'c', [])
filtersd[1] = modulate2(dh2, 'c', [])
[filtersd[2], filtersd[3]] = parafilters(dh1, dh2)
clevels = len( levels )
nIndex = clevels+1
y = []
for _ in range(nIndex):
y.append([])
Insp = []
for _ in range(clevels):
Insp.append([])
for i in range(clevels):
if type == 'NSCT':
[Ilow, Ihigh] = NSPd(I, ph0, ph1, i)
if levels[nIndex-2] > 0:
Ihigh_dir = nsdfbdec(Ihigh, filtersd, levels[nIndex-2])
y[nIndex-1] = Ihigh_dir
else:
y[nIndex-1] = Ihigh
nIndex = nIndex - 1
I = Ilow
Insp[i]=I
y[0]=I
Insct=y
return [Insp,Insct]
def atrousfilters(fname):
if fname == 'pyr':
h0 = [
[-0.003236043456039806, -0.012944173824159223, -0.019416260736238835],
[-0.012944173824159223, 0.0625 , 0.15088834764831843],
[-0.019416260736238835, 0.15088834764831843 , 0.3406092167691145]
]
g1 = [[-0.003236043456039806, -0.012944173824159223, -0.019416260736238835],
[-0.012944173824159223,-0.0625 ,-0.09911165235168155],
[-0.019416260736238835, -0.09911165235168155 , 0.8406092167691145]]
g0 = [ [-0.00016755163599004882, -0.001005309815940293, -0.002513274539850732, -0.003351032719800976],
[-0.001005309815940293, -0.005246663087920392, -0.01193886400821893 , -0.015395021472477663],
[-0.002513274539850732, -0.01193886400821893 , 0.06769410071569153 , 0.15423938036811946 ],
[-0.003351032719800976, -0.015395021472477663 , 0.15423938036811946 , 0.3325667382415921]]
h1 = [ [0.00016755163599004882, 0.001005309815940293 , 0.002513274539850732, 0.003351032719800976],
[0.001005309815940293 , -0.0012254238241592198, -0.013949483640099517, -0.023437500000000007],
[0.002513274539850732 , -0.013949483640099517 , -0.06769410071569153 , -0.10246268507148255],
[0.003351032719800976 , -0.023437500000000007 , -0.10246268507148255 , 0.8486516952966369]]
h0 = np.array(h0)
g0 = np.array(g0)
h1 = np.array(h1)
g1 = np.array(g1)
g0 = np.hstack((g0, np.fliplr(g0[:,:-1])))
g0 = np.vstack((g0, np.flipud(g0[:-1,:])))
h0 = np.hstack((h0, np.fliplr(h0[:,:-1])))
h0 = np.vstack((h0, np.flipud(h0[:-1,:])))
g1 = np.hstack((g1, np.fliplr(g1[:,:-1])))
g1 = np.vstack((g1, np.flipud(g1[:-1,:])))
h1 = np.hstack((h1, np.fliplr(h1[:,:-1])))
h1 = np.vstack((h1, np.flipud(h1[:-1,:])))
return [h0,h1,g0,g1]
def dfilters(fname, type):
if fname == 'pkva':
beta = ldfilter(fname)
[h0, h1] = ld2quin(beta)
h0 = np.sqrt(2) * h0
h1 = np.sqrt(2) * h1
if type == 'r':
f0 = modulate2(h1, 'b', [])
f1 = modulate2(h0, 'b', [])
h0 = f0
h1 = f1
return [h0, h1]
def ldfilter(fname):
if fname == 'pkva':
v = np.reshape([0.6300 , -0.1930 , 0.0972 , -0.0526 , 0.0272 , -0.0144], (1,6))
v_ = np.fliplr(v)
f = np.hstack((v_, v))
return f
def ld2quin(beta):
if beta.shape[0] != 1:
print('The input must be a 1-D filter')
lf = beta.shape[1]
n = int(lf / 2)
sp = np.outer(beta, beta)
h = qupz(sp, 1)
h0 = np.copy(h)
h0[2*n-1, 2*n-1] = h0[2*n-1, 2*n-1] + 1
h0 = h0 / 2
h1 = -1 * convolve2d(h,h0)
h1[4*n-2, 4*n-2] = h1[4*n-2, 4*n-2] + 1
return [h0, h1]
def qupz(x, type):
if type == 1:
x1 = resampz(x, 4, [])
(m, n) = x1.shape
x2 = np.zeros((2*m-1, n))
j = 0
for i in range(x2.shape[0]):
if i % 2 == 0:
x2[i, :] = x1[j]
j += 1
y = resampz(x2, 1, [])
return y
def resampz(x, type, shift):
if shift == []:
shift = 1
sx = x.shape
if type == 3 or type == 4:
y = np.zeros((sx[0], sx[1] + abs(shift * (sx[0] - 1))))
if type != 3:
a = np.arange(sx[0])
shift2 = a * shift
else:
a = np.arange(sx[0])
shift2 = a * (-shift)
if shift2[-1] < 0:
shift2 = shift2 - shift2[-1]
for m in range(sx[0]):
y[m, shift2[m]+np.arange(sx[1])] = x[m, :]
start = 0
u, s, v = np.linalg.svd(np.reshape(y[:, start], (1,-1)), full_matrices=False)
while np.max(s) == 0:
start = start + 1
u, s, v = np.linalg.svd(np.reshape(y[:, start], (1,-1)), full_matrices=False)
finish = y.shape[1]-1
u, s, v = np.linalg.svd(np.reshape(y[:, finish], (1,-1)), full_matrices=False)
while np.max(s) == 0:
finish = finish - 1
u, s, v = np.linalg.svd(np.reshape(y[:, finish], (1,-1)), full_matrices=False)
y = y[:,start:finish+1]
elif type == 1 or type == 2:
y = np.zeros((sx[0] + abs(shift * (sx[1] - 1)), sx[1]))
if type == 1:
shift1 = np.arange(sx[1]) * (-shift)
else:
shift1 = np.arange(sx[1]) * (shift)
if shift1[-1] < 0:
shift1 = shift1 - shift1[-1]
for n in range(sx[1]):
y[shift1[n]+np.arange(sx[0]), n] = x[:, n]
start = 0
u, s, v = np.linalg.svd(np.reshape(y[start, :], (1,-1)), full_matrices=False)
while np.max(s) == 0:
start = start + 1
u, s, v = np.linalg.svd(np.reshape(y[start, :], (1,-1)), full_matrices=False)
finish = y.shape[0]-1
u, s, v = np.linalg.svd(np.reshape(y[finish, :], (1,-1)), full_matrices=False)
while np.max(s) == 0:
finish = finish - 1
u, s, v = np.linalg.svd(np.reshape(y[finish, :], (1,-1)), full_matrices=False)
y = y[start:finish+1]
return y
def modulate2(x, type, center):
if center == []:
center = [0, 0]
s = x.shape
o = [int(s[0] / 2.)+1+center[0], int(s[1] / 2.)+1+center[1]]
n1 = np.arange(1,s[0]+1) - o[0]
n2 = np.arange(1,s[1]+1) - o[1]
if type == 'c':
m2 = [np.power(-1, abs(x)) for x in n2]
m2 = np.array(m2)
m2 = m2.reshape((1,-1))
M = [s[0], 1]
y = x * repmat(m2, M, [])
elif type == 'r':
m1 = [np.power(-1, abs(x)) for x in n1]
m1 = np.array(m1)
m1 = m1.reshape((-1,1))
M = [1, s[1]]
y = x * repmat(m1, M, [])
elif type == 'b':
m1 = [np.power(-1, abs(x)) for x in n1]
m1 = np.array(m1)
m1 = m1.reshape((-1,1))
m2 = [np.power(-1, abs(x)) for x in n2]
m2 = np.array(m2)
m2 = m2.reshape((1,-1))
m = np.outer(m1, m2)
y = x * m
return y
def repmat(A,M,N):
if N == []:
if len(M) > 1:
siz = M
if len(M) > 1 and len(siz) == 2:
(m,n) = A.shape
if m == 1 and siz[1] == 1:
B = np.ones((siz[0],1))
B = np.outer(B,A)
elif n == 1 and siz[0] == 1:
B = np.ones((1,siz[1]))
B = np.outer(A,B)
return B
def parafilters( f1, f2 ):
y1 = [[], [], [], []]
y2 = [[], [], [], []]
y1[0] = modulate2(f1, 'r', [])
y1[1] = modulate2(f1, 'c', [])
y1[2] = np.array(y1[0]).T
y1[3] = np.array(y1[1]).T
y2[0] = modulate2(f2, 'r', [])
y2[1] = modulate2(f2, 'c', [])
y2[2] = np.array(y2[0]).T
y2[3] = np.array(y2[1]).T
for i in range(4):
y1[i] = resampz( y1[i], i+1, [])
y2[i] = resampz( y2[i], i+1, [])
return [y1, y2]
def NSPd(I,h0,h1,level):
index = []
(m,n) = h0.shape
Nh0= np.zeros((np.power(2,level) * m,np.power(2,level) * n))
for i in range(0,Nh0.shape[0],np.power(2,level)):
for j in range(0,Nh0.shape[1],np.power(2,level)):
index.append([i,j])
ind = 0
for i in range(h0.shape[0]):
for j in range(h0.shape[1]):
Nh0[index[ind][0], index[ind][1]] = h0[i,j]
ind += 1
newh0 = Nh0[:(m-1)*np.power(2,level)+1,:(n-1)*np.power(2,level)+1]
index = []
(m,n) = h1.shape
Nh1= np.zeros((np.power(2,level) * m,np.power(2,level) * n))
for i in range(0,Nh1.shape[0],np.power(2,level)):
for j in range(0,Nh1.shape[1],np.power(2,level)):
index.append([i,j])
ind = 0
for i in range(h1.shape[0]):
for j in range(h1.shape[1]):
Nh1[index[ind][0], index[ind][1]] = h1[i,j]
ind += 1
newh1 = Nh1[:(m-1)*np.power(2,level)+1,:(n-1)*np.power(2,level)+1]
# I = np.array(I, dtype=np.float32)
# Ilow = imfilter(I, newh0, 'conv', 'symmetric', 'same')
Ilow = cv2.filter2D(I, -1, newh0, borderType=cv2.BORDER_REFLECT)
# Ihigh = imfilter(I, newh1, 'conv', 'symmetric', 'same')
Ihigh = cv2.filter2D(I, -1, newh1, borderType=cv2.BORDER_REFLECT)
return [Ilow,Ihigh]
def nsdfbdec( x, dfilter, clevels ):
k1 = dfilter[0]
k2 = dfilter[1]
f1 = dfilter[2]
f2 = dfilter[3]
q1 = np.array([[1, -1],[1, 1]])
y = [[],[],[],[]]
if clevels == 1:
[y[0], y[1]] = nssfbdec( x, k1, k2, [])
else:
[x1, x2] = nssfbdec( x, k1, k2, [])
[y[0], y[1]] = nssfbdec( x1, k1, k2, q1 )
[y[2], y[3]] = nssfbdec( x2, k1, k2, q1 )
for l in range(3,clevels+1):
y_old = y
y = []
for _ in range(np.power(2,l)):
y.append([])
for k in range(np.power(2,l-2)):
slk = 2*int( (k) /2 ) - np.power(2,l-3) + 1
mkl = 2*np.matmul(np.array([[np.power(2,l-3), 0],[ 0, 1 ]]),np.array([[1, 0],[-slk, 1]]))
i = np.mod(k, 2)
[y[2*k], y[2*k+1]] = nssfbdec( y_old[k], f1[i], f2[i], mkl )
for k in range(np.power(2,l-2), np.power(2,l-1)):
slk = 2 * int( ( k-np.power(2,l-2)-1 ) / 2 ) - np.power(2,l-3) + 1
mkl = 2*np.matmul(np.array([[ 1, 0],[0, np.power(2,l-3) ]]),np.array([[1, -slk], [0, 1]]))
i = np.mod(k, 2) + 2
[y[2*k], y[2*k+1]] = nssfbdec( y_old[k], f1[i], f2[i], mkl )
return y
def nssfbdec( x, f1, f2, mup ):
if mup == []:
# y1 = imfilter( x, f1,'symmetric' )
y1 = cv2.filter2D(x, -1, f1, borderType=cv2.BORDER_REFLECT)
# f1_ = scio.loadmat('./f1.mat')
# f1_ = f1_['f1']
# diff = np.abs(f1-f1_)
# y2 = imfilter( x, f2,'symmetric' )
y2 = cv2.filter2D(x, -1, f2, borderType=cv2.BORDER_REFLECT)
return [y1, y2]
if (mup == 1).all() or (mup == np.eye(2)).all():
y1 = cv2.filter2D(x, -1, f1, borderType=cv2.BORDER_REFLECT)
y2 = cv2.filter2D(x, -1, f2, borderType=cv2.BORDER_REFLECT)
return [y1, y2]
if mup.shape == (2,2):
y1 = myzconv2( x, f1, mup )
y2 = myzconv2( x, f2, mup )
elif mup.shape == (1, 1):
mup = mup * np.eye(2)
y1 = myzconv2( x, f1, mup )
y2 = myzconv2( x, f2, mup )
return [y1, y2]
def myzconv2(Im,f,M):
(fr,fc) = f.shape
Nfstartr=min([1,1-(fr-1)*M[0,0],1-(fc-1)*M[1,0],1-(fr-1)*M[0,0]-(fc-1)*M[1,0]])
Nfendr=max([1,1-(fr-1)*M[0,0],1-(fc-1)*M[1,0],1-(fr-1)*M[0,0]-(fc-1)*M[1,0]])
Nfstartc=min([1,1-(fr-1)*M[0,1],1-(fc-1)*M[1,1],1-(fr-1)*M[0,1]-(fc-1)*M[1,1]])
Nfendc=max([1,1-(fr-1)*M[0,1],1-(fc-1)*M[1,1],1-(fr-1)*M[0,1]-(fc-1)*M[1,1]])
Nfr=Nfendr-Nfstartr+1
Nfc=Nfendc-Nfstartc+1
Nf=np.zeros((Nfr,Nfc))
for i in range(fr):
for j in range(fc):
Nf[2-(i)*M[0,0]-(j)*M[1,0]-Nfstartr-1,2-(i)*M[0,1]-(j)*M[1,1]-Nfstartc-1]=f[i,j]
# Imout = cv2.filter2D(Im, -1, Nf, borderType=cv2.BORDER_REFLECT)
# Imout = cv2.filter2D(Im, -1, Nf, borderType=cv2.BORDER_WRAP)
# Imout = cv2.filter2D(Im, -1, Nf, borderType=cv2.BORDER_REFLECT_101 )
Imout = ndimage.convolve(Im, Nf, mode='wrap')
return Imout
def myNSCTr(Insct,levels,pfiltername,dfiltername,type):
[ph0, ph1, pg0, pg1] = atrousfilters(pfiltername)
filtersr = [[],[],[],[],
[],[],[],[],
[],[],[],[],
[],[],[],[]]
[dg1, dg2] = dfilters(dfiltername, 'r')
dg1 = dg1 / np.sqrt(2)
dg2 = dg2 / np.sqrt(2)
filtersr[0] = modulate2(dg1, 'c', [])
filtersr[1] = modulate2(dg2, 'c', [])
[filtersr[2], filtersr[3]] = parafilters( dg1, dg2 )
clevels = len( levels )
nIndex = clevels+1
Ilow=Insct[0]
for i in range(clevels):
if len(Insct[i+1]) > 1:
Ihigh = nsdfbrec( Insct[i+1], filtersr )
else:
Ihigh = Insct[i+1]
if type == 'NSCT':
Ilow = NSPr(Ilow, Ihigh, pg0, pg1, clevels-i)
Insctred=Ilow
return Insctred
def nsdfbrec( x, dfilter ):
len_x = 0
for iterm in x:
if iterm != []:
len_x += 1
clevels = int(np.log2( len_x ))
k1 = dfilter[0]
k2 = dfilter[1]
f1 = dfilter[2]
f2 = dfilter[3]
q1 = np.array([[1, -1],[1, 1]])
if clevels == 1:
y = nssfbrec( x[0], x[1], k1, k2, [])
else:
for l in range(clevels,2,-1):
for k in range(np.power(2,l-2)):
slk = 2*int( (k) /2 ) - np.power(2,l-3) + 1
mkl = 2*np.matmul(np.array([[np.power(2,l-3), 0],[ 0, 1 ]]),np.array([[1, 0],[-slk, 1]]))
i = np.mod(k, 2)
x[k] = nssfbrec( x[2*k], x[2*k+1], f1[i], f2[i], mkl )
for k in range(np.power(2,l-2), np.power(2,l-1)):
slk = 2 * int( ( k-np.power(2,l-2)-1 ) / 2 ) - np.power(2,l-3) + 1
mkl = 2*np.matmul(np.array([[ 1, 0],[0, np.power(2,l-3) ]]),np.array([[1, -slk], [0, 1]]))
i = np.mod(k, 2) + 2
x[k] = nssfbrec( x[2*k], x[2*k+1], f1[i], f2[i], mkl )
x[0] = nssfbrec( x[0], x[1], k1, k2, q1 )
x[1] = nssfbrec( x[2], x[3], k1, k2, q1 )
y = nssfbrec( x[0], x[1], k1, k2, [])
return y
def nssfbrec( x1, x2, f1, f2, mup ):
if mup == []:
# y1 = imfilter( x1, f1 ,'symmetric')
y1 = cv2.filter2D(x1, -1, f1, borderType=cv2.BORDER_REFLECT)
# y2 = imfilter( x2, f2 ,'symmetric')
y2 = cv2.filter2D(x2, -1, f2, borderType=cv2.BORDER_REFLECT)
y = y1 + y2
return y
if mup.shape == (2, 2):
y1 = myzconv2( x1, f1, mup )
y2 = myzconv2( x2, f2, mup )
y = y1 + y2
return y
def NSPr(Ilow,Ihigh,g0,g1,level):
level = level - 1
if level != 0:
index = []
(m,n) = g0.shape
Ng0= np.zeros((np.power(2,level) * m,np.power(2,level) * n))
for i in range(0,Ng0.shape[0],np.power(2,level)):
for j in range(0,Ng0.shape[1],np.power(2,level)):
index.append([i,j])
ind = 0
for i in range(g0.shape[0]):
for j in range(g0.shape[1]):
Ng0[index[ind][0], index[ind][1]] = g0[i,j]
ind += 1
newg0 = Ng0[:(m-1)*np.power(2,level)+1,:(n-1)*np.power(2,level)+1]
index = []
(m,n) = g1.shape
Ng1= np.zeros((np.power(2,level) * m,np.power(2,level) * n))
for i in range(0,Ng1.shape[0],np.power(2,level)):
for j in range(0,Ng1.shape[1],np.power(2,level)):
index.append([i,j])
ind = 0
for i in range(g1.shape[0]):
for j in range(g1.shape[1]):
Ng1[index[ind][0], index[ind][1]] = g1[i,j]
ind += 1
newg1 = Ng1[:(m-1)*np.power(2,level)+1,:(n-1)*np.power(2,level)+1]
Ired = cv2.filter2D(Ilow, -1, newg0, borderType=cv2.BORDER_REFLECT) + cv2.filter2D(Ihigh, -1, newg1, borderType=cv2.BORDER_REFLECT)
else:
Ired = cv2.filter2D(Ilow, -1, g0, borderType=cv2.BORDER_REFLECT) + cv2.filter2D(Ihigh, -1, g1, borderType=cv2.BORDER_REFLECT)
return Ired
if __name__ == "__main__":
I = cv2.imread('zoneplate.png', 0)
I = np.array(I, dtype=np.float32)
I = cv2.normalize(I, None, alpha=0, beta=1.0, norm_type=cv2.NORM_MINMAX)
levels = [1, 2, 3, 4]
pname = 'pyr'
dname = 'pkva'
type = 'NSCT'
[Insp,Insct]= myNSCTd(I,levels,pname,dname,type)
IMf = myNSCTr(Insct,levels,pname,dname,type)
diff = np.abs(IMf - I)
plt.figure('1')
plt.subplot(121),plt.imshow(IMf),plt.title('IMf')
plt.subplot(122),plt.imshow(I),plt.title('I')
plt.figure('2')
plt.subplot(111),plt.imshow(diff),plt.title('diff')
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
# a_list = [1,2,3]
# print(sum(a_list))
# input_result = input("Big or Small:")
# a_list = []
# point1 = random.randrange(1,7)
# point2 = random.randrange(1,7)
# point3 = random.randrange(1,7)
def roll_dice(numbers=3,points=None):
print("<<<<< ROLL THE DICE ! >>>>>>")
flag = 1
if not(points):
points = []
while numbers > 0:
point = random.randrange(1,7)
print("Roll number {} result is {}".format(flag,point))
points.append(point)
numbers = numbers - 1
flag += 1
return points
def roll_result(total):
isBig = 11 <= total <= 18
isSmall = 3<= total <= 10
if isBig:
return 'Big'
elif isSmall:
return 'Small'
def start_game(money=1000):
print("<<<<<< GAME STARTS ! >>>>>>")
choices = ['Big','Small']
your_choice = input("Big or Small :")
your_bet = input("How much you wanna bet ? -")
if your_choice in choices:
points = roll_dice()
total = sum(points)
youWin = your_choice == roll_result(total)
if youWin:
money += int(your_bet)
print("The points are ",points,"You Win")
print("You gained {},you have {} now".format(your_bet,money))
start_game(money)
else:
money -= int(your_bet)
print("The points are ",points,"You Lose")
print("You lost {},you have {} now".format(your_bet,money))
if money > 0:
start_game(money)
else:
print("Invalid Words")
start_game()
start_game()
# a_list = [point3,point2,point1]
# total_num = sum(a_list)
# if 3 <= total_num <= 10:
# if input_result == "Small":
# print("The points are [{},{},{}] You {}".format(point1,point2,point3,"Win"))
# else:
# print("The points are [{},{},{}] You {}".format(point1,point2,point3,"Lose"))
# elif 11 <= total_num <= 18:
# if input_result == "Big":
# print("The points are [{},{},{}] You {}".format(point1,point2,point3,"Win"))
# else:
# print("The points are [{},{},{}] You {}".format(point1,point2,point3,"Lose"))
#
# else:
# print("other error!")
|
#!/usr/bin/env python
# coding: utf-8
# # P4 Panoramas and Stereo
# ## P4.1 Spherical Reprojection
#
# As we discussed in class, to make a panorama we need to reproject the images onto a sphere, something you will be implementing in this question. I have given you some starter code that you should use to reproject the image onto a sphere: the function `reproject_image_to_sphere`. I have annotated what you need to include to complete this function:
#
# <img src="annotated_projection_code.png" width="600">
#
# **TASK** Complete the `reproject_image_to_sphere` function I have provided below. I recommend that you revisit the lecture slides on panoramas to get the definitions of the unit sphere coordinates.
#
# I have provided you with a simple scene for Blender: `simple_pano_env.blend`. The camera is located at `x=0` and `y=0` and oriented such that it is level with the ground plane and rotated 0-degrees about the z-axis. The only camera in the scene has a Focal Length of 40 mm (expressed with respect to the *36 mm* film size standard used in photography). To test that your image reprojection method is working correctly, complete the following task.
#
# **TASK** Generate 4 images by changing the Focal Length of the camera in Blender and name them as follows:
#
# 1. `b_pano_20mm.png` Rendered after setting the camera Focal Length to `20 mm`.
# 2. `b_pano_30mm.png` Rendered after setting the camera Focal Length to `30 mm`.
# 3. `b_pano_40mm.png` Rendered after setting the camera Focal Length to `40 mm`.
# 4. `b_pano_50mm.png` Rendered after setting the camera Focal Length to `50 mm`.
#
# **Plots** Run the `Evaluation and Plotting` code I have included below. This will generate three figures (all of which you should include in your writeup). (1) shows the four images after the spherical reprojection. (2) shows the images added together, showing that in the center where all images have visibility of the scene, the images properly overlap. (3) The "differences" between consecutive Focal Lengths; if your code is implemented well, the center region (where the two overlap) should be nearly zero ("white" in the color scheme) and large outside of that image (where they do not overlap).
#
# If the second plot, in which all images have been added together, looks "reasonable" (that the images are properly overlapped with one another) and you are convinced that your reprojection function is working properly, you can move on to the next section, in which you are asked to build your own panoramas after reprojecting onto a sphere.
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import scipy.interpolate
def load_image_gray(filepath):
img = Image.open(filepath)
img = np.asarray(img).astype(np.float64)/255
if len(img.shape) > 2:
return img[:, :, 0]
else:
return img
def get_image_with_f(filepath, blender_focal_length_mm):
image = load_image_gray(filepath)
f = max(image.shape) * blender_focal_length_mm / 36.00
return image, f
def reproject_image_to_sphere(image, focal_length_px, fov_deg=None, angular_resolution=0.01):
x = np.arange(image.shape[1]).astype(np.float64)
y = np.arange(image.shape[0]).astype(np.float64)
if fov_deg is None:
fov = np.arctan(max(image.shape)/focal_length_px/2) + angular_resolution
else:
fov = fov_deg * np.pi / 180
print(f"2 * Field of View: {2*fov}")
thetas = np.arange(-fov, fov, angular_resolution)
phis = np.arange(-fov, fov, angular_resolution)
transformed_image = np.zeros((len(phis), len(thetas)))
image_fn = scipy.interpolate.interp2d(x, y, image, kind='linear', fill_value=0)
for ii in range(len(thetas)):
for jj in range(len(phis)):
theta = thetas[ii]
phi = phis[jj]
xt = np.sin(theta)*np.cos(phi)
yt = np.sin(phi)
zt = np.cos(theta)*np.cos(phi)
new_x = len(x)//2 + (focal_length_px*xt/zt)
new_y = len(y)//2 + (focal_length_px*yt/zt)
transformed_image[jj, ii] = image_fn(new_x, new_y)
return transformed_image
img_20, f_20 = get_image_with_f('b_pano_20mm.png', 20)
img_30, f_30 = get_image_with_f('b_pano_30mm.png', 30)
img_40, f_40 = get_image_with_f('b_pano_40mm.png', 40)
img_50, f_50 = get_image_with_f('b_pano_50mm.png', 50)
# In[22]:
sp_img_20 = reproject_image_to_sphere(img_20, f_20, fov_deg=45, angular_resolution=0.002)
sp_img_30 = reproject_image_to_sphere(img_30, f_30, fov_deg=45, angular_resolution=0.002)
sp_img_40 = reproject_image_to_sphere(img_40, f_40, fov_deg=45, angular_resolution=0.002)
sp_img_50 = reproject_image_to_sphere(img_50, f_50, fov_deg=45, angular_resolution=0.002)
plt.figure(figsize=(5,5), dpi=600)
plt.subplot(2, 2, 1)
plt.imshow(sp_img_20)
plt.subplot(2, 2, 2)
plt.imshow(sp_img_30)
plt.subplot(2, 2, 3)
plt.imshow(sp_img_40)
plt.subplot(2, 2, 4)
plt.imshow(sp_img_50)
plt.figure(dpi=600)
plt.imshow(sp_img_20 + sp_img_30 + sp_img_40 + sp_img_50)
plt.figure(figsize=(8,8),dpi=600)
plt.subplot(1, 3, 1)
plt.imshow(sp_img_30 - sp_img_20, vmin=-0.2, vmax=0.2, cmap='PiYG')
plt.subplot(1, 3, 2)
plt.imshow(sp_img_40 - sp_img_30, vmin=-0.2, vmax=0.2, cmap='PiYG')
plt.subplot(1, 3, 3)
plt.imshow(sp_img_50 - sp_img_40, vmin=-0.2, vmax=0.2, cmap='PiYG')
# # P4.2 Panorama Stitching
#
# In this question, you will be building a panorama from images you generate from Blender. This will involve three steps: (1) image generation, (2) image transform estimation, and (3) stitching.
#
# **TASK** Generate images from Blender. To do this, you may use the `simple_pano_env.blend` environment that I have provided, rotating the camera by modifying its rotation about the Z-axis. You should set the Focal length of the camera to `40 mm` and sweep the rotation from +40 degrees to -60 degrees; rotate the camera in increments such that consecutive images have an overlap of roughly 1/3. You will likely need to generate roughly 5 or 6 images in this range.
#
# **PLOTS** Reproject the images using the `reproject_image_to_sphere` function from the previous question and compute the translation transform between each pair of "consecutive images" (images next to one another in angle space) using OpenCV. For each pair of matched images, include a plot of the feature matches in your writeup.
#
# To compute the transformation, you may use the same [OpenCV Homography tutorial from the last assignment](https://docs.opencv.org/master/d1/de0/tutorial_py_feature_homography.html). However, we know that the transformation is a translation, and so we do not want to allow the system to generate a general homography matrix, which is what results with `cv.findHomography`. Instead, you should use `affine_mat = cv.estimateAffinePartial2D(src_pts, dst_pts)[0]`, which returns a `2x3` matrix (you will need to convert this to a `3x3` homography by adding a row of `[0, 0, 1]`) that only allows for scale, rotation, and translation. Create a new transformation matrix that includes only the estimated translation parameters. Using this procedure should be more numerically stable.
#
# **PLOT** Create the panorama and include it in a plot! To do this you should:
#
# 1. Pad all images to the size of the output panorama (you will need to determine how wide this will need to be).
# 2. Apply the transformation matrices (using `cv.warpPerspective`) to the images to move them "into place" (the location they will be in the resulting panorama). This means that you will need to apply `translation_mat_2_to_1` (or its inverse) to shift image 2 relative to image 1. Note that moving image 3 into place will require accounting for the translation between 2 and 3 *and* the translation between 1 and 2, and so on. You should prefer to multiply the transformation matrices together before using them to transform the image.
# 3. Combine the images to make the panorama. You do not need to use any of the "fancy" blending techniques we discussed in class. Simply using `np.maximum` between the two images will create a sufficient panorama. Small artifacts from merging are acceptable.
#
# **PLOT** Finally, add the 20 mm focal length image you generated as part of the previous question to your panorama. It might be interesting to see how the significant change in field of view reveals more of the panorama at once and more of the space above and below the horizon.
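# A minimal sketch of the translation-only transform described above: promote the
# 2x3 matrix returned by cv.estimateAffinePartial2D to a 3x3 homography and keep
# only its translation component. The helper name and arguments are illustrative
# (they are not part of the provided starter code); src_pts and dst_pts are the
# matched keypoint arrays from the OpenCV feature-matching tutorial.
import numpy as np
import cv2 as cv

def estimate_translation_only(src_pts, dst_pts):
    affine_mat = cv.estimateAffinePartial2D(src_pts, dst_pts)[0]  # 2x3: scale, rotation, translation
    h_full = np.vstack([affine_mat, [0, 0, 1]])                   # lift to a 3x3 homography
    translation_only = np.eye(3)
    translation_only[:2, 2] = h_full[:2, 2]                       # keep only tx, ty
    return translation_only, h_full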
# In[23]:
img_r1, f_r1 = get_image_with_f('b_pano_rot1.png', 40)
img_r2, f_r2 = get_image_with_f('b_pano_rot2.png', 40)
img_r3, f_r3 = get_image_with_f('b_pano_rot3.png', 40)
img_r4, f_r4 = get_image_with_f('b_pano_rot4.png', 40)
img_r5, f_r5 = get_image_with_f('b_pano_rot5.png', 40)
img_r6, f_r6 = get_image_with_f('b_pano_rot6.png', 40)
# In[26]:
sp_img_r1 = reproject_image_to_sphere(img_r1, f_r1, fov_deg=45, angular_resolution=0.002)
sp_img_r2 = reproject_image_to_sphere(img_r2, f_r2, fov_deg=45, angular_resolution=0.002)
sp_img_r3 = reproject_image_to_sphere(img_r3, f_r3, fov_deg=45, angular_resolution=0.002)
sp_img_r4 = reproject_image_to_sphere(img_r4, f_r4, fov_deg=45, angular_resolution=0.002)
sp_img_r5 = reproject_image_to_sphere(img_r5, f_r5, fov_deg=45, angular_resolution=0.002)
sp_img_r6 = reproject_image_to_sphere(img_r6, f_r6, fov_deg=45, angular_resolution=0.002)
plt.figure(figsize=(8,8), dpi=600)
plt.subplot(2, 3, 1)
plt.imshow(sp_img_r1)
plt.subplot(2, 3, 2)
plt.imshow(sp_img_r2)
plt.subplot(2, 3, 3)
plt.imshow(sp_img_r3)
plt.subplot(2, 3, 4)
plt.imshow(sp_img_r4)
plt.subplot(2, 3, 5)
plt.imshow(sp_img_r5)
plt.subplot(2, 3, 6)
plt.imshow(sp_img_r6)
# In[230]:
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
def combine_images(img0, img1, h_matrix):
points0 = np.array(
[[0, 0], [0, img0.shape[0]], [img0.shape[1], img0.shape[0]], [img0.shape[1], 0]], dtype=np.float32)
points0 = points0.reshape((-1, 1, 2))
points1 = np.array(
[[0, 0], [0, img1.shape[0]], [img1.shape[1], img1.shape[0]], [img1.shape[1], 0]], dtype=np.float32)
points1 = points1.reshape((-1, 1, 2))
points2 = cv.perspectiveTransform(points1, h_matrix)
points = np.concatenate((points0, points2), axis=0)
[x_min, y_min] = np.int32(points.min(axis=0).ravel())
[x_max, y_max] = np.int32(points.max(axis=0).ravel())
H_translation = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]])
output_img = cv.warpPerspective(img1, H_translation.dot(h_matrix), (x_max - x_min, y_max - y_min))
output_img[-y_min:img0.shape[0] - y_min, -x_min:img0.shape[1] - x_min] = img0
return output_img
def image_match(img1, img2):
MIN_MATCH_COUNT = 10
sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv.estimateAffinePartial2D(src_pts, dst_pts)
M = np.vstack([M, [0, 0, 1]])
matchesMask = mask.ravel().tolist()
else:
print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
matchesMask = None
M = None
draw_params = dict(matchColor = (255,255,255),
singlePointColor = None,
matchesMask = matchesMask,
flags = 2)
img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
return img3, M
# In[233]:
img_r1 = cv.imread("b_pano_rot1.png",0)
img_r2 = cv.imread("b_pano_rot2.png",0)
img_r3 = cv.imread("b_pano_rot3.png",0)
img_r4 = cv.imread("b_pano_rot4.png",0)
img_r5 = cv.imread("b_pano_rot5.png",0)
img_r6 = cv.imread("b_pano_rot6.png",0)
im_12, H12 = image_match(img_r1, img_r2)
im_23, H23 = image_match(img_r2, img_r3)
im_34, H34 = image_match(img_r3, img_r4)
im_45, H45 = image_match(img_r4, img_r5)
im_56, H56 = image_match(img_r5, img_r6)
c12 = combine_images(img_r2, img_r1, H12)
c23 = combine_images(img_r3, img_r2, H23)
c34 = combine_images(img_r4, img_r3, H34)
c45 = combine_images(img_r5, img_r4, H45)
c56 = combine_images(img_r6, img_r5, H56)
im_123, H123 = image_match(c12, c23)
im_234, H234 = image_match(c23, c34)
im_345, H345 = image_match(c34, c45)
im_456, H456 = image_match(c45, c56)
c123 = combine_images(c23, c12, H123)
c234 = combine_images(c34, c23, H234)
c345 = combine_images(c45, c34, H345)
c456 = combine_images(c56, c45, H456)
im_1234, H1234 = image_match(c123, c234)
im_2345, H2345 = image_match(c234, c345)
im_3456, H3456 = image_match(c345, c456)
c1234 = combine_images(c234, c123, H1234)
c2345 = combine_images(c345, c234, H2345)
c3456 = combine_images(c456, c345, H3456)
im_12345, H12345 = image_match(c1234, c2345)
im_23456, H23456 = image_match(c2345, c3456)
c12345 = combine_images(c2345, c1234, H12345)
c23456 = combine_images(c3456, c2345, H23456)
im_123456, H123456 = image_match(c12345, c23456)
c123456 = combine_images(c23456, c12345, H123456)
plt.figure(figsize=(17,17), dpi=600)
plt.subplot(3, 2, 1)
plt.imshow(im_12)
plt.subplot(3, 2, 2)
plt.imshow(im_23)
plt.subplot(3, 2, 3)
plt.imshow(im_34)
plt.subplot(3, 2, 4)
plt.imshow(im_45)
plt.subplot(3, 2, 5)
plt.imshow(im_56)
plt.figure(figsize=(17,17), dpi=600)
plt.subplot(3, 2, 1)
plt.imshow(c12)
plt.subplot(3, 2, 2)
plt.imshow(c23)
plt.subplot(3, 2, 3)
plt.imshow(c34)
plt.subplot(3, 2, 4)
plt.imshow(c45)
plt.subplot(3, 2, 5)
plt.imshow(c56)
plt.figure(figsize=(15,15), dpi=600)
plt.subplot(2, 2, 1)
plt.imshow(im_123)
plt.subplot(2, 2, 2)
plt.imshow(im_234)
plt.subplot(2, 2, 3)
plt.imshow(im_345)
plt.subplot(2, 2, 4)
plt.imshow(im_456)
plt.figure(figsize=(15,15), dpi=600)
plt.subplot(2, 2, 1)
plt.imshow(c123)
plt.subplot(2, 2, 2)
plt.imshow(c234)
plt.subplot(2, 2, 3)
plt.imshow(c345)
plt.subplot(2, 2, 4)
plt.imshow(c456)
plt.figure(figsize=(10,10), dpi=600)
plt.subplot(3, 2, 1)
plt.imshow(im_1234)
plt.subplot(3, 2, 2)
plt.imshow(im_2345)
plt.subplot(3, 2, 3)
plt.imshow(im_3456)
plt.figure(figsize=(10,10), dpi=600)
plt.subplot(3, 2, 1)
plt.imshow(c1234)
plt.subplot(3, 2, 2)
plt.imshow(c2345)
plt.subplot(3, 2, 3)
plt.imshow(c3456)
plt.figure(figsize=(10,10), dpi=600)
plt.subplot(2, 1, 1)
plt.imshow(im_12345)
plt.subplot(2, 1, 2)
plt.imshow(im_23456)
plt.figure(figsize=(10,10), dpi=600)
plt.subplot(2, 1, 1)
plt.imshow(c12345)
plt.subplot(2, 1, 2)
plt.imshow(c23456)
plt.figure(figsize=(10,10), dpi=600)
plt.imshow(im_123456)
plt.figure(figsize=(10,10), dpi=600)
plt.imshow(c123456)
# In[234]:
img_20mm = cv.imread("b_pano_20mm.png",0)
im_20_123456, H20_123456 = image_match(img_20mm, c123456)
c20_123456 = combine_images(c123456, img_20mm, H20_123456)
plt.figure(figsize=(10,10), dpi=600)
plt.imshow(im_20_123456)
plt.figure(figsize=(10,10), dpi=600)
plt.imshow(c20_123456)
# ## P4.3 Triangulation
#
# In class, we discussed how you could extract information about a 3D scene given two cameras and their camera projection matrices. Here, we will investigate a simple example to learn the fundamentals.
#
# ### P4.3.1 Projecting Into Image Space
#
# Below, I have provided you with two images taken by two cameras `a` and `b`. In this question, we will go over some camera basics, namely how to compute the image-space point from a 3D point in the scene and the known camera matrices.
#
# Some information about the two camera matrices:
# - The cameras are translated such that `t_a = [0, -0.2, 5]` and `t_b = [-1.5, 0, 5]`
# - No rotation is applied to either camera (so the rotation matrix is the identity matrix)
# - The focal length of the camera (for these 1024 px images) is `f = 1170.3` (in units of pixels).
# - The camera center is located at the center of the image.
#
# **QUESTION** What are the camera matrices $P_a$ and $P_b$? I will accept either the final matrix, or the matrix written in terms of its component matrices (the intrinsic and extrinsic matrices), as long as these are defined.
#
# I have provided you with a single point below in 3D space `X0` that exists on one of the corners of the cube shown in the scene.
#
# **TASK + PLOTS** Implement the function `get_projected_point(P, X)` which takes in a camera matrix `P` and a 3D scene point `X`. If your matrices are implemented correctly, you should see that the projected 3D point overlaps with one of the corners of the cube in image space. Include the two images with the point `X0` projected onto the two images.
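# For reference, with no rotation each projection matrix has the form
# $P = K\,[I \mid t]$ with $K = [[f, 0, c_x], [0, f, c_y], [0, 0, 1]]$, where
# $(c_x, c_y)$ is the image centre (512, 512 for these 1024 px images) and $t$ is
# the camera translation given above. A projected homogeneous point $x = P X$ is
# divided by its third component to obtain pixel coordinates.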
# In[119]:
## Starter code
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
def load_image(filepath):
img = Image.open(filepath)
img = np.asarray(img).astype(np.float64)/255
return img[:, :, :3]
image_a = load_image('two_view_cube_image_a.png')
image_b = load_image('two_view_cube_image_b.png')
plt.figure(figsize=(10,10), dpi=600)
plt.subplot(121)
plt.imshow(image_a)
plt.subplot(122)
plt.imshow(image_b)
# In[227]:
# TASK: Implement the camera matrices & get_projected_point
f = 1170.3  # focal length in pixels, as given in the problem statement
cx, cy = image_a.shape[1] / 2, image_a.shape[0] / 2  # principal point at the image centre
K = np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])
extrinA = np.array([[1, 0, 0, 0], [0, 1, 0, -0.2], [0, 0, 1, 5]])
extrinB = np.array([[1, 0, 0, -1.5], [0, 1, 0, 0], [0, 0, 1, 5]])
Pa = np.dot(K, extrinA)
Pb = np.dot(K, extrinB)
X0 = np.array([ 0.85244616, 0.9508618, -0.51819406, 1])
points_3D = [X0]
def get_projected_point(P, X):
x = np.dot(P, X)
return x[:2] / x[2]  # normalize by the homogeneous coordinate to get pixel coordinates
# In[196]:
## Plotting Code
if Pa is None or Pb is None:
raise NotImplementedError("Define the camera matrices.")
def visualize_projected_points(image, P, points_3D, verbose=False):
plt.figure(dpi=100)
plt.imshow(image)
for X in points_3D:
x = get_projected_point(P, X)
if verbose:
print(x)
plt.plot(x[0], x[1], 'ko')
visualize_projected_points(image_a, Pa, points_3D)
visualize_projected_points(image_b, Pb, points_3D)
# ### P4.3.2 Determining the Size of the Cube
#
# Now you will invert this operation. In class, we discussed how to triangulate a point from two correspondences. The relevant slide from L08.1 is as follows:
#
# <img src="triangulation_lin_alg.png" width="400">
#
# (*Note*: I have used `Pa` and `Pb` to denote the camera matrices, whereas the included slide uses $p$ and $p'$.) You can use SVD to solve for the "best" value of the 3D point $X$ (equivalently, you can find the eigenvector of $A^T A$ with the smallest eigenvalue). Manually determine the (x, y) coordinates of two corners in the provided images (measured from the upper-left corner) and use them as part of this triangulation procedure. By finding the 3D points corresponding to two of the corners and computing the distance between them, you should be able to compute the size of the cube in the images.
#
# **TASK** Pick two corners of the cube and include the $(x, y)$ image coordinates for both `image_a` and `image_b` and the 3D world coordinate $(X, Y, Z)$ in your writeup.
#
# **QUESTION** What is the side length of the cube shown in the two images above? (The answer might be somewhat sensitive to the coordinates you measure in image space, though we are only looking for a "close enough" number within maybe 10% of the "correct" answer. You should feel free to use more than two points and average the results to get a more accurate result.)
#
# You can confirm that your estimated 3D coordinates are correct by reprojecting them back into image space using your solution from the previous question to check for accuracy.
#
# *We will use your full response to evaluate partial credit, so be sure to enumerate the steps you took and (if you feel it helpful) intermediate results or code snippets.*
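# A minimal sketch of the SVD-based triangulation described above, assuming Pa and
# Pb are the 3x4 camera matrices from P4.3.1 and that (xa, ya) / (xb, yb) are the
# pixel coordinates of the same corner measured in image_a / image_b. The helper
# name is illustrative and not part of the provided starter code.
def triangulate_point(Pa, Pb, xa, ya, xb, yb):
    # Two rows per view of the homogeneous system A X = 0; the solution is the
    # right singular vector associated with the smallest singular value.
    A = np.stack([
        xa * Pa[2] - Pa[0],
        ya * Pa[2] - Pa[1],
        xb * Pb[2] - Pb[0],
        yb * Pb[2] - Pb[1],
    ])
    _, _, vt = np.linalg.svd(A)
    X = vt[-1]
    return X[:3] / X[3]

# The cube side length is then the distance between two triangulated corners, e.g.
# np.linalg.norm(triangulate_point(Pa, Pb, xa1, ya1, xb1, yb1)
#                - triangulate_point(Pa, Pb, xa2, ya2, xb2, yb2))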
# In[234]:
X0 = np.array([ 0.85244616, 0.9508618, -0.51819406, 1])
X1 = np.array([ 0.90244616, 0.6508618, -0.51819406, 1])
points_3D_1 = [X0]
points_3D_2 = [X1]
visualize_projected_points(image_a, Pa, points_3D_1)
visualize_projected_points(image_a, Pa, points_3D_2)
# ## P4.4 Stereo Patch Matching
#
# Now I have provided you with a stereo pair of images (already rectified) and a handful of features in one of the images. Your job is to find the locations of the corresponding features in the other image using *patch match stereo* as we discussed in class. I have provided you with some starter code in the function `patch_match_stereo` below, which iterates through the possible match locations and returns the one with the best match score.
#
# **QUESTION** The possible feature matches in the second image are along the epipolar line. Since the images are properly rectified, what is the epipolar line in the second image corresponding to coordinate `(x_a, y_a)` in the first image?
#
# **TASK** Define the `possible_coordinates` vector in the `patch_match_stereo` function using your answer. Once that is defined, the `patch_match_stereo` function will loop through all possible feature coordinates in the second image and return the coordinate with the best *match_score*.
#
# **TASK** Implement the function `compute_match_score_ssd` (Sum of Squared Differences) using the formula we discussed in class: $$ \text{response} = -\sum_{k,l} (g_{kl} - f_{kl})^2, $$ where $g$ is the patch from `image_a` and $f$ is the patch from `image_b`. If this function is correctly implemented, you should see some of the features are aligned between the two images.
#
# **TASK** Implement the function `compute_match_score_ncc` (Normalized Cross Correlation) using the formula: $$ \text{response} = \frac{\sum_{k,l}(g_{kl} - \bar{g})(f_{kl} - \bar{f})}{\sqrt{\sum_{kl}(g_{kl} - \bar{g})^2}\sqrt{\sum_{kl}(f_{kl} - \bar{f})^2}}$$
#
# Once you have implemented these functions, you should run the plotting code I have included below, which computes a disparity map over the entire image. **NOTE: this will take a long time to run, so be sure that you confirm that your code is working properly first. You may want to test using the code from the breakout session L08B first.**
#
# **PLOTS** Include in your writeup the depth plots generated by each of the two match scores generated by the code below in the code block beginning with `# Compute and plot the depth maps`.
#
# **QUESTION** The left few columns of both depth maps are quite noisy and inaccurate. Give an explanation for why this is the case.
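# A minimal sketch of the two patch scores defined by the formulas above, operating
# on two equally sized patches (g from image_a, f from image_b). These helpers are
# illustrative; the code below instead substitutes OpenCV's StereoBM/StereoSGBM for
# the full patch-match loop.
import numpy as np

def match_score_ssd(patch_g, patch_f):
    # response = -sum((g - f)^2); closer to zero means a better match
    diff = patch_g.astype(np.float64) - patch_f.astype(np.float64)
    return -np.sum(diff ** 2)

def match_score_ncc(patch_g, patch_f):
    # normalized cross-correlation in [-1, 1]; 1 is a perfect match
    g = patch_g.astype(np.float64) - np.mean(patch_g)
    f = patch_f.astype(np.float64) - np.mean(patch_f)
    denom = np.sqrt(np.sum(g ** 2)) * np.sqrt(np.sum(f ** 2))
    return np.sum(g * f) / denom if denom > 0 else 0.0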
# In[64]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import os
import re
import scipy.signal
import cv2
image_a = cv2.imread('art_view0.png',0)
image_b = cv2.imread('art_view5.png',0)
plt.figure(figsize=(12, 5))
ax_a = plt.subplot(1, 2, 1)
plt.imshow(image_a, cmap='gray')
ax_b = plt.subplot(1, 2, 2)
plt.imshow(image_b, cmap='gray')
# In[112]:
def compute_match_score_ssd(image_a, image_b):
stereo = cv2.StereoBM_create(numDisparities = 144,blockSize = 5)
disparity = stereo.compute(image_a,image_b)
return disparity
def compute_match_score_ncc(image_a, image_b):
stereo = cv2.StereoSGBM_create(numDisparities = 144,blockSize = 5)
disparity = stereo.compute(image_a,image_b)
return disparity
def patch_match_stereo(image_a, image_b, match_score_fn):
response = match_score_fn(image_a, image_b)
return response
# In[115]:
def compute_depth_map(image_a, image_b, match_score_fn):
depth = patch_match_stereo(image_a, image_b, match_score_fn)
return depth
plt.figure()
plt.imshow(compute_depth_map(image_a, image_b, compute_match_score_ssd), cmap = 'gray')
plt.title('Depth Map (SSD)')
plt.figure()
plt.imshow(compute_depth_map(image_a, image_b, compute_match_score_ncc), cmap = 'gray')
plt.title('Depth Map (NCC)')
# In[ ]:
|
#!/usr/bin/python3
print("content-type:text/html")
print()
import cgi
import subprocess
f=cgi.FieldStorage()
cmd=f.getvalue("x")
a=subprocess.getoutput("sudo " + cmd)
print(a)
|
from roboclaw import *
import time
def counterClockwise(speed):
M1Forward(128, speed)
M2Forward(128, speed)
M2Forward(129, speed)
def clockwise(speed):
M1Backward(128, speed)
M2Backward(128, speed)
M2Backward(129, speed)
def right():
M1Backward(128, 60)
M2Backward(128, 60)
M2Forward(129, 120)
def left():
M1Forward(128, 60)
M2Forward(128, 60)
M2Backward(129, 120)
def forward():
M1Forward(128, 127)
M2Backward(128, 127)
M2Forward(129, 2)
def back():
M1Backward(128, 127)
M2Forward(128, 127)
M2Forward(129, 0)
def stop():
M1Forward(128, 0)
M2Forward(128, 0)
M2Forward(129, 0)
def box():
forward()
time.sleep(1)
stop()
time.sleep(.3)
right()
time.sleep(1.5)
stop()
time.sleep(.3)
back()
time.sleep(.8)
stop()
time.sleep(.3)
left()
time.sleep(1.5)
stop()
time.sleep(.3)
stop()
def crazy():
M1Forward(120, 128)
M2Backward(120, 128)
M1Forward(2, 129)
time.sleep(.3)
M1Backward(60, 128)
M2Backward(60, 128)
M1Forward(120, 129)
time.sleep(.3)
M1Backward(120, 128)
M2Forward(120, 128)
M1Forward(0, 129)
time.sleep(.3)
M1Forward(60, 128)
M2Forward(60, 128)
M1Backward(120, 129)
time.sleep(.3)
stop()
|
#ASSIGNMENT6
#QUESTION:1 Take 10 integers from the user and print it on the screen.
#SOLUTION:
l=[]
for n in range(0,10):
l.append(int(input("enter the integer: ")))
print(l)
#QUESTION:2 Write an infinite loop. An infinite loop never ends. Condition is always true.
#SOLUTION:
#first method
i=1
while i<10:
print("hello world")
print(i)
#second method
i=1
while i!=10:
print("hello world")
print(i)
#QUESTION:3 Create a list of integer elements by user input.
# Make a new list which will store square of elements of previous list.
#SOLUTION:
#using a for loop
l=[]
s=[]
for x in range(4):
l.append(int(input("enter a number: ")))
for x in l:
s.append(x**2)
print(l)
print(s)
#QUESTION:4 From a list containing ints, strings and floats, make three lists to store them separately
#SOLUTION:
l=[]
for x in range(0,3):
x=int(input("enter numbers: "))
l.append(x)
for x in range(3,6):
x=input("enter strings: ")
l.append(x)
for x in range(6,9):
x=float(input("enter floats: "))
l.append(x)
print(l)
list1=[]
list2=[]
list3=[]
for x in l:
if type(x)==int:
list1.append(x)
elif type(x)==str:
list2.append(x)
elif type(x)==float:
list3.append(x)
print(list1)
print(list2)
print(list3)
#QUESTION:5 Using range(1,101), make a list containing even and odd numbers.
#SOLUTION:
list1=[]
list2=[]
for i in range(1,101):
if (i%2==0):
print("even",i)
list1.append(i)
elif (i%2==1):
print("odd",i)
list2.append(i)
print(list2)
#QUESTION:6 Print the following patterns:
#*
#**
#***
#****
#SOLUTION:
num=int(input("enter the number of rows: "))
for i in range(1,num+1):
for j in range(0,i):
print("*",end="")
print()
#QUESTION:7 Create a user defined dictionary and get keys corresponding to the value using for loop.
#SOLUTION:
d={}
for x in range(5):
keys=str(input("enter the keys: "))
values=int(input("enter value item: "))
d[keys]=values
print(d)
#QUESTION:8 Take inputs from user to make a list. Again take one input from user
# and search it in the list and delete that element, if found. Iterate over list using for loop.
#SOLUTION:
l=[]
flag=0
for x in range(5):
x=int(input("enter the number: "))
l.append(x)
print(l)
y=int(input("select any number you want to search: "))
for x in l:
if x==y:
l.remove(x)
flag=1
print(l)
if flag==0:
print("the number you entered is not in the list")
|
#!/usr/bin/python3
import numpy as np
from cpa import CPA
traces_file="traces_capdir58/knownrand_fixed/knownrand_fixed_P58_data/traces/2016.06.01-11.54.29_traces.preprocessed.npy"
key_file="traces_capdir58/knownrand_fixed/knownrand_fixed_P58_data/traces/2016.06.01-11.54.29_keylist.npy"
plaintext_file="traces_capdir58/knownrand_fixed/knownrand_fixed_P58_data/traces/2016.06.01-11.54.29_textin.npy"
traces=np.load(traces_file)
key=np.load(key_file)
plaintext=np.load(plaintext_file)
cpa=CPA(traces, key, plaintext)
cpa.train()
cpa.save_train("krf_cpa_corr.npy")
cpa.plot_train("result/knownrand_fixed")
test_traces_file="traces_capdir58/knownfixed_rand/knownfixed_rand_P58_data/traces/2016.06.01-11.50.56_traces.preprocessed.npy"
test_plaintext_file="traces_capdir58/knownfixed_rand/knownfixed_rand_P58_data/traces/2016.06.01-11.50.56_textin.npy"
test_traces=np.load(test_traces_file)
test_plaintext=np.load(test_plaintext_file)
cpa.test(test_traces, test_plaintext)
|
class Employee():
def __init__(self,last_name,first_name,salary):
self.first_name=first_name
self.last_name=last_name
self.salary=salary
def give_raise(self,increment=5000):
# add a default raise of 5000 unless a custom increment is given
self.salary+=increment
|
from ..models import Dish
from django.forms import ModelForm
from django.forms import Select, TextInput
class DishForm(ModelForm):
class Meta:
model = Dish
fields = '__all__'
widgets ={
'name':TextInput(attrs={'class': 'form-control mr-3'}),
'unit':Select(attrs={'class': 'form-control mr-3'}),
'out':TextInput(attrs={'class': 'form-control mr-3'}),
'tech_map':Select(attrs={'class': 'form-control mr-3'}),
}
|
import os
import requests
from qubell.api.private.testing import environment, instance, values
from qubell.api.tools import retry
from testtools import skip
from test_runner import BaseComponentTestCase
def eventually(*exceptions):
"""
Method decorator, that waits when something inside eventually happens
Note: 'sum([delay*backoff**i for i in range(tries)])' ~= 580 seconds ~= 10 minutes
:param exceptions: same as except parameter, if not specified, valid return indicated success
:return:
"""
return retry(tries=50, delay=0.5, backoff=1.1, retry_exception=exceptions)
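# Sanity check of the estimate in the docstring: with tries=50, delay=0.5 and
# backoff=1.1 the worst-case total wait is sum(0.5 * 1.1 ** i for i in range(50)),
# roughly 582 seconds, i.e. about 10 minutes.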
def check_site(instance):
# Wait until the entry endpoint is available
@eventually(AssertionError, KeyError)
def eventually_assert():
assert len(instance.returnValues['endpoints.entry'])
eventually_assert()
# Check site still alive
url = instance.returnValues['endpoints.entry']
resp = requests.get(url)
assert resp.status_code == 200
assert 'PetClinic :: a Spring Framework demonstration' in resp.text
@environment({
"default": {},
"AmazonEC2_CentOS_63": {
"policies": [{
"action": "provisionVms",
"parameter": "imageId",
"value": "us-east-1/ami-eb6b0182"
}, {
"action": "provisionVms",
"parameter": "vmIdentity",
"value": "root"
}]
},
"AmazonEC2_CentOS_53": {
"policies": [{
"action": "provisionVms",
"parameter": "imageId",
"value": "us-east-1/ami-beda31d7"
}, {
"action": "provisionVms",
"parameter": "vmIdentity",
"value": "root"
}]
},
"AmazonEC2_Ubuntu_1204": {
"policies": [{
"action": "provisionVms",
"parameter": "imageId",
"value": "us-east-1/ami-d0f89fb9"
}, {
"action": "provisionVms",
"parameter": "vmIdentity",
"value": "ubuntu"
}]
},
"AmazonEC2_Ubuntu_1004": {
"policies": [{
"action": "provisionVms",
"parameter": "imageId",
"value": "us-east-1/ami-0fac7566"
}, {
"action": "provisionVms",
"parameter": "vmIdentity",
"value": "ubuntu"
}]
}
})
class PetClinicComponentTestCase(BaseComponentTestCase):
name = "starter-java-web"
apps = [{
"name": name,
"file": os.path.realpath(os.path.join(os.path.dirname(__file__), '../%s.yml' % name))
}, {
"name": "db",
"url": "https://raw.github.com/qubell-bazaar/component-mysql-dev/master/component-mysql-dev.yml",
"launch": False
}, {
"name": "lb",
"url": "https://raw.github.com/qubell-bazaar/component-haproxy/master/component-haproxy.yml",
"launch": False
}, {
"name": "app",
"url": "https://raw.github.com/qubell-bazaar/component-tomcat-dev/master/component-tomcat-dev.yml",
"launch": False
}]
db_name = "petclinic"
@instance(byApplication=name)
@values({"lb-host": "host"})
def test_host(self, instance, host):
resp = requests.get("http://" + host, verify=False)
assert resp.status_code == 200
@instance(byApplication=name)
@values({"db-port": "port", "db-host": "host"})
def test_db_port(self, instance, host, port):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, port))
assert result == 0
@instance(byApplication=name)
def test_petclinic_up(self, instance):
check_site(instance)
@instance(byApplication=name)
def test_scaling(self, instance):
assert len(instance.returnValues['endpoints.app']) == 1
params = {'input.app-quantity': '2'}
instance.reconfigure(parameters=params)
assert instance.ready(timeout=20)
check_site(instance)
# Check we have 2 hosts up
@eventually(AssertionError, KeyError)
def eventually_assert():
assert len(instance.returnValues['endpoints.app']) == 2
eventually_assert()
@skip('Until https://github.com/qubell/starter-java-web/pull/7 applied')
def test_change_branch(self, instance):
params = {'input.app-branch': 'red'}
instance.reconfigure(parameters=params)
assert instance.ready(timeout=20)
check_site(instance)
resp = requests.get(self.url)
assert 'Updated PetClinic :: a Spring Framework demonstration' in resp.text
|
import cv2
import numpy as np
import tensorflow as tf
from collections import defaultdict
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
def read_labels(filename='../tensorflow/labels.txt'):
# Read label file
label_file = open(filename, 'r')
labels = label_file.read().split()
label_file.close()
return labels
def game_change_detect(
video_name,
model_path='../tensorflow/vg-classifier-model/vg-classifier-model.meta',
image_size=256,
num_channels=3
):
image_size = 256
num_channels = 3
# Read Label File
labels = read_labels()
# Read video file
vidObj = cv2.VideoCapture(video_name)
success, img = vidObj.read()
# Start tensorflow session
sess = tf.Session()
saver = tf.train.import_meta_graph(model_path)
saver.restore(sess, tf.train.latest_checkpoint('../tensorflow/vg-classifier-model/'))
graph = tf.get_default_graph()
# Moving average of previous 20 frames
prevClass = []
frameChange = []
classDict = defaultdict(lambda: 0)
prevMajClass = -1
n = 0
frame_arr = []
while success:
frame_arr.append(vidObj.get(cv2.CAP_PROP_POS_MSEC)//1000)
image = []
img = cv2.resize(img, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
image.append(img)
img = np.array(image, dtype=np.uint8)
img = img.astype('float32')
img = np.multiply(img, 1.0 / 255.0)
x_batch = img.reshape(1, image_size, image_size, num_channels)
y_pred = graph.get_tensor_by_name('y_pred:0')
x = graph.get_tensor_by_name('x:0')
y_true = graph.get_tensor_by_name('y_true:0')
y_test_images = np.zeros((1, len(labels)))
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result = sess.run(y_pred, feed_dict=feed_dict_testing)
res = max(result[0])
for i, j in enumerate(result[0]):
if j == res:
prevClass.append(i)
classDict[i] += 1
if len(prevClass) > 20:
classDict[prevClass[0]] -= 1
del prevClass[0]
break
maxClass = max(prevClass, key=classDict.get)
if prevMajClass == -1:
prevMajClass = maxClass
if prevMajClass != maxClass:
prevMajClass = maxClass
frameChange.append(n)
n += 1
success, img = vidObj.read()
frameChange.append(n-1)
return frameChange, frame_arr
def main():
video_name = '../data/videos/blops_and_hstone.mp4'
frames_changed, frame_arr = game_change_detect(video_name)
print('Games changed at frames: ', frames_changed)
response = input('Would you like to download? (y/n): ')
if response == 'y':
prev_frame = 0
for frame in frames_changed:
print(prev_frame, frame)
ffmpeg_extract_subclip(video_name, frame_arr[prev_frame], frame_arr[frame], targetname="./highlight" + str(frame) + ".mp4")
prev_frame = frame
if __name__ == "__main__":
main()
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_player(db: Session, player_id: int):
return db.query(models.Player).filter(models.Player.id == player_id).first()
def get_players(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Player).offset(skip).limit(limit).all()
def create_player(db: Session, player: schemas.PlayerBase):
db_player = models.Player(
first_name=player.first_name,
last_name=player.last_name,
position=player.position,
goals_scored=player.goals_scored,
active=player.active,
wages=player.wages,
games_played=player.games_played,
squad_number=player.squad_number
)
db.add(db_player)
db.commit()
db.refresh(db_player)
return db_player
|
import json
import re
import os
import numpy as np
import scipy.stats as stats
import pandas as pd
import datetime as dt
folder = 'Analysed_Data/'
lumis = ['PLT','HFLumi', 'BCM1F', 'HFLumiET']
fillr = r'output(\d+).*\.json'
dirs = [i for i in os.listdir(folder) if i[0] != 'F' and int(i[:4]) >= 5718]
cols = ['timestamp', 'fill', 'filledbunches', 'fit', 'peak_x_bx','peak_x_bx_stdev', 'capsigma_x_bx','capsigma_x_bx_stdev', 'peak_y_bx','peak_y_bx_stdev', 'capsigma_y_bx','capsigma_y_bx_stdev', 'sigmavis_bx','sigmavis_bx_stdev']
for lumi in lumis:
df = pd.DataFrame(columns = cols)
fitr = r'output\d+' + lumi + '(.*).json'
for dir in dirs:
jsons = [i for i in os.listdir(folder + '/' + dir) if i[-4:] == 'json' and lumi in i]
if lumi == 'HFLumi':
jsons = [i for i in jsons if 'HFLumiET' not in i]
for j in jsons:
fit = re.match(fitr,j).group(1)
fill = re.match(fillr,j).group(1)
j = json.load(open(folder + dir + '/' +j,'r'))
row = {}
key=cols[0]
row.update({key:dt.datetime.fromtimestamp(j[key])})
key = cols[1]
row.update({key:int(j[key])})
key = cols[2]
row.update({key:len([i for i in j[cols[-2]] if i != 0])})
key = cols[3]
row.update({key:fit})
for key in cols[4::2]:
#if 'stdev' not in key:
data = []
data = [i for i in j[key] if i != 0]
row.update({key:np.mean(data)})
row.update({key + '_stdev':stats.sem(data)})
df = df.append(row, ignore_index=True)
df.sort_values(by='timestamp',inplace=True)
df.to_csv(lumi + '_table.csv',index=False)
|
locations = {
(0,0): 'house',
(0,1): 'lake',
(1,0): 'park',
(1,1): 'market',
}
description = {
(0,0): "A BIG DARK HOUSE WITH NO LIGHTS",
(0,1): 'A LAKE USED FOR DUMPING RUBBISH',
(1,0): 'A SUNNY PARK FULL OF PEOPLE',
(1,1): 'A MARKET FULL OF FOOD STALLS',
}
items = {
(0,0): 'candle stick',
(0,1): 'rubber duck',
(1,0): 'football',
(1,1): 'bag of chips',
}
map = {
(0,0): '*H* P\n L',
(0,1): 'H \n*L* M',
(1,0): 'H *P*\n M',
(1,1): ' P\nL *M*',
}
|
from os import listdir
from os.path import isfile, join
from ofxparse import OfxParser
import pandas as pd
import numpy as np
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
UNCATEGORIZED = 'uncategorized'
def load_ofxs(path):
files = []
for f in listdir(path):
if isfile(join(path, f)) and 'ofx' in f:
file_path = join(path, f)
files.append(file_path)
return files
def get_label(phrase, words_df):
for index, row in words_df.iterrows():
if row['word'].lower() in phrase.lower():
return row['label']
# return np.nan
return UNCATEGORIZED
def parse_file(file, transactions_data, words_df):
print(file)
total_file = 0.0
competency = None
with open(file, encoding="ISO-8859-1") as fileobj:
ofx = OfxParser.parse(fileobj)
for transaction in ofx.account.statement.transactions:
if transaction.amount < 0:
label = get_label(transaction.memo, words_df)
day = transaction.date.strftime("%Y-%m-%d")
month = transaction.date.strftime("%Y-%m")
year = transaction.date.year
transactions_data.append(
[
transaction.memo,
transaction.amount,
day,
month,
year,
label
]
)
# print(transaction.memo, "\t", transaction.amount, "\t", label)
return transactions_data
def plot_pie(title, plt, items, total, the_grid, s, e):
labels = []
fracs = []
for item in items:
labels.append(item[0])
percentage = item[1]/total
fracs.append(percentage)
# Make square figures and axes
plt.subplot(the_grid[s, e], aspect=1)
plt.title(title)
plt.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
if __name__ == '__main__':
paths = ['./extratos/bb/', './extratos/bradesco/', './extratos/inter/']
words_df = pd.read_csv('datasets/words.csv', delimiter=";", names=['word', 'label'])
# print(words_df)
transactions_data = []
total = 0.0
totals = {}
for path in paths:
for file in load_ofxs(path):
parse_file(file, transactions_data, words_df)
df = pd.DataFrame(transactions_data, columns = ['memo', 'amount', 'day', 'month', 'year', 'label'])
df.to_csv('datasets/extratos.csv')
df2 = df[df.label == UNCATEGORIZED]
print(df2.groupby(['memo'])['memo'].count())
despesas_by_label = {}
total = {}
# df.loc[df['label']!='cartao']
for index, row in df.iterrows():
year, month, label = row['year'], row['month'], row['label']
# if year not in despesas_by_label:
# despesas_by_label[year] = {}
if month not in despesas_by_label:
despesas_by_label[month] = {}
total[month] = Decimal(0.0)
if label not in despesas_by_label[month]:
despesas_by_label[month][label] = Decimal(0.0)
if label != 'cartao':
despesas_by_label[month][label] += row['amount']
total[month] += row['amount']
# despesas_by_label.pop('cartao', None)
# print(despesas_by_label)
# print(despesas_by_label.items())
grouping_df = df.groupby(['year', 'month', 'label'])['amount'].sum()
grouping_df.to_csv('datasets/totais.csv')
print(grouping_df)
# the_grid = GridSpec(3, 4)
# s = 0
# e = 0
# for despesas in despesas_by_label.items():
# m = despesas[0]
# if e > 3:
# s += 1
# e = 0
# print(s,e,m)
# plot_pie(str(m), plt, despesas_by_label[m].items(), total[m], the_grid, s, e)
# e += 1
# plt.show()
|
from Jumpscale import j
import netaddr
import ipaddress
def chat(bot):
"""
"""
user_info = bot.user_info()
name = user_info["username"]
email = user_info["email"]
ips = ["IPv6", "IPv4"]
default_cluster_name = name.split(".3bot")[0]
expiration = j.data.time.epoch + (60 * 60 * 24) # for one day
explorer = j.clients.explorer.explorer
if not email:
raise j.exceptions.BadRequest("Email shouldn't be empty")
ip_version = bot.single_choice(
"This wizard will help you deploy a kubernetes cluster, do you prefer to access your 3bot using IPv4 or IPv6? If unsure, choose IPv4",
ips,
)
workers_number = bot.int_ask("Please specify the number of worker nodes") # minimum should be 1
cluster_size = workers_number + 1 # number of workers + the master node
ssh_keys = bot.upload_file(
""""Please add your public ssh key, this will allow you to access the deployed container using ssh.
Just upload the ssh keys file with each key on a seperate line"""
).split("\n")
cluster_secret = bot.string_ask("Please add the cluster secret", default="secret")
# create new reservation
reservation = j.sal.zosv2.reservation_create()
identity = explorer.users.get(name=name, email=email)
# Select nodes
nodes_selected = j.sal.chatflow.nodes_get(workers_number + 1, farm_id=71, ip_version=ip_version)
# Create network of reservation and add peers
reservation, configs = j.sal.chatflow.network_configure(
bot, reservation, nodes_selected, customer_tid=identity.id, ip_version=ip_version
)
rid = configs["rid"]
# Create master and workers
# Master is in the first node from the selected nodes
master = j.sal.zosv2.kubernetes.add_master(
reservation=reservation,
node_id=nodes_selected[0].node_id,
network_name=configs["name"],
cluster_secret=cluster_secret,
ip_address=configs["ip_addresses"][0],
size=cluster_size,
ssh_keys=ssh_keys,
)
# Workers are in the rest of the nodes
for i in range(1, len(nodes_selected)):
worker = j.sal.zosv2.kubernetes.add_worker(
reservation=reservation,
node_id=nodes_selected[i].node_id,
network_name=configs["name"],
cluster_secret=cluster_secret,
ip_address=configs["ip_addresses"][i],
size=cluster_size,
master_ip=master.ipaddress,
ssh_keys=ssh_keys,
)
# register the reservation
resv_id = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
res = """
## Kubernetes cluster has been deployed successfully
# your reservation id is: {}
Click next to proceed to the wireguard configuration that needs to be set up on your machine
""".format(
resv_id
)
res = j.tools.jinja2.template_render(text=j.core.text.strip(res), **locals())
bot.md_show(res)
filename = "{}_{}.conf".format(f"{default_cluster_name}_{i}", resv_id)
res = """
## Use the following template to configure your wireguard connection. This will give you access to your 3bot.
# Make sure you have wireguard ```https://www.wireguard.com/install/``` installed
## ```wg-quick up /etc/wireguard/{}```
Click next
to download your configuration
""".format(
filename
)
res = j.tools.jinja2.template_render(text=j.core.text.strip(res), **locals())
bot.md_show(res)
res = j.tools.jinja2.template_render(text=configs["wg"], **locals())
bot.download_file(res, filename)
for i, ip in enumerate(configs["ip_addresses"]):
res = """
kubernetes node {} IP : {}
To connect: ssh rancher@{}
""".format(
i + 1, ip, ip
)
res = j.tools.jinja2.template_render(text=res, **locals())
bot.md_show(res)
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################################
# #
# hrma_plot_trends.py: create hrma src data trend plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 15, 2021 #
# #
#################################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import numpy
import astropy.io.fits as pyfits
import time
import Chandra.Time
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Hrma_src/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
#
#--- temp space
#
import random
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
x_dist = 500
x_dist2 = 1300
#-----------------------------------------------------------------------------------------
#-- hrma_plot_trends: creates hrma src2 related data ---
#-----------------------------------------------------------------------------------------
def hrma_plot_trends(year):
"""
creates trend plots of hrma src2 related data
input: year --- the year for which you want to create the plots. if <blank>, plot this year
output: <web_dir>/Plots/<category>/<instrument>/*.png
"""
if year == '':
yday = float(time.strftime("%j", time.gmtime()))
year = time.strftime("%Y", time.gmtime())
iyear = int(float(year))
#
#--- if it is the beginning of the year, just update the previous year
#
if yday < 15:
stday = str(iyear -1) + ':001:00:00:00'
ystart = Chandra.Time.DateTime(stday).secs
stday = year + ':001:00:00:00'
ystop = Chandra.Time.DateTime(stday).secs
else:
stday = year + ':001:00:00:00'
ystart = Chandra.Time.DateTime(stday).secs
stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
ystop = Chandra.Time.DateTime(stday).secs
else:
stday = str(year) + ":001:00:00:00"
ystart = Chandra.Time.DateTime(stday).secs
stday = str(year + 1) + ":001:00:00:00"
ystop = Chandra.Time.DateTime(stday).secs
hdata = read_data()
plot_sky_position(hdata[1], hdata[2], hdata[6], hdata[7], ystart, ystop, year)
plot_psf(hdata[1], hdata[2], hdata[12], hdata[13], ystart, ystop, year)
plot_roundness(hdata[1], hdata[2], hdata[10], hdata[13], ystart, ystop, year)
plot_energy_radius(hdata[1], hdata[2], hdata[9], hdata[13], ystart, ystop, year)
plot_dist_snr(hdata[1], hdata[2], hdata[8], hdata[13], ystart, ystop, year)
plot_rotation(hdata[1], hdata[2], hdata[11], hdata[14], hdata[13], hdata[9], ystart, ystop, year)
#-----------------------------------------------------------------------------------------
#-- read_data: read data from hrma_src_data --
#-----------------------------------------------------------------------------------------
def read_data():
"""
read data from hrma_src_data
input: read from <data_dir>/hrma_src_data
output: a list of arrays of:
0 obsid
1 inst
2 start
3 stop
4 sim_x
5 sim_z
6 x
7 y
8 snr
9 ravg
10 rnd
11 rotang
12 psf
13 dist
14 angd
"""
ifile = data_dir + 'hrma_src_data'
data = mcf.read_data_file(ifile)
obsid = []
inst = []
start = []
stop = []
sim_x = []
sim_z = []
x = []
y = []
snr = []
ravg = []
rnd = []
rotang= []
psf = []
dist = []
angd = []
for ent in data:
atemp = re.split('\s+', ent)
obsid.append(int(float(atemp[0])))
inst.append(atemp[1])
start.append(int(float(atemp[2])))
stop.append(int(float(atemp[3])))
sim_x.append(float(atemp[4]))
sim_z.append(float(atemp[5]))
x.append(float(atemp[6]))
y.append(float(atemp[7]))
snr.append(float(atemp[8]))
ravg.append(float(atemp[9]))
rnd.append(float(atemp[10]))
rotang.append(float(atemp[11]))
psf.append(float(atemp[12]))
dist.append(float(atemp[13]))
angd.append(float(atemp[14]))
#
#--- sort by time
#
start = numpy.array(start)
sind = start.argsort()
start = start[sind]
obsid = numpy.array(obsid)[sind]
inst = numpy.array(inst)[sind]
stop = numpy.array(stop)[sind]
sim_x = numpy.array(sim_x)[sind]
sim_z = numpy.array(sim_z)[sind]
x = numpy.array(x)[sind]
y = numpy.array(y)[sind]
snr = numpy.array(snr)[sind]
ravg = numpy.array(ravg)[sind]
rnd = numpy.array(rnd)[sind]
rotang= numpy.array(rotang)[sind]
psf = numpy.array(psf)[sind]
dist = numpy.array(dist)[sind]
angd = numpy.array(angd)[sind]
return [obsid, inst, start, stop, sim_x, sim_z, x, y, snr, ravg, rnd, rotang, psf, dist, angd]
#-----------------------------------------------------------------------------------------
#-- plot_sky_position: plotting sky position related plots                              --
#-----------------------------------------------------------------------------------------
def plot_sky_position(inst, stime, x, y, ystart, ystop, year):
"""
plotting sky position related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
x --- x position
y --- y position
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Positions/<inst>_sky_position.png
"""
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
if det in ['acis_i', 'acis_s']:
xmin = 2800
xmax = 5200
ymin = 2800
ymax = 5200
# elif det == 'acis_s':
# xmin = 0
# xmax = 7000
# ymin = 1000
# ymax = 8000
elif det == 'hrc_i':
xmin = 6000
xmax = 26000
ymin = 7000
ymax = 27000
else:
xmin = 22000
xmax = 42000
ymin = 22000
ymax = 42000
xlabel = 'X Position'
ylabel = 'Y Position'
outdir = web_dir + 'Plots/Positions/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = x[sind]
yp = y[sind]
st = stime[sind]
out = outdir + det + '_sky_position.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out)
# tind = (st >= ystart) &(st < ystop)
# xt = xp[tind]
# yt = yp[tind]
#
# out = outdir + det + '_sky_position_' + str(year) + '.png'
# plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out)
#-----------------------------------------------------------------------------------------
#-- plot_psf: plotting psf related plots --
#-----------------------------------------------------------------------------------------
def plot_psf(inst, stime, psf, dist, ystart, ystop, year):
"""
plotting psf related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
psf --- psf
dist --- distance from the center
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Psf/<inst>_dist_psf.png
<web_dir>/Plots/Psf/<inst>_dist_psf_<year>.png
"""
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
if det in ['acis_i', 'acis_s']:
xmin = 0
xmax = x_dist
ymin = 0
ymax = 20
else:
xmin = 0
xmax = x_dist2
ymin = 0
ymax = 120
xlabel = 'Off Axis Dist (arcsec)'
ylabel = 'PSF (arcsec)'
outdir = web_dir + 'Plots/Psf/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = dist[sind]
yp = psf[sind]
st = stime[sind]
out = outdir + det + '_dist_psf.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#
#--- yearly plot
#
tind = (st >= ystart) &(st < ystop)
xt = xp[tind]
yt = yp[tind]
out = outdir + det + '_dist_psf_' + str(year) + '.png'
plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#-----------------------------------------------------------------------------------------
#-- plot_roundness: plotting roundness related plots --
#-----------------------------------------------------------------------------------------
def plot_roundness(inst, stime, round, dist, ystart, ystop, year):
"""
plotting roundness related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
round --- roundness
dist --- distance from the center
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Roundness/<inst>_dist_roundness.png
<web_dir>/Plots/Roundness/<inst>_dist_roundness_<year>.png
"""
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
if det in ['acis_i', 'acis_s']:
xmin = 0
#xmax = 1100
xmax = x_dist
ymin = 1
ymax = 4.5
else:
xmin = 0
#xmax = 10000
xmax = x_dist2
ymin = 0
ymax = 4.5
xlabel = 'Off Axis Distance (arcsec)'
ylabel = 'Roundness'
outdir = web_dir + 'Plots/Roundness/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = dist[sind]
yp = round[sind]
st = stime[sind]
out = outdir + det + '_dist_roundness.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
tind = (st >= ystart) &(st < ystop)
xt = xp[tind]
yt = yp[tind]
#
#--- yearly plot
#
out = outdir + det + '_dist_roundness_' + str(year) + '.png'
plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
# xlabel = 'Time (year)'
# out = outdir + det + '_time_roundness.png'
# st = convert_time_in_fyear(st)
# xmax = int(max(st)) + 1
# plot_panel(st, yp, 1999, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#-----------------------------------------------------------------------------------------
#-- plot_energy_radius: plotting radius related plots --
#-----------------------------------------------------------------------------------------
def plot_energy_radius(inst, stime, radius, dist, ystart, ystop, year):
"""
plotting radius related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
radius --- radius
dist --- distance from the center
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Radius/<inst>_dist_radius.png
<web_dir>/Plots/Radius/<inst>_dist_radius_<year>.png
"""
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
if det in ['acis_i', 'acis_s']:
xmin = 0
#xmax = 1100
xmax = x_dist
ymin = 0
#ymax = 20
ymax = 10
else:
xmin = 0
#xmax = 10000
xmax = x_dist2
ymin = 0
#ymax = 600
ymax = 80
xlabel = 'Off Axis Distance (arcsec)'
ylabel = 'Encircled Energy Radius'
outdir = web_dir + 'Plots/Radius/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = dist[sind]
yp = radius[sind]
st = stime[sind]
out = outdir + det + '_dist_radius.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
tind = (st >= ystart) &(st < ystop)
xt = xp[tind]
yt = yp[tind]
#
#--- yearly plot
#
out = outdir + det + '_dist_radius_' + str(year) + '.png'
plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#-----------------------------------------------------------------------------------------
#-- plot_dist_snr: plotting snr related plots ---
#-----------------------------------------------------------------------------------------
def plot_dist_snr(inst, stime, snr, dist, ystart, ystop, year):
"""
plotting snr related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
snr --- snr
dist --- distance from the center
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Snr/<inst>_dist_snr.png
<web_dir>/Plots/Snr/<inst>_dist_snr_<year>.png
"""
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
if det == 'acis_i':
xmin = 0
#xmax = 1100
xmax = x_dist
ymin = 6
ymax = 80
elif det == 'acis_s':
xmin = 0
#xmax = 1100
xmax = x_dist
ymin = 6
#ymax = 300
ymax = 150
elif det in ['hrc_i', 'hrc_s']:
xmin = 0
#xmax = 10000
xmax = x_dist2
ymin = 6
#ymax = 400
ymax = 100
xlabel = 'Off Axis Distance (arcsec)'
ylabel = 'SNR'
outdir = web_dir + 'Plots/Snr/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = dist[sind]
yp = snr[sind]
st = stime[sind]
out = outdir + det + '_dist_snr.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#
#--- yearly plot
#
tind = (st >= ystart) &(st < ystop)
xt = xp[tind]
yt = yp[tind]
out = outdir + det + '_dist_snr_' + str(year) + '.png'
plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
# out = outdir + det + '_time_snr.png'
# st = convert_time_in_fyear(st)
# xmax = int(max(st)) + 1
# xlabel = 'Time (year)'
# plot_panel(st, yp, 1999, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#-----------------------------------------------------------------------------------------
#-- plot_rotation: plotting rotation angle related plots --
#-----------------------------------------------------------------------------------------
def plot_rotation(inst, stime, rotang, angd, dist, rad, ystart, ystop, year):
"""
plotting rotation angle related plots
input: inst --- instrument
stime --- starting time in sec from 1998.1.1
rotang --- rotation angle
angd --- rotation angle estimated from x and y
dist --- distance from the center
rad --- roundness
ystart --- starting time in sec from 1998.1.1 for yearly plot
ystop --- stopping time in sec from 1998.1.1 for yearly plot
year --- year of the yearly plot
output: <web_dir>/Plots/Rotation/<inst>_angd_rotang.png
<web_dir>/Plots/Rotation/<inst>_dist_rotation.png
<web_dir>/Plots/Rotation/<inst>_dist_rotation_<year>.png
"""
#
for det in ['acis_i', 'acis_s', 'hrc_i', 'hrc_s']:
xmin = 0
xmax = 3.1
ymin = 0
ymax = 3.1
xlabel = 'ANGD'
ylabel = 'ROTANG'
outdir = web_dir + 'Plots/Rotation/' + det.capitalize() + '/'
cmd = 'mkdir -p ' + outdir
os.system(cmd)
sind = inst == det
xp = angd[sind]
yp = rotang[sind]
st = stime[sind]
ds = dist[sind]
rd = rad[sind]
#
#--- remove xp = 0 cases
#
idx = xp != 0
xp = xp[idx]
yp = yp[idx]
st = st[idx]
ds = ds[idx]
rd = rd[idx]
out = outdir + det + '_angd_rotang.png'
plot_panel(xp, yp, xmin, xmax, ymin, ymax, xlabel, ylabel, out)
#
#--- dist - rotang/angd
#
xmin = 0
if det in ['acis_i', 'acis_s']:
#xmax = 1100
xmax = x_dist
else:
#xmax = 10000
xmax = x_dist2
ymin = 0
ymax = 10
xlabel = 'Off Axis Distance (arcsec)'
ylabel = 'ROTANG/ANGD'
ratio = yp / xp
out = outdir + det + '_dist_rotation.png'
plot_panel(ds, ratio, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#
#--- yearly plot
#
tind = (st >= ystart) &(st < ystop)
xt = ds[tind]
yt = ratio[tind]
out = outdir + det + '_dist_rotation_' + str(year) + '.png'
plot_panel(xt, yt, xmin, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#
#--- time - rotang/angd
#
# out = outdir + det + '_time_rot.png'
# st = convert_time_in_fyear(st)
# xmax = int(max(st)) + 1
# xlabel = 'Time (year)'
# plot_panel(st, ratio, 1999, xmax, ymin, ymax, xlabel, ylabel, out, width=15)
#-----------------------------------------------------------------------------------------
#-- convert_time_in_fyear: convert Chandra time to a fractional year                     --
#-----------------------------------------------------------------------------------------
def convert_time_in_fyear(t_list):
"""
convert Chandra time to a fractional year
input: t_list --- a list of time in sec from 1998.1.1
output: tsave --- a list of time in fractional year
"""
tsave = []
for ent in t_list:
etime = Chandra.Time.DateTime(ent).date
atemp = re.split(':', etime)
year = float(atemp[0])
yday = float(atemp[1])
hh = float(atemp[2])
mm = float(atemp[3])
ss = float(atemp[4])
if mcf.is_leapeyear(year):
base = 366.0
else:
base = 365.0
out = year + (yday + hh /24.0 + mm / 1440.0 + ss / 86400.0) / base
tsave.append(out)
tsave = numpy.array(tsave)
return tsave
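#
#--- example: '2021:046:12:00:00' converts to 2021 + (46 + 12/24) / 365 = 2021.1274 (non-leap year)
#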
#-----------------------------------------------------------------------------------------
#-- plot_panel: plot data ---
#-----------------------------------------------------------------------------------------
def plot_panel(x, y, xmin, xmax, ymin, ymax, xlabel, ylabel, outname, width=10.0, height=10.0, fit=0):
"""
plot data
input: x --- a list of independent data
y --- a list of dependent data
xmin --- min of x plotting range
xmax --- max of x plotting range
ymin --- min of y plotting range
ymax --- max of y plotting range
xlabel --- a label of x axis
ylabel --- a label of y axis
outname --- the output file name
width --- width of the plot in inch; default: 10 in
height --- height of the plot in inch: default: 10 in
fit --- whether to fit a line; 0 means no fit, otherwise the value gives the degree of the polynomial fit
output: outname
"""
#
#--- set params
#
fsize = 20
pcolor = 'blue'
lcolor = 'red'
marker = '.'
msize = 8
lw = 4
#
#--- close everything open
#
plt.close('all')
#
#--- set font size
#
mpl.rcParams['font.size'] = fsize
props = font_manager.FontProperties(size=fsize)
#
#--- set plotting range
#
ax = plt.subplot(111)
ax.set_autoscale_on(False)
ax.set_xbound(xmin,xmax)
ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)
ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)
#
#--- plot data
#
plt.plot(x, y, color=pcolor, marker=marker, markersize=msize, lw=0)
plt.xlabel(xlabel, size=fsize)
plt.ylabel(ylabel, size=fsize)
#
#--- fit line
#
if fit > 0:
coeffs = fit_line(x, y, fit)
[x_est, y_est] = estimate_fit_val(xmin, xmax, coeffs)
plt.plot(x_est, y_est, color=lcolor, marker=marker, markersize=0, lw=lw)
else:
coeffs = []
#
#--- create plot and save
#
fig = plt.gcf()
fig.set_size_inches(width, height)
plt.tight_layout()
plt.savefig(outname, format='png', dpi=200)
plt.close('all')
return coeffs
#-----------------------------------------------------------------------------------------
#-- fit_line: fit polynomial line on the given data --
#-----------------------------------------------------------------------------------------
def fit_line(x, y, deg):
"""
fit polynomial line on the given data
input: x --- independent data
y --- dependent data
deg --- degree of polynomial
output: coeffs --- a list of coefficients
"""
ax = numpy.array(x)
ay = numpy.array(y)
coeffs = numpy.polyfit(ax, ay, deg)
return coeffs
#-----------------------------------------------------------------------------------------
#-- estimate_fit_val: create a list of data points of the estimated line                --
#-----------------------------------------------------------------------------------------
def estimate_fit_val(xmin, xmax, coeffs):
"""
create a list of data points of the estimated line
input: xmin --- x starting point
xmax --- x stopping point
coeffs --- coefficients of polynomial fitting
output: x --- x data
y_est --- y data
"""
deg = len(coeffs)
step = (xmax - xmin) / 100.0
x = []
for k in range(0, 100):
x.append(xmin + step * k)
#--- numpy.polyfit returns coefficients with the highest power first, so evaluate with polyval
y_est = list(numpy.polyval(coeffs, numpy.array(x)))
return [x, y_est]
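#
#--- usage example (with the polyfit convention of highest power first):
#--- fit_line([1, 2, 3], [2, 4, 6], 1) gives approximately [2.0, 0.0] (slope, intercept), and
#--- estimate_fit_val(1, 3, [2.0, 0.0]) returns 100 x points with y_est following y = 2 * x
#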
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1:
year = int(float(sys.argv[1]))
else:
year = ''
hrma_plot_trends(year)
# for year in range(1999, 2021):
# print("Year: " + str(year))
# hrma_plot_trends(year)
|
from collections import defaultdict
import cv2
import numpy as np
from matplotlib import pyplot as plt
from maximumIndependentSet import MaximumIndependentSet
from occupancyGrid import OccupancyGrid
from utils import Direction, Point, Submap
class GridSubmapper:
def __init__(self, occ_grid):
self.occ_grid = occ_grid.clone()
self.rectilinear_occ_grid = occ_grid.generateRectilinearOcc()
self.definite_rectangles = []
self.possible_rectangles = set()
self.submaps = []
def getNhoodOccupancy(self, x, y):
'''
Returns a bool array with 4 elements corresponding to whether the top, left, bottom or right cells are occupied.
Out of bounds is treated as unoccupied.
'''
# Top, Left, Bottom, Right
occupied = [False, False, False, False]
# Check Top
if y - 1 >= 0 and self.rectilinear_occ_grid[x, y - 1] == 1:
occupied[0] = True
# Check Left
if x - 1 >= 0 and self.rectilinear_occ_grid[x - 1, y] == 1:
occupied[1] = True
# Check Bottom
if y + 1 < self.rectilinear_occ_grid.size_y and self.rectilinear_occ_grid[x, y + 1] == 1:
occupied[2] = True
# Check Right
if x + 1 < self.rectilinear_occ_grid.size_x and self.rectilinear_occ_grid[x + 1, y] == 1:
occupied[3] = True
return occupied
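# Example: for a cell whose only occupied 4-neighbour is the cell directly above it,
# getNhoodOccupancy returns [True, False, False, False] (top, left, bottom, right)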
def getNhoodValues(self, x, y):
'''
Returns an array containing the values above, left, below and right of the given x,y.
Out of bounds is treated as 1.
'''
# Top, Left, Bottom, Right
occupied = [1, 1, 1, 1]
# Check Top
if y - 1 >= 0:
occupied[0] = self.rectilinear_occ_grid[x, y - 1]
# Check Left
if x - 1 >= 0:
occupied[1] = self.rectilinear_occ_grid[x - 1, y]
# Check Bottom
if y + 1 < self.rectilinear_occ_grid.size_y:
occupied[2] = self.rectilinear_occ_grid[x, y + 1]
# Check Right
if x + 1 < self.rectilinear_occ_grid.size_x:
occupied[3] = self.rectilinear_occ_grid[x + 1, y]
return occupied
def isCorner(self, x, y):
# If point is not occupied then it cant be a corner
if self.rectilinear_occ_grid[x, y] == 0:
return False
on_boundary = True if (x == 0 or x == self.rectilinear_occ_grid.size_x - 1 or y == 0 or y == self.rectilinear_occ_grid.size_y - 1) else False
occupied = self.getNhoodOccupancy(x, y)
# If we are on the boundary then a corner only forms if a surrounding 4-neighbourhood has 1 occupied
if on_boundary:
if sum(occupied) == 1:
return True
elif sum(occupied) == 2: # Otherwise a corner only forms when the 4-nhood has 2 occupied
# Check the case where the two occupied positions form a straight line
if (occupied[0] and occupied[2]) or (occupied[1] and occupied[3]):
return False
else:
return True
return False
def getCorners(self):
corners = []
for j in range(self.rectilinear_occ_grid.size_y):
for i in range(self.rectilinear_occ_grid.size_x):
if self.isCorner(i, j):
corners.append(Point(i,j))
return corners
def seperateCorners(self, corners):
'''
Returns all pairs of points in corners that are cogrid, i.e. that lie along the same x or y line. The result
is split into two lists: vertical pairs and horizontal pairs.
'''
# Naive O(n^2) implementation
# Could be improved via sorting
has_cogrid_pair = [False] * len(corners)
vertical_pairs = []
horizontal_pairs = []
# Iterate through all points to find cogrid vertices. Need to ensure a clear line exists between the two points
for a in range(len(corners) - 1):
for b in range(a + 1, len(corners)):
if corners[a].x == corners[b].x:
# Check there is an uninterrupted line between the two points
start = min(corners[a].y, corners[b].y)
end = max(corners[a].y, corners[b].y)
isValid = True
for idx in range(start + 1, end):
if self.rectilinear_occ_grid[corners[a].x, idx] == 1:
isValid = False
break
# If the two points form a valid cogrid pair then add to our result
if isValid:
vertical_pairs.append((corners[a], corners[b]))
has_cogrid_pair[a] = True
has_cogrid_pair[b] = True
elif corners[a].y == corners[b].y:
# Check there is an uninterrupted line between the two points
start = min(corners[a].x, corners[b].x)
end = max(corners[a].x, corners[b].x)
isValid = True
for idx in range(start + 1, end):
if self.rectilinear_occ_grid[idx, corners[a].y] == 1:
isValid = False
break
# If the two points form a valid cogrid pair then add to our result
if isValid:
horizontal_pairs.append((corners[a], corners[b]))
has_cogrid_pair[a] = True
has_cogrid_pair[b] = True
remaining_corners = [corners[i] for i in range(len(corners)) if has_cogrid_pair[i] == False]
return vertical_pairs, horizontal_pairs, remaining_corners
def getEdgeDirection(self, x, y):
# [top, left, bottom, right]
occupied = self.getNhoodOccupancy(x, y)
# Take first free direction rather than random dir
direction = occupied.index(False)
# Check what to mark the cell as based on whats around it
check_idx = (direction - 1) % 4
if occupied[check_idx]:
if check_idx == 0:
return Direction.LEFT
elif check_idx == 1:
return Direction.DOWN
elif check_idx == 3:
return Direction.UP
else:
# The case where check_idx is 2 should never occur because we always choose the first unoccupied in CCW direction
print("ERROR")
exit() # TODO:
# An edge case occurs when the cells above the corner and to the right of the corner are unoccupied
elif occupied[(direction + 1) % 4]:
return Direction.SPECIAL
def markEdge(self, x, y, direction):
cur_point = Point(x, y)
if direction == Direction.SPECIAL:
# Move upwards
direction = Direction.UP
cur_point = cur_point.shift(direction)
# Occurs if another edge runs over this one
if self.rectilinear_occ_grid[cur_point.x, cur_point.y] < 0:
return False
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
# But mark the cell with down
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = direction.opposite().value
cur_point = cur_point.shift(direction)
if self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == direction.next().value:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.INTERSECTION.value
else:
cur_point = cur_point.shift(direction)
# Occurs if another edge runs over this one
if self.rectilinear_occ_grid[cur_point.x, cur_point.y] < 0:
return False
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = direction.value
cur_point = cur_point.shift(direction)
# TODO:
if self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == direction.next().value:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.INTERSECTION.value
return True
def markCogrid(self, x1, y1, x2, y2, vertical_line):
occupied_1 = self.getNhoodOccupancy(x1, y1)
occupied_2 = self.getNhoodOccupancy(x2, y2)
# Right rotate occupied_1
right_rotate = occupied_1[-1:] + occupied_1[:-1]
# Left rotate occupied_1
left_rotate = occupied_1[1:] + occupied_1[:1]
if right_rotate == occupied_2:
if vertical_line:
# Start from top most point and move down
start = Point(x1, min(y1, y2))
self.markEdge(start.x, start.y, Direction.DOWN)
# Return the start point shifted one (so that it is the corner of the rectangle) as well as an adjacent tile (that forms a potential rectangle)
rect_corner = start.shift(Direction.DOWN)
return [rect_corner], [rect_corner.shift(Direction.RIGHT)]
else:
# Start from left most point and move right
start = Point(min(x1, x2), y1)
self.markEdge(start.x, start.y, Direction.RIGHT)
rect_corner = start.shift(Direction.RIGHT)
return [rect_corner], [rect_corner.shift(Direction.UP)]
elif left_rotate == occupied_2:
if vertical_line:
# Start from bottom most point and move up
start = Point(x1, max(y1, y2))
self.markEdge(start.x, start.y, Direction.UP)
rect_corner = start.shift(Direction.UP)
return [rect_corner], [rect_corner.shift(Direction.LEFT)]
else:
# Start from right most point and move left
start = Point(max(x1, x2), y1)
self.markEdge(start.x, start.y, Direction.LEFT)
rect_corner = start.shift(Direction.LEFT)
return [rect_corner], [rect_corner.shift(Direction.DOWN)]
elif occupied_1 == occupied_2[::-1]:
'''
If we reach here it means we have two corners whose leading edge move in opposite directions i.e.
| |
--+ +-- or --+
|
+--
|
'''
if vertical_line:
downward_start_point = Point(x1 - 1, min(y1,y2))
upward_start_point = Point(x1 + 1, max(y1, y2))
self.markEdge(downward_start_point.x, downward_start_point.y, Direction.DOWN)
self.markEdge(upward_start_point.x, upward_start_point.y, Direction.UP)
# TODO: Include the 1 line that forms between rectangles in possible_rect?
return [downward_start_point.shift(Direction.DOWN), upward_start_point.shift(Direction.UP)], []
else:
# For this case with horizontal lines, the edge ends on the corner rather than beginning at it thus we need to run markEdge backwards
leftward_end_point = Point(min(x1, x2), y1 - 1)
rightward_end_point = Point(max(x1, x2), y1 + 1)
# Get the start points of the edge (using the end_point will double add a corner during the rectangle phase)
result = []
cur_point = leftward_end_point.shift(Direction.RIGHT)
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.LEFT.value
cur_point = cur_point.shift(Direction.RIGHT)
result.append(cur_point.shift(Direction.LEFT))
cur_point = rightward_end_point.shift(Direction.LEFT)
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.RIGHT.value
cur_point = cur_point.shift(Direction.LEFT)
result.append(cur_point.shift(Direction.RIGHT))
return result, []
else:
'''
Same case as above except the edges are flipped i.e.
| |
--+ +-- or +--
|
--+
|
'''
if vertical_line:
# For this case with vertical lines, the edge ends on the corner rather than beginning at it thus we need to run the mark edge backwards
upward_end_point = Point(x1 + 1, min(y1, y2))
downward_end_point = Point(x1 - 1, max(y1, y2))
# Get the start points of the edge (using the end_point will double add a corner during the rectangle phase)
result = []
cur_point = upward_end_point.shift(Direction.DOWN)
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.UP.value
cur_point = cur_point.shift(Direction.DOWN)
result.append(cur_point.shift(Direction.UP))
cur_point = downward_end_point.shift(Direction.UP)
while self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y) and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 0:
self.rectilinear_occ_grid[cur_point.x, cur_point.y] = Direction.DOWN.value
cur_point = cur_point.shift(Direction.UP)
result.append(cur_point.shift(Direction.DOWN))
return result, []
else:
leftward_initial_point = Point(max(x1, x2), y1 - 1)
rightward_initial_point = Point(min(x1, x2), y1 + 1)
self.markEdge(leftward_initial_point.x, leftward_initial_point.y, Direction.LEFT)
self.markEdge(rightward_initial_point.x, rightward_initial_point.y, Direction.RIGHT)
return [leftward_initial_point.shift(Direction.LEFT), rightward_initial_point.shift(Direction.RIGHT)], []
def makeRectangle(self, x, y, possible_rectangles):
'''
There is an edge case that occurs when an UP edge is broken in two by a LEFT edge. However this is a non-issue since the
order in which we search for edges and mark them is from top to bottom from left to right. This means the LEFT edge will always
occur first before an upwards edge, i.e. only a LEFT edge will be split by an UP edge not vice-versa.
'''
initial_point = Point(x,y)
cur_dir = Direction(self.rectilinear_occ_grid[x, y])
cur_point = initial_point
cur_point = cur_point.shift(cur_dir)
rectangle_corners = [initial_point]
cell_value = self.rectilinear_occ_grid[cur_point.x, cur_point.y]
# Check the edge case where we start on a special corner
if cur_dir == Direction.DOWN and self.rectilinear_occ_grid[cur_point.x, cur_point.y] == 1:
cur_dir = Direction.LEFT
cur_point = initial_point
cur_point = cur_point.shift(cur_dir)
while cur_point != initial_point:
# Update the possible rectangles
if cur_point in possible_rectangles:
possible_rectangles.remove(cur_point)
if len(rectangle_corners) > 4:
print("PROBLEM")
print(rectangle_corners)
exit()
# Check that the current point is in bounds
if self.rectilinear_occ_grid.inBounds(cur_point.x, cur_point.y):
cell_value = self.rectilinear_occ_grid[cur_point.x, cur_point.y]
# Otherwise backtrack, change direction and mark the point as a corner in the rectangle
else:
cur_point = cur_point.shift(cur_dir.opposite())
cur_dir = cur_dir.next()
rectangle_corners.append(cur_point)
# Move in new direction so that we dont have issues with the error checking steps below
cur_point = cur_point.shift(cur_dir)
# Go to next iteration
continue
# print(cur_point, end="--- ")
# print(cell_value)
# If we hit a cell that has 0 or the current direction then we continue moving in same direction
if cell_value == 0 or cell_value == cur_dir.value:
cur_point = cur_point.shift(cur_dir)
# If we encounter an a cell with the next direction then add the corner and follow the next direction
elif cell_value == cur_dir.next().value:
# Move with new direction
cur_dir = cur_dir.next()
rectangle_corners.append(cur_point)
cur_point = cur_point.shift(cur_dir)
# If we encounter an intersection check the value in the next direction after the intersection.
elif cell_value == Direction.INTERSECTION.value:
cur_dir = cur_dir.next()
potential_corner = cur_point
cur_point = cur_point.shift(cur_dir)
if self.rectilinear_occ_grid[cur_point.x, cur_point.y] == cur_dir.value:
rectangle_corners.append(potential_corner)
else:
potential_corner = potential_corner.shift(cur_dir.next())
rectangle_corners.append(potential_corner)
cur_point = potential_corner.shift(cur_dir)
# If we hit an obstacle (i.e. 1) or other marked cell then backtrack, change direction and mark point as a corner in the rectangle
else:
cur_point = cur_point.shift(cur_dir.opposite())
cur_dir = cur_dir.next()
rectangle_corners.append(cur_point)
# Move in new direction so that we dont have issues with the error checking steps
cur_point = cur_point.shift(cur_dir)
return Submap(rectangle_corners)
def splitIntoRectangles(self, concave_corners):
'''
Given a list of concave corner points, splits the rectilinear_occ_grid into rectangles. Returns a list of points that are the corners of unique rectangles and
a set of points that lie on the edges of other potential rectangles that were missed. For any split it can create either 1 or 2 rectangles, thus
the set is used to keep track of extra rectangles.
'''
definite_rectangles = []
possible_rectangles = set()
for corner in concave_corners:
direction = self.getEdgeDirection(corner.x, corner.y)
suitable_edge = self.markEdge(corner.x, corner.y, direction)
if not suitable_edge:
continue
# Based on the direction of the edge add the corner of the rectangle as well as the adjacent rectangle
if direction == Direction.SPECIAL:
definite_rectangles.append(corner.shift(Direction.UP))
# Offset contains the previous shift
possible_rectangles.add(corner.shift(Direction.RIGHT))
else:
definite_rectangles.append(corner.shift(direction))
# Offset contains the previous shift
possible_rectangles.add(corner.shift(direction.next().opposite()))
return definite_rectangles, possible_rectangles
def extractSubmaps(self):
used_corners = set()
# STEP 1: Iterate through the points known to be rectangle corners
for corner in self.definite_rectangles:
# Skip this corner if it has been used in another rectangle
if corner in used_corners:
continue
submap = self.makeRectangle(corner.x, corner.y, self.possible_rectangles)
# Add the corners of this submap into the used corners set
for p in submap.corners:
used_corners.add(p)
self.submaps.append(submap)
# STEP 2: Iterate through the rectangles that may have not been accounted for
while len(self.possible_rectangles) > 0:
cell = self.possible_rectangles.pop()
occupied = self.getNhoodValues(cell.x, cell.y)
direction_of_corner = None
if occupied.count(0) == 3:
if occupied[0]:
direction_of_corner = Direction.RIGHT
elif occupied[1]:
direction_of_corner = Direction.UP
elif occupied[2]:
direction_of_corner = Direction.LEFT
elif occupied[3]:
direction_of_corner = Direction.DOWN
# Need to move to the closest corner
while occupied.count(0) != 2:
cell = cell.shift(direction_of_corner)
occupied = self.getNhoodValues(cell.x, cell.y)
direction_of_corner = direction_of_corner.next()
else:
if occupied[3] and occupied[0]:
direction_of_corner = Direction.DOWN
elif occupied[0] and occupied[1]:
direction_of_corner = Direction.RIGHT
elif occupied[1] and occupied[2]:
direction_of_corner = Direction.UP
elif occupied[2] and occupied[3]:
direction_of_corner = Direction.LEFT
if direction_of_corner is None:
# print(occupied)
continue
# Set the cell to have the correct direction and then make a submap
self.rectilinear_occ_grid[cell.x, cell.y] = direction_of_corner.value
self.submaps.append(self.makeRectangle(cell.x, cell.y, self.possible_rectangles))
def handleCogridCorners(self, vertical_pairs, horizontal_pairs):
bipartite_graph = defaultdict(list)
# Used to keep track of points that dont make it into the graph
isolated_vertical = []
isolated_horizontal = []
# Create a temp_rectilinear_occ_grid to use for faster construction of the bipartite graph (if we use rectilinear_occ_grid it will leave remnants which we dont want)
temp_rectilinear_occ_grid = self.rectilinear_occ_grid.clone()
# Mark the vertical cogrid lines
for num, (point_1, point_2) in enumerate(vertical_pairs, 2):
# Find start and end points
start_y = min(point_1.y, point_2.y)
end_y = max(point_1.y, point_2.y)
# Mark the cells from start to end with num
for i in range(start_y, end_y + 1):
temp_rectilinear_occ_grid[point_1.x, i] = num
# Mark horizontal cogrid lines and build the bipartite graph
for num, (point_1, point_2) in enumerate(horizontal_pairs, 2 + len(vertical_pairs)):
independent_node = True
# Find start and end points
start_x = min(point_1.x, point_2.x)
end_x = max(point_1.x, point_2.x)
for i in range(start_x, end_x + 1):
val = int(temp_rectilinear_occ_grid[i, point_1.y])
if val > 1:
independent_node = False
bipartite_graph[val].append(num)
temp_rectilinear_occ_grid[i, point_1.y] = num
# Keep track of the point if it is not part of the graph
if independent_node:
isolated_horizontal.append((point_1, point_2))
# Find the vertical pairs that didnt make it into the graph
for num, pair in enumerate(vertical_pairs, 2):
if num not in bipartite_graph:
isolated_vertical.append(pair)
MIS = MaximumIndependentSet(bipartite_graph)
MIS.compute()
# Maintain a set of used points
used_points = set()
# Mark the corners that are part of the MIS
for num in MIS.max_independent_set:
idx = num - 2
cogrid_definite_rect_corners = None
cogrid_possible_rect_corners = None
if idx < len(vertical_pairs):
point_1, point_2 = vertical_pairs[idx]
cogrid_definite_rect_corners, cogrid_possible_rect_corners = self.markCogrid(point_1.x, point_1.y, point_2.x, point_2.y, True)
used_points.add(point_1)
used_points.add(point_2)
else:
idx -= len(vertical_pairs)
point_1, point_2 = horizontal_pairs[idx]
cogrid_definite_rect_corners, cogrid_possible_rect_corners = self.markCogrid(point_1.x, point_1.y, point_2.x, point_2.y, False)
used_points.add(point_1)
used_points.add(point_2)
self.definite_rectangles.extend(cogrid_definite_rect_corners)
self.possible_rectangles.update(cogrid_possible_rect_corners)
# Mark cogrid corners that were not included in the bipartite graph
for (point_1, point_2) in isolated_vertical:
cogrid_definite_rect_corners, cogrid_possible_rect_corners = self.markCogrid(point_1.x, point_1.y, point_2.x, point_2.y, True)
self.definite_rectangles.extend(cogrid_definite_rect_corners)
self.possible_rectangles.update(cogrid_possible_rect_corners)
for (point_1, point_2) in isolated_horizontal:
cogrid_definite_rect_corners, cogrid_possible_rect_corners = self.markCogrid(point_1.x, point_1.y, point_2.x, point_2.y, False)
self.definite_rectangles.extend(cogrid_definite_rect_corners)
self.possible_rectangles.update(cogrid_possible_rect_corners)
# Lastly, find the leftover points that are not part of the MIS so we use them for further decomposition later
leftover_corners = []
for num in MIS.min_vertex_cover:
idx = num - 2
if idx < len(vertical_pairs):
point_1, point_2 = vertical_pairs[idx]
if point_1 not in used_points:
leftover_corners.append(point_1)
if point_2 not in used_points:
leftover_corners.append(point_2)
else:
idx -= len(vertical_pairs)
point_1, point_2 = horizontal_pairs[idx]
if point_1 not in used_points:
leftover_corners.append(point_1)
if point_2 not in used_points:
leftover_corners.append(point_2)
return leftover_corners
def handleRemainingCorners(self, noncogrid_corners, leftover_corners):
corners = noncogrid_corners + leftover_corners
definite_remaining_rectangles, possible_remaining_rectangles = self.splitIntoRectangles(corners)
self.definite_rectangles.extend(definite_remaining_rectangles)
self.possible_rectangles.update(possible_remaining_rectangles)
def handleNonRectangularRegions(self):
difference = self.rectilinear_occ_grid.grid - self.occ_grid.grid
difference[difference <= 0] = 0
difference = np.uint8(difference)
contours, _ = cv2.findContours(difference, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for shape in contours:
# Convert the numpy array to list of tuples
points = [Point(x,y) for x, y in shape.reshape(len(shape), 2)]
non_rectangle_submap = Submap(points, False)
self.submaps.append(non_rectangle_submap)
def visualization(self):
visual_grid = self.occ_grid.clone()
visual_grid.grid[visual_grid.grid < 0] = 0
visual_grid.grid[visual_grid.grid > 0] = -1
for num, submap in enumerate(self.submaps):
for (x, y) in submap.range():
visual_grid[x, y] = num
return visual_grid
def process(self, block_size_x, block_size_y):
# Find the concave corners
concave_corners = self.getCorners()
# If we found corners then process them
if len(concave_corners) != 0:
# Preprocess corners
vertical_cogrid, horizontal_cogrid, noncogrid_corners = self.seperateCorners(concave_corners)
leftover_corners = self.handleCogridCorners(vertical_cogrid, horizontal_cogrid)
self.handleRemainingCorners(noncogrid_corners, leftover_corners)
# Extract
self.extractSubmaps()
# Need to handle the remaining non rectangular regions
self.handleNonRectangularRegions()
# If no corners were found then use the whole map as a submap
else:
self.rectilinear_occ_grid[0, 0] = Direction.RIGHT.value
entire_map = self.makeRectangle(0, 0, self.possible_rectangles)
self.submaps.append(entire_map)
# Finally prune out submaps that are smaller than 50 cells or where the minimum size is smaller than our sweeper
final_submaps = []
for s in self.submaps:
# Skip if less than 50 cells
if len(s.range()) < 50:
continue
# Skip if smallest edge of submap is smaller than our block
if s.is_rectangle and min(block_size_x, block_size_y) > min(s.size_x, s.size_y):
continue
final_submaps.append(s)
self.submaps = final_submaps
return self.submaps
|
"""
A) A dezena mais frequente
B) A dezena menos frequente
C) Tabela de frequencias de dezenas
D) Tabela de frequencias de duplas dezenas
E) Tabela de frequencias de triplas dezenas
"""
import libplnbsi
from operator import itemgetter
def combina(listaDezenas, qtd, dicDezenas):
i = 0; j = 0
while i < len(listaDezenas):
j = i + 1
while j < len(listaDezenas):
tpDezenas = int(listaDezenas[i]), int(listaDezenas[j])
tpDezenas = tuple(sorted(tpDezenas))
if tpDezenas not in dicDezenas:
dicDezenas[tpDezenas] = 1
else:
dicDezenas[tpDezenas] += 1
#
j += 1
#
i += 1
#
return dicDezenas
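# Example: combina(['10', '25', '33'], 2, {}) returns
# {(10, 25): 1, (10, 33): 1, (25, 33): 1}; repeated draws increment the pair counts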
#
def gravaCombinacoes(lista):
arquivo = open('arqDuplaSena.txt', 'w')
for elem in lista:
arquivo.write(str(elem[0]) + ", " + str(elem[1]) + "\n" )
#
arquivo.close()
#
def frequenciaDeDezena(nomeArq):
dicFreq = {}
dicMais = {}
arq = open(nomeArq, "r")
resp = open('respostaABC.txt', 'w')
linha = arq.readline()
dezenas = libplnbsi.tokenizador(linha.strip())[0]
while linha != "":
for elem in dezenas:
if elem not in dicFreq:
dicFreq[elem] = 1
else:
dicFreq[elem] += 1
#
#
linha = arq.readline()
dezenas = libplnbsi.tokenizador(linha.strip())[0]
#
a = list(dicFreq.values())
a.sort()
for chave, valor in dicFreq.items():
if valor == a[len(a) - 1]:
resp.write("Dezena mais frequente: "+str(chave) + ": " +str(valor) + "\n")
dicMais[chave] = valor
#
if valor == a[0]:
resp.write("Dezena menos frequente: "+str(chave) + ": " +str(valor) + "\n")
dicMais[chave] = valor
#
#
for chave, valor in dicFreq.items():
resp.write(str(chave) + ": " + str(valor) + "\n")
#
arq.close()
resp.close()
#
def main():
dicDezenas = {}
frequenciaDeDezena("megasena.txt")
arqDezenas = open("megasena.txt", "r")
linha = arqDezenas.readline()
while linha != "":
dados = libplnbsi.tokenizador(linha.strip())[0]
combina(dados, 2, dicDezenas)
linha = arqDezenas.readline()
#
arqDezenas.close()
listaDezenas = sorted(dicDezenas.items(), key=itemgetter(1), reverse=True)
gravaCombinacoes(listaDezenas)
return 0
#
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 15:52:21 2018
@author: Administrator
"""
import socket
import threading
import time
import struct
import queue
import serial
import numpy as np
from scipy import signal
from ringbuf import RingBuffer
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
FSM_IDLE = 0
FSM_SYNC = 1
FSM_DATA = 2
SYNC_HEAD = b'\x9b\xdf'
# 0 4000Hz
# 1 2000Hz
# 2 1000Hz
# 3 500Hz
# 4 250Hz
# 5 125Hz
# 6 62.5Hz
# 7 31.25Hz
# 8 15.625Hz
# 9 7.813Hz
# 10 3.906Hz
FILTER_REG = 80
FS = 4000>>(FILTER_REG&0x0f)
TARGET_FREQ = 470
FREQ_SPAN = 30
#FS = 4000
WINDOW_SIZE = 2**16
FFT_MAV_LEN = 32
#WINDOW_SIZE = 1024
fft_size = WINDOW_SIZE
rb = RingBuffer(WINDOW_SIZE,1)
in_buf = []
inb_q = queue.Queue(0)
#gain = 3.9e-6
def calc_ord(reg_val):
fs = 4000>>(reg_val&0x0f)
lpf = fs/4
lpf_reg = reg_val>>4
if(lpf_reg == 0 ):
hpf = 0
else:
hpf = 0.247*fs/(4**(lpf_reg-1))
return fs,lpf,hpf
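# Example: calc_ord(80) (reg 0x50) gives fs = 4000>>0 = 4000 Hz, lpf = fs/4 = 1000 Hz,
# hpf = 0.247*4000/4**(5-1), which is about 3.86 Hz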
def func(a):
temp = struct.unpack('f',struct.pack('I',a))
return temp
def checksum(arr_in):
xsum = 0
for item in arr_in[2:]:
xsum ^=item
return xsum
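# Example: checksum([0x9b, 0xdf, 0x01, 0x02, 0x03]) skips the two sync-head bytes and
# returns 0x01 ^ 0x02 ^ 0x03 = 0x00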
class pkg_fsm(object):
def __init__(self):
self.cstate = FSM_IDLE
self.i_cnt = 0
self.arr = []
self.frame = []
def resolve(self,din):
self.arr.append(din)
# print(bytes(self.arr[-2:]))
if self.cstate == FSM_IDLE:
if(bytes(self.arr[-2:]) == SYNC_HEAD):
# print("OK")
self.frame = []
self.frame.append(int.from_bytes(bytes(SYNC_HEAD),byteorder='big', signed=False))
self.cstate = FSM_SYNC
self.i_cnt = 0
else:
if(self.i_cnt >0):
print("drop\n")
self.i_cnt += 1
self.cstate = FSM_IDLE
elif self.cstate == FSM_SYNC:
if(self.i_cnt >= 1):
CMD = int.from_bytes(bytes(self.arr[-2:]),byteorder='big', signed=False)
self.frame.append(CMD)
self.cstate = FSM_DATA
self.i_cnt = 0
else:
self.i_cnt += 1
self.cstate = FSM_SYNC
elif self.cstate == FSM_DATA:
# off = (self.i_cnt>>2)
# print(bytes(self.frame[0]))
if(self.i_cnt&0x0003 == 0):
if(self.i_cnt == 0):
self.i_cnt += 1
else:
buf = int.from_bytes(bytes(self.arr[-4:]),byteorder='little', signed=False)
self.frame.append(buf)
buf = func(buf)
# print(buf)
rb.append(buf)
self.cstate = FSM_DATA
self.i_cnt += 1
else:
if(self.i_cnt >= ((self.frame[1]&0x0fff)-1)):
self.arr = []
self.frame = []
self.i_cnt = 0
self.cstate = FSM_IDLE
else:
self.i_cnt += 1
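# Frame layout handled by pkg_fsm.resolve (as implemented above): a 2-byte sync head
# 0x9b 0xdf, then a 2-byte big-endian CMD word whose low 12 bits are used as the payload
# byte count to detect the end of the frame, then 32-bit little-endian words that are
# reinterpreted as float32 samples and appended to the ring buffer rb.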
pfsm = pkg_fsm()
class my_mav(object):
def __init__(self,row,col):
self.mav_buf = np.zeros((int(row),int(col)))
self.acc_buf = np.zeros(int(col))
self.row_ind = 0
self.mav_cnt = 0
self.acc_cnt = 0
self.row_max = row
def acc_insert(self,din):
self.acc_buf += din
self.acc_cnt += 1
return self.acc_buf/self.acc_cnt
def mav_insert(self,din):
self.mav_buf[self.row_ind] = din
self.row_ind += 1
if(self.row_ind >= self.row_max):
self.row_ind = 0
if(self.mav_cnt < self.row_max):
self.mav_cnt += 1
else:
self.mav_cnt = self.mav_cnt
return self.mav_buf.sum(axis=0)/self.mav_cnt
def get(self,mtype='acc'):
if mtype=='mav':
return self.mav_buf.sum(axis=0)/self.mav_cnt
else:
return self.acc_buf/self.acc_cnt
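# my_mav keeps two spectrum averages: mav_insert holds a rolling mean over the last
# FFT_MAV_LEN spectra, while acc_insert accumulates a running mean of every spectrum
# passed in; get() returns the currently stored mean of the requested type.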
mav_inst = my_mav(FFT_MAV_LEN,(WINDOW_SIZE/2)+1)
def t_resolve():
while True:
lenq = inb_q.qsize()
if lenq > 0:
for i in range(lenq):
buf = inb_q.get(block=False)
pfsm.resolve(buf)
else:
time.sleep(0.01)
def ser_init():
ser = serial.Serial("com16",115200)
print(ser.name)
if ser.isOpen():
print("open success")
else:
print("open failed")
try:
while True:
count = ser.inWaiting()
if count > 0:
data = ser.read(count)
inb_q.queue.extend(data)
time.sleep(0.001)
except KeyboardInterrupt:
if ser is not None:
ser.close()
def tcp_client_init(ip,port):
ser_ip = ip
ser_port = port
tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tcp_client.connect((ser_ip,ser_port))
print('connected')
while True:
data = tcp_client.recv(4096)
if data != '':
inb_q.queue.extend(data)
time.sleep(0.02)
tcp_client.close()
print('Connection closed.')
except socket.error:
print("fail to setup socket connection")
tcp_client.close()
def sys_init(mode,ip,port):
threads = []
if mode == 1:
t1 = threading.Thread(target=tcp_client_init,args=(ip,port))
elif mode == 2:
t1 = threading.Thread(target=ser_init)
threads.append(t1)
t2 = threading.Thread(target=t_resolve)
threads.append(t2)
for t in threads:
t.setDaemon(True)
t.start()
class my_filter:
def __init__(self,N,filt_zone=[0.2],filt_type='lowpass'):
self.b,self.a = signal.butter(N, filt_zone, filt_type)
self.z = np.zeros(max(len(self.a),len(self.b))-1,dtype=np.float)
def filt(self,din):
dout, self.z = signal.lfilter(self.b, self.a, din, zi=self.z)
return dout
class iirpeak_filter:
def __init__(self,fs,f0,Q):
self.b,self.a = signal.iirpeak(f0,Q,fs)
self.z = np.zeros(max(len(self.a),len(self.b))-1,dtype=np.float)
def filt(self,din):
dout, self.z = signal.lfilter(self.b, self.a, din, zi=self.z)
return dout
fig = plt.figure()
ax = plt.subplot2grid((7,1),(0,0),rowspan=2)
af = plt.subplot2grid((7,1),(2,0),rowspan=3)
afs = plt.subplot2grid((7,1),(5,0),rowspan=3)
x = np.arange(0,WINDOW_SIZE)/FS
xh = np.arange(0,WINDOW_SIZE/2+1)*FS/(WINDOW_SIZE)
linex, = ax.plot(x,np.sin(x),'g')
linexf, = af.plot(xh,np.sin(xh),color = 'r',linestyle='-', marker=',')
linexfs, = afs.plot(xh,np.sin(xh),color = 'b',linestyle='-', marker=',')
#filt_inst = my_filter(3,[0.22,0.25],'bandpass')
filt_inst = iirpeak_filter(FS,473,40)
def gen_frames():
yield 0
def choose_windows(name='Hanning', N=20): # Rect/Hanning/Hamming
if name == 'Hamming':
window = np.array([0.54 - 0.46 * np.cos(2 * np.pi * n / (N - 1)) for n in range(N)])
elif name == 'Hanning':
window = np.array([0.5 - 0.5 * np.cos(2 * np.pi * n / (N - 1)) for n in range(N)])
elif name == 'Rect':
window = np.ones(N)
return window
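# Example: choose_windows('Hanning', 4) gives [0.0, 0.75, 0.75, 0.0], since
# 0.5 - 0.5*cos(2*pi*n/3) evaluates to 0, 0.75, 0.75, 0 for n = 0..3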
def my_fft(din):
temp = din[:fft_size]*choose_windows(name='Rect',N=fft_size)
# temp = din[:fft_size]
fftx = np.fft.rfft(temp)/fft_size
xfp = np.abs(fftx)*2
return xfp
def goertzel(din,k,N):
win = choose_windows('Hanning',N)
w = 2*np.pi*k/N
coef = 2*np.cos(w)
print("w:%f,coef:%f\n"%(w,coef))
q1=0
q2=0
for i in range(N):
x = din[i]*win[i]
q0 = coef*q1 - q2 + x
q2 = q1
q1 = q0
return np.sqrt(q1**2 + q2**2 - q1*q2*coef)*2/N
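# Possible usage (assuming rb.view holds a full window of samples): the amplitude near
# TARGET_FREQ can be estimated with
# goertzel(rb.view[:, 0], round(TARGET_FREQ*WINDOW_SIZE/FS), WINDOW_SIZE)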
def update(i):
temp = rb.view
# temp[:,0] = filt_inst.filt(temp[:,0])
linex.set_ydata(temp[:,0])
ax.set_ylim(np.min(temp[:,0]),np.max(temp[:,0]))
habx_t = my_fft(temp[:,0])
habx_t[:1700] = 0.000005
# habx_t[2500:] = 0.000005
habx = mav_inst.mav_insert(habx_t)
linexf.set_ydata(habx)
af.set_ylim(np.min(habx),np.max(habx))
if rb.flag == 1:
habx_acc = mav_inst.acc_insert(habx_t)
linexfs.set_ydata(habx_acc)
afs.set_ylim(np.min(habx_acc),np.max(habx_acc))
rb.reset_flag()
else:
habx_acc = mav_inst.get('acc')
linexfs.set_ydata(habx_acc)
afs.set_ylim(np.min(habx_acc),np.max(habx_acc))
def initial():
linex.set_ydata(np.sin(x))
linexf.set_ydata(np.zeros(int(WINDOW_SIZE/2 + 1)))
ax.set_ylim(-3,3)
ax.set_xlabel("time")
ax.set_ylabel("x(g)")
ax.grid(True, linestyle='-.')
af.set_ylim(-1,1)
af.grid(True, linestyle='-.')
af.set_xlabel("Freq(Hz)")
af.set_ylabel("Amp-z")
afs.set_ylim(-1,1)
afs.grid(True, linestyle='-.')
afs.set_xlabel("Freq(Hz)")
afs.set_ylabel("Amp-zs")
return linex,
try:
FS,LPF,HPF = calc_ord(FILTER_REG)
print("FS:%.3f,LPF:%.3f,HPF:%.3f\n" % (FS,LPF,HPF))
sys_init(mode=1,ip="192.168.1.100",port=9996)
# sys_init(mode=1,ip="192.168.4.1",port=9996)
ani = animation.FuncAnimation(fig=fig,func=update,frames=gen_frames,init_func=initial,interval=100,blit=False)
plt.show()
except KeyboardInterrupt:
pass
|
import gym
from gym_recording.playback import scan_recorded_traces
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from collections import defaultdict
from pprint import pprint
from dps import cfg
from dps.datasets import Dataset, ImageDataset, ArrayFeature, ImageFeature
from dps.utils import Param, resize_image, animate
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, action_space):
self.action_space = action_space
def act(self, observation, reward, done):
return self.action_space.sample()
def gather_atari_frames(game, policy, n_frames, density=1.0, render=False):
assert 0 < density <= 1.0
env = gym.make(game)
if policy is None:
policy = RandomAgent(env.action_space)
if render:
outdir = '/tmp/random-agent-results'
env = gym.wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
np.random.seed(0)
reward = 0
done = False
frames = []
while len(frames) < n_frames:
ob = env.reset()
while True:
action = policy.act(ob, reward, done)
ob, reward, done, _ = env.step(action)
if np.random.binomial(1, density):
frames.append(ob)
if done:
break
if render:
env.render()
env.close()
return np.array(frames[:n_frames])
def gather_atari_human_frames(game, n_frames, density=1.0):
assert 0 < density <= 1.0
human_agent_action = 0
human_wants_restart = False
human_sets_pause = False
def key_press(key, mod):
nonlocal human_agent_action, human_wants_restart, human_sets_pause
if key == 0xff0d:
human_wants_restart = True
if key == 32:
human_sets_pause = not human_sets_pause
a = int(key - ord('0'))
if a <= 0 or a >= ACTIONS:
return
human_agent_action = a
def key_release(key, mod):
nonlocal human_agent_action
a = int(key - ord('0'))
if a <= 0 or a >= ACTIONS:
return
if human_agent_action == a:
human_agent_action = 0
env = gym.make(game)
ACTIONS = env.action_space.n
SKIP_CONTROL = 0
outdir = '/tmp/random-agent-results'
env = gym.wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
env.render()
env.unwrapped.viewer.window.on_key_press = key_press
env.unwrapped.viewer.window.on_key_release = key_release
np.random.seed(0)
reward = 0
done = False
frames = []
skip = 0
env.reset()
while len(frames) < n_frames:
if not skip:
action = human_agent_action
skip = SKIP_CONTROL
else:
skip -= 1
ob, reward, done, _ = env.step(action)
env.render()
if np.random.binomial(1, density):
frames.append(ob)
print(len(frames))
if done:
env.reset()
env.close()
return np.array(frames[:n_frames])
class ReinforcementLearningDataset(ImageDataset):
rl_data_location = Param()
max_episodes = Param(None)
max_samples_per_ep = Param(None)
history_length = Param(1)
image_shape = Param()
action_dim = Param(1)
reward_dim = Param(1)
store_o = Param(True)
store_a = Param(True)
store_r = Param(True)
store_next_o = Param(True)
depth = 3
_n_examples = 0
def _write_example(self, **kwargs):
image = None
o_size = 0
if self.store_o:
image = kwargs['o']
o_size = image.shape[-1]
if self.store_next_o:
if image is None:
image = kwargs['next_o']
else:
image = np.concatenate([image, kwargs['next_o']], axis=2)
if self.postprocessing == "tile":
images, _, _ = self._tile_postprocess(image, [])
elif self.postprocessing == "random":
images, _, _ = self._random_postprocess(image, [])
else:
images = [image]
for img in images:
_kwargs = {}
_kwargs['a'] = kwargs.get('a', None)
_kwargs['r'] = kwargs.get('r', None)
o, next_o = np.split(img, [o_size], axis=-1)
_kwargs['o'] = o
_kwargs['next_o'] = next_o
self._write_single_example(**_kwargs)
@property
def features(self):
if self._features is not None:
return self._features
_features = []
if self.store_o:
obs_shape = (self.obs_shape[0], self.obs_shape[1], self.obs_shape[2] * self.history_length)
_features.append(ImageFeature("o", obs_shape))
if self.store_a:
action_dim = self.action_dim * self.history_length
_features.append(ArrayFeature("a", (action_dim,)))
if self.store_r:
reward_dim = self.reward_dim * self.history_length
_features.append(ArrayFeature("r", (reward_dim,)))
if self.store_next_o:
_features.append(ImageFeature("next_o", self.obs_shape))
self._features = _features
return _features
def _make(self):
scan_recorded_traces(self.rl_data_location, self._callback, self.max_episodes)
def _callback(self, o, a, r):
episode_length = len(o)
if self.max_samples_per_ep is None:
indices = np.arange(self.history_length, episode_length)
else:
n_indices = episode_length - self.history_length
if n_indices <= self.max_samples_per_ep:
indices = np.arange(n_indices)
else:
indices = np.random.choice(n_indices, size=self.max_samples_per_ep, replace=False)
indices += self.history_length
for idx in indices:
if self._n_examples % 100 == 0:
print("Processing example {}".format(self._n_examples))
_o, _a, _r, _next_o = None, None, None, None
if self.store_o:
_o = list(o[idx-self.history_length:idx])
_o = np.concatenate(_o, axis=2)
if self.store_a:
_a = np.array(a[idx-self.history_length:idx]).flatten()
if self.store_r:
_r = np.array(r[idx-self.history_length:idx]).flatten()
if self.store_next_o:
_next_o = o[idx]
self._write_example(o=_o, a=_a, r=_r, next_o=_next_o)
self._n_examples += 1
def visualize(self):
N = 16
dset = tf.data.TFRecordDataset(self.filename)
dset = dset.shuffle(1000).batch(N).map(self.parse_example_batch)
iterator = dset.make_one_shot_iterator()
sess = tf.get_default_session()
o, a, r, next_o = None, None, None, None
result = sess.run(iterator.get_next())
o = result.get('o', None)
a = result.get('a', None)
r = result.get('r', None)
next_o = result.get('next_o', None)
# in case not enough obs were found
for data in [o, a, r, next_o]:
if data is not None:
N = data.shape[0]
break
stride = self.obs_shape[2]
sqrt_N = int(np.ceil(np.sqrt(N)))
fig, axes = plt.subplots(sqrt_N, sqrt_N * (self.history_length + 1), figsize=(20, 20))
axes = np.array(axes).reshape(sqrt_N, sqrt_N * (self.history_length + 1))
for ax in axes.flatten():
ax.set_axis_off()
for n in range(N):
i = int(n / sqrt_N)
j = int(n % sqrt_N)
for t in range(self.history_length):
ax = axes[i, j * (self.history_length + 1) + t]
ax.set_aspect("equal")
if self.store_o:
ax.imshow(np.squeeze(o[n, :, :, t*stride:(t+1)*stride]))
str_a = str(a[n, t * self.action_dim: (t+1)*self.action_dim]) if self.store_a else ""
str_r = str(r[n, t * self.reward_dim: (t+1)*self.reward_dim]) if self.store_r else ""
ax.set_title("a={}, r={}".format(str_a, str_r))
ax = axes[i, j * (self.history_length + 1) + self.history_length]
ax.set_title("Next Obs")
ax.set_aspect("equal")
if self.store_next_o:
ax.imshow(np.squeeze(next_o[n]))
plt.subplots_adjust(top=0.95, bottom=0, left=0, right=1, wspace=0.1, hspace=0.1)
plt.show()
class RewardClassificationDataset(ReinforcementLearningDataset):
""" Note that in general, the data returned by gym_recording will contain
one more observation than the number of rewards/actions. """
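    # Concretely: for an episode with T actions/rewards, len(o) == T + 1 while
    # len(a) == len(r) == T, which is why _callback below uses
    # episode_length = len(o) - 1.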
classes = Param()
one_hot = Param(True)
store_o = True
store_a = True
store_r = True
store_next_o = False
@property
def reward_dim(self):
return len(self.classes) if self.one_hot else 1
@property
def features(self):
if self._features is not None:
return self._features
_features = []
_features.append(ImageFeature("o", self.obs_shape))
_features.append(ArrayFeature("a", (self.action_dim,)))
_features.append(ArrayFeature("r", (1,)))
self._features = _features
return _features
def _make(self):
self.examples = defaultdict(list)
scan_recorded_traces(self.rl_data_location, self._callback, self.max_episodes)
def _callback(self, o, a, r):
episode_length = len(o)-1
if not episode_length:
# Only one observation, and no actions or rewards
return
if self.max_samples_per_ep is not None and episode_length > self.max_samples_per_ep:
indices = np.random.choice(episode_length, size=self.max_samples_per_ep, replace=False)
else:
indices = np.arange(episode_length)
for idx in indices:
_o = list(o[idx:idx+1])
_o = np.concatenate(_o, axis=2)
_a = np.array(a[idx:idx+1]).flatten()
_r = int(r[idx])
self._write_example(o=_o, a=_a, r=np.array([_r]))
def parse_example_batch(self, example_proto):
o, a, r = super(RewardClassificationDataset, self).parse_example_batch(example_proto)
if self.one_hot:
r = tf.argmin(tf.abs(r - self.classes), axis=1)
r = tf.one_hot(r, len(self.classes))
else:
r = tf.cast(r, tf.int32)
return o, a, r
def atari_image_shape(game, after_warp):
if after_warp:
return (84, 84)
two_fifty = ("Amidar WizardOfWor DoubleDunk Centipede Tennis BankHeist Skiing "
"Carnival Pooyan AirRaid Assault Tutankham Gopher VideoPinball".split())
if "JourneyEscape" in game:
return (230, 160)
elif any(g in game for g in two_fifty):
return (250, 160)
else:
return (210, 160)
class StaticAtariDataset(ReinforcementLearningDataset):
game = Param(aliases="atari_game")
after_warp = Param()
episode_range = Param()
_obs_shape = None
action_dim = 1
reward_dim = 1
rl_data_location = None
@property
def obs_shape(self):
if self._obs_shape is None:
if self.image_shape is not None:
depth = 1 if self.after_warp else 3
self._obs_shape = (*self.image_shape, depth)
else:
if self.postprocessing:
image_shape = self.tile_shape
else:
image_shape = atari_image_shape(self.game, self.after_warp)
if self.after_warp:
self._obs_shape = (*image_shape, 1)
else:
self._obs_shape = (*image_shape, 3)
return self._obs_shape
def _make(self):
directory = os.path.join(cfg.data_dir, "atari_data")
dirs = os.listdir(directory)
game_full_name = "{}NoFrameskip-v4".format(self.game)
starts_with = "atari_data_env={}.datetime=".format(game_full_name)
matching_dirs = [d for d in dirs if d.startswith(starts_with)]
if not matching_dirs:
pprint(sorted(dirs))
raise Exception("No data found for game {}".format(self.game))
directory = os.path.join(directory, sorted(matching_dirs)[-1])
directory = os.path.join(directory, ("after" if self.after_warp else "before") + "_warp_recording")
scan_recorded_traces(directory, self._callback, self.max_episodes, self.episode_range)
class AtariVideoDataset(Dataset):
atari_game = Param()
n_frames = Param()
image_shape = Param()
after_warp = Param()
episode_range = Param()
max_episodes = Param()
max_samples_per_ep = Param()
max_examples = Param()
frame_skip = Param()
depth = 3
_n_examples = 0
_obs_shape = None
@property
def obs_shape(self):
if self._obs_shape is None:
if self.image_shape is None:
image_shape = atari_image_shape(self.atari_game, self.after_warp)
self._obs_shape = (self.n_frames, *image_shape, self.depth,)
else:
self._obs_shape = (self.n_frames, *self.image_shape, self.depth,)
return self._obs_shape
@property
def features(self):
if self._features is None:
self._features = [
ImageFeature("image", self.obs_shape),
ArrayFeature("action", (self.n_frames,), np.int32),
ArrayFeature("reward", (self.n_frames,), np.float32),
]
return self._features
def _per_ep_callback(self, o, a, r):
""" process one episode """
episode_length = len(a) # o is one step longer than a and r
frame_size = (self.n_frames - 1) * self.frame_skip + 1
max_start_idx = episode_length - frame_size + 1
if max_start_idx <= self.max_samples_per_ep:
indices = np.arange(max_start_idx)
else:
indices = np.random.choice(max_start_idx, size=self.max_samples_per_ep, replace=False)
step = self.frame_skip
for start in indices:
if self._n_examples % 100 == 0:
print("Processing example {}".format(self._n_examples))
end = start + frame_size
_o = np.array(o[start:end:step])
_a = np.array(a[start:end:step]).flatten()
_r = np.array(r[start:end:step]).flatten()
assert len(_o) == self.n_frames
assert len(_a) == self.n_frames
assert len(_r) == self.n_frames
if self.image_shape is not None and _o.shape[1:3] != self.image_shape:
_o = np.array([resize_image(img, self.image_shape) for img in _o])
if self.after_warp:
_o = np.tile(_o, (1, 1, 1, 3))
self._write_example(image=_o, action=_a, reward=_r)
self._n_examples += 1
if self._n_examples >= self.max_examples:
print("Found maximum of {} examples, done.".format(self._n_examples))
return True
def _make(self):
directory = os.path.join(cfg.data_dir, "atari_data")
dirs = os.listdir(directory)
game_full_name = "{}NoFrameskip-v4".format(self.atari_game)
starts_with = "atari_data_env={}.datetime=".format(game_full_name)
matching_dirs = [d for d in dirs if d.startswith(starts_with)]
if not matching_dirs:
pprint(sorted(dirs))
raise Exception("No data found for game {}".format(self.atari_game))
directory = os.path.join(directory, sorted(matching_dirs)[-1])
directory = os.path.join(directory, ("after" if self.after_warp else "before") + "_warp_recording")
scan_recorded_traces(directory, self._per_ep_callback, self.max_episodes, self.episode_range)
def visualize(self, n=4):
sample = self.sample(n)
images = sample["image"]
actions = sample["action"]
rewards = sample["reward"]
labels = ["actions={}, rewards={}".format(a, r) for a, r in zip(actions, rewards)]
fig, *_ = animate(images, labels=labels)
plt.show()
plt.close(fig)
if __name__ == "__main__":
# game = "AsteroidsNoFrameskip-v4"
# dset = AtariAutoencodeDataset(game=game, policy=None, n_examples=100, density=0.01, atari_render=False)
# show_frames(dset.x[:10])
# dset = AtariAutoencodeDataset(
# game=game, policy=None, n_examples=100, samples_per_frame=2, image_shape=(50, 50))
# show_frames(dset.x[:100])
# dset = AtariAutoencodeDataset(
# game=game, policy=None, n_examples=100, samples_per_frame=0, image_shape=(30, 40))
# dset = StaticAtariDataset(
# game=args.game, history_length=3,
# # max_episodes=6,
# max_samples_per_ep=100,
# after_warp=args.warped,
# # after_warp=False,
# episode_range=(-1, None),
# store_o=True,
# store_r=False,
# store_a=False,
# store_next_o=False,
# stopping_criteria="loss_reconstruction,min",
# image_shape=(105, 80),
# )
# dset = RewardClassificationDataset(
# rl_data_location=xo_dir, image_shape=(100, 100),
# classes=[-2, -1, 0, 1, 2], postprocessing="random",
# n_samples_per_image=3, tile_shape=(48, 48))
from dps.utils import Config
config = Config(
atari_game="IceHockey",
n_frames=4,
image_shape=(105, 80),
after_warp=False,
episode_range=None,
# episode_range=(-1, None),
max_episodes=100,
max_examples=200,
max_samples_per_ep=5,
frame_skip=1,
seed=200,
N=16,
)
with config:
config.update_from_command_line()
dset = AtariVideoDataset()
sess = tf.Session()
with sess.as_default():
dset.visualize(cfg.N)
|
import argparse
import os
import sys
import pandas as pd
import numpy as np
import pickle
import sagemaker_containers
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
from io import StringIO
from six import BytesIO
# import model
from model import WordOrderer, Encoder, Decoder
from utils import join_sentence, integer2sentence, one_hot_encode, prepare_predict
# accepts and returns numpy data
CONTENT_TYPE = 'application/x-npy'
def model_fn(model_dir):
print("Loading model.")
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = Encoder(model_info['input_dim'], model_info['hidden_dim'])
decoder = Decoder(model_info['input_dim'], model_info['output_dim'], model_info['hidden_dim'])
model = WordOrderer(encoder, decoder).to(device)
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
letter2int_dict_path = os.path.join(model_dir, 'letter2int_dict.pkl')
with open(letter2int_dict_path, 'rb') as f:
model.letter2int_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == CONTENT_TYPE:
stream = BytesIO(serialized_input_data)
return np.load(stream)
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
if accept == CONTENT_TYPE:
buffer = BytesIO()
np.save(buffer, prediction_output)
return buffer.getvalue(), accept
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
def predict_fn(input_data, model):
print('Predicting class labels for the input data...')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.letter2int_dict is None:
raise Exception('Model has not loaded the letter2int_dict.')
integer_sentence = []
for word in input_data:
word_batch = [word]
if len(word) > 3:
dict_size = 34
seq_len = 35
batch_size =1
test_seq = one_hot_encode(word_batch, dict_size, seq_len, batch_size)
data = torch.from_numpy(test_seq).float().squeeze().to(device)
# Have the torch as a batch of size 1
data_batch = data.view(1, np.shape(data)[0], np.shape(data)[1])
model.eval()
with torch.no_grad():
output = model.forward(data_batch)
word_integer = []
for letter in output[0]: #as there's only 1 batch
letter_numpy = letter.numpy()
max_value_ind = np.argmax(letter_numpy, axis=0)
word_integer.append(max_value_ind)
else:
word_integer = word_batch.copy()
integer_sentence.append(word_integer)
return integer_sentence
|
visa_free_countries_string = '''
Azerbaijan (up to 90 days)
Albania (up to 90 days)
Antigua and Barbuda from 29 June 2018 of the year (up to 90 days for 180 days)
Argentina (up to 90 days)
Armenia
Belarus
Bosnia and Herzegovina (up to 30 days). You may need tickets back, host invitation or travel voucher.
Brazil (up to 90 days). May need return tickets
Brunei (up to 30 days). Be sure to have tickets in both directions.
Vanuatu (up to 30 days). Citizens of Ukraine entering Vanuatu for a period not exceeding 30 days do not need a visa, a visa stamp is placed directly at the border crossing point. You should have your tickets back, hotel reservations, a document confirming your solvency
Guatemala (up to 90 days). Return tickets are required.
Hong Kong (up to 14 days). Return tickets will be required
Grenada (up to 90 days). Available only with travel vouchers.
Georgia (up to 365 days)
Western Sahara
Israel (up to 90 days)
Indonesia (up to 30 days). You need a passport, valid for at least 6 more months from the date of arrival in the republic, as well as return tickets or tickets to another country.
Kazakhstan (up to 90 days)
Qatar (for a period not exceeding 90 days, for 180 days)
Kyrgyzstan (up to 90 days)
Costa Rica (up to 90 days). Since December 2016, a visa-free regime has been introduced for citizens of Ukraine. A requirement for entry into the country is the presence of a passport document, which must be valid for at least 90 days at the time of the intended departure from Costa Rica. When leaving the state, a US $ 29 fee must be paid, regardless of the type of traveler’s passport document.
Malaysia (up to 30 days). Have return tickets with you
Macedonia (up to 90 days). You will need an insurance policy
Micronesia (only for tourists with vouchers)
Moldova
Mongolia (up to 90 days). Invitation to host is required
Namibia (up to 90 days). Need a migration card filled out in English
Niue (up to 30 days). Looking for return tickets
Nicaragua
United Arab Emirates (31.12.2017 entered into force Memorandum on the mutual abolition of visas.
Visa-free visits to the UAE are available only to citizens with biometric passports that are valid for at least 6 months. Stay without a visa on the territory of the United Arab Emirates possibly up to 30 days
Cook Islands (up to 31 days)
Panama (up to 90 days)
Paraguay (up to 90 days). Required return tickets
Peru (up to 90 days). Up to 183 days, subject to availability of documents confirming the tourist purpose of the trip
Pitcairn (on 14 days). In case of arrival and departure on the same vessel
Russia (up to 90 days). Also allowed to enter the territory of Russia on the ID-card
Samoa (up to 60 days)
Swaziland (up to 30 days)
Seychelles (up to 30 days). Return tickets and travel voucher required
Serbia (up to 90 days)
Saint Kitts and Nevis (up to 90 days). Since June 24, the Ukrainian government has adopted a draft agreement on the mutual abolition of the visa regime with Saint Kitts and Nevis
Tajikistan (up to 90 days)
Tunisia (up to 30 days). You should have a travel voucher with you.
Turkey (up to 90 days within 180 days from the date of entry). On request, you may need tickets back and a tourist voucher. Ukrainians can visit Turkey by ID-card
Uzbekistan
Montenegro (up to 90 days)
Chile (up to 90 days)
Spitsbergen
Ecuador (up to 90 days)
Jamaica (up to 30 days). You will need return tickets and proof of solvency.
Jan Mayen (visa-free, but you need to transit through the territory of Norway, which requires a visa for Ukrainians).
'''
all_countries_string = '''
Afghanistan
Albania
Algeria
Andorra
Angola
Antigua and Barbuda
Argentina
Armenia
Australia
Austria
Azerbaijan
B
The Bahamas
Bahrain
Bangladesh
Barbados
Belarus
Belgium
Belize
Benin
Bhutan
Bolivia
Bosnia and Herzegovina
Botswana
Brazil
Brunei
Bulgaria
Burkina Faso
Burundi
C
Cabo Verde
Cambodia
Cameroon
Canada
Central African Republic
Chad
Chile
China
Colombia
Comoros
Congo, Democratic Republic of the
Congo, Republic of the
Costa Rica
Côte d’Ivoire
Croatia
Cuba
Cyprus
Czech Republic
D
Denmark
Djibouti
Dominica
Dominican Republic
E
East Timor (Timor-Leste)
Ecuador
Egypt
El Salvador
Equatorial Guinea
Eritrea
Estonia
Eswatini
Ethiopia
F
Fiji
Finland
France
G
Gabon
The Gambia
Georgia
Germany
Ghana
Greece
Grenada
Guatemala
Guinea
Guinea-Bissau
Guyana
H
Haiti
Honduras
Hungary
I
Iceland
India
Indonesia
Iran
Iraq
Ireland
Israel
Italy
J
Jamaica
Japan
Jordan
K
Kazakhstan
Kenya
Kiribati
Korea North
Korea South
Kosovo
Kuwait
Kyrgyzstan
L
Laos
Latvia
Lebanon
Lesotho
Liberia
Libya
Liechtenstein
Lithuania
Luxembourg
M
Madagascar
Malawi
Malaysia
Maldives
Mali
Malta
Marshall Islands
Mauritania
Mauritius
Mexico
Micronesia
Federated States of
Moldova
Monaco
Mongolia
Montenegro
Morocco
Mozambique
Myanmar (Burma)
N
Namibia
Nauru
Nepal
Netherlands
New Zealand
Nicaragua
Niger
Nigeria
North Macedonia
Norway
O
Oman
P
Pakistan
Palau
Panama
Papua New Guinea
Paraguay
Peru
Philippines
Poland
Portugal
Q
Qatar
R
Romania
Russia
Rwanda
S
Saint Kitts and Nevis
Saint Lucia
Saint Vincent and the Grenadines
Samoa
San Marino
Sao Tome and Principe
Saudi Arabia
Senegal
Serbia
Seychelles
Sierra Leone
Singapore
Slovakia
Slovenia
Solomon Islands
Somalia
South Africa
Spain
Sri Lanka
Sudan
Sudan, South
Suriname
Sweden
Switzerland
Syria
T
Taiwan
Tajikistan
Tanzania
Thailand
Togo
Tonga
Trinidad and Tobago
Tunisia
Turkey
Turkmenistan
Tuvalu
U
Uganda
Ukraine
United Arab Emirates
United Kingdom
United States
Uruguay
Uzbekistan
V
Vanuatu
Vatican City
Venezuela
Vietnam
Y
Yemen
Z
Zambia
Zimbabwe
'''
english_native_countries_string = '''
Antigua and Barbuda
Australia
The Bahamas
Barbados
Belize
Canada
Dominica
Grenada
Guyana
Ireland
Jamaica
New Zealand
Saint Kitts and Nevis
Saint Lucia
Saint Vincent and the Grenadines
Trinidad and Tobago
United Kingdom
United States of America
'''
all_countries_list = list(all_countries_string.split('\n'))
visa_free_countries_list = list(visa_free_countries_string.split('\n'))
english_native_countries_list = list(english_native_countries_string.split('\n'))
# Drop blank lines and single-letter heading entries produced by splitting the
# strings above; building new lists avoids mutating a list while iterating over it.
all_countries_list = [item for item in all_countries_list if len(item) >= 2]
visa_free_countries_list = [item for item in visa_free_countries_list if len(item) >= 2]
english_native_countries_list = [item for item in english_native_countries_list if len(item) >= 2]
new_list = []
for item in visa_free_countries_list:
for elem in all_countries_list:
if elem in item[0: 25]:
item = elem
new_list.append(item)
visa_free_countries_list = new_list
for item in visa_free_countries_list:
print(visa_free_countries_list.index(item), item)
new_list2 = []
for item in visa_free_countries_list:
for elem in english_native_countries_list:
if elem in item[0: 25]:
item = elem
new_list2.append(item)
english_native_countries_list = new_list2
print(english_native_countries_list)
|
# Generated by Django 2.1 on 2018-08-22 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basic_app', '0013_remove_userprofileinfo_questions'),
]
operations = [
migrations.RemoveField(
model_name='questions',
name='testcase1',
),
migrations.RemoveField(
model_name='questions',
name='testcase2',
),
migrations.RemoveField(
model_name='questions',
name='testcase4',
),
migrations.RemoveField(
model_name='userprofileinfo',
name='rank',
),
migrations.AddField(
model_name='userprofileinfo',
name='quest1test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='quest2test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='quest3test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='quest4test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='quest5test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='quest6test',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userprofileinfo',
name='totaltest',
field=models.IntegerField(default=0),
),
]
|
import numpy as np
from numpy.linalg import solve
import findMin
from scipy.optimize import approx_fprime
import utils
class logReg:
# Logistic Regression
def __init__(self, verbose=0, maxEvals=100):
self.verbose = verbose
self.maxEvals = maxEvals
self.bias = True
def funObj(self, w, X, y):
yXw = y * X.dot(w)
# Calculate the function value
f = np.sum(np.log(1. + np.exp(-yXw)))
# Calculate the gradient value
res = - y / (1. + np.exp(yXw))
g = X.T.dot(res)
return f, g
def fit(self,X, y):
n, d = X.shape
# Initial guess
self.w = np.zeros(d)
utils.check_gradient(self, X, y)
(self.w, f) = findMin.findMin(self.funObj, self.w,
self.maxEvals, X, y, verbose=self.verbose)
def predict(self, X):
return np.sign(X@self.w)
# function for L2 regularization
class logRegL2:
# Logistic Regression
def __init__(self, verbose=0, maxEvals=100, lammy=1.0):
self.verbose = verbose
self.maxEvals = maxEvals
self.lammy = lammy
self.bias = True
def funObj(self, w, X, y):
yXw = y * X.dot(w)
# Calculate the function plus l2 regularization value
f = np.sum(np.log(1. + np.exp(-yXw))) + 0.5 * self.lammy * np.inner(w,w)
# Calculate the gradient value
res = - y / (1. + np.exp(yXw))
g = X.T.dot(res) + self.lammy * w
return f, g
def fit(self, X, y):
n, d = X.shape
# Initial guess
self.w = np.zeros(d)
utils.check_gradient(self, X, y)
(self.w, f) = findMin.findMin(self.funObj, self.w,
self.maxEvals, X, y, verbose=self.verbose)
def predict(self, X):
return np.sign(X@self.w)
# function for L1 regularization
class logRegL1:
# Logistic Regression
def __init__(self, verbose=0, maxEvals=100, L1_lambda=1.0):
self.verbose = verbose
self.maxEvals = maxEvals
self.lammy = L1_lambda
self.bias = True
def funObj(self, w, X, y):
yXw = y * X.dot(w)
        # Calculate the plain logistic loss (the L1 penalty is applied separately by findMinL1 in fit)
f = np.sum(np.log(1. + np.exp(-yXw)))
# Calculate the gradient value
res = - y / (1. + np.exp(yXw))
g = X.T.dot(res)
return f, g
def fit(self,X, y):
n, d = X.shape
# Initial guess
self.w = np.zeros(d)
utils.check_gradient(self, X, y)
(self.w, f) = findMin.findMinL1(self.funObj, self.w, self.lammy,
self.maxEvals, X, y, verbose=self.verbose)
def predict(self, X):
return np.sign(X@self.w)
class logRegL0(logReg):
# L0 Regularized Logistic Regression
def __init__(self, L0_lambda=1.0, verbose=2, maxEvals=400):
self.verbose = verbose
self.L0_lambda = L0_lambda
self.maxEvals = maxEvals
def fit(self, X, y):
n, d = X.shape
minimize = lambda ind: findMin.findMin(self.funObj,
np.zeros(len(ind)),
self.maxEvals,
X[:, ind], y, verbose=0)
selected = set()
selected.add(0)
minLoss = np.inf
oldLoss = 0
bestFeature = -1
while minLoss != oldLoss:
oldLoss = minLoss
print("Epoch %d " % len(selected))
print("Selected feature: %d" % (bestFeature))
print("Min Loss: %.3f\n" % minLoss)
for i in range(d):
if i in selected:
continue
# TODO for Q2.3: Fit the model with 'i' added to the features,
# then compute the loss and update the minLoss/bestFeature
                selected_new = selected | {i}  # tentatively add feature "i" to the selected set
w, loss_value = minimize(list(selected_new))
loss_value += self.L0_lambda * np.count_nonzero(w)
if loss_value < minLoss:
minLoss = loss_value
bestFeature = i
selected.add(bestFeature)
self.w = np.zeros(d)
self.w[list(selected)], _ = minimize(list(selected))
class leastSquaresClassifier:
def fit(self, X, y):
n, d = X.shape
self.n_classes = np.unique(y).size
# Initial guess
self.W = np.zeros((self.n_classes,d))
for i in range(self.n_classes):
ytmp = y.copy().astype(float)
ytmp[y==i] = 1
ytmp[y!=i] = -1
# solve the normal equations
# with a bit of regularization for numerical reasons
self.W[i] = np.linalg.solve(X.T@X+0.0001*np.eye(d), X.T@ytmp)
def predict(self, X):
return np.argmax(X@self.W.T, axis=1)
class softmaxClassifier:
def __init__(self, verbose=0, maxEvals=100):
self.verbose = verbose
self.maxEvals = maxEvals
self.bias = True
def funObj(self, W, X, y):
n, d = X.shape
k = self.n_classes
# reshape the vector w to a matrix W
W = np.reshape(W, (k, d)) # the dimension of W is k*d
G = np.zeros((k,d)) # the dimension of the matrix is k*d
f_1 = np.zeros(k)
I = np.unique(y)
# calculate each elemant in the gradient matrix
for i in range(k):
I_i = np.where(y == I[i])
f_1[i] = np.sum(X[I_i]@W[i].T)
p_1 = np.exp(X@W[i].T)/np.sum(np.exp(X@W.T), axis = 1)
for j in range(d):
G[i,j] = -np.sum(X[I_i,j]) + p_1.T@X[:,j]
F = -np.sum(f_1) + np.sum(np.log(np.sum(np.exp(X@W.T),axis = 1)))
G = G.flatten()
return F, G
def fit(self, X, y):
n, d = X.shape
self.n_classes = np.unique(y).size
# Initial guess
self.W = np.zeros((self.n_classes,d))
m = self.n_classes*d
self.w = np.reshape(self.W, m)
self.w, f = findMin.findMin(self.funObj, self.w, self.maxEvals, X, y)
self.W = np.reshape(self.w,(self.n_classes,d))
def predict(self, X):
return np.argmax(X@self.W.T, axis=1)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import sys
import pytest
from pants.build_graph.address import Address
from pants.core.target_types import FileTarget
from pants.core.util_rules import adhoc_binaries
from pants.core.util_rules.adhoc_binaries import (
PythonBuildStandaloneBinary,
_DownloadPythonBuildStandaloneBinaryRequest,
_PythonBuildStandaloneBinary,
)
from pants.core.util_rules.environments import EnvironmentTarget, LocalEnvironmentTarget
from pants.testutil.rule_runner import MockGet, QueryRule, RuleRunner, run_rule_with_mocks
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*adhoc_binaries.rules(),
QueryRule(
_PythonBuildStandaloneBinary,
[_DownloadPythonBuildStandaloneBinaryRequest],
),
],
target_types=[LocalEnvironmentTarget, FileTarget],
)
@pytest.mark.parametrize("env_tgt", [None, LocalEnvironmentTarget({}, address=Address(""))])
def test_local(env_tgt) -> None:
result = run_rule_with_mocks(
adhoc_binaries.get_python_for_scripts,
rule_args=[EnvironmentTarget("local", env_tgt)],
mock_gets=[
MockGet(
output_type=_PythonBuildStandaloneBinary,
input_types=(_DownloadPythonBuildStandaloneBinaryRequest,),
mock=lambda _: pytest.fail(),
)
],
)
assert result == adhoc_binaries.PythonBuildStandaloneBinary(sys.executable)
def test_docker_uses_helper() -> None:
result = run_rule_with_mocks(
adhoc_binaries.get_python_for_scripts,
rule_args=[EnvironmentTarget("docker", FileTarget({"source": ""}, address=Address("")))],
mock_gets=[
MockGet(
output_type=_PythonBuildStandaloneBinary,
input_types=(_DownloadPythonBuildStandaloneBinaryRequest,),
mock=lambda _: _PythonBuildStandaloneBinary(""),
)
],
)
assert result == PythonBuildStandaloneBinary("")
def test_docker_helper(rule_runner: RuleRunner):
rule_runner.write_files(
{
"BUILD": "local_environment(name='local')",
}
)
rule_runner.set_options(
["--environments-preview-names={'local': '//:local'}"], env_inherit={"PATH"}
)
pbs = rule_runner.request(
_PythonBuildStandaloneBinary,
[_DownloadPythonBuildStandaloneBinaryRequest()],
)
assert not pbs.path.startswith("/")
|
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez (thomas.duriez@gmail.com)
# Copyright (C) 2015, Adrian Durán (adrianmdu@gmail.com)
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk (ezequiel.torresfeyuk@gmail.com)
# Copyright (C) 2016-2017, Marco Germano Zbrun (marco.germano@intraway.com)
# Copyright (C) 2016-2017, Raúl Lopez Skuba (raulopez0@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import sys
sys.path.append("/home/htomar/MLC-0.0.5/MLC-0.0.5")
import binascii
import numpy as np
import struct
import MLC.Log.log as lg
from MLC.Common.LispTreeExpr.LispTreeExpr import LispTreeExpr
from MLC.Log.log import set_logger
from MLC.mlc_parameters.mlc_parameters import Config
def initialize_config():
config = Config.get_instance()
config.read('/home/htomar/MLC-0.0.5/Clone_18/Clone_18.conf')
return config
# Set printable resolution (don't alter numpy interval resolution)
np.set_printoptions(precision=9)
# Show full arrays, no matter what size do they have
np.set_printoptions(threshold=np.inf)
# Don't show scientific notation
np.set_printoptions(suppress=True)
initialize_config()
set_logger('console')
# Full expression
expr6 = "(root (/ (+ 636.4469 (cos S6)) (log (* 1069.5890 (/ (/ (/ (/ (/ (sin (- -1491.0946 (- S7 1541.5198))) (- (exp (sin (- -701.6797 (- 394.8346 (- S5 0.5484))))) -0.1508)) (exp (- (sin (sin (/ S3 1053.8509))) -0.0004))) (tanh (exp (log S6)))) (exp (sin (sin (tanh (sin (sin (- -701.6797 (- 394.8346 (- S5 0.5484)))))))))) (tanh (exp (- (tanh (- (log S3) (exp (sin (- -701.6797 (- 394.8346 (- S5 0.5484))))))) -0.0004))))))))"
expr61 = "(root (exp (- -6.3726 (* -7.1746 S0))))"
expr612 = "(root (- -6.3726 (* -7.1746 S0)))"
tree = LispTreeExpr(expr6)
out = LispTreeExpr.formal(tree)
print out
|
from node import Node
class SplayTree:
def __init__(self):
self._nil = Node()
self._root = self._nil
def _successor(self, local_root: Node) -> Node:
succ = local_root
if succ.right is not self._nil:
succ = self._min(succ.right)
else:
while succ is not self._root or succ is not succ.parent.left:
succ = succ.parent
return succ
def _predecessor(self, local_root: Node) -> Node:
pred = local_root
if pred.left is not self._nil:
pred = self._max(pred.left)
else:
while pred is not self._root or pred is not pred.parent.right:
pred = pred.parent
return pred
def _min(self, local_root: Node) -> Node:
minimum = local_root
while minimum.left is not self._nil:
minimum = minimum.left
return minimum
def _max(self, local_root: Node) -> Node:
maximum = local_root
while maximum.right is not self._nil:
maximum = maximum.right
return maximum
def _search(self, val) -> Node:
searched = self._root
while searched is not self._nil:
if searched.val < val:
searched = searched.right
elif val < searched.val:
searched = searched.left
else:
self._splay(searched)
return searched
return None
def _left_rotate(self, local_root: Node):
right_child = local_root.right
local_root.right = right_child.left
if right_child.left is not self._nil:
right_child.left.parent = local_root
self._transplant(local_root, right_child)
right_child.left = local_root
right_child.left.parent = right_child
def _right_rotate(self, local_root: Node):
left_child = local_root.left
local_root.left = left_child.right
if left_child.right is not self._nil:
left_child.right.parent = local_root
self._transplant(local_root, left_child)
left_child.right = local_root
left_child.right.parent = left_child
def _transplant(self, local_parent: Node, local_child: Node):
if local_parent.parent is self._nil:
self._root = local_child
elif local_parent is local_parent.parent.left:
local_parent.parent.left = local_child
elif local_parent is local_parent.parent.right:
local_parent.parent.right = local_child
if local_child is not self._nil:
local_child.parent = local_parent.parent
def _splay(self, pivot_node: Node):
while pivot_node is not self._root:
if pivot_node.parent is self._root:
if pivot_node is pivot_node.parent.left:
self._right_rotate(pivot_node.parent)
elif pivot_node is pivot_node.parent.right:
self._left_rotate(pivot_node.parent)
else:
if pivot_node is pivot_node.parent.left and pivot_node.parent is pivot_node.parent.parent.left:
self._right_rotate(pivot_node.parent.parent)
self._right_rotate(pivot_node.parent)
elif pivot_node is pivot_node.parent.right and pivot_node.parent is pivot_node.parent.parent.right:
self._left_rotate(pivot_node.parent.parent)
self._left_rotate(pivot_node.parent)
elif pivot_node is pivot_node.parent.right and pivot_node.parent is pivot_node.parent.parent.left:
self._left_rotate(pivot_node.parent)
self._right_rotate(pivot_node.parent)
elif pivot_node is pivot_node.parent.left and pivot_node.parent is pivot_node.parent.parent.right:
self._right_rotate(pivot_node.parent)
self._left_rotate(pivot_node.parent)
def insert(self, val):
if self._search(val):
return
pre_insert_place = self._nil
insert_place = self._root
while insert_place is not self._nil:
pre_insert_place = insert_place
if insert_place.val < val:
insert_place = insert_place.right
else:
insert_place = insert_place.left
insert_element = Node(val=val, parent=pre_insert_place, left=self._nil, right=self._nil)
if pre_insert_place is self._nil:
self._root = insert_element
elif pre_insert_place.val < insert_element.val:
pre_insert_place.right = insert_element
elif pre_insert_place.val > insert_element.val:
pre_insert_place.left = insert_element
self._splay(insert_element)
def remove(self, val):
remove_element = self._search(val)
if remove_element:
if remove_element.right is self._nil:
self._transplant(remove_element, remove_element.left)
elif remove_element.left is self._nil:
self._transplant(remove_element, remove_element.right)
else:
local_root = self._min(remove_element.right)
if local_root.parent is not remove_element:
self._transplant(local_root, local_root.right)
local_root.right = remove_element.right
local_root.right.parent = local_root
self._transplant(remove_element, local_root)
local_root.left = remove_element.left
local_root.left.parent = local_root
self._splay(local_root)
def _build_output(self, cur_node, output, level):
if cur_node.left is not self._nil:
self._build_output(cur_node.left, output, level + 1)
output.append(" " * level + " " + "/")
output.append(" " * level + "|" + str(cur_node.val))
if cur_node.right is not self._nil:
output.append(" " * level + " " + "\\")
self._build_output(cur_node.right, output, level + 1)
def search(self, val):
return bool(self._search(val))
def empty(self):
return self._root is self._nil
def __str__(self):
output = []
level_counter = 0
self._build_output(self._root, output, level_counter)
res = ''
for row in output:
res += '\n' + row
return res
|
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from zlsPro.items import ZlsproItem
class MovieSpider(CrawlSpider):
name = 'movie'
# allowed_domains = ['www.xxx.com']
start_urls = ['http://www.4567kan.com/frim/index5.html']
    # Create the Redis connection
conn = Redis(host='127.0.0.1', port=6379,password='123456')
rules = (
Rule(LinkExtractor(allow=r'frim/index5\-\d+.html'), callback='parse_item', follow=False),
)
    # Parse each movie's title and detail_url
def parse_item(self, response):
li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
for li in li_list:
title = li.xpath('./div/a/@title').extract_first()
detail_url ='http://www.4567kan.com' + li.xpath('./div/a/@href').extract_first()
            # ex == 1: added successfully, i.e. this detail_url has not been crawled before
            # ex == 0: add failed, i.e. this detail_url already exists in the set
ex = self.conn.sadd('movie_url',detail_url)
item = ZlsproItem()
item['title'] = title
if ex == 1:
                print('Crawling new entry')
yield scrapy.Request(detail_url,callback=self.parse_detail,meta={'item':item})
else:
                print('This entry has already been crawled')
def parse_detail(self,response):
item = response.meta['item']
desc = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
item['desc'] = desc
yield item
|
n,k = int(input()),int(input())
xx = list(map(int,input().split()))
ans = 0
for x in xx:
if k-x > x:
ans += x*2
else:
ans += (k-x)*2
print(ans)
|
"""tool for reconstructing a cryptarchive index."""
import sys
import json
import re
import os
from cryptarchive.index import Index
def find_all_ids(s):
"""find all ids in s."""
q = r'\"id\": ?\"[0-f]+'
# q = r'\"id\": ?\"[a-zA-Z0-9/_\- \(\)]+'
matches = re.findall(q, s)
result = []
for m in matches:
result.append(m[m.rfind('"') + 1:])
return result
def get_entry_for_id(s, id):
"""find the entry for id"""
si = ei = s.rfind(id)
# find dict subsection
while s[si] != "{":
si -= 1
while s[ei] != "}":
ei += 1
if ei >= len(s):
s += "}"
m = s[si:ei+1]
# find key
ki = si
kc = 0
while True:
if s[ki] == '"':
kc += 1
if kc == 2:
break
ki -= 1
key = s[ki:si]
key = key[:key.rfind(":")]
key = key[1:-1]
try:
loaded = json.loads(m)
except:
return None
else:
return (key, loaded)
def reconstruct_pathlist(keys):
"""reconstruct the path list from the keys."""
paths = []
for key in keys:
if key in paths:
continue
segments = key.split("/")[:-1]
prev = []
for i in range(len(segments)):
seg = segments[i]
p = "/".join(prev + [seg])
if p not in paths:
paths.append(p)
prev.append(seg)
return paths
def reconstruct(s, filelist=[], verbose=False):
"""
    Attempt to reconstruct the index.
:param s: decrypted content of the old index
:type s: str
:param filelist: list of existing files in user directory
:type filelist: list of str
:param verbose: enable more output
:type verbose: bool
"""
    # attempt to load the index first
if verbose:
print "Loading index... ",
try:
index = Index.loads(s)
except Exception as e:
if verbose:
print "Error: {e}\nBeginning recovery...".format(e=repr(e))
else:
if verbose:
print "Done.\nThe index appears to be working, skipping reconstruction."
return index
# find ids
if verbose:
print "Searching for IDs... ",
ids = find_all_ids(s)
if verbose:
print "{n} found.".format(n=len(ids))
# load entries and keys
if verbose:
print "Reading index entries for IDs... ",
entries, keys = [], []
for id in ids:
v = get_entry_for_id(s, id)
if v is None:
continue
key, entry = v
keys.append(key)
entries.append(entry)
if verbose:
print "{n} read.".format(n=len(entries))
# reconstruct path list
if verbose:
print "Searching for hints of paths... ",
paths = reconstruct_pathlist(keys)
if verbose:
print "{n} found.".format(n=len(paths))
# begin reconstruction
if verbose:
print "Recovery complete, beginning reconstruction..."
index = Index.new()
if verbose:
print "Recreating paths... ",
sorted_paths = [e[1] for e in sorted([(len(t), t) for t in paths])]
for p in sorted_paths:
index.mkdir(p)
if verbose:
print "{n} added.".format(n=len(sorted_paths))
if verbose:
print "Readding known files... ",
added = []
for k in keys:
nfid = index.create_file(k)
if nfid not in ids:
raise Exception("It seems like the ID generation was changed; reconstruction is not possible :(")
added.append(nfid)
if verbose:
print "{n} added.".format(n=len(added))
if verbose:
print "Adding unknown files...",
skipped = []
for fid in filelist:
if fid in added:
skipped.append(fid)
continue
else:
index._index["dirs"]["/"]["/"+fid] = {
"name": fid,
"isdir": False,
"id": fid,
}
if verbose:
print "{n} added, {s} skipped.".format(n=len(filelist)-len(skipped), s=len(skipped))
# end
if verbose:
print "Done."
print "Data loss summary below:"
filenameloss = (float(len(filelist)-len(skipped)) / (len(filelist) + len(added)))* 100
print "filenames: {p}% lost.".format(p=(filenameloss))
return index
if __name__ == "__main__":
reconstruct(open(sys.argv[1], "rb").read(), verbose=True)
|
# -*- coding: utf-8 -*-
# import os
# import random
# import urllib
#import json
# from django.utils import simplejson as json
# import pickle
# from google.appengine.ext.webapp import template
#import cgi
# from google.appengine.api import users
# from google.appengine.ext import webapp
# from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from sets import Set
from datetime import datetime
from time import strptime
import logging
# import inspect
#logging.debug(inspect.currentframe().f_lineno)
#
# Model
#
class Item(db.Model):
u"""トレーニング種目
"""
item_id = db.IntegerProperty(required=True)
status = db.BooleanProperty(default=True)
created_at = db.DateTimeProperty(auto_now_add=True)
user = db.EmailProperty(required=True)
    name = db.TextProperty(required=True)  # exercise name
    attr = db.TextProperty(required=False)  # load unit name
# is_saved = db.BooleanProperty(default=True)
@classmethod
def get_by_item_id(cls, item_id, user):
items = cls.all().filter('user =', user).filter('item_id =', item_id).fetch(1)
logging.info(items)
# return items[0] if len(items) > 0 else None
dir(items)
return items
class Record(db.Model):
u"""トレーニング記録
"""
status = db.BooleanProperty(default=True)
created_at = db.DateTimeProperty(auto_now_add=True)
user = db.EmailProperty(required=True)
item_id = db.IntegerProperty(required=True)
record_id = db.IntegerProperty(required=True)
value = db.IntegerProperty(required=True)
# is_saved = db.BooleanProperty(default=True)
@classmethod
def get_by_record_id(cls, record_id, user):
records = cls.all().filter('user =', user).filter('record_id =', record_id).fetch(1)
logging.info(records)
return records
# @classmethod
# def get_days(cls, user):
# trainnings = cls.all().filter('status =', True).filter('user =', user).fetch(100)
# return Set([t.created_at.strftime('%Y-%m-%d') for t in trainnings])
# @classmethod
# def get_list_at(cls, user, created_at):
# trainnings = Trainning.all().filter('user =', user)
# trainnings.filter('created_at >=', datetime(*strptime(created_at + ' 00:00:00', '%Y-%m-%d %H:%M:%S')[0:6]))
# trainnings.filter('created_at <=', datetime(*strptime(created_at + ' 23:59:59', '%Y-%m-%d %H:%M:%S')[0:6]))
# trainnings.filter('status =', True)
# return trainnings
|
from datetime import date, timedelta
def u_to_g(d):
if d < date(1582, 10, 5):
g_date = d
    elif date(1582, 10, 5) <= d < date(1700, 2, 28):
        g_date = d + timedelta(days=10)
    elif date(1700, 3, 1) <= d < date(1800, 2, 28):
        g_date = d + timedelta(days=11)
    elif date(1800, 3, 1) <= d < date(1900, 2, 28):
        g_date = d + timedelta(days=12)
elif date(1900, 3, 1) <= d < date(2100, 2, 28):
g_date = d + timedelta(days=13)
return g_date
|
import numpy as np
import cv2
img = cv2.imread('images/model.png')
cv2.imshow('original', img)
subimg = img[200:300, 200:400]
cv2.imshow('cutting', subimg)
# Take rows 200-300 and columns 200-400 as the ROI.
img[100:200, 100:300] = subimg
# Paste the cropped subimg back into the image at these coordinates.
print(img.shape)
print(subimg.shape)
cv2.imshow('modified', img)
# Show the modified image
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# coding: utf-8
# # Problem 7
#
# **Letter frequencies.** This problem has three (3) exercises worth a total of ten (10) points.
# Letter frequency in text has been studied in cryptanalysis, in particular frequency analysis. Linguists use letter frequency analysis as a rudimentary technique for language identification, where it's particularly effective as an indicator of whether an unknown writing system is alphabetic, syllabic, or ideographic.
#
# Primarily, three different ways exist for letter frequency analysis. Each way generally results in very different charts for common letters. Based on the provided text, the first method is to count letter frequency in root words of a dictionary. The second way is to include all word variants when counting, such as gone, going and goes and not just the root word go. Such a system results in letters like "s" appearing much more frequently. The last variant is to count letters based on their frequency in the actual text that is being studied.
#
# For more details, refer to the link:
# https://en.wikipedia.org/wiki/Letter_frequency
#
# In this problem, we will focus on the 3rd methodology.
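# As a quick illustration of the 3rd methodology (counting letters in the actual
# text being studied), one could do something like the sketch below; it is only
# an example and is not part of the graded exercises:
#
#     from collections import Counter
#     text = "we are coding letter frequency"
#     freq = Counter(c for c in text if c.isalpha())
#     # freq['e'] == 6, freq['t'] == 2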
# **Exercise 0** (2 points). First, given a string input, define a function `preprocess` that returns a string with non-alphabetic characters removed and all letters converted to lower case.
#
# For example, "We are coding letter Frequency! Yay!" would be transformed into "wearecodingletterfrequencyyay"
# In[6]:
def preprocess(S):
s = ''.join([c.lower() for c in S if c.isalpha()])
return s
# In[7]:
# Test cell: valid_string
import random, string
N_str = 100 #Length of random string
def generate_str(n):
random_str = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation) for _ in range(n))
return random_str
def check_preprocess_str(n):
random_str = generate_str(n)
print("Input String: ",random_str)
    assert preprocess(random_str).islower() == True
assert preprocess(random_str).isalpha() == True
print("|----Your function seems to work correct for the string----|"+"\n")
check_preprocess_str(N_str)
check_preprocess_str(N_str)
check_preprocess_str(N_str)
print("\n(Passed)!")
# **Exercise 1** (4 points). With the necessary pre-processing complete, the next step is to write a function `count_letters(S)` to count the number of occurrences of each letter in the alphabet.
#
# You can assume that only letters will be present in the input string. The function should output a dictionary; if any letter (a-z) is missing from the input string, it should still appear as a key in the output dictionary with a value of zero.
#
# In[8]:
import random
def count_letters(S):
alphabet = string.ascii_lowercase
count_dict = {c: S.count(c) for c in alphabet}
return count_dict
# In[9]:
# Test cell: count_letters
import collections
N_processed_str = 100
def generate_processed_str(n):
random_processed_str = ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
return random_processed_str
def check_count_letters(S):
print("Input String: ",S)
random_char = chr(random.randint(97,122))
print("Character frequency evaluated for: ", random_char)
if(random_char in S):
assert count_letters(S)[random_char] == collections.Counter(S)[random_char]
print("|----Your function seems to return correct freq for the char----|"+"\n")
else:
assert count_letters(S)[random_char] == 0
print("|----Your function seems to return correct freq for the char----|"+"\n")
check_count_letters(generate_processed_str(N_processed_str))
check_count_letters(generate_processed_str(N_processed_str))
check_count_letters(generate_processed_str(N_processed_str))
print("\n(Passed)!")
# **Exercise 2** (4 points). The next step is to sort the distribution given as a dictionary with all letters of the alphabet as keys and their occurrence counts in the text as values.
#
# Sorting should be done first in decreasing order of occurrence count; for two letters with the same count, the order should be alphabetical. The function `find_top_letter(d)` should return the first character in that order.
# In[13]:
def find_top_letter(d):
t = [(l, o) for l,o in d.items()] # change items in dict to a list
t.sort(key = lambda x: (x[1]*-1, x[0]))
    return t[0][0]
# In[14]:
# Test cell: highest_freq_letter
def create_random_dict():
max_char_value = random.randint(5, 20)
random_dict = {c:random.randint(0,max_char_value-1) for c in string.ascii_lowercase}
random_letter1, random_letter2 = random.sample(string.ascii_lowercase, 2)
random_dict[random_letter1], random_dict[random_letter2] = max_char_value, max_char_value
if(random_letter1 < random_letter2):
return random_letter1, random_dict
else:
return random_letter2, random_dict
def check_top_letter():
top_letter, random_dict = create_random_dict()
user_letter = find_top_letter(random_dict)
assert user_letter == top_letter
print("Input Dictionary: ", random_dict)
print("Your function correctly returned most frequent letter: {} \n".format(user_letter))
check_top_letter()
check_top_letter()
check_top_letter()
print("\n(Passed)!")
# **Fin!** You've reached the end of this problem. Don't forget to restart the kernel and run the entire notebook from top-to-bottom to make sure you did everything correctly. If that is working, try submitting this problem. (Recall that you *must* submit and pass the autograder to get credit for your work!)
|
import datetime
import json
import random
import time
import traceback
# import faker
import requests
# from tqdm import tqdm
import os
# Data source:
# https://raw.githubusercontent.com/BlankerL/DXY-COVID-19-Data/master/json/DXYArea-TimeSeries.json
# fake = faker.Factory.create("zh-CN")
# api = "http://8.210.248.203"
api = "http://localhost"
s = requests.session()
res = json.loads(s.post(api + "/user/logIn?identifier=admin&password=admin").text)
print(res)
logf = open('prescriptionimporter.log', 'a+')
def log(s, sender=''):
global logf
fs = "<{} {}>: {}".format(datetime.datetime.now().isoformat()[:-4], sender, s)
print(fs)
logf.write(fs + '\n')
def new_prescription(patient_id):
medid = random.randint(1, 2064)
resM = s.post(api + "/medicine/getMedicineInfoByID", data={
"medicine_id": medid
})
resP = s.post(api + "/patient/getPatientInfoByID", data={
"patient_id": patient_id
})
resD = s.post(api + "/doctor/getDoctorInfo", data={
"hospital_id": json.loads(resP.text)['data']['hospital_id'],
"page": 1,
"size": 100
})
med = json.loads(resM.text)['data']
dosage = random.choice(["一日一次", "一日两次", "一日三次", "四小时一次"]) + ',一次 '
if med['medicine_name'].find('液') != -1:
dosage = dosage + random.choice(['100mL', '150mL', '200mL', '250mL'])
else:
dosage = dosage + random.choice(['5g', '10g', '15g', '20g'])
dosage = dosage + "。"
if med['medicine_name'].find('口服') != -1 or med['medicine_name'].find('颗粒') != -1 or med['medicine_name'].find('片') != -1:
usage = "口服"
elif med['medicine_name'].find('注射') != -1:
usage = "注射"
else:
usage = "外用"
prescript = {
"patient_id": patient_id,
"medicine_id": medid,
"dosage": dosage,
"usage": usage,
"doctor_id": random.choice(json.loads(resD.text)['data'])['doctor_id']
}
respre = s.post(api + '/prescription/createPrescription', data=prescript)
log(json.loads(respre.text)['data'])
START_POSITION = 139159
for i in range(START_POSITION, 140342):
for j in range(3):
new_prescription(i)
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/as_comthreads.py
# <pep8 compliant>
"""Use of osc4py3 in own created threads for communication.
Functions defined here allow using OSC with mixed scheduling, where
communications and encoding/decoding are realized in background
threads, but method calls are processed in an event loop.
"""
from . import oscscheduling
from . import oscdispatching
from . import oscchannel
from . import oscmethod
from . import osctoolspools
from . import oscdistributing
from . import as__common # All doc strings are shared here.
# Useful methods of this module.
__all__ = [
"osc_startup",
"osc_terminate",
"osc_process",
"osc_method",
"osc_send",
"osc_udp_server",
"osc_udp_client",
"osc_multicast_server",
"osc_multicast_client",
"osc_broadcast_server",
"osc_broadcast_client",
]
dispatcher = None
generallogger = None
execute_queue = None
write_queue = None
def osc_startup(**kwargs):
global dispatcher, generallogger, write_queue, execute_queue
if dispatcher is not None:
return
if 'logger' in kwargs:
generallogger = kwargs['logger']
dispatcher = oscdispatching.Dispatcher("global", {
"logger": generallogger,
})
oscdispatching.register_global_dispatcher(dispatcher)
# This monitoring thread will look at sockets.
oscscheduling.get_global_socket_monitor(generallogger)
# This thread will get, decode and process received raw packets.
oscdistributing.create_rawpackets_thread(generallogger)
# This thread will encode and transmit packets to send.
oscdistributing.create_sendingpackets_thread(generallogger)
# This thread will process delayed bundles.
oscdispatching.create_delayed_thread(generallogger)
# To execute methods in the process context. This is just used
# as a fifo queue to store received messages before processing
# them (its add_working_thread() method is not called).
execute_queue = osctoolspools.WorkQueue()
# To send pending packets. This is just used as a fifo queue to
# store jobs between osc_send() and osc_process(), no thread is
# created (its add_working_thread() method is not called).
write_queue = osctoolspools.WorkQueue()
def osc_terminate():
global dispatcher, manager, execute_queue, write_queue
if dispatcher is None:
return
oscchannel.terminate_all_channels()
execute_queue.terminate()
execute_queue = None
write_queue.terminate()
write_queue = None
oscdispatching.unregister_global_dispatcher()
dispatcher = None
oscscheduling.terminate_global_socket_monitor()
oscscheduling.terminate_global_polling_monitor()
oscdistributing.terminate_sendingpackets_thread()
oscdistributing.terminate_rawpackets_thread()
oscdispatching.terminate_delayed_thread()
def osc_process():
while True:
job = execute_queue.wait_for_job(0)
if job is osctoolspools.LAST_JOB or job is None:
break
try:
job()
except:
generallogger.exception("Failure in method execution job")
def osc_method(addrpattern, function, argscheme=oscmethod.OSCARG_DATAUNPACK, extra=None):
# Important: specify the workqueue to really process methods in the
# context of osc_process() call.
apf = oscmethod.MethodFilter(addrpattern, function, logger=generallogger,
workqueue=execute_queue, argscheme=argscheme,
extra=extra)
oscdispatching.register_method(apf)
def osc_send(packet, names):
oscdistributing.send_packet(packet, names)
def osc_udp_server(address, port, name):
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "r",
{
'udpread_host': address,
'udpread_port': port,
'monitor': oscchannel.SCHED_SELECTTHREAD,
            'auto_start': True,  # The channel will automatically register
                                 # with the monitor.
'logger': generallogger,
})
def osc_udp_client(address, port, name):
global channels
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "w",
{
'udpwrite_host': address,
'udpwrite_port': port,
"udpwrite_nonblocking": True,
'monitor': oscchannel.SCHED_SELECTTHREAD,
"write_workqueue": write_queue,
'auto_start': True,
'logger': generallogger,
})
def osc_multicast_server(address, port, name):
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "r",
{
'udpread_host': address,
'udpread_port': port,
'monitor': oscchannel.SCHED_SELECTTHREAD,
'auto_start': True,
'mcast_enabled': True,
'logger': generallogger,
})
def osc_multicast_client(address, port, name, ttl=1):
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "w",
{
'udpwrite_host': address,
'udpwrite_port': port,
'udpwrite_ttl': ttl,
"udpwrite_nonblocking": True,
"write_workqueue": write_queue,
'monitor': oscchannel.SCHED_SELECTTHREAD,
'auto_start': True,
'mcast_enabled': True,
'logger': generallogger,
})
def osc_broadcast_server(address, port, name):
global channels
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "r",
{
'udpread_host': address,
'udpread_port': port,
'monitor': oscchannel.SCHED_SELECTTHREAD,
'auto_start': True,
'bcast_enabled': True,
'logger': generallogger,
})
def osc_broadcast_client(address, port, name, ttl=1):
from . import oscudpmc # Only import if necessary.
chan = oscudpmc.UdpMcChannel(name, "w",
{
'udpwrite_host': address,
'udpwrite_port': port,
'udpwrite_ttl': ttl,
"udpwrite_nonblocking": True,
"write_workqueue": write_queue,
'monitor': oscchannel.SCHED_SELECTTHREAD,
'auto_start': True,
'bcast_enabled': True,
'logger': generallogger,
})
as__common.apply_docs(globals())
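# A minimal usage sketch (illustrative, not part of this module; it assumes an
# osc_startup() initializer and the oscbuildparse message builder from the
# surrounding package):
#   osc_startup()
#   osc_udp_client("127.0.0.1", 9001, "synth")
#   osc_method("/ping", lambda *args: print("ping", args))
#   osc_send(oscbuildparse.OSCMessage("/ping", ",i", [1]), "synth")
#   osc_process()    # processes pending received messages in the caller's thread
#   osc_terminate()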
|
from base64 import b64encode
from datetime import datetime
from manticora.models.database_functions.account import (
query_user_accounts_by_user,
query_account_by_id,
find_all_extrato,
query_all_account_in_rest,
change_status,
query_all_requests_from_user)
from manticora.models.database_functions.usuario import query_user_by_id
from manticora.models.database_functions.restaurante import get_actual_rest
def show_rests_accounts(current_user):
accounts = query_user_accounts_by_user(current_user)
rests = [[item.restaurante.adm.nome,
item.restaurante,
item.conta,
item.status,
item.id]
for item in accounts]
for item in rests:
item[1].imagem = b64encode(item[1].imagem).decode('utf-8')
return rests
def query_account_and_build_html(id):
account = query_account_by_id(id)
extrato = find_all_extrato(account)
html_head = """
<div class="card">
<div class="card-body" id="addr">
        <h5 class="card-title">{}</h5>
<div class="table-wrapper-scroll-y my-custom-scrollbar">
<table class="table">
<thead>
<tr>
<th scope="col">Data</th>
<th scope="col">Itens</th>
<th scope="col">Valor</th>
</tr>
</thead>
<tbody>
""".format(account.restaurante.adm.nome)
html_end = """
</tbody>
</table>
</div>
</div>
</div>
"""
html_middle = ""
for item in extrato:
html_middle += """
<tr>
<td>{}</td>
<td>{}</td>
<td class="red">{}</td>
</tr>
""".format(item.data.date(), item.itens.replace('[', '').replace(']', '').replace('\'', ''), item.valor) #NOQA
return html_head + html_middle + html_end
def show_clients_account(current_user):
rest = get_actual_rest(current_user)
account = query_all_account_in_rest(rest)
return account
def update_status_account(id_account):
try:
change_status(int(id_account), "Pago")
return "ok"
except Exception:
return "Erro, Tente novamente mais tarde"
def mount_user_data(user_id):
user = query_user_by_id(int(user_id))
if user:
html = """
<p>Nome: {}</p>
<p>Email: {}</p>
<p>Cidade: {}</p>
<p>Bairro: {}</p>
<p>Rua: {}</p>
<p>Numero: {}</p>
<p>Complemento: {}</p><br>
""".format(user.nome, user.email, user.cidade,
user.bairro, user.rua, user.numero, user.complemento)
return html
return "erro"
def get_user_requests(current_user):
return query_all_requests_from_user(int(current_user.id),
datetime.now().date())
|
from django.contrib import admin
from django.http import HttpRequest
from django.http import HttpResponse
from django.urls import path
from task4.views import view
def hello_world(request: HttpRequest):
return HttpResponse("hello world")
urlpatterns = [
path('admin/', admin.site.urls),
path("hw/", hello_world),
path("task4/", view),
]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C51 agent with fixed replay buffer(s)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from dopamine.agents.rainbow import rainbow_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayRainbowAgent(rainbow_agent.RainbowAgent):
"""An implementation of the DQN agent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
init_checkpoint_dir=None, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log Directory from which to load the replay buffer.
replay_suffix: int, If not None, then only load the replay buffer
corresponding to the specific suffix in data directory.
init_checkpoint_dir: str, directory from which initial checkpoint before
training is loaded if there doesn't exist any checkpoint in the current
agent directory. If None, no initial checkpoint is loaded.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
tf.logging.info(
'Creating FixedReplayAgent with replay directory: %s', replay_data_dir)
tf.logging.info('\t init_checkpoint_dir %s', init_checkpoint_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
# Set replay_log_dir before calling parent's initializer
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(
init_checkpoint_dir, 'checkpoints')
else:
self._init_checkpoint_dir = None
super(FixedReplayRainbowAgent, self).__init__(sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayRainbowAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# The logger subscribes to various topics and creates a local and remote log
from mqtt_client import MQTT_Client
from hbmqtt.mqtt.constants import QOS_1
import asyncio
# For making GET/POST request to WEB
import requests
# For structuring data
import json
# Configuration of TOPICS and addresses
from config import *
# For exception handeling
import sys
# For working with dates
import datetime
import dateutil.parser
# for enums
from enum import Enum
class LogEntryType(Enum):
TEMP = 0
CONTROL = 1
CONTROLLER=3
# For IO
from file_read_backwards import FileReadBackwards
# Just a variable used for conditional debugging prints
DEBUG = True
class Logger(MQTT_Client):
def __init__(self):
# Setup the MQTT stuff from parent
# Initialize the MQTT_client parent class
MQTT_Client.__init__(self)
# Store what topics to listen to
self.my_topics = [TOPICS['temp'], TOPICS['temp_setpoint'], TOPICS['ping']]
        # Subscribe to the topics. This is done by letting the asyncio loop run that coroutine to completion,
        # i.e. we do that before continuing with the rest of the program.
self.loop.run_until_complete(self.subscribe_to(self.my_topics))
self.id = 1
self.remote_poll_interval = 60
def packet_received_cb(self,topic, payload_dict):
"""
        This function is called each time a packet is received. It makes an entry in the local and remote logs.
"""
if DEBUG:
print("DEBUG: packet_received_cb called in logger.py")
print("DEBUG: topic = {} data = {}".format(topic, payload_dict['Data']))
# There will be several topics. So we should do a if-elif
# structure to handle the different incoming packets.
self.log_to_local_file(topic, payload_dict)
self.log_to_remote_db(topic, payload_dict)
if topic == TOPICS['temp'][0] or topic == TOPICS['temp_setpoint'][0]:
self.update_current_value_log(topic, payload_dict)
def log_to_local_file(self, topic, payload_dict):
# Open file
fo = open(LOG_FILE_PATH, "a")
# Write to the file
fo.write(log_entry_encode(topic, payload_dict))
# Close the file
fo.close()
def log_to_remote_db(self, topic, payload_dict):
# Add the API key to the payload_dict
payload_dict['APIKEY'] = APIKEY
if topic == TOPICS['temp'][0]:
try:
r = requests.post(DB_POST_TEMP_PATH, json=payload_dict, headers = {'content-type': 'application/json'})
if r.status_code != 200:
print("COULDNT POST: {}".format(r.text))
else:
print(r.text)
except Exception as e:
print(e)
print("No internet connection to log_to_remote_db")
elif topic == TOPICS['temp_setpoint'][0]:
try:
r = requests.post(DB_POST_CONTROL_PATH, json=payload_dict, headers = {'content-type': 'application/json'})
if r.status_code != 200:
print("COULDNT POST: {}".format(r.text))
else:
print(r.text)
except Exception as e:
print(e)
print("No internet connection to log_to_remote_db")
def update_current_value_log(self, topic, payload_dict):
print("JADDA")
fo = open(CURRENT_STATE_PATH, "r")
entry = fo.readlines()[0]
fo.close()
entry_dict = dict(item.split('=') for item in entry.split(';'))
if topic == TOPICS['temp'][0]:
entry_dict['TimestampTemp'] = payload_dict['Timestamp']
entry_dict['Temp'] = payload_dict['Data']
elif topic == TOPICS['temp_setpoint'][0]:
entry_dict['TimestampControl'] = payload_dict['Timestamp']
entry_dict['Control'] = payload_dict['Data']
new_entry = ""
for key,val in entry_dict.items():
new_entry += "{}={};".format(key,val)
new_entry = new_entry[:-1]
print(new_entry)
fo = open(CURRENT_STATE_PATH, "w")
fo.write(new_entry)
fo.close()
async def poll_remote_db(self):
# Poll last entry from DB
try:
print(DB_GET_CONTROL_PATH, APIKEY)
r = requests.get(DB_GET_CONTROL_PATH, headers={'APIKEY':APIKEY})
print(r)
if r.status_code == 200:
last_control = r.json()
print(last_control)
ret = compare_local_log(last_control, LogEntryType.CONTROL)
print(ret)
if ret == -1:
print("Local log is outdated")
await self.publish_to(topic=TOPICS['temp_setpoint'][0],data=last_control['Data'])
# Update log file
self.log_to_local_file(TOPICS['temp_setpoint'][0], last_control)
elif ret == 1:
print("remote DB is out-dated")
self.log_to_remote_db(topic=TOPICS['temp_setpoint'][0], payload_dict=read_log(LogEntryType.CONTROL, 1)[0])
else:
print("Logger verify consistency between ")
else:
print("Failed to get control policy in poll_remote_db: {}".format(r.text))
except Exception as e:
print("No internet connection to poll_remote_db: {}".format(e))
pass
async def db_poller(self):
        # This is the infinite loop that keeps polling the DB
while True:
await self.poll_remote_db()
await asyncio.sleep(LOGGER_INTERVAL_S)
def run(self):
"""
        This function starts the necessary tasks and runs them in the
        event loop. The GUI itself, probably implemented in Tkinter, should be
        added as a task here.
        NB! Only the first three lines matter; the rest is an attempt to
        shut the process down cleanly when the user hits CTRL+C.
"""
try:
# Spawn the tasks to run concurrently
self.loop.create_task(self.listen()) # Listen to subscribed topics
self.loop.create_task(self.db_poller())
self.loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self.loop.close()
def log_entry_encode(topic, payload_dict):
# Format the log entry
if topic == TOPICS['temp'][0]:
log_topic = LogEntryType.TEMP
if topic == TOPICS['temp_setpoint'][0]:
log_topic = LogEntryType.CONTROL
logEntry = "EntryID={};Timestamp={};Data={}\n".format(
log_topic.value, payload_dict['Timestamp'], payload_dict['Data']
)
return logEntry
def log_entry_decode(entry):
# Take a log entry and return the topic and payload_dict
my_dict = dict(item.split('=') for item in entry.split(';'))
return my_dict
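# Round-trip example (illustrative timestamp/data values):
#   log_entry_encode(TOPICS['temp'][0], {'Timestamp': '2021-01-01T12:00:00', 'Data': 21.5})
#     -> "EntryID=0;Timestamp=2021-01-01T12:00:00;Data=21.5\n"
#   log_entry_decode("EntryID=0;Timestamp=2021-01-01T12:00:00;Data=21.5")
#     -> {'EntryID': '0', 'Timestamp': '2021-01-01T12:00:00', 'Data': '21.5'}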
# Main entry point to reading the log
def read_log(entryType, nEntries):
fo = FileReadBackwards(LOG_FILE_PATH, encoding="utf-8")
entriesRead= 0
entries = []
for line in fo:
entry_dict = log_entry_decode(line)
if int(entry_dict['EntryID']) == entryType.value:
entriesRead += 1
entries.append(entry_dict)
if entriesRead == nEntries:
break
fo.close()
return entries
def read_current_state_log():
fo = open(CURRENT_STATE_PATH, "r")
entry = fo.readlines()[0]
fo.close()
return dict(item.split('=') for item in entry.split(';'))
def compare_local_log(db_entry, entryType):
print("check")
last_local_entry = read_log(entryType, nEntries = 1)
if not last_local_entry:
return -1 # local out-of-date
local_time = dateutil.parser.parse(last_local_entry[0]['Timestamp'])
db_time = dateutil.parser.parse(db_entry['Timestamp'])
if local_time > db_time:
return 1
elif db_time > local_time:
return -1
elif db_time == local_time:
return 0
def get_temp_24h():
# Construct the dates for the different intervals.
resolution = 1
n_hours = 24
    n_entries = int(n_hours * 3600 / SENSOR_SAMPLING_INTERVAL_S)
now = datetime.datetime.now()
h = datetime.timedelta(hours=1)
now_rounded = now.replace(minute = 0, second=0, microsecond=0)
dates = []
# Construct the labels as datetime objects
for i in range(int(n_hours/resolution),-1,-1):
dates.append(now_rounded - i*h)
dates.append(now.replace(microsecond = 0))
# Get enough recent temp-entries
entries = read_log(LogEntryType.TEMP, n_entries)
values = create_stats_for_plotting(entries,dates)
#Generate the strings for the plotting
str_labels = [date.strftime("%a %H:%M") for date in dates[0:-1]]
return (str_labels, values)
def create_stats_for_plotting(entries, dates):
values = [None] * (len(dates)-1)
tot = [0] * (len(dates)-1)
n_vals = [0] * (len(dates)-1)
for entry in entries:
entry_time = dateutil.parser.parse(entry['Timestamp'])
for idx, date in enumerate(dates):
if idx == 0:
if entry_time < date:
break
elif idx == (len(dates)-2):
tot[idx] += float(entry['Data'])
n_vals[idx] += 1
else:
if entry_time < date:
tot[idx-1] += float(entry['Data'])
n_vals[idx-1] +=1
break
for i, n_val in enumerate(n_vals):
if n_val > 0:
values[i] = tot[i]/n_val
return values
def get_temp_1w():
# Construct the dates for the different intervals.
resolution=7
n_hours = 24*7
    n_entries = int(n_hours * 3600 / SENSOR_SAMPLING_INTERVAL_S)
now = datetime.datetime.now()
h = datetime.timedelta(hours=resolution)
now_rounded = now.replace(minute = 0, second=0, microsecond=0)
dates = []
# Construct the labels as datetime objects
for i in range(int(n_hours/resolution),-1,-1):
dates.append(now_rounded - i*h)
dates.append(now.replace(microsecond = 0))
# Get enough recent temp-entries
entries = read_log(LogEntryType.TEMP, n_entries)
values = create_stats_for_plotting(entries,dates)
#Generate the strings for the plotting
str_labels = [date.strftime("%-d/%m %H:%M") for date in dates[0:-1]]
return (str_labels, values)
def get_current_control_policy():
current_state = read_current_state_log()
values = [float(x) for x in current_state['Control'].split('-')]
labels = ["02:00", "04:00","06:00", "08:00", "10:00", "12:00", "14:00", "16:00", "18:00", "20:00", "22:00", "00:00"]
return [labels, values]
def get_current_temp():
current_state = read_current_state_log()
value = float(current_state['Temp']);
return value
def create_json(payload_dict):
data = {}
data['APIKEY'] = APIKEY
data['Timestamp'] = payload_dict['Timestamp']
data['Data'] = payload_dict['Data']
return json.dumps(data)
def run():
L = Logger()
L.run()
if __name__ == '__main__':
run()
|
from spack import *
class Yoda(Package):
url = "http://cern.ch/service-spi/external/MCGenerators/distribution/yoda/yoda-1.6.5-src.tgz"
version('1.6.5', '634fa27412730e511ca3d4c67f6086e7')
depends_on('root')
depends_on('py-cython', type='build')
def install(self, spec, prefix):
with working_dir(str(self.spec.version), create=False):
configure('--enable-root', '--prefix=%s' % self.prefix)
make('all')
make('install')
|
# Types from OFP
STR = "String"
ARRAY = "Array"
BOOL = "Boolean"
GROUP = "Group"
NUM = "Number"
OBJ = "Object"
SIDE = "Side"
# Types from ARMA
CODE = "Code"
CONF = "Config"
CTRL = "Control"
DISP = "Display"
SCRPT = "Script(Handle)"
STRUCTURED = "Structured Text"
# Types from ARMA2
DIARY = "Diary_Record"
TASK = "Task"
TEAM_MEMBER = "Team_Member"
NAMESPACE = "Namespace"
TRANS = "Trans"
ORIENT = "Orient"
TARGET = "Target"
VECT = "Vector"
VOID = "Void"
# Magic
ANY = "AnyType"
VARIABLE = "Variable"
|
#!/usr/bin/python2
#coding=utf-8
#The Credit For This Code Goes To lovehacker
#If You Wanna Take Credits For This Code, Please Look Yourself Again...
#Reserved2020
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
##### LOGO #####
logo = """
\033[1;93m██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
\033[1;93m██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
\033[1;93m██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
\033[1;93m██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
\033[1;93m██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
\033[1;93m╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
\033[1;92m███╗░░░███╗░█████╗░███████╗██╗░█████╗░
\033[1;92m████╗░████║██╔══██╗██╔════╝██║██╔══██╗
\033[1;92m██╔████╔██║███████║█████╗░░██║███████║
\033[1;92m██║╚██╔╝██║██╔══██║██╔══╝░░██║██╔══██║
\033[1;92m██║░╚═╝░██║██║░░██║██║░░░░░██║██║░░██║
\033[1;92m╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░░░░╚═╝╚═╝░░╚═╝
\033[1;95m«-----------------\033[1;91mBlackMafia\033[1;95m-----------------»"""
R = '\033[1;91m'
G = '\033[1;92m'
Y = '\033[1;93m'
B = '\033[1;94m'
P = '\033[1;95m'
S = '\033[1;96m'
W = '\033[1;97m'
######Clear######
def clear():
os.system('clear')
#### time sleep ####
def t():
time.sleep(1)
def t1():
time.sleep(0.01)
#### print std #love###
def love(z):
for e in z + "\n":
sys.stdout.write(e)
sys.stdout.flush()
t1()
def menu():
clear()
print(logo)
print("Frends Tool Update ho raha ha Taklef k liay Mazrat")
print("Gabrana ni ha ahahhahah 24hrs main hò jy ga update")
print("\033[1;92m[1] Install With Out Fb Id Tool ")
print("\033[1;92m[2] Install Facebook login Tool ")
print("\033[1;92m[3] Install SpiderMan Tool ")
print("\033[1;92m[4] Install Kalilinux Tool ")
print("\033[1;92m[5] Install CoviD-19 Tool× ")
print("\033[1;92m[6] Install B.Mafia2020 Tool× ")
print("\033[1;92m[7] Install love3Hack3r Tool× ")
print("\033[1;92m[8] Install Cobra Tool ")
print("\033[1;92m[9] Install Dragon Tool ")
print("\033[1;92m[10]Install NetHunting Tool ")
print("\033[1;92m[11]Install Payload Tool ")
print("\033[1;91m[0] EXIT")
print
mafia()
def mafia():
    black = raw_input("\033[1;93m select option>>> ")
if black =="":
print ("Select a valid option !")
mafia()
elif black =="1":
clear()
print(logo)
os.system("ls $HOME")
os.system("cd $HOME/World")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("$HOME/python2 Cloningx.py")
elif black =="2":
clear()
print(logo)
os.system("rm -rf $HOME/World")
os.system("cd $HOME && git clone https://github.com/lovehacker404/World")
print (logo)
love("\033[1;93mCongratulations Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/World && python2 AsifJaved.py")
elif black =="3":
clear()
print(logo)
os.system("rm -rf $HOME/Spider")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Spider")
print (logo)
love("\033[1;91mCongratulations Cobra Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
love("Tool User Name SpiderMan Password lovehacker")
time.sleep(5)
os.system("cd $HOME/Spider && python2 SpiderMan.py")
elif black =="4":
clear()
print(logo)
os.system("rm -rf $HOME/KaliIndia")
os.system("cd $HOME && git clone https://github.com/lovehacker404/KaliIndia")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/KaliIndia && python2 kalilinux.India.py")
elif black =="5":
clear()
print(logo)
os.system("rm -rf $HOME/CoviD-19")
os.system("cd $HOME && git clone https://github.com/lovehacker404/CoviD-19")
print (logo)
love("\033[1;93mCongratulations CoviD-19 Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/CoviD-19 && python2 Virus.py")
elif black =="6":
clear()
print(logo)
os.system("rm -rf $HOME/BlakMafia2020")
os.system("cd $HOME && git clone https://github.com/lovehacker404/BlakMafia2020")
print (logo)
love("\033[1;91mCongratulations BlakMafia2020 Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/BlakMafia2020 && python2 lovehacker.py")
elif black =="7":
clear()
print(logo)
os.system("rm -rf $HOME/lov3Hak3r")
os.system("cd $HOME && git clone https://github.com/lovehacker404/lov3Hak3r")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/lov3Hak3r && python2 lovehacker.py")
elif black =="8":
clear()
print(logo)
os.system("rm -rf $HOME/Cobra")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Cobra")
print (logo)
love("\033[1;93mCongratulations Cobra Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
love("Tool User Name Cobra Password lovehacker")
time.sleep(5)
os.system("cd $HOME/Cobra && python2 Scorpion.py")
elif black =="9":
clear()
print(logo)
os.system("rm -rf $HOME/Dragon")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Dragon")
print (logo)
love("\033[1;91mCongratulations Dragon Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
love("Tool User Name Dragon Password lovehacker")
time.sleep(5)
os.system("cd $HOME/Dragon && python2 lovehacker.py")
elif black =="10":
clear()
print(logo)
os.system("rm -rf $HOME/NetHunting")
os.system("cd $HOME && git clone https://github.com/lovehacker404/NetHunting")
print (logo)
love("\033[1;96mCongratulations NetHunting Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
love("Tool User Name linux Password lovehacker")
time.sleep(5)
os.system("cd $HOME/NetHunting && python2 NetHunting.py")
elif black =="11":
clear()
print(logo)
os.system("rm -rf $HOME/Black_Mafia")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Black_Mafia")
print (logo)
love("\033[1;93mCongratulations Black_Mafia Payload Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/Black_Mafia && python3 Black_Mafia.py")
elif black =="12":
clear()
print(logo)
os.system("rm -rf $HOME/Cobra")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Cobra")
print (logo)
love("\033[1;91mCongratulations Cobra Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/Cobra && python2 Scorpion.py")
elif black =="13":
clear()
print(logo)
os.system("rm -rf $HOME/World")
os.system("cd $HOME && git clone https://github.com/lovehacker404/World")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/World && python2 Cloning.py")
elif black =="14":
clear()
print(logo)
os.system("rm -rf $HOME/Testing")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Testing")
print (logo)
love("\033[1;93mCongratulations CoviD-19 Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/CoviD-19 && python2 Project.py")
elif black =="15":
clear()
print(logo)
os.system("rm -rf $HOME/Cobra")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Cobra")
print (logo)
love("\033[1;91mCongratulations Cobra Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/Cobra && python2 Scorpion.py")
elif black =="16":
clear()
print(logo)
os.system("rm -rf $HOME/World")
os.system("cd $HOME && git clone https://github.com/lovehacker404/World")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/World && python2 Cloning.py")
elif black =="17":
clear()
print(logo)
os.system("rm -rf $HOME/Testing")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Testing")
print (logo)
love("\033[1;93mCongratulations CoviD-19 Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/CoviD-19 && python2 Project.py")
elif black =="18":
clear()
print(logo)
os.system("rm -rf $HOME/Cobra")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Cobra")
print (logo)
love("\033[1;91mCongratulations Cobra Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/Cobra && python2 Scorpion.py")
elif black =="19":
clear()
print(logo)
os.system("rm -rf $HOME/World")
os.system("cd $HOME && git clone https://github.com/lovehacker404/World")
print (logo)
love("\033[1;96mCongratulations BlackMafia Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/World && python2 Cloning.py")
elif black =="20":
clear()
print(logo)
os.system("rm -rf $HOME/Testing")
os.system("cd $HOME && git clone https://github.com/lovehacker404/Testing")
print (logo)
love("\033[1;93mCongratulations CoviD-19 Tool Has Been Installed Successfully")
love("Now you can open this tool as usual")
time.sleep(5)
os.system("cd $HOME/CoviD-19 && python2 Project.py")
elif black =="0":
os.system("exit")
if __name__ == "__main__":
menu()
|
# Use backtracking to generate all binary strings of n bits
A=[None]*3
def binaryStrings(n):
#print "Hello"
if (n<1):
print A
return
A[n-1]='0'
binaryStrings(n-1)
A[n-1]='1'
binaryStrings(n-1)
binaryStrings(3)
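# For n = 3 this prints all 8 assignments of A, starting with ['0', '0', '0'],
# then ['1', '0', '0'], ['0', '1', '0'], ... and ending with ['1', '1', '1'].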
|
# -*- coding: utf-8 -*-
"""
Language versions:
python: 3.7
scrapy: 1.6
Purpose: crawl jokes with Scrapy instead of using regular expressions.
"""
import scrapy
class DuanZiSpider(scrapy.Spider):
name = "duanSpider"
allowed_domains = ["duanziwang.com"]
start_urls = ['http://duanziwang.com/category/经典段子/']
def parse(self, response):
        duanzi_list = response.css('article')  # extract all jokes on the page into duanzi_list
        for viv in duanzi_list:  # loop over each joke and grab its title and content
            title = viv.css('.post-title a::text').extract_first()  # extract the joke title
            contents = viv.css('.post-content p::text').extract()  # extract the joke content
text = ''.join(contents)
# text = text.encode('UTF-8','ignore')
# text = text.encode('gb2312','ignore')
"""
接下来进行写文件操作,储存在一个txt文档里面
"""
file_name = 'happy.txt' # 定义文件名,如:happy.txt
f = open(file_name, "a+", encoding='utf-8') # “a+”以追加的形式
f.write('标题:' + str(title))
f.write('\n') # ‘\n’ 表示换行
f.write(str(text))
f.write('\n-------\n')
f.close()
        next_page = response.css('.next::attr(href)').extract_first()  # CSS selector for the next-page link
# print("!!!!!!The page is:" + str(next_page))
        if next_page is not None:  # check whether a next page exists
"""
            The link is a relative path such as /page/1.
            urljoin converts it to an absolute URL by prepending the domain configured above,
            so next_page ends up like http://host/page/2/.
"""
next_page = response.urljoin(next_page)
"""
scrapy.Request()
            The first argument is the next-page link; the second is the callback that processes it (here, parse).
            Crawling continues until there is no next page.
"""
yield scrapy.Request(next_page, callback=self.parse)
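# Note (assumption): this spider has to live inside a Scrapy project to run; a
# typical invocation would then be `scrapy crawl duanSpider`, which appends each
# joke to happy.txt while following the pagination links.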
|
import sys,os,time
import PLstats
from PyQt4 import QtCore, QtGui, uic,QtSql
from PLtable_auto import *
import sqlite3
import PLpoints_mod
import PLresults_mod
class MyWindowClass(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
for j in range(1,2,1):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.ui=Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle("PL Simulation")
self.connect(self.ui.actionPoints_Table,QtCore.SIGNAL("triggered()"),self.points)
self.connect(self.ui.actionPL_Results,QtCore.SIGNAL("triggered()"),self.results)
self.ui.btn_next.clicked.connect(self.nextweek)
self.ui.btn_simulate.clicked.connect(self.simulate)
dbfile="PLtable.db"
self.conn=sqlite3.connect(dbfile)
cursor=self.conn.cursor()
cursor.execute("update PL_table set P=0,W=0,D=0,L=0,GF=0,GA=0,GD=0,Pts = 0")
comma='"",'
statement="update PL_results set Match_1="+comma+"Match_2="+comma+"Match_3="+comma+"Match_4="+comma+"Match_5="+comma+"Match_6="+comma+"Match_7="+comma+"Match_8="+comma+"Match_9="+comma+"Match_10="+'""'
cursor.execute(statement)
print(statement)
self.gameweek=0
if os.path.exists(dbfile):
db=QtSql.QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(dbfile)
db.open()
else:
                QtGui.QMessageBox.critical(self, "Critical Error", "Database file was not found here")
return None
self.retrieve()
for i in range(1,38,1):
self.nextweek()
self.simulate()
cursor.execute("select team from PL_table order by Pts desc,GD desc,GF desc")
self.conn.commit()
fetch=cursor.fetchall()
winner=fetch[0]
team=""
for i in winner:
team=team+i
setwinner="Insert into PL_winners (Winner) values ("+'"'+team+'")'
print(setwinner)
cursor.execute(setwinner)
self.nextweek()
def week(self):
return self.gameweek
def points(self):
points = PLpoints_mod.MyForm(self)
points.exec()
def results(self):
results = PLresults_mod.MyForm(self)
results.exec()
def retrieve(self):
self.ui.listWidget.clear()
cursor=self.conn.cursor()
week=str(self.gameweek)
cursor.execute("select Match_1,Match_2,Match_3,Match_4,Match_5,Match_6,Match_7,Match_8,Match_9,Match_10,Date from PL_fixtures where Game_Week='"+week+"'")
self.conn.commit()
row=cursor.fetchall()
r=""
# print(row)
for r in row:
match1,match2,match3,match4,match5,match6,match7,match8,match9,match10,date = r
self.match1=match1
self.match2=match2
self.match3=match3
self.match4=match4
self.match5=match5
self.match6=match6
self.match7=match7
self.match8=match8
self.match9=match9
self.match10=match10
print(r)
for i in r:
self.fixture="\t"+i
self.ui.listWidget.addItem(self.fixture)
def nextweek(self):
print("Next week")
self.gameweek=self.gameweek+1
self.ui.lbl_week.setText("Matchday "+str(self.gameweek))
self.retrieve()
def simulate(self):
self.ui.listWidget.clear()
cursor=self.conn.cursor()
week=str(self.gameweek)
cursor.execute("select Match_1,Match_2,Match_3,Match_4,Match_5,Match_6,Match_7,Match_8,Match_9,Match_10 from PL_fixtures where Game_Week='"+week+"'")
self.conn.commit()
row=cursor.fetchall()
# print(row)
for r in row:
match1,match2,match3,match4,match5,match6,match7,match8,match9,match10= r
self.match1=match1
self.match2=match2
self.match3=match3
self.match4=match4
self.match5=match5
self.match6=match6
self.match7=match7
self.match8=match8
self.match9=match9
self.match10=match10
print(r)
count=1
for i in r:
if count!=11:
data=PLstats.simulate(i)
print(data)
result=data.split(" ")
winner=result[0]
score=result[1]
match=i.split(" v ")
team1=match[0]
team2=match[1]
print(team1)
print(team2)
goals=score.split('-')
print(goals)
t1goals=int(goals[0])
t2goals=int(goals[1])
details=team1+" v "+team2+" "+score
print(details)
cursor=self.conn.cursor()
comma='"'
matchno="Match_"+str(count)
statement="update PL_results set "+matchno+"="+comma+details+comma+" where Game_Week="+str(self.gameweek)
print("\n\n\n\n"+statement+"\n\n\n\n")
cursor.execute(statement)
spaces=40-len(i)
if len(i)<30:
self.fixture="\t"+i+"\t\t"+score
time.sleep(0)
self.ui.listWidget.addItem(self.fixture)
print(self.fixture+"Test")
else:
self.fixture="\t"+i+"\t"+score
time.sleep(0)
self.ui.listWidget.addItem(self.fixture)
print(self.fixture+"Test")
if winner=="team1":
#SQL code for updating team1 in points table
cursor=self.conn.cursor()
GD1=t1goals-t2goals
GD2=t2goals-t1goals
comma='"'
print(t1goals)
print(t2goals)
statement1="update PL_table set P=P+1,W=W+1,GF=GF+"+str(t1goals)+",GA=GA+"+str(t2goals)+",GD=GD+"+str(GD1)+",Pts=Pts+3 where team="+comma+team1+comma
statement2="update PL_table set P=P+1,L=L+1,GF=GF+"+str(t2goals)+",GA=GA+"+str(t1goals)+",GD=GD+"+str(GD2)+" where team="+comma+team2+comma
cursor.execute(statement1)
cursor.execute(statement2)
self.conn.commit()
elif winner=="team2":
#SQL code for updating team2 in points table
cursor=self.conn.cursor()
GD1=t2goals-t1goals
GD2=t1goals-t2goals
comma='"'
print(t1goals)
print(t2goals)
statement1="update PL_table set P=P+1,W=W+1,GF=GF+"+str(t2goals)+",GA=GA+"+str(t1goals)+",GD=GD+"+str(GD1)+",Pts=Pts+3 where team="+comma+team2+comma
statement2="update PL_table set P=P+1,L=L+1,GF=GF+"+str(t1goals)+",GA=GA+"+str(t2goals)+",GD=GD+"+str(GD2)+" where team="+comma+team1+comma
cursor.execute(statement1)
cursor.execute(statement2)
self.conn.commit()
else:
#SQL code for draw and updating both teams' score by 1 point
cursor=self.conn.cursor()
GD=t2goals-t1goals
comma='"'
statement1="update PL_table set P=P+1,D=D+1,GF=GF+"+str(t1goals)+",GA=GA+"+str(t2goals)+",GD=GD+"+str(GD)+",Pts=Pts+1 where team="+comma+team1+comma
statement2="update PL_table set P=P+1,D=D+1,GF=GF+"+str(t2goals)+",GA=GA+"+str(t1goals)+",GD=GD+"+str(GD)+",Pts=Pts+1 where team="+comma+team2+comma
cursor.execute(statement1)
cursor.execute(statement2)
self.conn.commit()
else:
pass
count=count+1
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass(None)
myWindow.show()
app.exec_()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 12:42:14 2020
@author: insun
"""
import re,string
from nltk.tokenize import word_tokenize
from RealOrNot.code import engAbbrCorpus
abbreviations = engAbbrCorpus.abbreviations
contractions = engAbbrCorpus.contractions
class DataClean :
def __init__(self, dataframe) :
self.df = self.call_fn(dataframe)
def call_fn(self,dataframe) :
dataframe['c_text'] = dataframe['text'].apply(self.clean_text)
dataframe['c_text'] = dataframe['c_text'].apply(self.remove_punct)
dataframe['c_text'] = dataframe['c_text'].apply(self.remove_emoji)
dataframe['c_text'] = dataframe['c_text'].apply(self.convert_abbrev_in_text)
dataframe['c_text'] = dataframe['c_text'].apply(self.remove_contractions)
return dataframe['c_text']
def clean_text(self,text) :
text = re.sub(r'https?://\S+', '', text) # Remove link
text = re.sub(r'\n',' ', text) # Remove line breaks
text = re.sub('\s+', ' ', text).strip() # Remove leading, trailing, and extra spaces
return text
def remove_punct(self,text):
table = str.maketrans('','',string.punctuation)
return text.translate(table)
def remove_emoji(self,text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags = re.UNICODE)
return emoji_pattern.sub(r'', text)
def convert_abbrev(self,word):
return abbreviations[word.lower()] if word.lower() in abbreviations.keys() else word
    def convert_abbrev_in_text(self,text):  # expand abbreviations to their original form
tokens = word_tokenize(text)
tokens = [self.convert_abbrev(word) for word in tokens]
text = ' '.join(tokens)
return text
    def remove_contractions(self,text):  # expand contractions to their original form
return contractions[text.lower()] if text.lower() in contractions.keys() else text
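# Illustrative usage (assumes a pandas DataFrame with a 'text' column, as the
# class expects, and that the abbreviation/contraction corpora imported above
# are available; the tweet content below is made up):
#   import pandas as pd
#   df = pd.DataFrame({'text': ["Fire!! https://t.co/xyz cant stop it"]})
#   cleaned = DataClean(df).df   # pandas Series of cleaned strings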
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import math
J = 2  # Cost of one token
# If you change these values, keep machine 3 as the best one or adapt the regret computation
def testGain(k) :
if k == 1 :
mu = 0.2
elif k == 2 :
mu = 1.2
elif k == 3 :
mu = 1.5
elif k == 4 :
mu = 0.7
else :
mu = 1
return np.random.poisson(mu+J) - J, mu
T=1000  # Total number of tokens
nb_machines = 5
def calculIC(res,n) :
gain = np.sum(res)
moy = np.mean(res)
var = n/(n-1)*np.var(res) - J
return moy, moy - 1.96 * var / np.sqrt(n), moy + 1.96 * var / np.sqrt(n), gain
def Afficher_IC(res,n) :
out = ""
    a = calculIC(res,n)
out += "[" + str(a[1]) + ", " + str(a[2]) + "]"
return out
def Strategie_1(T1) :
    n = T1//nb_machines  # number of pulls on each machine during phase 1
    essais = []  # one vector of trial results per machine for the first T1 pulls
intervalles = []
regret = [0]
    # Run T1/nb_machines trials on each machine and store them in essai_machine
for k in range(nb_machines) :
essai_machine = []
for i in range(n) :
essai_machine.append(testGain(k+1)[0])
regret.append(regret[-1] + (-testGain(k+1)[1] + testGain(3)[1])/T)
essais.append(essai_machine)
res = np.asarray(essais)
gain = 0
moy = []
for i in range(nb_machines) :
a = calculIC(res[i],n)
moy.append(a[0])
gain += a[3]
intervalles.append(Afficher_IC(res[i],n))
k = moy.index(max(moy))+1
essai_machine = []
for i in range(T-T1-1) :
essai_machine.append(testGain(k)[0])
regret.append(regret[-1] + (-testGain(k)[1] + testGain(3)[1])/T)
gain_tot = gain + sum(essai_machine)
# print("Nombre de jetons pour tester les machines : T1 = ", T1, "\22000nIntervalles de confiance : \n", intervalles, "\n", "Machine choisie : ", k, "\nNombre de jetons dans la machine choisie : T2 = ", T-T1, "\nGain total obtenu : ", gain_tot, sep = '')
# plt.plot(regret)
return np.asarray(regret)
def Eps_greedy(E) :
    gain_tot = [0] * nb_machines  # total gain for each machine
    s = [0] * nb_machines  # number of times arm k has been played
regret = [0]
for i in range(nb_machines) :
gain_tot[i] = testGain(i+1)[0]
s[i] += 1
regret.append(regret[-1] + (-testGain(i+1)[1] + testGain(3)[1])/T)
for i in range(nb_machines, T+1) :
if np.random.random() < 1-E :
            k = np.argmax(np.divide(gain_tot, s))  # pick the arm with the best empirical mean (probability 1-E)
else :
            k = np.random.randint(0,nb_machines)  # or pick an arm uniformly at random among the machines (probability E)
gain_tot[k] += testGain(k+1)[0]
s[k] += 1
regret.append(regret[-1] + (-testGain(k+1)[1] + testGain(3)[1])/T)
return np.asarray(regret[1:-1])
def Eps_greedy_temps(E) :
gain_tot = [0] * nb_machines #gain total pour chaque machine
s = [0] * nb_machines #nombre de fois où le bras k a été joué
regret = [0]
for i in range(nb_machines) :
gain_tot[i] = testGain(i+1)[0]
s[i] += 1
regret.append(regret[-1] + (-testGain(i+1)[1] + testGain(3)[1])/T)
for i in range(nb_machines, T+1) :
        if np.random.random() < 1-E/i**2 :  # better regret curve when E decays as 1/t^2
k = np.argmax(np.divide(gain_tot, s))
else :
k = np.random.randint(0,nb_machines)
gain_tot[k] += testGain(k+1)[0]
s[k] += 1
regret.append(regret[-1] + (-testGain(k+1)[1] + testGain(3)[1])/T)
return np.asarray(regret[1:-1])
def regret_moyen(N, methode, arg) :  # average regret of the given method over N runs
regret_cumule = [0]*T
for i in range(N) :
regret_cumule = np.sum([regret_cumule, methode(arg)], axis=0)
return np.divide(regret_cumule, N)
def plot_regret(N) :  # average regret over N runs as a function of the number T1 of initial pulls
y = []
for t1 in range(2*nb_machines, T//3, 2*nb_machines) :
        y.append(regret_moyen(N, Strategie_1, t1))
plt.plot(range(2*nb_machines, T//3, 2*nb_machines), y)
# theoretical regrets correspond to the ideal case where the best machine is chosen after the first T1 pulls
def regret_moyen_th(T1) :
regret_th = [0]
for i in range(T1) :
k = math.ceil(i*nb_machines/T1)
regret_th.append(regret_th[-1] + (testGain(3)[1] - testGain(k)[1])/T)
for i in range(T-T1) :
regret_th.append(regret_th[-1])
return regret_th
#Strategie_1(250)
#plot_regret(400)
plt.plot(regret_moyen(100, Strategie_1, 180), "blue")
plt.plot(regret_moyen(100, Eps_greedy, 0.05), "red")
plt.plot(regret_moyen(100, Eps_greedy_temps, 900), "green")
plt.plot(regret_moyen_th(180))
|
import pyupbit
import numpy as np
# Fetch daily OHLCV (open, high, low, close, volume) data
df = pyupbit.get_ohlcv("KRW-XRP", count=3)
# # Strategy section
# Volatility-breakout range: (high - low) * k
df['range'] = (df['high'] - df['low']) * 0.5
# target (buy price): shift the range column down one row (shift(1)) and add it to the open
df['target'] = df['open'] + df['range'].shift(1)
# # Basic settings
# Trading fee
fee = 0
# ror (rate of return); np.where(condition, value_if_true, value_if_false)
df['ror'] = np.where(df['high'] > df['target'], df['close'] / df['target'] - fee, 1)
# Cumulative product (cumprod) => cumulative rate of return
df['hpr'] = df['ror'].cumprod()
# Drawdown: (running max of hpr - current hpr) / running max * 100,
# i.e. the decline from a peak to a trough over the period
df['dd'] = (df['hpr'].cummax() - df['hpr']) / df['hpr'].cummax() * 100
# MDD: Maximum Drawdown,
# the largest peak-to-trough loss before the price reaches a new high
print("MDD(%): ", df['dd'].max())
# Export to Excel
df.to_excel("xrp-3day.xlsx")
|
APPLICATION_NAME = 'stag.datalink_v2.streaming'
APPLICATION_VERS = "1.0.0"
|
# Data Preprocessing final steps
# Importing the libraries
import numpy as np
import matplotlib.pyplot as pyplot
import pandas as pd
# Importing the dataset
dataset = pd.read_csv("Data.csv")
# Matrix of features
x = dataset.iloc[:, :-1].values
# Dependent variable vector
y = dataset.iloc[:, -1].values
# Taking care of missing data
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values = np.nan, strategy = "mean")
imputer.fit(x[:, 1:3])
x[:, 1:3] = imputer.transform(x[:, 1:3])
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
# Encoding the Independent Variable
labelencoder_x = LabelEncoder()
x[:, 0] = labelencoder_x.fit_transform(x[:, 0])
# New way to encode features
transformerX = ColumnTransformer(
[(
"dummy_colX",
OneHotEncoder(categories = "auto"),
[0]
)], remainder = "passthrough"
)
x = transformerX .fit_transform(x)
x = x.astype(float)
# Future Deprecated way to encode features
# onehotencoder = OneHotEncoder(categorical_features = [0])
# x = onehotencoder.fit_transform(x).toarray()
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
print(x)
print(y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)
# No need to fit again: the scaler was already fitted on the training set above
x_test = sc_x.transform(x_test)
|
"""Initial migration.
Revision ID: 57ef1a3f1d22
Revises:
Create Date: 2021-01-21 17:15:13.816196
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '57ef1a3f1d22'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('recipient_alias',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('alias', sqlalchemy_utils.types.email.EmailType(length=255), nullable=True, comment='The "To" address that should be redirected'),
sa.Column('recipient', sqlalchemy_utils.types.email.EmailType(length=255), nullable=True, comment='The "To" address to redirect to'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_recipient_alias'))
)
op.create_table('sender_alias',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sender', sa.String(length=120), nullable=True, comment='The username of the sender who has this alias'),
sa.Column('alias', sqlalchemy_utils.types.email.EmailType(length=255), nullable=True, comment='The email address for which the sender is allowed to send as'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_sender_alias'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('sender_alias')
op.drop_table('recipient_alias')
# ### end Alembic commands ###
|
import sys
def solution():
# input
# points : (duration - T) + bonus
duration, inter_n, street_n, car_n, bonus = map(int, input().split())
adj = [list() for _ in range(inter_n)]
name_to_street = {}
street_count = {}
cars = []
for _ in range(street_n):
s, e, name, L = input().split()
s, e, L = int(s), int(e), int(L)
adj[s].append((e, name, L))
name_to_street[name] = (s, e, L)
for _ in range(car_n):
path = input().split()
cars.append(path[1:])
# solution
answer = [[] for _ in range(inter_n)]
for i in range(car_n):
for p in cars[i]:
if p not in street_count:
street_count[p] = 1
else:
street_count[p] += 1
values = set()
for i in range(inter_n):
for e, name, L in adj[i]:
if name not in street_count:
continue
answer[e].append((name, street_count[name] / L))
values.add(street_count[name])
# output
# A : the number of intersections for which you specify the schedule
A = 0
for i in range(inter_n):
if answer[i]:
A += 1
print(A)
for i in range(inter_n):
if not answer[i]:
continue
# i : id
# E : number of incoming
# E lines : street name , duration
print(i)
print(len(answer[i]))
for name, val in answer[i]:
MAX_TIME = 30 # simulated annealing
print(name, max(1, int(MAX_TIME * val)))
if __name__ == "__main__":
f_names = ['a', 'b', 'c', 'd', 'e', 'f']
for f_name in f_names:
sys.stdin = open(f_name + '.txt', 'r')
sys.stdout = open(f_name + "_output_LC.txt", 'w')
solution()
|
import json
import torchvision
import numpy as np
import os
import torch
from torch.utils.data import Dataset
from PIL import Image
from augmentations import SobelTransform
from functools import reduce
def compute_weight_map(mask: torch.tensor):
"""
    Computes a weight map for a given mask, to balance the
    amount of pixels of each class on the mask.
    As implemented, weights follow the formula
    'w(x) = 1 + scale * mask(x)',
    where scale is the ratio of total pixels to foreground pixels
    :params:
    mask - torch.tensor of shape (W, H)
    :returns:
    weight_map - torch.tensor of shape (1, W, H)
"""
scale_mult = 1
if torch.sum(mask):
scale_mult = reduce(lambda x, y: x * y, mask.shape) / torch.sum(mask)
weight_map = mask.view(1, 1, mask.shape[0], mask.shape[1]).float()
weight_map = 1 + scale_mult * weight_map
return weight_map.view(1, mask.shape[0], mask.shape[1])
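# Worked example (illustrative): for mask = torch.tensor([[0, 1], [0, 0]])
# one pixel out of four is foreground, so scale_mult = 4 / 1 = 4 and
# compute_weight_map(mask) -> tensor([[[1., 5.], [1., 1.]]]) of shape (1, 2, 2).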
class CigButtDataset(Dataset):
"""
An artificial dataset for cigarette butts
(see https://www.immersivelimit.com/datasets/cigarette-butts)
    Implements the torch.utils.data.Dataset interface.
    File names must contain numerals, due to indexing issues.
"""
def __init__(
self, root_dir: str,
transforms=None
):
"""
:params:
root_dir - Directory, which should contain both images and masks
in folders 'images' and 'masks' file respectively.
        transforms - Optional list of transforms to be applied
on image and mask
"""
self.root_dir = os.path.join(os.getcwd(), root_dir)
self.image_dir = os.path.join(self.root_dir, 'images')
self.mask_dir = os.path.join(self.root_dir, 'masks')
self.transforms = transforms
self.dir_content = [file for file in os.listdir(self.image_dir)]
# required to use torchvision.transforms methods for PIL images
self.tensor_to_image = torchvision.transforms.ToPILImage()
self.image_to_tensor = torchvision.transforms.ToTensor()
def __len__(self):
return len(self.dir_content)
def __getitem__(self, idx):
"""
        Loads one image/mask pair from disk.
"""
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = os.path.join(self.image_dir, self.dir_content[idx])
img = Image.open(img_path).convert('RGB')
msk_path = os.path.join(self.mask_dir, self.dir_content[idx])
msk = Image.open(msk_path).convert('L')
if self.transforms:
for tf in self.transforms:
img, msk = tf.transform(img, msk)
weights = self.image_to_tensor(msk)
weights = compute_weight_map(weights.view(weights.shape[1], weights.shape[2]))
return {
'image': self.image_to_tensor(img).float(),
'mask' : self.image_to_tensor(msk).float(),
'weights': weights.float()
}
class SeparableCigButtDataset(CigButtDataset):
"""
An artificial dataset for cigarette butts
(see https://www.immersivelimit.com/datasets/cigarette-butts)
    Implements the torch.utils.data.Dataset interface.
    File names must contain numerals, due to indexing issues.
    Contains two subsets, separated by the 'complexity' of the image.
    Complexity is estimated from the total weight of the border pixels,
    extracted from grayscale-converted images with Sobel's transform.
"""
def __init__(
self, root_dir: str,
transforms=None
):
"""
:params:
root_dir - Directory, which should contain both images and masks
in folders 'images' and 'masks' file respectively.
        transforms - Optional list of transforms to be applied
on image and mask
"""
super(SeparableCigButtDataset, self).__init__(root_dir, transforms)
self.SIMPLE = 0
self.COMPLEX = 1
sobel = SobelTransform()
img_to_weight = {}
for image in self.dir_content:
img_path = os.path.join(self.image_dir, image)
img = torch.unsqueeze(self.image_to_tensor(
Image.open(img_path).convert('L').resize((64, 64))),0)
img = img / torch.max(img)
total_weight = torch.sum(sobel(img) > 1)
img_to_weight[image] = total_weight.data.numpy()
weights = np.array([a for a in img_to_weight.values()])
q25 = np.quantile(weights, 0.15)
q75 = np.quantile(weights, 0.85)
self.dir_content_simple, self.dir_content_complex = [], []
for a in img_to_weight:
if (img_to_weight[a] > q25) and (img_to_weight[a] < q75):
self.dir_content_complex.append(a)
else: self.dir_content_simple.append(a)
self.complexity = self.SIMPLE # an indicator of current complexity
self.dir_content = self.dir_content_simple
def switch_type(self, complexity: int):
"""
A method to switch current complexity of dataset
"""
if complexity != self.SIMPLE and complexity != self.COMPLEX:
raise ValueError("Invalid type argument")
self.complexity = complexity
self.dir_content = self.dir_content_complex if complexity else self.dir_content_simple
|
from __future__ import division
import os
from astropy.io import ascii
data_sequence = 'all'
GRB_name = 'GRB190114C'
model_name = 'CPL+BB_'+data_sequence #change if you want to change the time pulse bin, also change if you change the SNR
# Read PHA files from an ascii file listed in table
pha_files = ascii.read("Pulse1_"+data_sequence+"_list", header_start = None)
print (pha_files)
# Read models to be used
mod_file = open("model_file_"+model_name, 'r+')
model_list = mod_file.read()
models_to_use = [ele for ele in model_list.split('\n') if ele != '']
mod_file.close()
print ('\n')
#print pha_files['col1']
print (pha_files['col1'][0])
print (pha_files['col2'][0])
t_bins = len(pha_files['col1'])
print ("Number of time bins", t_bins)
print (models_to_use)
print ("Number of models tested ", len(models_to_use))
create_xspec_file = "echo \"\" > " + "xspec_fit_v3_"+model_name+".xcm"
os.system(create_xspec_file)
xcm_file = open("xspec_fit_v3_"+model_name+".xcm", 'r+')
xcm_file.write("lmod grbep /home/ankush/Desktop/HEASoft/LocalModels/XspecLocalModels/Band_model\n")
xcm_file.write("time\n")
xcm_file.write("query yes\n")
xcm_file.write("set file1 [open "+GRB_name+"info_fitstat_"+model_name+" w+]\n")
xcm_file.write("set file2 [open "+GRB_name+"info_fitpar_"+model_name+" w+]\n")
for i in range(0, t_bins):
xcm_file.write("log "+GRB_name+"_"+model_name+"_pha"+str(i+1)+".log\n")
xcm_file.write("data 1:1 "+pha_files['col1'][i]+"\n")
xcm_file.write("data 2:2 "+pha_files['col2'][i]+"\n")
xcm_file.write("data 3:3 "+pha_files['col3'][i]+"\n")
xcm_file.write("data 4:4 "+pha_files['col4'][i]+"\n")
xcm_file.write("data 5:5 "+pha_files['col5'][i]+"\n")
xcm_file.write("setp ene\n")
#~ xcm_file.write("setp rebin 5 5\n")
xcm_file.write("ign 1-3: **-8.0 900.0-**\n")
xcm_file.write("ig 1-3: 25.0-40.0\n")
xcm_file.write("ign 4-5: **-200.0 40000.0-**\n")
#xcm_file.write("ign 5: **-15.0 150.0-**\n")
xcm_file.write("mo cons*("+models_to_use[0]+")\n")
initial_values = open("init_"+model_name, 'r+')
xcm_file.write(initial_values.read())
initial_values.close()
initial_values1 = open("init_"+model_name, 'r+')
initial_values_list = initial_values1.read()
nlines = [ele for ele in initial_values_list.split('\n') if ele != '']
lines_num = len(nlines)
initial_values1.close()
xcm_file.write("statistic pgstat\n")
xcm_file.write("setplot rebin 5 5\n")
xcm_file.write("fit 500\n")
xcm_file.write("fit 500\n")
xcm_file.write("err 1-"+str(lines_num)+"\n")
#xcm_file.write("flux 8.0 40000.0 err\n")
xcm_file.write("set chisq [tcloutr stat]\n")
xcm_file.write("tclout dof\n")
xcm_file.write("set dof [lindex $xspec_tclout 0]\n")
xcm_file.write("tclout flux 2\n")
xcm_file.write("set flu [lindex $xspec_tclout 0]\n")
xcm_file.write("set f1 [lindex $xspec_tclout 1]\n")
xcm_file.write("set f2 [lindex $xspec_tclout 2]\n")
xcm_file.write("set phot [lindex $xspec_tclout 3]\n")
xcm_file.write("set ph1 [lindex $xspec_tclout 4]\n")
xcm_file.write("set ph2 [lindex $xspec_tclout 5]\n")
xcm_file.write("set rchi [expr ($chisq)/($dof)]\n")
xcm_file.write("if {$rchi < 2} {error 1-"+str(lines_num)+"}\n")
xcm_file.write("cpd /xw\n")
xcm_file.write("pl d del\n")
xcm_file.write("cpd individual_pha_"+model_name+"_"+str(i+1)+".ps/cps\n")
#xcm_file.write("pl uf del\n")
#xcm_file.write("pl euf del\n")
xcm_file.write("pl eeuf del\n")
xcm_file.write("cpd none\n")
xcm_file.write("set model [tcloutr model]\n")
xcm_file.write("set chisq [tcloutr stat]\n")
xcm_file.write("tclout dof\n")
xcm_file.write("set dof [lindex $xspec_tclout 0]\n")
xcm_file.write("set rchi [expr ($chisq)/($dof)]\n")
xcm_file.write("puts $file1 \""+str(i+1) +" [lindex $rchi] [lindex $chisq] [lindex $dof]\"\n")
z_arr=''
for j in range(1, int(lines_num/5) + 2):
xcm_file.write("tclout param "+str(j)+"\n")
xcm_file.write("set par"+str(j)+" [lindex $xspec_tclout 0]\n")
xcm_file.write("set parstat"+str(j)+" [lindex $xspec_tclout 1]\n")
xcm_file.write("tclout error "+str(j)+"\n")
xcm_file.write("set par"+str(j)+"err1"+" [lindex $xspec_tclout 0]\n")
xcm_file.write("set par"+str(j)+"err2"+" [lindex $xspec_tclout 1]\n")
xcm_file.write("set l"+str(j)+" [expr $par"+str(j)+"err1-$par"+str(j)+"]\n")
xcm_file.write("set u"+str(j)+" [expr $par"+str(j)+"err2-$par"+str(j)+"]\n")
z_arr=z_arr+" [lindex $par"+str(j)+"] [lindex $l"+str(j)+"] [lindex $u"+str(j)+"]"
#xcm_file.write("new 7 0.0\n")
xcm_file.write("freeze 3\n")
xcm_file.write("flux 8.0 900.0 err\n")
xcm_file.write("tclout flux 2\n")
xcm_file.write("set fluz [lindex $xspec_tclout 0]\n")
xcm_file.write("set fz1 [lindex $xspec_tclout 1]\n")
xcm_file.write("set fz2 [lindex $xspec_tclout 2]\n")
xcm_file.write("set photz [lindex $xspec_tclout 3]\n")
xcm_file.write("set phz1 [lindex $xspec_tclout 4]\n")
xcm_file.write("set phz2 [lindex $xspec_tclout 5]\n")
#xcm_file.write("new 5 0.0\n")
xcm_file.write("freeze 3\n")
xcm_file.write("flux 8.0 900.0 err\n") #Change if you want to change the flux energy range
xcm_file.write("tclout flux 2\n")
xcm_file.write("set fluzz [lindex $xspec_tclout 0]\n")
xcm_file.write("set fzz1 [lindex $xspec_tclout 1]\n")
xcm_file.write("set fzz2 [lindex $xspec_tclout 2]\n")
xcm_file.write("set photzz [lindex $xspec_tclout 3]\n")
xcm_file.write("set phzz1 [lindex $xspec_tclout 4]\n")
xcm_file.write("set phzz2 [lindex $xspec_tclout 5]\n")
z_arr="puts $file2 \""+z_arr+" [lindex $flu] [lindex $f1] [lindex $f2] [lindex $phot] [lindex $ph1] [lindex $ph2] [lindex $fluz] [lindex $fz1] [lindex $fz2] [lindex $photz] [lindex $phz1] [lindex $phz2] [lindex $fluzz] [lindex $fzz1] [lindex $fzz2] [lindex $photzz] [lindex $phzz1] [lindex $phzz2]\""
xcm_file.write(z_arr+"\n")
xcm_file.write("log none\n")
xcm_file.write("time\n")
xcm_file.write("close $file1\n")
xcm_file.write("close $file2\n")
xcm_file.close()
|
import heapq
class Solution:
# @param A : list of integers
# @param B : list of integers
# @return a list of integers
def solve(self, A, B):
size = len(A)
A.sort(reverse=True)
B.sort(reverse=True)
heap = []
for anum in A:
for bnum in B:
pair_sum = anum + bnum
if len(heap) < size:
heapq.heappush(heap, pair_sum)
else:
if pair_sum > heap[0]:
heapq.heappushpop(heap, pair_sum)
else:
break
return sorted(list(heap), reverse=True)
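# Illustrative example: solve([1, 4, 2, 3], [2, 5, 1, 6]) keeps a min-heap of the
# len(A) largest pairwise sums and returns them in descending order: [10, 9, 9, 8].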
|
[<name>]
username =
password =
|
n,m=input().split()
n1,m1=input().split()
n=int(n)
m=int(m)
n1=int(n1)
m1=int(m1)
print(abs(n-n1),abs(m-m1))
|
from src.reddit_handler import RedditHandler
from src.polarization_classifier import PolarizationClassifier
from src.textstatistics_generator import TextStatisticGenerator
out_folder = 'RedditHandler_Outputs'
extract_post = True # True if you want to extract Post data, False otherwise
extract_comment = True # True if you want to extract Comment data, False otherwise
category = {'gun':['guncontrol'], 'politics':['EnoughTrumpSpam','Fuckthealtright']}
start_date = '13/12/2018'
end_date = '13/03/2019'
n_months = 1 # time period (in months) to aggregate over; set n_months = 0 if you do not want it
# default post attributes
post_attributes = ['id','author', 'created_utc', 'num_comments', 'over_18', 'is_self', 'score', 'selftext', 'stickied', 'subreddit', 'subreddit_id', 'title']
# default comment attributes
comment_attributes = ['id', 'author', 'created_utc', 'link_id', 'parent_id', 'subreddit', 'subreddit_id', 'body', 'score']
my_handler = RedditHandler(out_folder, extract_post, extract_comment, category, start_date, end_date, n_months=n_months, post_attributes=post_attributes, comment_attributes=comment_attributes)
my_handler.extract_data()
my_handler.create_network()
#PolarizationClassifier
file_model = 'Model/model_glove.json'
file_weights = 'Model/model_glove.h5'
file_tokenizer = 'Model/tokenizer_def.pickle'
my_pol_classifier = PolarizationClassifier(out_folder, extract_post, extract_comment, category, start_date, end_date, file_model, file_weights, file_tokenizer)
#my_pol_classifier.compute_polarization()
#TextStatisticGenerator
my_stats_generator = TextStatisticGenerator(out_folder, extract_post, extract_comment, category, start_date, end_date)
my_stats_generator.extract_statistics()
|
#Created on 1/23/2015
#@author: rspies
# Python 2.7
######################################################################################################
#This script contains several functions for calculating error statistics:
#1 pct_bias: percent bias
#2 nash_sut: Nash-Sutcliffe efficiency
#3 ma_error: mean absolute error
#4 rms_error: root mean squared error and normalized root mean squared error (normalized by obs mean)
#5 corr_coef: correlation coefficient (numpy function)
######################################################################################################
# error statistics
import numpy as np
def pct_bias(obsx,modely):
print 'Length of sim: ' + str(len(modely))
print 'Length of obs: ' + str(len(obsx))
cnt = 0
a = 0
b = 0
for each in modely:
a += (float(modely[cnt]) - obsx[cnt])
cnt += 1
b = sum(obsx)
bias = (sum(modely)-b)/float(len(obsx))
pbias = (a/b) * 100
print 'Bias: ' + str(bias)
print 'P Bias: ' + str(pbias) + '%'
return bias, pbias
###### Nash Sutcliffe #####
def nash_sut(obsx,modely):
cnt = 0
a = 0
c = 0
print 'calculating NS...'
b = sum(obsx)/len(obsx)
for each in modely:
a += ((modely[cnt] - obsx[cnt])**2)
c += ((obsx[cnt] - b)**2)
cnt += 1
ns = round(1 - (a/c), 2)
print 'NSE: ' + str(ns)
return ns
###### Mean Absolute Error #####
def ma_error(obsx,modely):
cnt = 0
a = 0
for each in modely:
a += (abs(modely[cnt] - obsx[cnt]))
cnt += 1
mae = round(a/len(modely),2)
print 'MAE: ' + str(mae)
return mae
###### Normalized (mean obs) Root Mean Squared Error #####
def rms_error(obsx,modely):
cnt = 0
a = 0
for each in modely:
a += ((modely[cnt] - obsx[cnt])**2)
cnt += 1
mean = sum(obsx)/len(obsx)
rmse = round((a/len(modely))**.5,2)
nrmse = round(rmse/mean,2)
print 'RMSE: ' + str(rmse)
print 'NRMSE: ' + str(nrmse)
return rmse, nrmse
###### Correlation Coefficient #######
def corr_coef(obsx,modely):
cc = np.corrcoef(obsx,modely)
print 'cc: ' + str(cc[1][0]) # off-diagonal element [1][0] of the 2x2 correlation matrix
return cc
#############################################################################
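# Minimal usage sketch (hypothetical observed/simulated series; Python 2, matching the
# functions above). Each function prints its own result, so no extra output is needed.
if __name__ == '__main__':
    obs = [1.0, 2.0, 3.0, 4.0]
    sim = [1.1, 1.9, 3.2, 3.8]
    pct_bias(obs, sim)      # percent bias
    nash_sut(obs, sim)      # Nash-Sutcliffe efficiency
    ma_error(obs, sim)      # mean absolute error
    rms_error(obs, sim)     # RMSE and normalized RMSE
    corr_coef(obs, sim)     # correlation coefficient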
|
def firstFit(capacity, weights):
binlist = []
for weight in weights:
foundBin = False
for bin in range(len(binlist)):
if binlist[bin] >= weight:
binlist[bin] -= weight
foundBin = True
break
if foundBin == False:
binlist.append(capacity-weight)
return len(binlist)
def firstFitD(capacity, weights):
return firstFit(capacity, sorted(weights, reverse=True))
def bestFit(capacity, weights):
binlist = []
for weight in weights:
bestBin = None
for bin in range(len(binlist)):
this = binlist[bin] - weight
if this < 0:
continue
if bestBin is None or this < binlist[bestBin] - weight:
bestBin = bin
if bestBin is None:
binlist.append(capacity-weight)
else:
binlist[bestBin] -= weight
return len(binlist)
def main():
with open("bin.txt") as file:
TC = int(file.readline())
for TC_index in range(TC):
capacity = int(file.readline())
N = int(file.readline())
weights = [int(a) for a in file.readline().strip().split(" ")]
ff = firstFit(capacity, weights)
ffd = firstFitD(capacity, weights)
bf = bestFit(capacity, weights)
print("Test Case {} First Fit: {}, First Fit Decreasing: {}, Best Fit: {}".format(TC_index+1, ff, ffd, bf))
main()
|
# Exercise 10: Given the following list: ["www.hust.edu.vn", "www.wikipedia.org", "www.asp.net", "www.amazon.com"]
# Write a program that prints the suffixes (vn, org, net, com) of the website domain names in the list above.
my_list = ["www.hust.edu.vn", "www.wikipedia.org", "www.asp.net", "www.amazon.com"]
my_tuple = []
for i in my_list :
a = i.split('.')
my_tuple.append(a[-1])
print(tuple(my_tuple))
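# Expected output: ('vn', 'org', 'net', 'com')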
|
"""
This module demonstrates the ACCUMULATOR pattern in three classic forms:
SUMMING: total = total + number
COUNTING: count = count + 1
IN GRAPHICS: x = x + pixels
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
and their colleagues. September 2015.
"""
# ----------------------------------------------------------------------
# Students: Read and run this program. There is nothing else
# for you to do in here. Just use it as an example.
# Before you leave this example:
# *** MAKE SURE YOU UNDERSTAND THE 3 ACCUMULATOR PATTERNS ***
# *** shown in this module: SUMMING, COUNTING, and IN GRAPHICS ***
# ----------------------------------------------------------------------
import rosegraphics as rg
import math
def main():
""" Calls the TEST functions in this module. """
test_summing_example()
test_counting_example()
test_draw_row_of_circles()
def test_summing_example():
""" Tests the summing_example function. """
print()
print('--------------------------------------------------')
print('Testing the summing_example function:')
print('--------------------------------------------------')
# Test 1:
expected = 9
answer = summing_example(2)
print('Expected and actual are:', expected, answer)
# Test 2:
expected = 44100
answer = summing_example(20)
print('Expected and actual are:', expected, answer)
# Test3:
expected = 0
answer = summing_example(0)
print('Expected and actual are:', expected, answer)
def summing_example(n):
"""
Returns (1 cubed) + (2 cubed) + (3 cubed) + ... + (n cubed).
For example, summing_example(2) returns (1 cubed) + (2 cubed),
which is 9, and summing_example(20) returns 44,100.
Precondition: n is a nonnegative integer.
"""
total = 0 # Initialize to 0 BEFORE the loop
for k in range(n): # Loop
total = total + ((k + 1) ** 3) # Accumulate INSIDE the loop.
return total
def test_counting_example():
""" Tests the counting_example function. """
print()
print('--------------------------------------------------')
print('Testing the counting_example function:')
print('--------------------------------------------------')
# Test 1:
expected = 1
answer = counting_example(2)
print('Expected and actual are:', expected, answer)
# Test 2:
expected = 11
answer = counting_example(20)
print('Expected and actual are:', expected, answer)
# Test3:
expected = 0
answer = counting_example(0)
print('Expected and actual are:', expected, answer)
def counting_example(n):
"""
Returns the number of integers from 1 to n, inclusive,
whose cosine is positive.
For example, counting_example(2) returns 1, and
counting_example(20) returns 11, and
counting_example(0) returns 0.
Precondition: n is a nonnegative integer.
"""
count = 0 # Initialize to 0 BEFORE the loop
for k in range(n): # Loop
if math.cos(k + 1) > 0: # If the condition holds:
count = count + 1 # Increment INSIDE the loop.
return count
def test_draw_row_of_circles():
""" Tests the draw_row_of_circles function. """
print()
print('--------------------------------------------------')
print('Testing the draw_row_of_circles function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# TWO tests on ONE window.
# ------------------------------------------------------------------
title = 'Tests 1 and 2 of DRAW_ROW_OF_CIRCLES:'
title = title + ' 7 GREEN circles, 4 BLUE circles!'
window1 = rg.RoseWindow(500, 250, title)
# Test 1:
center = rg.Point(50, 50)
draw_row_of_circles(7, center, 'green', window1)
# Test 2:
center = rg.Point(100, 150)
draw_row_of_circles(4, center, 'blue', window1)
window1.close_on_mouse_click()
# ------------------------------------------------------------------
# A third test on ANOTHER window.
# ------------------------------------------------------------------
title = 'Test 3 of DRAW_ROW_OF_CIRCLES: Row of 12 RED circles!'
window2 = rg.RoseWindow(600, 150, title)
# Test 3:
center = rg.Point(50, 50)
draw_row_of_circles(12, center, 'red', window2)
window2.close_on_mouse_click()
def draw_row_of_circles(n, point, color, window):
"""
Draws n rg.Circles in a row, such that:
-- The first rg.Circle is centered at the given point.
-- Each rg.Circle just touches the previous one (to its left).
-- Each rg.Circle has radius 20.
-- Each rg.Circle is filled with the given color.
-- Each rg.Circle is drawn on the given window.
Must ** NOT ** close the window.
Preconditions:
:type n: int (that is positive)
:type point: rg.Point
:type color: str (that is a rosegraphics color)
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# The example below shows one way to solve problems using
# HELPER variables (aka AUXILIARY variables)
# In this approach:
# 1. You determine all the variables that you need
# to construct/draw whatever the problem calls for.
# We call these HELPER variables.
# 2. You initialize them BEFORE the loop, choosing values that
# make them just right for constructing and drawing the
# FIRST object to be drawn, in the FIRST time through the loop.
# For example, x = point.x in the example below.
# 3. You determine how many times the loop should run
# (generally, however many objects you want to draw)
# and write the FOR statement for the loop.
# For example, for k in range(n): in the example below.
# 4. Inside the loop you write the statements to construct and
# draw the FIRST object to be drawn, using your helper
# variables. This is easy because you choose just the right
# values for those helper variables for this FIRST object.
# 5. Test: Make sure the FIRST object appears (it will be redrawn
# many times, that is OK).
# 6. Add code at the BOTTOM of the loop that changes the helper
# variables appropriately for the NEXT time through the loop.
# For example, x = x + (radius * 2) in the example below.
# 7. Test and fix as needed.
#
# Many students (and professionals) find this technique less
# error-prone than using the loop variable to do all the work.
# ------------------------------------------------------------------
radius = 20
x = point.x # Initialize x and y BEFORE the loop
y = point.y # Choose values that make the FIRST object easy to draw
for _ in range(n): # Loop that does NOT use its index variable
# --------------------------------------------------------------
# Construct the relevant object(s),
# based on the current x, y and other variables.
# --------------------------------------------------------------
point = rg.Point(x, y)
circle = rg.Circle(point, radius)
circle.fill_color = color
# Attach the object to the window.
circle.attach_to(window)
# --------------------------------------------------------------
# Increment x (and in other problems, other variables)
# for the thing(s) to draw in the NEXT iteration of the loop.
# --------------------------------------------------------------
x = x + (radius * 2)
window.render()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
#!/usr/bin/env python
"""
Tool for debugging the dependence of a rpy2 RandomForest classifier on various features
in order to identify mismatched or noisy features.
TODO:
 - should use two different Debosscher arffs, with features generated from differing algorithms.
 - Should be able to disable certain features in arff datasets to see whether
   crossvalidation errors change by the same amount.
"""
import os, sys
import numpy
import pprint
import datetime
import time
algorithms_dirpath = os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/')
sys.path.append(algorithms_dirpath)
import rpy2_classifiers
class Debug_Feature_Class_Dependence:
"""
"""
def __init__(self, pars={}):
self.pars = pars
self.load_rpy2_rc()
def load_rpy2_rc(self, algorithms_dirpath=''):
""" This object contains hooks to R
"""
if len(algorithms_dirpath) == 0:
algorithms_dirpath=self.pars.get('algorithms_dirpath','')
self.rc = rpy2_classifiers.Rpy2Classifier(algorithms_dirpath=algorithms_dirpath)
def exclude_features_in_arff(self, arff_str='', noisify_attribs=[]):
""" Insert some missing-value features to arff rows.
Exepect the input to be a single string representation of arff with \n's.
Returning a similar single string.
"""
# # # TODO: do not add the missing features to the arff header
out_lines = []
misattrib_name_to_id = {}
i_attrib = 0
lines = arff_str.split('\n')
do_attrib_parse = True
for line in lines:
if do_attrib_parse:
if line[:10] == '@ATTRIBUTE':
feat_name = line.split()[1]
if feat_name in noisify_attribs:
misattrib_name_to_id[feat_name] = i_attrib
i_attrib += 1
continue # skip out_lines.append(line) for this feature
else:
i_attrib += 1
elif '@data' in line.lower():
do_attrib_parse = False
out_lines.append(line)
continue
### Should only get here after hitting @data line, which means just feature lines
#if random.random() > prob_source_has_missing:
# out_lines.append(line)
# continue # don't set any attributes as missing for this source
attribs = line.split(',')
new_attribs = []
for i, attrib_val in enumerate(attribs):
if i in misattrib_name_to_id.values():
# Then do not add this feature
#import pdb; pdb.set_trace()
#print
continue
#if random.random() <= prob_misattrib_is_missing:
# new_attribs.append('?')
# continue
new_attribs.append(attrib_val)
new_line = ','.join(new_attribs)
out_lines.append(new_line)
new_train_arff_str = '\n'.join(out_lines)
return new_train_arff_str
def get_crossvalid_errors_for_single_arff(self, arff_fpath='',
noisify_attribs=[],
ntrees=None,
mtry=None,
nodesize=None,
n_iters=None,
classifier_base_dirpath='',
algorithms_dirpath=''):
""" Given an arff file, features to exclude, params
- use the arff for training and testing, by doing some fractional partitioning
"""
train_arff_str = open(arff_fpath).read()
train_arff_str = self.exclude_features_in_arff(arff_str=train_arff_str,
noisify_attribs=noisify_attribs)
traindata_dict = self.rc.parse_full_arff(arff_str=train_arff_str, fill_arff_rows=True)
arff_header = self.rc.parse_arff_header(arff_str=train_arff_str)#, ignore_attribs=['source_id'])
Gen_Fold_Classif = rpy2_classifiers.GenerateFoldedClassifiers()
all_fold_data = Gen_Fold_Classif.generate_fold_subset_data(full_data_dict=traindata_dict,
n_folds=10,
do_stratified=False,
classify_percent=40.)
meta_parf_avgs = []
meta_R_randomForest_avgs = []
meta_R_cforest_avgs = []
out_dict = {'means':[],
'stds':[]}
for k in range(n_iters):
error_rate_list = []
results_dict = {}
for i_fold, fold_dict in all_fold_data.iteritems():
results_dict[i_fold] = {}
### Do the R randomForest here:
do_ignore_NA_features = False
for i_fold, fold_data in all_fold_data.iteritems():
classifier_fpath = os.path.expandvars("%s/classifier_RF_%d.rdata" % (classifier_base_dirpath, i_fold))
Gen_Fold_Classif.generate_R_randomforest_classifier_rdata(train_data=fold_data['train_data'],
classifier_fpath=classifier_fpath,
do_ignore_NA_features=do_ignore_NA_features,
algorithms_dirpath=algorithms_dirpath,
ntrees=ntrees, mtry=mtry,
nfolds=10, nodesize=nodesize)
r_name='rf_clfr'
classifier_dict = {'class_name':r_name}
self.rc.load_classifier(r_name=r_name,
fpath=classifier_fpath)
classif_results = self.rc.apply_randomforest(classifier_dict=classifier_dict,
data_dict=fold_data['classif_data'],
do_ignore_NA_features=do_ignore_NA_features)
print "classif_results['error_rate']=", classif_results['error_rate']
results_dict[i_fold]['randomForest'] = {'class_error':classif_results['error_rate']}
error_rate_list.append(classif_results['error_rate'])
out_dict['means'].append(numpy.mean(error_rate_list))
out_dict['stds'].append(numpy.std(error_rate_list))
return out_dict
def initialize_mec(self, client=None):
""" partially adapted from citris33/arff_generateion_master.py
"""
mec = client.MultiEngineClient()
mec.reset(targets=mec.get_ids()) # Reset the namespaces of all engines
tc = client.TaskClient()
mec_exec_str = """
import sys, os
import numpy
import random
sys.path.append(os.path.abspath('/global/home/users/dstarr/src/TCP/Software/ingest_tools'))
import debug_feature_classifier_dependence
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/'))
import rpy2_classifiers
DebugFeatureClassDependence = debug_feature_classifier_dependence.Debug_Feature_Class_Dependence()
"""
print 'before mec()'
#print mec_exec_str
#import pdb; pdb.set_trace()
engine_ids = mec.get_ids()
pending_result_dict = {}
for engine_id in engine_ids:
pending_result_dict[engine_id] = mec.execute(mec_exec_str, targets=[engine_id], block=False)
n_pending = len(pending_result_dict)
i_count = 0
while n_pending > 0:
still_pending_dict = {}
for engine_id, pending_result in pending_result_dict.iteritems():
try:
result_val = pending_result.get_result(block=False)
except:
print "get_result() Except. Still pending on engine: %d" % (engine_id)
still_pending_dict[engine_id] = pending_result
result_val = None # 20110105 added
if result_val == None:
print "Still pending on engine: %d" % (engine_id)
still_pending_dict[engine_id] = pending_result
if i_count > 10:
mec.clear_pending_results()
pending_result_dict = {}
mec.reset(targets=still_pending_dict.keys())
for engine_id in still_pending_dict.keys():
pending_result_dict[engine_id] = mec.execute(mec_exec_str, targets=[engine_id], block=False)
###
time.sleep(20) # hack
pending_result_dict = [] # hack
###
i_count = 0
else:
print "sleeping..."
time.sleep(5)
pending_result_dict = still_pending_dict
n_pending = len(pending_result_dict)
i_count += 1
print 'after mec()'
time.sleep(5) # This may be needed, although mec() seems to wait for all the Ipython clients to finish
print 'after sleep()'
#import pdb; pdb.set_trace()
return tc
def wait_for_task_completion(self, task_id_list=[], tc=None):
""" partially adapted from citris33/arff_generateion_master.py
"""
new_orig_feat_tups = []
while ((tc.queue_status()['scheduled'] > 0) or
(tc.queue_status()['pending'] > 0)):
tasks_to_pop = []
for task_id in task_id_list:
temp = tc.get_task_result(task_id, block=False)
if temp == None:
continue
temp2 = temp.results
if temp2 == None:
continue
results = temp2.get('new_orig_feat_tups',None)
if results == None:
continue # skip some kind of NULL result
if len(results) > 0:
tasks_to_pop.append(task_id)
new_orig_feat_tups.append(results)
for task_id in tasks_to_pop:
task_id_list.remove(task_id)
print tc.queue_status()
print 'Sleep... 20 in wait_for_task_completion()', datetime.datetime.utcnow()
time.sleep(20)
# IN CASE THERE are still tasks which have not been pulled/retrieved:
for task_id in task_id_list:
temp = tc.get_task_result(task_id, block=False)
if temp == None:
continue
temp2 = temp.results
if temp2 == None:
continue
results = temp2.get('new_orig_feat_tups',None)
if results == None:
continue #skip some kind of NULL result
if len(results) > 0:
tasks_to_pop.append(task_id)
new_orig_feat_tups.append(results)
return new_orig_feat_tups
def main_ipython_cluster(self, noisify_attribs=[],
ntrees = 100,
mtry=25,
nodesize=5,
n_iters=23):
""" Main() for Debug_Feature_Class_Dependence
Partially adapted from compare_randforest_classifers.py
do training and crossvalidation on just Debosscher data for speed.
- parse debosscher arff
- remove certain features
- train/test classifier using cross validation
- store error rates for those removed features
"""
try:
from IPython.kernel import client
except:
pass
tc = self.initialize_mec(client=client)
result_dict = {}
new_orig_feat_tups = []
task_id_list = []
for feat_name in noisify_attribs:
tc_exec_str = """
new_orig_feat_tups = ''
task_randint = random.randint(0,1000000000000)
classifier_base_dirpath = os.path.expandvars("$HOME/scratch/debug_feature_classifier_dependence/%d" % (task_randint))
os.system("mkdir -p %s" % (classifier_base_dirpath))
try:
DebugFeatureClassDependence = debug_feature_classifier_dependence.Debug_Feature_Class_Dependence(pars={'algorithms_dirpath':os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/')})
out_dict = DebugFeatureClassDependence.get_crossvalid_errors_for_single_arff(arff_fpath=pars['orig_arff_dirpath'],
noisify_attribs=[feat_name],
ntrees=ntrees,
mtry=mtry,
nodesize=nodesize,
n_iters=n_iters,
classifier_base_dirpath=classifier_base_dirpath,
algorithms_dirpath=os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/'))
orig_wa = numpy.average(out_dict['means'], weights=out_dict['stds'])
DebugFeatureClassDependence.load_rpy2_rc(algorithms_dirpath=os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/'))
out_dict = DebugFeatureClassDependence.get_crossvalid_errors_for_single_arff(arff_fpath=pars['new_arff_dirpath'],
noisify_attribs=[feat_name],
ntrees=ntrees,
mtry=mtry,
nodesize=nodesize,
n_iters=n_iters,
classifier_base_dirpath=classifier_base_dirpath,
algorithms_dirpath=os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/'))
new_wa = numpy.average(out_dict['means'], weights=out_dict['stds'])
new_orig_feat_tups = (new_wa - orig_wa, feat_name, numpy.std(out_dict['means']))
except:
new_orig_feat_tups = str(sys.exc_info())
"""
taskid = tc.run(client.StringTask(tc_exec_str,
push={'pars':pars,
'feat_name':feat_name,
'ntrees':ntrees,
'mtry':mtry,
'nodesize':nodesize,
'n_iters':n_iters},
pull='new_orig_feat_tups', #'new_orig_feat_tups',
retries=3))
task_id_list.append(taskid)
if 0:
### debug: This inspect.getmembers() only works if task doesnt fail:
time.sleep(60)
temp = tc.get_task_result(taskid, block=False)
import inspect
for a,b in inspect.getmembers(temp):
print a, b
out_dict = temp.results.get('new_orig_feat_tups',None)
import pdb; pdb.set_trace()
print
######
new_orig_feat_tups = self.wait_for_task_completion(task_id_list=task_id_list,
tc=tc)
new_orig_feat_tups.sort()
pprint.pprint(new_orig_feat_tups)
import pdb; pdb.set_trace()
print
def main(self, noisify_attribs=[],
ntrees = 100,
mtry=25,
nodesize=5,
n_iters=23):
""" Main() for Debug_Feature_Class_Dependence
Partially adapted from compare_randforest_classifers.py
do training and crossvalidation on just Debosscher data for speed.
- parse debosscher arff
- remove certain features
- train/test classifier using cross validation
- store error rates for those removed features
"""
result_dict = {}
new_orig_feat_tups = []
for feat_name in noisify_attribs:
result_dict[feat_name] = {}
print 'orig:', feat_name
out_dict = self.get_crossvalid_errors_for_single_arff(arff_fpath=self.pars['orig_arff_dirpath'],
noisify_attribs=[feat_name],
ntrees=ntrees,
mtry=mtry,
nodesize=nodesize,
n_iters=n_iters,
algorithms_dirpath=self.pars['algorithms_dirpath'])
pprint.pprint(out_dict)
orig_wa = numpy.average(out_dict['means'], weights=out_dict['stds'])
print 'weighted average:', orig_wa
result_dict[feat_name]['orig'] = (orig_wa,
numpy.std(out_dict['means']))
self.load_rpy2_rc()
print 'new:', feat_name
out_dict = self.get_crossvalid_errors_for_single_arff(arff_fpath=self.pars['new_arff_dirpath'],
noisify_attribs=[feat_name],
ntrees=ntrees,
mtry=mtry,
nodesize=nodesize,
n_iters=n_iters,
algorithms_dirpath=self.pars['algorithms_dirpath'])
pprint.pprint(out_dict)
new_wa = numpy.average(out_dict['means'], weights=out_dict['stds'])
print 'weighted average:', new_wa
result_dict[feat_name]['new'] = (new_wa,
numpy.std(out_dict['means']))
result_dict[feat_name]['0_new-orig'] = new_wa - orig_wa
new_orig_feat_tups.append((new_wa - orig_wa, feat_name, numpy.std(out_dict['means'])))
pprint.pprint(result_dict)
new_orig_feat_tups.sort()
pprint.pprint(new_orig_feat_tups)
import pdb; pdb.set_trace()
print
if __name__ == '__main__':
#pars = {'algorithms_dirpath':algorithms_dirpath,
# 'orig_arff_dirpath':'/media/raid_0/historical_archive_featurexmls_arffs/tutor_123/2011-04-30_00:32:56.250499/source_feats.arff',
# 'new_arff_dirpath':'/media/raid_0/historical_archive_featurexmls_arffs/tutor_123/2011-05-13_04:22:08.073940/source_feats.arff',
# }
pars = {'algorithms_dirpath':algorithms_dirpath,
'orig_arff_dirpath':'/media/raid_0/historical_archive_featurexmls_arffs/tutor_126/2011-06-13_17:51:12.002706/source_feats__ACVSclasses.arff',
'new_arff_dirpath':'/media/raid_0/historical_archive_featurexmls_arffs/tutor_126/2011-06-13_17:51:12.002706/source_feats__ACVSclasses.arff',
'ntrees':100,
'mtry':25,
'nodesize':5,
'n_iters':11,
}
"""
noisify_attribs = ['freq1_harmonics_amplitude_0',
'freq2_harmonics_amplitude_0',
'freq3_harmonics_amplitude_0',
'freq2_harmonics_freq_0',
'freq3_harmonics_freq_0',
'freq_signif_ratio_21',
'fold2P_slope_90percentile',
'medperc90_2p_p',
'p2p_scatter_pfold_over_mad',
'p2p_ssqr_diff_over_var',
'qso_log_chi2_qsonu',
'qso_log_chi2nuNULL_chi2nu',
'std',
'amplitude',
'stetson_j',
'percent_difference_flux_percentile']
"""
noisify_attribs = [ \
'amplitude',
'beyond1std',
'flux_percentile_ratio_mid20',
'flux_percentile_ratio_mid35',
'flux_percentile_ratio_mid50',
'flux_percentile_ratio_mid65',
'flux_percentile_ratio_mid80',
'fold2P_slope_10percentile',
'fold2P_slope_90percentile',
'freq1_harmonics_amplitude_0',
'freq1_harmonics_amplitude_1',
'freq1_harmonics_amplitude_2',
'freq1_harmonics_amplitude_3',
'freq1_harmonics_freq_0',
'freq1_harmonics_rel_phase_0',
'freq1_harmonics_rel_phase_1',
'freq1_harmonics_rel_phase_2',
'freq1_harmonics_rel_phase_3',
'freq2_harmonics_amplitude_0',
'freq2_harmonics_amplitude_1',
'freq2_harmonics_amplitude_2',
'freq2_harmonics_amplitude_3',
'freq2_harmonics_freq_0',
'freq2_harmonics_rel_phase_0',
'freq2_harmonics_rel_phase_1',
'freq2_harmonics_rel_phase_2',
'freq2_harmonics_rel_phase_3',
'freq3_harmonics_amplitude_0',
'freq3_harmonics_amplitude_1',
'freq3_harmonics_amplitude_2',
'freq3_harmonics_amplitude_3',
'freq3_harmonics_freq_0',
'freq3_harmonics_rel_phase_0',
'freq3_harmonics_rel_phase_1',
'freq3_harmonics_rel_phase_2',
'freq3_harmonics_rel_phase_3',
'freq_amplitude_ratio_21',
'freq_amplitude_ratio_31',
'freq_frequency_ratio_21',
'freq_frequency_ratio_31',
'freq_signif',
'freq_signif_ratio_21',
'freq_signif_ratio_31',
'freq_varrat',
'freq_y_offset',
'linear_trend',
'max_slope',
'median_absolute_deviation',
'median_buffer_range_percentage',
'medperc90_2p_p',
'p2p_scatter_2praw',
'p2p_scatter_over_mad',
'p2p_scatter_pfold_over_mad',
'p2p_ssqr_diff_over_var',
'percent_amplitude',
'percent_difference_flux_percentile',
'qso_log_chi2_qsonu',
'qso_log_chi2nuNULL_chi2nu',
'scatter_res_raw',
'skew',
'small_kurtosis',
'std',
'stetson_j',
'stetson_k']
if 0:
# This is to generate the error_rate differences dict:
DebugFeatureClassDependence = Debug_Feature_Class_Dependence(pars=pars)
DebugFeatureClassDependence.main_ipython_cluster(noisify_attribs=noisify_attribs,
ntrees = pars['ntrees'],
mtry=pars['mtry'],
nodesize=pars['nodesize'],
n_iters=pars['n_iters'])
#DebugFeatureClassDependence.main(noisify_attribs=noisify_attribs,
# ntrees = self.pars['ntrees'],
# mtry=self.pars['mtry'],
# nodesize=self.pars['nodesize'],
# n_iters=self.pars['n_iters'])
if 1:
# This is to generate a plot of error_rate differences vs freq
asas_recent_diff_fpath = "/home/pteluser/scratch/debug_feature_classifier_dependence_dicts/asas_niters11_2011-06-13_17:51:12.002706_2011-06-13_17:51:12.002706.dict"
exec(open(asas_recent_diff_fpath).read())
asas_recent_diff = data
asas_newold_diff_fpath = "/home/pteluser/scratch/debug_feature_classifier_dependence_dicts/asas_niters11_2011-04-30_02:53:31.959591_2011-06-13_17:51:12.002706.dict"
exec(open(asas_newold_diff_fpath).read())
asas_newold_diff = data
deboss_recent_diff_fpath = "/home/pteluser/scratch/debug_feature_classifier_dependence_dicts/deboss_niters23_2011-06-08_17:40:25.373520_2011-06-13_18:40:44.673995.dict"
exec(open(deboss_recent_diff_fpath).read())
deboss_recent_diff = data
deboss_newold_diff_fpath = "/home/pteluser/scratch/debug_feature_classifier_dependence_dicts/deboss_niters23_2011-04-30_00:32:56.250499_2011-05-13_04:22:08.073940.dict"
exec(open(deboss_newold_diff_fpath).read())
deboss_newold_diff = data
deboss_newold_diff['error_diffs'] = []
for ediff, featname in deboss_newold_diff['error_diffs_2tup']:
deboss_newold_diff['error_diffs'].append((ediff, featname, deboss_newold_diff['old_dict'][featname]['new'][1]))
###(new_wa - orig_wa, feat_name, numpy.std(out_dict['means']))
###new:0.22895141275051539 - orig:0.23029385649882161 == -0.001342 :: improvement in new classifier
#data_dict = deboss_newold_diff
#data_dict3 = deboss_recent_diff
#data_name = "deboss"
data_dict = asas_newold_diff
data_dict3 = asas_recent_diff
data_name = "ASAS"
data_list = data_dict['error_diffs']
data_list3 = data_dict3['error_diffs']
data3_dict = {}
for (errordiff, featname, stdev) in data_list3:
data3_dict[featname] = (errordiff, stdev)
errordiff_list = []
featname_list = []
stddev_list = []
sort_list = []
for i, (errordiff, featname, stdev) in enumerate(data_list):
errordiff_list.append(errordiff)
featname_list.append(featname)
stddev_list.append(stdev)
sort_list.append((errordiff_list, i))
sort_list.sort()
errordiff_list2 = []
featname_list2 = []
stddev_list2 = []
errordiff_list3 = []
stddev_list3 = []
x_inds3 = []
for j, (errordiff, i) in enumerate(sort_list):
errordiff_list2.append(errordiff_list[i])
featname_list2.append(featname_list[i])
stddev_list2.append(stddev_list[i])
if data3_dict.has_key(featname_list[i]):
errordiff_list3.append(data3_dict[featname_list[i]][0])
stddev_list3.append(data3_dict[featname_list[i]][1])
x_inds3.append(j)
#else:
# errordiff_list3.append(None)
# stddev_list3.append(None)
import matplotlib.pyplot as plt
fig = plt.figure() #figsize=(5,3), dpi=100)
ax = fig.add_subplot(211)
x_inds = range(len(errordiff_list2))
#ax.plot(range(len(errordiff_list2)), errordiff_list2)
plt_errordiff = ax.errorbar(x_inds, errordiff_list2, yerr=stddev_list, fmt='bo')
plt_errordiff3 = ax.errorbar(x_inds3, errordiff_list3, yerr=stddev_list3, fmt='ro')
plt.grid(True)
newold_new_date = data_dict['new_arff_dirpath'].split('/')[-2].split('_')[0]
newold_old_date = data_dict['orig_arff_dirpath'].split('/')[-2].split('_')[0]
newnew_new_date = data_dict3['new_arff_dirpath'].split('/')[-2].split('_')[0]
newnew_old_date = data_dict3['orig_arff_dirpath'].split('/')[-2].split('_')[0]
ax.legend((plt_errordiff[0], plt_errordiff3[0]),
("%s - %s" % (newold_new_date, newold_old_date),
"%s - %s" % (newnew_new_date, newnew_old_date)), loc='lower right', numpoints=1)
ax.set_xticks(x_inds)
xtickNames = plt.setp(ax, xticklabels=featname_list2)
plt.setp(xtickNames, rotation=90, fontsize=8)
ax.set_ylabel("New ARFF Error - Older ARFF Error")
ax.annotate("Newer ARFF has lower error", xy=(0.2, 0.1), xycoords='axes fraction',
horizontalalignment='center',
verticalalignment='top',
fontsize=10)
ax.annotate("Newer ARFF has higher error", xy=(0.2, 0.9), xycoords='axes fraction',
horizontalalignment='center',
verticalalignment='top',
fontsize=10)
title_str = "%s ARFFs" % (data_name)
plt.title(title_str)
fpath = "/home/pteluser/scratch/debug_feature_classifier_dependence_dicts/%s_%s_%s.ps" % ( \
data_name,
"%s-%s" % (newold_new_date, newold_old_date),
"%s-%s" % (newnew_new_date, newnew_old_date))
plt.savefig(fpath)
#plt.show()
os.system("gv %s &" % (fpath))
import pdb; pdb.set_trace()
print
|
#!/usr/bin/env python
"""
"""
import os
from util import libtool, liblogger
import ujson as json
from collections import defaultdict
import math
#=== file settings ===
using_cache = bool(os.environ["using_cache"])
lex_count_dict = json.load(open(os.environ["lex_count_dict_file"]))
converted_file = os.environ["converted_file"]
cooc_dict_file = os.environ["cooc_dict_file"]
#=== context extraction settings ===
pmi = bool(os.environ["pmi"])
ngram = int(os.environ["ngram"])
window_size = int(os.environ["window_size"])
assert(window_size >= ngram) #otherwise no context feature will be extracted.
positional = bool(os.environ["positional"])
iv_only = bool(os.environ["iv_only"])
#=== global variables ===
w_dict = None
c_dict = None
cw_dict = None
#=== global resources ===
voc_set = set(json.load(open(os.environ["voc_file"])))
def build_cooc(s, e, pos, tokens):
"""
build co-occurrence features in a tweet
"""
cooc = []
for i in range(s, e):
ctx = None
if iv_only:
ctx = " ".join([tok for tok in tokens[i:i + ngram] if tok != "unk"] and tok.lower() in voc_set)
else:
ctx = " ".join([tok for tok in tokens[i:i + ngram] if tok != "unk"])
if not ctx:
continue
rel_pos = pos - i
if positional:
ctx = (ctx, rel_pos)
else:
ctx = (ctx, )
cooc.append(ctx)
return cooc
def build_cooc_dict():
"""
Build a context-target co-occurrence dictionary organised by target words.
"""
cooc_dict = dict()
liblogger.info("build cooc dict")
for l in open(converted_file):
tokens = l.rstrip().split(' ')
length = len(tokens)
for pos in range(length):
target = tokens[pos]
if not libtool.valid_token(target):
continue
s = max(0, pos - window_size)
e = min(length, pos + window_size + 1)
cooc_vec = []
cooc_vec_left = build_cooc(s, pos - ngram, pos, tokens)
cooc_vec.extend(cooc_vec_left)
cooc_vec_right = build_cooc(pos + 1, e - ngram, pos, tokens)
cooc_vec.extend(cooc_vec_right)
if target not in cooc_dict:
cooc_dict[target] = dict()
for cooc in cooc_vec:
try:
cooc_dict[target][cooc] += 1
except KeyError:
cooc_dict[target][cooc] = 1
liblogger.info("cooc dict is built")
liblogger.info("cooc dict size: {0}".format(len(cooc_dict)))
return cooc_dict
def generate_cooc_probs(cooc_dict):
"""
Generate co-occurrence probabilities P(x,y) and context probabilities P(y).
"""
global cw_dict, c_dict
pxy_cache_file = cooc_dict_file + ".pxy.cache"
py_cache_file = cooc_dict_file + ".py.cache"
liblogger.info("load cooc dict")
cw_dict = defaultdict(int)
c_dict = defaultdict(int)
for w in cooc_dict:
#ctxs = [eval(ctx) for ctx in cooc_dict[w].keys()]
for ctx in cooc_dict[w]:
count = cooc_dict[w][ctx]
cw = (w, ctx)
cw_dict[cw] += count
c_dict[ctx] += count
liblogger.info("norm cooc dict P(x, y)")
cw_sum = float(sum(cw_dict.values()))
for cw in cw_dict:
cw_dict[cw] = math.log(cw_dict[cw] / cw_sum)
#json.dump(cw_dict, open(pxy_cache_file, "w"))
liblogger.info("norm ctx dict P(y)")
c_sum = float(sum(c_dict.values()))
for c in c_dict:
c_dict[c] = math.log(c_dict[c] / c_sum)
#json.dump(c_dict, open(py_cache_file, "w"))
def generate_lex_probs():
"""
Generate P(x) probabilities
"""
global w_dict
cache_file = cooc_dict_file + ".px.cache"
if using_cache and os.path.exists(cache_file):
w_dict = json.load(open(cache_file))
return
w_sum = float(sum(lex_count_dict.values()))
w_dict = dict()
liblogger.info("norm word dict P(x)")
for w in lex_count_dict:
w_dict[w] = math.log(lex_count_dict[w] / w_sum)
json.dump(w_dict, open(cache_file, "w"))
def calc_pmi(w, ctx):
"""
Calculate pointwise mutual information
"""
pmi = cw_dict[(w, ctx)] - w_dict[w] - c_dict[ctx]
return pmi
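# Worked example (toy probabilities, not from the data): the dicts above store log
# probabilities, so if P(x, y) = 0.02, P(x) = 0.1 and P(y) = 0.1, then
# PMI = log(0.02) - log(0.1) - log(0.1) = log(2) > 0, i.e. x and y co-occur
# twice as often as independence would predict.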
def weight_cooc_dict(cooc_dict):
"""
build vectors
"""
generate_lex_probs()
generate_cooc_probs(cooc_dict)
cooc_dict = None
cooc_pmi_dict = defaultdict(int)
liblogger.info("calc pmi")
for cw in cw_dict:
w, ctx = cw
if w not in cooc_pmi_dict:
cooc_pmi_dict[w] = defaultdict(int)
pmi = cw_dict[cw] - w_dict[w] - c_dict[ctx]
if pmi > 0:
cooc_pmi_dict[w][ctx] = pmi
liblogger.info("pmi calculated")
return cooc_pmi_dict
def main():
liblogger.info("STEP2 begins")
if using_cache and os.path.exists(cooc_dict_file):
return
#TODO: debug
#tmp_cache = "../data/tmp_cache"
#cooc_dict = None
#if os.path.exists(tmp_cache):
# cooc_dict = json.load(open(tmp_cache))
#else:
# cooc_dict = build_cooc_dict()
# json.dump(cooc_dict, open(tmp_cache, "w"))
cooc_dict = build_cooc_dict()
if pmi:
cooc_dict = weight_cooc_dict(cooc_dict)
json.dump(cooc_dict, open(cooc_dict_file, "w"))
liblogger.info("STEP2 ends")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2.7
"""Facilitates the measurement of current network bandwidth."""
import collections
class Bandwidth(object):
"""Object containing the current bandwidth estimation."""
def __init__(self):
self._current = 0
self._previous = 0
self._trend = collections.deque(maxlen=100)
def change(self, bandwidth):
"""
Change the current bandwidth estimation.
Also records a bandwidth trend (1 for increasing, 0 for the same
and -1 for decreasing).
"""
self._previous = self._current
self._current = bandwidth
if self._current > self._previous:
self._trend.append(1)
elif self._current == self._previous:
self._trend.append(0)
elif self._current < self._previous:
self._trend.append(-1)
def historical_trend(self):
"""Return the historical trend in bandwidth."""
return list(self._trend)
def __str__(self):
"""Returns the current estimated bandwidth."""
return str(self._current)
def __int__(self):
"""Returns the current estimated bandwidth."""
return int(self._current)
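# Minimal usage sketch (bandwidth samples below are illustrative):
if __name__ == '__main__':
    bw = Bandwidth()
    for sample in (500, 750, 750, 600):
        bw.change(sample)
    print(bw)                     # current estimate: "600"
    print(bw.historical_trend())  # [1, 1, 0, -1]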
|
import pytest
from dict import locators
from selenium import webdriver
import time
@pytest.fixture(scope="session")
def driver():
driver = webdriver.Chrome('C:/Python34/Lib/site-packages/selenium/webdriver/common/chromedriver')
return driver
def test_togo(driver):
driver.get(locators['url'])
def test_enter1(driver):
driver = webdriver.Chrome('C:/Python34/Lib/site-packages/selenium/webdriver/common/chromedriver')
driver.get(locators['url'])
window_before = driver.window_handles[0]
driver.find_elements_by_css_selector(locators['Шапка']['Войти в шапке'][0])[
locators['Шапка']['Войти в шапке'][1]].click()
window_after = driver.window_handles[1]
driver.switch_to_window(window_after)
driver.close()
def test_enter2(driver):
window_before = driver.window_handles[0]
driver.find_elements_by_css_selector(locators['Лендинг']['Войти из тела'][0])[
locators['Лендинг']['Войти из тела'][1]].click()
window_after = driver.window_handles[1]
driver.switch_to_window(window_after)
driver.close()
def test_watch_video(driver):
driver.find_elements_by_css_selector(locators['Лендинг']['Смотреть видео'][0])[
locators['Лендинг']['Смотреть видео'][1]].click()
time.sleep(5)
driver.find_elements_by_css_selector(locators['Лендинг']['Закрыть видео'][0])[
locators['Лендинг']['Закрыть видео'][1]].click()
driver.close()
def test_faq(driver):
driver.find_elements_by_css_selector(locators['Лендинг']['FAQ-1'][0])[locators['Лендинг']['FAQ-1'][1]].click()
driver.find_elements_by_css_selector(locators['Лендинг']['FAQ-2'][0])[locators['Лендинг']['FAQ-2'][1]].click()
driver.find_elements_by_css_selector(locators['Лендинг']['FAQ-3'][0])[locators['Лендинг']['FAQ-3'][1]].click()
driver.find_elements_by_css_selector(locators['Лендинг']['FAQ-4'][0])[locators['Лендинг']['FAQ-4'][1]].click()
driver.find_elements_by_css_selector(locators['Лендинг']['FAQ-5'][0])[locators['Лендинг']['FAQ-5'][1]].click()
driver.close()
|
import numpy as np
import os
import json
import re
from collections import Counter
from nltk.corpus import stopwords
from time import time
english = stopwords.words('english')
class Data(object):
def __init__(self):
self.vocab = []
self.keys_vocab = []
self.index_map = {}
self.keys_index_map = {}
self.E_talks = []
self.E_keywords = []
self.talks_train = []
self.talks_validate = []
self.talks_test = []
self.keywords_train = []
self.keywords_validate = []
self.keywords_test = []
def flatten(l):
return [elem for sub_l in l for elem in sub_l]
labels = ["ooo", "ooD", "oEo", "oED", "Too", "ToD", "TEo", "TED"]
def _make_label(keywords):
keywords = keywords.replace(' ', '').lower().split(',')
index = 0
if "technology" in keywords:
index += 4
if "entertainment" in keywords:
index += 2
if "design" in keywords:
index += 1
x = np.zeros(8, dtype=np.float32)
x[index] = 1.
return x
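# Example: _make_label("technology, design") adds 4 + 1 = 5, producing a one-hot
# vector for labels[5] == "ToD".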
def _process_keywords(keywords):
return keywords.replace(' ', '').lower().split(',')
def _process_talk(talk):
# Remove text in parenthesis
talk_noparens = re.sub(r'\([^)]*\)', '', talk)
# Remove the names of the speakers
sentences_strings = []
for line in talk_noparens.split('\n'):
m = re.match(r'^(?:(?P<precolon>[^:]{,20}):)?(?P<postcolon>.*)$', line)
sentences_strings.extend(sent for sent in m.groupdict()['postcolon'].split('.') if sent)
talk_tokens = []
for sent_str in sentences_strings:
tokens = re.sub(r"[^a-z0-9]+", " ", sent_str.lower()).split()
talk_tokens.extend(tokens)
# for stopword in english:
# print(stopword)
# talk_tokens = filter(lambda x: x != stopword, talk_tokens)
talk_tokens = [word for word in talk_tokens if word not in english]
return (talk_tokens)
def _process_talk_gen(talk):
# Remove text in parenthesis
talk_noparens = re.sub(r'\([^)]*\)', '', talk)
# Remove the names of the speakers
sentences_strings = []
for line in talk_noparens.split('\n'):
m = re.match(r'^(?:(?P<precolon>[^:]{,20}):)?(?P<postcolon>.*)$', line)
sentences_strings.extend(sent for sent in m.groupdict()['postcolon'].split('.') if sent)
talk_tokens = ["<START>"]
for sent_str in sentences_strings:
tokens = re.sub(r"[^a-z0-9]+", " ", sent_str.lower()).split()
tokens.append("<EOS>")
talk_tokens.extend(tokens)
talk_tokens.append("<END>")
return talk_tokens
def _to_vec_seq(word_to_id, talks, MAX_SIZE=None):
def to_id(word):
if word in word_to_id:
return word_to_id[word]
else:
return 1
print("Converting talks to vectors...")
if MAX_SIZE is None:
return [[to_id(word) for word in talk] for talk in talks]
else:
return [[to_id(word) for i, word in enumerate(talk) if i < MAX_SIZE] for talk in talks]
def _make_glove_embedding(words, keywords, embedding_size=200):
print("Loading GloVe...")
data = Data()
data.vocab = ["<pad>", "<unk>", "<START>", "<EOS>", "<END>"]
data.keys_vocab = ["<pad>", "<unk>"]
data.index_map = {
"<pad>": 0,
"<unk>": 1,
"<START>": 2,
"<EOS>": 3,
"<END>": 4
}
data.keys_index_map = {
"<pad>": 0,
"<unk>": 1
}
index = 5
data.E_talks =( [np.zeros(embedding_size, dtype=np.float32) for _ in range(2)] +
[2 * np.random.randn(embedding_size) for _ in range(3)])
with open('glove.6B.200d.txt', encoding='utf-8') as f:
for line in f:
vec = line.split()
word = vec.pop(0)
if word in words:
vec = np.array([float(r) for r in vec], dtype=np.float32)
data.E_talks.append(vec)
data.vocab.append(word)
data.index_map[word] = index
index += 1
data.E_keywords = [np.zeros(50, dtype=np.float32) for _ in range(2)]
index = 2
with open('glove.6B.50d.txt', encoding='utf-8') as f:
for line in f:
vec = line.split()
word = vec.pop(0)
if word in keywords:
vec = np.array([float(r) for r in vec], dtype=np.float32)
data.E_keywords.append(vec)
data.keys_vocab.append(word)
data.keys_index_map[word] = index
index += 1
data.E_talks = np.array(data.E_talks, dtype=np.float32)
data.E_keywords = np.array(data.E_keywords, dtype=np.float32)
return data
def _make_random_embeddings(words, keywords, embedding_size=200):
print("Making Random Embeddings...")
data = Data()
data.vocab = ["<pad>", "<unk>", "<START>", "<EOS>", "<END>"]
data.keys_vocab = ["<pad>", "<unk>"]
data.index_map = {
"<pad>": 0,
"<unk>": 1,
"<START>": 2,
"<EOS>": 3,
"<END>": 4
}
data.keys_index_map = {
"<pad>": 0,
"<unk>": 1
}
index = 5
data.E_talks =( [np.zeros(embedding_size, dtype=np.float32) for _ in range(2)] +
[2 * np.random.randn(embedding_size) for _ in range(3)])
for word in words:
if word not in data.vocab:
vec = 2 * np.random.randn(embedding_size)
data.index_map[word] = index
data.vocab.append(word)
index += 1
data.E_talks.append(vec)
data.E_keywords = [np.zeros(embedding_size, dtype=np.float32) for _ in range(2)]
index = 2
for word in keywords:
if word in data.index_map:
vec = data.E_talks[data.index_map[word]]
else:
vec = 2 * np.random.randn(embedding_size)
data.E_keywords.append(vec)
data.keys_vocab.append(word)
data.keys_index_map[word] = index
index += 1
data.E_talks = np.array(data.E_talks, dtype=np.float32)
data.E_keywords = np.array(data.E_keywords, dtype=np.float32)
return data
def _make_random_embedding(talks, embedding_size=20):
index_map = {}
index = 2
# 0 is for padding and 1 unknown word
mat = [np.zeros(embedding_size, dtype=np.float32)]
for talk in talks:
for word in talk:
if word not in index_map:
vec = 2 * np.random.randn(embedding_size)
mat.append(vec)
index_map[word] = index
index += 1
return index_map, np.array(mat, dtype=np.float32)
def _pad(talk, length):
return talk + ["<pad>" for _ in range(length-len(talk))]
def get_generation_data(n_train, n_validate, n_test, MAX_SIZE=None, voc_size=40000, keys_voc_size=330):
start = time()
if os.path.isfile('talks_gen.json'):
print("Loading the data...")
with open('talks_gen.json', 'r') as f:
talks = json.load(f)
with open('keywords_gen.json', 'r') as f:
keywords = json.load(f)
else:
print("Processing the data...")
# Download the dataset if it's not already there: this may take a minute as it is 75MB
if not os.path.isfile('ted_en-20160408.zip'):
import urllib.request
print("Downloading the data...")
urllib.request.urlretrieve("https://wit3.fbk.eu/get.php?path=XML_releases/xml/ted_en-20160408.zip&filename=ted_en-20160408.zip", filename="ted_en-20160408.zip")
import zipfile
import lxml.etree
# For now, we're only interested in the subtitle text, so let's extract that from the XML:
with zipfile.ZipFile('ted_en-20160408.zip', 'r') as z:
doc = lxml.etree.parse(z.open('ted_en-20160408.xml', 'r'))
talks = doc.xpath('//content/text()')
keywords = doc.xpath('//head/keywords/text()')
del doc
keywords = list(map(_process_keywords, keywords))
talks = list(map(_process_talk_gen, talks))
print(list(map(len, talks[:20])))
res = sorted(zip(talks, keywords), key=lambda x: len(x[0]))
res = [(talk, keys) for talk, keys in res if len(talk) > 100]
print("Talks:", len(res))
keywords = [keys for _, keys in res]
talks = [talk for talk, _ in res]
del res
print(list(map(len, talks[:20])))
print(max(map(len, talks))) # => 7020
# Save talks
with open('talks_gen.json', 'w') as f:
json.dump(talks, f)
# Save keywords
with open('keywords_gen.json', 'w') as f:
json.dump(keywords, f)
words = set(flatten(talks[:n_train]))
keys = set(flatten(keywords[:n_train]))
# all_words = flatten(talks[:n_train])
# counter = Counter(all_words)
# words = [word for word, _ in counter.most_common(voc_size)]
# del all_words, counter
# print(len(words))
# all_keys = flatten(keywords[:n_train])
# counter = Counter(all_keys)
# keys = [key for key, _ in counter.most_common(keys_voc_size)]
# del all_keys, counter
# print(len(keys))
data = _make_glove_embedding(words, keys)
keywords = _to_vec_seq(data.keys_index_map, keywords)
talks = _to_vec_seq(data.index_map, talks, MAX_SIZE=MAX_SIZE)
data.talks_train = talks[:n_train]
data.talks_validate = talks[n_train: n_train+n_validate]
data.talks_test = talks[n_train+n_validate: n_train+n_validate+n_test]
data.keywords_train = keywords[:n_train]
data.keywords_validate = keywords[n_train: n_train+n_validate]
data.keywords_test = keywords[n_train+n_validate: n_train+n_validate+n_test]
end = time()
print(end-start, "seconds")
return data
def get_raw_data(n_train, n_validate, n_test, MAX_SIZE=None):
if os.path.isfile('talks.json'):
print("Loading the data...")
start = time()
with open('talks.json', 'r') as f:
talks = json.load(f)
keywords = np.load('keywords.npy')
end = time()
print(end-start, "seconds")
else:
print("Processing the data...")
# Download the dataset if it's not already there: this may take a minute as it is 75MB
if not os.path.isfile('ted_en-20160408.zip'):
import urllib.request
print("Downloading the data...")
urllib.request.urlretrieve("https://wit3.fbk.eu/get.php?path=XML_releases/xml/ted_en-20160408.zip&filename=ted_en-20160408.zip", filename="ted_en-20160408.zip")
import zipfile
import lxml.etree
# For now, we're only interested in the subtitle text, so let's extract that from the XML:
with zipfile.ZipFile('ted_en-20160408.zip', 'r') as z:
doc = lxml.etree.parse(z.open('ted_en-20160408.xml', 'r'))
talks = doc.xpath('//content/text()')
keywords = doc.xpath('//head/keywords/text()')
del doc
# Process keywords
keywords = list(map(_make_label, keywords))
# ooo 62% of training and 42.4% of validation
res = []
deleted = 0
for i, talk in enumerate(talks):
curr = _process_talk(talk)
if len(curr) < MAX_SIZE:
if len(curr) == 0:
keywords.pop(i - deleted)  # account for keywords already removed so indices stay aligned
deleted += 1
else:
curr = _pad(curr, MAX_SIZE)
res.append(curr)
else:
res.append(curr)
if i%100 == 0:
print(i, "talks done")
print("Deleted:", deleted)
keywords = np.array(keywords)
talks = res
# print(max(map(len, talks))) => 2941
# Save talks
with open('talks.json', 'w') as f:
json.dump(talks, f)
# Save keywords
np.save('keywords', keywords)
index_map, vocab, E = _make_glove_embedding(talks[:n_train])
talks = _to_vec_seq(index_map, talks, MAX_SIZE=MAX_SIZE)
talks_dict = {
"train": talks[:n_train],
"validate": talks[n_train: n_train+n_validate],
"test": talks[n_train+n_validate: n_train+n_validate+n_test]
}
keywords_dict = {
"train": keywords[:n_train],
"validate": keywords[n_train: n_train+n_validate],
"test": keywords[n_train+n_validate: n_train+n_validate+n_test]
}
return E, talks_dict, keywords_dict
def _to_chunks(l1, l2, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l1), n):
yield l1[i:i + n], l2[i: i + n]
def make_batches(talks, keywords, batch_size, equal_len=True, equal_size=True):
batches = []
for talks_batch, keywords_batch in _to_chunks(talks, keywords, batch_size):
n_steps = max(map(len, talks_batch))
# Make talks equally long
if equal_len:
talks_batch = [talk + [0 for _ in range(n_steps - len(talk))] for talk in talks_batch]
batches.append((talks_batch, keywords_batch))
if equal_size:
if len(batches[-1][0]) < batch_size:
batches.pop(-1)
return batches
def make_batches_gen(talks, keywords, batch_size):
batches = []
for talks_batch, keywords_batch in _to_chunks(talks, keywords, batch_size):
n_keywords = max(map(len, keywords_batch))
keywords_batch = [keys + [0 for _ in range(n_keywords-len(keys))]
for keys in keywords_batch]
batch = {
"inputs": [],
"targets": [],
"keywords": keywords_batch,
"mask": [],
"seq_lengths": list(map(lambda x: len(x)-1, talks_batch)),
"max_len": 0
}
batch["max_len"] = max(batch["seq_lengths"])
for talk in talks_batch:
padding = [0 for _ in range(batch["max_len"]-len(talk)+1)]
batch["mask"].append(
[1 for _ in range(len(talk)-1)] + padding )
batch["inputs"].append(
talk[:len(talk)-1] + padding)
batch["targets"].append(
talk[1:]+ padding)
batch["mask"] = np.array(batch["mask"], dtype=np.float32)
batch["loss_weights"] = [np.ones(len(talks_batch)*batch["max_len"], dtype=np.float32)]
batches.append(batch)
return batches
def make_array(talks, keywords):
batch = make_batches(talks, keywords, len(talks))
return batch[0]
|
from calendar import weekday
DAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
def most_frequent_days(year):
beg = weekday(year, 1, 1)
end = weekday(year, 12, 31)
return DAYS[beg:end + 1] if beg <= end else DAYS[:end + 1] + DAYS[beg:]
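# Usage sketch: 2020 starts on a Wednesday and ends on a Thursday, so those two
# days each occur 53 times that year.
if __name__ == '__main__':
    print(most_frequent_days(2020))  # ['Wednesday', 'Thursday']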
|
import requests
import openpyxl
import json
import os
book = openpyxl.Workbook()
for vol in range(1, 55):
print(vol)
type = "all"
prefix = "https://raw.githubusercontent.com/nakamura196/genji_curation/master/docs/iiif"
prefix2 = "/Users/nakamurasatoru/git/d_genji/genji_curation/docs/iiif"
url = prefix + "/nijl_kuronet_taisei_"+type+"/"+str(vol).zfill(2)+".json"
# curation_data = requests.get(url).json()
path = prefix2 + "/nijl_kuronet_taisei_"+type+"/"+str(vol).zfill(2)+".json"
if not os.path.exists(path):
continue
with open(path) as f:
curation_data = json.load(f)
selection = curation_data["selections"][0]
within = selection["within"]
members = selection["members"]
manifest = within["@id"]
canvas_pos_map = {}
tmp = {}
for member in members:
metadata = member["metadata"]
member_id = member["@id"].split("#xywh=")
canvas_id = member_id[0]
page = int(member_id[0].split("/canvas/")[1]) # NIJL-specific canvas ID convention
tmp[page] = canvas_id
pos = 1
for page in sorted(tmp):
canvas_pos_map[tmp[page]] = pos
pos += 1
'''
with open(prefix2 + "/nijl/"+str(vol).zfill(2)+".json") as f:
manifest_data = json.load(f)
canvases = manifest_data["sequences"][0]["canvases"]
for i in range(len(canvases)):
canvas = canvases[i]
canvas_pos_map[canvas["@id"]] = i + 1
'''
map = {}
for member in members:
metadata = member["metadata"]
member_id = member["@id"].split("#xywh=")
print(member_id)
page = int(member_id[0].split("/canvas/")[1]) # NIJL-specific canvas ID convention
pos = canvas_pos_map[member_id[0]]
link = "http://codh.rois.ac.jp/software/iiif-curation-viewer/demo/?curation="+url+"&mode=annotation&pos="+str(pos)+"&lang=ja"
if len(metadata) > 1:
p = -1
koui = ""
text = ""
for obj in metadata:
if obj["label"] == "校異源氏テキスト":
koui = obj["value"]
elif obj["label"] == "KuroNet翻刻":
text = obj["value"]
elif obj["label"] == "p":
p = int(obj["value"])
map[text] = {
"p" : p,
"koui" : koui
}
else:
text = metadata[0]["value"]
if text not in map:
map[text] = {
"p" : "",
"koui" : ""
}
map[text]["url"] = link
map[text]["page"] = page
rows = []
rows.append(["担当", "結果", "大成番号", "校異テキスト", "OCRテキスト", "鵜飼文庫ページ番号", "URL"])
for text in map:
obj = map[text]
row = ["", obj["p"], obj["p"], obj["koui"], text, obj["page"], obj["url"]]
rows.append(row)
sheet = book.create_sheet(index=(vol-1), title=str(vol)+" "+within["label"])
for row in rows:
sheet.append(row)
# break
# print(map)
book.save('data/taisei_'+type+'.xlsx')
|
from django.shortcuts import render
from django.contrib import auth
from django.http import HttpResponseRedirect
# Member login: redirect to the main page if the user is already authenticated
def member_login(request):
if request.user.is_authenticated:
if request.session.get("as") == "student":
return HttpResponseRedirect("/main/member/")
if request.session.get("as") == "entrepreneur":
return HttpResponseRedirect("/main/company/")
return render(request, "login/memberlogin.html")
def company_login(request):
if request.user.is_authenticated:
if request.session.get("as") == "entrepreneur":
return HttpResponseRedirect("/main/company/")
if request.session.get("as") == "student":
return HttpResponseRedirect("/main/member/")
return render(request, "login/companylogin.html")
# Validate the login request (credentials and group membership)
def member_login_action(request):
if request.method == "POST":
uid = request.POST.get("id")
pwd = request.POST.get("pwd")
user = auth.authenticate(username=uid, password=pwd)
if user is not None:
if user.groups.filter(name="student").exists():
auth.login(request, user)
request.session["user"] = uid
request.session["as"] = "student"
return HttpResponseRedirect("/main/")
return render(
request,
"login/memberlogin.html",
{"msg": "User has no permission to log in"},
)
return render(
request,
"login/memberlogin.html",
{"msg": "username or password error"},
)
return render(
request, "login/memberlogin.html", {"msg": "not a valid login method"}
)
def company_login_action(request):
if request.method == "POST":
uid = request.POST.get("id")
pwd = request.POST.get("pwd")
user = auth.authenticate(username=uid, password=pwd)
if user is not None:
if user.groups.filter(name="entrepreneur").exists():
auth.login(request, user)
request.session["user"] = uid
request.session["as"] = "entrepreneur"
return HttpResponseRedirect("/main/")
return render(
request,
"login/companylogin.html",
{"msg": "User has no permission user to log in"},
)
return render(
request,
"login/companylogin.html",
{"msg": "username or password error"},
)
return render(
request, "login/companylogin.html", {"msg": "not a valid login method"}
)
|
'''
Demonstration script: runs the lab 4 text generators on a toy corpus.
'''
import ngrams.ngram_trie as ngrams
from lab_4.main import WordStorage
from lab_4.main import encode_text
from lab_4.main import decode_text
from lab_4.main import NGramTextGenerator
from lab_4.main import LikelihoodBasedTextGenerator
from lab_4.main import BackOffGenerator
if __name__ == '__main__':
corpus = ('i', 'have', 'a', 'cat', '<END>',
'his', 'name', 'is', 'bruno', '<END>',
'i', 'have', 'a', 'dog', 'too', '<END>',
'his', 'name', 'is', 'rex', '<END>',
'her', 'name', 'is', 'rex', 'too', '<END>')
storage = WordStorage()
storage.update(corpus)
encoded = encode_text(storage, corpus)
trie = ngrams.NGramTrie(2, encoded)
context = (storage.get_id('i'),)
generator = NGramTextGenerator(storage, trie)
actual = generator.generate_text(context, 5)
actual = decode_text(storage, actual)
print(actual)
generator = LikelihoodBasedTextGenerator(storage, trie)
actual = generator.generate_text(context, 5)
actual = decode_text(storage, actual)
print(actual)
two = ngrams.NGramTrie(2, encoded)
trie = ngrams.NGramTrie(3, encoded)
context = (storage.get_id('name'),
storage.get_id('is'),)
generator = BackOffGenerator(storage, trie, two)
actual = generator.generate_text(context, 5)
RESULT = decode_text(storage, actual)
print(RESULT)
assert RESULT == ('Name is rex', 'Her name is rex', 'Her name is rex', 'Her name is rex', 'Her name is rex')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 22 22:22:55 2015
@author: lenovo
"""
"""
Advantages:
Makes a program with an inherent recurrence relation and complex structure concise and readable,
especially when the full path from the boundary case to the solution is hard to describe,
since pushing the problem one step further preserves the original relation.
Disadvantages:
Deep nesting and high function-call overhead
Repeated computation of the same subproblems
"""
"""
Tower of Hanoi problem
"""
count = 0
def hanoi(n,A,B,C):
global count
if n == 1:
print "Move disk",n,"from",A,"to",C
count += 1
else:
hanoi(n-1,A,C,B)
print "Move disk",n,"from",A,"to",C
count += 1
hanoi(n-1,B,A,C)
n = int(raw_input("input n:"))
hanoi(n,'Left','Mid','Right')
print count
"""路边停车问题"""
import random
def parking(low,high):
if high - low < 1:
return 0
else:
x = random.uniform(low,high-1)
return parking(low,x) + 1 + parking(x + 1,high)
s = 0
for i in range(1000):
s += parking(0,5)
print s/1000.
|
"""
Random Walker (Drunkard's Walk): a drunkard stands in the middle of a city laid out like a grid.
Starting from the origin (0, 0), he repeatedly makes a random choice to move one block East, West,
North or South. For example, after 16 random steps he might reach position (8, 8), having covered
a distance of 8 + 8 = 16 blocks. This program simulates the problem (a minimal sketch follows below).
For more details, see: https://www.quantamagazine.org/random-walk-puzzle-solution-20160907/
"""
import random
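# Minimal simulation sketch of the problem described above (the 16-step count and the
# function name are illustrative assumptions, not part of the original file).
def random_walk(steps=16):
    x, y = 0, 0
    for _ in range(steps):
        dx, dy = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])  # East, West, North, South
        x += dx
        y += dy
    return x, y

if __name__ == "__main__":
    x, y = random_walk(16)
    print("Ended at (%d, %d), %d blocks from the origin" % (x, y, abs(x) + abs(y)))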
|
import math
def union(R, S):
return R + S
def difference(R, S):
return [t for t in R if t not in S]
def intersect(R, S):
return [t for t in R if t in S]
def project(R, p):
return [p(t) for t in R]
def select(R, s):
return [t for t in R if s(t)]
def product(R, S):
return [(t,u) for t in R for u in S]
def aggregate(R, f):
keys = {r[0] for r in R}
return [(key, f([v for (k,v) in R if k == key])) for key in keys]
def map(f, R):
return [t for (k,v) in R for t in f(k,v)]
def reduce(f, R):
keys = {k for (k,v) in R}
return [f(k1, [v for (k2,v) in R if k1 == k2]) for k1 in keys]
def dist(p, q):
(x1,y1) = p
(x2,y2) = q
return (x1-x2)**2 + (y1-y2)**2
def plus(args):
p = [0,0]
for (x,y) in args:
p[0] += x
p[1] += y
return tuple(p)
def scale(p, c):
(x,y) = p
return (x/c, y/c)
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
# 3959 radius of the great circle in miles...some algorithms use 3956
# 6371 radius in kilometers...some algorithms use 6367
# 3959 * 5280 radius in feet
# 6371 * 1000 radius in meters
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
def ADD(R):
tmp1 = 0
tmp2 = 0
for i in R:
tmp1 += i[0]
tmp2 += i[1]
return (tmp1, tmp2)
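# Illustrative usage (not part of the original file): grouping (key, point) pairs with the
# helpers above to get a per-key centroid, first via aggregate/plus/scale and then via reduce.
if __name__ == "__main__":
    R = [('a', (0, 0)), ('a', (2, 2)), ('b', (4, 0))]
    sums = aggregate(R, plus)    # per-key vector sums, e.g. [('a', (2, 2)), ('b', (4, 0))]
    counts = aggregate(R, len)   # per-key counts, e.g. [('a', 2), ('b', 1)]
    centroids = [(k, scale(dict(sums)[k], c)) for (k, c) in counts]
    print(centroids)             # e.g. [('a', (1.0, 1.0)), ('b', (4.0, 0.0))] (order may vary)
    # the same result expressed with the map/reduce style helpers defined above
    print(reduce(lambda k, vs: (k, scale(plus(vs), len(vs))), R))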
|
from django.urls import path,include
from . import views
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
router = routers.DefaultRouter()
router.register('items',views.ItemsView)
urlpatterns = [
path('',include(router.urls)),
]
|
numeroDePessoas = int(input())
pessoas = input().split(' ')
menor = pessoas.copy()
menor.sort()
menor = menor[0]
print( str (pessoas.index(menor) + 1 ) )
|
from django.db import models
from workprogramsapp.models import Topic, WorkProgram
from django.conf import settings
class AdditionalMaterial(models.Model):
"""
    Materials for topics
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE, verbose_name='тема рабочей программы',
related_name='additional_materials_for_topic')
title = models.CharField(max_length=300, verbose_name="Описание")
url = models.URLField(verbose_name="Ссылка на материал")
class StructuralUnit(models.Model):
title = models.CharField(max_length=300, verbose_name="Описание")
isu_id = models.IntegerField(blank=True, null=True, verbose_name="ID структурного подразделения в ИСУ")
class UserStructuralUnit(models.Model):
status_choise = (
('leader', 'leader'),
('deputy', 'deputy'),
('employee', 'employee'),
)
structural_unit = models.ForeignKey('StructuralUnit', on_delete=models.SET_NULL, verbose_name='Структурное подразделени',
related_name='user_in_structural_unit', blank=True, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True, related_name='user_for_structural_unit')
    status = models.CharField(max_length=30, choices=status_choise, verbose_name='Архив', default='leader')  # default assumed to be 'leader'; the original default 'l' is not a valid choice
|
import temp
import time
print("Press CTRL-C to interupt within 5 seconds")
for _ in range(5):
print(".")
time.sleep(1)
temp.main()
|
import logging
import sys
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
class ExitOnCriticalHandler(logging.StreamHandler):
def emit(self, record):
super().emit(record)
        if record.levelno == logging.CRITICAL:
exit(1)
def get_logger(name):
logger = logging.getLogger(name)
handler = ExitOnCriticalHandler(sys.stderr)
handler.setFormatter(
logging.Formatter("%(asctime)s [%(levelname)s]\t[%(name)s]\t%(message)s")
)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
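# Example usage sketch (assumed, not part of the original module). Because the handler calls
# exit(1) after emitting a CRITICAL record, a critical log should be the last call made.
if __name__ == "__main__":
    log = get_logger("demo")
    log.info("starting up")
    log.critical("fatal configuration error")  # the handler exits the process with status 1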
|
#!/usr/bin/env python
import re
import sys
import binascii
from struct import unpack
from .helpers import *
def analyze(fp):
fp.seek(0)
while not IsEof(fp):
line = dataUntil(fp, b'\x0a', 1).decode('utf-8')
m = re.match(r'^:(..)(....)00(.*)(..)', line)
if m:
(count, addr, data, csum) = m.group(1,2,3,4)
            assert int(count, 16) == len(data) // 2
tagDataUntil(fp, b'\x0a', 'DATA %s: %s' % (addr, data))
continue
m = re.match(r'^:00000001FF', line)
if m:
tagDataUntil(fp, b'\x0a', 'EOF')
continue
m = re.match(r'^:02(....)02(....)(..)', line)
if m:
(addr, saddr, csum) = m.group(1,2,3)
tagDataUntil(fp, b'\x0a', 'EXTENDED SEGMENT ADDR %s' % (saddr))
continue
m = re.match(r'^:04(....)03(....)(....)(..)', line)
if m:
(addr, cs, ip, csum) = m.group(1,2,3,4)
tagDataUntil(fp, b'\x0a', 'START SEGMENT ADDR %s:%s' % (cs, ip))
continue
m = re.match(r'^:02(....)04(....)(..)', line)
if m:
            (addr, upper16, csum) = m.group(1, 2, 3)
            tagDataUntil(fp, b'\x0a', 'EXTENDED LINEAR ADDR %s0000' % (upper16))
continue
m = re.match(r'^:04(....)05(........)(..)', line)
if m:
(addr, linear, csum) = m.group(1,2,3,4)
tagDataUntil(fp, b'\x0a', 'START LINEAR ADDR %s' % (linear))
continue
tagDataUntil(fp, b'\x0a', 'UNKNOWN')
if __name__ == '__main__':
with open(sys.argv[1], 'rb') as fp:
analyze(fp)
|
from django import template
from stories.models import Comment
register = template.Library()
@register.simple_tag
def get_verbose_field_name(instance, field_name):
"""
Returns verbose_name for a field.
"""
return instance._meta.get_field(field_name).verbose_name.title()
@register.simple_tag
def get_comments(story_id):
"""
Returns latest 10 comments for a story
"""
comments = Comment.objects.filter(story=story_id).order_by('-posted_on')[:10]
return comments
|
# This program will open a .txt file, change the format of the strings in this file
# and then write the reformatted strings into a new .txt file
def main():
print("This program converts an all lower case text file to all capital letters")
input= open('Before.txt',"r")
output= open('After.txt', "w")
for i in input:
uppercase=i.upper()
        print(uppercase, file=output, end='')  # i already ends with a newline, so avoid writing a blank line
input.close()
output.close()
print("The lowercase names have been converted to uppercase")
print("and written into After.txt")
main()
|
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_oauthlib.client import OAuth
import os
import markdown
app = Flask(__name__)
if "DEBUG" in os.environ:
app.config.from_object("config.Config")
elif "TESTING" in os.environ:
app.config.from_object("config.TestConfig")
else:
app.config.from_object("config.ProdConfig")
oauth = OAuth(app)
bnet = oauth.remote_app("battle.net", app_key="BNET")
lm = LoginManager()
lm.login_view = "login"
lm.init_app(app)
db = SQLAlchemy(app)
def render_default_template(tmpl_name, **kwargs):
region = app.config["REGION"]
colors = app.config["CLASS_COLORS"]
return render_template(tmpl_name, region=region, colors=colors, markdown=markdown.markdown, **kwargs)
import models
from plugin_manager import PluginManager
pm = PluginManager(db, app)
import views
|
from random import randint
numero = randint(0,5)
resp = int(input('Digite o número que o computador pensou:'))
if numero == resp:
print('Parabéns você acertou!')
else:
print('Que pena você errou feio!! errou rude')
|
# Generate a random challenge string
# The shared secret key is fixed as 043
import os
import hashlib
import socket
sk = socket.socket()
sk.bind(('127.0.0.1',43))
sk.listen()
conn,addr = sk.accept()
ret = os.urandom(32)  # generate a random byte string of the given length
print(ret)
conn.send(ret)
sha = hashlib.sha1(b'043')
sha.update(ret)
yanzheng = sha.hexdigest()
yansheng2 = conn.recv(1024).decode('utf-8')
if yansheng2==yanzheng:
    print('Client is legitimate')
conn.send(b'hello')
else:
conn.close()
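# Hedged client-side sketch for the challenge-response exchange above (assumes the same shared
# secret b'043' and address; illustrative only and meant to be run from a separate process).
def demo_client():
    client = socket.socket()
    client.connect(('127.0.0.1', 43))
    challenge = client.recv(32)                   # the random bytes sent by the server
    sha = hashlib.sha1(b'043')
    sha.update(challenge)
    client.send(sha.hexdigest().encode('utf-8'))  # the server compares this hex digest
    print(client.recv(1024))                      # b'hello' on success
    client.close()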
|
import os
import sys
import yaml
from bioblend import toolshed
"""
A simple program to update the tool revisions in a tools.yml file.
The program will either replace the list of revisions with just the latest
revision available on the ToolShed, or append the latest revision (if a newer
revision is available) to the existing list of revisions.
NOTE: The program doesn't (can't) actually check if an available revision is
"newer" than the current revision, just if they have differnt SHA values. If
the revisions are different it is assumed that the version on the ToolShed is
newer.
USAGE:
python .github/scripts/update_tools.py [append|replace] ./production/anvil/tools.yml /path/to/write.yml
"""
DEFAULT_TOOLSHED = 'https://toolshed.g2.bx.psu.edu'
# Common keys into the tools dict. Defined solely so our IDE can do completions
# and I don't consistently misspell revisisions or have to remember if it is
# toolshed_url or tool_shed_url
NAME = 'name'
OWNER = 'owner'
TOOLS = 'tools'
SHED = 'tool_shed_url'
REVISIONS = 'revisions'
# The toolsheds that we have already connected to.
tool_sheds = { DEFAULT_TOOLSHED: toolshed.ToolShedInstance(DEFAULT_TOOLSHED) }
def validate(tool):
"""Ensure the tool has the fields we need so we don't need to check later."""
if SHED not in tool:
tool[SHED] = DEFAULT_TOOLSHED
if REVISIONS not in tool:
tool[REVISIONS] = []
def append(tool, revision):
if revision not in tool[REVISIONS]:
tool[REVISIONS].append(revision)
def replace(tool, revision):
tool[REVISIONS] = [ revision ]
def update_file(add_to_list, infile, outfile):
with open(infile, "r") as f:
data = yaml.safe_load(f)
tool_list = data[TOOLS]
for tool in tool_list:
print(f"Getting latest revision for {tool[NAME]}")
validate(tool)
url = tool[SHED]
if url in tool_sheds:
ts = tool_sheds[url]
else:
ts = toolshed.ToolShedInstance(url)
tool_sheds[url] = ts
revs = ts.repositories.get_ordered_installable_revisions(tool[NAME], tool[OWNER])
if revs and len(revs) > 0:
add_to_list(tool, revs[-1])
data = { "tools": tool_list }
with open(outfile, "w") as f:
yaml.dump(data, f, default_flow_style=False)
if __name__ == '__main__':
# Very very simple command line parsing.
if len(sys.argv) != 4:
print(f"ERROR: Expected 3 parameters but found {len(sys.argv)-1}")
print(f"USAGE: python {sys.argv[0]} [append|replace] <input file> <output file>")
sys.exit(1)
mode = None
if sys.argv[1] == 'append':
mode = append
elif sys.argv[1] == 'replace':
mode = replace
else:
print(f"Invalid mode: {sys.argv[1]}")
print("Must be one of append or replace")
sys.exit(1)
infile = sys.argv[2]
outfile = sys.argv[3]
if not os.path.exists(infile):
print(f"Could not find the input file {infile}")
sys.exit(1)
update_file(mode, infile, outfile)
|
#!/usr/bin/env python3
#coding:utf-8
from main import Run
def t():
r = Run()
r.main()
|
# -*- coding: utf-8 -*-
import json
import time
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from appium.webdriver.common.touch_action import TouchAction
import os
import xlrd
from xlutils.copy import copy
import xlwt
class devices_test():
def __init__(self,device,appPackage,appActivity,server,appfile=None):
"""Constructor"""
print(device,appPackage,appActivity,server)
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.4'
desired_caps['deviceName'] = device
desired_caps['appPackage'] = appPackage
if appfile != None:
desired_caps['app'] = appfile
desired_caps['appActivity'] = appActivity
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
self.driver = webdriver.Remote(server, desired_caps)
#self.driver.implicitly_wait(8)
self.Wait=3
print(self.driver.current_activity)
        # dismiss any start-up pop-up dialogs (same list handled by self.popup below)
        self.popup(2)
print(self.driver.current_activity)
def id(self,key,timing=None):
if timing == None:
timing=self.Wait
try:
            wait = WebDriverWait(self.driver,timing,0.5).until(EC.presence_of_element_located((By.ID,key)),message='element not located by id: ' + key)
return wait
except:
return None
def className(self,key,timing=None):
if timing == None:
timing=self.Wait
try:
wait = WebDriverWait(self.driver,timing,0.5).until(EC.presence_of_element_located((By.CLASS_NAME,key)))
return wait
except:
return None
def name(self,key,timing=None):
if timing == None:
timing=self.Wait
try:
wait = WebDriverWait(self.driver,timing,0.5).until(EC.presence_of_element_located((By.NAME,key)))
return wait
except:
return None
def xpath(self,key,timing=None):
if timing == None:
timing=self.Wait
try:
wait = WebDriverWait(self.driver,timing,0.5).until(EC.presence_of_element_located((By.XPATH,key)))
return wait
except:
return None
def tap(self,x,y):
#self.driver.tap([(141,97)(903,196)],500)
TouchAction(self.driver).press(x=x, y=y).release().perform()
def seipe(self,x,y,x2,y2,durat=500):
time.sleep(2)
self.driver.swipe(x,y,x2,y2,durat)
def keyevent(self,keycode):
self.driver.keyevent(keycode)
def check(self,typ,content):
comm=[]
if typ=='Activity':
for i in content:
if i[1]!=self.driver.current_activity:
comm.append (i[1]+':Not found')
elif typ=='element':
for i in content:
cck=eval('self.'+i[0])(i[1])
if cck==None:
comm.append(i[2]+':('+i[0]+')'+i[1]+':Not found')
return comm
def homing(self):
while(True):
if self.driver.current_activity != 'com.ellahome.home.HomeActivity':
self.driver.keyevent(4)
else:
break
def reset(self):
self.driver.reset()
time.sleep(5)
self.popup(3)
def popup(self,wait=3):
        Popup=[('xpath','//android.widget.TextView[@resource-id="com.ellabookhome:id/confirm" and @text="确定"]'),('id','com.ellabookhome:id/update_close'),('id','com.ellabookhome:id/iv_market_close'),('id','com.ellabookhome:id/ivTaskNoticeClose')]
for pop in Popup:
che=eval('self.'+pop[0])(pop[1],wait)
if che!=None:
che.click()
def setp(case):
step=case.split('->')
Notes=[]
Result='Pass'
for carryout in step:
print(carryout)
n=carryout.index(')')+1
en=carryout[0:n]
obje=carryout[n:]
Event=['check']
if en[0:en.index('(')] in Event:
if en[0:en.index('(')]=='check':
p=obje.split(',')
content=[]
for i in p:
if i in value:
content.append((value[i]['type'],value[i]['value'],i))
else:
Notes.append(i+':Not collected')
Result_step=devices.check(en[en.index('(')+1:en.index(')')], content)
if len(Result_step)>0:
for r in Result_step:
Notes.append(r)
Result='Fail'
elif len(obje)>0:
if obje in value:
element=eval('devices.'+value[obje]['type'])(value[obje]['value'])
if element !=None:
print(element)
exec('element.'+en)
else:
Notes.append(obje+':('+value[obje]['type']+')'+value[obje]['value']+',Not found')
Result='Fail'
break
else:
Notes.append(obje+':Not collected')
if Result !='Fail':
Result='Block'
break
else:
exec('devices.'+en)
print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),devices.driver.current_activity)
return (Result,Notes)
def readtext(filename):
f = open(filename,"r")
str =f.read().replace('\n','')
f.close()
data=json.loads(str)
return data
value=readtext('data.txt')
devices=devices_test('G2W7N16115000817', 'com.ellabookhome','com.ellahome.start.splash.SplashActivity', 'http://127.0.0.1:4723/wd/hub')
filename = 'D:\\Python\\test\\test.xls'
workbook = xlrd.open_workbook(filename)  # open the workbook
sheets = workbook.sheet_names()  # get the names of all sheets in the workbook
worksheet = workbook.sheet_by_name(sheets[0])  # get the first sheet in the workbook
rows_old = worksheet.nrows  # number of data rows already present in the sheet
new_workbook = copy(workbook)  # copy the xlrd object into an xlwt object
new_worksheet = new_workbook.get_sheet(0)  # get the first sheet of the copied workbook
for i in range(2,rows_old):
reus=''
Resu=setp(worksheet.cell(i,2).value)
    new_worksheet.write(i, 2, Resu[0])  # write the result back into column 2 of row i
for n in Resu[1]:
if len(reus)>1:
reus=reus+'\r\n'+n
else:
reus=n
print(type(reus),'reus:',reus)
new_worksheet.write(i, 4, reus)
new_workbook.save(filename)  # save the workbook
devices.driver.quit()
#devices.id('com.ellabookhome:id/image')
|
import os #manipulate files
with open("mydata.txt", mode="w", encoding="utf-8") as myFile: #a for append
myFile.write("some random text\nMore random filestext\n")
with open("mydata.txt", encoding="utf-8") as myFile:
#read() readline() readlines()
print(myFile.read())
print(myFile.closed)
print(myFile.name)
print(myFile.mode)
#os.rename("mydata.txt", "data.txt") rename file
#os.remove("data.txt") remove file
#os.mkdir("mydir") make directory
#os.chdir("mydir") to change directory
print("current directory:", os.getcwd()) #get name of current working directory
#os.chdir("..") to get one step up in directory
#os.rmdir("mydir")
|
#!/usr/bin/env python
import pytraj as pt
traj = pt.iterload('./RAN.rst7', './RAN.parm7')
t0 = traj[:]
flist = []
for deg in range(-180, 175, 5):
pt._rotate_dih(t0, resid='1', dihtype='chin', deg=deg)
flist.append(t0[0].copy())
print(pt.calc_chin(flist, top=t0.top))
pt.write_traj('combined_traj.nc', flist, top=t0.top)
|
import requests
import json
import csv
import datetime
# Modify these to suit your needs
TOKEN = "YOURTOKEN"
COMMUNITY = "YOURCOMMUNITYID"
DAYS = 14
# No need to modify these
GRAPH_URL_PREFIX = "https://graph.facebook.com/"
GROUPS_SUFFIX = "/groups"
# Default paging limit for Graph API
# No need to modify, unless you're seeing timeouts
DEFAULT_LIMIT = "100"
# Set to true if you like seeing console output
VERBOSE = True
# Calculating a timestamp from DAYS
SINCE = datetime.datetime.now() - datetime.timedelta(days=DAYS)
def getFeed(group, name):
# Token-based auth header
headers = {'Authorization': 'Bearer ' + TOKEN}
# Get the relevant group post content for each feed item
# Include a fetch for like and comment summaries to get total count
# No need to fetch actual likes & comments, so set the limit to 0
params = "?fields=permalink_url,from,story,type,message,link,created_time,updated_time,likes.limit(0).summary(total_count),comments.limit(0).summary(total_count)"
# Default paging limit
params += "&limit=" + DEFAULT_LIMIT
# Time-based limit
params += "&since=" + SINCE.strftime("%s")
graph_url = GRAPH_URL_PREFIX + group + "/feed" + params
result = requests.get(graph_url, headers=headers)
result_json = json.loads(result.text, result.encoding)
feed = []
# Got an error? Ok let's break out
if "error" in result_json:
print "Error", result_json["error"]["message"]
return []
# Did we get back data?
if "data" in result_json:
for feed_item in result_json["data"]:
# Convenience: Add empty field for message / link if not existent
feed_item["message"] = feed_item["message"] if "message" in feed_item else ""
feed_item["link"] = feed_item["link"] if "link" in feed_item else ""
feed.append(feed_item)
return feed
def getGroups(after=None):
# Token-based auth header
headers = {'Authorization': 'Bearer ' + TOKEN}
# Fetch feed for each group, since a given time, but only get 1 feed item.
# We'll use this later to check if there's fresh content in the group
params = "?fields=feed.since(" + SINCE.strftime("%s") + ").limit(1),name,updated_time&"
# Default paging limit
params += "&limit=" + DEFAULT_LIMIT
# Are we paging? Get the next page of data
if after:
params += "&after=" + after
graph_url = GRAPH_URL_PREFIX + COMMUNITY + GROUPS_SUFFIX + params
result = requests.get(graph_url, headers=headers)
result_json = json.loads(result.text, result.encoding)
groups = []
# Got an error? Ok let's break out
if "error" in result_json:
print "Error", result_json["error"]["message"]
return []
# Did we get back data?
if "data" in result_json:
for group_obj in result_json["data"]:
# Only cache this group ID if there's fresh feed content
if "feed" in group_obj:
groups.append(group_obj)
# Is there more data to page through? Recursively fetch the next page
if "paging" in result_json:
getGroups(after=result_json["paging"]["cursors"]["after"])
# Return an array of group IDs which have fresh content
return groups
for group in getGroups():
feed = getFeed(group["id"], group["name"])
# Create a new CSV named after the timestamp / group id / group name, to ensure uniqueness
csv_filename = SINCE.strftime("%Y-%m-%d %H:%M:%S") + " " + group["id"] + " " + group["name"] + ".csv"
if VERBOSE:
print csv_filename
else:
print ".",
with open(csv_filename, "wb") as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# CSV Header
header = ["Post ID", "Permalink", "Create Time", "Updated Time", "Author", "Author ID", "Message", "Link", "Likes", "Comments"]
writer.writerow(header)
for item in feed:
row = [item["id"], item["permalink_url"], item["created_time"], item["updated_time"], item["from"]["name"], item["from"]["id"], item["message"].decode("utf-8").strip(), item["link"], item["likes"]["summary"]["total_count"], item["comments"]["summary"]["total_count"]]
if VERBOSE:
print row
writer.writerow(row)
|
import os, pickle, pyaes, sys, random, pycurl
key = "This_key_for_demo_purposes_only!"
aes = pyaes.AESModeOfOperationCTR(key)
#size of the database
dbsize=20000
#size of elements
elesize=10
def deciph(ciphertext):
return aes.decrypt(ciphertext)
#size of partition
partisize = int(sys.argv[1])
size_buc =1
rr=random.randint(0,19999)
modus=pickle.load(open('data_encrypted.txt', 'rb'))
mapp=pickle.load(open('map.txt', 'rb'))
results=[]
counter=0
PY3 = sys.version_info[0] > 2
class Test:
def __init__(self):
self.contents = 'def switch_join(switch):# Repeat Port 1 to Port 2 p1 = {in_port:1} a1 = [forward(2)] install(switch, p1, DEFAULT, a1) # Repeat Port 2 to Port 1 p2 = {in_port:2} a2 = [forward(1)] install(switch, p2, DEFAULT, a2)'
if PY3:
self.contents = self.contents.encode('ascii')
def body_callback(self, buf):
self.contents = self.contents + buf
sys.stderr.write("Testing %s\n" % pycurl.version)
t = Test()
c = pycurl.Curl()
c.setopt(c.URL, 'http://localhost')
c.setopt(c.WRITEFUNCTION, t.body_callback)
c.perform()
print(t.contents)
c.close()
for j in range(0,dbsize/partisize):
counter=counter+1
#print modus[i][0]
#results.append(modus[rr][0])
results.append(deciph(modus[rr][0]))
rr=[]
strf='22'
for i in range(0, counter):
rr.append(results[i].find(strf))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step06_run_weilbull_hazard_test
# @Date: 2020/3/25
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m ConstructRegressionFile.Stata.step06_run_weilbull_hazard_test
"""
import os
from Constants import Constants as const
if __name__ == '__main__':
date_str = '20200326'
save_file = os.path.join(const.STATA_CODE_PATH, '{}_wh_code_5.do'.format(date_str))
output_path = os.path.join(const.STATA_RESULT_PATH, '{}_wh_3'.format(date_str))
if not os.path.isdir(output_path):
os.makedirs(output_path)
cmd_list = ['clear',
'use "{}"'.format(os.path.join(const.STATA_DATA_PATH, '20200326_weibull_harzard_model_data2.dta')),
'stset fyear, f(post_event) id(country_iso3)']
ctrl_list = [
['ln_GDP', 'ln_GDP_PC'],
['ln_GDP', 'ln_GDP_PC', 'NY_GDP_MKTP_KD_ZG'],
['ln_GDP', 'ln_GDP_PC', 'NE_EXP_GNFS_KD_ZG', 'NE_IMP_GNFS_KD_ZG'],
['ln_GDP', 'ln_GDP_PC', 'NE_EXP_GNFS_KD_ZG', 'NE_IMP_GNFS_KD_ZG', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'NE_EXP_GNFS_KD_ZG', 'NE_IMP_GNFS_KD_ZG', 'FP_CPI_TOTL_ZG', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'SL_UEM_TOTL_ZS', 'FP_CPI_TOTL_ZG'],
['ln_GDP', 'ln_GDP_PC', 'NE_IMP_GNFS_ZS', 'NE_EXP_GNFS_ZS', 'NE_EXP_GNFS_KD_ZG', 'NE_IMP_GNFS_KD_ZG'],
['ln_GDP', 'ln_GDP_PC', 'NE_IMP_GNFS_ZS', 'NE_EXP_GNFS_ZS'],
['ln_GDP', 'ln_GDP_PC', 'NE_IMP_GNFS_ZS', 'NE_EXP_GNFS_ZS', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'NE_IMP_GNFS_ZS', 'NE_EXP_GNFS_ZS', 'SL_UEM_TOTL_ZS', 'FP_CPI_TOTL_ZG'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_TOTL_ZS', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_TOTL_ZS', 'NY_GDP_MKTP_KD_ZG'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_MANF_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_MANF_ZS', 'SL_UEM_TOTL_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_MANF_ZS', 'NY_GDP_MKTP_KD_ZG'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_MANF_ZS', 'CM_MKT_LCAP_GD_ZS'],
['ln_GDP', 'ln_GDP_PC', 'ln_IMPORT', 'ln_EXPORT', 'NV_IND_MANF_ZS', 'CM_MKT_LCAP_GD_ZS', 'NY_GDP_MKTP_KD_ZG'],
]
# ctrl_list = CTRL_LIST[:]
# dep_vars = 'TobinQ_1 TANGIBILITY_1 ROA_1 R_B0_1 CASH_HOLDING_1 CASH_RATIO_1 CAPEX_1 ln_sale_1 ln_emp_1 SALE_RATIO_1'
dep_vars = 'CAPEX R_D TANGIBILITY ROA SALE_RATIO ln_emp TobinQ MV'
output_option = 'addtext(Cluster, Country) pvalue bdec(4) pdec(4) rdec(4) addstat(chi-square test, e(chi2)) ' \
'nolabel append'
i = 13
for ctrl in ctrl_list:
output_file = os.path.join(output_path, 'control_combination_{}.xls'.format(i))
        cmd_list.append('foreach dep_var in {} {{'.format(dep_vars))
cmd_list.append(" capture qui streg `dep_var' {}, vce(cluster country_iso3) d(w)".format(' '.join(ctrl)))
cmd_list.append(' outreg2 using "{}", {}'.format(output_file, output_option))
cmd_list.append('}\n')
i += 1
with open(save_file, 'w') as f:
f.write('\n'.join(cmd_list))
print('do "{}"'.format(save_file))
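# For reference, one generated block looks roughly like this (path shortened; derived from the
# format strings above, so treat it as an illustrative sketch rather than verified output):
#
#   foreach dep_var in CAPEX R_D TANGIBILITY ROA SALE_RATIO ln_emp TobinQ MV {
#       capture qui streg `dep_var' ln_GDP ln_GDP_PC, vce(cluster country_iso3) d(w)
#       outreg2 using ".../control_combination_13.xls", addtext(Cluster, Country) pvalue bdec(4) pdec(4) rdec(4) addstat(chi-square test, e(chi2)) nolabel append
#   }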
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io, img_as_uint
from skimage.morphology import skeletonize, medial_axis, skeletonize_3d
from skimage.measure import regionprops, label
from skimage.filters import threshold_otsu
from skimage.measure._regionprops import _RegionProperties
from typing import Container
from numbers import Number
class BBox:
def __init__(self, rprops_bbox):
min_row, min_col, max_row, max_col = rprops_bbox
# regionprops bbox representation
self.min_row = min_row
self.min_col = min_col
self.max_row = max_row
self.max_col = max_col
self.bbox = rprops_bbox
# rectangle representation
self.x, self.y = min_col, min_row
self.width = max_col - min_col
self.height = max_row - min_row
# coordinate representation
self.P1 = (min_col, min_row)
self.P2 = (max_col, min_row)
self.P3 = (min_col, max_row)
self.P4 = (max_col, max_row)
def __repr__(self):
return str(self.bbox)
def __getitem__(self, item):
return self.bbox[item]
def IOU(self, other_bbox):
# determining the intersection coordinates
P1_int = (max(self.P1[0], other_bbox.P1[0]),
max(self.P1[1], other_bbox.P1[1]))
P4_int = (min(self.P4[0], other_bbox.P4[0]),
min(self.P4[1], other_bbox.P4[1]))
# check for intersections
if (P1_int[0] > P4_int[0]) or (P1_int[1] > P4_int[1]):
return 0
intersection_area = (P4_int[0] - P1_int[0]) * (P4_int[1] - P1_int[1])
union_area = self.area() + other_bbox.area() - intersection_area
return intersection_area / union_area
def area(self):
return self.width * self.height
class Hypo:
def __init__(self, rprops, dpm=False):
self.length = rprops.area
if dpm:
self.length /= dpm
self.bbox = BBox(rprops.bbox)
def __repr__(self):
return "[%d, %s]" % (self.length, self.bbox)
def IOU(self, other_hypo):
return self.bbox.IOU(other_hypo.bbox)
class HypoResult:
def __init__(self, rprops_or_hypos, dpm=False):
if isinstance(rprops_or_hypos[0], Hypo):
self.hypo_list = rprops_or_hypos
elif isinstance(rprops_or_hypos[0], _RegionProperties):
self.hypo_list = [Hypo(rprops, dpm) for rprops in rprops_or_hypos]
self.gt_match = None
def __getitem__(self, item):
if isinstance(item, Number):
return self.hypo_list[item]
if isinstance(item, Container):
# check the datatype of the list
if isinstance(item[0], np.bool_):
item = [idx for idx, val in enumerate(item) if val]
return HypoResult([self.hypo_list[idx] for idx in item])
def __len__(self):
return len(self.hypo_list)
def mean(self):
return np.mean([hypo.length for hypo in self.hypo_list])
def std(self):
return np.std([hypo.length for hypo in self.hypo_list])
def score(self, gt_hyporesult, match_threshold=0.5):
scores = []
hypo_ious = np.zeros((len(self), len(gt_hyporesult)))
objectwise_df = pd.DataFrame(columns=['algorithm', 'ground truth'], index=range(len(gt_hyporesult)))
for hypo_idx, hypo in enumerate(self.hypo_list):
hypo_ious[hypo_idx] = np.array([hypo.IOU(gt_hypo) for gt_hypo in gt_hyporesult])
best_match = np.argmax(hypo_ious[hypo_idx])
# a match is found if the intersection over union metric is
# larger than the given threshold
if hypo_ious[hypo_idx][best_match] > match_threshold:
# calculate the accuracy of the measurement
gt_hypo = gt_hyporesult[best_match]
error = abs(hypo.length - gt_hypo.length)
scores.append(1 - error/gt_hypo.length)
gt_hypo_ious = hypo_ious.T
for gt_hypo_idx, gt_hypo in enumerate(gt_hyporesult):
objectwise_df.loc[gt_hypo_idx, 'ground truth'] = gt_hypo.length
best_match = np.argmax(gt_hypo_ious[gt_hypo_idx])
if gt_hypo_ious[gt_hypo_idx][best_match] > match_threshold:
objectwise_df.loc[gt_hypo_idx, 'algorithm'] = self.hypo_list[best_match].length
# precision, recall
self.gt_match = np.apply_along_axis(np.any, 0, hypo_ious > match_threshold)
self.match = np.apply_along_axis(np.any, 1, hypo_ious > match_threshold)
# identified_objects = self[self.match]
true_positives = self.gt_match.sum()
precision = true_positives/len(self)
recall = true_positives/len(gt_hyporesult)
score_dict = {'accuracy': np.mean(scores),
'precision': precision,
'recall': recall,
'gt_mean': gt_hyporesult.mean(),
'result_mean': self.mean(),
'gt_std': gt_hyporesult.std(),
'result_std': self.std()}
return score_dict, objectwise_df
def make_df(self):
result_df = pd.DataFrame(
[[hypo.length, *hypo.bbox] for hypo in self.hypo_list],
columns=['length', 'min_row', 'min_col', 'max_row', 'max_col'],
index=range(1, len(self)+1)
)
return result_df
def hist(self, gt_hyporesult, export_path):
lengths = [hypo.length for hypo in self.hypo_list]
gt_lengths = [hypo.length for hypo in gt_hyporesult]
histogram_bins = range(0, 500, 10)
with plt.style.context('seaborn-white'):
plt.figure(figsize=(10, 15))
plt.hist(lengths, bins=histogram_bins, color='r', alpha=0.2, label='result')
plt.hist(gt_lengths, bins=histogram_bins, color='b', alpha=0.2, label='ground truth')
plt.legend()
plt.savefig(export_path)
plt.close('all')
def filter(self, flt):
if isinstance(flt, Container):
min_length, max_length = flt
self.hypo_list = [h for h in self.hypo_list if min_length <= h.length <= max_length]
elif isinstance(flt, bool) and flt:
otsu_thresh = threshold_otsu(np.array([h.length for h in self.hypo_list]))
self.hypo_list = [h for h in self.hypo_list if otsu_thresh <= h.length]
def bbox_to_rectangle(bbox):
# bbox format: 'min_row', 'min_col', 'max_row', 'max_col'
# Rectangle format: bottom left (x, y), width, height
min_row, min_col, max_row, max_col = bbox
x, y = min_col, min_row
width = max_col - min_col
height = max_row - min_row
return (x, y), width, height
def get_hypo_rprops(hypo, filter=True, already_skeletonized=False, skeleton_method=skeletonize_3d,
return_skeleton=False, dpm=False):
"""
Args:
hypo: segmented hypocotyl image
filter: boolean or list of [min_length, max_length]
"""
hypo_thresh = (hypo > 0.5)
if not already_skeletonized:
hypo_skeleton = label(img_as_uint(skeleton_method(hypo_thresh)))
else:
hypo_skeleton = label(img_as_uint(hypo_thresh))
hypo_rprops = regionprops(hypo_skeleton)
# filter out small regions
hypo_result = HypoResult(hypo_rprops, dpm)
hypo_result.filter(flt=filter)
if return_skeleton:
return hypo_result, hypo_skeleton > 0
return hypo_result
def visualize_regions(hypo_img, hypo_result, export_path=None, bbox_color='r', dpi=800):
with plt.style.context('seaborn-white'):
# parameters
fontsize = 3.0 * (800.0 / dpi)
linewidth = fontsize / 10.0
figsize = (hypo_img.shape[1]/dpi, hypo_img.shape[0]/dpi)
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = plt.Axes(fig, [0,0,1,1]) #plt.subplot(111)
fig.add_axes(ax)
ax.imshow(hypo_img)
for hypo_idx, hypo in enumerate(hypo_result):
rectangle = patches.Rectangle((hypo.bbox.x, hypo.bbox.y), hypo.bbox.width, hypo.bbox.height,
linewidth=linewidth, edgecolor=bbox_color, facecolor='none')
ax.add_patch(rectangle)
ax.text(hypo.bbox.x, hypo.bbox.y - linewidth - 24, "N.%d." % (hypo_idx+1), fontsize=fontsize, color='k')
ax.text(hypo.bbox.x, hypo.bbox.y - linewidth, str(hypo.length)[:4], fontsize=fontsize, color=bbox_color)
fig.axes[0].get_xaxis().set_visible(False)
fig.axes[0].get_yaxis().set_visible(False)
if export_path is None:
plt.show()
else:
plt.savefig(export_path, dpi=dpi)
plt.close('all')
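# Hedged usage sketch (not part of the original module; file names below are assumptions).
if __name__ == "__main__":
    seg = io.imread("segmented_hypocotyls.png")            # a binary segmentation mask
    result = get_hypo_rprops(seg, filter=True)             # skeletonize, measure, Otsu-filter lengths
    print(result.make_df())                                # per-object lengths and bounding boxes
    visualize_regions(seg, result, export_path="annotated_hypocotyls.png")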
|
#------------------------------------------------------------------------------------------------
# Flight time and delay predictor
# Description: Simple python script that runs a regression to predict actual flight time
# and probability of delay of airlines by route
# Creation Date: Nov 23, 2016
#------------------------------------------------------------------------------------------------
#------------------------------------------------
# Import Libraries
# Here we import all of the necessary libraries
#------------------------------------------------
import pandas as pd
import statsmodels.api as sm
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import matplotlib.pyplot as plt
#------------------------------------------------
# Extract Data from the CSV downloaded from the BOT Website
#------------------------------------------------
gdf = pd.read_csv("./CSV/merged.csv")
#list(gdf.columns.values) #Lists all of the columns names
#------------------------------------------------
# Select data to use -- We select the columns we are going to use and filter by the major airports
#------------------------------------------------
#df1 = gdf[['QUARTER', 'MONTH', 'DAY_OF_MONTH', 'DAY_OF_WEEK', 'FL_DATE', 'AIRLINE_ID', 'FL_NUM', 'ORIGIN', 'DEST', 'DEP_TIME', 'DEP_DELAY', 'DEP_DELAY_NEW', 'DEP_DEL15', 'DEP_DELAY_GROUP','ARR_TIME', 'ARR_DELAY', 'ARR_DELAY_NEW', 'ARR_DEL15', 'ARR_DELAY_GROUP','CANCELLED', 'CANCELLATION_CODE', 'DIVERTED','ACTUAL_ELAPSED_TIME', 'AIR_TIME', 'FLIGHTS', 'DISTANCE', 'DISTANCE_GROUP']]
#Select the columns we need for the analysis
# We use these columns because previous analysis found them to be the most relevant
df1 = gdf[['AIRLINE_ID','ORIGIN', 'DEST', 'DEP_TIME','ARR_TIME','DEP_DELAY','ARR_DELAY','CANCELLED','DIVERTED','ACTUAL_ELAPSED_TIME']]
#Filter by the airports we want to concentrate the analysis on (ATL, DFW, JFK, LAX and ORD)
#We only used the most important airports
df2 = df1.query('(ORIGIN == "ATL" or ORIGIN == "DFW" or ORIGIN == "JFK" or ORIGIN == "LAX" or ORIGIN == "ORD") and (DEST == "ATL" or DEST == "DFW" or DEST == "JFK" or DEST == "LAX" or DEST == "ORD")')
#------------------------------------------------
#Get Random Sample Data from the data
#------------------------------------------------
#Get a 10,000 sample data
sampledf = df2.sample(n=10000)
#------------------------------------------------
# Clean Data
#------------------------------------------------
#Trim the string columns to avoid any unexpected error
sampledf["AIRLINE_ID"] = sampledf.apply(lambda row: str.strip(str(row.AIRLINE_ID)), axis=1)
sampledf["ORIGIN"] = sampledf.apply(lambda row: str.strip(str(row.ORIGIN)), axis=1)
sampledf["DEST"] = sampledf.apply(lambda row: str.strip(str(row.DEST)), axis=1)
#Delete any rows with null values
sampledf = sampledf.dropna()
#Change Actual Elapse Time to an integer
sampledf["ACTUAL_ELAPSED_TIME"] = sampledf.apply(lambda row: int(float(row.ACTUAL_ELAPSED_TIME)), axis=1)
#Clean invalid Data - any flight that has negative time
sampledf = sampledf[sampledf.ACTUAL_ELAPSED_TIME >= 0]
#------------------------------------------------
# Add new Columns
#------------------------------------------------
#Calculate flight period columns - Morning is from 6 to 12, Afternoon is from 12 to 19, Night is from 19 to 24, and Dawn is from 24 to 6
sampledf["Morning"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 600 and int(row.DEP_TIME) < 1200) else 0, axis=1)
sampledf["Afternoon"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 1200 and int(row.DEP_TIME) < 1900) else 0, axis=1)
sampledf["Night"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 1900 and int(row.DEP_TIME) < 2400) else 0, axis=1)
sampledf["Dawn"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 2400 and int(row.DEP_TIME) < 600) else 0, axis=1)
#Calculate Delayed Column - Calculates if a flight was delayed or not; considers cancelled, diverted, or arrival delay over 30 min a delay
sampledf["Delayed"] = sampledf.apply(lambda row: 1 if(row.CANCELLED or row.DIVERTED or row.ARR_DELAY > 30) else 0 , axis=1)
#------------------------------------------------
# Dummy Variables
#------------------------------------------------
#Create dummy variables for each relevant column
originDummy = pd.get_dummies(sampledf["ORIGIN"], prefix="ORG", drop_first=True)
destDummy = pd.get_dummies(sampledf["DEST"], prefix="DST", drop_first=True)
airlineDummy = pd.get_dummies(sampledf["AIRLINE_ID"], prefix="AIRLN", drop_first=True)
#Create a table for the regression by concatenating all of the dummy columns and the dependant variable
dummyDf = pd.DataFrame()
dummyDf = pd.concat([originDummy,destDummy,airlineDummy,sampledf['Morning'], sampledf['Afternoon'], sampledf['Night'],sampledf['Delayed'],sampledf['ACTUAL_ELAPSED_TIME']], axis=1)
#------------------------------------------------
# Split Test & Learn Datasets
#------------------------------------------------
#Split the sample data into training and test data sets -- Test size is 20% of the whole data set
trainingDF, testDF = train_test_split(dummyDf, test_size = 0.2)
#len(testDF.axes[0])
#Make sure all variables are an integer for the regression
trainingDF = trainingDF.applymap(np.int)
testDF = testDF.applymap(np.int)
#------------------------------------------------
# 50-50 Data - Divide data to have 50% delayed rows and 50% non delayed rows
#------------------------------------------------
#Get 500 rows delayed and non-delayed for the training set
trainingDFDelayed = trainingDF[trainingDF.Delayed == 1].head(500)
trainingDFNotDelayed = trainingDF[trainingDF.Delayed == 0].head(500)
#Merge the two data sets
allTraining = [trainingDFNotDelayed,trainingDFDelayed]
trainingDF = pd.concat(allTraining)
#Get 100 rows delayed and non-delayed for the testing set
testDFDelayed = testDF[testDF.Delayed == 1].head(100)
testDFNotDelayed = testDF[testDF.Delayed == 0].head(100)
#Merge the two data sets
allTest = [testDFDelayed,testDFNotDelayed]
testDF = pd.concat(allTest)
#------------------------------------------------
# Regression - Delayed
#------------------------------------------------
#Run the regression to predict the delayed flights
XValues = sm.add_constant(trainingDF[trainingDF.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])], prepend=False)
resultDelayed = sm.OLS(trainingDF['Delayed'], XValues).fit()
print(resultDelayed.summary())
#------------------------------------------------
# Regression - Predicted Total Time (Flight time + taxi)
#------------------------------------------------
#Run the regression to predict total time of flights
XValues = sm.add_constant(trainingDF[trainingDF.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])])
resultTime = sm.OLS(trainingDF['ACTUAL_ELAPSED_TIME'], XValues ).fit()
print(resultTime.summary())
#------------------------------------------------
# Output Prediction Data to CSV
#------------------------------------------------
#Output of sample data
resultTime.params.to_frame().to_csv(path_or_buf="./conf/paramsTime.csv", sep=',')
resultDelayed.params.to_frame().to_csv(path_or_buf="./conf/paramsDelayed.csv", sep=',')
#---------------------------------------------------------------------------------------------------------------
#-----------VALIDATION AND TESTING -----------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------
#------------------------------------------------
# Validate with Test Data -- Delayed Prediction
#------------------------------------------------
#Copy of the testing data set
validateDataDelay = testDF.copy()
#Get a subset of the data without the validation data
subsetPredictDelay = validateDataDelay[validateDataDelay.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])]
#Predict the outcome with the regression and put the result in a new column
subsetPredictDelay['Calculated_Delay'] = subsetPredictDelay.apply(lambda row: (row * resultDelayed.params).sum(),axis=1)
#Add the real outcome in a new column
subsetPredictDelay["Real_Delayed"] = testDF["Delayed"]
#------------------------------------------------
# Validate with Test Data -- Predicted Total Time (Flight time + taxi)
#------------------------------------------------
#Copy of the testing data set
validateDataTime = testDF.copy()
subsetPredictTime = validateDataTime[validateDataTime.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])]
subsetPredictTime["const"] = 1
subsetPredictTime['Calculated'] = subsetPredictTime.apply(lambda row: (row * resultTime.params).sum(),axis=1)
subsetPredictTime["ACTUAL_ELAPSED_TIME"] = validateDataTime["ACTUAL_ELAPSED_TIME"]
subsetPredictTime["Difference"] = subsetPredictTime.apply(lambda row: abs(row.ACTUAL_ELAPSED_TIME - row.Calculated), axis=1)
#------------------------------------------------
# Calculate ROC -- Predicted Total Time (Testing set is used here)
#------------------------------------------------
#Create dataframe with the difference ranges
roicTime = pd.DataFrame({"Values":range(int(subsetPredictTime["Difference"].min()),int(subsetPredictTime["Difference"].max()),10)})
roicTime["Percentage"] = roicTime.apply(lambda row: len(subsetPredictTime[subsetPredictTime.Difference < row.Values]["Difference"]) / len(subsetPredictTime["Difference"]) * 100, axis=1 )
roicTime.to_csv(path_or_buf="./CSV/test3.csv")
roicTime
plt.plot(roicTime.Values,roicTime.Percentage)
plt.show()
#------------------------------------------------
# Calculate ROC -- Predicted Delay (Testing set is used here)
#------------------------------------------------
roicDelay = pd.DataFrame({"Values": np.arange(subsetPredictDelay["Calculated_Delay"].min(),subsetPredictDelay["Calculated_Delay"].max(),0.1)})
#True Positive
roicDelay["T_P"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay > row.Values) & (subsetPredictDelay.Real_Delayed == 1)]),axis=1)
#False Positive
roicDelay["F_P"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay > row.Values) & (subsetPredictDelay.Real_Delayed == 0)]),axis=1)
#True Negative
roicDelay["T_N"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay < row.Values) & (subsetPredictDelay.Real_Delayed == 0)]),axis=1)
#False Negative
roicDelay["F_N"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay < row.Values) & (subsetPredictDelay.Real_Delayed == 1)]),axis=1)
#False Positive Rate
roicDelay["F_P_R"] = roicDelay.apply(lambda row: row["F_P"]/(row["F_P"] + row["T_N"]),axis=1)
#Recall (True Positive Rate)
roicDelay["Recall"] = roicDelay.apply(lambda row: row["T_P"]/(row["T_P"] + row["F_N"]),axis=1)
roicDelay.to_csv("")
#Plot graph
plt.plot(roicDelay["F_P_R"],roicDelay["Recall"] )
plt.xlabel("F_P_R")
plt.ylabel("Recall")
plt.title('ROC Chart')
plt.show()
|
# Admin Request Handler
# Manager for adding books and configure database
# Later will support users manage
import logging
import tornado.web
class IndexHandler(tornado.web.RequestHandler):
"""
Admin page Index Request Handler
"""
def get(self):
self.write("Hello I am admin manager")
class BooksManagerHandler(tornado.web.RequestHandler):
"""
    The page for managing books
Function:
add -- add ebooks information and file path to database
delete --delete ebook info from database
"""
def get(self):
self.write("Hello, here is book manager page")
class AuthLoginManagerHandler(tornado.web.RequestHandler):
"""
Admin login Request Handler
"""
def get(self):
self.write("Login manager")
class AuthLogoutManagerHandler(tornado.web.RequestHandler):
"""
Admin logout Request handler
"""
def get(self):
self.write("admin logout manager")
|
print("Hello World! I am Joel Okpara")
|
from django.db import models
class Contact(models.Model):
# primary_key
id = models.AutoField(auto_created=True, primary_key=True)
# contact_type = models.CharField(max_length=32, verbose_name="문의 유형")
contact_type = models.CharField(max_length=32, verbose_name="문의 유형")
# contact_user = models.ForeignKey('login.NcUser', on_delete=models.CASCADE, verbose_name='문의한 유저')
contact_email = models.EmailField(max_length=64, verbose_name='연락받을 이메일')
contact_title = models.CharField(max_length=64, verbose_name='문의 제목')
contact_contents = models.CharField(max_length=2000, verbose_name='문의 내용')
# contact_is_answer = models.BooleanField(default=False, verbose_name='답변 여부')
contact_is_answer = models.BooleanField(default=False, verbose_name='답변 여부')
# contact_regist_dttm = models.DateTimeField(auto_now_add=True, verbose_name='등록시간')
contact_regist_dttm = models.DateTimeField(auto_now_add=True, verbose_name='등록시간')
def __str__(self):
return self.contact_title
class Meta:
db_table = 'nc_about_contact'
verbose_name = '문의 및 버그신고'
verbose_name_plural = '문의 및 버그신고'
|